diff options
511 files changed, 8182 insertions, 3319 deletions
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt new file mode 100644 index 000000000000..ae8af1694e95 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt | |||
| @@ -0,0 +1,93 @@ | |||
| 1 | Pinctrl-based I2C Bus Mux | ||
| 2 | |||
| 3 | This binding describes an I2C bus multiplexer that uses pin multiplexing to | ||
| 4 | route the I2C signals, and represents the pin multiplexing configuration | ||
| 5 | using the pinctrl device tree bindings. | ||
| 6 | |||
| 7 | +-----+ +-----+ | ||
| 8 | | dev | | dev | | ||
| 9 | +------------------------+ +-----+ +-----+ | ||
| 10 | | SoC | | | | ||
| 11 | | /----|------+--------+ | ||
| 12 | | +---+ +------+ | child bus A, on first set of pins | ||
| 13 | | |I2C|---|Pinmux| | | ||
| 14 | | +---+ +------+ | child bus B, on second set of pins | ||
| 15 | | \----|------+--------+--------+ | ||
| 16 | | | | | | | ||
| 17 | +------------------------+ +-----+ +-----+ +-----+ | ||
| 18 | | dev | | dev | | dev | | ||
| 19 | +-----+ +-----+ +-----+ | ||
| 20 | |||
| 21 | Required properties: | ||
| 22 | - compatible: i2c-mux-pinctrl | ||
| 23 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side | ||
| 24 | port is connected to. | ||
| 25 | |||
| 26 | Also required are: | ||
| 27 | |||
| 28 | * Standard pinctrl properties that specify the pin mux state for each child | ||
| 29 | bus. See ../pinctrl/pinctrl-bindings.txt. | ||
| 30 | |||
| 31 | * Standard I2C mux properties. See mux.txt in this directory. | ||
| 32 | |||
| 33 | * I2C child bus nodes. See mux.txt in this directory. | ||
| 34 | |||
| 35 | For each named state defined in the pinctrl-names property, an I2C child bus | ||
| 36 | will be created. I2C child bus numbers are assigned based on the index into | ||
| 37 | the pinctrl-names property. | ||
| 38 | |||
| 39 | The only exception is that no bus will be created for a state named "idle". If | ||
| 40 | such a state is defined, it must be the last entry in pinctrl-names. For | ||
| 41 | example: | ||
| 42 | |||
| 43 | pinctrl-names = "ddc", "pta", "idle" -> ddc = bus 0, pta = bus 1 | ||
| 44 | pinctrl-names = "ddc", "idle", "pta" -> Invalid ("idle" not last) | ||
| 45 | pinctrl-names = "idle", "ddc", "pta" -> Invalid ("idle" not last) | ||
| 46 | |||
| 47 | Whenever an access is made to a device on a child bus, the relevant pinctrl | ||
| 48 | state will be programmed into hardware. | ||
| 49 | |||
| 50 | If an idle state is defined, whenever an access is not being made to a device | ||
| 51 | on a child bus, the idle pinctrl state will be programmed into hardware. | ||
| 52 | |||
| 53 | If an idle state is not defined, the most recently used pinctrl state will be | ||
| 54 | left programmed into hardware whenever no access is being made of a device on | ||
| 55 | a child bus. | ||
| 56 | |||
| 57 | Example: | ||
| 58 | |||
| 59 | i2cmux { | ||
| 60 | compatible = "i2c-mux-pinctrl"; | ||
| 61 | #address-cells = <1>; | ||
| 62 | #size-cells = <0>; | ||
| 63 | |||
| 64 | i2c-parent = <&i2c1>; | ||
| 65 | |||
| 66 | pinctrl-names = "ddc", "pta", "idle"; | ||
| 67 | pinctrl-0 = <&state_i2cmux_ddc>; | ||
| 68 | pinctrl-1 = <&state_i2cmux_pta>; | ||
| 69 | pinctrl-2 = <&state_i2cmux_idle>; | ||
| 70 | |||
| 71 | i2c@0 { | ||
| 72 | reg = <0>; | ||
| 73 | #address-cells = <1>; | ||
| 74 | #size-cells = <0>; | ||
| 75 | |||
| 76 | eeprom { | ||
| 77 | compatible = "eeprom"; | ||
| 78 | reg = <0x50>; | ||
| 79 | }; | ||
| 80 | }; | ||
| 81 | |||
| 82 | i2c@1 { | ||
| 83 | reg = <1>; | ||
| 84 | #address-cells = <1>; | ||
| 85 | #size-cells = <0>; | ||
| 86 | |||
| 87 | eeprom { | ||
| 88 | compatible = "eeprom"; | ||
| 89 | reg = <0x50>; | ||
| 90 | }; | ||
| 91 | }; | ||
| 92 | }; | ||
| 93 | |||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c45513d806ab..a92c5ebf373e 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
| 2543 | 2543 | ||
| 2544 | sched_debug [KNL] Enables verbose scheduler debug messages. | 2544 | sched_debug [KNL] Enables verbose scheduler debug messages. |
| 2545 | 2545 | ||
| 2546 | skew_tick= [KNL] Offset the periodic timer tick per cpu to mitigate | ||
| 2547 | xtime_lock contention on larger systems, and/or RCU lock | ||
| 2548 | contention on all systems with CONFIG_MAXSMP set. | ||
| 2549 | Format: { "0" | "1" } | ||
| 2550 | 0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1") | ||
| 2551 | 1 -- enable. | ||
| 2552 | Note: increases power consumption, thus should only be | ||
| 2553 | enabled if running jitter sensitive (HPC/RT) workloads. | ||
| 2554 | |||
| 2546 | security= [SECURITY] Choose a security module to enable at boot. | 2555 | security= [SECURITY] Choose a security module to enable at boot. |
| 2547 | If this boot parameter is not specified, only the first | 2556 | If this boot parameter is not specified, only the first |
| 2548 | security module asking for security registration will be | 2557 | security module asking for security registration will be |
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt index ab1e8d7004c5..5cb9a1972460 100644 --- a/Documentation/networking/stmmac.txt +++ b/Documentation/networking/stmmac.txt | |||
| @@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC | |||
| 10 | (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 | 10 | (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 |
| 11 | FF1152AMT0221 D1215994A VIRTEX FPGA board. | 11 | FF1152AMT0221 D1215994A VIRTEX FPGA board. |
| 12 | 12 | ||
| 13 | DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100 | 13 | DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether |
| 14 | Universal version 4.0 have been used for developing this driver. | 14 | MAC 10/100 Universal version 4.0 have been used for developing this driver. |
| 15 | 15 | ||
| 16 | This driver supports both the platform bus and PCI. | 16 | This driver supports both the platform bus and PCI. |
| 17 | 17 | ||
| @@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature. | |||
| 54 | When one or more packets are received, an interrupt happens. The interrupts | 54 | When one or more packets are received, an interrupt happens. The interrupts |
| 55 | are not queued so the driver has to scan all the descriptors in the ring during | 55 | are not queued so the driver has to scan all the descriptors in the ring during |
| 56 | the receive process. | 56 | the receive process. |
| 57 | This is based on NAPI so the interrupt handler signals only if there is work to be | 57 | This is based on NAPI so the interrupt handler signals only if there is work |
| 58 | done, and it exits. | 58 | to be done, and it exits. |
| 59 | Then the poll method will be scheduled at some future point. | 59 | Then the poll method will be scheduled at some future point. |
| 60 | The incoming packets are stored, by the DMA, in a list of pre-allocated socket | 60 | The incoming packets are stored, by the DMA, in a list of pre-allocated socket |
| 61 | buffers in order to avoid the memcpy (Zero-copy). | 61 | buffers in order to avoid the memcpy (Zero-copy). |
| 62 | 62 | ||
| 63 | 4.3) Timer-Driver Interrupt | 63 | 4.3) Timer-Driver Interrupt |
| 64 | Instead of having the device that asynchronously notifies the frame receptions, the | 64 | Instead of having the device that asynchronously notifies the frame receptions, |
| 65 | driver configures a timer to generate an interrupt at regular intervals. | 65 | the driver configures a timer to generate an interrupt at regular intervals. |
| 66 | Based on the granularity of the timer, the frames that are received by the device | 66 | Based on the granularity of the timer, the frames that are received by the |
| 67 | will experience different levels of latency. Some NICs have dedicated timer | 67 | device will experience different levels of latency. Some NICs have dedicated |
| 68 | device to perform this task. STMMAC can use either the RTC device or the TMU | 68 | timer device to perform this task. STMMAC can use either the RTC device or the |
| 69 | channel 2 on STLinux platforms. | 69 | TMU channel 2 on STLinux platforms. |
| 70 | The timers frequency can be passed to the driver as parameter; when change it, | 70 | The timers frequency can be passed to the driver as parameter; when change it, |
| 71 | take care of both hardware capability and network stability/performance impact. | 71 | take care of both hardware capability and network stability/performance impact. |
| 72 | Several performance tests on STM platforms showed this optimisation allows to spare | 72 | Several performance tests on STM platforms showed this optimisation allows to |
| 73 | the CPU while having the maximum throughput. | 73 | spare the CPU while having the maximum throughput. |
| 74 | 74 | ||
| 75 | 4.4) WOL | 75 | 4.4) WOL |
| 76 | Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC | 76 | Wake up on Lan feature through Magic and Unicast frames are supported for the |
| 77 | core. | 77 | GMAC core. |
| 78 | 78 | ||
| 79 | 4.5) DMA descriptors | 79 | 4.5) DMA descriptors |
| 80 | Driver handles both normal and enhanced descriptors. The latter has been only | 80 | Driver handles both normal and enhanced descriptors. The latter has been only |
| @@ -106,7 +106,8 @@ Several driver's information can be passed through the platform | |||
| 106 | These are included in the include/linux/stmmac.h header file | 106 | These are included in the include/linux/stmmac.h header file |
| 107 | and detailed below as well: | 107 | and detailed below as well: |
| 108 | 108 | ||
| 109 | struct plat_stmmacenet_data { | 109 | struct plat_stmmacenet_data { |
| 110 | char *phy_bus_name; | ||
| 110 | int bus_id; | 111 | int bus_id; |
| 111 | int phy_addr; | 112 | int phy_addr; |
| 112 | int interface; | 113 | int interface; |
| @@ -124,19 +125,24 @@ and detailed below as well: | |||
| 124 | void (*bus_setup)(void __iomem *ioaddr); | 125 | void (*bus_setup)(void __iomem *ioaddr); |
| 125 | int (*init)(struct platform_device *pdev); | 126 | int (*init)(struct platform_device *pdev); |
| 126 | void (*exit)(struct platform_device *pdev); | 127 | void (*exit)(struct platform_device *pdev); |
| 128 | void *custom_cfg; | ||
| 129 | void *custom_data; | ||
| 127 | void *bsp_priv; | 130 | void *bsp_priv; |
| 128 | }; | 131 | }; |
| 129 | 132 | ||
| 130 | Where: | 133 | Where: |
| 134 | o phy_bus_name: phy bus name to attach to the stmmac. | ||
| 131 | o bus_id: bus identifier. | 135 | o bus_id: bus identifier. |
| 132 | o phy_addr: the physical address can be passed from the platform. | 136 | o phy_addr: the physical address can be passed from the platform. |
| 133 | If it is set to -1 the driver will automatically | 137 | If it is set to -1 the driver will automatically |
| 134 | detect it at run-time by probing all the 32 addresses. | 138 | detect it at run-time by probing all the 32 addresses. |
| 135 | o interface: PHY device's interface. | 139 | o interface: PHY device's interface. |
| 136 | o mdio_bus_data: specific platform fields for the MDIO bus. | 140 | o mdio_bus_data: specific platform fields for the MDIO bus. |
| 137 | o pbl: the Programmable Burst Length is maximum number of beats to | 141 | o dma_cfg: internal DMA parameters |
| 142 | o pbl: the Programmable Burst Length is maximum number of beats to | ||
| 138 | be transferred in one DMA transaction. | 143 | be transferred in one DMA transaction. |
| 139 | GMAC also enables the 4xPBL by default. | 144 | GMAC also enables the 4xPBL by default. |
| 145 | o fixed_burst/mixed_burst/burst_len | ||
| 140 | o clk_csr: fixed CSR Clock range selection. | 146 | o clk_csr: fixed CSR Clock range selection. |
| 141 | o has_gmac: uses the GMAC core. | 147 | o has_gmac: uses the GMAC core. |
| 142 | o enh_desc: if sets the MAC will use the enhanced descriptor structure. | 148 | o enh_desc: if sets the MAC will use the enhanced descriptor structure. |
| @@ -160,8 +166,9 @@ Where: | |||
| 160 | this is sometime necessary on some platforms (e.g. ST boxes) | 166 | this is sometime necessary on some platforms (e.g. ST boxes) |
| 161 | where the HW needs to have set some PIO lines or system cfg | 167 | where the HW needs to have set some PIO lines or system cfg |
| 162 | registers. | 168 | registers. |
| 163 | o custom_cfg: this is a custom configuration that can be passed while | 169 | o custom_cfg/custom_data: this is a custom configuration that can be passed |
| 164 | initialising the resources. | 170 | while initialising the resources. |
| 171 | o bsp_priv: another private pointer. | ||
| 165 | 172 | ||
| 166 | For MDIO bus The we have: | 173 | For MDIO bus The we have: |
| 167 | 174 | ||
| @@ -180,7 +187,6 @@ Where: | |||
| 180 | o irqs: list of IRQs, one per PHY. | 187 | o irqs: list of IRQs, one per PHY. |
| 181 | o probed_phy_irq: if irqs is NULL, use this for probed PHY. | 188 | o probed_phy_irq: if irqs is NULL, use this for probed PHY. |
| 182 | 189 | ||
| 183 | |||
| 184 | For DMA engine we have the following internal fields that should be | 190 | For DMA engine we have the following internal fields that should be |
| 185 | tuned according to the HW capabilities. | 191 | tuned according to the HW capabilities. |
| 186 | 192 | ||
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt new file mode 100644 index 000000000000..37067cf455f4 --- /dev/null +++ b/Documentation/vm/frontswap.txt | |||
| @@ -0,0 +1,278 @@ | |||
| 1 | Frontswap provides a "transcendent memory" interface for swap pages. | ||
| 2 | In some environments, dramatic performance savings may be obtained because | ||
| 3 | swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk. | ||
| 4 | |||
| 5 | (Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends" | ||
| 6 | and the only necessary changes to the core kernel for transcendent memory; | ||
| 7 | all other supporting code -- the "backends" -- is implemented as drivers. | ||
| 8 | See the LWN.net article "Transcendent memory in a nutshell" for a detailed | ||
| 9 | overview of frontswap and related kernel parts: | ||
| 10 | https://lwn.net/Articles/454795/ ) | ||
| 11 | |||
| 12 | Frontswap is so named because it can be thought of as the opposite of | ||
| 13 | a "backing" store for a swap device. The storage is assumed to be | ||
| 14 | a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming | ||
| 15 | to the requirements of transcendent memory (such as Xen's "tmem", or | ||
| 16 | in-kernel compressed memory, aka "zcache", or future RAM-like devices); | ||
| 17 | this pseudo-RAM device is not directly accessible or addressable by the | ||
| 18 | kernel and is of unknown and possibly time-varying size. The driver | ||
| 19 | links itself to frontswap by calling frontswap_register_ops to set the | ||
| 20 | frontswap_ops funcs appropriately and the functions it provides must | ||
| 21 | conform to certain policies as follows: | ||
| 22 | |||
| 23 | An "init" prepares the device to receive frontswap pages associated | ||
| 24 | with the specified swap device number (aka "type"). A "store" will | ||
| 25 | copy the page to transcendent memory and associate it with the type and | ||
| 26 | offset associated with the page. A "load" will copy the page, if found, | ||
| 27 | from transcendent memory into kernel memory, but will NOT remove the page | ||
| 28 | from transcendent memory. An "invalidate_page" will remove the page | ||
| 29 | from transcendent memory and an "invalidate_area" will remove ALL pages | ||
| 30 | associated with the swap type (e.g., like swapoff) and notify the "device" | ||
| 31 | to refuse further stores with that swap type. | ||
| 32 | |||
| 33 | Once a page is successfully stored, a matching load on the page will normally | ||
| 34 | succeed. So when the kernel finds itself in a situation where it needs | ||
| 35 | to swap out a page, it first attempts to use frontswap. If the store returns | ||
| 36 | success, the data has been successfully saved to transcendent memory and | ||
| 37 | a disk write and, if the data is later read back, a disk read are avoided. | ||
| 38 | If a store returns failure, transcendent memory has rejected the data, and the | ||
| 39 | page can be written to swap as usual. | ||
| 40 | |||
| 41 | If a backend chooses, frontswap can be configured as a "writethrough | ||
| 42 | cache" by calling frontswap_writethrough(). In this mode, the reduction | ||
| 43 | in swap device writes is lost (and also a non-trivial performance advantage) | ||
| 44 | in order to allow the backend to arbitrarily "reclaim" space used to | ||
| 45 | store frontswap pages to more completely manage its memory usage. | ||
| 46 | |||
| 47 | Note that if a page is stored and the page already exists in transcendent memory | ||
| 48 | (a "duplicate" store), either the store succeeds and the data is overwritten, | ||
| 49 | or the store fails AND the page is invalidated. This ensures stale data may | ||
| 50 | never be obtained from frontswap. | ||
| 51 | |||
| 52 | If properly configured, monitoring of frontswap is done via debugfs in | ||
| 53 | the /sys/kernel/debug/frontswap directory. The effectiveness of | ||
| 54 | frontswap can be measured (across all swap devices) with: | ||
| 55 | |||
| 56 | failed_stores - how many store attempts have failed | ||
| 57 | loads - how many loads were attempted (all should succeed) | ||
| 58 | succ_stores - how many store attempts have succeeded | ||
| 59 | invalidates - how many invalidates were attempted | ||
| 60 | |||
| 61 | A backend implementation may provide additional metrics. | ||
| 62 | |||
| 63 | FAQ | ||
| 64 | |||
| 65 | 1) Where's the value? | ||
| 66 | |||
| 67 | When a workload starts swapping, performance falls through the floor. | ||
| 68 | Frontswap significantly increases performance in many such workloads by | ||
| 69 | providing a clean, dynamic interface to read and write swap pages to | ||
| 70 | "transcendent memory" that is otherwise not directly addressable to the kernel. | ||
| 71 | This interface is ideal when data is transformed to a different form | ||
| 72 | and size (such as with compression) or secretly moved (as might be | ||
| 73 | useful for write-balancing for some RAM-like devices). Swap pages (and | ||
| 74 | evicted page-cache pages) are a great use for this kind of slower-than-RAM- | ||
| 75 | but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and | ||
| 76 | cleancache) interface to transcendent memory provides a nice way to read | ||
| 77 | and write -- and indirectly "name" -- the pages. | ||
| 78 | |||
| 79 | Frontswap -- and cleancache -- with a fairly small impact on the kernel, | ||
| 80 | provides a huge amount of flexibility for more dynamic, flexible RAM | ||
| 81 | utilization in various system configurations: | ||
| 82 | |||
| 83 | In the single kernel case, aka "zcache", pages are compressed and | ||
| 84 | stored in local memory, thus increasing the total anonymous pages | ||
| 85 | that can be safely kept in RAM. Zcache essentially trades off CPU | ||
| 86 | cycles used in compression/decompression for better memory utilization. | ||
| 87 | Benchmarks have shown little or no impact when memory pressure is | ||
| 88 | low while providing a significant performance improvement (25%+) | ||
| 89 | on some workloads under high memory pressure. | ||
| 90 | |||
| 91 | "RAMster" builds on zcache by adding "peer-to-peer" transcendent memory | ||
| 92 | support for clustered systems. Frontswap pages are locally compressed | ||
| 93 | as in zcache, but then "remotified" to another system's RAM. This | ||
| 94 | allows RAM to be dynamically load-balanced back-and-forth as needed, | ||
| 95 | i.e. when system A is overcommitted, it can swap to system B, and | ||
| 96 | vice versa. RAMster can also be configured as a memory server so | ||
| 97 | many servers in a cluster can swap, dynamically as needed, to a single | ||
| 98 | server configured with a large amount of RAM... without pre-configuring | ||
| 99 | how much of the RAM is available for each of the clients! | ||
| 100 | |||
| 101 | In the virtual case, the whole point of virtualization is to statistically | ||
| 102 | multiplex physical resources across the varying demands of multiple | ||
| 103 | virtual machines. This is really hard to do with RAM and efforts to do | ||
| 104 | it well with no kernel changes have essentially failed (except in some | ||
| 105 | well-publicized special-case workloads). | ||
| 106 | Specifically, the Xen Transcendent Memory backend allows otherwise | ||
| 107 | "fallow" hypervisor-owned RAM to not only be "time-shared" between multiple | ||
| 108 | virtual machines, but the pages can be compressed and deduplicated to | ||
| 109 | optimize RAM utilization. And when guest OS's are induced to surrender | ||
| 110 | underutilized RAM (e.g. with "selfballooning"), sudden unexpected | ||
| 111 | memory pressure may result in swapping; frontswap allows those pages | ||
| 112 | to be swapped to and from hypervisor RAM (if overall host system memory | ||
| 113 | conditions allow), thus mitigating the potentially awful performance impact | ||
| 114 | of unplanned swapping. | ||
| 115 | |||
| 116 | A KVM implementation is underway and has been RFC'ed to lkml. And, | ||
| 117 | using frontswap, investigation is also underway on the use of NVM as | ||
| 118 | a memory extension technology. | ||
| 119 | |||
| 120 | 2) Sure there may be performance advantages in some situations, but | ||
| 121 | what's the space/time overhead of frontswap? | ||
| 122 | |||
| 123 | If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into | ||
| 124 | nothingness and the only overhead is a few extra bytes per swapon'ed | ||
| 125 | swap device. If CONFIG_FRONTSWAP is enabled but no frontswap "backend" | ||
| 126 | registers, there is one extra global variable compared to zero for | ||
| 127 | every swap page read or written. If CONFIG_FRONTSWAP is enabled | ||
| 128 | AND a frontswap backend registers AND the backend fails every "store" | ||
| 129 | request (i.e. provides no memory despite claiming it might), | ||
| 130 | CPU overhead is still negligible -- and since every frontswap fail | ||
| 131 | precedes a swap page write-to-disk, the system is highly likely | ||
| 132 | to be I/O bound and using a small fraction of a percent of a CPU | ||
| 133 | will be irrelevant anyway. | ||
| 134 | |||
| 135 | As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend | ||
| 136 | registers, one bit is allocated for every swap page for every swap | ||
| 137 | device that is swapon'd. This is added to the EIGHT bits (which | ||
| 138 | was sixteen until about 2.6.34) that the kernel already allocates | ||
| 139 | for every swap page for every swap device that is swapon'd. (Hugh | ||
| 140 | Dickins has observed that frontswap could probably steal one of | ||
| 141 | the existing eight bits, but let's worry about that minor optimization | ||
| 142 | later.) For very large swap disks (which are rare) on a standard | ||
| 143 | 4K pagesize, this is 1MB per 32GB swap. | ||
| 144 | |||
| 145 | When swap pages are stored in transcendent memory instead of written | ||
| 146 | out to disk, there is a side effect that this may create more memory | ||
| 147 | pressure that can potentially outweigh the other advantages. A | ||
| 148 | backend, such as zcache, must implement policies to carefully (but | ||
| 149 | dynamically) manage memory limits to ensure this doesn't happen. | ||
| 150 | |||
| 151 | 3) OK, how about a quick overview of what this frontswap patch does | ||
| 152 | in terms that a kernel hacker can grok? | ||
| 153 | |||
| 154 | Let's assume that a frontswap "backend" has registered during | ||
| 155 | kernel initialization; this registration indicates that this | ||
| 156 | frontswap backend has access to some "memory" that is not directly | ||
| 157 | accessible by the kernel. Exactly how much memory it provides is | ||
| 158 | entirely dynamic and random. | ||
| 159 | |||
| 160 | Whenever a swap-device is swapon'd frontswap_init() is called, | ||
| 161 | passing the swap device number (aka "type") as a parameter. | ||
| 162 | This notifies frontswap to expect attempts to "store" swap pages | ||
| 163 | associated with that number. | ||
| 164 | |||
| 165 | Whenever the swap subsystem is readying a page to write to a swap | ||
| 166 | device (c.f swap_writepage()), frontswap_store is called. Frontswap | ||
| 167 | consults with the frontswap backend and if the backend says it does NOT | ||
| 168 | have room, frontswap_store returns -1 and the kernel swaps the page | ||
| 169 | to the swap device as normal. Note that the response from the frontswap | ||
| 170 | backend is unpredictable to the kernel; it may choose to never accept a | ||
| 171 | page, it could accept every ninth page, or it might accept every | ||
| 172 | page. But if the backend does accept a page, the data from the page | ||
| 173 | has already been copied and associated with the type and offset, | ||
| 174 | and the backend guarantees the persistence of the data. In this case, | ||
| 175 | frontswap sets a bit in the "frontswap_map" for the swap device | ||
| 176 | corresponding to the page offset on the swap device to which it would | ||
| 177 | otherwise have written the data. | ||
| 178 | |||
| 179 | When the swap subsystem needs to swap-in a page (swap_readpage()), | ||
| 180 | it first calls frontswap_load() which checks the frontswap_map to | ||
| 181 | see if the page was earlier accepted by the frontswap backend. If | ||
| 182 | it was, the page of data is filled from the frontswap backend and | ||
| 183 | the swap-in is complete. If not, the normal swap-in code is | ||
| 184 | executed to obtain the page of data from the real swap device. | ||
| 185 | |||
| 186 | So every time the frontswap backend accepts a page, a swap device read | ||
| 187 | and (potentially) a swap device write are replaced by a "frontswap backend | ||
| 188 | store" and (possibly) a "frontswap backend loads", which are presumably much | ||
| 189 | faster. | ||
| 190 | |||
| 191 | 4) Can't frontswap be configured as a "special" swap device that is | ||
| 192 | just higher priority than any real swap device (e.g. like zswap, | ||
| 193 | or maybe swap-over-nbd/NFS)? | ||
| 194 | |||
| 195 | No. First, the existing swap subsystem doesn't allow for any kind of | ||
| 196 | swap hierarchy. Perhaps it could be rewritten to accommodate a hierarchy, | ||
| 197 | but this would require fairly drastic changes. Even if it were | ||
| 198 | rewritten, the existing swap subsystem uses the block I/O layer which | ||
| 199 | assumes a swap device is fixed size and any page in it is linearly | ||
| 200 | addressable. Frontswap barely touches the existing swap subsystem, | ||
| 201 | and works around the constraints of the block I/O subsystem to provide | ||
| 202 | a great deal of flexibility and dynamicity. | ||
| 203 | |||
| 204 | For example, the acceptance of any swap page by the frontswap backend is | ||
| 205 | entirely unpredictable. This is critical to the definition of frontswap | ||
| 206 | backends because it grants completely dynamic discretion to the | ||
| 207 | backend. In zcache, one cannot know a priori how compressible a page is. | ||
| 208 | "Poorly" compressible pages can be rejected, and "poorly" can itself be | ||
| 209 | defined dynamically depending on current memory constraints. | ||
| 210 | |||
| 211 | Further, frontswap is entirely synchronous whereas a real swap | ||
| 212 | device is, by definition, asynchronous and uses block I/O. The | ||
| 213 | block I/O layer is not only unnecessary, but may perform "optimizations" | ||
| 214 | that are inappropriate for a RAM-oriented device including delaying | ||
| 215 | the write of some pages for a significant amount of time. Synchrony is | ||
| 216 | required to ensure the dynamicity of the backend and to avoid thorny race | ||
| 217 | conditions that would unnecessarily and greatly complicate frontswap | ||
| 218 | and/or the block I/O subsystem. That said, only the initial "store" | ||
| 219 | and "load" operations need be synchronous. A separate asynchronous thread | ||
| 220 | is free to manipulate the pages stored by frontswap. For example, | ||
| 221 | the "remotification" thread in RAMster uses standard asynchronous | ||
| 222 | kernel sockets to move compressed frontswap pages to a remote machine. | ||
| 223 | Similarly, a KVM guest-side implementation could do in-guest compression | ||
| 224 | and use "batched" hypercalls. | ||
| 225 | |||
| 226 | In a virtualized environment, the dynamicity allows the hypervisor | ||
| 227 | (or host OS) to do "intelligent overcommit". For example, it can | ||
| 228 | choose to accept pages only until host-swapping might be imminent, | ||
| 229 | then force guests to do their own swapping. | ||
| 230 | |||
| 231 | There is a downside to the transcendent memory specifications for | ||
| 232 | frontswap: Since any "store" might fail, there must always be a real | ||
| 233 | slot on a real swap device to swap the page. Thus frontswap must be | ||
| 234 | implemented as a "shadow" to every swapon'd device with the potential | ||
| 235 | capability of holding every page that the swap device might have held | ||
| 236 | and the possibility that it might hold no pages at all. This means | ||
| 237 | that frontswap cannot contain more pages than the total of swapon'd | ||
| 238 | swap devices. For example, if NO swap device is configured on some | ||
| 239 | installation, frontswap is useless. Swapless portable devices | ||
| 240 | can still use frontswap but a backend for such devices must configure | ||
| 241 | some kind of "ghost" swap device and ensure that it is never used. | ||
| 242 | |||
| 243 | 5) Why this weird definition about "duplicate stores"? If a page | ||
| 244 | has been previously successfully stored, can't it always be | ||
| 245 | successfully overwritten? | ||
| 246 | |||
| 247 | Nearly always it can, but no, sometimes it cannot. Consider an example | ||
| 248 | where data is compressed and the original 4K page has been compressed | ||
| 249 | to 1K. Now an attempt is made to overwrite the page with data that | ||
| 250 | is non-compressible and so would take the entire 4K. But the backend | ||
| 251 | has no more space. In this case, the store must be rejected. Whenever | ||
| 252 | frontswap rejects a store that would overwrite, it also must invalidate | ||
| 253 | the old data and ensure that it is no longer accessible. Since the | ||
| 254 | swap subsystem then writes the new data to the real swap device, | ||
| 255 | this is the correct course of action to ensure coherency. | ||
| 256 | |||
| 257 | 6) What is frontswap_shrink for? | ||
| 258 | |||
| 259 | When the (non-frontswap) swap subsystem swaps out a page to a real | ||
| 260 | swap device, that page is only taking up low-value pre-allocated disk | ||
| 261 | space. But if frontswap has placed a page in transcendent memory, that | ||
| 262 | page may be taking up valuable real estate. The frontswap_shrink | ||
| 263 | routine allows code outside of the swap subsystem to force pages out | ||
| 264 | of the memory managed by frontswap and back into kernel-addressable memory. | ||
| 265 | For example, in RAMster, a "suction driver" thread will attempt | ||
| 266 | to "repatriate" pages sent to a remote machine back to the local machine; | ||
| 267 | this is driven using the frontswap_shrink mechanism when memory pressure | ||
| 268 | subsides. | ||
| 269 | |||
| 270 | 7) Why does the frontswap patch create the new include file swapfile.h? | ||
| 271 | |||
| 272 | The frontswap code depends on some swap-subsystem-internal data | ||
| 273 | structures that have, over the years, moved back and forth between | ||
| 274 | static and global. This seemed a reasonable compromise: Define | ||
| 275 | them as global but declare them in a new include file that isn't | ||
| 276 | included by the large number of source files that include swap.h. | ||
| 277 | |||
| 278 | Dan Magenheimer, last updated April 9, 2012 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 55f0fda602ec..3e30a3afe2a4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1077,7 +1077,7 @@ F: drivers/media/video/s5p-fimc/ | |||
| 1077 | ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT | 1077 | ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT |
| 1078 | M: Kyungmin Park <kyungmin.park@samsung.com> | 1078 | M: Kyungmin Park <kyungmin.park@samsung.com> |
| 1079 | M: Kamil Debski <k.debski@samsung.com> | 1079 | M: Kamil Debski <k.debski@samsung.com> |
| 1080 | M: Jeongtae Park <jtp.park@samsung.com> | 1080 | M: Jeongtae Park <jtp.park@samsung.com> |
| 1081 | L: linux-arm-kernel@lists.infradead.org | 1081 | L: linux-arm-kernel@lists.infradead.org |
| 1082 | L: linux-media@vger.kernel.org | 1082 | L: linux-media@vger.kernel.org |
| 1083 | S: Maintained | 1083 | S: Maintained |
| @@ -1646,11 +1646,11 @@ S: Maintained | |||
| 1646 | F: drivers/gpio/gpio-bt8xx.c | 1646 | F: drivers/gpio/gpio-bt8xx.c |
| 1647 | 1647 | ||
| 1648 | BTRFS FILE SYSTEM | 1648 | BTRFS FILE SYSTEM |
| 1649 | M: Chris Mason <chris.mason@oracle.com> | 1649 | M: Chris Mason <chris.mason@fusionio.com> |
| 1650 | L: linux-btrfs@vger.kernel.org | 1650 | L: linux-btrfs@vger.kernel.org |
| 1651 | W: http://btrfs.wiki.kernel.org/ | 1651 | W: http://btrfs.wiki.kernel.org/ |
| 1652 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ | 1652 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ |
| 1653 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git | 1653 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git |
| 1654 | S: Maintained | 1654 | S: Maintained |
| 1655 | F: Documentation/filesystems/btrfs.txt | 1655 | F: Documentation/filesystems/btrfs.txt |
| 1656 | F: fs/btrfs/ | 1656 | F: fs/btrfs/ |
| @@ -1743,10 +1743,10 @@ F: include/linux/can/platform/ | |||
| 1743 | CAPABILITIES | 1743 | CAPABILITIES |
| 1744 | M: Serge Hallyn <serge.hallyn@canonical.com> | 1744 | M: Serge Hallyn <serge.hallyn@canonical.com> |
| 1745 | L: linux-security-module@vger.kernel.org | 1745 | L: linux-security-module@vger.kernel.org |
| 1746 | S: Supported | 1746 | S: Supported |
| 1747 | F: include/linux/capability.h | 1747 | F: include/linux/capability.h |
| 1748 | F: security/capability.c | 1748 | F: security/capability.c |
| 1749 | F: security/commoncap.c | 1749 | F: security/commoncap.c |
| 1750 | F: kernel/capability.c | 1750 | F: kernel/capability.c |
| 1751 | 1751 | ||
| 1752 | CELL BROADBAND ENGINE ARCHITECTURE | 1752 | CELL BROADBAND ENGINE ARCHITECTURE |
| @@ -1800,6 +1800,9 @@ F: include/linux/cfag12864b.h | |||
| 1800 | CFG80211 and NL80211 | 1800 | CFG80211 and NL80211 |
| 1801 | M: Johannes Berg <johannes@sipsolutions.net> | 1801 | M: Johannes Berg <johannes@sipsolutions.net> |
| 1802 | L: linux-wireless@vger.kernel.org | 1802 | L: linux-wireless@vger.kernel.org |
| 1803 | W: http://wireless.kernel.org/ | ||
| 1804 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git | ||
| 1805 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
| 1803 | S: Maintained | 1806 | S: Maintained |
| 1804 | F: include/linux/nl80211.h | 1807 | F: include/linux/nl80211.h |
| 1805 | F: include/net/cfg80211.h | 1808 | F: include/net/cfg80211.h |
| @@ -2146,11 +2149,11 @@ S: Orphan | |||
| 2146 | F: drivers/net/wan/pc300* | 2149 | F: drivers/net/wan/pc300* |
| 2147 | 2150 | ||
| 2148 | CYTTSP TOUCHSCREEN DRIVER | 2151 | CYTTSP TOUCHSCREEN DRIVER |
| 2149 | M: Javier Martinez Canillas <javier@dowhile0.org> | 2152 | M: Javier Martinez Canillas <javier@dowhile0.org> |
| 2150 | L: linux-input@vger.kernel.org | 2153 | L: linux-input@vger.kernel.org |
| 2151 | S: Maintained | 2154 | S: Maintained |
| 2152 | F: drivers/input/touchscreen/cyttsp* | 2155 | F: drivers/input/touchscreen/cyttsp* |
| 2153 | F: include/linux/input/cyttsp.h | 2156 | F: include/linux/input/cyttsp.h |
| 2154 | 2157 | ||
| 2155 | DAMA SLAVE for AX.25 | 2158 | DAMA SLAVE for AX.25 |
| 2156 | M: Joerg Reuter <jreuter@yaina.de> | 2159 | M: Joerg Reuter <jreuter@yaina.de> |
| @@ -2270,7 +2273,7 @@ F: include/linux/device-mapper.h | |||
| 2270 | F: include/linux/dm-*.h | 2273 | F: include/linux/dm-*.h |
| 2271 | 2274 | ||
| 2272 | DIOLAN U2C-12 I2C DRIVER | 2275 | DIOLAN U2C-12 I2C DRIVER |
| 2273 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 2276 | M: Guenter Roeck <linux@roeck-us.net> |
| 2274 | L: linux-i2c@vger.kernel.org | 2277 | L: linux-i2c@vger.kernel.org |
| 2275 | S: Maintained | 2278 | S: Maintained |
| 2276 | F: drivers/i2c/busses/i2c-diolan-u2c.c | 2279 | F: drivers/i2c/busses/i2c-diolan-u2c.c |
| @@ -2930,6 +2933,13 @@ F: Documentation/power/freezing-of-tasks.txt | |||
| 2930 | F: include/linux/freezer.h | 2933 | F: include/linux/freezer.h |
| 2931 | F: kernel/freezer.c | 2934 | F: kernel/freezer.c |
| 2932 | 2935 | ||
| 2936 | FRONTSWAP API | ||
| 2937 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | ||
| 2938 | L: linux-kernel@vger.kernel.org | ||
| 2939 | S: Maintained | ||
| 2940 | F: mm/frontswap.c | ||
| 2941 | F: include/linux/frontswap.h | ||
| 2942 | |||
| 2933 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS | 2943 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS |
| 2934 | M: David Howells <dhowells@redhat.com> | 2944 | M: David Howells <dhowells@redhat.com> |
| 2935 | L: linux-cachefs@redhat.com | 2945 | L: linux-cachefs@redhat.com |
| @@ -3138,7 +3148,7 @@ F: drivers/tty/hvc/ | |||
| 3138 | 3148 | ||
| 3139 | HARDWARE MONITORING | 3149 | HARDWARE MONITORING |
| 3140 | M: Jean Delvare <khali@linux-fr.org> | 3150 | M: Jean Delvare <khali@linux-fr.org> |
| 3141 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 3151 | M: Guenter Roeck <linux@roeck-us.net> |
| 3142 | L: lm-sensors@lm-sensors.org | 3152 | L: lm-sensors@lm-sensors.org |
| 3143 | W: http://www.lm-sensors.org/ | 3153 | W: http://www.lm-sensors.org/ |
| 3144 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ | 3154 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ |
| @@ -4096,6 +4106,8 @@ F: drivers/scsi/53c700* | |||
| 4096 | LED SUBSYSTEM | 4106 | LED SUBSYSTEM |
| 4097 | M: Bryan Wu <bryan.wu@canonical.com> | 4107 | M: Bryan Wu <bryan.wu@canonical.com> |
| 4098 | M: Richard Purdie <rpurdie@rpsys.net> | 4108 | M: Richard Purdie <rpurdie@rpsys.net> |
| 4109 | L: linux-leds@vger.kernel.org | ||
| 4110 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git | ||
| 4099 | S: Maintained | 4111 | S: Maintained |
| 4100 | F: drivers/leds/ | 4112 | F: drivers/leds/ |
| 4101 | F: include/linux/leds.h | 4113 | F: include/linux/leds.h |
| @@ -4340,7 +4352,8 @@ MAC80211 | |||
| 4340 | M: Johannes Berg <johannes@sipsolutions.net> | 4352 | M: Johannes Berg <johannes@sipsolutions.net> |
| 4341 | L: linux-wireless@vger.kernel.org | 4353 | L: linux-wireless@vger.kernel.org |
| 4342 | W: http://linuxwireless.org/ | 4354 | W: http://linuxwireless.org/ |
| 4343 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git | 4355 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git |
| 4356 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
| 4344 | S: Maintained | 4357 | S: Maintained |
| 4345 | F: Documentation/networking/mac80211-injection.txt | 4358 | F: Documentation/networking/mac80211-injection.txt |
| 4346 | F: include/net/mac80211.h | 4359 | F: include/net/mac80211.h |
| @@ -4351,7 +4364,8 @@ M: Stefano Brivio <stefano.brivio@polimi.it> | |||
| 4351 | M: Mattias Nissler <mattias.nissler@gmx.de> | 4364 | M: Mattias Nissler <mattias.nissler@gmx.de> |
| 4352 | L: linux-wireless@vger.kernel.org | 4365 | L: linux-wireless@vger.kernel.org |
| 4353 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID | 4366 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID |
| 4354 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git | 4367 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git |
| 4368 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
| 4355 | S: Maintained | 4369 | S: Maintained |
| 4356 | F: net/mac80211/rc80211_pid* | 4370 | F: net/mac80211/rc80211_pid* |
| 4357 | 4371 | ||
| @@ -4411,6 +4425,13 @@ S: Orphan | |||
| 4411 | F: drivers/video/matrox/matroxfb_* | 4425 | F: drivers/video/matrox/matroxfb_* |
| 4412 | F: include/linux/matroxfb.h | 4426 | F: include/linux/matroxfb.h |
| 4413 | 4427 | ||
| 4428 | MAX16065 HARDWARE MONITOR DRIVER | ||
| 4429 | M: Guenter Roeck <linux@roeck-us.net> | ||
| 4430 | L: lm-sensors@lm-sensors.org | ||
| 4431 | S: Maintained | ||
| 4432 | F: Documentation/hwmon/max16065 | ||
| 4433 | F: drivers/hwmon/max16065.c | ||
| 4434 | |||
| 4414 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER | 4435 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER |
| 4415 | M: "Hans J. Koch" <hjk@hansjkoch.de> | 4436 | M: "Hans J. Koch" <hjk@hansjkoch.de> |
| 4416 | L: lm-sensors@lm-sensors.org | 4437 | L: lm-sensors@lm-sensors.org |
| @@ -5149,7 +5170,7 @@ F: drivers/leds/leds-pca9532.c | |||
| 5149 | F: include/linux/leds-pca9532.h | 5170 | F: include/linux/leds-pca9532.h |
| 5150 | 5171 | ||
| 5151 | PCA9541 I2C BUS MASTER SELECTOR DRIVER | 5172 | PCA9541 I2C BUS MASTER SELECTOR DRIVER |
| 5152 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 5173 | M: Guenter Roeck <linux@roeck-us.net> |
| 5153 | L: linux-i2c@vger.kernel.org | 5174 | L: linux-i2c@vger.kernel.org |
| 5154 | S: Maintained | 5175 | S: Maintained |
| 5155 | F: drivers/i2c/muxes/i2c-mux-pca9541.c | 5176 | F: drivers/i2c/muxes/i2c-mux-pca9541.c |
| @@ -5169,7 +5190,7 @@ S: Maintained | |||
| 5169 | F: drivers/firmware/pcdp.* | 5190 | F: drivers/firmware/pcdp.* |
| 5170 | 5191 | ||
| 5171 | PCI ERROR RECOVERY | 5192 | PCI ERROR RECOVERY |
| 5172 | M: Linas Vepstas <linasvepstas@gmail.com> | 5193 | M: Linas Vepstas <linasvepstas@gmail.com> |
| 5173 | L: linux-pci@vger.kernel.org | 5194 | L: linux-pci@vger.kernel.org |
| 5174 | S: Supported | 5195 | S: Supported |
| 5175 | F: Documentation/PCI/pci-error-recovery.txt | 5196 | F: Documentation/PCI/pci-error-recovery.txt |
| @@ -5299,7 +5320,7 @@ F: drivers/video/fb-puv3.c | |||
| 5299 | F: drivers/rtc/rtc-puv3.c | 5320 | F: drivers/rtc/rtc-puv3.c |
| 5300 | 5321 | ||
| 5301 | PMBUS HARDWARE MONITORING DRIVERS | 5322 | PMBUS HARDWARE MONITORING DRIVERS |
| 5302 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 5323 | M: Guenter Roeck <linux@roeck-us.net> |
| 5303 | L: lm-sensors@lm-sensors.org | 5324 | L: lm-sensors@lm-sensors.org |
| 5304 | W: http://www.lm-sensors.org/ | 5325 | W: http://www.lm-sensors.org/ |
| 5305 | W: http://www.roeck-us.net/linux/drivers/ | 5326 | W: http://www.roeck-us.net/linux/drivers/ |
| @@ -5695,6 +5716,9 @@ F: include/linux/remoteproc.h | |||
| 5695 | RFKILL | 5716 | RFKILL |
| 5696 | M: Johannes Berg <johannes@sipsolutions.net> | 5717 | M: Johannes Berg <johannes@sipsolutions.net> |
| 5697 | L: linux-wireless@vger.kernel.org | 5718 | L: linux-wireless@vger.kernel.org |
| 5719 | W: http://wireless.kernel.org/ | ||
| 5720 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git | ||
| 5721 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
| 5698 | S: Maintained | 5722 | S: Maintained |
| 5699 | F: Documentation/rfkill.txt | 5723 | F: Documentation/rfkill.txt |
| 5700 | F: net/rfkill/ | 5724 | F: net/rfkill/ |
| @@ -7291,11 +7315,11 @@ F: Documentation/DocBook/uio-howto.tmpl | |||
| 7291 | F: drivers/uio/ | 7315 | F: drivers/uio/ |
| 7292 | F: include/linux/uio*.h | 7316 | F: include/linux/uio*.h |
| 7293 | 7317 | ||
| 7294 | UTIL-LINUX-NG PACKAGE | 7318 | UTIL-LINUX PACKAGE |
| 7295 | M: Karel Zak <kzak@redhat.com> | 7319 | M: Karel Zak <kzak@redhat.com> |
| 7296 | L: util-linux-ng@vger.kernel.org | 7320 | L: util-linux@vger.kernel.org |
| 7297 | W: http://kernel.org/~kzak/util-linux-ng/ | 7321 | W: http://en.wikipedia.org/wiki/Util-linux |
| 7298 | T: git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git | 7322 | T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git |
| 7299 | S: Maintained | 7323 | S: Maintained |
| 7300 | 7324 | ||
| 7301 | UVESAFB DRIVER | 7325 | UVESAFB DRIVER |
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 3 | 1 | VERSION = 3 |
| 2 | PATCHLEVEL = 5 | 2 | PATCHLEVEL = 5 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc3 |
| 5 | NAME = Saber-toothed Squirrel | 5 | NAME = Saber-toothed Squirrel |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 567b4323c9e6..b1b27525b24d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -7,7 +7,6 @@ config ARM | |||
| 7 | select HAVE_IDE if PCI || ISA || PCMCIA | 7 | select HAVE_IDE if PCI || ISA || PCMCIA |
| 8 | select HAVE_DMA_ATTRS | 8 | select HAVE_DMA_ATTRS |
| 9 | select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) | 9 | select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) |
| 10 | select CMA if (CPU_V6 || CPU_V6K || CPU_V7) | ||
| 11 | select HAVE_MEMBLOCK | 10 | select HAVE_MEMBLOCK |
| 12 | select RTC_LIB | 11 | select RTC_LIB |
| 13 | select SYS_SUPPORTS_APM_EMULATION | 12 | select SYS_SUPPORTS_APM_EMULATION |
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 9d7eb530f95f..aa07f5938f05 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
| @@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
| 366 | struct safe_buffer *buf; | 366 | struct safe_buffer *buf; |
| 367 | unsigned long off; | 367 | unsigned long off; |
| 368 | 368 | ||
| 369 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 369 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
| 370 | __func__, addr, off, sz, dir); | 370 | __func__, addr, sz, dir); |
| 371 | 371 | ||
| 372 | buf = find_safe_buffer_dev(dev, addr, __func__); | 372 | buf = find_safe_buffer_dev(dev, addr, __func__); |
| 373 | if (!buf) | 373 | if (!buf) |
| @@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
| 377 | 377 | ||
| 378 | BUG_ON(buf->direction != dir); | 378 | BUG_ON(buf->direction != dir); |
| 379 | 379 | ||
| 380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
| 381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
| 382 | buf->safe, buf->safe_dma_addr); | 382 | buf->safe, buf->safe_dma_addr); |
| 383 | 383 | ||
| 384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
| @@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
| 406 | struct safe_buffer *buf; | 406 | struct safe_buffer *buf; |
| 407 | unsigned long off; | 407 | unsigned long off; |
| 408 | 408 | ||
| 409 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 409 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
| 410 | __func__, addr, off, sz, dir); | 410 | __func__, addr, sz, dir); |
| 411 | 411 | ||
| 412 | buf = find_safe_buffer_dev(dev, addr, __func__); | 412 | buf = find_safe_buffer_dev(dev, addr, __func__); |
| 413 | if (!buf) | 413 | if (!buf) |
| @@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
| 417 | 417 | ||
| 418 | BUG_ON(buf->direction != dir); | 418 | BUG_ON(buf->direction != dir); |
| 419 | 419 | ||
| 420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
| 421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
| 422 | buf->safe, buf->safe_dma_addr); | 422 | buf->safe, buf->safe_dma_addr); |
| 423 | 423 | ||
| 424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 54d49ddb9b81..5fb47a14f4ba 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
| @@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name, | |||
| 271 | goto err; | 271 | goto err; |
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | r = omap_device_register(pdev); | 274 | r = platform_device_add(pdev); |
| 275 | if (r) { | 275 | if (r) { |
| 276 | pr_err("Could not register omap_device for %s\n", pdev_name); | 276 | pr_err("Could not register platform_device for %s\n", pdev_name); |
| 277 | goto err; | 277 | goto err; |
| 278 | } | 278 | } |
| 279 | 279 | ||
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index f31383c32f9c..df33909205e2 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig | |||
| @@ -186,6 +186,12 @@ config SH_TIMER_TMU | |||
| 186 | help | 186 | help |
| 187 | This enables build of the TMU timer driver. | 187 | This enables build of the TMU timer driver. |
| 188 | 188 | ||
| 189 | config EM_TIMER_STI | ||
| 190 | bool "STI timer driver" | ||
| 191 | default y | ||
| 192 | help | ||
| 193 | This enables build of the STI timer driver. | ||
| 194 | |||
| 189 | endmenu | 195 | endmenu |
| 190 | 196 | ||
| 191 | config SH_CLK_CPG | 197 | config SH_CLK_CPG |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ea6b43154090..d766e4256b74 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -228,7 +228,7 @@ static pte_t **consistent_pte; | |||
| 228 | 228 | ||
| 229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M | 229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M |
| 230 | 230 | ||
| 231 | unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; | 231 | static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; |
| 232 | 232 | ||
| 233 | void __init init_consistent_dma_size(unsigned long size) | 233 | void __init init_consistent_dma_size(unsigned long size) |
| 234 | { | 234 | { |
| @@ -268,10 +268,8 @@ static int __init consistent_init(void) | |||
| 268 | unsigned long base = consistent_base; | 268 | unsigned long base = consistent_base; |
| 269 | unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; | 269 | unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; |
| 270 | 270 | ||
| 271 | #ifndef CONFIG_ARM_DMA_USE_IOMMU | 271 | if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) |
| 272 | if (cpu_architecture() >= CPU_ARCH_ARMv6) | ||
| 273 | return 0; | 272 | return 0; |
| 274 | #endif | ||
| 275 | 273 | ||
| 276 | consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); | 274 | consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); |
| 277 | if (!consistent_pte) { | 275 | if (!consistent_pte) { |
| @@ -323,7 +321,7 @@ static struct arm_vmregion_head coherent_head = { | |||
| 323 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), | 321 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), |
| 324 | }; | 322 | }; |
| 325 | 323 | ||
| 326 | size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; | 324 | static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; |
| 327 | 325 | ||
| 328 | static int __init early_coherent_pool(char *p) | 326 | static int __init early_coherent_pool(char *p) |
| 329 | { | 327 | { |
| @@ -342,7 +340,7 @@ static int __init coherent_init(void) | |||
| 342 | struct page *page; | 340 | struct page *page; |
| 343 | void *ptr; | 341 | void *ptr; |
| 344 | 342 | ||
| 345 | if (cpu_architecture() < CPU_ARCH_ARMv6) | 343 | if (!IS_ENABLED(CONFIG_CMA)) |
| 346 | return 0; | 344 | return 0; |
| 347 | 345 | ||
| 348 | ptr = __alloc_from_contiguous(NULL, size, prot, &page); | 346 | ptr = __alloc_from_contiguous(NULL, size, prot, &page); |
| @@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
| 704 | 702 | ||
| 705 | if (arch_is_coherent() || nommu()) | 703 | if (arch_is_coherent() || nommu()) |
| 706 | addr = __alloc_simple_buffer(dev, size, gfp, &page); | 704 | addr = __alloc_simple_buffer(dev, size, gfp, &page); |
| 707 | else if (cpu_architecture() < CPU_ARCH_ARMv6) | 705 | else if (!IS_ENABLED(CONFIG_CMA)) |
| 708 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); | 706 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); |
| 709 | else if (gfp & GFP_ATOMIC) | 707 | else if (gfp & GFP_ATOMIC) |
| 710 | addr = __alloc_from_pool(dev, size, &page, caller); | 708 | addr = __alloc_from_pool(dev, size, &page, caller); |
| @@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
| 773 | 771 | ||
| 774 | if (arch_is_coherent() || nommu()) { | 772 | if (arch_is_coherent() || nommu()) { |
| 775 | __dma_free_buffer(page, size); | 773 | __dma_free_buffer(page, size); |
| 776 | } else if (cpu_architecture() < CPU_ARCH_ARMv6) { | 774 | } else if (!IS_ENABLED(CONFIG_CMA)) { |
| 777 | __dma_free_remap(cpu_addr, size); | 775 | __dma_free_remap(cpu_addr, size); |
| 778 | __dma_free_buffer(page, size); | 776 | __dma_free_buffer(page, size); |
| 779 | } else { | 777 | } else { |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c21d06c7dd7e..f54d59219764 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
| @@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size); | |||
| 212 | * allocations. This must be the smallest DMA mask in the system, | 212 | * allocations. This must be the smallest DMA mask in the system, |
| 213 | * so a successful GFP_DMA allocation will always satisfy this. | 213 | * so a successful GFP_DMA allocation will always satisfy this. |
| 214 | */ | 214 | */ |
| 215 | u32 arm_dma_limit; | 215 | phys_addr_t arm_dma_limit; |
| 216 | 216 | ||
| 217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
| 218 | unsigned long dma_size) | 218 | unsigned long dma_size) |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 93dc0c17cdcb..c471436c7952 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
| @@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | #ifdef CONFIG_ZONE_DMA | 64 | #ifdef CONFIG_ZONE_DMA |
| 65 | extern u32 arm_dma_limit; | 65 | extern phys_addr_t arm_dma_limit; |
| 66 | #else | 66 | #else |
| 67 | #define arm_dma_limit ((u32)~0) | 67 | #define arm_dma_limit ((u32)~0) |
| 68 | #endif | 68 | #endif |
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index c140f9b41dce..d552a854dacc 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c | |||
| @@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) | |||
| 300 | if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) | 300 | if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) |
| 301 | syscall = 1; | 301 | syscall = 1; |
| 302 | 302 | ||
| 303 | if (ti->flags & _TIF_SIGPENDING)) | 303 | if (ti->flags & _TIF_SIGPENDING) |
| 304 | do_signal(regs, syscall); | 304 | do_signal(regs, syscall); |
| 305 | 305 | ||
| 306 | if (ti->flags & _TIF_NOTIFY_RESUME) { | 306 | if (ti->flags & _TIF_NOTIFY_RESUME) { |
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 2e3994b20169..62bcea7dcc6d 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
| @@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs) | |||
| 173 | unsigned long newsp; | 173 | unsigned long newsp; |
| 174 | 174 | ||
| 175 | #ifdef __ARCH_SYNC_CORE_DCACHE | 175 | #ifdef __ARCH_SYNC_CORE_DCACHE |
| 176 | if (current->rt.nr_cpus_allowed == num_possible_cpus()) | 176 | if (current->nr_cpus_allowed == num_possible_cpus()) |
| 177 | set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id())); | 177 | set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id())); |
| 178 | #endif | 178 | #endif |
| 179 | 179 | ||
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index cac5b6be572a..147120128260 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
| @@ -7,6 +7,8 @@ config M68K | |||
| 7 | select GENERIC_IRQ_SHOW | 7 | select GENERIC_IRQ_SHOW |
| 8 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS | 8 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS |
| 9 | select GENERIC_CPU_DEVICES | 9 | select GENERIC_CPU_DEVICES |
| 10 | select GENERIC_STRNCPY_FROM_USER if MMU | ||
| 11 | select GENERIC_STRNLEN_USER if MMU | ||
| 10 | select FPU if MMU | 12 | select FPU if MMU |
| 11 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE | 13 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE |
| 12 | 14 | ||
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 1a922fad76f7..eafa2539a8ee 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild | |||
| @@ -1,2 +1,4 @@ | |||
| 1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
| 2 | header-y += cachectl.h | 2 | header-y += cachectl.h |
| 3 | |||
| 4 | generic-y += word-at-a-time.h | ||
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h index d63b99ff7ff7..497c31c803ff 100644 --- a/arch/m68k/include/asm/m528xsim.h +++ b/arch/m68k/include/asm/m528xsim.h | |||
| @@ -86,7 +86,7 @@ | |||
| 86 | /* | 86 | /* |
| 87 | * QSPI module. | 87 | * QSPI module. |
| 88 | */ | 88 | */ |
| 89 | #define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340) | 89 | #define MCFQSPI_BASE (MCF_IPSBAR + 0x340) |
| 90 | #define MCFQSPI_SIZE 0x40 | 90 | #define MCFQSPI_SIZE 0x40 |
| 91 | 91 | ||
| 92 | #define MCFQSPI_CS0 147 | 92 | #define MCFQSPI_CS0 147 |
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 9c80cd515b20..472c891a4aee 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h | |||
| @@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
| 379 | #define copy_from_user(to, from, n) __copy_from_user(to, from, n) | 379 | #define copy_from_user(to, from, n) __copy_from_user(to, from, n) |
| 380 | #define copy_to_user(to, from, n) __copy_to_user(to, from, n) | 380 | #define copy_to_user(to, from, n) __copy_to_user(to, from, n) |
| 381 | 381 | ||
| 382 | long strncpy_from_user(char *dst, const char __user *src, long count); | 382 | #define user_addr_max() \ |
| 383 | long strnlen_user(const char __user *src, long n); | 383 | (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) |
| 384 | |||
| 385 | extern long strncpy_from_user(char *dst, const char __user *src, long count); | ||
| 386 | extern __must_check long strlen_user(const char __user *str); | ||
| 387 | extern __must_check long strnlen_user(const char __user *str, long n); | ||
| 388 | |||
| 384 | unsigned long __clear_user(void __user *to, unsigned long n); | 389 | unsigned long __clear_user(void __user *to, unsigned long n); |
| 385 | 390 | ||
| 386 | #define clear_user __clear_user | 391 | #define clear_user __clear_user |
| 387 | 392 | ||
| 388 | #define strlen_user(str) strnlen_user(str, 32767) | ||
| 389 | |||
| 390 | #endif /* _M68K_UACCESS_H */ | 393 | #endif /* _M68K_UACCESS_H */ |
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 8b4a2222e658..1bc10e62b9af 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c | |||
| @@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void) | |||
| 286 | } | 286 | } |
| 287 | } | 287 | } |
| 288 | 288 | ||
| 289 | #ifdef CONFIG_COLDFIRE | 289 | #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) |
| 290 | asmlinkage int syscall_trace_enter(void) | 290 | asmlinkage int syscall_trace_enter(void) |
| 291 | { | 291 | { |
| 292 | int ret = 0; | 292 | int ret = 0; |
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index d7deb7fc7eb5..707f0573ec6b 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c | |||
| @@ -85,7 +85,7 @@ void __init time_init(void) | |||
| 85 | mach_sched_init(timer_interrupt); | 85 | mach_sched_init(timer_interrupt); |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | #ifdef CONFIG_M68KCLASSIC | 88 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
| 89 | 89 | ||
| 90 | u32 arch_gettimeoffset(void) | 90 | u32 arch_gettimeoffset(void) |
| 91 | { | 91 | { |
| @@ -108,4 +108,4 @@ static int __init rtc_init(void) | |||
| 108 | 108 | ||
| 109 | module_init(rtc_init); | 109 | module_init(rtc_init); |
| 110 | 110 | ||
| 111 | #endif /* CONFIG_M68KCLASSIC */ | 111 | #endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ |
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c index 5664386338da..5e97f2ee7c11 100644 --- a/arch/m68k/lib/uaccess.c +++ b/arch/m68k/lib/uaccess.c | |||
| @@ -104,80 +104,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, | |||
| 104 | EXPORT_SYMBOL(__generic_copy_to_user); | 104 | EXPORT_SYMBOL(__generic_copy_to_user); |
| 105 | 105 | ||
| 106 | /* | 106 | /* |
| 107 | * Copy a null terminated string from userspace. | ||
| 108 | */ | ||
| 109 | long strncpy_from_user(char *dst, const char __user *src, long count) | ||
| 110 | { | ||
| 111 | long res; | ||
| 112 | char c; | ||
| 113 | |||
| 114 | if (count <= 0) | ||
| 115 | return count; | ||
| 116 | |||
| 117 | asm volatile ("\n" | ||
| 118 | "1: "MOVES".b (%2)+,%4\n" | ||
| 119 | " move.b %4,(%1)+\n" | ||
| 120 | " jeq 2f\n" | ||
| 121 | " subq.l #1,%3\n" | ||
| 122 | " jne 1b\n" | ||
| 123 | "2: sub.l %3,%0\n" | ||
| 124 | "3:\n" | ||
| 125 | " .section .fixup,\"ax\"\n" | ||
| 126 | " .even\n" | ||
| 127 | "10: move.l %5,%0\n" | ||
| 128 | " jra 3b\n" | ||
| 129 | " .previous\n" | ||
| 130 | "\n" | ||
| 131 | " .section __ex_table,\"a\"\n" | ||
| 132 | " .align 4\n" | ||
| 133 | " .long 1b,10b\n" | ||
| 134 | " .previous" | ||
| 135 | : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c) | ||
| 136 | : "i" (-EFAULT), "0" (count)); | ||
| 137 | |||
| 138 | return res; | ||
| 139 | } | ||
| 140 | EXPORT_SYMBOL(strncpy_from_user); | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Return the size of a string (including the ending 0) | ||
| 144 | * | ||
| 145 | * Return 0 on exception, a value greater than N if too long | ||
| 146 | */ | ||
| 147 | long strnlen_user(const char __user *src, long n) | ||
| 148 | { | ||
| 149 | char c; | ||
| 150 | long res; | ||
| 151 | |||
| 152 | asm volatile ("\n" | ||
| 153 | "1: subq.l #1,%1\n" | ||
| 154 | " jmi 3f\n" | ||
| 155 | "2: "MOVES".b (%0)+,%2\n" | ||
| 156 | " tst.b %2\n" | ||
| 157 | " jne 1b\n" | ||
| 158 | " jra 4f\n" | ||
| 159 | "\n" | ||
| 160 | "3: addq.l #1,%0\n" | ||
| 161 | "4: sub.l %4,%0\n" | ||
| 162 | "5:\n" | ||
| 163 | " .section .fixup,\"ax\"\n" | ||
| 164 | " .even\n" | ||
| 165 | "20: sub.l %0,%0\n" | ||
| 166 | " jra 5b\n" | ||
| 167 | " .previous\n" | ||
| 168 | "\n" | ||
| 169 | " .section __ex_table,\"a\"\n" | ||
| 170 | " .align 4\n" | ||
| 171 | " .long 2b,20b\n" | ||
| 172 | " .previous\n" | ||
| 173 | : "=&a" (res), "+d" (n), "=&d" (c) | ||
| 174 | : "0" (src), "r" (src)); | ||
| 175 | |||
| 176 | return res; | ||
| 177 | } | ||
| 178 | EXPORT_SYMBOL(strnlen_user); | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Zero Userspace | 107 | * Zero Userspace |
| 182 | */ | 108 | */ |
| 183 | 109 | ||
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c index c801c172b822..f4dc9b295609 100644 --- a/arch/m68k/platform/68328/timers.c +++ b/arch/m68k/platform/68328/timers.c | |||
| @@ -53,6 +53,7 @@ | |||
| 53 | #endif | 53 | #endif |
| 54 | 54 | ||
| 55 | static u32 m68328_tick_cnt; | 55 | static u32 m68328_tick_cnt; |
| 56 | static irq_handler_t timer_interrupt; | ||
| 56 | 57 | ||
| 57 | /***************************************************************************/ | 58 | /***************************************************************************/ |
| 58 | 59 | ||
| @@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) | |||
| 62 | TSTAT &= 0; | 63 | TSTAT &= 0; |
| 63 | 64 | ||
| 64 | m68328_tick_cnt += TICKS_PER_JIFFY; | 65 | m68328_tick_cnt += TICKS_PER_JIFFY; |
| 65 | return arch_timer_interrupt(irq, dummy); | 66 | return timer_interrupt(irq, dummy); |
| 66 | } | 67 | } |
| 67 | 68 | ||
| 68 | /***************************************************************************/ | 69 | /***************************************************************************/ |
| @@ -99,7 +100,7 @@ static struct clocksource m68328_clk = { | |||
| 99 | 100 | ||
| 100 | /***************************************************************************/ | 101 | /***************************************************************************/ |
| 101 | 102 | ||
| 102 | void hw_timer_init(void) | 103 | void hw_timer_init(irq_handler_t handler) |
| 103 | { | 104 | { |
| 104 | /* disable timer 1 */ | 105 | /* disable timer 1 */ |
| 105 | TCTL = 0; | 106 | TCTL = 0; |
| @@ -115,6 +116,7 @@ void hw_timer_init(void) | |||
| 115 | /* Enable timer 1 */ | 116 | /* Enable timer 1 */ |
| 116 | TCTL |= TCTL_TEN; | 117 | TCTL |= TCTL_TEN; |
| 117 | clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); | 118 | clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); |
| 119 | timer_interrupt = handler; | ||
| 118 | } | 120 | } |
| 119 | 121 | ||
| 120 | /***************************************************************************/ | 122 | /***************************************************************************/ |
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c index 255fc03913e9..9877cefad1e7 100644 --- a/arch/m68k/platform/68360/config.c +++ b/arch/m68k/platform/68360/config.c | |||
| @@ -35,6 +35,7 @@ extern void m360_cpm_reset(void); | |||
| 35 | #define OSCILLATOR (unsigned long int)33000000 | 35 | #define OSCILLATOR (unsigned long int)33000000 |
| 36 | #endif | 36 | #endif |
| 37 | 37 | ||
| 38 | static irq_handler_t timer_interrupt; | ||
| 38 | unsigned long int system_clock; | 39 | unsigned long int system_clock; |
| 39 | 40 | ||
| 40 | extern QUICC *pquicc; | 41 | extern QUICC *pquicc; |
| @@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) | |||
| 52 | 53 | ||
| 53 | pquicc->timer_ter1 = 0x0002; /* clear timer event */ | 54 | pquicc->timer_ter1 = 0x0002; /* clear timer event */ |
| 54 | 55 | ||
| 55 | return arch_timer_interrupt(irq, dummy); | 56 | return timer_interrupt(irq, dummy); |
| 56 | } | 57 | } |
| 57 | 58 | ||
| 58 | static struct irqaction m68360_timer_irq = { | 59 | static struct irqaction m68360_timer_irq = { |
| @@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = { | |||
| 61 | .handler = hw_tick, | 62 | .handler = hw_tick, |
| 62 | }; | 63 | }; |
| 63 | 64 | ||
| 64 | void hw_timer_init(void) | 65 | void hw_timer_init(irq_handler_t handler) |
| 65 | { | 66 | { |
| 66 | unsigned char prescaler; | 67 | unsigned char prescaler; |
| 67 | unsigned short tgcr_save; | 68 | unsigned short tgcr_save; |
| @@ -94,6 +95,8 @@ void hw_timer_init(void) | |||
| 94 | 95 | ||
| 95 | pquicc->timer_ter1 = 0x0003; /* clear timer events */ | 96 | pquicc->timer_ter1 = 0x0003; /* clear timer events */ |
| 96 | 97 | ||
| 98 | timer_interrupt = handler; | ||
| 99 | |||
| 97 | /* enable timer 1 interrupt in CIMR */ | 100 | /* enable timer 1 interrupt in CIMR */ |
| 98 | setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); | 101 | setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); |
| 99 | 102 | ||
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index dbc3850b1d0d..5707f1a62341 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
| @@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig | |||
| 21 | 21 | ||
| 22 | NM = sh $(srctree)/arch/parisc/nm | 22 | NM = sh $(srctree)/arch/parisc/nm |
| 23 | CHECKFLAGS += -D__hppa__=1 | 23 | CHECKFLAGS += -D__hppa__=1 |
| 24 | LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | ||
| 24 | 25 | ||
| 25 | MACHINE := $(shell uname -m) | 26 | MACHINE := $(shell uname -m) |
| 26 | ifeq ($(MACHINE),parisc*) | 27 | ifeq ($(MACHINE),parisc*) |
| @@ -79,7 +80,7 @@ kernel-y := mm/ kernel/ math-emu/ | |||
| 79 | kernel-$(CONFIG_HPUX) += hpux/ | 80 | kernel-$(CONFIG_HPUX) += hpux/ |
| 80 | 81 | ||
| 81 | core-y += $(addprefix arch/parisc/, $(kernel-y)) | 82 | core-y += $(addprefix arch/parisc/, $(kernel-y)) |
| 82 | libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` | 83 | libs-y += arch/parisc/lib/ $(LIBGCC) |
| 83 | 84 | ||
| 84 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ | 85 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ |
| 85 | 86 | ||
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 19a434f55059..4383707d9801 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
| 2 | 2 | ||
| 3 | header-y += pdc.h | 3 | header-y += pdc.h |
| 4 | generic-y += word-at-a-time.h | ||
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h index 72cfdb0cfdd1..62a33338549c 100644 --- a/arch/parisc/include/asm/bug.h +++ b/arch/parisc/include/asm/bug.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef _PARISC_BUG_H | 1 | #ifndef _PARISC_BUG_H |
| 2 | #define _PARISC_BUG_H | 2 | #define _PARISC_BUG_H |
| 3 | 3 | ||
| 4 | #include <linux/kernel.h> /* for BUGFLAG_TAINT */ | ||
| 5 | |||
| 4 | /* | 6 | /* |
| 5 | * Tell the user there is some problem. | 7 | * Tell the user there is some problem. |
| 6 | * The offending file and line are encoded in the __bug_table section. | 8 | * The offending file and line are encoded in the __bug_table section. |
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index c9aac24b02e2..32b394f3b854 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
| @@ -100,6 +100,9 @@ static inline void hard_irq_disable(void) | |||
| 100 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; | 100 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | /* include/linux/interrupt.h needs hard_irq_disable to be a macro */ | ||
| 104 | #define hard_irq_disable hard_irq_disable | ||
| 105 | |||
| 103 | /* | 106 | /* |
| 104 | * This is called by asynchronous interrupts to conditionally | 107 | * This is called by asynchronous interrupts to conditionally |
| 105 | * re-enable hard interrupts when soft-disabled after having | 108 | * re-enable hard interrupts when soft-disabled after having |
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 0b6d79617d7b..2e3200ca485f 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c | |||
| @@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, | |||
| 176 | 176 | ||
| 177 | static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) | 177 | static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) |
| 178 | { | 178 | { |
| 179 | if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) | 179 | if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16) |
| 180 | && entry->jump[1] == 0x396b0000 + (val & 0xffff)) | 180 | && entry->jump[1] == 0x398c0000 + (val & 0xffff)) |
| 181 | return 1; | 181 | return 1; |
| 182 | return 0; | 182 | return 0; |
| 183 | } | 183 | } |
| @@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location, | |||
| 204 | entry++; | 204 | entry++; |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | /* Stolen from Paul Mackerras as well... */ | 207 | entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */ |
| 208 | entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ | 208 | entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/ |
| 209 | entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ | 209 | entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ |
| 210 | entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ | ||
| 211 | entry->jump[3] = 0x4e800420; /* bctr */ | 210 | entry->jump[3] = 0x4e800420; /* bctr */ |
| 212 | 211 | ||
| 213 | DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); | 212 | DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 99a995c2a3f2..be171ee73bf8 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
| @@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs) | |||
| 475 | struct pt_regs *old_regs; | 475 | struct pt_regs *old_regs; |
| 476 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); | 476 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
| 477 | struct clock_event_device *evt = &__get_cpu_var(decrementers); | 477 | struct clock_event_device *evt = &__get_cpu_var(decrementers); |
| 478 | u64 now; | ||
| 478 | 479 | ||
| 479 | /* Ensure a positive value is written to the decrementer, or else | 480 | /* Ensure a positive value is written to the decrementer, or else |
| 480 | * some CPUs will continue to take decrementer exceptions. | 481 | * some CPUs will continue to take decrementer exceptions. |
| @@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs) | |||
| 509 | irq_work_run(); | 510 | irq_work_run(); |
| 510 | } | 511 | } |
| 511 | 512 | ||
| 512 | *next_tb = ~(u64)0; | 513 | now = get_tb_or_rtc(); |
| 513 | if (evt->event_handler) | 514 | if (now >= *next_tb) { |
| 514 | evt->event_handler(evt); | 515 | *next_tb = ~(u64)0; |
| 516 | if (evt->event_handler) | ||
| 517 | evt->event_handler(evt); | ||
| 518 | } else { | ||
| 519 | now = *next_tb - now; | ||
| 520 | if (now <= DECREMENTER_MAX) | ||
| 521 | set_dec((int)now); | ||
| 522 | } | ||
| 515 | 523 | ||
| 516 | #ifdef CONFIG_PPC64 | 524 | #ifdef CONFIG_PPC64 |
| 517 | /* collect purr register values often, for accurate calculations */ | 525 | /* collect purr register values often, for accurate calculations */ |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 99bcd0ee838d..31d9db7913e4 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
| @@ -32,6 +32,8 @@ config SUPERH | |||
| 32 | select GENERIC_SMP_IDLE_THREAD | 32 | select GENERIC_SMP_IDLE_THREAD |
| 33 | select GENERIC_CLOCKEVENTS | 33 | select GENERIC_CLOCKEVENTS |
| 34 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST | 34 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST |
| 35 | select GENERIC_STRNCPY_FROM_USER | ||
| 36 | select GENERIC_STRNLEN_USER | ||
| 35 | help | 37 | help |
| 36 | The SuperH is a RISC processor targeted for use in embedded systems | 38 | The SuperH is a RISC processor targeted for use in embedded systems |
| 37 | and consumer electronics; it was also used in the Sega Dreamcast | 39 | and consumer electronics; it was also used in the Sega Dreamcast |
diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 46edf070da1c..aed701c7b11b 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile | |||
| @@ -9,6 +9,12 @@ | |||
| 9 | # License. See the file "COPYING" in the main directory of this archive | 9 | # License. See the file "COPYING" in the main directory of this archive |
| 10 | # for more details. | 10 | # for more details. |
| 11 | # | 11 | # |
| 12 | ifneq ($(SUBARCH),$(ARCH)) | ||
| 13 | ifeq ($(CROSS_COMPILE),) | ||
| 14 | CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) | ||
| 15 | endif | ||
| 16 | endif | ||
| 17 | |||
| 12 | isa-y := any | 18 | isa-y := any |
| 13 | isa-$(CONFIG_SH_DSP) := sh | 19 | isa-$(CONFIG_SH_DSP) := sh |
| 14 | isa-$(CONFIG_CPU_SH2) := sh2 | 20 | isa-$(CONFIG_CPU_SH2) := sh2 |
| @@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \ | |||
| 106 | KBUILD_DEFCONFIG := cayman_defconfig | 112 | KBUILD_DEFCONFIG := cayman_defconfig |
| 107 | endif | 113 | endif |
| 108 | 114 | ||
| 109 | ifneq ($(SUBARCH),$(ARCH)) | ||
| 110 | ifeq ($(CROSS_COMPILE),) | ||
| 111 | CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) | ||
| 112 | endif | ||
| 113 | endif | ||
| 114 | |||
| 115 | ifdef CONFIG_CPU_LITTLE_ENDIAN | 115 | ifdef CONFIG_CPU_LITTLE_ENDIAN |
| 116 | ld-bfd := elf32-$(UTS_MACHINE)-linux | 116 | ld-bfd := elf32-$(UTS_MACHINE)-linux |
| 117 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd) | 117 | LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd) |
| 118 | LDFLAGS += -EL | 118 | LDFLAGS += -EL |
| 119 | else | 119 | else |
| 120 | ld-bfd := elf32-$(UTS_MACHINE)big-linux | 120 | ld-bfd := elf32-$(UTS_MACHINE)big-linux |
| 121 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd) | 121 | LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd) |
| 122 | LDFLAGS += -EB | 122 | LDFLAGS += -EB |
| 123 | endif | 123 | endif |
| 124 | 124 | ||
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 7beb42322f60..7b673ddcd555 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild | |||
| @@ -1,5 +1,39 @@ | |||
| 1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
| 2 | 2 | ||
| 3 | generic-y += bitsperlong.h | ||
| 4 | generic-y += cputime.h | ||
| 5 | generic-y += current.h | ||
| 6 | generic-y += delay.h | ||
| 7 | generic-y += div64.h | ||
| 8 | generic-y += emergency-restart.h | ||
| 9 | generic-y += errno.h | ||
| 10 | generic-y += fcntl.h | ||
| 11 | generic-y += ioctl.h | ||
| 12 | generic-y += ipcbuf.h | ||
| 13 | generic-y += irq_regs.h | ||
| 14 | generic-y += kvm_para.h | ||
| 15 | generic-y += local.h | ||
| 16 | generic-y += local64.h | ||
| 17 | generic-y += param.h | ||
| 18 | generic-y += parport.h | ||
| 19 | generic-y += percpu.h | ||
| 20 | generic-y += poll.h | ||
| 21 | generic-y += mman.h | ||
| 22 | generic-y += msgbuf.h | ||
| 23 | generic-y += resource.h | ||
| 24 | generic-y += scatterlist.h | ||
| 25 | generic-y += sembuf.h | ||
| 26 | generic-y += serial.h | ||
| 27 | generic-y += shmbuf.h | ||
| 28 | generic-y += siginfo.h | ||
| 29 | generic-y += sizes.h | ||
| 30 | generic-y += socket.h | ||
| 31 | generic-y += statfs.h | ||
| 32 | generic-y += termbits.h | ||
| 33 | generic-y += termios.h | ||
| 34 | generic-y += ucontext.h | ||
| 35 | generic-y += xor.h | ||
| 36 | |||
| 3 | header-y += cachectl.h | 37 | header-y += cachectl.h |
| 4 | header-y += cpu-features.h | 38 | header-y += cpu-features.h |
| 5 | header-y += hw_breakpoint.h | 39 | header-y += hw_breakpoint.h |
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b2..000000000000 --- a/arch/sh/include/asm/bitsperlong.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/bitsperlong.h> | ||
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h deleted file mode 100644 index 6ca395d1393e..000000000000 --- a/arch/sh/include/asm/cputime.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __SH_CPUTIME_H | ||
| 2 | #define __SH_CPUTIME_H | ||
| 3 | |||
| 4 | #include <asm-generic/cputime.h> | ||
| 5 | |||
| 6 | #endif /* __SH_CPUTIME_H */ | ||
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h deleted file mode 100644 index 4c51401b5537..000000000000 --- a/arch/sh/include/asm/current.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/current.h> | ||
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h deleted file mode 100644 index 9670e127b7b2..000000000000 --- a/arch/sh/include/asm/delay.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/delay.h> | ||
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h deleted file mode 100644 index 6cd978cefb28..000000000000 --- a/arch/sh/include/asm/div64.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/div64.h> | ||
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/sh/include/asm/emergency-restart.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef _ASM_EMERGENCY_RESTART_H | ||
| 2 | #define _ASM_EMERGENCY_RESTART_H | ||
| 3 | |||
| 4 | #include <asm-generic/emergency-restart.h> | ||
| 5 | |||
| 6 | #endif /* _ASM_EMERGENCY_RESTART_H */ | ||
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h deleted file mode 100644 index 51cf6f9cebb8..000000000000 --- a/arch/sh/include/asm/errno.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __ASM_SH_ERRNO_H | ||
| 2 | #define __ASM_SH_ERRNO_H | ||
| 3 | |||
| 4 | #include <asm-generic/errno.h> | ||
| 5 | |||
| 6 | #endif /* __ASM_SH_ERRNO_H */ | ||
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h deleted file mode 100644 index 46ab12db5739..000000000000 --- a/arch/sh/include/asm/fcntl.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/fcntl.h> | ||
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/arch/sh/include/asm/ioctl.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/ioctl.h> | ||
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/arch/sh/include/asm/ipcbuf.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/ipcbuf.h> | ||
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/arch/sh/include/asm/irq_regs.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/irq_regs.h> | ||
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/arch/sh/include/asm/kvm_para.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h deleted file mode 100644 index 9ed9b9cb459a..000000000000 --- a/arch/sh/include/asm/local.h +++ /dev/null | |||
| @@ -1,7 +0,0 @@ | |||
| 1 | #ifndef __ASM_SH_LOCAL_H | ||
| 2 | #define __ASM_SH_LOCAL_H | ||
| 3 | |||
| 4 | #include <asm-generic/local.h> | ||
| 5 | |||
| 6 | #endif /* __ASM_SH_LOCAL_H */ | ||
| 7 | |||
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/arch/sh/include/asm/local64.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/local64.h> | ||
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab1..000000000000 --- a/arch/sh/include/asm/mman.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/mman.h> | ||
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h deleted file mode 100644 index 809134c644a6..000000000000 --- a/arch/sh/include/asm/msgbuf.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/msgbuf.h> | ||
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h deleted file mode 100644 index 965d45427975..000000000000 --- a/arch/sh/include/asm/param.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/param.h> | ||
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h deleted file mode 100644 index cf252af64590..000000000000 --- a/arch/sh/include/asm/parport.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/parport.h> | ||
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h deleted file mode 100644 index 4db4b39a4399..000000000000 --- a/arch/sh/include/asm/percpu.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __ARCH_SH_PERCPU | ||
| 2 | #define __ARCH_SH_PERCPU | ||
| 3 | |||
| 4 | #include <asm-generic/percpu.h> | ||
| 5 | |||
| 6 | #endif /* __ARCH_SH_PERCPU */ | ||
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h deleted file mode 100644 index c98509d3149e..000000000000 --- a/arch/sh/include/asm/poll.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/poll.h> | ||
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h deleted file mode 100644 index 9c2499a86ec0..000000000000 --- a/arch/sh/include/asm/resource.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __ASM_SH_RESOURCE_H | ||
| 2 | #define __ASM_SH_RESOURCE_H | ||
| 3 | |||
| 4 | #include <asm-generic/resource.h> | ||
| 5 | |||
| 6 | #endif /* __ASM_SH_RESOURCE_H */ | ||
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h deleted file mode 100644 index 98dfc3510f10..000000000000 --- a/arch/sh/include/asm/scatterlist.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __ASM_SH_SCATTERLIST_H | ||
| 2 | #define __ASM_SH_SCATTERLIST_H | ||
| 3 | |||
| 4 | #include <asm-generic/scatterlist.h> | ||
| 5 | |||
| 6 | #endif /* __ASM_SH_SCATTERLIST_H */ | ||
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h deleted file mode 100644 index 7673b83cfef7..000000000000 --- a/arch/sh/include/asm/sembuf.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/sembuf.h> | ||
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h deleted file mode 100644 index a0cb0caff152..000000000000 --- a/arch/sh/include/asm/serial.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/serial.h> | ||
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h deleted file mode 100644 index 83c05fc2de38..000000000000 --- a/arch/sh/include/asm/shmbuf.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/shmbuf.h> | ||
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h deleted file mode 100644 index 813040ed68a9..000000000000 --- a/arch/sh/include/asm/siginfo.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __ASM_SH_SIGINFO_H | ||
| 2 | #define __ASM_SH_SIGINFO_H | ||
| 3 | |||
| 4 | #include <asm-generic/siginfo.h> | ||
| 5 | |||
| 6 | #endif /* __ASM_SH_SIGINFO_H */ | ||
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h deleted file mode 100644 index dd248c2e1085..000000000000 --- a/arch/sh/include/asm/sizes.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/sizes.h> | ||
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h deleted file mode 100644 index 6b71384b9d8b..000000000000 --- a/arch/sh/include/asm/socket.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/socket.h> | ||
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h deleted file mode 100644 index 9202a023328f..000000000000 --- a/arch/sh/include/asm/statfs.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __ASM_SH_STATFS_H | ||
| 2 | #define __ASM_SH_STATFS_H | ||
| 3 | |||
| 4 | #include <asm-generic/statfs.h> | ||
| 5 | |||
| 6 | #endif /* __ASM_SH_STATFS_H */ | ||
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h deleted file mode 100644 index 3935b106de79..000000000000 --- a/arch/sh/include/asm/termbits.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/termbits.h> | ||
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h deleted file mode 100644 index 280d78a9d966..000000000000 --- a/arch/sh/include/asm/termios.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/termios.h> | ||
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 050f221fa898..8698a80ed00c 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | (__chk_user_ptr(addr), \ | 25 | (__chk_user_ptr(addr), \ |
| 26 | __access_ok((unsigned long __force)(addr), (size))) | 26 | __access_ok((unsigned long __force)(addr), (size))) |
| 27 | 27 | ||
| 28 | #define user_addr_max() (current_thread_info()->addr_limit.seg) | ||
| 29 | |||
| 28 | /* | 30 | /* |
| 29 | * Uh, these should become the main single-value transfer routines ... | 31 | * Uh, these should become the main single-value transfer routines ... |
| 30 | * They automatically use the right size if we just have the right | 32 | * They automatically use the right size if we just have the right |
| @@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; }; | |||
| 100 | # include "uaccess_64.h" | 102 | # include "uaccess_64.h" |
| 101 | #endif | 103 | #endif |
| 102 | 104 | ||
| 105 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | ||
| 106 | |||
| 107 | extern __must_check long strlen_user(const char __user *str); | ||
| 108 | extern __must_check long strnlen_user(const char __user *str, long n); | ||
| 109 | |||
| 103 | /* Generic arbitrary sized copy. */ | 110 | /* Generic arbitrary sized copy. */ |
| 104 | /* Return the number of bytes NOT copied */ | 111 | /* Return the number of bytes NOT copied */ |
| 105 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); | 112 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); |
| @@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size); | |||
| 137 | __cl_size; \ | 144 | __cl_size; \ |
| 138 | }) | 145 | }) |
| 139 | 146 | ||
| 140 | /** | ||
| 141 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
| 142 | * @dst: Destination address, in kernel space. This buffer must be at | ||
| 143 | * least @count bytes long. | ||
| 144 | * @src: Source address, in user space. | ||
| 145 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
| 146 | * | ||
| 147 | * Copies a NUL-terminated string from userspace to kernel space. | ||
| 148 | * | ||
| 149 | * On success, returns the length of the string (not including the trailing | ||
| 150 | * NUL). | ||
| 151 | * | ||
| 152 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
| 153 | * copied). | ||
| 154 | * | ||
| 155 | * If @count is smaller than the length of the string, copies @count bytes | ||
| 156 | * and returns @count. | ||
| 157 | */ | ||
| 158 | #define strncpy_from_user(dest,src,count) \ | ||
| 159 | ({ \ | ||
| 160 | unsigned long __sfu_src = (unsigned long)(src); \ | ||
| 161 | int __sfu_count = (int)(count); \ | ||
| 162 | long __sfu_res = -EFAULT; \ | ||
| 163 | \ | ||
| 164 | if (__access_ok(__sfu_src, __sfu_count)) \ | ||
| 165 | __sfu_res = __strncpy_from_user((unsigned long)(dest), \ | ||
| 166 | __sfu_src, __sfu_count); \ | ||
| 167 | \ | ||
| 168 | __sfu_res; \ | ||
| 169 | }) | ||
| 170 | |||
| 171 | static inline unsigned long | 147 | static inline unsigned long |
| 172 | copy_from_user(void *to, const void __user *from, unsigned long n) | 148 | copy_from_user(void *to, const void __user *from, unsigned long n) |
| 173 | { | 149 | { |
| @@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) | |||
| 192 | return __copy_size; | 168 | return __copy_size; |
| 193 | } | 169 | } |
| 194 | 170 | ||
| 195 | /** | ||
| 196 | * strnlen_user: - Get the size of a string in user space. | ||
| 197 | * @s: The string to measure. | ||
| 198 | * @n: The maximum valid length | ||
| 199 | * | ||
| 200 | * Context: User context only. This function may sleep. | ||
| 201 | * | ||
| 202 | * Get the size of a NUL-terminated string in user space. | ||
| 203 | * | ||
| 204 | * Returns the size of the string INCLUDING the terminating NUL. | ||
| 205 | * On exception, returns 0. | ||
| 206 | * If the string is too long, returns a value greater than @n. | ||
| 207 | */ | ||
| 208 | static inline long strnlen_user(const char __user *s, long n) | ||
| 209 | { | ||
| 210 | if (!__addr_ok(s)) | ||
| 211 | return 0; | ||
| 212 | else | ||
| 213 | return __strnlen_user(s, n); | ||
| 214 | } | ||
| 215 | |||
| 216 | /** | ||
| 217 | * strlen_user: - Get the size of a string in user space. | ||
| 218 | * @str: The string to measure. | ||
| 219 | * | ||
| 220 | * Context: User context only. This function may sleep. | ||
| 221 | * | ||
| 222 | * Get the size of a NUL-terminated string in user space. | ||
| 223 | * | ||
| 224 | * Returns the size of the string INCLUDING the terminating NUL. | ||
| 225 | * On exception, returns 0. | ||
| 226 | * | ||
| 227 | * If there is a limit on the length of a valid string, you may wish to | ||
| 228 | * consider using strnlen_user() instead. | ||
| 229 | */ | ||
| 230 | #define strlen_user(str) strnlen_user(str, ~0UL >> 1) | ||
| 231 | |||
| 232 | /* | 171 | /* |
| 233 | * The exception table consists of pairs of addresses: the first is the | 172 | * The exception table consists of pairs of addresses: the first is the |
| 234 | * address of an instruction that is allowed to fault, and the second is | 173 | * address of an instruction that is allowed to fault, and the second is |
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h index ae0d24f6653f..c0de7ee35ab7 100644 --- a/arch/sh/include/asm/uaccess_32.h +++ b/arch/sh/include/asm/uaccess_32.h | |||
| @@ -170,79 +170,4 @@ __asm__ __volatile__( \ | |||
| 170 | 170 | ||
| 171 | extern void __put_user_unknown(void); | 171 | extern void __put_user_unknown(void); |
| 172 | 172 | ||
| 173 | static inline int | ||
| 174 | __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count) | ||
| 175 | { | ||
| 176 | __kernel_size_t res; | ||
| 177 | unsigned long __dummy, _d, _s, _c; | ||
| 178 | |||
| 179 | __asm__ __volatile__( | ||
| 180 | "9:\n" | ||
| 181 | "mov.b @%2+, %1\n\t" | ||
| 182 | "cmp/eq #0, %1\n\t" | ||
| 183 | "bt/s 2f\n" | ||
| 184 | "1:\n" | ||
| 185 | "mov.b %1, @%3\n\t" | ||
| 186 | "dt %4\n\t" | ||
| 187 | "bf/s 9b\n\t" | ||
| 188 | " add #1, %3\n\t" | ||
| 189 | "2:\n\t" | ||
| 190 | "sub %4, %0\n" | ||
| 191 | "3:\n" | ||
| 192 | ".section .fixup,\"ax\"\n" | ||
| 193 | "4:\n\t" | ||
| 194 | "mov.l 5f, %1\n\t" | ||
| 195 | "jmp @%1\n\t" | ||
| 196 | " mov %9, %0\n\t" | ||
| 197 | ".balign 4\n" | ||
| 198 | "5: .long 3b\n" | ||
| 199 | ".previous\n" | ||
| 200 | ".section __ex_table,\"a\"\n" | ||
| 201 | " .balign 4\n" | ||
| 202 | " .long 9b,4b\n" | ||
| 203 | ".previous" | ||
| 204 | : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c) | ||
| 205 | : "0" (__count), "2" (__src), "3" (__dest), "4" (__count), | ||
| 206 | "i" (-EFAULT) | ||
| 207 | : "memory", "t"); | ||
| 208 | |||
| 209 | return res; | ||
| 210 | } | ||
| 211 | |||
| 212 | /* | ||
| 213 | * Return the size of a string (including the ending 0 even when we have | ||
| 214 | * exceeded the maximum string length). | ||
| 215 | */ | ||
| 216 | static inline long __strnlen_user(const char __user *__s, long __n) | ||
| 217 | { | ||
| 218 | unsigned long res; | ||
| 219 | unsigned long __dummy; | ||
| 220 | |||
| 221 | __asm__ __volatile__( | ||
| 222 | "1:\t" | ||
| 223 | "mov.b @(%0,%3), %1\n\t" | ||
| 224 | "cmp/eq %4, %0\n\t" | ||
| 225 | "bt/s 2f\n\t" | ||
| 226 | " add #1, %0\n\t" | ||
| 227 | "tst %1, %1\n\t" | ||
| 228 | "bf 1b\n\t" | ||
| 229 | "2:\n" | ||
| 230 | ".section .fixup,\"ax\"\n" | ||
| 231 | "3:\n\t" | ||
| 232 | "mov.l 4f, %1\n\t" | ||
| 233 | "jmp @%1\n\t" | ||
| 234 | " mov #0, %0\n" | ||
| 235 | ".balign 4\n" | ||
| 236 | "4: .long 2b\n" | ||
| 237 | ".previous\n" | ||
| 238 | ".section __ex_table,\"a\"\n" | ||
| 239 | " .balign 4\n" | ||
| 240 | " .long 1b,3b\n" | ||
| 241 | ".previous" | ||
| 242 | : "=z" (res), "=&r" (__dummy) | ||
| 243 | : "0" (0), "r" (__s), "r" (__n) | ||
| 244 | : "t"); | ||
| 245 | return res; | ||
| 246 | } | ||
| 247 | |||
| 248 | #endif /* __ASM_SH_UACCESS_32_H */ | 173 | #endif /* __ASM_SH_UACCESS_32_H */ |
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 56fd20b8cdcc..2e07e0f40c6a 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h | |||
| @@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long); | |||
| 84 | extern long __put_user_asm_q(void *, long); | 84 | extern long __put_user_asm_q(void *, long); |
| 85 | extern void __put_user_unknown(void); | 85 | extern void __put_user_unknown(void); |
| 86 | 86 | ||
| 87 | extern long __strnlen_user(const char *__s, long __n); | ||
| 88 | extern int __strncpy_from_user(unsigned long __dest, | ||
| 89 | unsigned long __user __src, int __count); | ||
| 90 | |||
| 91 | #endif /* __ASM_SH_UACCESS_64_H */ | 87 | #endif /* __ASM_SH_UACCESS_64_H */ |
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h deleted file mode 100644 index 9bc07b9f30fb..000000000000 --- a/arch/sh/include/asm/ucontext.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/ucontext.h> | ||
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..6e38953ff7fd --- /dev/null +++ b/arch/sh/include/asm/word-at-a-time.h | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | #ifndef __ASM_SH_WORD_AT_A_TIME_H | ||
| 2 | #define __ASM_SH_WORD_AT_A_TIME_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
| 5 | # include <asm-generic/word-at-a-time.h> | ||
| 6 | #else | ||
| 7 | /* | ||
| 8 | * Little-endian version cribbed from x86. | ||
| 9 | */ | ||
| 10 | struct word_at_a_time { | ||
| 11 | const unsigned long one_bits, high_bits; | ||
| 12 | }; | ||
| 13 | |||
| 14 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | ||
| 15 | |||
| 16 | /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ | ||
| 17 | static inline long count_masked_bytes(long mask) | ||
| 18 | { | ||
| 19 | /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ | ||
| 20 | long a = (0x0ff0001+mask) >> 23; | ||
| 21 | /* Fix the 1 for 00 case */ | ||
| 22 | return a & mask; | ||
| 23 | } | ||
| 24 | |||
| 25 | /* Return nonzero if it has a zero */ | ||
| 26 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) | ||
| 27 | { | ||
| 28 | unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; | ||
| 29 | *bits = mask; | ||
| 30 | return mask; | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) | ||
| 34 | { | ||
| 35 | return bits; | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline unsigned long create_zero_mask(unsigned long bits) | ||
| 39 | { | ||
| 40 | bits = (bits - 1) & ~bits; | ||
| 41 | return bits >> 7; | ||
| 42 | } | ||
| 43 | |||
| 44 | /* The mask we created is directly usable as a bytemask */ | ||
| 45 | #define zero_bytemask(mask) (mask) | ||
| 46 | |||
| 47 | static inline unsigned long find_zero(unsigned long mask) | ||
| 48 | { | ||
| 49 | return count_masked_bytes(mask); | ||
| 50 | } | ||
| 51 | #endif | ||
| 52 | |||
| 53 | #endif | ||
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/arch/sh/include/asm/xor.h +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | #include <asm-generic/xor.h> | ||
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h deleted file mode 100644 index 1192e1c761a7..000000000000 --- a/arch/sh/include/cpu-sh2a/cpu/ubc.h +++ /dev/null | |||
| @@ -1,28 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * SH-2A UBC definitions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Kieran Bingham | ||
| 5 | * | ||
| 6 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 7 | * License. See the file "COPYING" in the main directory of this archive | ||
| 8 | * for more details. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef __ASM_CPU_SH2A_UBC_H | ||
| 12 | #define __ASM_CPU_SH2A_UBC_H | ||
| 13 | |||
| 14 | #define UBC_BARA 0xfffc0400 | ||
| 15 | #define UBC_BAMRA 0xfffc0404 | ||
| 16 | #define UBC_BBRA 0xfffc04a0 /* 16 bit access */ | ||
| 17 | #define UBC_BDRA 0xfffc0408 | ||
| 18 | #define UBC_BDMRA 0xfffc040c | ||
| 19 | |||
| 20 | #define UBC_BARB 0xfffc0410 | ||
| 21 | #define UBC_BAMRB 0xfffc0414 | ||
| 22 | #define UBC_BBRB 0xfffc04b0 /* 16 bit access */ | ||
| 23 | #define UBC_BDRB 0xfffc0418 | ||
| 24 | #define UBC_BDMRB 0xfffc041c | ||
| 25 | |||
| 26 | #define UBC_BRCR 0xfffc04c0 | ||
| 27 | |||
| 28 | #endif /* __ASM_CPU_SH2A_UBC_H */ | ||
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index ff1f0e6e9bec..b7cf6a547f11 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S | |||
| @@ -1569,86 +1569,6 @@ ___clear_user_exit: | |||
| 1569 | #endif /* CONFIG_MMU */ | 1569 | #endif /* CONFIG_MMU */ |
| 1570 | 1570 | ||
| 1571 | /* | 1571 | /* |
| 1572 | * int __strncpy_from_user(unsigned long __dest, unsigned long __src, | ||
| 1573 | * int __count) | ||
| 1574 | * | ||
| 1575 | * Inputs: | ||
| 1576 | * (r2) target address | ||
| 1577 | * (r3) source address | ||
| 1578 | * (r4) maximum size in bytes | ||
| 1579 | * | ||
| 1580 | * Ouputs: | ||
| 1581 | * (*r2) copied data | ||
| 1582 | * (r2) -EFAULT (in case of faulting) | ||
| 1583 | * copied data (otherwise) | ||
| 1584 | */ | ||
| 1585 | .global __strncpy_from_user | ||
| 1586 | __strncpy_from_user: | ||
| 1587 | pta ___strncpy_from_user1, tr0 | ||
| 1588 | pta ___strncpy_from_user_done, tr1 | ||
| 1589 | or r4, ZERO, r5 /* r5 = original count */ | ||
| 1590 | beq/u r4, r63, tr1 /* early exit if r4==0 */ | ||
| 1591 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
| 1592 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
| 1593 | |||
| 1594 | ___strncpy_from_user1: | ||
| 1595 | ld.b r3, 0, r7 /* Fault address: only in reading */ | ||
| 1596 | st.b r2, 0, r7 | ||
| 1597 | addi r2, 1, r2 | ||
| 1598 | addi r3, 1, r3 | ||
| 1599 | beq/u ZERO, r7, tr1 | ||
| 1600 | addi r4, -1, r4 /* return real number of copied bytes */ | ||
| 1601 | bne/l ZERO, r4, tr0 | ||
| 1602 | |||
| 1603 | ___strncpy_from_user_done: | ||
| 1604 | sub r5, r4, r6 /* If done, return copied */ | ||
| 1605 | |||
| 1606 | ___strncpy_from_user_exit: | ||
| 1607 | or r6, ZERO, r2 | ||
| 1608 | ptabs LINK, tr0 | ||
| 1609 | blink tr0, ZERO | ||
| 1610 | |||
| 1611 | /* | ||
| 1612 | * extern long __strnlen_user(const char *__s, long __n) | ||
| 1613 | * | ||
| 1614 | * Inputs: | ||
| 1615 | * (r2) source address | ||
| 1616 | * (r3) source size in bytes | ||
| 1617 | * | ||
| 1618 | * Ouputs: | ||
| 1619 | * (r2) -EFAULT (in case of faulting) | ||
| 1620 | * string length (otherwise) | ||
| 1621 | */ | ||
| 1622 | .global __strnlen_user | ||
| 1623 | __strnlen_user: | ||
| 1624 | pta ___strnlen_user_set_reply, tr0 | ||
| 1625 | pta ___strnlen_user1, tr1 | ||
| 1626 | or ZERO, ZERO, r5 /* r5 = counter */ | ||
| 1627 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
| 1628 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
| 1629 | beq r3, ZERO, tr0 | ||
| 1630 | |||
| 1631 | ___strnlen_user1: | ||
| 1632 | ldx.b r2, r5, r7 /* Fault address: only in reading */ | ||
| 1633 | addi r3, -1, r3 /* No real fixup */ | ||
| 1634 | addi r5, 1, r5 | ||
| 1635 | beq r3, ZERO, tr0 | ||
| 1636 | bne r7, ZERO, tr1 | ||
| 1637 | ! The line below used to be active. This meant led to a junk byte lying between each pair | ||
| 1638 | ! of entries in the argv & envp structures in memory. Whilst the program saw the right data | ||
| 1639 | ! via the argv and envp arguments to main, it meant the 'flat' representation visible through | ||
| 1640 | ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example. | ||
| 1641 | ! addi r5, 1, r5 /* Include '\0' */ | ||
| 1642 | |||
| 1643 | ___strnlen_user_set_reply: | ||
| 1644 | or r5, ZERO, r6 /* If done, return counter */ | ||
| 1645 | |||
| 1646 | ___strnlen_user_exit: | ||
| 1647 | or r6, ZERO, r2 | ||
| 1648 | ptabs LINK, tr0 | ||
| 1649 | blink tr0, ZERO | ||
| 1650 | |||
| 1651 | /* | ||
| 1652 | * extern long __get_user_asm_?(void *val, long addr) | 1572 | * extern long __get_user_asm_?(void *val, long addr) |
| 1653 | * | 1573 | * |
| 1654 | * Inputs: | 1574 | * Inputs: |
| @@ -1982,8 +1902,6 @@ asm_uaccess_start: | |||
| 1982 | .long ___copy_user2, ___copy_user_exit | 1902 | .long ___copy_user2, ___copy_user_exit |
| 1983 | .long ___clear_user1, ___clear_user_exit | 1903 | .long ___clear_user1, ___clear_user_exit |
| 1984 | #endif | 1904 | #endif |
| 1985 | .long ___strncpy_from_user1, ___strncpy_from_user_exit | ||
| 1986 | .long ___strnlen_user1, ___strnlen_user_exit | ||
| 1987 | .long ___get_user_asm_b1, ___get_user_asm_b_exit | 1905 | .long ___get_user_asm_b1, ___get_user_asm_b_exit |
| 1988 | .long ___get_user_asm_w1, ___get_user_asm_w_exit | 1906 | .long ___get_user_asm_w1, ___get_user_asm_w_exit |
| 1989 | .long ___get_user_asm_l1, ___get_user_asm_l_exit | 1907 | .long ___get_user_asm_l1, ___get_user_asm_l_exit |
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index 9b7a459a4613..055d91b70305 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
| 5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
| 6 | #include <linux/stackprotector.h> | 6 | #include <linux/stackprotector.h> |
| 7 | #include <asm/fpu.h> | ||
| 7 | 8 | ||
| 8 | struct kmem_cache *task_xstate_cachep = NULL; | 9 | struct kmem_cache *task_xstate_cachep = NULL; |
| 9 | unsigned int xstate_size; | 10 | unsigned int xstate_size; |
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 4264583eabac..602545b12a86 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <asm/switch_to.h> | 33 | #include <asm/switch_to.h> |
| 34 | 34 | ||
| 35 | struct task_struct *last_task_used_math = NULL; | 35 | struct task_struct *last_task_used_math = NULL; |
| 36 | struct pt_regs fake_swapper_regs = { 0, }; | ||
| 36 | 37 | ||
| 37 | void show_regs(struct pt_regs *regs) | 38 | void show_regs(struct pt_regs *regs) |
| 38 | { | 39 | { |
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index 45afa5c51f67..26a0774f5272 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c | |||
| @@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b); | |||
| 32 | EXPORT_SYMBOL(__get_user_asm_w); | 32 | EXPORT_SYMBOL(__get_user_asm_w); |
| 33 | EXPORT_SYMBOL(__get_user_asm_l); | 33 | EXPORT_SYMBOL(__get_user_asm_l); |
| 34 | EXPORT_SYMBOL(__get_user_asm_q); | 34 | EXPORT_SYMBOL(__get_user_asm_q); |
| 35 | EXPORT_SYMBOL(__strnlen_user); | ||
| 36 | EXPORT_SYMBOL(__strncpy_from_user); | ||
| 37 | EXPORT_SYMBOL(__clear_user); | 35 | EXPORT_SYMBOL(__clear_user); |
| 38 | EXPORT_SYMBOL(copy_page); | 36 | EXPORT_SYMBOL(copy_page); |
| 39 | EXPORT_SYMBOL(__copy_user); | 37 | EXPORT_SYMBOL(__copy_user); |
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h deleted file mode 100644 index 870db5928577..000000000000 --- a/arch/sparc/include/asm/cmt.h +++ /dev/null | |||
| @@ -1,59 +0,0 @@ | |||
| 1 | #ifndef _SPARC64_CMT_H | ||
| 2 | #define _SPARC64_CMT_H | ||
| 3 | |||
| 4 | /* cmt.h: Chip Multi-Threading register definitions | ||
| 5 | * | ||
| 6 | * Copyright (C) 2004 David S. Miller (davem@redhat.com) | ||
| 7 | */ | ||
| 8 | |||
| 9 | /* ASI_CORE_ID - private */ | ||
| 10 | #define LP_ID 0x0000000000000010UL | ||
| 11 | #define LP_ID_MAX 0x00000000003f0000UL | ||
| 12 | #define LP_ID_ID 0x000000000000003fUL | ||
| 13 | |||
| 14 | /* ASI_INTR_ID - private */ | ||
| 15 | #define LP_INTR_ID 0x0000000000000000UL | ||
| 16 | #define LP_INTR_ID_ID 0x00000000000003ffUL | ||
| 17 | |||
| 18 | /* ASI_CESR_ID - private */ | ||
| 19 | #define CESR_ID 0x0000000000000040UL | ||
| 20 | #define CESR_ID_ID 0x00000000000000ffUL | ||
| 21 | |||
| 22 | /* ASI_CORE_AVAILABLE - shared */ | ||
| 23 | #define LP_AVAIL 0x0000000000000000UL | ||
| 24 | #define LP_AVAIL_1 0x0000000000000002UL | ||
| 25 | #define LP_AVAIL_0 0x0000000000000001UL | ||
| 26 | |||
| 27 | /* ASI_CORE_ENABLE_STATUS - shared */ | ||
| 28 | #define LP_ENAB_STAT 0x0000000000000010UL | ||
| 29 | #define LP_ENAB_STAT_1 0x0000000000000002UL | ||
| 30 | #define LP_ENAB_STAT_0 0x0000000000000001UL | ||
| 31 | |||
| 32 | /* ASI_CORE_ENABLE - shared */ | ||
| 33 | #define LP_ENAB 0x0000000000000020UL | ||
| 34 | #define LP_ENAB_1 0x0000000000000002UL | ||
| 35 | #define LP_ENAB_0 0x0000000000000001UL | ||
| 36 | |||
| 37 | /* ASI_CORE_RUNNING - shared */ | ||
| 38 | #define LP_RUNNING_RW 0x0000000000000050UL | ||
| 39 | #define LP_RUNNING_W1S 0x0000000000000060UL | ||
| 40 | #define LP_RUNNING_W1C 0x0000000000000068UL | ||
| 41 | #define LP_RUNNING_1 0x0000000000000002UL | ||
| 42 | #define LP_RUNNING_0 0x0000000000000001UL | ||
| 43 | |||
| 44 | /* ASI_CORE_RUNNING_STAT - shared */ | ||
| 45 | #define LP_RUN_STAT 0x0000000000000058UL | ||
| 46 | #define LP_RUN_STAT_1 0x0000000000000002UL | ||
| 47 | #define LP_RUN_STAT_0 0x0000000000000001UL | ||
| 48 | |||
| 49 | /* ASI_XIR_STEERING - shared */ | ||
| 50 | #define LP_XIR_STEER 0x0000000000000030UL | ||
| 51 | #define LP_XIR_STEER_1 0x0000000000000002UL | ||
| 52 | #define LP_XIR_STEER_0 0x0000000000000001UL | ||
| 53 | |||
| 54 | /* ASI_CMT_ERROR_STEERING - shared */ | ||
| 55 | #define CMT_ER_STEER 0x0000000000000040UL | ||
| 56 | #define CMT_ER_STEER_1 0x0000000000000002UL | ||
| 57 | #define CMT_ER_STEER_0 0x0000000000000001UL | ||
| 58 | |||
| 59 | #endif /* _SPARC64_CMT_H */ | ||
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h deleted file mode 100644 index f8423039b242..000000000000 --- a/arch/sparc/include/asm/mpmbox.h +++ /dev/null | |||
| @@ -1,67 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * mpmbox.h: Interface and defines for the OpenProm mailbox | ||
| 3 | * facilities for MP machines under Linux. | ||
| 4 | * | ||
| 5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _SPARC_MPMBOX_H | ||
| 9 | #define _SPARC_MPMBOX_H | ||
| 10 | |||
| 11 | /* The prom allocates, for each CPU on the machine an unsigned | ||
| 12 | * byte in physical ram. You probe the device tree prom nodes | ||
| 13 | * for these values. The purpose of this byte is to be able to | ||
| 14 | * pass messages from one cpu to another. | ||
| 15 | */ | ||
| 16 | |||
| 17 | /* These are the main message types we have to look for in our | ||
| 18 | * Cpu mailboxes, based upon these values we decide what course | ||
| 19 | * of action to take. | ||
| 20 | */ | ||
| 21 | |||
| 22 | /* The CPU is executing code in the kernel. */ | ||
| 23 | #define MAILBOX_ISRUNNING 0xf0 | ||
| 24 | |||
| 25 | /* Another CPU called romvec->pv_exit(), you should call | ||
| 26 | * prom_stopcpu() when you see this in your mailbox. | ||
| 27 | */ | ||
| 28 | #define MAILBOX_EXIT 0xfb | ||
| 29 | |||
| 30 | /* Another CPU called romvec->pv_enter(), you should call | ||
| 31 | * prom_cpuidle() when this is seen. | ||
| 32 | */ | ||
| 33 | #define MAILBOX_GOSPIN 0xfc | ||
| 34 | |||
| 35 | /* Another CPU has hit a breakpoint either into kadb or the prom | ||
| 36 | * itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle() | ||
| 37 | * at this point. | ||
| 38 | */ | ||
| 39 | #define MAILBOX_BPT_SPIN 0xfd | ||
| 40 | |||
| 41 | /* Oh geese, some other nitwit got a damn watchdog reset. The party's | ||
| 42 | * over so go call prom_stopcpu(). | ||
| 43 | */ | ||
| 44 | #define MAILBOX_WDOG_STOP 0xfe | ||
| 45 | |||
| 46 | #ifndef __ASSEMBLY__ | ||
| 47 | |||
| 48 | /* Handy macro's to determine a cpu's state. */ | ||
| 49 | |||
| 50 | /* Is the cpu still in Power On Self Test? */ | ||
| 51 | #define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f) | ||
| 52 | |||
| 53 | /* Is the cpu at the 'ok' prompt of the PROM? */ | ||
| 54 | #define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f) | ||
| 55 | |||
| 56 | /* Is the cpu spinning in the PROM? */ | ||
| 57 | #define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef) | ||
| 58 | |||
| 59 | /* Sanity check... This is junk mail, throw it out. */ | ||
| 60 | #define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa) | ||
| 61 | |||
| 62 | /* Is the cpu actively running an application/kernel-code? */ | ||
| 63 | #define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING) | ||
| 64 | |||
| 65 | #endif /* !(__ASSEMBLY__) */ | ||
| 66 | |||
| 67 | #endif /* !(_SPARC_MPMBOX_H) */ | ||
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 7e1fef36bde6..e9c670d7a7fe 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h | |||
| @@ -91,11 +91,6 @@ extern void smp_nap(void); | |||
| 91 | /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ | 91 | /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ |
| 92 | extern void _cpu_idle(void); | 92 | extern void _cpu_idle(void); |
| 93 | 93 | ||
| 94 | /* Switch boot idle thread to a freshly-allocated stack and free old stack. */ | ||
| 95 | extern void cpu_idle_on_new_stack(struct thread_info *old_ti, | ||
| 96 | unsigned long new_sp, | ||
| 97 | unsigned long new_ss10); | ||
| 98 | |||
| 99 | #else /* __ASSEMBLY__ */ | 94 | #else /* __ASSEMBLY__ */ |
| 100 | 95 | ||
| 101 | /* | 96 | /* |
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index c3dd275f25e2..9ab078a4605d 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h | |||
| @@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs); | |||
| 146 | #ifdef __tilegx__ | 146 | #ifdef __tilegx__ |
| 147 | #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) | 147 | #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) |
| 148 | #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) | 148 | #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) |
| 149 | #define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret) | 149 | #define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret) |
| 150 | #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) | 150 | #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) |
| 151 | #else | 151 | #else |
| 152 | #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) | 152 | #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) |
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S index 133c4b56a99e..c31637baff28 100644 --- a/arch/tile/kernel/entry.S +++ b/arch/tile/kernel/entry.S | |||
| @@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current) | |||
| 68 | jrp lr /* keep backtracer happy */ | 68 | jrp lr /* keep backtracer happy */ |
| 69 | STD_ENDPROC(KBacktraceIterator_init_current) | 69 | STD_ENDPROC(KBacktraceIterator_init_current) |
| 70 | 70 | ||
| 71 | /* | ||
| 72 | * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then | ||
| 73 | * free the old stack (passed in r0) and re-invoke cpu_idle(). | ||
| 74 | * We update sp and ksp0 simultaneously to avoid backtracer warnings. | ||
| 75 | */ | ||
| 76 | STD_ENTRY(cpu_idle_on_new_stack) | ||
| 77 | { | ||
| 78 | move sp, r1 | ||
| 79 | mtspr SPR_SYSTEM_SAVE_K_0, r2 | ||
| 80 | } | ||
| 81 | jal free_thread_info | ||
| 82 | j cpu_idle | ||
| 83 | STD_ENDPROC(cpu_idle_on_new_stack) | ||
| 84 | |||
| 85 | /* Loop forever on a nap during SMP boot. */ | 71 | /* Loop forever on a nap during SMP boot. */ |
| 86 | STD_ENTRY(smp_nap) | 72 | STD_ENTRY(smp_nap) |
| 87 | nap | 73 | nap |
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6098ccc59be2..dd87f3420390 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/smp.h> | 29 | #include <linux/smp.h> |
| 30 | #include <linux/timex.h> | 30 | #include <linux/timex.h> |
| 31 | #include <linux/hugetlb.h> | 31 | #include <linux/hugetlb.h> |
| 32 | #include <linux/start_kernel.h> | ||
| 32 | #include <asm/setup.h> | 33 | #include <asm/setup.h> |
| 33 | #include <asm/sections.h> | 34 | #include <asm/sections.h> |
| 34 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 8bbea6aa40d9..efe5acfc79c3 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
| @@ -94,10 +94,10 @@ bs_die: | |||
| 94 | 94 | ||
| 95 | .section ".bsdata", "a" | 95 | .section ".bsdata", "a" |
| 96 | bugger_off_msg: | 96 | bugger_off_msg: |
| 97 | .ascii "Direct booting from floppy is no longer supported.\r\n" | 97 | .ascii "Direct floppy boot is not supported. " |
| 98 | .ascii "Please use a boot loader program instead.\r\n" | 98 | .ascii "Use a boot loader program instead.\r\n" |
| 99 | .ascii "\n" | 99 | .ascii "\n" |
| 100 | .ascii "Remove disk and press any key to reboot . . .\r\n" | 100 | .ascii "Remove disk and press any key to reboot ...\r\n" |
| 101 | .byte 0 | 101 | .byte 0 |
| 102 | 102 | ||
| 103 | #ifdef CONFIG_EFI_STUB | 103 | #ifdef CONFIG_EFI_STUB |
| @@ -111,7 +111,7 @@ coff_header: | |||
| 111 | #else | 111 | #else |
| 112 | .word 0x8664 # x86-64 | 112 | .word 0x8664 # x86-64 |
| 113 | #endif | 113 | #endif |
| 114 | .word 2 # nr_sections | 114 | .word 3 # nr_sections |
| 115 | .long 0 # TimeDateStamp | 115 | .long 0 # TimeDateStamp |
| 116 | .long 0 # PointerToSymbolTable | 116 | .long 0 # PointerToSymbolTable |
| 117 | .long 1 # NumberOfSymbols | 117 | .long 1 # NumberOfSymbols |
| @@ -158,8 +158,8 @@ extra_header_fields: | |||
| 158 | #else | 158 | #else |
| 159 | .quad 0 # ImageBase | 159 | .quad 0 # ImageBase |
| 160 | #endif | 160 | #endif |
| 161 | .long 0x1000 # SectionAlignment | 161 | .long 0x20 # SectionAlignment |
| 162 | .long 0x200 # FileAlignment | 162 | .long 0x20 # FileAlignment |
| 163 | .word 0 # MajorOperatingSystemVersion | 163 | .word 0 # MajorOperatingSystemVersion |
| 164 | .word 0 # MinorOperatingSystemVersion | 164 | .word 0 # MinorOperatingSystemVersion |
| 165 | .word 0 # MajorImageVersion | 165 | .word 0 # MajorImageVersion |
| @@ -200,8 +200,10 @@ extra_header_fields: | |||
| 200 | 200 | ||
| 201 | # Section table | 201 | # Section table |
| 202 | section_table: | 202 | section_table: |
| 203 | .ascii ".text" | 203 | # |
| 204 | .byte 0 | 204 | # The offset & size fields are filled in by build.c. |
| 205 | # | ||
| 206 | .ascii ".setup" | ||
| 205 | .byte 0 | 207 | .byte 0 |
| 206 | .byte 0 | 208 | .byte 0 |
| 207 | .long 0 | 209 | .long 0 |
| @@ -217,9 +219,8 @@ section_table: | |||
| 217 | 219 | ||
| 218 | # | 220 | # |
| 219 | # The EFI application loader requires a relocation section | 221 | # The EFI application loader requires a relocation section |
| 220 | # because EFI applications must be relocatable. But since | 222 | # because EFI applications must be relocatable. The .reloc |
| 221 | # we don't need the loader to fixup any relocs for us, we | 223 | # offset & size fields are filled in by build.c. |
| 222 | # just create an empty (zero-length) .reloc section header. | ||
| 223 | # | 224 | # |
| 224 | .ascii ".reloc" | 225 | .ascii ".reloc" |
| 225 | .byte 0 | 226 | .byte 0 |
| @@ -233,6 +234,25 @@ section_table: | |||
| 233 | .word 0 # NumberOfRelocations | 234 | .word 0 # NumberOfRelocations |
| 234 | .word 0 # NumberOfLineNumbers | 235 | .word 0 # NumberOfLineNumbers |
| 235 | .long 0x42100040 # Characteristics (section flags) | 236 | .long 0x42100040 # Characteristics (section flags) |
| 237 | |||
| 238 | # | ||
| 239 | # The offset & size fields are filled in by build.c. | ||
| 240 | # | ||
| 241 | .ascii ".text" | ||
| 242 | .byte 0 | ||
| 243 | .byte 0 | ||
| 244 | .byte 0 | ||
| 245 | .long 0 | ||
| 246 | .long 0x0 # startup_{32,64} | ||
| 247 | .long 0 # Size of initialized data | ||
| 248 | # on disk | ||
| 249 | .long 0x0 # startup_{32,64} | ||
| 250 | .long 0 # PointerToRelocations | ||
| 251 | .long 0 # PointerToLineNumbers | ||
| 252 | .word 0 # NumberOfRelocations | ||
| 253 | .word 0 # NumberOfLineNumbers | ||
| 254 | .long 0x60500020 # Characteristics (section flags) | ||
| 255 | |||
| 236 | #endif /* CONFIG_EFI_STUB */ | 256 | #endif /* CONFIG_EFI_STUB */ |
| 237 | 257 | ||
| 238 | # Kernel attributes; used by setup. This is part 1 of the | 258 | # Kernel attributes; used by setup. This is part 1 of the |
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index 3f61f6e2b46f..4b8e165ee572 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c | |||
| @@ -50,6 +50,8 @@ typedef unsigned int u32; | |||
| 50 | u8 buf[SETUP_SECT_MAX*512]; | 50 | u8 buf[SETUP_SECT_MAX*512]; |
| 51 | int is_big_kernel; | 51 | int is_big_kernel; |
| 52 | 52 | ||
| 53 | #define PECOFF_RELOC_RESERVE 0x20 | ||
| 54 | |||
| 53 | /*----------------------------------------------------------------------*/ | 55 | /*----------------------------------------------------------------------*/ |
| 54 | 56 | ||
| 55 | static const u32 crctab32[] = { | 57 | static const u32 crctab32[] = { |
| @@ -133,11 +135,103 @@ static void usage(void) | |||
| 133 | die("Usage: build setup system [> image]"); | 135 | die("Usage: build setup system [> image]"); |
| 134 | } | 136 | } |
| 135 | 137 | ||
| 136 | int main(int argc, char ** argv) | ||
| 137 | { | ||
| 138 | #ifdef CONFIG_EFI_STUB | 138 | #ifdef CONFIG_EFI_STUB |
| 139 | unsigned int file_sz, pe_header; | 139 | |
| 140 | static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) | ||
| 141 | { | ||
| 142 | unsigned int pe_header; | ||
| 143 | unsigned short num_sections; | ||
| 144 | u8 *section; | ||
| 145 | |||
| 146 | pe_header = get_unaligned_le32(&buf[0x3c]); | ||
| 147 | num_sections = get_unaligned_le16(&buf[pe_header + 6]); | ||
| 148 | |||
| 149 | #ifdef CONFIG_X86_32 | ||
| 150 | section = &buf[pe_header + 0xa8]; | ||
| 151 | #else | ||
| 152 | section = &buf[pe_header + 0xb8]; | ||
| 140 | #endif | 153 | #endif |
| 154 | |||
| 155 | while (num_sections > 0) { | ||
| 156 | if (strncmp((char*)section, section_name, 8) == 0) { | ||
| 157 | /* section header size field */ | ||
| 158 | put_unaligned_le32(size, section + 0x8); | ||
| 159 | |||
| 160 | /* section header vma field */ | ||
| 161 | put_unaligned_le32(offset, section + 0xc); | ||
| 162 | |||
| 163 | /* section header 'size of initialised data' field */ | ||
| 164 | put_unaligned_le32(size, section + 0x10); | ||
| 165 | |||
| 166 | /* section header 'file offset' field */ | ||
| 167 | put_unaligned_le32(offset, section + 0x14); | ||
| 168 | |||
| 169 | break; | ||
| 170 | } | ||
| 171 | section += 0x28; | ||
| 172 | num_sections--; | ||
| 173 | } | ||
| 174 | } | ||
| 175 | |||
| 176 | static void update_pecoff_setup_and_reloc(unsigned int size) | ||
| 177 | { | ||
| 178 | u32 setup_offset = 0x200; | ||
| 179 | u32 reloc_offset = size - PECOFF_RELOC_RESERVE; | ||
| 180 | u32 setup_size = reloc_offset - setup_offset; | ||
| 181 | |||
| 182 | update_pecoff_section_header(".setup", setup_offset, setup_size); | ||
| 183 | update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE); | ||
| 184 | |||
| 185 | /* | ||
| 186 | * Modify .reloc section contents with a single entry. The | ||
| 187 | * relocation is applied to offset 10 of the relocation section. | ||
| 188 | */ | ||
| 189 | put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]); | ||
| 190 | put_unaligned_le32(10, &buf[reloc_offset + 4]); | ||
| 191 | } | ||
| 192 | |||
| 193 | static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) | ||
| 194 | { | ||
| 195 | unsigned int pe_header; | ||
| 196 | unsigned int text_sz = file_sz - text_start; | ||
| 197 | |||
| 198 | pe_header = get_unaligned_le32(&buf[0x3c]); | ||
| 199 | |||
| 200 | /* Size of image */ | ||
| 201 | put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); | ||
| 202 | |||
| 203 | /* | ||
| 204 | * Size of code: Subtract the size of the first sector (512 bytes) | ||
| 205 | * which includes the header. | ||
| 206 | */ | ||
| 207 | put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); | ||
| 208 | |||
| 209 | #ifdef CONFIG_X86_32 | ||
| 210 | /* | ||
| 211 | * Address of entry point. | ||
| 212 | * | ||
| 213 | * The EFI stub entry point is +16 bytes from the start of | ||
| 214 | * the .text section. | ||
| 215 | */ | ||
| 216 | put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); | ||
| 217 | #else | ||
| 218 | /* | ||
| 219 | * Address of entry point. startup_32 is at the beginning and | ||
| 220 | * the 64-bit entry point (startup_64) is always 512 bytes | ||
| 221 | * after. The EFI stub entry point is 16 bytes after that, as | ||
| 222 | * the first instruction allows legacy loaders to jump over | ||
| 223 | * the EFI stub initialisation | ||
| 224 | */ | ||
| 225 | put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); | ||
| 226 | #endif /* CONFIG_X86_32 */ | ||
| 227 | |||
| 228 | update_pecoff_section_header(".text", text_start, text_sz); | ||
| 229 | } | ||
| 230 | |||
| 231 | #endif /* CONFIG_EFI_STUB */ | ||
| 232 | |||
| 233 | int main(int argc, char ** argv) | ||
| 234 | { | ||
| 141 | unsigned int i, sz, setup_sectors; | 235 | unsigned int i, sz, setup_sectors; |
| 142 | int c; | 236 | int c; |
| 143 | u32 sys_size; | 237 | u32 sys_size; |
| @@ -163,6 +257,12 @@ int main(int argc, char ** argv) | |||
| 163 | die("Boot block hasn't got boot flag (0xAA55)"); | 257 | die("Boot block hasn't got boot flag (0xAA55)"); |
| 164 | fclose(file); | 258 | fclose(file); |
| 165 | 259 | ||
| 260 | #ifdef CONFIG_EFI_STUB | ||
| 261 | /* Reserve 0x20 bytes for .reloc section */ | ||
| 262 | memset(buf+c, 0, PECOFF_RELOC_RESERVE); | ||
| 263 | c += PECOFF_RELOC_RESERVE; | ||
| 264 | #endif | ||
| 265 | |||
| 166 | /* Pad unused space with zeros */ | 266 | /* Pad unused space with zeros */ |
| 167 | setup_sectors = (c + 511) / 512; | 267 | setup_sectors = (c + 511) / 512; |
| 168 | if (setup_sectors < SETUP_SECT_MIN) | 268 | if (setup_sectors < SETUP_SECT_MIN) |
| @@ -170,6 +270,10 @@ int main(int argc, char ** argv) | |||
| 170 | i = setup_sectors*512; | 270 | i = setup_sectors*512; |
| 171 | memset(buf+c, 0, i-c); | 271 | memset(buf+c, 0, i-c); |
| 172 | 272 | ||
| 273 | #ifdef CONFIG_EFI_STUB | ||
| 274 | update_pecoff_setup_and_reloc(i); | ||
| 275 | #endif | ||
| 276 | |||
| 173 | /* Set the default root device */ | 277 | /* Set the default root device */ |
| 174 | put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); | 278 | put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); |
| 175 | 279 | ||
| @@ -194,66 +298,8 @@ int main(int argc, char ** argv) | |||
| 194 | put_unaligned_le32(sys_size, &buf[0x1f4]); | 298 | put_unaligned_le32(sys_size, &buf[0x1f4]); |
| 195 | 299 | ||
| 196 | #ifdef CONFIG_EFI_STUB | 300 | #ifdef CONFIG_EFI_STUB |
| 197 | file_sz = sz + i + ((sys_size * 16) - sz); | 301 | update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); |
| 198 | 302 | #endif | |
| 199 | pe_header = get_unaligned_le32(&buf[0x3c]); | ||
| 200 | |||
| 201 | /* Size of image */ | ||
| 202 | put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); | ||
| 203 | |||
| 204 | /* | ||
| 205 | * Subtract the size of the first section (512 bytes) which | ||
| 206 | * includes the header and .reloc section. The remaining size | ||
| 207 | * is that of the .text section. | ||
| 208 | */ | ||
| 209 | file_sz -= 512; | ||
| 210 | |||
| 211 | /* Size of code */ | ||
| 212 | put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]); | ||
| 213 | |||
| 214 | #ifdef CONFIG_X86_32 | ||
| 215 | /* | ||
| 216 | * Address of entry point. | ||
| 217 | * | ||
| 218 | * The EFI stub entry point is +16 bytes from the start of | ||
| 219 | * the .text section. | ||
| 220 | */ | ||
| 221 | put_unaligned_le32(i + 16, &buf[pe_header + 0x28]); | ||
| 222 | |||
| 223 | /* .text size */ | ||
| 224 | put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); | ||
| 225 | |||
| 226 | /* .text vma */ | ||
| 227 | put_unaligned_le32(0x200, &buf[pe_header + 0xb4]); | ||
| 228 | |||
| 229 | /* .text size of initialised data */ | ||
| 230 | put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]); | ||
| 231 | |||
| 232 | /* .text file offset */ | ||
| 233 | put_unaligned_le32(0x200, &buf[pe_header + 0xbc]); | ||
| 234 | #else | ||
| 235 | /* | ||
| 236 | * Address of entry point. startup_32 is at the beginning and | ||
| 237 | * the 64-bit entry point (startup_64) is always 512 bytes | ||
| 238 | * after. The EFI stub entry point is 16 bytes after that, as | ||
| 239 | * the first instruction allows legacy loaders to jump over | ||
| 240 | * the EFI stub initialisation | ||
| 241 | */ | ||
| 242 | put_unaligned_le32(i + 528, &buf[pe_header + 0x28]); | ||
| 243 | |||
| 244 | /* .text size */ | ||
| 245 | put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); | ||
| 246 | |||
| 247 | /* .text vma */ | ||
| 248 | put_unaligned_le32(0x200, &buf[pe_header + 0xc4]); | ||
| 249 | |||
| 250 | /* .text size of initialised data */ | ||
| 251 | put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]); | ||
| 252 | |||
| 253 | /* .text file offset */ | ||
| 254 | put_unaligned_le32(0x200, &buf[pe_header + 0xcc]); | ||
| 255 | #endif /* CONFIG_X86_32 */ | ||
| 256 | #endif /* CONFIG_EFI_STUB */ | ||
| 257 | 303 | ||
| 258 | crc = partial_crc32(buf, i, crc); | 304 | crc = partial_crc32(buf, i, crc); |
| 259 | if (fwrite(buf, 1, i, stdout) != i) | 305 | if (fwrite(buf, 1, i, stdout) != i) |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index be6d9e365a80..3470624d7835 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
| @@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec) | |||
| 2460 | pxor IN3, STATE4 | 2460 | pxor IN3, STATE4 |
| 2461 | movaps IN4, IV | 2461 | movaps IN4, IV |
| 2462 | #else | 2462 | #else |
| 2463 | pxor (INP), STATE2 | ||
| 2464 | pxor 0x10(INP), STATE3 | ||
| 2465 | pxor IN1, STATE4 | 2463 | pxor IN1, STATE4 |
| 2466 | movaps IN2, IV | 2464 | movaps IN2, IV |
| 2465 | movups (INP), IN1 | ||
| 2466 | pxor IN1, STATE2 | ||
| 2467 | movups 0x10(INP), IN2 | ||
| 2468 | pxor IN2, STATE3 | ||
| 2467 | #endif | 2469 | #endif |
| 2468 | movups STATE1, (OUTP) | 2470 | movups STATE1, (OUTP) |
| 2469 | movups STATE2, 0x10(OUTP) | 2471 | movups STATE2, 0x10(OUTP) |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 0e3793b821ef..dc580c42851c 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
| @@ -54,6 +54,20 @@ struct nmiaction { | |||
| 54 | __register_nmi_handler((t), &fn##_na); \ | 54 | __register_nmi_handler((t), &fn##_na); \ |
| 55 | }) | 55 | }) |
| 56 | 56 | ||
| 57 | /* | ||
| 58 | * For special handlers that register/unregister in the | ||
| 59 | * init section only. This should be considered rare. | ||
| 60 | */ | ||
| 61 | #define register_nmi_handler_initonly(t, fn, fg, n) \ | ||
| 62 | ({ \ | ||
| 63 | static struct nmiaction fn##_na __initdata = { \ | ||
| 64 | .handler = (fn), \ | ||
| 65 | .name = (n), \ | ||
| 66 | .flags = (fg), \ | ||
| 67 | }; \ | ||
| 68 | __register_nmi_handler((t), &fn##_na); \ | ||
| 69 | }) | ||
| 70 | |||
| 57 | int __register_nmi_handler(unsigned int, struct nmiaction *); | 71 | int __register_nmi_handler(unsigned int, struct nmiaction *); |
| 58 | 72 | ||
| 59 | void unregister_nmi_handler(unsigned int, const char *); | 73 | void unregister_nmi_handler(unsigned int, const char *); |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 04cd6882308e..e1f3a17034fc 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
| @@ -33,9 +33,8 @@ | |||
| 33 | #define segment_eq(a, b) ((a).seg == (b).seg) | 33 | #define segment_eq(a, b) ((a).seg == (b).seg) |
| 34 | 34 | ||
| 35 | #define user_addr_max() (current_thread_info()->addr_limit.seg) | 35 | #define user_addr_max() (current_thread_info()->addr_limit.seg) |
| 36 | #define __addr_ok(addr) \ | 36 | #define __addr_ok(addr) \ |
| 37 | ((unsigned long __force)(addr) < \ | 37 | ((unsigned long __force)(addr) < user_addr_max()) |
| 38 | (current_thread_info()->addr_limit.seg)) | ||
| 39 | 38 | ||
| 40 | /* | 39 | /* |
| 41 | * Test whether a block of memory is a valid user space address. | 40 | * Test whether a block of memory is a valid user space address. |
| @@ -47,14 +46,14 @@ | |||
| 47 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... | 46 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... |
| 48 | */ | 47 | */ |
| 49 | 48 | ||
| 50 | #define __range_not_ok(addr, size) \ | 49 | #define __range_not_ok(addr, size, limit) \ |
| 51 | ({ \ | 50 | ({ \ |
| 52 | unsigned long flag, roksum; \ | 51 | unsigned long flag, roksum; \ |
| 53 | __chk_user_ptr(addr); \ | 52 | __chk_user_ptr(addr); \ |
| 54 | asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ | 53 | asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ |
| 55 | : "=&r" (flag), "=r" (roksum) \ | 54 | : "=&r" (flag), "=r" (roksum) \ |
| 56 | : "1" (addr), "g" ((long)(size)), \ | 55 | : "1" (addr), "g" ((long)(size)), \ |
| 57 | "rm" (current_thread_info()->addr_limit.seg)); \ | 56 | "rm" (limit)); \ |
| 58 | flag; \ | 57 | flag; \ |
| 59 | }) | 58 | }) |
| 60 | 59 | ||
| @@ -77,7 +76,8 @@ | |||
| 77 | * checks that the pointer is in the user space range - after calling | 76 | * checks that the pointer is in the user space range - after calling |
| 78 | * this function, memory access functions may still return -EFAULT. | 77 | * this function, memory access functions may still return -EFAULT. |
| 79 | */ | 78 | */ |
| 80 | #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) | 79 | #define access_ok(type, addr, size) \ |
| 80 | (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) | ||
| 81 | 81 | ||
| 82 | /* | 82 | /* |
| 83 | * The exception table consists of pairs of addresses relative to the | 83 | * The exception table consists of pairs of addresses relative to the |
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index becf47b81735..6149b476d9df 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
| @@ -149,7 +149,6 @@ | |||
| 149 | /* 4 bits of software ack period */ | 149 | /* 4 bits of software ack period */ |
| 150 | #define UV2_ACK_MASK 0x7UL | 150 | #define UV2_ACK_MASK 0x7UL |
| 151 | #define UV2_ACK_UNITS_SHFT 3 | 151 | #define UV2_ACK_UNITS_SHFT 3 |
| 152 | #define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT | ||
| 153 | #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT | 152 | #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT |
| 154 | 153 | ||
| 155 | /* | 154 | /* |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 6e76c191a835..d5fd66f0d4cd 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
| 21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
| 22 | #include <linux/suspend.h> | 22 | #include <linux/suspend.h> |
| 23 | #include <linux/kmemleak.h> | ||
| 24 | #include <asm/e820.h> | 23 | #include <asm/e820.h> |
| 25 | #include <asm/io.h> | 24 | #include <asm/io.h> |
| 26 | #include <asm/iommu.h> | 25 | #include <asm/iommu.h> |
| @@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void) | |||
| 95 | return 0; | 94 | return 0; |
| 96 | } | 95 | } |
| 97 | memblock_reserve(addr, aper_size); | 96 | memblock_reserve(addr, aper_size); |
| 98 | /* | ||
| 99 | * Kmemleak should not scan this block as it may not be mapped via the | ||
| 100 | * kernel direct mapping. | ||
| 101 | */ | ||
| 102 | kmemleak_ignore(phys_to_virt(addr)); | ||
| 103 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", | 97 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", |
| 104 | aper_size >> 10, addr); | 98 | aper_size >> 10, addr); |
| 105 | insert_aperture_resource((u32)addr, aper_size); | 99 | insert_aperture_resource((u32)addr, aper_size); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ac96561d1a99..5f0ff597437c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | |||
| 1195 | BUG_ON(!cfg->vector); | 1195 | BUG_ON(!cfg->vector); |
| 1196 | 1196 | ||
| 1197 | vector = cfg->vector; | 1197 | vector = cfg->vector; |
| 1198 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) | 1198 | for_each_cpu(cpu, cfg->domain) |
| 1199 | per_cpu(vector_irq, cpu)[vector] = -1; | 1199 | per_cpu(vector_irq, cpu)[vector] = -1; |
| 1200 | 1200 | ||
| 1201 | cfg->vector = 0; | 1201 | cfg->vector = 0; |
| @@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | |||
| 1203 | 1203 | ||
| 1204 | if (likely(!cfg->move_in_progress)) | 1204 | if (likely(!cfg->move_in_progress)) |
| 1205 | return; | 1205 | return; |
| 1206 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { | 1206 | for_each_cpu(cpu, cfg->old_domain) { |
| 1207 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | 1207 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; |
| 1208 | vector++) { | 1208 | vector++) { |
| 1209 | if (per_cpu(vector_irq, cpu)[vector] != irq) | 1209 | if (per_cpu(vector_irq, cpu)[vector] != irq) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 0a687fd185e6..da27c5d2168a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
| @@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data) | |||
| 1274 | */ | 1274 | */ |
| 1275 | iv = __this_cpu_read(mce_next_interval); | 1275 | iv = __this_cpu_read(mce_next_interval); |
| 1276 | if (mce_notify_irq()) | 1276 | if (mce_notify_irq()) |
| 1277 | iv = max(iv, (unsigned long) HZ/100); | 1277 | iv = max(iv / 2, (unsigned long) HZ/100); |
| 1278 | else | 1278 | else |
| 1279 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); | 1279 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); |
| 1280 | __this_cpu_write(mce_next_interval, iv); | 1280 | __this_cpu_write(mce_next_interval, iv); |
| @@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) | |||
| 1557 | static void __mcheck_cpu_init_timer(void) | 1557 | static void __mcheck_cpu_init_timer(void) |
| 1558 | { | 1558 | { |
| 1559 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1559 | struct timer_list *t = &__get_cpu_var(mce_timer); |
| 1560 | unsigned long iv = __this_cpu_read(mce_next_interval); | 1560 | unsigned long iv = check_interval * HZ; |
| 1561 | 1561 | ||
| 1562 | setup_timer(t, mce_timer_fn, smp_processor_id()); | 1562 | setup_timer(t, mce_timer_fn, smp_processor_id()); |
| 1563 | 1563 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index e049d6da0183..c4706cf9c011 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
| @@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void) | |||
| 1496 | if (!cpuc->shared_regs) | 1496 | if (!cpuc->shared_regs) |
| 1497 | goto error; | 1497 | goto error; |
| 1498 | } | 1498 | } |
| 1499 | cpuc->is_fake = 1; | ||
| 1499 | return cpuc; | 1500 | return cpuc; |
| 1500 | error: | 1501 | error: |
| 1501 | free_fake_cpuc(cpuc); | 1502 | free_fake_cpuc(cpuc); |
| @@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
| 1756 | dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); | 1757 | dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); |
| 1757 | } | 1758 | } |
| 1758 | 1759 | ||
| 1760 | static inline int | ||
| 1761 | valid_user_frame(const void __user *fp, unsigned long size) | ||
| 1762 | { | ||
| 1763 | return (__range_not_ok(fp, size, TASK_SIZE) == 0); | ||
| 1764 | } | ||
| 1765 | |||
| 1759 | #ifdef CONFIG_COMPAT | 1766 | #ifdef CONFIG_COMPAT |
| 1760 | 1767 | ||
| 1761 | #include <asm/compat.h> | 1768 | #include <asm/compat.h> |
| @@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
| 1780 | if (bytes != sizeof(frame)) | 1787 | if (bytes != sizeof(frame)) |
| 1781 | break; | 1788 | break; |
| 1782 | 1789 | ||
| 1783 | if (fp < compat_ptr(regs->sp)) | 1790 | if (!valid_user_frame(fp, sizeof(frame))) |
| 1784 | break; | 1791 | break; |
| 1785 | 1792 | ||
| 1786 | perf_callchain_store(entry, frame.return_address); | 1793 | perf_callchain_store(entry, frame.return_address); |
| @@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
| 1826 | if (bytes != sizeof(frame)) | 1833 | if (bytes != sizeof(frame)) |
| 1827 | break; | 1834 | break; |
| 1828 | 1835 | ||
| 1829 | if ((unsigned long)fp < regs->sp) | 1836 | if (!valid_user_frame(fp, sizeof(frame))) |
| 1830 | break; | 1837 | break; |
| 1831 | 1838 | ||
| 1832 | perf_callchain_store(entry, frame.return_address); | 1839 | perf_callchain_store(entry, frame.return_address); |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 6638aaf54493..7241e2fc3c17 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
| @@ -117,6 +117,7 @@ struct cpu_hw_events { | |||
| 117 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ | 117 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ |
| 118 | 118 | ||
| 119 | unsigned int group_flag; | 119 | unsigned int group_flag; |
| 120 | int is_fake; | ||
| 120 | 121 | ||
| 121 | /* | 122 | /* |
| 122 | * Intel DebugStore bits | 123 | * Intel DebugStore bits |
| @@ -364,6 +365,7 @@ struct x86_pmu { | |||
| 364 | int pebs_record_size; | 365 | int pebs_record_size; |
| 365 | void (*drain_pebs)(struct pt_regs *regs); | 366 | void (*drain_pebs)(struct pt_regs *regs); |
| 366 | struct event_constraint *pebs_constraints; | 367 | struct event_constraint *pebs_constraints; |
| 368 | void (*pebs_aliases)(struct perf_event *event); | ||
| 367 | 369 | ||
| 368 | /* | 370 | /* |
| 369 | * Intel LBR | 371 | * Intel LBR |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 166546ec6aef..187c294bc658 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
| @@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event) | |||
| 1119 | return NULL; | 1119 | return NULL; |
| 1120 | } | 1120 | } |
| 1121 | 1121 | ||
| 1122 | static bool intel_try_alt_er(struct perf_event *event, int orig_idx) | 1122 | static int intel_alt_er(int idx) |
| 1123 | { | 1123 | { |
| 1124 | if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) | 1124 | if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) |
| 1125 | return false; | 1125 | return idx; |
| 1126 | 1126 | ||
| 1127 | if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { | 1127 | if (idx == EXTRA_REG_RSP_0) |
| 1128 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 1128 | return EXTRA_REG_RSP_1; |
| 1129 | event->hw.config |= 0x01bb; | 1129 | |
| 1130 | event->hw.extra_reg.idx = EXTRA_REG_RSP_1; | 1130 | if (idx == EXTRA_REG_RSP_1) |
| 1131 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; | 1131 | return EXTRA_REG_RSP_0; |
| 1132 | } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { | 1132 | |
| 1133 | return idx; | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | static void intel_fixup_er(struct perf_event *event, int idx) | ||
| 1137 | { | ||
| 1138 | event->hw.extra_reg.idx = idx; | ||
| 1139 | |||
| 1140 | if (idx == EXTRA_REG_RSP_0) { | ||
| 1133 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 1141 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; |
| 1134 | event->hw.config |= 0x01b7; | 1142 | event->hw.config |= 0x01b7; |
| 1135 | event->hw.extra_reg.idx = EXTRA_REG_RSP_0; | ||
| 1136 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; | 1143 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; |
| 1144 | } else if (idx == EXTRA_REG_RSP_1) { | ||
| 1145 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | ||
| 1146 | event->hw.config |= 0x01bb; | ||
| 1147 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; | ||
| 1137 | } | 1148 | } |
| 1138 | |||
| 1139 | if (event->hw.extra_reg.idx == orig_idx) | ||
| 1140 | return false; | ||
| 1141 | |||
| 1142 | return true; | ||
| 1143 | } | 1149 | } |
| 1144 | 1150 | ||
| 1145 | /* | 1151 | /* |
| @@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, | |||
| 1157 | struct event_constraint *c = &emptyconstraint; | 1163 | struct event_constraint *c = &emptyconstraint; |
| 1158 | struct er_account *era; | 1164 | struct er_account *era; |
| 1159 | unsigned long flags; | 1165 | unsigned long flags; |
| 1160 | int orig_idx = reg->idx; | 1166 | int idx = reg->idx; |
| 1161 | 1167 | ||
| 1162 | /* already allocated shared msr */ | 1168 | /* |
| 1163 | if (reg->alloc) | 1169 | * reg->alloc can be set due to existing state, so for fake cpuc we |
| 1170 | * need to ignore this, otherwise we might fail to allocate proper fake | ||
| 1171 | * state for this extra reg constraint. Also see the comment below. | ||
| 1172 | */ | ||
| 1173 | if (reg->alloc && !cpuc->is_fake) | ||
| 1164 | return NULL; /* call x86_get_event_constraint() */ | 1174 | return NULL; /* call x86_get_event_constraint() */ |
| 1165 | 1175 | ||
| 1166 | again: | 1176 | again: |
| 1167 | era = &cpuc->shared_regs->regs[reg->idx]; | 1177 | era = &cpuc->shared_regs->regs[idx]; |
| 1168 | /* | 1178 | /* |
| 1169 | * we use spin_lock_irqsave() to avoid lockdep issues when | 1179 | * we use spin_lock_irqsave() to avoid lockdep issues when |
| 1170 | * passing a fake cpuc | 1180 | * passing a fake cpuc |
| @@ -1173,6 +1183,29 @@ again: | |||
| 1173 | 1183 | ||
| 1174 | if (!atomic_read(&era->ref) || era->config == reg->config) { | 1184 | if (!atomic_read(&era->ref) || era->config == reg->config) { |
| 1175 | 1185 | ||
| 1186 | /* | ||
| 1187 | * If its a fake cpuc -- as per validate_{group,event}() we | ||
| 1188 | * shouldn't touch event state and we can avoid doing so | ||
| 1189 | * since both will only call get_event_constraints() once | ||
| 1190 | * on each event, this avoids the need for reg->alloc. | ||
| 1191 | * | ||
| 1192 | * Not doing the ER fixup will only result in era->reg being | ||
| 1193 | * wrong, but since we won't actually try and program hardware | ||
| 1194 | * this isn't a problem either. | ||
| 1195 | */ | ||
| 1196 | if (!cpuc->is_fake) { | ||
| 1197 | if (idx != reg->idx) | ||
| 1198 | intel_fixup_er(event, idx); | ||
| 1199 | |||
| 1200 | /* | ||
| 1201 | * x86_schedule_events() can call get_event_constraints() | ||
| 1202 | * multiple times on events in the case of incremental | ||
| 1203 | * scheduling(). reg->alloc ensures we only do the ER | ||
| 1204 | * allocation once. | ||
| 1205 | */ | ||
| 1206 | reg->alloc = 1; | ||
| 1207 | } | ||
| 1208 | |||
| 1176 | /* lock in msr value */ | 1209 | /* lock in msr value */ |
| 1177 | era->config = reg->config; | 1210 | era->config = reg->config; |
| 1178 | era->reg = reg->reg; | 1211 | era->reg = reg->reg; |
| @@ -1180,17 +1213,17 @@ again: | |||
| 1180 | /* one more user */ | 1213 | /* one more user */ |
| 1181 | atomic_inc(&era->ref); | 1214 | atomic_inc(&era->ref); |
| 1182 | 1215 | ||
| 1183 | /* no need to reallocate during incremental event scheduling */ | ||
| 1184 | reg->alloc = 1; | ||
| 1185 | |||
| 1186 | /* | 1216 | /* |
| 1187 | * need to call x86_get_event_constraint() | 1217 | * need to call x86_get_event_constraint() |
| 1188 | * to check if associated event has constraints | 1218 | * to check if associated event has constraints |
| 1189 | */ | 1219 | */ |
| 1190 | c = NULL; | 1220 | c = NULL; |
| 1191 | } else if (intel_try_alt_er(event, orig_idx)) { | 1221 | } else { |
| 1192 | raw_spin_unlock_irqrestore(&era->lock, flags); | 1222 | idx = intel_alt_er(idx); |
| 1193 | goto again; | 1223 | if (idx != reg->idx) { |
| 1224 | raw_spin_unlock_irqrestore(&era->lock, flags); | ||
| 1225 | goto again; | ||
| 1226 | } | ||
| 1194 | } | 1227 | } |
| 1195 | raw_spin_unlock_irqrestore(&era->lock, flags); | 1228 | raw_spin_unlock_irqrestore(&era->lock, flags); |
| 1196 | 1229 | ||
| @@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, | |||
| 1204 | struct er_account *era; | 1237 | struct er_account *era; |
| 1205 | 1238 | ||
| 1206 | /* | 1239 | /* |
| 1207 | * only put constraint if extra reg was actually | 1240 | * Only put constraint if extra reg was actually allocated. Also takes |
| 1208 | * allocated. Also takes care of event which do | 1241 | * care of event which do not use an extra shared reg. |
| 1209 | * not use an extra shared reg | 1242 | * |
| 1243 | * Also, if this is a fake cpuc we shouldn't touch any event state | ||
| 1244 | * (reg->alloc) and we don't care about leaving inconsistent cpuc state | ||
| 1245 | * either since it'll be thrown out. | ||
| 1210 | */ | 1246 | */ |
| 1211 | if (!reg->alloc) | 1247 | if (!reg->alloc || cpuc->is_fake) |
| 1212 | return; | 1248 | return; |
| 1213 | 1249 | ||
| 1214 | era = &cpuc->shared_regs->regs[reg->idx]; | 1250 | era = &cpuc->shared_regs->regs[reg->idx]; |
| @@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | |||
| 1300 | intel_put_shared_regs_event_constraints(cpuc, event); | 1336 | intel_put_shared_regs_event_constraints(cpuc, event); |
| 1301 | } | 1337 | } |
| 1302 | 1338 | ||
| 1303 | static int intel_pmu_hw_config(struct perf_event *event) | 1339 | static void intel_pebs_aliases_core2(struct perf_event *event) |
| 1304 | { | 1340 | { |
| 1305 | int ret = x86_pmu_hw_config(event); | 1341 | if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { |
| 1306 | |||
| 1307 | if (ret) | ||
| 1308 | return ret; | ||
| 1309 | |||
| 1310 | if (event->attr.precise_ip && | ||
| 1311 | (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { | ||
| 1312 | /* | 1342 | /* |
| 1313 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P | 1343 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P |
| 1314 | * (0x003c) so that we can use it with PEBS. | 1344 | * (0x003c) so that we can use it with PEBS. |
| @@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
| 1329 | */ | 1359 | */ |
| 1330 | u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); | 1360 | u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); |
| 1331 | 1361 | ||
| 1362 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | ||
| 1363 | event->hw.config = alt_config; | ||
| 1364 | } | ||
| 1365 | } | ||
| 1366 | |||
| 1367 | static void intel_pebs_aliases_snb(struct perf_event *event) | ||
| 1368 | { | ||
| 1369 | if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { | ||
| 1370 | /* | ||
| 1371 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P | ||
| 1372 | * (0x003c) so that we can use it with PEBS. | ||
| 1373 | * | ||
| 1374 | * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't | ||
| 1375 | * PEBS capable. However we can use UOPS_RETIRED.ALL | ||
| 1376 | * (0x01c2), which is a PEBS capable event, to get the same | ||
| 1377 | * count. | ||
| 1378 | * | ||
| 1379 | * UOPS_RETIRED.ALL counts the number of cycles that retires | ||
| 1380 | * CNTMASK micro-ops. By setting CNTMASK to a value (16) | ||
| 1381 | * larger than the maximum number of micro-ops that can be | ||
| 1382 | * retired per cycle (4) and then inverting the condition, we | ||
| 1383 | * count all cycles that retire 16 or less micro-ops, which | ||
| 1384 | * is every cycle. | ||
| 1385 | * | ||
| 1386 | * Thereby we gain a PEBS capable cycle counter. | ||
| 1387 | */ | ||
| 1388 | u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); | ||
| 1332 | 1389 | ||
| 1333 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | 1390 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); |
| 1334 | event->hw.config = alt_config; | 1391 | event->hw.config = alt_config; |
| 1335 | } | 1392 | } |
| 1393 | } | ||
| 1394 | |||
| 1395 | static int intel_pmu_hw_config(struct perf_event *event) | ||
| 1396 | { | ||
| 1397 | int ret = x86_pmu_hw_config(event); | ||
| 1398 | |||
| 1399 | if (ret) | ||
| 1400 | return ret; | ||
| 1401 | |||
| 1402 | if (event->attr.precise_ip && x86_pmu.pebs_aliases) | ||
| 1403 | x86_pmu.pebs_aliases(event); | ||
| 1336 | 1404 | ||
| 1337 | if (intel_pmu_needs_lbr_smpl(event)) { | 1405 | if (intel_pmu_needs_lbr_smpl(event)) { |
| 1338 | ret = intel_pmu_setup_lbr_filter(event); | 1406 | ret = intel_pmu_setup_lbr_filter(event); |
| @@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
| 1607 | .max_period = (1ULL << 31) - 1, | 1675 | .max_period = (1ULL << 31) - 1, |
| 1608 | .get_event_constraints = intel_get_event_constraints, | 1676 | .get_event_constraints = intel_get_event_constraints, |
| 1609 | .put_event_constraints = intel_put_event_constraints, | 1677 | .put_event_constraints = intel_put_event_constraints, |
| 1678 | .pebs_aliases = intel_pebs_aliases_core2, | ||
| 1610 | 1679 | ||
| 1611 | .format_attrs = intel_arch3_formats_attr, | 1680 | .format_attrs = intel_arch3_formats_attr, |
| 1612 | 1681 | ||
| @@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void) | |||
| 1840 | break; | 1909 | break; |
| 1841 | 1910 | ||
| 1842 | case 42: /* SandyBridge */ | 1911 | case 42: /* SandyBridge */ |
| 1843 | x86_add_quirk(intel_sandybridge_quirk); | ||
| 1844 | case 45: /* SandyBridge, "Romely-EP" */ | 1912 | case 45: /* SandyBridge, "Romely-EP" */ |
| 1913 | x86_add_quirk(intel_sandybridge_quirk); | ||
| 1914 | case 58: /* IvyBridge */ | ||
| 1845 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 1915 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
| 1846 | sizeof(hw_cache_event_ids)); | 1916 | sizeof(hw_cache_event_ids)); |
| 1847 | 1917 | ||
| @@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void) | |||
| 1849 | 1919 | ||
| 1850 | x86_pmu.event_constraints = intel_snb_event_constraints; | 1920 | x86_pmu.event_constraints = intel_snb_event_constraints; |
| 1851 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; | 1921 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; |
| 1922 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | ||
| 1852 | x86_pmu.extra_regs = intel_snb_extra_regs; | 1923 | x86_pmu.extra_regs = intel_snb_extra_regs; |
| 1853 | /* all extra regs are per-cpu when HT is on */ | 1924 | /* all extra regs are per-cpu when HT is on */ |
| 1854 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 1925 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 5a3edc27f6e5..35e2192df9f4 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
| @@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { | |||
| 400 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | 400 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
| 401 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ | 401 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ |
| 402 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ | 402 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ |
| 403 | INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ | 403 | INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ |
| 404 | INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ | ||
| 405 | INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ | ||
| 406 | INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ | ||
| 407 | INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ | ||
| 408 | INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ | ||
| 409 | INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ | ||
| 410 | INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ | ||
| 411 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | 404 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ |
| 412 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | 405 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ |
| 413 | INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ | 406 | INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 086eb58c6e80..f1b42b3a186c 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
| @@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void) | |||
| 120 | bool ret = false; | 120 | bool ret = false; |
| 121 | struct pvclock_vcpu_time_info *src; | 121 | struct pvclock_vcpu_time_info *src; |
| 122 | 122 | ||
| 123 | /* | ||
| 124 | * per_cpu() is safe here because this function is only called from | ||
| 125 | * timer functions where preemption is already disabled. | ||
| 126 | */ | ||
| 127 | WARN_ON(!in_atomic()); | ||
| 128 | src = &__get_cpu_var(hv_clock); | 123 | src = &__get_cpu_var(hv_clock); |
| 129 | if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { | 124 | if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { |
| 130 | __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); | 125 | __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); |
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c index e31bf8d5c4d2..149b8d9c6ad4 100644 --- a/arch/x86/kernel/nmi_selftest.c +++ b/arch/x86/kernel/nmi_selftest.c | |||
| @@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs) | |||
| 42 | static void __init init_nmi_testsuite(void) | 42 | static void __init init_nmi_testsuite(void) |
| 43 | { | 43 | { |
| 44 | /* trap all the unknown NMIs we may generate */ | 44 | /* trap all the unknown NMIs we may generate */ |
| 45 | register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); | 45 | register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | static void __init cleanup_nmi_testsuite(void) | 48 | static void __init cleanup_nmi_testsuite(void) |
| @@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask) | |||
| 64 | { | 64 | { |
| 65 | unsigned long timeout; | 65 | unsigned long timeout; |
| 66 | 66 | ||
| 67 | if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, | 67 | if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback, |
| 68 | NMI_FLAG_FIRST, "nmi_selftest")) { | 68 | NMI_FLAG_FIRST, "nmi_selftest")) { |
| 69 | nmi_fail = FAILURE; | 69 | nmi_fail = FAILURE; |
| 70 | return; | 70 | return; |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 62c9457ccd2f..c0f420f76cd3 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
| @@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
| 100 | struct dma_attrs *attrs) | 100 | struct dma_attrs *attrs) |
| 101 | { | 101 | { |
| 102 | unsigned long dma_mask; | 102 | unsigned long dma_mask; |
| 103 | struct page *page = NULL; | 103 | struct page *page; |
| 104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
| 105 | dma_addr_t addr; | 105 | dma_addr_t addr; |
| 106 | 106 | ||
| @@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
| 108 | 108 | ||
| 109 | flag |= __GFP_ZERO; | 109 | flag |= __GFP_ZERO; |
| 110 | again: | 110 | again: |
| 111 | page = NULL; | ||
| 111 | if (!(flag & GFP_ATOMIC)) | 112 | if (!(flag & GFP_ATOMIC)) |
| 112 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); | 113 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); |
| 113 | if (!page) | 114 | if (!page) |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 79c45af81604..25b48edb847c 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
| @@ -639,9 +639,11 @@ void native_machine_shutdown(void) | |||
| 639 | set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); | 639 | set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); |
| 640 | 640 | ||
| 641 | /* | 641 | /* |
| 642 | * O.K Now that I'm on the appropriate processor, | 642 | * O.K Now that I'm on the appropriate processor, stop all of the |
| 643 | * stop all of the others. | 643 | * others. Also disable the local irq to not receive the per-cpu |
| 644 | * timer interrupt which may trigger scheduler's load balance. | ||
| 644 | */ | 645 | */ |
| 646 | local_irq_disable(); | ||
| 645 | stop_other_cpus(); | 647 | stop_other_cpus(); |
| 646 | #endif | 648 | #endif |
| 647 | 649 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f56f96da77f5..7bd8a0823654 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
| 349 | 349 | ||
| 350 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 350 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
| 351 | { | 351 | { |
| 352 | if (c->phys_proc_id == o->phys_proc_id) | 352 | if (c->phys_proc_id == o->phys_proc_id) { |
| 353 | return topology_sane(c, o, "mc"); | 353 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) |
| 354 | return true; | ||
| 354 | 355 | ||
| 356 | return topology_sane(c, o, "mc"); | ||
| 357 | } | ||
| 355 | return false; | 358 | return false; |
| 356 | } | 359 | } |
| 357 | 360 | ||
| @@ -382,6 +385,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 382 | if ((i == cpu) || (has_mc && match_llc(c, o))) | 385 | if ((i == cpu) || (has_mc && match_llc(c, o))) |
| 383 | link_mask(llc_shared, cpu, i); | 386 | link_mask(llc_shared, cpu, i); |
| 384 | 387 | ||
| 388 | } | ||
| 389 | |||
| 390 | /* | ||
| 391 | * This needs a separate iteration over the cpus because we rely on all | ||
| 392 | * cpu_sibling_mask links to be set-up. | ||
| 393 | */ | ||
| 394 | for_each_cpu(i, cpu_sibling_setup_mask) { | ||
| 395 | o = &cpu_data(i); | ||
| 396 | |||
| 385 | if ((i == cpu) || (has_mc && match_mc(c, o))) { | 397 | if ((i == cpu) || (has_mc && match_mc(c, o))) { |
| 386 | link_mask(core, cpu, i); | 398 | link_mask(core, cpu, i); |
| 387 | 399 | ||
| @@ -410,15 +422,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 410 | /* maps the cpu to the sched domain representing multi-core */ | 422 | /* maps the cpu to the sched domain representing multi-core */ |
| 411 | const struct cpumask *cpu_coregroup_mask(int cpu) | 423 | const struct cpumask *cpu_coregroup_mask(int cpu) |
| 412 | { | 424 | { |
| 413 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 425 | return cpu_llc_shared_mask(cpu); |
| 414 | /* | ||
| 415 | * For perf, we return last level cache shared map. | ||
| 416 | * And for power savings, we return cpu_core_map | ||
| 417 | */ | ||
| 418 | if (!(cpu_has(c, X86_FEATURE_AMD_DCM))) | ||
| 419 | return cpu_core_mask(cpu); | ||
| 420 | else | ||
| 421 | return cpu_llc_shared_mask(cpu); | ||
| 422 | } | 426 | } |
| 423 | 427 | ||
| 424 | static void impress_friends(void) | 428 | static void impress_friends(void) |
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index f61ee67ec00f..4f74d94c8d97 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | 9 | ||
| 10 | #include <asm/word-at-a-time.h> | 10 | #include <asm/word-at-a-time.h> |
| 11 | #include <linux/sched.h> | ||
| 11 | 12 | ||
| 12 | /* | 13 | /* |
| 13 | * best effort, GUP based copy_from_user() that is NMI-safe | 14 | * best effort, GUP based copy_from_user() that is NMI-safe |
| @@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
| 21 | void *map; | 22 | void *map; |
| 22 | int ret; | 23 | int ret; |
| 23 | 24 | ||
| 25 | if (__range_not_ok(from, n, TASK_SIZE)) | ||
| 26 | return len; | ||
| 27 | |||
| 24 | do { | 28 | do { |
| 25 | ret = __get_user_pages_fast(addr, 1, 0, &page); | 29 | ret = __get_user_pages_fast(addr, 1, 0, &page); |
| 26 | if (!ret) | 30 | if (!ret) |
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 819137904428..5d7e51f3fd28 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | # - (66): the last prefix is 0x66 | 28 | # - (66): the last prefix is 0x66 |
| 29 | # - (F3): the last prefix is 0xF3 | 29 | # - (F3): the last prefix is 0xF3 |
| 30 | # - (F2): the last prefix is 0xF2 | 30 | # - (F2): the last prefix is 0xF2 |
| 31 | # | 31 | # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) |
| 32 | 32 | ||
| 33 | Table: one byte opcode | 33 | Table: one byte opcode |
| 34 | Referrer: | 34 | Referrer: |
| @@ -515,12 +515,12 @@ b4: LFS Gv,Mp | |||
| 515 | b5: LGS Gv,Mp | 515 | b5: LGS Gv,Mp |
| 516 | b6: MOVZX Gv,Eb | 516 | b6: MOVZX Gv,Eb |
| 517 | b7: MOVZX Gv,Ew | 517 | b7: MOVZX Gv,Ew |
| 518 | b8: JMPE | POPCNT Gv,Ev (F3) | 518 | b8: JMPE (!F3) | POPCNT Gv,Ev (F3) |
| 519 | b9: Grp10 (1A) | 519 | b9: Grp10 (1A) |
| 520 | ba: Grp8 Ev,Ib (1A) | 520 | ba: Grp8 Ev,Ib (1A) |
| 521 | bb: BTC Ev,Gv | 521 | bb: BTC Ev,Gv |
| 522 | bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) | 522 | bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3) |
| 523 | bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) | 523 | bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3) |
| 524 | be: MOVSX Gv,Eb | 524 | be: MOVSX Gv,Eb |
| 525 | bf: MOVSX Gv,Ew | 525 | bf: MOVSX Gv,Ew |
| 526 | # 0x0f 0xc0-0xcf | 526 | # 0x0f 0xc0-0xcf |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 97141c26a13a..bc4e9d84157f 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
| @@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en | |||
| 62 | extra += PMD_SIZE; | 62 | extra += PMD_SIZE; |
| 63 | #endif | 63 | #endif |
| 64 | /* The first 2/4M doesn't use large pages. */ | 64 | /* The first 2/4M doesn't use large pages. */ |
| 65 | extra += mr->end - mr->start; | 65 | if (mr->start < PMD_SIZE) |
| 66 | extra += mr->end - mr->start; | ||
| 66 | 67 | ||
| 67 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; | 68 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 68 | } else | 69 | } else |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index be1ef574ce9a..78fe3f1ac49f 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
| @@ -180,7 +180,7 @@ err_free_memtype: | |||
| 180 | 180 | ||
| 181 | /** | 181 | /** |
| 182 | * ioremap_nocache - map bus memory into CPU space | 182 | * ioremap_nocache - map bus memory into CPU space |
| 183 | * @offset: bus address of the memory | 183 | * @phys_addr: bus address of the memory |
| 184 | * @size: size of the resource to map | 184 | * @size: size of the resource to map |
| 185 | * | 185 | * |
| 186 | * ioremap_nocache performs a platform specific sequence of operations to | 186 | * ioremap_nocache performs a platform specific sequence of operations to |
| @@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache); | |||
| 217 | 217 | ||
| 218 | /** | 218 | /** |
| 219 | * ioremap_wc - map memory into CPU space write combined | 219 | * ioremap_wc - map memory into CPU space write combined |
| 220 | * @offset: bus address of the memory | 220 | * @phys_addr: bus address of the memory |
| 221 | * @size: size of the resource to map | 221 | * @size: size of the resource to map |
| 222 | * | 222 | * |
| 223 | * This version of ioremap ensures that the memory is marked write combining. | 223 | * This version of ioremap ensures that the memory is marked write combining. |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e1ebde315210..a718e0d23503 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
| @@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end) | |||
| 122 | 122 | ||
| 123 | /** | 123 | /** |
| 124 | * clflush_cache_range - flush a cache range with clflush | 124 | * clflush_cache_range - flush a cache range with clflush |
| 125 | * @addr: virtual start address | 125 | * @vaddr: virtual start address |
| 126 | * @size: number of bytes to flush | 126 | * @size: number of bytes to flush |
| 127 | * | 127 | * |
| 128 | * clflush is an unordered instruction which needs fencing with mfence | 128 | * clflush is an unordered instruction which needs fencing with mfence |
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 732af3a96183..4599c3e8bcb6 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c | |||
| @@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
| 176 | return; | 176 | return; |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | node_set(node, numa_nodes_parsed); | ||
| 180 | |||
| 179 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", | 181 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", |
| 180 | node, pxm, | 182 | node, pxm, |
| 181 | (unsigned long long) start, (unsigned long long) end - 1); | 183 | (unsigned long long) start, (unsigned long long) end - 1); |
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e31bcd8f2eee..fd41a9262d65 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
| @@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier); | |||
| 782 | EXPORT_SYMBOL_GPL(intel_scu_notifier); | 782 | EXPORT_SYMBOL_GPL(intel_scu_notifier); |
| 783 | 783 | ||
| 784 | /* Called by IPC driver */ | 784 | /* Called by IPC driver */ |
| 785 | void intel_scu_devices_create(void) | 785 | void __devinit intel_scu_devices_create(void) |
| 786 | { | 786 | { |
| 787 | int i; | 787 | int i; |
| 788 | 788 | ||
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 3ae0e61abd23..59880afa851f 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
| @@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void) | |||
| 1295 | */ | 1295 | */ |
| 1296 | mmr_image |= (1L << SOFTACK_MSHIFT); | 1296 | mmr_image |= (1L << SOFTACK_MSHIFT); |
| 1297 | if (is_uv2_hub()) { | 1297 | if (is_uv2_hub()) { |
| 1298 | mmr_image &= ~(1L << UV2_LEG_SHFT); | ||
| 1299 | mmr_image |= (1L << UV2_EXT_SHFT); | 1298 | mmr_image |= (1L << UV2_EXT_SHFT); |
| 1300 | } | 1299 | } |
| 1301 | write_mmr_misc_control(pnode, mmr_image); | 1300 | write_mmr_misc_control(pnode, mmr_image); |
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 5f6a5b6c3a15..ddcf39b1a18d 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk | |||
| @@ -66,9 +66,10 @@ BEGIN { | |||
| 66 | rex_expr = "^REX(\\.[XRWB]+)*" | 66 | rex_expr = "^REX(\\.[XRWB]+)*" |
| 67 | fpu_expr = "^ESC" # TODO | 67 | fpu_expr = "^ESC" # TODO |
| 68 | 68 | ||
| 69 | lprefix1_expr = "\\(66\\)" | 69 | lprefix1_expr = "\\((66|!F3)\\)" |
| 70 | lprefix2_expr = "\\(F3\\)" | 70 | lprefix2_expr = "\\(F3\\)" |
| 71 | lprefix3_expr = "\\(F2\\)" | 71 | lprefix3_expr = "\\((F2|!F3)\\)" |
| 72 | lprefix_expr = "\\((66|F2|F3)\\)" | ||
| 72 | max_lprefix = 4 | 73 | max_lprefix = 4 |
| 73 | 74 | ||
| 74 | # All opcodes starting with lower-case 'v' or with (v1) superscript | 75 | # All opcodes starting with lower-case 'v' or with (v1) superscript |
| @@ -333,13 +334,16 @@ function convert_operands(count,opnd, i,j,imm,mod) | |||
| 333 | if (match(ext, lprefix1_expr)) { | 334 | if (match(ext, lprefix1_expr)) { |
| 334 | lptable1[idx] = add_flags(lptable1[idx],flags) | 335 | lptable1[idx] = add_flags(lptable1[idx],flags) |
| 335 | variant = "INAT_VARIANT" | 336 | variant = "INAT_VARIANT" |
| 336 | } else if (match(ext, lprefix2_expr)) { | 337 | } |
| 338 | if (match(ext, lprefix2_expr)) { | ||
| 337 | lptable2[idx] = add_flags(lptable2[idx],flags) | 339 | lptable2[idx] = add_flags(lptable2[idx],flags) |
| 338 | variant = "INAT_VARIANT" | 340 | variant = "INAT_VARIANT" |
| 339 | } else if (match(ext, lprefix3_expr)) { | 341 | } |
| 342 | if (match(ext, lprefix3_expr)) { | ||
| 340 | lptable3[idx] = add_flags(lptable3[idx],flags) | 343 | lptable3[idx] = add_flags(lptable3[idx],flags) |
| 341 | variant = "INAT_VARIANT" | 344 | variant = "INAT_VARIANT" |
| 342 | } else { | 345 | } |
| 346 | if (!match(ext, lprefix_expr)){ | ||
| 343 | table[idx] = add_flags(table[idx],flags) | 347 | table[idx] = add_flags(table[idx],flags) |
| 344 | } | 348 | } |
| 345 | } | 349 | } |
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c index 416bd40c0eba..68d1dc91b37b 100644 --- a/arch/x86/um/sys_call_table_32.c +++ b/arch/x86/um/sys_call_table_32.c | |||
| @@ -39,9 +39,9 @@ | |||
| 39 | #undef __SYSCALL_I386 | 39 | #undef __SYSCALL_I386 |
| 40 | #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, | 40 | #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, |
| 41 | 41 | ||
| 42 | typedef void (*sys_call_ptr_t)(void); | 42 | typedef asmlinkage void (*sys_call_ptr_t)(void); |
| 43 | 43 | ||
| 44 | extern void sys_ni_syscall(void); | 44 | extern asmlinkage void sys_ni_syscall(void); |
| 45 | 45 | ||
| 46 | const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { | 46 | const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { |
| 47 | /* | 47 | /* |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index e74df9548a02..ff962d4b821e 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -209,6 +209,9 @@ static void __init xen_banner(void) | |||
| 209 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); | 209 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | #define CPUID_THERM_POWER_LEAF 6 | ||
| 213 | #define APERFMPERF_PRESENT 0 | ||
| 214 | |||
| 212 | static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; | 215 | static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; |
| 213 | static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; | 216 | static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; |
| 214 | 217 | ||
| @@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
| 242 | *dx = cpuid_leaf5_edx_val; | 245 | *dx = cpuid_leaf5_edx_val; |
| 243 | return; | 246 | return; |
| 244 | 247 | ||
| 248 | case CPUID_THERM_POWER_LEAF: | ||
| 249 | /* Disabling APERFMPERF for kernel usage */ | ||
| 250 | maskecx = ~(1 << APERFMPERF_PRESENT); | ||
| 251 | break; | ||
| 252 | |||
| 245 | case 0xb: | 253 | case 0xb: |
| 246 | /* Suppress extended topology stuff */ | 254 | /* Suppress extended topology stuff */ |
| 247 | maskebx = 0; | 255 | maskebx = 0; |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index ffd08c414e91..64effdc6da94 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
| @@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page, | |||
| 706 | unsigned long uninitialized_var(address); | 706 | unsigned long uninitialized_var(address); |
| 707 | unsigned level; | 707 | unsigned level; |
| 708 | pte_t *ptep = NULL; | 708 | pte_t *ptep = NULL; |
| 709 | int ret = 0; | ||
| 709 | 710 | ||
| 710 | pfn = page_to_pfn(page); | 711 | pfn = page_to_pfn(page); |
| 711 | if (!PageHighMem(page)) { | 712 | if (!PageHighMem(page)) { |
| @@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page, | |||
| 741 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); | 742 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); |
| 742 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 743 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
| 743 | 744 | ||
| 745 | /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in | ||
| 746 | * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other | ||
| 747 | * pfn so that the following mfn_to_pfn(mfn) calls will return the | ||
| 748 | * pfn from the m2p_override (the backend pfn) instead. | ||
| 749 | * We need to do this because the pages shared by the frontend | ||
| 750 | * (xen-blkfront) can be already locked (lock_page, called by | ||
| 751 | * do_read_cache_page); when the userspace backend tries to use them | ||
| 752 | * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so | ||
| 753 | * do_blockdev_direct_IO is going to try to lock the same pages | ||
| 754 | * again resulting in a deadlock. | ||
| 755 | * As a side effect get_user_pages_fast might not be safe on the | ||
| 756 | * frontend pages while they are being shared with the backend, | ||
| 757 | * because mfn_to_pfn (that ends up being called by GUPF) will | ||
| 758 | * return the backend pfn rather than the frontend pfn. */ | ||
| 759 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); | ||
| 760 | if (ret == 0 && get_phys_to_machine(pfn) == mfn) | ||
| 761 | set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); | ||
| 762 | |||
| 744 | return 0; | 763 | return 0; |
| 745 | } | 764 | } |
| 746 | EXPORT_SYMBOL_GPL(m2p_add_override); | 765 | EXPORT_SYMBOL_GPL(m2p_add_override); |
| @@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte) | |||
| 752 | unsigned long uninitialized_var(address); | 771 | unsigned long uninitialized_var(address); |
| 753 | unsigned level; | 772 | unsigned level; |
| 754 | pte_t *ptep = NULL; | 773 | pte_t *ptep = NULL; |
| 774 | int ret = 0; | ||
| 755 | 775 | ||
| 756 | pfn = page_to_pfn(page); | 776 | pfn = page_to_pfn(page); |
| 757 | mfn = get_phys_to_machine(pfn); | 777 | mfn = get_phys_to_machine(pfn); |
| @@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte) | |||
| 821 | } else | 841 | } else |
| 822 | set_phys_to_machine(pfn, page->index); | 842 | set_phys_to_machine(pfn, page->index); |
| 823 | 843 | ||
| 844 | /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present | ||
| 845 | * somewhere in this domain, even before being added to the | ||
| 846 | * m2p_override (see comment above in m2p_add_override). | ||
| 847 | * If there are no other entries in the m2p_override corresponding | ||
| 848 | * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for | ||
| 849 | * the original pfn (the one shared by the frontend): the backend | ||
| 850 | * cannot do any IO on this page anymore because it has been | ||
| 851 | * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of | ||
| 852 | * the original pfn causes mfn_to_pfn(mfn) to return the frontend | ||
| 853 | * pfn again. */ | ||
| 854 | mfn &= ~FOREIGN_FRAME_BIT; | ||
| 855 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); | ||
| 856 | if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && | ||
| 857 | m2p_find_override(mfn) == NULL) | ||
| 858 | set_phys_to_machine(pfn, mfn); | ||
| 859 | |||
| 824 | return 0; | 860 | return 0; |
| 825 | } | 861 | } |
| 826 | EXPORT_SYMBOL_GPL(m2p_remove_override); | 862 | EXPORT_SYMBOL_GPL(m2p_remove_override); |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 3ebba0753d38..a4790bf22c59 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
| @@ -371,7 +371,8 @@ char * __init xen_memory_setup(void) | |||
| 371 | populated = xen_populate_chunk(map, memmap.nr_entries, | 371 | populated = xen_populate_chunk(map, memmap.nr_entries, |
| 372 | max_pfn, &last_pfn, xen_released_pages); | 372 | max_pfn, &last_pfn, xen_released_pages); |
| 373 | 373 | ||
| 374 | extra_pages += (xen_released_pages - populated); | 374 | xen_released_pages -= populated; |
| 375 | extra_pages += xen_released_pages; | ||
| 375 | 376 | ||
| 376 | if (last_pfn > max_pfn) { | 377 | if (last_pfn > max_pfn) { |
| 377 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); | 378 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); |
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 0b9f2e13c781..c1dacca312f3 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h | |||
| @@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, | |||
| 31 | asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, | 31 | asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, |
| 32 | struct timespec __user *tsp, const sigset_t __user *sigmask, | 32 | struct timespec __user *tsp, const sigset_t __user *sigmask, |
| 33 | size_t sigsetsize); | 33 | size_t sigsetsize); |
| 34 | 34 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, | |
| 35 | 35 | size_t sigsetsize); | |
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index b9f8e5850d3a..efe4e854b3cd 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c | |||
| @@ -493,7 +493,7 @@ static void do_signal(struct pt_regs *regs) | |||
| 493 | if (ret) | 493 | if (ret) |
| 494 | return; | 494 | return; |
| 495 | 495 | ||
| 496 | signal_delivered(signr, info, ka, regs, 0); | 496 | signal_delivered(signr, &info, &ka, regs, 0); |
| 497 | if (current->ptrace & PT_SINGLESTEP) | 497 | if (current->ptrace & PT_SINGLESTEP) |
| 498 | task_pt_regs(current)->icountlevel = 1; | 498 | task_pt_regs(current)->icountlevel = 1; |
| 499 | 499 | ||
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 47768ff87343..80998958cf45 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
| @@ -208,7 +208,7 @@ config ACPI_IPMI | |||
| 208 | 208 | ||
| 209 | config ACPI_HOTPLUG_CPU | 209 | config ACPI_HOTPLUG_CPU |
| 210 | bool | 210 | bool |
| 211 | depends on ACPI_PROCESSOR && HOTPLUG_CPU | 211 | depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU |
| 212 | select ACPI_CONTAINER | 212 | select ACPI_CONTAINER |
| 213 | default y | 213 | default y |
| 214 | 214 | ||
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 86933ca8b472..7dd3f9fb9f3f 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
| @@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery) | |||
| 643 | 643 | ||
| 644 | static void acpi_battery_refresh(struct acpi_battery *battery) | 644 | static void acpi_battery_refresh(struct acpi_battery *battery) |
| 645 | { | 645 | { |
| 646 | int power_unit; | ||
| 647 | |||
| 646 | if (!battery->bat.dev) | 648 | if (!battery->bat.dev) |
| 647 | return; | 649 | return; |
| 648 | 650 | ||
| 651 | power_unit = battery->power_unit; | ||
| 652 | |||
| 649 | acpi_battery_get_info(battery); | 653 | acpi_battery_get_info(battery); |
| 650 | /* The battery may have changed its reporting units. */ | 654 | |
| 655 | if (power_unit == battery->power_unit) | ||
| 656 | return; | ||
| 657 | |||
| 658 | /* The battery has changed its reporting units. */ | ||
| 651 | sysfs_remove_battery(battery); | 659 | sysfs_remove_battery(battery); |
| 652 | sysfs_add_battery(battery); | 660 | sysfs_add_battery(battery); |
| 653 | } | 661 | } |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 3188da3df8da..adceafda9c17 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); | |||
| 182 | Power Management | 182 | Power Management |
| 183 | -------------------------------------------------------------------------- */ | 183 | -------------------------------------------------------------------------- */ |
| 184 | 184 | ||
| 185 | static const char *state_string(int state) | ||
| 186 | { | ||
| 187 | switch (state) { | ||
| 188 | case ACPI_STATE_D0: | ||
| 189 | return "D0"; | ||
| 190 | case ACPI_STATE_D1: | ||
| 191 | return "D1"; | ||
| 192 | case ACPI_STATE_D2: | ||
| 193 | return "D2"; | ||
| 194 | case ACPI_STATE_D3_HOT: | ||
| 195 | return "D3hot"; | ||
| 196 | case ACPI_STATE_D3_COLD: | ||
| 197 | return "D3"; | ||
| 198 | default: | ||
| 199 | return "(unknown)"; | ||
| 200 | } | ||
| 201 | } | ||
| 202 | |||
| 185 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) | 203 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) |
| 186 | { | 204 | { |
| 187 | int result = 0; | 205 | int result = ACPI_STATE_UNKNOWN; |
| 188 | acpi_status status = 0; | ||
| 189 | unsigned long long psc = 0; | ||
| 190 | 206 | ||
| 191 | if (!device || !state) | 207 | if (!device || !state) |
| 192 | return -EINVAL; | 208 | return -EINVAL; |
| 193 | 209 | ||
| 194 | *state = ACPI_STATE_UNKNOWN; | 210 | if (!device->flags.power_manageable) { |
| 195 | |||
| 196 | if (device->flags.power_manageable) { | ||
| 197 | /* | ||
| 198 | * Get the device's power state either directly (via _PSC) or | ||
| 199 | * indirectly (via power resources). | ||
| 200 | */ | ||
| 201 | if (device->power.flags.power_resources) { | ||
| 202 | result = acpi_power_get_inferred_state(device, state); | ||
| 203 | if (result) | ||
| 204 | return result; | ||
| 205 | } else if (device->power.flags.explicit_get) { | ||
| 206 | status = acpi_evaluate_integer(device->handle, "_PSC", | ||
| 207 | NULL, &psc); | ||
| 208 | if (ACPI_FAILURE(status)) | ||
| 209 | return -ENODEV; | ||
| 210 | *state = (int)psc; | ||
| 211 | } | ||
| 212 | } else { | ||
| 213 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ | 211 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ |
| 214 | *state = device->parent ? | 212 | *state = device->parent ? |
| 215 | device->parent->power.state : ACPI_STATE_D0; | 213 | device->parent->power.state : ACPI_STATE_D0; |
| 214 | goto out; | ||
| 215 | } | ||
| 216 | |||
| 217 | /* | ||
| 218 | * Get the device's power state either directly (via _PSC) or | ||
| 219 | * indirectly (via power resources). | ||
| 220 | */ | ||
| 221 | if (device->power.flags.explicit_get) { | ||
| 222 | unsigned long long psc; | ||
| 223 | acpi_status status = acpi_evaluate_integer(device->handle, | ||
| 224 | "_PSC", NULL, &psc); | ||
| 225 | if (ACPI_FAILURE(status)) | ||
| 226 | return -ENODEV; | ||
| 227 | |||
| 228 | result = psc; | ||
| 229 | } | ||
| 230 | /* The test below covers ACPI_STATE_UNKNOWN too. */ | ||
| 231 | if (result <= ACPI_STATE_D2) { | ||
| 232 | ; /* Do nothing. */ | ||
| 233 | } else if (device->power.flags.power_resources) { | ||
| 234 | int error = acpi_power_get_inferred_state(device, &result); | ||
| 235 | if (error) | ||
| 236 | return error; | ||
| 237 | } else if (result == ACPI_STATE_D3_HOT) { | ||
| 238 | result = ACPI_STATE_D3; | ||
| 216 | } | 239 | } |
| 240 | *state = result; | ||
| 217 | 241 | ||
| 218 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", | 242 | out: |
| 219 | device->pnp.bus_id, *state)); | 243 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", |
| 244 | device->pnp.bus_id, state_string(*state))); | ||
| 220 | 245 | ||
| 221 | return 0; | 246 | return 0; |
| 222 | } | 247 | } |
| @@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
| 234 | /* Make sure this is a valid target state */ | 259 | /* Make sure this is a valid target state */ |
| 235 | 260 | ||
| 236 | if (state == device->power.state) { | 261 | if (state == device->power.state) { |
| 237 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", | 262 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n", |
| 238 | state)); | 263 | state_string(state))); |
| 239 | return 0; | 264 | return 0; |
| 240 | } | 265 | } |
| 241 | 266 | ||
| 242 | if (!device->power.states[state].flags.valid) { | 267 | if (!device->power.states[state].flags.valid) { |
| 243 | printk(KERN_WARNING PREFIX "Device does not support D%d\n", state); | 268 | printk(KERN_WARNING PREFIX "Device does not support %s\n", |
| 269 | state_string(state)); | ||
| 244 | return -ENODEV; | 270 | return -ENODEV; |
| 245 | } | 271 | } |
| 246 | if (device->parent && (state < device->parent->power.state)) { | 272 | if (device->parent && (state < device->parent->power.state)) { |
| @@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
| 294 | end: | 320 | end: |
| 295 | if (result) | 321 | if (result) |
| 296 | printk(KERN_WARNING PREFIX | 322 | printk(KERN_WARNING PREFIX |
| 297 | "Device [%s] failed to transition to D%d\n", | 323 | "Device [%s] failed to transition to %s\n", |
| 298 | device->pnp.bus_id, state); | 324 | device->pnp.bus_id, state_string(state)); |
| 299 | else { | 325 | else { |
| 300 | device->power.state = state; | 326 | device->power.state = state; |
| 301 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 327 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
| 302 | "Device [%s] transitioned to D%d\n", | 328 | "Device [%s] transitioned to %s\n", |
| 303 | device->pnp.bus_id, state)); | 329 | device->pnp.bus_id, state_string(state))); |
| 304 | } | 330 | } |
| 305 | 331 | ||
| 306 | return result; | 332 | return result; |
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 0500f719f63e..dd6d6a3c6780 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
| @@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) | |||
| 631 | * We know a device's inferred power state when all the resources | 631 | * We know a device's inferred power state when all the resources |
| 632 | * required for a given D-state are 'on'. | 632 | * required for a given D-state are 'on'. |
| 633 | */ | 633 | */ |
| 634 | for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { | 634 | for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { |
| 635 | list = &device->power.states[i].resources; | 635 | list = &device->power.states[i].resources; |
| 636 | if (list->count < 1) | 636 | if (list->count < 1) |
| 637 | continue; | 637 | continue; |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 0af48a8554cd..a093dc163a42 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
| @@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) | |||
| 333 | struct acpi_buffer state = { 0, NULL }; | 333 | struct acpi_buffer state = { 0, NULL }; |
| 334 | union acpi_object *pss = NULL; | 334 | union acpi_object *pss = NULL; |
| 335 | int i; | 335 | int i; |
| 336 | int last_invalid = -1; | ||
| 336 | 337 | ||
| 337 | 338 | ||
| 338 | status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); | 339 | status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); |
| @@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) | |||
| 394 | ((u32)(px->core_frequency * 1000) != | 395 | ((u32)(px->core_frequency * 1000) != |
| 395 | (px->core_frequency * 1000))) { | 396 | (px->core_frequency * 1000))) { |
| 396 | printk(KERN_ERR FW_BUG PREFIX | 397 | printk(KERN_ERR FW_BUG PREFIX |
| 397 | "Invalid BIOS _PSS frequency: 0x%llx MHz\n", | 398 | "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n", |
| 398 | px->core_frequency); | 399 | pr->id, px->core_frequency); |
| 399 | result = -EFAULT; | 400 | if (last_invalid == -1) |
| 400 | kfree(pr->performance->states); | 401 | last_invalid = i; |
| 401 | goto end; | 402 | } else { |
| 403 | if (last_invalid != -1) { | ||
| 404 | /* | ||
| 405 | * Copy this valid entry over last_invalid entry | ||
| 406 | */ | ||
| 407 | memcpy(&(pr->performance->states[last_invalid]), | ||
| 408 | px, sizeof(struct acpi_processor_px)); | ||
| 409 | ++last_invalid; | ||
| 410 | } | ||
| 402 | } | 411 | } |
| 403 | } | 412 | } |
| 404 | 413 | ||
| 414 | if (last_invalid == 0) { | ||
| 415 | printk(KERN_ERR FW_BUG PREFIX | ||
| 416 | "No valid BIOS _PSS frequency found for processor %d\n", pr->id); | ||
| 417 | result = -EFAULT; | ||
| 418 | kfree(pr->performance->states); | ||
| 419 | pr->performance->states = NULL; | ||
| 420 | } | ||
| 421 | |||
| 422 | if (last_invalid > 0) | ||
| 423 | pr->performance->state_count = last_invalid; | ||
| 424 | |||
| 405 | end: | 425 | end: |
| 406 | kfree(buffer.pointer); | 426 | kfree(buffer.pointer); |
| 407 | 427 | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 85cbfdccc97c..c8a1f3b68110 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void) | |||
| 1567 | ACPI_BUS_TYPE_POWER_BUTTON, | 1567 | ACPI_BUS_TYPE_POWER_BUTTON, |
| 1568 | ACPI_STA_DEFAULT, | 1568 | ACPI_STA_DEFAULT, |
| 1569 | &ops); | 1569 | &ops); |
| 1570 | device_init_wakeup(&device->dev, true); | ||
| 1570 | } | 1571 | } |
| 1571 | 1572 | ||
| 1572 | if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { | 1573 | if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 74ee4ab577b6..88561029cca8 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
| @@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); | |||
| 57 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); | 57 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); |
| 58 | 58 | ||
| 59 | static u8 sleep_states[ACPI_S_STATE_COUNT]; | 59 | static u8 sleep_states[ACPI_S_STATE_COUNT]; |
| 60 | static bool pwr_btn_event_pending; | ||
| 60 | 61 | ||
| 61 | static void acpi_sleep_tts_switch(u32 acpi_state) | 62 | static void acpi_sleep_tts_switch(u32 acpi_state) |
| 62 | { | 63 | { |
| @@ -184,6 +185,14 @@ static int acpi_pm_prepare(void) | |||
| 184 | return error; | 185 | return error; |
| 185 | } | 186 | } |
| 186 | 187 | ||
| 188 | static int find_powerf_dev(struct device *dev, void *data) | ||
| 189 | { | ||
| 190 | struct acpi_device *device = to_acpi_device(dev); | ||
| 191 | const char *hid = acpi_device_hid(device); | ||
| 192 | |||
| 193 | return !strcmp(hid, ACPI_BUTTON_HID_POWERF); | ||
| 194 | } | ||
| 195 | |||
| 187 | /** | 196 | /** |
| 188 | * acpi_pm_finish - Instruct the platform to leave a sleep state. | 197 | * acpi_pm_finish - Instruct the platform to leave a sleep state. |
| 189 | * | 198 | * |
| @@ -192,6 +201,7 @@ static int acpi_pm_prepare(void) | |||
| 192 | */ | 201 | */ |
| 193 | static void acpi_pm_finish(void) | 202 | static void acpi_pm_finish(void) |
| 194 | { | 203 | { |
| 204 | struct device *pwr_btn_dev; | ||
| 195 | u32 acpi_state = acpi_target_sleep_state; | 205 | u32 acpi_state = acpi_target_sleep_state; |
| 196 | 206 | ||
| 197 | acpi_ec_unblock_transactions(); | 207 | acpi_ec_unblock_transactions(); |
| @@ -209,6 +219,23 @@ static void acpi_pm_finish(void) | |||
| 209 | acpi_set_firmware_waking_vector((acpi_physical_address) 0); | 219 | acpi_set_firmware_waking_vector((acpi_physical_address) 0); |
| 210 | 220 | ||
| 211 | acpi_target_sleep_state = ACPI_STATE_S0; | 221 | acpi_target_sleep_state = ACPI_STATE_S0; |
| 222 | |||
| 223 | /* If we were woken with the fixed power button, provide a small | ||
| 224 | * hint to userspace in the form of a wakeup event on the fixed power | ||
| 225 | * button device (if it can be found). | ||
| 226 | * | ||
| 227 | * We delay the event generation til now, as the PM layer requires | ||
| 228 | * timekeeping to be running before we generate events. */ | ||
| 229 | if (!pwr_btn_event_pending) | ||
| 230 | return; | ||
| 231 | |||
| 232 | pwr_btn_event_pending = false; | ||
| 233 | pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, | ||
| 234 | find_powerf_dev); | ||
| 235 | if (pwr_btn_dev) { | ||
| 236 | pm_wakeup_event(pwr_btn_dev, 0); | ||
| 237 | put_device(pwr_btn_dev); | ||
| 238 | } | ||
| 212 | } | 239 | } |
| 213 | 240 | ||
| 214 | /** | 241 | /** |
| @@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
| 298 | /* ACPI 3.0 specs (P62) says that it's the responsibility | 325 | /* ACPI 3.0 specs (P62) says that it's the responsibility |
| 299 | * of the OSPM to clear the status bit [ implying that the | 326 | * of the OSPM to clear the status bit [ implying that the |
| 300 | * POWER_BUTTON event should not reach userspace ] | 327 | * POWER_BUTTON event should not reach userspace ] |
| 328 | * | ||
| 329 | * However, we do generate a small hint for userspace in the form of | ||
| 330 | * a wakeup event. We flag this condition for now and generate the | ||
| 331 | * event later, as we're currently too early in resume to be able to | ||
| 332 | * generate wakeup events. | ||
| 301 | */ | 333 | */ |
| 302 | if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) | 334 | if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { |
| 303 | acpi_clear_event(ACPI_EVENT_POWER_BUTTON); | 335 | acpi_event_status pwr_btn_status; |
| 336 | |||
| 337 | acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); | ||
| 338 | |||
| 339 | if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { | ||
| 340 | acpi_clear_event(ACPI_EVENT_POWER_BUTTON); | ||
| 341 | /* Flag for later */ | ||
| 342 | pwr_btn_event_pending = true; | ||
| 343 | } | ||
| 344 | } | ||
| 304 | 345 | ||
| 305 | /* | 346 | /* |
| 306 | * Disable and clear GPE status before interrupt is enabled. Some GPEs | 347 | * Disable and clear GPE status before interrupt is enabled. Some GPEs |
| @@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) | |||
| 730 | * can wake the system. _S0W may be valid, too. | 771 | * can wake the system. _S0W may be valid, too. |
| 731 | */ | 772 | */ |
| 732 | if (acpi_target_sleep_state == ACPI_STATE_S0 || | 773 | if (acpi_target_sleep_state == ACPI_STATE_S0 || |
| 733 | (device_may_wakeup(dev) && | 774 | (device_may_wakeup(dev) && adev->wakeup.flags.valid && |
| 734 | adev->wakeup.sleep_state <= acpi_target_sleep_state)) { | 775 | adev->wakeup.sleep_state >= acpi_target_sleep_state)) { |
| 735 | acpi_status status; | 776 | acpi_status status; |
| 736 | 777 | ||
| 737 | acpi_method[3] = 'W'; | 778 | acpi_method[3] = 'W'; |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 9577b6fa2650..a576575617d7 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
| @@ -1687,10 +1687,6 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
| 1687 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); | 1687 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); |
| 1688 | set_bit(KEY_DISPLAY_OFF, input->keybit); | 1688 | set_bit(KEY_DISPLAY_OFF, input->keybit); |
| 1689 | 1689 | ||
| 1690 | error = input_register_device(input); | ||
| 1691 | if (error) | ||
| 1692 | goto err_stop_video; | ||
| 1693 | |||
| 1694 | printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", | 1690 | printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", |
| 1695 | ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), | 1691 | ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), |
| 1696 | video->flags.multihead ? "yes" : "no", | 1692 | video->flags.multihead ? "yes" : "no", |
| @@ -1701,12 +1697,16 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
| 1701 | video->pm_nb.priority = 0; | 1697 | video->pm_nb.priority = 0; |
| 1702 | error = register_pm_notifier(&video->pm_nb); | 1698 | error = register_pm_notifier(&video->pm_nb); |
| 1703 | if (error) | 1699 | if (error) |
| 1704 | goto err_unregister_input_dev; | 1700 | goto err_stop_video; |
| 1701 | |||
| 1702 | error = input_register_device(input); | ||
| 1703 | if (error) | ||
| 1704 | goto err_unregister_pm_notifier; | ||
| 1705 | 1705 | ||
| 1706 | return 0; | 1706 | return 0; |
| 1707 | 1707 | ||
| 1708 | err_unregister_input_dev: | 1708 | err_unregister_pm_notifier: |
| 1709 | input_unregister_device(input); | 1709 | unregister_pm_notifier(&video->pm_nb); |
| 1710 | err_stop_video: | 1710 | err_stop_video: |
| 1711 | acpi_video_bus_stop_devices(video); | 1711 | acpi_video_bus_stop_devices(video); |
| 1712 | err_free_input_dev: | 1712 | err_free_input_dev: |
| @@ -1743,9 +1743,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) | |||
| 1743 | return 0; | 1743 | return 0; |
| 1744 | } | 1744 | } |
| 1745 | 1745 | ||
| 1746 | static int __init is_i740(struct pci_dev *dev) | ||
| 1747 | { | ||
| 1748 | if (dev->device == 0x00D1) | ||
| 1749 | return 1; | ||
| 1750 | if (dev->device == 0x7000) | ||
| 1751 | return 1; | ||
| 1752 | return 0; | ||
| 1753 | } | ||
| 1754 | |||
| 1746 | static int __init intel_opregion_present(void) | 1755 | static int __init intel_opregion_present(void) |
| 1747 | { | 1756 | { |
| 1748 | #if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) | 1757 | int opregion = 0; |
| 1749 | struct pci_dev *dev = NULL; | 1758 | struct pci_dev *dev = NULL; |
| 1750 | u32 address; | 1759 | u32 address; |
| 1751 | 1760 | ||
| @@ -1754,13 +1763,15 @@ static int __init intel_opregion_present(void) | |||
| 1754 | continue; | 1763 | continue; |
| 1755 | if (dev->vendor != PCI_VENDOR_ID_INTEL) | 1764 | if (dev->vendor != PCI_VENDOR_ID_INTEL) |
| 1756 | continue; | 1765 | continue; |
| 1766 | /* We don't want to poke around undefined i740 registers */ | ||
| 1767 | if (is_i740(dev)) | ||
| 1768 | continue; | ||
| 1757 | pci_read_config_dword(dev, 0xfc, &address); | 1769 | pci_read_config_dword(dev, 0xfc, &address); |
| 1758 | if (!address) | 1770 | if (!address) |
| 1759 | continue; | 1771 | continue; |
| 1760 | return 1; | 1772 | opregion = 1; |
| 1761 | } | 1773 | } |
| 1762 | #endif | 1774 | return opregion; |
| 1763 | return 0; | ||
| 1764 | } | 1775 | } |
| 1765 | 1776 | ||
| 1766 | int acpi_video_register(void) | 1777 | int acpi_video_register(void) |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0bcda488f11c..c89aa01fb1de 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
| @@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev, | |||
| 246 | map->lock = regmap_lock_mutex; | 246 | map->lock = regmap_lock_mutex; |
| 247 | map->unlock = regmap_unlock_mutex; | 247 | map->unlock = regmap_unlock_mutex; |
| 248 | } | 248 | } |
| 249 | map->format.buf_size = (config->reg_bits + config->val_bits) / 8; | ||
| 250 | map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); | 249 | map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); |
| 251 | map->format.pad_bytes = config->pad_bits / 8; | 250 | map->format.pad_bytes = config->pad_bits / 8; |
| 252 | map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); | 251 | map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); |
| 253 | map->format.buf_size += map->format.pad_bytes; | 252 | map->format.buf_size = DIV_ROUND_UP(config->reg_bits + |
| 253 | config->val_bits + config->pad_bits, 8); | ||
| 254 | map->reg_shift = config->pad_bits % 8; | 254 | map->reg_shift = config->pad_bits % 8; |
| 255 | if (config->reg_stride) | 255 | if (config->reg_stride) |
| 256 | map->reg_stride = config->reg_stride; | 256 | map->reg_stride = config->reg_stride; |
| @@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev, | |||
| 368 | 368 | ||
| 369 | ret = regcache_init(map, config); | 369 | ret = regcache_init(map, config); |
| 370 | if (ret < 0) | 370 | if (ret < 0) |
| 371 | goto err_free_workbuf; | 371 | goto err_debugfs; |
| 372 | 372 | ||
| 373 | /* Add a devres resource for dev_get_regmap() */ | 373 | /* Add a devres resource for dev_get_regmap() */ |
| 374 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); | 374 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); |
| @@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev, | |||
| 383 | 383 | ||
| 384 | err_cache: | 384 | err_cache: |
| 385 | regcache_exit(map); | 385 | regcache_exit(map); |
| 386 | err_free_workbuf: | 386 | err_debugfs: |
| 387 | regmap_debugfs_exit(map); | ||
| 387 | kfree(map->work_buf); | 388 | kfree(map->work_buf); |
| 388 | err_map: | 389 | err_map: |
| 389 | kfree(map); | 390 | kfree(map); |
| @@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) | |||
| 471 | 472 | ||
| 472 | return ret; | 473 | return ret; |
| 473 | } | 474 | } |
| 475 | EXPORT_SYMBOL_GPL(regmap_reinit_cache); | ||
| 474 | 476 | ||
| 475 | /** | 477 | /** |
| 476 | * regmap_exit(): Free a previously allocated register map | 478 | * regmap_exit(): Free a previously allocated register map |
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index a058842f14fd..61ce4054b3c3 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c | |||
| @@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc) | |||
| 139 | bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); | 139 | bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); |
| 140 | break; | 140 | break; |
| 141 | case 0x4331: | 141 | case 0x4331: |
| 142 | /* BCM4331 workaround is SPROM-related, we put it in sprom.c */ | 142 | case 43431: |
| 143 | /* Ext PA lines must be enabled for tx on BCM4331 */ | ||
| 144 | bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true); | ||
| 143 | break; | 145 | break; |
| 144 | case 43224: | 146 | case 43224: |
| 145 | if (bus->chipinfo.rev == 0) { | 147 | if (bus->chipinfo.rev == 0) { |
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index 9a96f14c8f47..c32ebd537abe 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c | |||
| @@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc) | |||
| 232 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, | 232 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, |
| 233 | bool enable) | 233 | bool enable) |
| 234 | { | 234 | { |
| 235 | struct pci_dev *pdev = pc->core->bus->host_pci; | 235 | struct pci_dev *pdev; |
| 236 | u32 coremask, tmp; | 236 | u32 coremask, tmp; |
| 237 | int err = 0; | 237 | int err = 0; |
| 238 | 238 | ||
| 239 | if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) { | 239 | if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) { |
| 240 | /* This bcma device is not on a PCI host-bus. So the IRQs are | 240 | /* This bcma device is not on a PCI host-bus. So the IRQs are |
| 241 | * not routed through the PCI core. | 241 | * not routed through the PCI core. |
| 242 | * So we must not enable routing through the PCI core. */ | 242 | * So we must not enable routing through the PCI core. */ |
| 243 | goto out; | 243 | goto out; |
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | pdev = pc->core->bus->host_pci; | ||
| 247 | |||
| 246 | err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); | 248 | err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); |
| 247 | if (err) | 249 | if (err) |
| 248 | goto out; | 250 | goto out; |
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index c7f93359acb0..f16f42d36071 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c | |||
| @@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus) | |||
| 579 | if (!sprom) | 579 | if (!sprom) |
| 580 | return -ENOMEM; | 580 | return -ENOMEM; |
| 581 | 581 | ||
| 582 | if (bus->chipinfo.id == 0x4331) | 582 | if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) |
| 583 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); | 583 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); |
| 584 | 584 | ||
| 585 | pr_debug("SPROM offset 0x%x\n", offset); | 585 | pr_debug("SPROM offset 0x%x\n", offset); |
| 586 | bcma_sprom_read(bus, offset, sprom); | 586 | bcma_sprom_read(bus, offset, sprom); |
| 587 | 587 | ||
| 588 | if (bus->chipinfo.id == 0x4331) | 588 | if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) |
| 589 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); | 589 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); |
| 590 | 590 | ||
| 591 | err = bcma_sprom_valid(sprom); | 591 | err = bcma_sprom_valid(sprom); |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 764f70c5e690..0a4185279417 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
| @@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
| 898 | ID(PCI_DEVICE_ID_INTEL_B43_HB), | 898 | ID(PCI_DEVICE_ID_INTEL_B43_HB), |
| 899 | ID(PCI_DEVICE_ID_INTEL_B43_1_HB), | 899 | ID(PCI_DEVICE_ID_INTEL_B43_1_HB), |
| 900 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), | 900 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), |
| 901 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), | ||
| 901 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), | 902 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), |
| 902 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), | 903 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), |
| 903 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), | 904 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c0091753a0d1..8e2d9140f300 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h | |||
| @@ -212,6 +212,7 @@ | |||
| 212 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | 212 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 |
| 213 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | 213 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 |
| 214 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | 214 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 |
| 215 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069 | ||
| 215 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | 216 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 |
| 216 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | 217 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 |
| 217 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | 218 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 |
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index f518b99f53f5..6289f0eee24c 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c | |||
| @@ -36,6 +36,13 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, | |||
| 36 | /* data ready? */ | 36 | /* data ready? */ |
| 37 | if (readl(trng->base + TRNG_ODATA) & 1) { | 37 | if (readl(trng->base + TRNG_ODATA) & 1) { |
| 38 | *data = readl(trng->base + TRNG_ODATA); | 38 | *data = readl(trng->base + TRNG_ODATA); |
| 39 | /* | ||
| 40 | ensure data ready is only set again AFTER the next data | ||
| 41 | word is ready in case it got set between checking ISR | ||
| 42 | and reading ODATA, so we don't risk re-reading the | ||
| 43 | same word | ||
| 44 | */ | ||
| 45 | readl(trng->base + TRNG_ISR); | ||
| 39 | return 4; | 46 | return 4; |
| 40 | } else | 47 | } else |
| 41 | return 0; | 48 | return 0; |
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8d81a1d32653..dd3e661a124d 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
| @@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o | |||
| 6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o | 6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o |
| 7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o | 7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o |
| 8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o | 8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o |
| 9 | obj-$(CONFIG_EM_TIMER_STI) += em_sti.o | ||
| 9 | obj-$(CONFIG_CLKBLD_I8253) += i8253.o | 10 | obj-$(CONFIG_CLKBLD_I8253) += i8253.o |
| 10 | obj-$(CONFIG_CLKSRC_MMIO) += mmio.o | 11 | obj-$(CONFIG_CLKSRC_MMIO) += mmio.o |
| 11 | obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o | 12 | obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o |
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c new file mode 100644 index 000000000000..372051d1bba8 --- /dev/null +++ b/drivers/clocksource/em_sti.c | |||
| @@ -0,0 +1,406 @@ | |||
| 1 | /* | ||
| 2 | * Emma Mobile Timer Support - STI | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Magnus Damm | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/init.h> | ||
| 21 | #include <linux/platform_device.h> | ||
| 22 | #include <linux/spinlock.h> | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/ioport.h> | ||
| 25 | #include <linux/io.h> | ||
| 26 | #include <linux/clk.h> | ||
| 27 | #include <linux/irq.h> | ||
| 28 | #include <linux/err.h> | ||
| 29 | #include <linux/delay.h> | ||
| 30 | #include <linux/clocksource.h> | ||
| 31 | #include <linux/clockchips.h> | ||
| 32 | #include <linux/slab.h> | ||
| 33 | #include <linux/module.h> | ||
| 34 | |||
| 35 | enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR }; | ||
| 36 | |||
| 37 | struct em_sti_priv { | ||
| 38 | void __iomem *base; | ||
| 39 | struct clk *clk; | ||
| 40 | struct platform_device *pdev; | ||
| 41 | unsigned int active[USER_NR]; | ||
| 42 | unsigned long rate; | ||
| 43 | raw_spinlock_t lock; | ||
| 44 | struct clock_event_device ced; | ||
| 45 | struct clocksource cs; | ||
| 46 | }; | ||
| 47 | |||
| 48 | #define STI_CONTROL 0x00 | ||
| 49 | #define STI_COMPA_H 0x10 | ||
| 50 | #define STI_COMPA_L 0x14 | ||
| 51 | #define STI_COMPB_H 0x18 | ||
| 52 | #define STI_COMPB_L 0x1c | ||
| 53 | #define STI_COUNT_H 0x20 | ||
| 54 | #define STI_COUNT_L 0x24 | ||
| 55 | #define STI_COUNT_RAW_H 0x28 | ||
| 56 | #define STI_COUNT_RAW_L 0x2c | ||
| 57 | #define STI_SET_H 0x30 | ||
| 58 | #define STI_SET_L 0x34 | ||
| 59 | #define STI_INTSTATUS 0x40 | ||
| 60 | #define STI_INTRAWSTATUS 0x44 | ||
| 61 | #define STI_INTENSET 0x48 | ||
| 62 | #define STI_INTENCLR 0x4c | ||
| 63 | #define STI_INTFFCLR 0x50 | ||
| 64 | |||
| 65 | static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs) | ||
| 66 | { | ||
| 67 | return ioread32(p->base + offs); | ||
| 68 | } | ||
| 69 | |||
| 70 | static inline void em_sti_write(struct em_sti_priv *p, int offs, | ||
| 71 | unsigned long value) | ||
| 72 | { | ||
| 73 | iowrite32(value, p->base + offs); | ||
| 74 | } | ||
| 75 | |||
| 76 | static int em_sti_enable(struct em_sti_priv *p) | ||
| 77 | { | ||
| 78 | int ret; | ||
| 79 | |||
| 80 | /* enable clock */ | ||
| 81 | ret = clk_enable(p->clk); | ||
| 82 | if (ret) { | ||
| 83 | dev_err(&p->pdev->dev, "cannot enable clock\n"); | ||
| 84 | return ret; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* configure channel, periodic mode and maximum timeout */ | ||
| 88 | p->rate = clk_get_rate(p->clk); | ||
| 89 | |||
| 90 | /* reset the counter */ | ||
| 91 | em_sti_write(p, STI_SET_H, 0x40000000); | ||
| 92 | em_sti_write(p, STI_SET_L, 0x00000000); | ||
| 93 | |||
| 94 | /* mask and clear pending interrupts */ | ||
| 95 | em_sti_write(p, STI_INTENCLR, 3); | ||
| 96 | em_sti_write(p, STI_INTFFCLR, 3); | ||
| 97 | |||
| 98 | /* enable updates of counter registers */ | ||
| 99 | em_sti_write(p, STI_CONTROL, 1); | ||
| 100 | |||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | static void em_sti_disable(struct em_sti_priv *p) | ||
| 105 | { | ||
| 106 | /* mask interrupts */ | ||
| 107 | em_sti_write(p, STI_INTENCLR, 3); | ||
| 108 | |||
| 109 | /* stop clock */ | ||
| 110 | clk_disable(p->clk); | ||
| 111 | } | ||
| 112 | |||
| 113 | static cycle_t em_sti_count(struct em_sti_priv *p) | ||
| 114 | { | ||
| 115 | cycle_t ticks; | ||
| 116 | unsigned long flags; | ||
| 117 | |||
| 118 | /* the STI hardware buffers the 48-bit count, but to | ||
| 119 | * break it out into two 32-bit access the registers | ||
| 120 | * must be accessed in a certain order. | ||
| 121 | * Always read STI_COUNT_H before STI_COUNT_L. | ||
| 122 | */ | ||
| 123 | raw_spin_lock_irqsave(&p->lock, flags); | ||
| 124 | ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; | ||
| 125 | ticks |= em_sti_read(p, STI_COUNT_L); | ||
| 126 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
| 127 | |||
| 128 | return ticks; | ||
| 129 | } | ||
| 130 | |||
| 131 | static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) | ||
| 132 | { | ||
| 133 | unsigned long flags; | ||
| 134 | |||
| 135 | raw_spin_lock_irqsave(&p->lock, flags); | ||
| 136 | |||
| 137 | /* mask compare A interrupt */ | ||
| 138 | em_sti_write(p, STI_INTENCLR, 1); | ||
| 139 | |||
| 140 | /* update compare A value */ | ||
| 141 | em_sti_write(p, STI_COMPA_H, next >> 32); | ||
| 142 | em_sti_write(p, STI_COMPA_L, next & 0xffffffff); | ||
| 143 | |||
| 144 | /* clear compare A interrupt source */ | ||
| 145 | em_sti_write(p, STI_INTFFCLR, 1); | ||
| 146 | |||
| 147 | /* unmask compare A interrupt */ | ||
| 148 | em_sti_write(p, STI_INTENSET, 1); | ||
| 149 | |||
| 150 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
| 151 | |||
| 152 | return next; | ||
| 153 | } | ||
| 154 | |||
| 155 | static irqreturn_t em_sti_interrupt(int irq, void *dev_id) | ||
| 156 | { | ||
| 157 | struct em_sti_priv *p = dev_id; | ||
| 158 | |||
| 159 | p->ced.event_handler(&p->ced); | ||
| 160 | return IRQ_HANDLED; | ||
| 161 | } | ||
| 162 | |||
| 163 | static int em_sti_start(struct em_sti_priv *p, unsigned int user) | ||
| 164 | { | ||
| 165 | unsigned long flags; | ||
| 166 | int used_before; | ||
| 167 | int ret = 0; | ||
| 168 | |||
| 169 | raw_spin_lock_irqsave(&p->lock, flags); | ||
| 170 | used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
| 171 | if (!used_before) | ||
| 172 | ret = em_sti_enable(p); | ||
| 173 | |||
| 174 | if (!ret) | ||
| 175 | p->active[user] = 1; | ||
| 176 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
| 177 | |||
| 178 | return ret; | ||
| 179 | } | ||
| 180 | |||
| 181 | static void em_sti_stop(struct em_sti_priv *p, unsigned int user) | ||
| 182 | { | ||
| 183 | unsigned long flags; | ||
| 184 | int used_before, used_after; | ||
| 185 | |||
| 186 | raw_spin_lock_irqsave(&p->lock, flags); | ||
| 187 | used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
| 188 | p->active[user] = 0; | ||
| 189 | used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
| 190 | |||
| 191 | if (used_before && !used_after) | ||
| 192 | em_sti_disable(p); | ||
| 193 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
| 194 | } | ||
| 195 | |||
| 196 | static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) | ||
| 197 | { | ||
| 198 | return container_of(cs, struct em_sti_priv, cs); | ||
| 199 | } | ||
| 200 | |||
| 201 | static cycle_t em_sti_clocksource_read(struct clocksource *cs) | ||
| 202 | { | ||
| 203 | return em_sti_count(cs_to_em_sti(cs)); | ||
| 204 | } | ||
| 205 | |||
| 206 | static int em_sti_clocksource_enable(struct clocksource *cs) | ||
| 207 | { | ||
| 208 | int ret; | ||
| 209 | struct em_sti_priv *p = cs_to_em_sti(cs); | ||
| 210 | |||
| 211 | ret = em_sti_start(p, USER_CLOCKSOURCE); | ||
| 212 | if (!ret) | ||
| 213 | __clocksource_updatefreq_hz(cs, p->rate); | ||
| 214 | return ret; | ||
| 215 | } | ||
| 216 | |||
| 217 | static void em_sti_clocksource_disable(struct clocksource *cs) | ||
| 218 | { | ||
| 219 | em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE); | ||
| 220 | } | ||
| 221 | |||
| 222 | static void em_sti_clocksource_resume(struct clocksource *cs) | ||
| 223 | { | ||
| 224 | em_sti_clocksource_enable(cs); | ||
| 225 | } | ||
| 226 | |||
| 227 | static int em_sti_register_clocksource(struct em_sti_priv *p) | ||
| 228 | { | ||
| 229 | struct clocksource *cs = &p->cs; | ||
| 230 | |||
| 231 | memset(cs, 0, sizeof(*cs)); | ||
| 232 | cs->name = dev_name(&p->pdev->dev); | ||
| 233 | cs->rating = 200; | ||
| 234 | cs->read = em_sti_clocksource_read; | ||
| 235 | cs->enable = em_sti_clocksource_enable; | ||
| 236 | cs->disable = em_sti_clocksource_disable; | ||
| 237 | cs->suspend = em_sti_clocksource_disable; | ||
| 238 | cs->resume = em_sti_clocksource_resume; | ||
| 239 | cs->mask = CLOCKSOURCE_MASK(48); | ||
| 240 | cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; | ||
| 241 | |||
| 242 | dev_info(&p->pdev->dev, "used as clock source\n"); | ||
| 243 | |||
| 244 | /* Register with dummy 1 Hz value, gets updated in ->enable() */ | ||
| 245 | clocksource_register_hz(cs, 1); | ||
| 246 | return 0; | ||
| 247 | } | ||
| 248 | |||
| 249 | static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced) | ||
| 250 | { | ||
| 251 | return container_of(ced, struct em_sti_priv, ced); | ||
| 252 | } | ||
| 253 | |||
| 254 | static void em_sti_clock_event_mode(enum clock_event_mode mode, | ||
| 255 | struct clock_event_device *ced) | ||
| 256 | { | ||
| 257 | struct em_sti_priv *p = ced_to_em_sti(ced); | ||
| 258 | |||
| 259 | /* deal with old setting first */ | ||
| 260 | switch (ced->mode) { | ||
| 261 | case CLOCK_EVT_MODE_ONESHOT: | ||
| 262 | em_sti_stop(p, USER_CLOCKEVENT); | ||
| 263 | break; | ||
| 264 | default: | ||
| 265 | break; | ||
| 266 | } | ||
| 267 | |||
| 268 | switch (mode) { | ||
| 269 | case CLOCK_EVT_MODE_ONESHOT: | ||
| 270 | dev_info(&p->pdev->dev, "used for oneshot clock events\n"); | ||
| 271 | em_sti_start(p, USER_CLOCKEVENT); | ||
| 272 | clockevents_config(&p->ced, p->rate); | ||
| 273 | break; | ||
| 274 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
| 275 | case CLOCK_EVT_MODE_UNUSED: | ||
| 276 | em_sti_stop(p, USER_CLOCKEVENT); | ||
| 277 | break; | ||
| 278 | default: | ||
| 279 | break; | ||
| 280 | } | ||
| 281 | } | ||
| 282 | |||
| 283 | static int em_sti_clock_event_next(unsigned long delta, | ||
| 284 | struct clock_event_device *ced) | ||
| 285 | { | ||
| 286 | struct em_sti_priv *p = ced_to_em_sti(ced); | ||
| 287 | cycle_t next; | ||
| 288 | int safe; | ||
| 289 | |||
| 290 | next = em_sti_set_next(p, em_sti_count(p) + delta); | ||
| 291 | safe = em_sti_count(p) < (next - 1); | ||
| 292 | |||
| 293 | return !safe; | ||
| 294 | } | ||
| 295 | |||
| 296 | static void em_sti_register_clockevent(struct em_sti_priv *p) | ||
| 297 | { | ||
| 298 | struct clock_event_device *ced = &p->ced; | ||
| 299 | |||
| 300 | memset(ced, 0, sizeof(*ced)); | ||
| 301 | ced->name = dev_name(&p->pdev->dev); | ||
| 302 | ced->features = CLOCK_EVT_FEAT_ONESHOT; | ||
| 303 | ced->rating = 200; | ||
| 304 | ced->cpumask = cpumask_of(0); | ||
| 305 | ced->set_next_event = em_sti_clock_event_next; | ||
| 306 | ced->set_mode = em_sti_clock_event_mode; | ||
| 307 | |||
| 308 | dev_info(&p->pdev->dev, "used for clock events\n"); | ||
| 309 | |||
| 310 | /* Register with dummy 1 Hz value, gets updated in ->set_mode() */ | ||
| 311 | clockevents_config_and_register(ced, 1, 2, 0xffffffff); | ||
| 312 | } | ||
| 313 | |||
| 314 | static int __devinit em_sti_probe(struct platform_device *pdev) | ||
| 315 | { | ||
| 316 | struct em_sti_priv *p; | ||
| 317 | struct resource *res; | ||
| 318 | int irq, ret; | ||
| 319 | |||
| 320 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
| 321 | if (p == NULL) { | ||
| 322 | dev_err(&pdev->dev, "failed to allocate driver data\n"); | ||
| 323 | ret = -ENOMEM; | ||
| 324 | goto err0; | ||
| 325 | } | ||
| 326 | |||
| 327 | p->pdev = pdev; | ||
| 328 | platform_set_drvdata(pdev, p); | ||
| 329 | |||
| 330 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 331 | if (!res) { | ||
| 332 | dev_err(&pdev->dev, "failed to get I/O memory\n"); | ||
| 333 | ret = -EINVAL; | ||
| 334 | goto err0; | ||
| 335 | } | ||
| 336 | |||
| 337 | irq = platform_get_irq(pdev, 0); | ||
| 338 | if (irq < 0) { | ||
| 339 | dev_err(&pdev->dev, "failed to get irq\n"); | ||
| 340 | ret = -EINVAL; | ||
| 341 | goto err0; | ||
| 342 | } | ||
| 343 | |||
| 344 | /* map memory, let base point to the STI instance */ | ||
| 345 | p->base = ioremap_nocache(res->start, resource_size(res)); | ||
| 346 | if (p->base == NULL) { | ||
| 347 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); | ||
| 348 | ret = -ENXIO; | ||
| 349 | goto err0; | ||
| 350 | } | ||
| 351 | |||
| 352 | /* get hold of clock */ | ||
| 353 | p->clk = clk_get(&pdev->dev, "sclk"); | ||
| 354 | if (IS_ERR(p->clk)) { | ||
| 355 | dev_err(&pdev->dev, "cannot get clock\n"); | ||
| 356 | ret = PTR_ERR(p->clk); | ||
| 357 | goto err1; | ||
| 358 | } | ||
| 359 | |||
| 360 | if (request_irq(irq, em_sti_interrupt, | ||
| 361 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | ||
| 362 | dev_name(&pdev->dev), p)) { | ||
| 363 | dev_err(&pdev->dev, "failed to request low IRQ\n"); | ||
| 364 | ret = -ENOENT; | ||
| 365 | goto err2; | ||
| 366 | } | ||
| 367 | |||
| 368 | raw_spin_lock_init(&p->lock); | ||
| 369 | em_sti_register_clockevent(p); | ||
| 370 | em_sti_register_clocksource(p); | ||
| 371 | return 0; | ||
| 372 | |||
| 373 | err2: | ||
| 374 | clk_put(p->clk); | ||
| 375 | err1: | ||
| 376 | iounmap(p->base); | ||
| 377 | err0: | ||
| 378 | kfree(p); | ||
| 379 | return ret; | ||
| 380 | } | ||
| 381 | |||
| 382 | static int __devexit em_sti_remove(struct platform_device *pdev) | ||
| 383 | { | ||
| 384 | return -EBUSY; /* cannot unregister clockevent and clocksource */ | ||
| 385 | } | ||
| 386 | |||
| 387 | static const struct of_device_id em_sti_dt_ids[] __devinitconst = { | ||
| 388 | { .compatible = "renesas,em-sti", }, | ||
| 389 | {}, | ||
| 390 | }; | ||
| 391 | MODULE_DEVICE_TABLE(of, em_sti_dt_ids); | ||
| 392 | |||
| 393 | static struct platform_driver em_sti_device_driver = { | ||
| 394 | .probe = em_sti_probe, | ||
| 395 | .remove = __devexit_p(em_sti_remove), | ||
| 396 | .driver = { | ||
| 397 | .name = "em_sti", | ||
| 398 | .of_match_table = em_sti_dt_ids, | ||
| 399 | } | ||
| 400 | }; | ||
| 401 | |||
| 402 | module_platform_driver(em_sti_device_driver); | ||
| 403 | |||
| 404 | MODULE_AUTHOR("Magnus Damm"); | ||
| 405 | MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver"); | ||
| 406 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 32fe9ef5cc5c..98b06baafcc6 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
| @@ -48,13 +48,13 @@ struct sh_cmt_priv { | |||
| 48 | unsigned long next_match_value; | 48 | unsigned long next_match_value; |
| 49 | unsigned long max_match_value; | 49 | unsigned long max_match_value; |
| 50 | unsigned long rate; | 50 | unsigned long rate; |
| 51 | spinlock_t lock; | 51 | raw_spinlock_t lock; |
| 52 | struct clock_event_device ced; | 52 | struct clock_event_device ced; |
| 53 | struct clocksource cs; | 53 | struct clocksource cs; |
| 54 | unsigned long total_cycles; | 54 | unsigned long total_cycles; |
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | static DEFINE_SPINLOCK(sh_cmt_lock); | 57 | static DEFINE_RAW_SPINLOCK(sh_cmt_lock); |
| 58 | 58 | ||
| 59 | #define CMSTR -1 /* shared register */ | 59 | #define CMSTR -1 /* shared register */ |
| 60 | #define CMCSR 0 /* channel register */ | 60 | #define CMCSR 0 /* channel register */ |
| @@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | |||
| 139 | unsigned long flags, value; | 139 | unsigned long flags, value; |
| 140 | 140 | ||
| 141 | /* start stop register shared by multiple timer channels */ | 141 | /* start stop register shared by multiple timer channels */ |
| 142 | spin_lock_irqsave(&sh_cmt_lock, flags); | 142 | raw_spin_lock_irqsave(&sh_cmt_lock, flags); |
| 143 | value = sh_cmt_read(p, CMSTR); | 143 | value = sh_cmt_read(p, CMSTR); |
| 144 | 144 | ||
| 145 | if (start) | 145 | if (start) |
| @@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | |||
| 148 | value &= ~(1 << cfg->timer_bit); | 148 | value &= ~(1 << cfg->timer_bit); |
| 149 | 149 | ||
| 150 | sh_cmt_write(p, CMSTR, value); | 150 | sh_cmt_write(p, CMSTR, value); |
| 151 | spin_unlock_irqrestore(&sh_cmt_lock, flags); | 151 | raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) | 154 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) |
| @@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) | |||
| 328 | { | 328 | { |
| 329 | unsigned long flags; | 329 | unsigned long flags; |
| 330 | 330 | ||
| 331 | spin_lock_irqsave(&p->lock, flags); | 331 | raw_spin_lock_irqsave(&p->lock, flags); |
| 332 | __sh_cmt_set_next(p, delta); | 332 | __sh_cmt_set_next(p, delta); |
| 333 | spin_unlock_irqrestore(&p->lock, flags); | 333 | raw_spin_unlock_irqrestore(&p->lock, flags); |
| 334 | } | 334 | } |
| 335 | 335 | ||
| 336 | static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) | 336 | static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) |
| @@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) | |||
| 385 | int ret = 0; | 385 | int ret = 0; |
| 386 | unsigned long flags; | 386 | unsigned long flags; |
| 387 | 387 | ||
| 388 | spin_lock_irqsave(&p->lock, flags); | 388 | raw_spin_lock_irqsave(&p->lock, flags); |
| 389 | 389 | ||
| 390 | if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) | 390 | if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) |
| 391 | ret = sh_cmt_enable(p, &p->rate); | 391 | ret = sh_cmt_enable(p, &p->rate); |
| @@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) | |||
| 398 | if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) | 398 | if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) |
| 399 | __sh_cmt_set_next(p, p->max_match_value); | 399 | __sh_cmt_set_next(p, p->max_match_value); |
| 400 | out: | 400 | out: |
| 401 | spin_unlock_irqrestore(&p->lock, flags); | 401 | raw_spin_unlock_irqrestore(&p->lock, flags); |
| 402 | 402 | ||
| 403 | return ret; | 403 | return ret; |
| 404 | } | 404 | } |
| @@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) | |||
| 408 | unsigned long flags; | 408 | unsigned long flags; |
| 409 | unsigned long f; | 409 | unsigned long f; |
| 410 | 410 | ||
| 411 | spin_lock_irqsave(&p->lock, flags); | 411 | raw_spin_lock_irqsave(&p->lock, flags); |
| 412 | 412 | ||
| 413 | f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); | 413 | f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); |
| 414 | p->flags &= ~flag; | 414 | p->flags &= ~flag; |
| @@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) | |||
| 420 | if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) | 420 | if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) |
| 421 | __sh_cmt_set_next(p, p->max_match_value); | 421 | __sh_cmt_set_next(p, p->max_match_value); |
| 422 | 422 | ||
| 423 | spin_unlock_irqrestore(&p->lock, flags); | 423 | raw_spin_unlock_irqrestore(&p->lock, flags); |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) | 426 | static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) |
| @@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) | |||
| 435 | unsigned long value; | 435 | unsigned long value; |
| 436 | int has_wrapped; | 436 | int has_wrapped; |
| 437 | 437 | ||
| 438 | spin_lock_irqsave(&p->lock, flags); | 438 | raw_spin_lock_irqsave(&p->lock, flags); |
| 439 | value = p->total_cycles; | 439 | value = p->total_cycles; |
| 440 | raw = sh_cmt_get_counter(p, &has_wrapped); | 440 | raw = sh_cmt_get_counter(p, &has_wrapped); |
| 441 | 441 | ||
| 442 | if (unlikely(has_wrapped)) | 442 | if (unlikely(has_wrapped)) |
| 443 | raw += p->match_value + 1; | 443 | raw += p->match_value + 1; |
| 444 | spin_unlock_irqrestore(&p->lock, flags); | 444 | raw_spin_unlock_irqrestore(&p->lock, flags); |
| 445 | 445 | ||
| 446 | return value + raw; | 446 | return value + raw; |
| 447 | } | 447 | } |
| @@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name, | |||
| 591 | p->max_match_value = (1 << p->width) - 1; | 591 | p->max_match_value = (1 << p->width) - 1; |
| 592 | 592 | ||
| 593 | p->match_value = p->max_match_value; | 593 | p->match_value = p->max_match_value; |
| 594 | spin_lock_init(&p->lock); | 594 | raw_spin_lock_init(&p->lock); |
| 595 | 595 | ||
| 596 | if (clockevent_rating) | 596 | if (clockevent_rating) |
| 597 | sh_cmt_register_clockevent(p, name, clockevent_rating); | 597 | sh_cmt_register_clockevent(p, name, clockevent_rating); |
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index a2172f690418..d9b76ca64a61 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c | |||
| @@ -43,7 +43,7 @@ struct sh_mtu2_priv { | |||
| 43 | struct clock_event_device ced; | 43 | struct clock_event_device ced; |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | static DEFINE_SPINLOCK(sh_mtu2_lock); | 46 | static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); |
| 47 | 47 | ||
| 48 | #define TSTR -1 /* shared register */ | 48 | #define TSTR -1 /* shared register */ |
| 49 | #define TCR 0 /* channel register */ | 49 | #define TCR 0 /* channel register */ |
| @@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) | |||
| 107 | unsigned long flags, value; | 107 | unsigned long flags, value; |
| 108 | 108 | ||
| 109 | /* start stop register shared by multiple timer channels */ | 109 | /* start stop register shared by multiple timer channels */ |
| 110 | spin_lock_irqsave(&sh_mtu2_lock, flags); | 110 | raw_spin_lock_irqsave(&sh_mtu2_lock, flags); |
| 111 | value = sh_mtu2_read(p, TSTR); | 111 | value = sh_mtu2_read(p, TSTR); |
| 112 | 112 | ||
| 113 | if (start) | 113 | if (start) |
| @@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) | |||
| 116 | value &= ~(1 << cfg->timer_bit); | 116 | value &= ~(1 << cfg->timer_bit); |
| 117 | 117 | ||
| 118 | sh_mtu2_write(p, TSTR, value); | 118 | sh_mtu2_write(p, TSTR, value); |
| 119 | spin_unlock_irqrestore(&sh_mtu2_lock, flags); | 119 | raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | static int sh_mtu2_enable(struct sh_mtu2_priv *p) | 122 | static int sh_mtu2_enable(struct sh_mtu2_priv *p) |
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 97f54b634be4..c1b51d49d106 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
| @@ -45,7 +45,7 @@ struct sh_tmu_priv { | |||
| 45 | struct clocksource cs; | 45 | struct clocksource cs; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | static DEFINE_SPINLOCK(sh_tmu_lock); | 48 | static DEFINE_RAW_SPINLOCK(sh_tmu_lock); |
| 49 | 49 | ||
| 50 | #define TSTR -1 /* shared register */ | 50 | #define TSTR -1 /* shared register */ |
| 51 | #define TCOR 0 /* channel register */ | 51 | #define TCOR 0 /* channel register */ |
| @@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) | |||
| 95 | unsigned long flags, value; | 95 | unsigned long flags, value; |
| 96 | 96 | ||
| 97 | /* start stop register shared by multiple timer channels */ | 97 | /* start stop register shared by multiple timer channels */ |
| 98 | spin_lock_irqsave(&sh_tmu_lock, flags); | 98 | raw_spin_lock_irqsave(&sh_tmu_lock, flags); |
| 99 | value = sh_tmu_read(p, TSTR); | 99 | value = sh_tmu_read(p, TSTR); |
| 100 | 100 | ||
| 101 | if (start) | 101 | if (start) |
| @@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) | |||
| 104 | value &= ~(1 << cfg->timer_bit); | 104 | value &= ~(1 << cfg->timer_bit); |
| 105 | 105 | ||
| 106 | sh_tmu_write(p, TSTR, value); | 106 | sh_tmu_write(p, TSTR, value); |
| 107 | spin_unlock_irqrestore(&sh_tmu_lock, flags); | 107 | raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static int sh_tmu_enable(struct sh_tmu_priv *p) | 110 | static int sh_tmu_enable(struct sh_tmu_priv *p) |
| @@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) | |||
| 245 | 245 | ||
| 246 | sh_tmu_enable(p); | 246 | sh_tmu_enable(p); |
| 247 | 247 | ||
| 248 | /* TODO: calculate good shift from rate and counter bit width */ | 248 | clockevents_config(ced, p->rate); |
| 249 | |||
| 250 | ced->shift = 32; | ||
| 251 | ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); | ||
| 252 | ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced); | ||
| 253 | ced->min_delta_ns = 5000; | ||
| 254 | 249 | ||
| 255 | if (periodic) { | 250 | if (periodic) { |
| 256 | p->periodic = (p->rate + HZ/2) / HZ; | 251 | p->periodic = (p->rate + HZ/2) / HZ; |
| @@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, | |||
| 323 | ced->set_mode = sh_tmu_clock_event_mode; | 318 | ced->set_mode = sh_tmu_clock_event_mode; |
| 324 | 319 | ||
| 325 | dev_info(&p->pdev->dev, "used for clock events\n"); | 320 | dev_info(&p->pdev->dev, "used for clock events\n"); |
| 326 | clockevents_register_device(ced); | 321 | |
| 322 | clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); | ||
| 327 | 323 | ||
| 328 | ret = setup_irq(p->irqaction.irq, &p->irqaction); | 324 | ret = setup_irq(p->irqaction.irq, &p->irqaction); |
| 329 | if (ret) { | 325 | if (ret) { |
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 7bb00448e13d..b6453d0e44ad 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c | |||
| @@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void) | |||
| 2833 | } | 2833 | } |
| 2834 | 2834 | ||
| 2835 | /* need to set base address for gpc4 */ | 2835 | /* need to set base address for gpc4 */ |
| 2836 | exonys5_gpios_1[11].base = gpio_base1 + 0x2E0; | 2836 | exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; |
| 2837 | 2837 | ||
| 2838 | /* need to set base address for gpx */ | 2838 | /* need to set base address for gpx */ |
| 2839 | chip = &exynos5_gpios_1[21]; | 2839 | chip = &exynos5_gpios_1[21]; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 420953197d0a..d6de2e07fa03 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = { | |||
| 244 | }; | 244 | }; |
| 245 | 245 | ||
| 246 | static struct drm_driver exynos_drm_driver = { | 246 | static struct drm_driver exynos_drm_driver = { |
| 247 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | | 247 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | |
| 248 | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, | 248 | DRIVER_GEM | DRIVER_PRIME, |
| 249 | .load = exynos_drm_load, | 249 | .load = exynos_drm_load, |
| 250 | .unload = exynos_drm_unload, | 250 | .unload = exynos_drm_unload, |
| 251 | .open = exynos_drm_open, | 251 | .open = exynos_drm_open, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 6e9ac7bd1dcf..23d5ad379f86 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c | |||
| @@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) | |||
| 172 | manager_ops->commit(manager->dev); | 172 | manager_ops->commit(manager->dev); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | static struct drm_crtc * | ||
| 176 | exynos_drm_encoder_get_crtc(struct drm_encoder *encoder) | ||
| 177 | { | ||
| 178 | return encoder->crtc; | ||
| 179 | } | ||
| 180 | |||
| 181 | static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { | 175 | static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { |
| 182 | .dpms = exynos_drm_encoder_dpms, | 176 | .dpms = exynos_drm_encoder_dpms, |
| 183 | .mode_fixup = exynos_drm_encoder_mode_fixup, | 177 | .mode_fixup = exynos_drm_encoder_mode_fixup, |
| 184 | .mode_set = exynos_drm_encoder_mode_set, | 178 | .mode_set = exynos_drm_encoder_mode_set, |
| 185 | .prepare = exynos_drm_encoder_prepare, | 179 | .prepare = exynos_drm_encoder_prepare, |
| 186 | .commit = exynos_drm_encoder_commit, | 180 | .commit = exynos_drm_encoder_commit, |
| 187 | .get_crtc = exynos_drm_encoder_get_crtc, | ||
| 188 | }; | 181 | }; |
| 189 | 182 | ||
| 190 | static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) | 183 | static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index f82a299553fb..4ccfe4328fab 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
| @@ -51,11 +51,22 @@ struct exynos_drm_fb { | |||
| 51 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) | 51 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) |
| 52 | { | 52 | { |
| 53 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | 53 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); |
| 54 | unsigned int i; | ||
| 54 | 55 | ||
| 55 | DRM_DEBUG_KMS("%s\n", __FILE__); | 56 | DRM_DEBUG_KMS("%s\n", __FILE__); |
| 56 | 57 | ||
| 57 | drm_framebuffer_cleanup(fb); | 58 | drm_framebuffer_cleanup(fb); |
| 58 | 59 | ||
| 60 | for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { | ||
| 61 | struct drm_gem_object *obj; | ||
| 62 | |||
| 63 | if (exynos_fb->exynos_gem_obj[i] == NULL) | ||
| 64 | continue; | ||
| 65 | |||
| 66 | obj = &exynos_fb->exynos_gem_obj[i]->base; | ||
| 67 | drm_gem_object_unreference_unlocked(obj); | ||
| 68 | } | ||
| 69 | |||
| 59 | kfree(exynos_fb); | 70 | kfree(exynos_fb); |
| 60 | exynos_fb = NULL; | 71 | exynos_fb = NULL; |
| 61 | } | 72 | } |
| @@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
| 134 | return ERR_PTR(-ENOENT); | 145 | return ERR_PTR(-ENOENT); |
| 135 | } | 146 | } |
| 136 | 147 | ||
| 137 | drm_gem_object_unreference_unlocked(obj); | ||
| 138 | |||
| 139 | fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); | 148 | fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); |
| 140 | if (IS_ERR(fb)) | 149 | if (IS_ERR(fb)) { |
| 150 | drm_gem_object_unreference_unlocked(obj); | ||
| 141 | return fb; | 151 | return fb; |
| 152 | } | ||
| 142 | 153 | ||
| 143 | exynos_fb = to_exynos_fb(fb); | 154 | exynos_fb = to_exynos_fb(fb); |
| 144 | nr = exynos_drm_format_num_buffers(fb->pixel_format); | 155 | nr = exynos_drm_format_num_buffers(fb->pixel_format); |
| @@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
| 152 | return ERR_PTR(-ENOENT); | 163 | return ERR_PTR(-ENOENT); |
| 153 | } | 164 | } |
| 154 | 165 | ||
| 155 | drm_gem_object_unreference_unlocked(obj); | ||
| 156 | |||
| 157 | exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); | 166 | exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); |
| 158 | } | 167 | } |
| 159 | 168 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 3ecb30d93552..50823756cdea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h | |||
| @@ -31,10 +31,10 @@ | |||
| 31 | static inline int exynos_drm_format_num_buffers(uint32_t format) | 31 | static inline int exynos_drm_format_num_buffers(uint32_t format) |
| 32 | { | 32 | { |
| 33 | switch (format) { | 33 | switch (format) { |
| 34 | case DRM_FORMAT_NV12M: | 34 | case DRM_FORMAT_NV12: |
| 35 | case DRM_FORMAT_NV12MT: | 35 | case DRM_FORMAT_NV12MT: |
| 36 | return 2; | 36 | return 2; |
| 37 | case DRM_FORMAT_YUV420M: | 37 | case DRM_FORMAT_YUV420: |
| 38 | return 3; | 38 | return 3; |
| 39 | default: | 39 | default: |
| 40 | return 1; | 40 | return 1; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index fc91293c4560..5c8b683029ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
| @@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
| 689 | struct drm_device *dev, uint32_t handle, | 689 | struct drm_device *dev, uint32_t handle, |
| 690 | uint64_t *offset) | 690 | uint64_t *offset) |
| 691 | { | 691 | { |
| 692 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
| 693 | struct drm_gem_object *obj; | 692 | struct drm_gem_object *obj; |
| 694 | int ret = 0; | 693 | int ret = 0; |
| 695 | 694 | ||
| @@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
| 710 | goto unlock; | 709 | goto unlock; |
| 711 | } | 710 | } |
| 712 | 711 | ||
| 713 | exynos_gem_obj = to_exynos_gem_obj(obj); | 712 | if (!obj->map_list.map) { |
| 714 | 713 | ret = drm_gem_create_mmap_offset(obj); | |
| 715 | if (!exynos_gem_obj->base.map_list.map) { | ||
| 716 | ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base); | ||
| 717 | if (ret) | 714 | if (ret) |
| 718 | goto out; | 715 | goto out; |
| 719 | } | 716 | } |
| 720 | 717 | ||
| 721 | *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT; | 718 | *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; |
| 722 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); | 719 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); |
| 723 | 720 | ||
| 724 | out: | 721 | out: |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 68ef01028375..e2147a2ddcec 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
| 365 | switch (win_data->pixel_format) { | 365 | switch (win_data->pixel_format) { |
| 366 | case DRM_FORMAT_NV12MT: | 366 | case DRM_FORMAT_NV12MT: |
| 367 | tiled_mode = true; | 367 | tiled_mode = true; |
| 368 | case DRM_FORMAT_NV12M: | 368 | case DRM_FORMAT_NV12: |
| 369 | crcb_mode = false; | 369 | crcb_mode = false; |
| 370 | buf_num = 2; | 370 | buf_num = 2; |
| 371 | break; | 371 | break; |
| @@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx) | |||
| 601 | mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); | 601 | mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); |
| 602 | 602 | ||
| 603 | /* setting graphical layers */ | 603 | /* setting graphical layers */ |
| 604 | |||
| 605 | val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ | 604 | val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ |
| 606 | val |= MXR_GRP_CFG_WIN_BLEND_EN; | 605 | val |= MXR_GRP_CFG_WIN_BLEND_EN; |
| 606 | val |= MXR_GRP_CFG_BLEND_PRE_MUL; | ||
| 607 | val |= MXR_GRP_CFG_PIXEL_BLEND_EN; | ||
| 607 | val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ | 608 | val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ |
| 608 | 609 | ||
| 609 | /* the same configuration for both layers */ | 610 | /* the same configuration for both layers */ |
| 610 | mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); | 611 | mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); |
| 611 | |||
| 612 | val |= MXR_GRP_CFG_BLEND_PRE_MUL; | ||
| 613 | val |= MXR_GRP_CFG_PIXEL_BLEND_EN; | ||
| 614 | mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); | 612 | mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); |
| 615 | 613 | ||
| 614 | /* setting video layers */ | ||
| 615 | val = MXR_GRP_CFG_ALPHA_VAL(0); | ||
| 616 | mixer_reg_write(res, MXR_VIDEO_CFG, val); | ||
| 617 | |||
| 616 | /* configuration of Video Processor Registers */ | 618 | /* configuration of Video Processor Registers */ |
| 617 | vp_win_reset(ctx); | 619 | vp_win_reset(ctx); |
| 618 | vp_default_filter(res); | 620 | vp_default_filter(res); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 238a52165833..9fe9ebe52a7a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { | |||
| 233 | .has_blt_ring = 1, | 233 | .has_blt_ring = 1, |
| 234 | .has_llc = 1, | 234 | .has_llc = 1, |
| 235 | .has_pch_split = 1, | 235 | .has_pch_split = 1, |
| 236 | .has_force_wake = 1, | ||
| 236 | }; | 237 | }; |
| 237 | 238 | ||
| 238 | static const struct intel_device_info intel_sandybridge_m_info = { | 239 | static const struct intel_device_info intel_sandybridge_m_info = { |
| @@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { | |||
| 243 | .has_blt_ring = 1, | 244 | .has_blt_ring = 1, |
| 244 | .has_llc = 1, | 245 | .has_llc = 1, |
| 245 | .has_pch_split = 1, | 246 | .has_pch_split = 1, |
| 247 | .has_force_wake = 1, | ||
| 246 | }; | 248 | }; |
| 247 | 249 | ||
| 248 | static const struct intel_device_info intel_ivybridge_d_info = { | 250 | static const struct intel_device_info intel_ivybridge_d_info = { |
| @@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = { | |||
| 252 | .has_blt_ring = 1, | 254 | .has_blt_ring = 1, |
| 253 | .has_llc = 1, | 255 | .has_llc = 1, |
| 254 | .has_pch_split = 1, | 256 | .has_pch_split = 1, |
| 257 | .has_force_wake = 1, | ||
| 255 | }; | 258 | }; |
| 256 | 259 | ||
| 257 | static const struct intel_device_info intel_ivybridge_m_info = { | 260 | static const struct intel_device_info intel_ivybridge_m_info = { |
| @@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = { | |||
| 262 | .has_blt_ring = 1, | 265 | .has_blt_ring = 1, |
| 263 | .has_llc = 1, | 266 | .has_llc = 1, |
| 264 | .has_pch_split = 1, | 267 | .has_pch_split = 1, |
| 268 | .has_force_wake = 1, | ||
| 265 | }; | 269 | }; |
| 266 | 270 | ||
| 267 | static const struct intel_device_info intel_valleyview_m_info = { | 271 | static const struct intel_device_info intel_valleyview_m_info = { |
| @@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = { | |||
| 289 | .has_blt_ring = 1, | 293 | .has_blt_ring = 1, |
| 290 | .has_llc = 1, | 294 | .has_llc = 1, |
| 291 | .has_pch_split = 1, | 295 | .has_pch_split = 1, |
| 296 | .has_force_wake = 1, | ||
| 292 | }; | 297 | }; |
| 293 | 298 | ||
| 294 | static const struct intel_device_info intel_haswell_m_info = { | 299 | static const struct intel_device_info intel_haswell_m_info = { |
| @@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = { | |||
| 298 | .has_blt_ring = 1, | 303 | .has_blt_ring = 1, |
| 299 | .has_llc = 1, | 304 | .has_llc = 1, |
| 300 | .has_pch_split = 1, | 305 | .has_pch_split = 1, |
| 306 | .has_force_wake = 1, | ||
| 301 | }; | 307 | }; |
| 302 | 308 | ||
| 303 | static const struct pci_device_id pciidlist[] = { /* aka */ | 309 | static const struct pci_device_id pciidlist[] = { /* aka */ |
| @@ -1139,10 +1145,9 @@ MODULE_LICENSE("GPL and additional rights"); | |||
| 1139 | 1145 | ||
| 1140 | /* We give fast paths for the really cool registers */ | 1146 | /* We give fast paths for the really cool registers */ |
| 1141 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | 1147 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
| 1142 | (((dev_priv)->info->gen >= 6) && \ | 1148 | ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ |
| 1143 | ((reg) < 0x40000) && \ | 1149 | ((reg) < 0x40000) && \ |
| 1144 | ((reg) != FORCEWAKE)) && \ | 1150 | ((reg) != FORCEWAKE)) |
| 1145 | (!IS_VALLEYVIEW((dev_priv)->dev)) | ||
| 1146 | 1151 | ||
| 1147 | #define __i915_read(x, y) \ | 1152 | #define __i915_read(x, y) \ |
| 1148 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1153 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c9cfc67c2cf5..b0b676abde0d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -285,6 +285,7 @@ struct intel_device_info { | |||
| 285 | u8 is_ivybridge:1; | 285 | u8 is_ivybridge:1; |
| 286 | u8 is_valleyview:1; | 286 | u8 is_valleyview:1; |
| 287 | u8 has_pch_split:1; | 287 | u8 has_pch_split:1; |
| 288 | u8 has_force_wake:1; | ||
| 288 | u8 is_haswell:1; | 289 | u8 is_haswell:1; |
| 289 | u8 has_fbc:1; | 290 | u8 has_fbc:1; |
| 290 | u8 has_pipe_cxsr:1; | 291 | u8 has_pipe_cxsr:1; |
| @@ -1101,6 +1102,8 @@ struct drm_i915_file_private { | |||
| 1101 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | 1102 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
| 1102 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | 1103 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
| 1103 | 1104 | ||
| 1105 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) | ||
| 1106 | |||
| 1104 | #include "i915_trace.h" | 1107 | #include "i915_trace.h" |
| 1105 | 1108 | ||
| 1106 | /** | 1109 | /** |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1417660a93ec..b1fe0edda955 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -510,7 +510,7 @@ out: | |||
| 510 | return ret; | 510 | return ret; |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) | 513 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
| 514 | { | 514 | { |
| 515 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 515 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 516 | int pipe; | 516 | int pipe; |
| @@ -550,6 +550,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
| 550 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | 550 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); |
| 551 | } | 551 | } |
| 552 | 552 | ||
| 553 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | ||
| 554 | { | ||
| 555 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
| 556 | int pipe; | ||
| 557 | |||
| 558 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) | ||
| 559 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | ||
| 560 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | ||
| 561 | SDE_AUDIO_POWER_SHIFT_CPT); | ||
| 562 | |||
| 563 | if (pch_iir & SDE_AUX_MASK_CPT) | ||
| 564 | DRM_DEBUG_DRIVER("AUX channel interrupt\n"); | ||
| 565 | |||
| 566 | if (pch_iir & SDE_GMBUS_CPT) | ||
| 567 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | ||
| 568 | |||
| 569 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | ||
| 570 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | ||
| 571 | |||
| 572 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | ||
| 573 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | ||
| 574 | |||
| 575 | if (pch_iir & SDE_FDI_MASK_CPT) | ||
| 576 | for_each_pipe(pipe) | ||
| 577 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | ||
| 578 | pipe_name(pipe), | ||
| 579 | I915_READ(FDI_RX_IIR(pipe))); | ||
| 580 | } | ||
| 581 | |||
| 553 | static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | 582 | static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) |
| 554 | { | 583 | { |
| 555 | struct drm_device *dev = (struct drm_device *) arg; | 584 | struct drm_device *dev = (struct drm_device *) arg; |
| @@ -591,7 +620,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | |||
| 591 | 620 | ||
| 592 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | 621 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) |
| 593 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 622 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
| 594 | pch_irq_handler(dev, pch_iir); | 623 | cpt_irq_handler(dev, pch_iir); |
| 595 | 624 | ||
| 596 | /* clear PCH hotplug event before clear CPU irq */ | 625 | /* clear PCH hotplug event before clear CPU irq */ |
| 597 | I915_WRITE(SDEIIR, pch_iir); | 626 | I915_WRITE(SDEIIR, pch_iir); |
| @@ -684,7 +713,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
| 684 | if (de_iir & DE_PCH_EVENT) { | 713 | if (de_iir & DE_PCH_EVENT) { |
| 685 | if (pch_iir & hotplug_mask) | 714 | if (pch_iir & hotplug_mask) |
| 686 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 715 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
| 687 | pch_irq_handler(dev, pch_iir); | 716 | if (HAS_PCH_CPT(dev)) |
| 717 | cpt_irq_handler(dev, pch_iir); | ||
| 718 | else | ||
| 719 | ibx_irq_handler(dev, pch_iir); | ||
| 688 | } | 720 | } |
| 689 | 721 | ||
| 690 | if (de_iir & DE_PCU_EVENT) { | 722 | if (de_iir & DE_PCU_EVENT) { |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2d49b9507ed0..48d5e8e051cf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -210,6 +210,14 @@ | |||
| 210 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | 210 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) |
| 211 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) | 211 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) |
| 212 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | 212 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) |
| 213 | /* IVB has funny definitions for which plane to flip. */ | ||
| 214 | #define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) | ||
| 215 | #define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) | ||
| 216 | #define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) | ||
| 217 | #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) | ||
| 218 | #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) | ||
| 219 | #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) | ||
| 220 | |||
| 213 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) | 221 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) |
| 214 | #define MI_MM_SPACE_GTT (1<<8) | 222 | #define MI_MM_SPACE_GTT (1<<8) |
| 215 | #define MI_MM_SPACE_PHYSICAL (0<<8) | 223 | #define MI_MM_SPACE_PHYSICAL (0<<8) |
| @@ -3313,7 +3321,7 @@ | |||
| 3313 | 3321 | ||
| 3314 | /* PCH */ | 3322 | /* PCH */ |
| 3315 | 3323 | ||
| 3316 | /* south display engine interrupt */ | 3324 | /* south display engine interrupt: IBX */ |
| 3317 | #define SDE_AUDIO_POWER_D (1 << 27) | 3325 | #define SDE_AUDIO_POWER_D (1 << 27) |
| 3318 | #define SDE_AUDIO_POWER_C (1 << 26) | 3326 | #define SDE_AUDIO_POWER_C (1 << 26) |
| 3319 | #define SDE_AUDIO_POWER_B (1 << 25) | 3327 | #define SDE_AUDIO_POWER_B (1 << 25) |
| @@ -3349,15 +3357,44 @@ | |||
| 3349 | #define SDE_TRANSA_CRC_ERR (1 << 1) | 3357 | #define SDE_TRANSA_CRC_ERR (1 << 1) |
| 3350 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) | 3358 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) |
| 3351 | #define SDE_TRANS_MASK (0x3f) | 3359 | #define SDE_TRANS_MASK (0x3f) |
| 3352 | /* CPT */ | 3360 | |
| 3353 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | 3361 | /* south display engine interrupt: CPT/PPT */ |
| 3362 | #define SDE_AUDIO_POWER_D_CPT (1 << 31) | ||
| 3363 | #define SDE_AUDIO_POWER_C_CPT (1 << 30) | ||
| 3364 | #define SDE_AUDIO_POWER_B_CPT (1 << 29) | ||
| 3365 | #define SDE_AUDIO_POWER_SHIFT_CPT 29 | ||
| 3366 | #define SDE_AUDIO_POWER_MASK_CPT (7 << 29) | ||
| 3367 | #define SDE_AUXD_CPT (1 << 27) | ||
| 3368 | #define SDE_AUXC_CPT (1 << 26) | ||
| 3369 | #define SDE_AUXB_CPT (1 << 25) | ||
| 3370 | #define SDE_AUX_MASK_CPT (7 << 25) | ||
| 3354 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 3371 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
| 3355 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | 3372 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) |
| 3356 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | 3373 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) |
| 3374 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | ||
| 3357 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ | 3375 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ |
| 3358 | SDE_PORTD_HOTPLUG_CPT | \ | 3376 | SDE_PORTD_HOTPLUG_CPT | \ |
| 3359 | SDE_PORTC_HOTPLUG_CPT | \ | 3377 | SDE_PORTC_HOTPLUG_CPT | \ |
| 3360 | SDE_PORTB_HOTPLUG_CPT) | 3378 | SDE_PORTB_HOTPLUG_CPT) |
| 3379 | #define SDE_GMBUS_CPT (1 << 17) | ||
| 3380 | #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) | ||
| 3381 | #define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) | ||
| 3382 | #define SDE_FDI_RXC_CPT (1 << 8) | ||
| 3383 | #define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) | ||
| 3384 | #define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) | ||
| 3385 | #define SDE_FDI_RXB_CPT (1 << 4) | ||
| 3386 | #define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) | ||
| 3387 | #define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) | ||
| 3388 | #define SDE_FDI_RXA_CPT (1 << 0) | ||
| 3389 | #define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ | ||
| 3390 | SDE_AUDIO_CP_REQ_B_CPT | \ | ||
| 3391 | SDE_AUDIO_CP_REQ_A_CPT) | ||
| 3392 | #define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ | ||
| 3393 | SDE_AUDIO_CP_CHG_B_CPT | \ | ||
| 3394 | SDE_AUDIO_CP_CHG_A_CPT) | ||
| 3395 | #define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ | ||
| 3396 | SDE_FDI_RXB_CPT | \ | ||
| 3397 | SDE_FDI_RXA_CPT) | ||
| 3361 | 3398 | ||
| 3362 | #define SDEISR 0xc4000 | 3399 | #define SDEISR 0xc4000 |
| 3363 | #define SDEIMR 0xc4004 | 3400 | #define SDEIMR 0xc4004 |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 914789420906..e0aa064def31 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -6158,17 +6158,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
| 6158 | struct drm_i915_private *dev_priv = dev->dev_private; | 6158 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 6159 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6159 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 6160 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | 6160 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; |
| 6161 | uint32_t plane_bit = 0; | ||
| 6161 | int ret; | 6162 | int ret; |
| 6162 | 6163 | ||
| 6163 | ret = intel_pin_and_fence_fb_obj(dev, obj, ring); | 6164 | ret = intel_pin_and_fence_fb_obj(dev, obj, ring); |
| 6164 | if (ret) | 6165 | if (ret) |
| 6165 | goto err; | 6166 | goto err; |
| 6166 | 6167 | ||
| 6168 | switch(intel_crtc->plane) { | ||
| 6169 | case PLANE_A: | ||
| 6170 | plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; | ||
| 6171 | break; | ||
| 6172 | case PLANE_B: | ||
| 6173 | plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; | ||
| 6174 | break; | ||
| 6175 | case PLANE_C: | ||
| 6176 | plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; | ||
| 6177 | break; | ||
| 6178 | default: | ||
| 6179 | WARN_ONCE(1, "unknown plane in flip command\n"); | ||
| 6180 | ret = -ENODEV; | ||
| 6181 | goto err; | ||
| 6182 | } | ||
| 6183 | |||
| 6167 | ret = intel_ring_begin(ring, 4); | 6184 | ret = intel_ring_begin(ring, 4); |
| 6168 | if (ret) | 6185 | if (ret) |
| 6169 | goto err_unpin; | 6186 | goto err_unpin; |
| 6170 | 6187 | ||
| 6171 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); | 6188 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); |
| 6172 | intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); | 6189 | intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); |
| 6173 | intel_ring_emit(ring, (obj->gtt_offset)); | 6190 | intel_ring_emit(ring, (obj->gtt_offset)); |
| 6174 | intel_ring_emit(ring, (MI_NOOP)); | 6191 | intel_ring_emit(ring, (MI_NOOP)); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b59b6d5b7583..e5b84ff89ca5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -266,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) | |||
| 266 | 266 | ||
| 267 | static int init_ring_common(struct intel_ring_buffer *ring) | 267 | static int init_ring_common(struct intel_ring_buffer *ring) |
| 268 | { | 268 | { |
| 269 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 269 | struct drm_device *dev = ring->dev; |
| 270 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 270 | struct drm_i915_gem_object *obj = ring->obj; | 271 | struct drm_i915_gem_object *obj = ring->obj; |
| 272 | int ret = 0; | ||
| 271 | u32 head; | 273 | u32 head; |
| 272 | 274 | ||
| 275 | if (HAS_FORCE_WAKE(dev)) | ||
| 276 | gen6_gt_force_wake_get(dev_priv); | ||
| 277 | |||
| 273 | /* Stop the ring if it's running. */ | 278 | /* Stop the ring if it's running. */ |
| 274 | I915_WRITE_CTL(ring, 0); | 279 | I915_WRITE_CTL(ring, 0); |
| 275 | I915_WRITE_HEAD(ring, 0); | 280 | I915_WRITE_HEAD(ring, 0); |
| @@ -317,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
| 317 | I915_READ_HEAD(ring), | 322 | I915_READ_HEAD(ring), |
| 318 | I915_READ_TAIL(ring), | 323 | I915_READ_TAIL(ring), |
| 319 | I915_READ_START(ring)); | 324 | I915_READ_START(ring)); |
| 320 | return -EIO; | 325 | ret = -EIO; |
| 326 | goto out; | ||
| 321 | } | 327 | } |
| 322 | 328 | ||
| 323 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) | 329 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) |
| @@ -326,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
| 326 | ring->head = I915_READ_HEAD(ring); | 332 | ring->head = I915_READ_HEAD(ring); |
| 327 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 333 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
| 328 | ring->space = ring_space(ring); | 334 | ring->space = ring_space(ring); |
| 335 | ring->last_retired_head = -1; | ||
| 329 | } | 336 | } |
| 330 | 337 | ||
| 331 | return 0; | 338 | out: |
| 339 | if (HAS_FORCE_WAKE(dev)) | ||
| 340 | gen6_gt_force_wake_put(dev_priv); | ||
| 341 | |||
| 342 | return ret; | ||
| 332 | } | 343 | } |
| 333 | 344 | ||
| 334 | static int | 345 | static int |
| @@ -987,6 +998,10 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
| 987 | if (ret) | 998 | if (ret) |
| 988 | goto err_unref; | 999 | goto err_unref; |
| 989 | 1000 | ||
| 1001 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | ||
| 1002 | if (ret) | ||
| 1003 | goto err_unpin; | ||
| 1004 | |||
| 990 | ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, | 1005 | ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, |
| 991 | ring->size); | 1006 | ring->size); |
| 992 | if (ring->virtual_start == NULL) { | 1007 | if (ring->virtual_start == NULL) { |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 4e7dd2b4843d..c16554122ccd 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
| @@ -52,6 +52,7 @@ struct evergreen_cs_track { | |||
| 52 | u32 cb_color_view[12]; | 52 | u32 cb_color_view[12]; |
| 53 | u32 cb_color_pitch[12]; | 53 | u32 cb_color_pitch[12]; |
| 54 | u32 cb_color_slice[12]; | 54 | u32 cb_color_slice[12]; |
| 55 | u32 cb_color_slice_idx[12]; | ||
| 55 | u32 cb_color_attrib[12]; | 56 | u32 cb_color_attrib[12]; |
| 56 | u32 cb_color_cmask_slice[8];/* unused */ | 57 | u32 cb_color_cmask_slice[8];/* unused */ |
| 57 | u32 cb_color_fmask_slice[8];/* unused */ | 58 | u32 cb_color_fmask_slice[8];/* unused */ |
| @@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track) | |||
| 127 | track->cb_color_info[i] = 0; | 128 | track->cb_color_info[i] = 0; |
| 128 | track->cb_color_view[i] = 0xFFFFFFFF; | 129 | track->cb_color_view[i] = 0xFFFFFFFF; |
| 129 | track->cb_color_pitch[i] = 0; | 130 | track->cb_color_pitch[i] = 0; |
| 130 | track->cb_color_slice[i] = 0; | 131 | track->cb_color_slice[i] = 0xfffffff; |
| 132 | track->cb_color_slice_idx[i] = 0; | ||
| 131 | } | 133 | } |
| 132 | track->cb_target_mask = 0xFFFFFFFF; | 134 | track->cb_target_mask = 0xFFFFFFFF; |
| 133 | track->cb_shader_mask = 0xFFFFFFFF; | 135 | track->cb_shader_mask = 0xFFFFFFFF; |
| 134 | track->cb_dirty = true; | 136 | track->cb_dirty = true; |
| 135 | 137 | ||
| 138 | track->db_depth_slice = 0xffffffff; | ||
| 136 | track->db_depth_view = 0xFFFFC000; | 139 | track->db_depth_view = 0xFFFFC000; |
| 137 | track->db_depth_size = 0xFFFFFFFF; | 140 | track->db_depth_size = 0xFFFFFFFF; |
| 138 | track->db_depth_control = 0xFFFFFFFF; | 141 | track->db_depth_control = 0xFFFFFFFF; |
| @@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, | |||
| 250 | { | 253 | { |
| 251 | struct evergreen_cs_track *track = p->track; | 254 | struct evergreen_cs_track *track = p->track; |
| 252 | unsigned palign, halign, tileb, slice_pt; | 255 | unsigned palign, halign, tileb, slice_pt; |
| 256 | unsigned mtile_pr, mtile_ps, mtileb; | ||
| 253 | 257 | ||
| 254 | tileb = 64 * surf->bpe * surf->nsamples; | 258 | tileb = 64 * surf->bpe * surf->nsamples; |
| 255 | palign = track->group_size / (8 * surf->bpe * surf->nsamples); | ||
| 256 | palign = MAX(8, palign); | ||
| 257 | slice_pt = 1; | 259 | slice_pt = 1; |
| 258 | if (tileb > surf->tsplit) { | 260 | if (tileb > surf->tsplit) { |
| 259 | slice_pt = tileb / surf->tsplit; | 261 | slice_pt = tileb / surf->tsplit; |
| @@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, | |||
| 262 | /* macro tile width & height */ | 264 | /* macro tile width & height */ |
| 263 | palign = (8 * surf->bankw * track->npipes) * surf->mtilea; | 265 | palign = (8 * surf->bankw * track->npipes) * surf->mtilea; |
| 264 | halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; | 266 | halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; |
| 265 | surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; | 267 | mtileb = (palign / 8) * (halign / 8) * tileb;; |
| 268 | mtile_pr = surf->nbx / palign; | ||
| 269 | mtile_ps = (mtile_pr * surf->nby) / halign; | ||
| 270 | surf->layer_size = mtile_ps * mtileb * slice_pt; | ||
| 266 | surf->base_align = (palign / 8) * (halign / 8) * tileb; | 271 | surf->base_align = (palign / 8) * (halign / 8) * tileb; |
| 267 | surf->palign = palign; | 272 | surf->palign = palign; |
| 268 | surf->halign = halign; | 273 | surf->halign = halign; |
| @@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i | |||
| 434 | 439 | ||
| 435 | offset += surf.layer_size * mslice; | 440 | offset += surf.layer_size * mslice; |
| 436 | if (offset > radeon_bo_size(track->cb_color_bo[id])) { | 441 | if (offset > radeon_bo_size(track->cb_color_bo[id])) { |
| 442 | /* old ddx are broken they allocate bo with w*h*bpp but | ||
| 443 | * program slice with ALIGN(h, 8), catch this and patch | ||
| 444 | * command stream. | ||
| 445 | */ | ||
| 446 | if (!surf.mode) { | ||
| 447 | volatile u32 *ib = p->ib.ptr; | ||
| 448 | unsigned long tmp, nby, bsize, size, min = 0; | ||
| 449 | |||
| 450 | /* find the height the ddx wants */ | ||
| 451 | if (surf.nby > 8) { | ||
| 452 | min = surf.nby - 8; | ||
| 453 | } | ||
| 454 | bsize = radeon_bo_size(track->cb_color_bo[id]); | ||
| 455 | tmp = track->cb_color_bo_offset[id] << 8; | ||
| 456 | for (nby = surf.nby; nby > min; nby--) { | ||
| 457 | size = nby * surf.nbx * surf.bpe * surf.nsamples; | ||
| 458 | if ((tmp + size * mslice) <= bsize) { | ||
| 459 | break; | ||
| 460 | } | ||
| 461 | } | ||
| 462 | if (nby > min) { | ||
| 463 | surf.nby = nby; | ||
| 464 | slice = ((nby * surf.nbx) / 64) - 1; | ||
| 465 | if (!evergreen_surface_check(p, &surf, "cb")) { | ||
| 466 | /* check if this one works */ | ||
| 467 | tmp += surf.layer_size * mslice; | ||
| 468 | if (tmp <= bsize) { | ||
| 469 | ib[track->cb_color_slice_idx[id]] = slice; | ||
| 470 | goto old_ddx_ok; | ||
| 471 | } | ||
| 472 | } | ||
| 473 | } | ||
| 474 | } | ||
| 437 | dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " | 475 | dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " |
| 438 | "offset %d, max layer %d, bo size %ld, slice %d)\n", | 476 | "offset %d, max layer %d, bo size %ld, slice %d)\n", |
| 439 | __func__, __LINE__, id, surf.layer_size, | 477 | __func__, __LINE__, id, surf.layer_size, |
| @@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i | |||
| 446 | surf.tsplit, surf.mtilea); | 484 | surf.tsplit, surf.mtilea); |
| 447 | return -EINVAL; | 485 | return -EINVAL; |
| 448 | } | 486 | } |
| 487 | old_ddx_ok: | ||
| 449 | 488 | ||
| 450 | return 0; | 489 | return 0; |
| 451 | } | 490 | } |
| @@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
| 1532 | case CB_COLOR7_SLICE: | 1571 | case CB_COLOR7_SLICE: |
| 1533 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; | 1572 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; |
| 1534 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1573 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
| 1574 | track->cb_color_slice_idx[tmp] = idx; | ||
| 1535 | track->cb_dirty = true; | 1575 | track->cb_dirty = true; |
| 1536 | break; | 1576 | break; |
| 1537 | case CB_COLOR8_SLICE: | 1577 | case CB_COLOR8_SLICE: |
| @@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
| 1540 | case CB_COLOR11_SLICE: | 1580 | case CB_COLOR11_SLICE: |
| 1541 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; | 1581 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; |
| 1542 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1582 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
| 1583 | track->cb_color_slice_idx[tmp] = idx; | ||
| 1543 | track->cb_dirty = true; | 1584 | track->cb_dirty = true; |
| 1544 | break; | 1585 | break; |
| 1545 | case CB_COLOR0_ATTRIB: | 1586 | case CB_COLOR0_ATTRIB: |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 3df4efa11942..3186522a4458 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -460,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
| 460 | rdev->config.cayman.max_pipes_per_simd = 4; | 460 | rdev->config.cayman.max_pipes_per_simd = 4; |
| 461 | rdev->config.cayman.max_tile_pipes = 2; | 461 | rdev->config.cayman.max_tile_pipes = 2; |
| 462 | if ((rdev->pdev->device == 0x9900) || | 462 | if ((rdev->pdev->device == 0x9900) || |
| 463 | (rdev->pdev->device == 0x9901)) { | 463 | (rdev->pdev->device == 0x9901) || |
| 464 | (rdev->pdev->device == 0x9905) || | ||
| 465 | (rdev->pdev->device == 0x9906) || | ||
| 466 | (rdev->pdev->device == 0x9907) || | ||
| 467 | (rdev->pdev->device == 0x9908) || | ||
| 468 | (rdev->pdev->device == 0x9909) || | ||
| 469 | (rdev->pdev->device == 0x9910) || | ||
| 470 | (rdev->pdev->device == 0x9917)) { | ||
| 464 | rdev->config.cayman.max_simds_per_se = 6; | 471 | rdev->config.cayman.max_simds_per_se = 6; |
| 465 | rdev->config.cayman.max_backends_per_se = 2; | 472 | rdev->config.cayman.max_backends_per_se = 2; |
| 466 | } else if ((rdev->pdev->device == 0x9903) || | 473 | } else if ((rdev->pdev->device == 0x9903) || |
| 467 | (rdev->pdev->device == 0x9904)) { | 474 | (rdev->pdev->device == 0x9904) || |
| 475 | (rdev->pdev->device == 0x990A) || | ||
| 476 | (rdev->pdev->device == 0x9913) || | ||
| 477 | (rdev->pdev->device == 0x9918)) { | ||
| 468 | rdev->config.cayman.max_simds_per_se = 4; | 478 | rdev->config.cayman.max_simds_per_se = 4; |
| 469 | rdev->config.cayman.max_backends_per_se = 2; | 479 | rdev->config.cayman.max_backends_per_se = 2; |
| 470 | } else if ((rdev->pdev->device == 0x9990) || | 480 | } else if ((rdev->pdev->device == 0x9919) || |
| 471 | (rdev->pdev->device == 0x9991)) { | 481 | (rdev->pdev->device == 0x9990) || |
| 482 | (rdev->pdev->device == 0x9991) || | ||
| 483 | (rdev->pdev->device == 0x9994) || | ||
| 484 | (rdev->pdev->device == 0x99A0)) { | ||
| 472 | rdev->config.cayman.max_simds_per_se = 3; | 485 | rdev->config.cayman.max_simds_per_se = 3; |
| 473 | rdev->config.cayman.max_backends_per_se = 1; | 486 | rdev->config.cayman.max_backends_per_se = 1; |
| 474 | } else { | 487 | } else { |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 45cfcea63507..f30dc95f83b1 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -2426,6 +2426,12 @@ int r600_startup(struct radeon_device *rdev) | |||
| 2426 | if (r) | 2426 | if (r) |
| 2427 | return r; | 2427 | return r; |
| 2428 | 2428 | ||
| 2429 | r = r600_audio_init(rdev); | ||
| 2430 | if (r) { | ||
| 2431 | DRM_ERROR("radeon: audio init failed\n"); | ||
| 2432 | return r; | ||
| 2433 | } | ||
| 2434 | |||
| 2429 | return 0; | 2435 | return 0; |
| 2430 | } | 2436 | } |
| 2431 | 2437 | ||
| @@ -2462,12 +2468,6 @@ int r600_resume(struct radeon_device *rdev) | |||
| 2462 | return r; | 2468 | return r; |
| 2463 | } | 2469 | } |
| 2464 | 2470 | ||
| 2465 | r = r600_audio_init(rdev); | ||
| 2466 | if (r) { | ||
| 2467 | DRM_ERROR("radeon: audio resume failed\n"); | ||
| 2468 | return r; | ||
| 2469 | } | ||
| 2470 | |||
| 2471 | return r; | 2471 | return r; |
| 2472 | } | 2472 | } |
| 2473 | 2473 | ||
| @@ -2577,9 +2577,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 2577 | rdev->accel_working = false; | 2577 | rdev->accel_working = false; |
| 2578 | } | 2578 | } |
| 2579 | 2579 | ||
| 2580 | r = r600_audio_init(rdev); | ||
| 2581 | if (r) | ||
| 2582 | return r; /* TODO error handling */ | ||
| 2583 | return 0; | 2580 | return 0; |
| 2584 | } | 2581 | } |
| 2585 | 2582 | ||
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 7c4fa77f018f..7479a5c503e4 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
| @@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
| 192 | struct radeon_device *rdev = dev->dev_private; | 192 | struct radeon_device *rdev = dev->dev_private; |
| 193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 194 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 194 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 195 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
| 195 | int base_rate = 48000; | 196 | int base_rate = 48000; |
| 196 | 197 | ||
| 197 | switch (radeon_encoder->encoder_id) { | 198 | switch (radeon_encoder->encoder_id) { |
| @@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
| 217 | WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); | 218 | WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); |
| 218 | WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); | 219 | WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); |
| 219 | 220 | ||
| 220 | /* Some magic trigger or src sel? */ | 221 | /* Select DTO source */ |
| 221 | WREG32_P(0x5ac, 0x01, ~0x77); | 222 | WREG32(0x5ac, radeon_crtc->crtc_id); |
| 222 | } else { | 223 | } else { |
| 223 | switch (dig->dig_encoder) { | 224 | switch (dig->dig_encoder) { |
| 224 | case 0: | 225 | case 0: |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 226379e00ac1..969c27529dfe 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
| @@ -348,7 +348,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
| 348 | WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, | 348 | WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, |
| 349 | HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ | 349 | HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ |
| 350 | HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ | 350 | HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ |
| 351 | HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */ | ||
| 352 | HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ | 351 | HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ |
| 353 | HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ | 352 | HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ |
| 354 | } | 353 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 85dac33e3cce..fefcca55c1eb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -1374,9 +1374,9 @@ struct cayman_asic { | |||
| 1374 | 1374 | ||
| 1375 | struct si_asic { | 1375 | struct si_asic { |
| 1376 | unsigned max_shader_engines; | 1376 | unsigned max_shader_engines; |
| 1377 | unsigned max_pipes_per_simd; | ||
| 1378 | unsigned max_tile_pipes; | 1377 | unsigned max_tile_pipes; |
| 1379 | unsigned max_simds_per_se; | 1378 | unsigned max_cu_per_sh; |
| 1379 | unsigned max_sh_per_se; | ||
| 1380 | unsigned max_backends_per_se; | 1380 | unsigned max_backends_per_se; |
| 1381 | unsigned max_texture_channel_caches; | 1381 | unsigned max_texture_channel_caches; |
| 1382 | unsigned max_gprs; | 1382 | unsigned max_gprs; |
| @@ -1387,7 +1387,6 @@ struct si_asic { | |||
| 1387 | unsigned sc_hiz_tile_fifo_size; | 1387 | unsigned sc_hiz_tile_fifo_size; |
| 1388 | unsigned sc_earlyz_tile_fifo_size; | 1388 | unsigned sc_earlyz_tile_fifo_size; |
| 1389 | 1389 | ||
| 1390 | unsigned num_shader_engines; | ||
| 1391 | unsigned num_tile_pipes; | 1390 | unsigned num_tile_pipes; |
| 1392 | unsigned num_backends_per_se; | 1391 | unsigned num_backends_per_se; |
| 1393 | unsigned backend_disable_mask_per_asic; | 1392 | unsigned backend_disable_mask_per_asic; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f0bb2b543b13..03e5f5df40f1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -57,9 +57,10 @@ | |||
| 57 | * 2.13.0 - virtual memory support, streamout | 57 | * 2.13.0 - virtual memory support, streamout |
| 58 | * 2.14.0 - add evergreen tiling informations | 58 | * 2.14.0 - add evergreen tiling informations |
| 59 | * 2.15.0 - add max_pipes query | 59 | * 2.15.0 - add max_pipes query |
| 60 | * 2.16.0 - fix evergreen 2D tiled surface calculation | ||
| 60 | */ | 61 | */ |
| 61 | #define KMS_DRIVER_MAJOR 2 | 62 | #define KMS_DRIVER_MAJOR 2 |
| 62 | #define KMS_DRIVER_MINOR 15 | 63 | #define KMS_DRIVER_MINOR 16 |
| 63 | #define KMS_DRIVER_PATCHLEVEL 0 | 64 | #define KMS_DRIVER_PATCHLEVEL 0 |
| 64 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 65 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 65 | int radeon_driver_unload_kms(struct drm_device *dev); | 66 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 79db56e6c2ac..59d44937dd9f 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev, | |||
| 476 | 476 | ||
| 477 | mutex_lock(&vm->mutex); | 477 | mutex_lock(&vm->mutex); |
| 478 | if (last_pfn > vm->last_pfn) { | 478 | if (last_pfn > vm->last_pfn) { |
| 479 | /* grow va space 32M by 32M */ | 479 | /* release mutex and lock in right order */ |
| 480 | unsigned align = ((32 << 20) >> 12) - 1; | 480 | mutex_unlock(&vm->mutex); |
| 481 | radeon_mutex_lock(&rdev->cs_mutex); | 481 | radeon_mutex_lock(&rdev->cs_mutex); |
| 482 | radeon_vm_unbind_locked(rdev, vm); | 482 | mutex_lock(&vm->mutex); |
| 483 | /* and check again */ | ||
| 484 | if (last_pfn > vm->last_pfn) { | ||
| 485 | /* grow va space 32M by 32M */ | ||
| 486 | unsigned align = ((32 << 20) >> 12) - 1; | ||
| 487 | radeon_vm_unbind_locked(rdev, vm); | ||
| 488 | vm->last_pfn = (last_pfn + align) & ~align; | ||
| 489 | } | ||
| 483 | radeon_mutex_unlock(&rdev->cs_mutex); | 490 | radeon_mutex_unlock(&rdev->cs_mutex); |
| 484 | vm->last_pfn = (last_pfn + align) & ~align; | ||
| 485 | } | 491 | } |
| 486 | head = &vm->va; | 492 | head = &vm->va; |
| 487 | last_offset = 0; | 493 | last_offset = 0; |
| @@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, | |||
| 595 | if (bo_va == NULL) | 601 | if (bo_va == NULL) |
| 596 | return 0; | 602 | return 0; |
| 597 | 603 | ||
| 598 | mutex_lock(&vm->mutex); | ||
| 599 | radeon_mutex_lock(&rdev->cs_mutex); | 604 | radeon_mutex_lock(&rdev->cs_mutex); |
| 605 | mutex_lock(&vm->mutex); | ||
| 600 | radeon_vm_bo_update_pte(rdev, vm, bo, NULL); | 606 | radeon_vm_bo_update_pte(rdev, vm, bo, NULL); |
| 601 | radeon_mutex_unlock(&rdev->cs_mutex); | 607 | radeon_mutex_unlock(&rdev->cs_mutex); |
| 602 | list_del(&bo_va->vm_list); | 608 | list_del(&bo_va->vm_list); |
| @@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) | |||
| 641 | struct radeon_bo_va *bo_va, *tmp; | 647 | struct radeon_bo_va *bo_va, *tmp; |
| 642 | int r; | 648 | int r; |
| 643 | 649 | ||
| 644 | mutex_lock(&vm->mutex); | ||
| 645 | |||
| 646 | radeon_mutex_lock(&rdev->cs_mutex); | 650 | radeon_mutex_lock(&rdev->cs_mutex); |
| 651 | mutex_lock(&vm->mutex); | ||
| 647 | radeon_vm_unbind_locked(rdev, vm); | 652 | radeon_vm_unbind_locked(rdev, vm); |
| 648 | radeon_mutex_unlock(&rdev->cs_mutex); | 653 | radeon_mutex_unlock(&rdev->cs_mutex); |
| 649 | 654 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index f1016a5820d1..5c58d7d90cb2 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 273 | break; | 273 | break; |
| 274 | case RADEON_INFO_MAX_PIPES: | 274 | case RADEON_INFO_MAX_PIPES: |
| 275 | if (rdev->family >= CHIP_TAHITI) | 275 | if (rdev->family >= CHIP_TAHITI) |
| 276 | value = rdev->config.si.max_pipes_per_simd; | 276 | value = rdev->config.si.max_cu_per_sh; |
| 277 | else if (rdev->family >= CHIP_CAYMAN) | 277 | else if (rdev->family >= CHIP_CAYMAN) |
| 278 | value = rdev->config.cayman.max_pipes_per_simd; | 278 | value = rdev->config.cayman.max_pipes_per_simd; |
| 279 | else if (rdev->family >= CHIP_CEDAR) | 279 | else if (rdev->family >= CHIP_CEDAR) |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 25f9eef12c42..e95c5e61d4e2 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev) | |||
| 908 | return r; | 908 | return r; |
| 909 | } | 909 | } |
| 910 | 910 | ||
| 911 | r = r600_audio_init(rdev); | ||
| 912 | if (r) { | ||
| 913 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
| 914 | return r; | ||
| 915 | } | ||
| 916 | |||
| 917 | r = radeon_ib_pool_start(rdev); | 911 | r = radeon_ib_pool_start(rdev); |
| 918 | if (r) | 912 | if (r) |
| 919 | return r; | 913 | return r; |
| @@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev) | |||
| 922 | if (r) | 916 | if (r) |
| 923 | return r; | 917 | return r; |
| 924 | 918 | ||
| 919 | r = r600_audio_init(rdev); | ||
| 920 | if (r) { | ||
| 921 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
| 922 | return r; | ||
| 923 | } | ||
| 924 | |||
| 925 | return 0; | 925 | return 0; |
| 926 | } | 926 | } |
| 927 | 927 | ||
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3277ddecfe9f..159b6a43fda0 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev) | |||
| 637 | return r; | 637 | return r; |
| 638 | } | 638 | } |
| 639 | 639 | ||
| 640 | r = r600_audio_init(rdev); | ||
| 641 | if (r) { | ||
| 642 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
| 643 | return r; | ||
| 644 | } | ||
| 645 | |||
| 646 | r = radeon_ib_pool_start(rdev); | 640 | r = radeon_ib_pool_start(rdev); |
| 647 | if (r) | 641 | if (r) |
| 648 | return r; | 642 | return r; |
| @@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev) | |||
| 651 | if (r) | 645 | if (r) |
| 652 | return r; | 646 | return r; |
| 653 | 647 | ||
| 648 | r = r600_audio_init(rdev); | ||
| 649 | if (r) { | ||
| 650 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
| 651 | return r; | ||
| 652 | } | ||
| 653 | |||
| 654 | return 0; | 654 | return 0; |
| 655 | } | 655 | } |
| 656 | 656 | ||
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 04ddc365a908..4ad0281fdc37 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -956,6 +956,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 956 | if (r) | 956 | if (r) |
| 957 | return r; | 957 | return r; |
| 958 | 958 | ||
| 959 | r = r600_audio_init(rdev); | ||
| 960 | if (r) { | ||
| 961 | DRM_ERROR("radeon: audio init failed\n"); | ||
| 962 | return r; | ||
| 963 | } | ||
| 964 | |||
| 959 | return 0; | 965 | return 0; |
| 960 | } | 966 | } |
| 961 | 967 | ||
| @@ -978,12 +984,6 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 978 | return r; | 984 | return r; |
| 979 | } | 985 | } |
| 980 | 986 | ||
| 981 | r = r600_audio_init(rdev); | ||
| 982 | if (r) { | ||
| 983 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
| 984 | return r; | ||
| 985 | } | ||
| 986 | |||
| 987 | return r; | 987 | return r; |
| 988 | 988 | ||
| 989 | } | 989 | } |
| @@ -1092,12 +1092,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1092 | rdev->accel_working = false; | 1092 | rdev->accel_working = false; |
| 1093 | } | 1093 | } |
| 1094 | 1094 | ||
| 1095 | r = r600_audio_init(rdev); | ||
| 1096 | if (r) { | ||
| 1097 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
| 1098 | return r; | ||
| 1099 | } | ||
| 1100 | |||
| 1101 | return 0; | 1095 | return 0; |
| 1102 | } | 1096 | } |
| 1103 | 1097 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 549732e56ca9..c7b61f16ecfd 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev) | |||
| 867 | /* | 867 | /* |
| 868 | * Core functions | 868 | * Core functions |
| 869 | */ | 869 | */ |
| 870 | static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | ||
| 871 | u32 num_tile_pipes, | ||
| 872 | u32 num_backends_per_asic, | ||
| 873 | u32 *backend_disable_mask_per_asic, | ||
| 874 | u32 num_shader_engines) | ||
| 875 | { | ||
| 876 | u32 backend_map = 0; | ||
| 877 | u32 enabled_backends_mask = 0; | ||
| 878 | u32 enabled_backends_count = 0; | ||
| 879 | u32 num_backends_per_se; | ||
| 880 | u32 cur_pipe; | ||
| 881 | u32 swizzle_pipe[SI_MAX_PIPES]; | ||
| 882 | u32 cur_backend = 0; | ||
| 883 | u32 i; | ||
| 884 | bool force_no_swizzle; | ||
| 885 | |||
| 886 | /* force legal values */ | ||
| 887 | if (num_tile_pipes < 1) | ||
| 888 | num_tile_pipes = 1; | ||
| 889 | if (num_tile_pipes > rdev->config.si.max_tile_pipes) | ||
| 890 | num_tile_pipes = rdev->config.si.max_tile_pipes; | ||
| 891 | if (num_shader_engines < 1) | ||
| 892 | num_shader_engines = 1; | ||
| 893 | if (num_shader_engines > rdev->config.si.max_shader_engines) | ||
| 894 | num_shader_engines = rdev->config.si.max_shader_engines; | ||
| 895 | if (num_backends_per_asic < num_shader_engines) | ||
| 896 | num_backends_per_asic = num_shader_engines; | ||
| 897 | if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines)) | ||
| 898 | num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines; | ||
| 899 | |||
| 900 | /* make sure we have the same number of backends per se */ | ||
| 901 | num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines); | ||
| 902 | /* set up the number of backends per se */ | ||
| 903 | num_backends_per_se = num_backends_per_asic / num_shader_engines; | ||
| 904 | if (num_backends_per_se > rdev->config.si.max_backends_per_se) { | ||
| 905 | num_backends_per_se = rdev->config.si.max_backends_per_se; | ||
| 906 | num_backends_per_asic = num_backends_per_se * num_shader_engines; | ||
| 907 | } | ||
| 908 | |||
| 909 | /* create enable mask and count for enabled backends */ | ||
| 910 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
| 911 | if (((*backend_disable_mask_per_asic >> i) & 1) == 0) { | ||
| 912 | enabled_backends_mask |= (1 << i); | ||
| 913 | ++enabled_backends_count; | ||
| 914 | } | ||
| 915 | if (enabled_backends_count == num_backends_per_asic) | ||
| 916 | break; | ||
| 917 | } | ||
| 918 | |||
| 919 | /* force the backends mask to match the current number of backends */ | ||
| 920 | if (enabled_backends_count != num_backends_per_asic) { | ||
| 921 | u32 this_backend_enabled; | ||
| 922 | u32 shader_engine; | ||
| 923 | u32 backend_per_se; | ||
| 924 | |||
| 925 | enabled_backends_mask = 0; | ||
| 926 | enabled_backends_count = 0; | ||
| 927 | *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK; | ||
| 928 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
| 929 | /* calc the current se */ | ||
| 930 | shader_engine = i / rdev->config.si.max_backends_per_se; | ||
| 931 | /* calc the backend per se */ | ||
| 932 | backend_per_se = i % rdev->config.si.max_backends_per_se; | ||
| 933 | /* default to not enabled */ | ||
| 934 | this_backend_enabled = 0; | ||
| 935 | if ((shader_engine < num_shader_engines) && | ||
| 936 | (backend_per_se < num_backends_per_se)) | ||
| 937 | this_backend_enabled = 1; | ||
| 938 | if (this_backend_enabled) { | ||
| 939 | enabled_backends_mask |= (1 << i); | ||
| 940 | *backend_disable_mask_per_asic &= ~(1 << i); | ||
| 941 | ++enabled_backends_count; | ||
| 942 | } | ||
| 943 | } | ||
| 944 | } | ||
| 945 | |||
| 946 | |||
| 947 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES); | ||
| 948 | switch (rdev->family) { | ||
| 949 | case CHIP_TAHITI: | ||
| 950 | case CHIP_PITCAIRN: | ||
| 951 | case CHIP_VERDE: | ||
| 952 | force_no_swizzle = true; | ||
| 953 | break; | ||
| 954 | default: | ||
| 955 | force_no_swizzle = false; | ||
| 956 | break; | ||
| 957 | } | ||
| 958 | if (force_no_swizzle) { | ||
| 959 | bool last_backend_enabled = false; | ||
| 960 | |||
| 961 | force_no_swizzle = false; | ||
| 962 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
| 963 | if (((enabled_backends_mask >> i) & 1) == 1) { | ||
| 964 | if (last_backend_enabled) | ||
| 965 | force_no_swizzle = true; | ||
| 966 | last_backend_enabled = true; | ||
| 967 | } else | ||
| 968 | last_backend_enabled = false; | ||
| 969 | } | ||
| 970 | } | ||
| 971 | |||
| 972 | switch (num_tile_pipes) { | ||
| 973 | case 1: | ||
| 974 | case 3: | ||
| 975 | case 5: | ||
| 976 | case 7: | ||
| 977 | DRM_ERROR("odd number of pipes!\n"); | ||
| 978 | break; | ||
| 979 | case 2: | ||
| 980 | swizzle_pipe[0] = 0; | ||
| 981 | swizzle_pipe[1] = 1; | ||
| 982 | break; | ||
| 983 | case 4: | ||
| 984 | if (force_no_swizzle) { | ||
| 985 | swizzle_pipe[0] = 0; | ||
| 986 | swizzle_pipe[1] = 1; | ||
| 987 | swizzle_pipe[2] = 2; | ||
| 988 | swizzle_pipe[3] = 3; | ||
| 989 | } else { | ||
| 990 | swizzle_pipe[0] = 0; | ||
| 991 | swizzle_pipe[1] = 2; | ||
| 992 | swizzle_pipe[2] = 1; | ||
| 993 | swizzle_pipe[3] = 3; | ||
| 994 | } | ||
| 995 | break; | ||
| 996 | case 6: | ||
| 997 | if (force_no_swizzle) { | ||
| 998 | swizzle_pipe[0] = 0; | ||
| 999 | swizzle_pipe[1] = 1; | ||
| 1000 | swizzle_pipe[2] = 2; | ||
| 1001 | swizzle_pipe[3] = 3; | ||
| 1002 | swizzle_pipe[4] = 4; | ||
| 1003 | swizzle_pipe[5] = 5; | ||
| 1004 | } else { | ||
| 1005 | swizzle_pipe[0] = 0; | ||
| 1006 | swizzle_pipe[1] = 2; | ||
| 1007 | swizzle_pipe[2] = 4; | ||
| 1008 | swizzle_pipe[3] = 1; | ||
| 1009 | swizzle_pipe[4] = 3; | ||
| 1010 | swizzle_pipe[5] = 5; | ||
| 1011 | } | ||
| 1012 | break; | ||
| 1013 | case 8: | ||
| 1014 | if (force_no_swizzle) { | ||
| 1015 | swizzle_pipe[0] = 0; | ||
| 1016 | swizzle_pipe[1] = 1; | ||
| 1017 | swizzle_pipe[2] = 2; | ||
| 1018 | swizzle_pipe[3] = 3; | ||
| 1019 | swizzle_pipe[4] = 4; | ||
| 1020 | swizzle_pipe[5] = 5; | ||
| 1021 | swizzle_pipe[6] = 6; | ||
| 1022 | swizzle_pipe[7] = 7; | ||
| 1023 | } else { | ||
| 1024 | swizzle_pipe[0] = 0; | ||
| 1025 | swizzle_pipe[1] = 2; | ||
| 1026 | swizzle_pipe[2] = 4; | ||
| 1027 | swizzle_pipe[3] = 6; | ||
| 1028 | swizzle_pipe[4] = 1; | ||
| 1029 | swizzle_pipe[5] = 3; | ||
| 1030 | swizzle_pipe[6] = 5; | ||
| 1031 | swizzle_pipe[7] = 7; | ||
| 1032 | } | ||
| 1033 | break; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | ||
| 1037 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | ||
| 1038 | cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; | ||
| 1039 | |||
| 1040 | backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); | ||
| 1041 | |||
| 1042 | cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | return backend_map; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev, | ||
| 1049 | u32 disable_mask_per_se, | ||
| 1050 | u32 max_disable_mask_per_se, | ||
| 1051 | u32 num_shader_engines) | ||
| 1052 | { | ||
| 1053 | u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); | ||
| 1054 | u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; | ||
| 1055 | |||
| 1056 | if (num_shader_engines == 1) | ||
| 1057 | return disable_mask_per_asic; | ||
| 1058 | else if (num_shader_engines == 2) | ||
| 1059 | return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); | ||
| 1060 | else | ||
| 1061 | return 0xffffffff; | ||
| 1062 | } | ||
| 1063 | |||
| 1064 | static void si_tiling_mode_table_init(struct radeon_device *rdev) | 870 | static void si_tiling_mode_table_init(struct radeon_device *rdev) |
| 1065 | { | 871 | { |
| 1066 | const u32 num_tile_mode_states = 32; | 872 | const u32 num_tile_mode_states = 32; |
| @@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev) | |||
| 1562 | DRM_ERROR("unknown asic: 0x%x\n", rdev->family); | 1368 | DRM_ERROR("unknown asic: 0x%x\n", rdev->family); |
| 1563 | } | 1369 | } |
| 1564 | 1370 | ||
| 1371 | static void si_select_se_sh(struct radeon_device *rdev, | ||
| 1372 | u32 se_num, u32 sh_num) | ||
| 1373 | { | ||
| 1374 | u32 data = INSTANCE_BROADCAST_WRITES; | ||
| 1375 | |||
| 1376 | if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) | ||
| 1377 | data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; | ||
| 1378 | else if (se_num == 0xffffffff) | ||
| 1379 | data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); | ||
| 1380 | else if (sh_num == 0xffffffff) | ||
| 1381 | data |= SH_BROADCAST_WRITES | SE_INDEX(se_num); | ||
| 1382 | else | ||
| 1383 | data |= SH_INDEX(sh_num) | SE_INDEX(se_num); | ||
| 1384 | WREG32(GRBM_GFX_INDEX, data); | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | static u32 si_create_bitmask(u32 bit_width) | ||
| 1388 | { | ||
| 1389 | u32 i, mask = 0; | ||
| 1390 | |||
| 1391 | for (i = 0; i < bit_width; i++) { | ||
| 1392 | mask <<= 1; | ||
| 1393 | mask |= 1; | ||
| 1394 | } | ||
| 1395 | return mask; | ||
| 1396 | } | ||
| 1397 | |||
| 1398 | static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh) | ||
| 1399 | { | ||
| 1400 | u32 data, mask; | ||
| 1401 | |||
| 1402 | data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); | ||
| 1403 | if (data & 1) | ||
| 1404 | data &= INACTIVE_CUS_MASK; | ||
| 1405 | else | ||
| 1406 | data = 0; | ||
| 1407 | data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); | ||
| 1408 | |||
| 1409 | data >>= INACTIVE_CUS_SHIFT; | ||
| 1410 | |||
| 1411 | mask = si_create_bitmask(cu_per_sh); | ||
| 1412 | |||
| 1413 | return ~data & mask; | ||
| 1414 | } | ||
| 1415 | |||
| 1416 | static void si_setup_spi(struct radeon_device *rdev, | ||
| 1417 | u32 se_num, u32 sh_per_se, | ||
| 1418 | u32 cu_per_sh) | ||
| 1419 | { | ||
| 1420 | int i, j, k; | ||
| 1421 | u32 data, mask, active_cu; | ||
| 1422 | |||
| 1423 | for (i = 0; i < se_num; i++) { | ||
| 1424 | for (j = 0; j < sh_per_se; j++) { | ||
| 1425 | si_select_se_sh(rdev, i, j); | ||
| 1426 | data = RREG32(SPI_STATIC_THREAD_MGMT_3); | ||
| 1427 | active_cu = si_get_cu_enabled(rdev, cu_per_sh); | ||
| 1428 | |||
| 1429 | mask = 1; | ||
| 1430 | for (k = 0; k < 16; k++) { | ||
| 1431 | mask <<= k; | ||
| 1432 | if (active_cu & mask) { | ||
| 1433 | data &= ~mask; | ||
| 1434 | WREG32(SPI_STATIC_THREAD_MGMT_3, data); | ||
| 1435 | break; | ||
| 1436 | } | ||
| 1437 | } | ||
| 1438 | } | ||
| 1439 | } | ||
| 1440 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
| 1441 | } | ||
| 1442 | |||
| 1443 | static u32 si_get_rb_disabled(struct radeon_device *rdev, | ||
| 1444 | u32 max_rb_num, u32 se_num, | ||
| 1445 | u32 sh_per_se) | ||
| 1446 | { | ||
| 1447 | u32 data, mask; | ||
| 1448 | |||
| 1449 | data = RREG32(CC_RB_BACKEND_DISABLE); | ||
| 1450 | if (data & 1) | ||
| 1451 | data &= BACKEND_DISABLE_MASK; | ||
| 1452 | else | ||
| 1453 | data = 0; | ||
| 1454 | data |= RREG32(GC_USER_RB_BACKEND_DISABLE); | ||
| 1455 | |||
| 1456 | data >>= BACKEND_DISABLE_SHIFT; | ||
| 1457 | |||
| 1458 | mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); | ||
| 1459 | |||
| 1460 | return data & mask; | ||
| 1461 | } | ||
| 1462 | |||
| 1463 | static void si_setup_rb(struct radeon_device *rdev, | ||
| 1464 | u32 se_num, u32 sh_per_se, | ||
| 1465 | u32 max_rb_num) | ||
| 1466 | { | ||
| 1467 | int i, j; | ||
| 1468 | u32 data, mask; | ||
| 1469 | u32 disabled_rbs = 0; | ||
| 1470 | u32 enabled_rbs = 0; | ||
| 1471 | |||
| 1472 | for (i = 0; i < se_num; i++) { | ||
| 1473 | for (j = 0; j < sh_per_se; j++) { | ||
| 1474 | si_select_se_sh(rdev, i, j); | ||
| 1475 | data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | ||
| 1476 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); | ||
| 1477 | } | ||
| 1478 | } | ||
| 1479 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
| 1480 | |||
| 1481 | mask = 1; | ||
| 1482 | for (i = 0; i < max_rb_num; i++) { | ||
| 1483 | if (!(disabled_rbs & mask)) | ||
| 1484 | enabled_rbs |= mask; | ||
| 1485 | mask <<= 1; | ||
| 1486 | } | ||
| 1487 | |||
| 1488 | for (i = 0; i < se_num; i++) { | ||
| 1489 | si_select_se_sh(rdev, i, 0xffffffff); | ||
| 1490 | data = 0; | ||
| 1491 | for (j = 0; j < sh_per_se; j++) { | ||
| 1492 | switch (enabled_rbs & 3) { | ||
| 1493 | case 1: | ||
| 1494 | data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); | ||
| 1495 | break; | ||
| 1496 | case 2: | ||
| 1497 | data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); | ||
| 1498 | break; | ||
| 1499 | case 3: | ||
| 1500 | default: | ||
| 1501 | data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); | ||
| 1502 | break; | ||
| 1503 | } | ||
| 1504 | enabled_rbs >>= 2; | ||
| 1505 | } | ||
| 1506 | WREG32(PA_SC_RASTER_CONFIG, data); | ||
| 1507 | } | ||
| 1508 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
| 1509 | } | ||
| 1510 | |||
| 1565 | static void si_gpu_init(struct radeon_device *rdev) | 1511 | static void si_gpu_init(struct radeon_device *rdev) |
| 1566 | { | 1512 | { |
| 1567 | u32 cc_rb_backend_disable = 0; | ||
| 1568 | u32 cc_gc_shader_array_config; | ||
| 1569 | u32 gb_addr_config = 0; | 1513 | u32 gb_addr_config = 0; |
| 1570 | u32 mc_shared_chmap, mc_arb_ramcfg; | 1514 | u32 mc_shared_chmap, mc_arb_ramcfg; |
| 1571 | u32 gb_backend_map; | ||
| 1572 | u32 cgts_tcc_disable; | ||
| 1573 | u32 sx_debug_1; | 1515 | u32 sx_debug_1; |
| 1574 | u32 gc_user_shader_array_config; | ||
| 1575 | u32 gc_user_rb_backend_disable; | ||
| 1576 | u32 cgts_user_tcc_disable; | ||
| 1577 | u32 hdp_host_path_cntl; | 1516 | u32 hdp_host_path_cntl; |
| 1578 | u32 tmp; | 1517 | u32 tmp; |
| 1579 | int i, j; | 1518 | int i, j; |
| @@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1581 | switch (rdev->family) { | 1520 | switch (rdev->family) { |
| 1582 | case CHIP_TAHITI: | 1521 | case CHIP_TAHITI: |
| 1583 | rdev->config.si.max_shader_engines = 2; | 1522 | rdev->config.si.max_shader_engines = 2; |
| 1584 | rdev->config.si.max_pipes_per_simd = 4; | ||
| 1585 | rdev->config.si.max_tile_pipes = 12; | 1523 | rdev->config.si.max_tile_pipes = 12; |
| 1586 | rdev->config.si.max_simds_per_se = 8; | 1524 | rdev->config.si.max_cu_per_sh = 8; |
| 1525 | rdev->config.si.max_sh_per_se = 2; | ||
| 1587 | rdev->config.si.max_backends_per_se = 4; | 1526 | rdev->config.si.max_backends_per_se = 4; |
| 1588 | rdev->config.si.max_texture_channel_caches = 12; | 1527 | rdev->config.si.max_texture_channel_caches = 12; |
| 1589 | rdev->config.si.max_gprs = 256; | 1528 | rdev->config.si.max_gprs = 256; |
| @@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1594 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | 1533 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; |
| 1595 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1534 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
| 1596 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1535 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
| 1536 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; | ||
| 1597 | break; | 1537 | break; |
| 1598 | case CHIP_PITCAIRN: | 1538 | case CHIP_PITCAIRN: |
| 1599 | rdev->config.si.max_shader_engines = 2; | 1539 | rdev->config.si.max_shader_engines = 2; |
| 1600 | rdev->config.si.max_pipes_per_simd = 4; | ||
| 1601 | rdev->config.si.max_tile_pipes = 8; | 1540 | rdev->config.si.max_tile_pipes = 8; |
| 1602 | rdev->config.si.max_simds_per_se = 5; | 1541 | rdev->config.si.max_cu_per_sh = 5; |
| 1542 | rdev->config.si.max_sh_per_se = 2; | ||
| 1603 | rdev->config.si.max_backends_per_se = 4; | 1543 | rdev->config.si.max_backends_per_se = 4; |
| 1604 | rdev->config.si.max_texture_channel_caches = 8; | 1544 | rdev->config.si.max_texture_channel_caches = 8; |
| 1605 | rdev->config.si.max_gprs = 256; | 1545 | rdev->config.si.max_gprs = 256; |
| @@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1610 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | 1550 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; |
| 1611 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1551 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
| 1612 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1552 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
| 1553 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; | ||
| 1613 | break; | 1554 | break; |
| 1614 | case CHIP_VERDE: | 1555 | case CHIP_VERDE: |
| 1615 | default: | 1556 | default: |
| 1616 | rdev->config.si.max_shader_engines = 1; | 1557 | rdev->config.si.max_shader_engines = 1; |
| 1617 | rdev->config.si.max_pipes_per_simd = 4; | ||
| 1618 | rdev->config.si.max_tile_pipes = 4; | 1558 | rdev->config.si.max_tile_pipes = 4; |
| 1619 | rdev->config.si.max_simds_per_se = 2; | 1559 | rdev->config.si.max_cu_per_sh = 2; |
| 1560 | rdev->config.si.max_sh_per_se = 2; | ||
| 1620 | rdev->config.si.max_backends_per_se = 4; | 1561 | rdev->config.si.max_backends_per_se = 4; |
| 1621 | rdev->config.si.max_texture_channel_caches = 4; | 1562 | rdev->config.si.max_texture_channel_caches = 4; |
| 1622 | rdev->config.si.max_gprs = 256; | 1563 | rdev->config.si.max_gprs = 256; |
| @@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1627 | rdev->config.si.sc_prim_fifo_size_backend = 0x40; | 1568 | rdev->config.si.sc_prim_fifo_size_backend = 0x40; |
| 1628 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1569 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
| 1629 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1570 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
| 1571 | gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; | ||
| 1630 | break; | 1572 | break; |
| 1631 | } | 1573 | } |
| 1632 | 1574 | ||
| @@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1648 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 1590 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
| 1649 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 1591 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
| 1650 | 1592 | ||
| 1651 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); | ||
| 1652 | cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG); | ||
| 1653 | cgts_tcc_disable = 0xffff0000; | ||
| 1654 | for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++) | ||
| 1655 | cgts_tcc_disable &= ~(1 << (16 + i)); | ||
| 1656 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); | ||
| 1657 | gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG); | ||
| 1658 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); | ||
| 1659 | |||
| 1660 | rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines; | ||
| 1661 | rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; | 1593 | rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; |
| 1662 | tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
| 1663 | rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp); | ||
| 1664 | tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
| 1665 | rdev->config.si.backend_disable_mask_per_asic = | ||
| 1666 | si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK, | ||
| 1667 | rdev->config.si.num_shader_engines); | ||
| 1668 | rdev->config.si.backend_map = | ||
| 1669 | si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, | ||
| 1670 | rdev->config.si.num_backends_per_se * | ||
| 1671 | rdev->config.si.num_shader_engines, | ||
| 1672 | &rdev->config.si.backend_disable_mask_per_asic, | ||
| 1673 | rdev->config.si.num_shader_engines); | ||
| 1674 | tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT; | ||
| 1675 | rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp); | ||
| 1676 | rdev->config.si.mem_max_burst_length_bytes = 256; | 1594 | rdev->config.si.mem_max_burst_length_bytes = 256; |
| 1677 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; | 1595 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; |
| 1678 | rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; | 1596 | rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; |
| @@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1683 | rdev->config.si.num_gpus = 1; | 1601 | rdev->config.si.num_gpus = 1; |
| 1684 | rdev->config.si.multi_gpu_tile_size = 64; | 1602 | rdev->config.si.multi_gpu_tile_size = 64; |
| 1685 | 1603 | ||
| 1686 | gb_addr_config = 0; | 1604 | /* fix up row size */ |
| 1687 | switch (rdev->config.si.num_tile_pipes) { | 1605 | gb_addr_config &= ~ROW_SIZE_MASK; |
| 1688 | case 1: | ||
| 1689 | gb_addr_config |= NUM_PIPES(0); | ||
| 1690 | break; | ||
| 1691 | case 2: | ||
| 1692 | gb_addr_config |= NUM_PIPES(1); | ||
| 1693 | break; | ||
| 1694 | case 4: | ||
| 1695 | gb_addr_config |= NUM_PIPES(2); | ||
| 1696 | break; | ||
| 1697 | case 8: | ||
| 1698 | default: | ||
| 1699 | gb_addr_config |= NUM_PIPES(3); | ||
| 1700 | break; | ||
| 1701 | } | ||
| 1702 | |||
| 1703 | tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1; | ||
| 1704 | gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp); | ||
| 1705 | gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1); | ||
| 1706 | tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1; | ||
| 1707 | gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp); | ||
| 1708 | switch (rdev->config.si.num_gpus) { | ||
| 1709 | case 1: | ||
| 1710 | default: | ||
| 1711 | gb_addr_config |= NUM_GPUS(0); | ||
| 1712 | break; | ||
| 1713 | case 2: | ||
| 1714 | gb_addr_config |= NUM_GPUS(1); | ||
| 1715 | break; | ||
| 1716 | case 4: | ||
| 1717 | gb_addr_config |= NUM_GPUS(2); | ||
| 1718 | break; | ||
| 1719 | } | ||
| 1720 | switch (rdev->config.si.multi_gpu_tile_size) { | ||
| 1721 | case 16: | ||
| 1722 | gb_addr_config |= MULTI_GPU_TILE_SIZE(0); | ||
| 1723 | break; | ||
| 1724 | case 32: | ||
| 1725 | default: | ||
| 1726 | gb_addr_config |= MULTI_GPU_TILE_SIZE(1); | ||
| 1727 | break; | ||
| 1728 | case 64: | ||
| 1729 | gb_addr_config |= MULTI_GPU_TILE_SIZE(2); | ||
| 1730 | break; | ||
| 1731 | case 128: | ||
| 1732 | gb_addr_config |= MULTI_GPU_TILE_SIZE(3); | ||
| 1733 | break; | ||
| 1734 | } | ||
| 1735 | switch (rdev->config.si.mem_row_size_in_kb) { | 1606 | switch (rdev->config.si.mem_row_size_in_kb) { |
| 1736 | case 1: | 1607 | case 1: |
| 1737 | default: | 1608 | default: |
| @@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1745 | break; | 1616 | break; |
| 1746 | } | 1617 | } |
| 1747 | 1618 | ||
| 1748 | tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; | ||
| 1749 | rdev->config.si.num_tile_pipes = (1 << tmp); | ||
| 1750 | tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; | ||
| 1751 | rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256; | ||
| 1752 | tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT; | ||
| 1753 | rdev->config.si.num_shader_engines = tmp + 1; | ||
| 1754 | tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT; | ||
| 1755 | rdev->config.si.num_gpus = tmp + 1; | ||
| 1756 | tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT; | ||
| 1757 | rdev->config.si.multi_gpu_tile_size = 1 << tmp; | ||
| 1758 | tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; | ||
| 1759 | rdev->config.si.mem_row_size_in_kb = 1 << tmp; | ||
| 1760 | |||
| 1761 | gb_backend_map = | ||
| 1762 | si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, | ||
| 1763 | rdev->config.si.num_backends_per_se * | ||
| 1764 | rdev->config.si.num_shader_engines, | ||
| 1765 | &rdev->config.si.backend_disable_mask_per_asic, | ||
| 1766 | rdev->config.si.num_shader_engines); | ||
| 1767 | |||
| 1768 | /* setup tiling info dword. gb_addr_config is not adequate since it does | 1619 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
| 1769 | * not have bank info, so create a custom tiling dword. | 1620 | * not have bank info, so create a custom tiling dword. |
| 1770 | * bits 3:0 num_pipes | 1621 | * bits 3:0 num_pipes |
| @@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
| 1789 | rdev->config.si.tile_config |= (3 << 0); | 1640 | rdev->config.si.tile_config |= (3 << 0); |
| 1790 | break; | 1641 | break; |
| 1791 | } | 1642 | } |
| 1792 | rdev->config.si.tile_config |= | 1643 | if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) |
| 1793 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | 1644 | rdev->config.si.tile_config |= 1 << 4; |
| 1645 | else | ||
| 1646 | rdev->config.si.tile_config |= 0 << 4; | ||
| 1794 | rdev->config.si.tile_config |= | 1647 | rdev->config.si.tile_config |= |
| 1795 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; | 1648 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; |
| 1796 | rdev->config.si.tile_config |= | 1649 | rdev->config.si.tile_config |= |
| 1797 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; | 1650 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; |
| 1798 | 1651 | ||
| 1799 | rdev->config.si.backend_map = gb_backend_map; | ||
| 1800 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 1652 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
| 1801 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 1653 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
| 1802 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 1654 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
| 1803 | 1655 | ||
| 1804 | /* primary versions */ | 1656 | si_tiling_mode_table_init(rdev); |
| 1805 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
| 1806 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
| 1807 | WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); | ||
| 1808 | |||
| 1809 | WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); | ||
| 1810 | 1657 | ||
| 1811 | /* user versions */ | 1658 | si_setup_rb(rdev, rdev->config.si.max_shader_engines, |
| 1812 | WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1659 | rdev->config.si.max_sh_per_se, |
| 1813 | WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1660 | rdev->config.si.max_backends_per_se); |
| 1814 | WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); | ||
| 1815 | 1661 | ||
| 1816 | WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); | 1662 | si_setup_spi(rdev, rdev->config.si.max_shader_engines, |
| 1663 | rdev->config.si.max_sh_per_se, | ||
| 1664 | rdev->config.si.max_cu_per_sh); | ||
| 1817 | 1665 | ||
| 1818 | si_tiling_mode_table_init(rdev); | ||
| 1819 | 1666 | ||
| 1820 | /* set HW defaults for 3D engine */ | 1667 | /* set HW defaults for 3D engine */ |
| 1821 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | | 1668 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 53ea2c42dbd6..db4067962868 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
| @@ -24,6 +24,11 @@ | |||
| 24 | #ifndef SI_H | 24 | #ifndef SI_H |
| 25 | #define SI_H | 25 | #define SI_H |
| 26 | 26 | ||
| 27 | #define TAHITI_RB_BITMAP_WIDTH_PER_SH 2 | ||
| 28 | |||
| 29 | #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 | ||
| 30 | #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 | ||
| 31 | |||
| 27 | #define CG_MULT_THERMAL_STATUS 0x714 | 32 | #define CG_MULT_THERMAL_STATUS 0x714 |
| 28 | #define ASIC_MAX_TEMP(x) ((x) << 0) | 33 | #define ASIC_MAX_TEMP(x) ((x) << 0) |
| 29 | #define ASIC_MAX_TEMP_MASK 0x000001ff | 34 | #define ASIC_MAX_TEMP_MASK 0x000001ff |
| @@ -408,6 +413,12 @@ | |||
| 408 | #define SOFT_RESET_IA (1 << 15) | 413 | #define SOFT_RESET_IA (1 << 15) |
| 409 | 414 | ||
| 410 | #define GRBM_GFX_INDEX 0x802C | 415 | #define GRBM_GFX_INDEX 0x802C |
| 416 | #define INSTANCE_INDEX(x) ((x) << 0) | ||
| 417 | #define SH_INDEX(x) ((x) << 8) | ||
| 418 | #define SE_INDEX(x) ((x) << 16) | ||
| 419 | #define SH_BROADCAST_WRITES (1 << 29) | ||
| 420 | #define INSTANCE_BROADCAST_WRITES (1 << 30) | ||
| 421 | #define SE_BROADCAST_WRITES (1 << 31) | ||
| 411 | 422 | ||
| 412 | #define GRBM_INT_CNTL 0x8060 | 423 | #define GRBM_INT_CNTL 0x8060 |
| 413 | # define RDERR_INT_ENABLE (1 << 0) | 424 | # define RDERR_INT_ENABLE (1 << 0) |
| @@ -480,6 +491,8 @@ | |||
| 480 | #define VGT_TF_MEMORY_BASE 0x89B8 | 491 | #define VGT_TF_MEMORY_BASE 0x89B8 |
| 481 | 492 | ||
| 482 | #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc | 493 | #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc |
| 494 | #define INACTIVE_CUS_MASK 0xFFFF0000 | ||
| 495 | #define INACTIVE_CUS_SHIFT 16 | ||
| 483 | #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 | 496 | #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 |
| 484 | 497 | ||
| 485 | #define PA_CL_ENHANCE 0x8A14 | 498 | #define PA_CL_ENHANCE 0x8A14 |
| @@ -688,6 +701,12 @@ | |||
| 688 | #define RLC_MC_CNTL 0xC344 | 701 | #define RLC_MC_CNTL 0xC344 |
| 689 | #define RLC_UCODE_CNTL 0xC348 | 702 | #define RLC_UCODE_CNTL 0xC348 |
| 690 | 703 | ||
| 704 | #define PA_SC_RASTER_CONFIG 0x28350 | ||
| 705 | # define RASTER_CONFIG_RB_MAP_0 0 | ||
| 706 | # define RASTER_CONFIG_RB_MAP_1 1 | ||
| 707 | # define RASTER_CONFIG_RB_MAP_2 2 | ||
| 708 | # define RASTER_CONFIG_RB_MAP_3 3 | ||
| 709 | |||
| 691 | #define VGT_EVENT_INITIATOR 0x28a90 | 710 | #define VGT_EVENT_INITIATOR 0x28a90 |
| 692 | # define SAMPLE_STREAMOUTSTATS1 (1 << 0) | 711 | # define SAMPLE_STREAMOUTSTATS1 (1 << 0) |
| 693 | # define SAMPLE_STREAMOUTSTATS2 (2 << 0) | 712 | # define SAMPLE_STREAMOUTSTATS2 (2 << 0) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b67cfcaa661f..36f4b28c1b90 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
| 1204 | (*destroy)(bo); | 1204 | (*destroy)(bo); |
| 1205 | else | 1205 | else |
| 1206 | kfree(bo); | 1206 | kfree(bo); |
| 1207 | ttm_mem_global_free(mem_glob, acc_size); | ||
| 1207 | return -EINVAL; | 1208 | return -EINVAL; |
| 1208 | } | 1209 | } |
| 1209 | bo->destroy = destroy; | 1210 | bo->destroy = destroy; |
| @@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev, | |||
| 1307 | struct ttm_buffer_object **p_bo) | 1308 | struct ttm_buffer_object **p_bo) |
| 1308 | { | 1309 | { |
| 1309 | struct ttm_buffer_object *bo; | 1310 | struct ttm_buffer_object *bo; |
| 1310 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; | ||
| 1311 | size_t acc_size; | 1311 | size_t acc_size; |
| 1312 | int ret; | 1312 | int ret; |
| 1313 | 1313 | ||
| 1314 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); | ||
| 1315 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); | ||
| 1316 | if (unlikely(ret != 0)) | ||
| 1317 | return ret; | ||
| 1318 | |||
| 1319 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); | 1314 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); |
| 1320 | 1315 | if (unlikely(bo == NULL)) | |
| 1321 | if (unlikely(bo == NULL)) { | ||
| 1322 | ttm_mem_global_free(mem_glob, acc_size); | ||
| 1323 | return -ENOMEM; | 1316 | return -ENOMEM; |
| 1324 | } | ||
| 1325 | 1317 | ||
| 1318 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); | ||
| 1326 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, | 1319 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, |
| 1327 | buffer_start, interruptible, | 1320 | buffer_start, interruptible, |
| 1328 | persistent_swap_storage, acc_size, NULL, NULL); | 1321 | persistent_swap_storage, acc_size, NULL, NULL); |
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 38f9534ac513..5b3c7d135dc9 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
| @@ -190,6 +190,19 @@ find_active_client(struct list_head *head) | |||
| 190 | return NULL; | 190 | return NULL; |
| 191 | } | 191 | } |
| 192 | 192 | ||
| 193 | int vga_switcheroo_get_client_state(struct pci_dev *pdev) | ||
| 194 | { | ||
| 195 | struct vga_switcheroo_client *client; | ||
| 196 | |||
| 197 | client = find_client_from_pci(&vgasr_priv.clients, pdev); | ||
| 198 | if (!client) | ||
| 199 | return VGA_SWITCHEROO_NOT_FOUND; | ||
| 200 | if (!vgasr_priv.active) | ||
| 201 | return VGA_SWITCHEROO_INIT; | ||
| 202 | return client->pwr_state; | ||
| 203 | } | ||
| 204 | EXPORT_SYMBOL(vga_switcheroo_get_client_state); | ||
| 205 | |||
| 193 | void vga_switcheroo_unregister_client(struct pci_dev *pdev) | 206 | void vga_switcheroo_unregister_client(struct pci_dev *pdev) |
| 194 | { | 207 | { |
| 195 | struct vga_switcheroo_client *client; | 208 | struct vga_switcheroo_client *client; |
| @@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) | |||
| 291 | vga_switchon(new_client); | 304 | vga_switchon(new_client); |
| 292 | 305 | ||
| 293 | vga_set_default_device(new_client->pdev); | 306 | vga_set_default_device(new_client->pdev); |
| 294 | set_audio_state(new_client->id, VGA_SWITCHEROO_ON); | ||
| 295 | |||
| 296 | return 0; | 307 | return 0; |
| 297 | } | 308 | } |
| 298 | 309 | ||
| @@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) | |||
| 308 | 319 | ||
| 309 | active->active = false; | 320 | active->active = false; |
| 310 | 321 | ||
| 322 | set_audio_state(active->id, VGA_SWITCHEROO_OFF); | ||
| 323 | |||
| 311 | if (new_client->fb_info) { | 324 | if (new_client->fb_info) { |
| 312 | struct fb_event event; | 325 | struct fb_event event; |
| 313 | event.info = new_client->fb_info; | 326 | event.info = new_client->fb_info; |
| @@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) | |||
| 321 | if (new_client->ops->reprobe) | 334 | if (new_client->ops->reprobe) |
| 322 | new_client->ops->reprobe(new_client->pdev); | 335 | new_client->ops->reprobe(new_client->pdev); |
| 323 | 336 | ||
| 324 | set_audio_state(active->id, VGA_SWITCHEROO_OFF); | ||
| 325 | |||
| 326 | if (active->pwr_state == VGA_SWITCHEROO_ON) | 337 | if (active->pwr_state == VGA_SWITCHEROO_ON) |
| 327 | vga_switchoff(active); | 338 | vga_switchoff(active); |
| 328 | 339 | ||
| 340 | set_audio_state(new_client->id, VGA_SWITCHEROO_ON); | ||
| 341 | |||
| 329 | new_client->active = true; | 342 | new_client->active = true; |
| 330 | return 0; | 343 | return 0; |
| 331 | } | 344 | } |
| @@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
| 371 | /* pwr off the device not in use */ | 384 | /* pwr off the device not in use */ |
| 372 | if (strncmp(usercmd, "OFF", 3) == 0) { | 385 | if (strncmp(usercmd, "OFF", 3) == 0) { |
| 373 | list_for_each_entry(client, &vgasr_priv.clients, list) { | 386 | list_for_each_entry(client, &vgasr_priv.clients, list) { |
| 374 | if (client->active) | 387 | if (client->active || client_is_audio(client)) |
| 375 | continue; | 388 | continue; |
| 389 | set_audio_state(client->id, VGA_SWITCHEROO_OFF); | ||
| 376 | if (client->pwr_state == VGA_SWITCHEROO_ON) | 390 | if (client->pwr_state == VGA_SWITCHEROO_ON) |
| 377 | vga_switchoff(client); | 391 | vga_switchoff(client); |
| 378 | } | 392 | } |
| @@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
| 381 | /* pwr on the device not in use */ | 395 | /* pwr on the device not in use */ |
| 382 | if (strncmp(usercmd, "ON", 2) == 0) { | 396 | if (strncmp(usercmd, "ON", 2) == 0) { |
| 383 | list_for_each_entry(client, &vgasr_priv.clients, list) { | 397 | list_for_each_entry(client, &vgasr_priv.clients, list) { |
| 384 | if (client->active) | 398 | if (client->active || client_is_audio(client)) |
| 385 | continue; | 399 | continue; |
| 386 | if (client->pwr_state == VGA_SWITCHEROO_OFF) | 400 | if (client->pwr_state == VGA_SWITCHEROO_OFF) |
| 387 | vga_switchon(client); | 401 | vga_switchon(client); |
| 402 | set_audio_state(client->id, VGA_SWITCHEROO_ON); | ||
| 388 | } | 403 | } |
| 389 | goto out; | 404 | goto out; |
| 390 | } | 405 | } |
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index beb2491db274..a0edd9854218 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig | |||
| @@ -37,4 +37,16 @@ config I2C_MUX_PCA954x | |||
| 37 | This driver can also be built as a module. If so, the module | 37 | This driver can also be built as a module. If so, the module |
| 38 | will be called i2c-mux-pca954x. | 38 | will be called i2c-mux-pca954x. |
| 39 | 39 | ||
| 40 | config I2C_MUX_PINCTRL | ||
| 41 | tristate "pinctrl-based I2C multiplexer" | ||
| 42 | depends on PINCTRL | ||
| 43 | help | ||
| 44 | If you say yes to this option, support will be included for an I2C | ||
| 45 | multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing. | ||
| 46 | This is useful for SoCs whose I2C module's signals can be routed to | ||
| 47 | different sets of pins at run-time. | ||
| 48 | |||
| 49 | This driver can also be built as a module. If so, the module will be | ||
| 50 | called pinctrl-i2cmux. | ||
| 51 | |||
| 40 | endmenu | 52 | endmenu |
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile index 5826249b29ca..76da8692afff 100644 --- a/drivers/i2c/muxes/Makefile +++ b/drivers/i2c/muxes/Makefile | |||
| @@ -4,5 +4,6 @@ | |||
| 4 | obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o | 4 | obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o |
| 5 | obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o | 5 | obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o |
| 6 | obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o | 6 | obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o |
| 7 | obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o | ||
| 7 | 8 | ||
| 8 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG | 9 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG |
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c new file mode 100644 index 000000000000..46a669763476 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c | |||
| @@ -0,0 +1,279 @@ | |||
| 1 | /* | ||
| 2 | * I2C multiplexer using pinctrl API | ||
| 3 | * | ||
| 4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/i2c.h> | ||
| 20 | #include <linux/i2c-mux.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/of_i2c.h> | ||
| 24 | #include <linux/pinctrl/consumer.h> | ||
| 25 | #include <linux/i2c-mux-pinctrl.h> | ||
| 26 | #include <linux/platform_device.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | |||
| 29 | struct i2c_mux_pinctrl { | ||
| 30 | struct device *dev; | ||
| 31 | struct i2c_mux_pinctrl_platform_data *pdata; | ||
| 32 | struct pinctrl *pinctrl; | ||
| 33 | struct pinctrl_state **states; | ||
| 34 | struct pinctrl_state *state_idle; | ||
| 35 | struct i2c_adapter *parent; | ||
| 36 | struct i2c_adapter **busses; | ||
| 37 | }; | ||
| 38 | |||
| 39 | static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data, | ||
| 40 | u32 chan) | ||
| 41 | { | ||
| 42 | struct i2c_mux_pinctrl *mux = data; | ||
| 43 | |||
| 44 | return pinctrl_select_state(mux->pinctrl, mux->states[chan]); | ||
| 45 | } | ||
| 46 | |||
| 47 | static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data, | ||
| 48 | u32 chan) | ||
| 49 | { | ||
| 50 | struct i2c_mux_pinctrl *mux = data; | ||
| 51 | |||
| 52 | return pinctrl_select_state(mux->pinctrl, mux->state_idle); | ||
| 53 | } | ||
| 54 | |||
| 55 | #ifdef CONFIG_OF | ||
| 56 | static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, | ||
| 57 | struct platform_device *pdev) | ||
| 58 | { | ||
| 59 | struct device_node *np = pdev->dev.of_node; | ||
| 60 | int num_names, i, ret; | ||
| 61 | struct device_node *adapter_np; | ||
| 62 | struct i2c_adapter *adapter; | ||
| 63 | |||
| 64 | if (!np) | ||
| 65 | return 0; | ||
| 66 | |||
| 67 | mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL); | ||
| 68 | if (!mux->pdata) { | ||
| 69 | dev_err(mux->dev, | ||
| 70 | "Cannot allocate i2c_mux_pinctrl_platform_data\n"); | ||
| 71 | return -ENOMEM; | ||
| 72 | } | ||
| 73 | |||
| 74 | num_names = of_property_count_strings(np, "pinctrl-names"); | ||
| 75 | if (num_names < 0) { | ||
| 76 | dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", | ||
| 77 | num_names); | ||
| 78 | return num_names; | ||
| 79 | } | ||
| 80 | |||
| 81 | mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev, | ||
| 82 | sizeof(*mux->pdata->pinctrl_states) * num_names, | ||
| 83 | GFP_KERNEL); | ||
| 84 | if (!mux->pdata->pinctrl_states) { | ||
| 85 | dev_err(mux->dev, "Cannot allocate pinctrl_states\n"); | ||
| 86 | return -ENOMEM; | ||
| 87 | } | ||
| 88 | |||
| 89 | for (i = 0; i < num_names; i++) { | ||
| 90 | ret = of_property_read_string_index(np, "pinctrl-names", i, | ||
| 91 | &mux->pdata->pinctrl_states[mux->pdata->bus_count]); | ||
| 92 | if (ret < 0) { | ||
| 93 | dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", | ||
| 94 | ret); | ||
| 95 | return ret; | ||
| 96 | } | ||
| 97 | if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count], | ||
| 98 | "idle")) { | ||
| 99 | if (i != num_names - 1) { | ||
| 100 | dev_err(mux->dev, "idle state must be last\n"); | ||
| 101 | return -EINVAL; | ||
| 102 | } | ||
| 103 | mux->pdata->pinctrl_state_idle = "idle"; | ||
| 104 | } else { | ||
| 105 | mux->pdata->bus_count++; | ||
| 106 | } | ||
| 107 | } | ||
| 108 | |||
| 109 | adapter_np = of_parse_phandle(np, "i2c-parent", 0); | ||
| 110 | if (!adapter_np) { | ||
| 111 | dev_err(mux->dev, "Cannot parse i2c-parent\n"); | ||
| 112 | return -ENODEV; | ||
| 113 | } | ||
| 114 | adapter = of_find_i2c_adapter_by_node(adapter_np); | ||
| 115 | if (!adapter) { | ||
| 116 | dev_err(mux->dev, "Cannot find parent bus\n"); | ||
| 117 | return -ENODEV; | ||
| 118 | } | ||
| 119 | mux->pdata->parent_bus_num = i2c_adapter_id(adapter); | ||
| 120 | put_device(&adapter->dev); | ||
| 121 | |||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | #else | ||
| 125 | static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, | ||
| 126 | struct platform_device *pdev) | ||
| 127 | { | ||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | #endif | ||
| 131 | |||
| 132 | static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) | ||
| 133 | { | ||
| 134 | struct i2c_mux_pinctrl *mux; | ||
| 135 | int (*deselect)(struct i2c_adapter *, void *, u32); | ||
| 136 | int i, ret; | ||
| 137 | |||
| 138 | mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); | ||
| 139 | if (!mux) { | ||
| 140 | dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n"); | ||
| 141 | ret = -ENOMEM; | ||
| 142 | goto err; | ||
| 143 | } | ||
| 144 | platform_set_drvdata(pdev, mux); | ||
| 145 | |||
| 146 | mux->dev = &pdev->dev; | ||
| 147 | |||
| 148 | mux->pdata = pdev->dev.platform_data; | ||
| 149 | if (!mux->pdata) { | ||
| 150 | ret = i2c_mux_pinctrl_parse_dt(mux, pdev); | ||
| 151 | if (ret < 0) | ||
| 152 | goto err; | ||
| 153 | } | ||
| 154 | if (!mux->pdata) { | ||
| 155 | dev_err(&pdev->dev, "Missing platform data\n"); | ||
| 156 | ret = -ENODEV; | ||
| 157 | goto err; | ||
| 158 | } | ||
| 159 | |||
| 160 | mux->states = devm_kzalloc(&pdev->dev, | ||
| 161 | sizeof(*mux->states) * mux->pdata->bus_count, | ||
| 162 | GFP_KERNEL); | ||
| 163 | if (!mux->states) { | ||
| 164 | dev_err(&pdev->dev, "Cannot allocate states\n"); | ||
| 165 | ret = -ENOMEM; | ||
| 166 | goto err; | ||
| 167 | } | ||
| 168 | |||
| 169 | mux->busses = devm_kzalloc(&pdev->dev, | ||
| 170 | sizeof(mux->busses) * mux->pdata->bus_count, | ||
| 171 | GFP_KERNEL); | ||
| 172 | if (!mux->states) { | ||
| 173 | dev_err(&pdev->dev, "Cannot allocate busses\n"); | ||
| 174 | ret = -ENOMEM; | ||
| 175 | goto err; | ||
| 176 | } | ||
| 177 | |||
| 178 | mux->pinctrl = devm_pinctrl_get(&pdev->dev); | ||
| 179 | if (IS_ERR(mux->pinctrl)) { | ||
| 180 | ret = PTR_ERR(mux->pinctrl); | ||
| 181 | dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret); | ||
| 182 | goto err; | ||
| 183 | } | ||
| 184 | for (i = 0; i < mux->pdata->bus_count; i++) { | ||
| 185 | mux->states[i] = pinctrl_lookup_state(mux->pinctrl, | ||
| 186 | mux->pdata->pinctrl_states[i]); | ||
| 187 | if (IS_ERR(mux->states[i])) { | ||
| 188 | ret = PTR_ERR(mux->states[i]); | ||
| 189 | dev_err(&pdev->dev, | ||
| 190 | "Cannot look up pinctrl state %s: %d\n", | ||
| 191 | mux->pdata->pinctrl_states[i], ret); | ||
| 192 | goto err; | ||
| 193 | } | ||
| 194 | } | ||
| 195 | if (mux->pdata->pinctrl_state_idle) { | ||
| 196 | mux->state_idle = pinctrl_lookup_state(mux->pinctrl, | ||
| 197 | mux->pdata->pinctrl_state_idle); | ||
| 198 | if (IS_ERR(mux->state_idle)) { | ||
| 199 | ret = PTR_ERR(mux->state_idle); | ||
| 200 | dev_err(&pdev->dev, | ||
| 201 | "Cannot look up pinctrl state %s: %d\n", | ||
| 202 | mux->pdata->pinctrl_state_idle, ret); | ||
| 203 | goto err; | ||
| 204 | } | ||
| 205 | |||
| 206 | deselect = i2c_mux_pinctrl_deselect; | ||
| 207 | } else { | ||
| 208 | deselect = NULL; | ||
| 209 | } | ||
| 210 | |||
| 211 | mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num); | ||
| 212 | if (!mux->parent) { | ||
| 213 | dev_err(&pdev->dev, "Parent adapter (%d) not found\n", | ||
| 214 | mux->pdata->parent_bus_num); | ||
| 215 | ret = -ENODEV; | ||
| 216 | goto err; | ||
| 217 | } | ||
| 218 | |||
| 219 | for (i = 0; i < mux->pdata->bus_count; i++) { | ||
| 220 | u32 bus = mux->pdata->base_bus_num ? | ||
| 221 | (mux->pdata->base_bus_num + i) : 0; | ||
| 222 | |||
| 223 | mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, | ||
| 224 | mux, bus, i, | ||
| 225 | i2c_mux_pinctrl_select, | ||
| 226 | deselect); | ||
| 227 | if (!mux->busses[i]) { | ||
| 228 | ret = -ENODEV; | ||
| 229 | dev_err(&pdev->dev, "Failed to add adapter %d\n", i); | ||
| 230 | goto err_del_adapter; | ||
| 231 | } | ||
| 232 | } | ||
| 233 | |||
| 234 | return 0; | ||
| 235 | |||
| 236 | err_del_adapter: | ||
| 237 | for (; i > 0; i--) | ||
| 238 | i2c_del_mux_adapter(mux->busses[i - 1]); | ||
| 239 | i2c_put_adapter(mux->parent); | ||
| 240 | err: | ||
| 241 | return ret; | ||
| 242 | } | ||
| 243 | |||
| 244 | static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev) | ||
| 245 | { | ||
| 246 | struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev); | ||
| 247 | int i; | ||
| 248 | |||
| 249 | for (i = 0; i < mux->pdata->bus_count; i++) | ||
| 250 | i2c_del_mux_adapter(mux->busses[i]); | ||
| 251 | |||
| 252 | i2c_put_adapter(mux->parent); | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 257 | #ifdef CONFIG_OF | ||
| 258 | static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = { | ||
| 259 | { .compatible = "i2c-mux-pinctrl", }, | ||
| 260 | {}, | ||
| 261 | }; | ||
| 262 | MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match); | ||
| 263 | #endif | ||
| 264 | |||
| 265 | static struct platform_driver i2c_mux_pinctrl_driver = { | ||
| 266 | .driver = { | ||
| 267 | .name = "i2c-mux-pinctrl", | ||
| 268 | .owner = THIS_MODULE, | ||
| 269 | .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match), | ||
| 270 | }, | ||
| 271 | .probe = i2c_mux_pinctrl_probe, | ||
| 272 | .remove = __devexit_p(i2c_mux_pinctrl_remove), | ||
| 273 | }; | ||
| 274 | module_platform_driver(i2c_mux_pinctrl_driver); | ||
| 275 | |||
| 276 | MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver"); | ||
| 277 | MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); | ||
| 278 | MODULE_LICENSE("GPL v2"); | ||
| 279 | MODULE_ALIAS("platform:i2c-mux-pinctrl"); | ||
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 8716066a2f2b..bcb507b0cfd4 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c | |||
| @@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = { | |||
| 236 | */ | 236 | */ |
| 237 | static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) | 237 | static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) |
| 238 | { | 238 | { |
| 239 | unsigned long cycle_time; | 239 | unsigned long cycle_time = 0; |
| 240 | int use_dma_info = 0; | 240 | int use_dma_info = 0; |
| 241 | const u8 xfer_mode = drive->dma_mode; | 241 | const u8 xfer_mode = drive->dma_mode; |
| 242 | 242 | ||
| @@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) | |||
| 271 | 271 | ||
| 272 | ide_set_drivedata(drive, (void *)cycle_time); | 272 | ide_set_drivedata(drive, (void *)cycle_time); |
| 273 | 273 | ||
| 274 | printk("%s: %s selected (peak %dMB/s)\n", drive->name, | 274 | printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n", |
| 275 | ide_xfer_verbose(xfer_mode), | 275 | drive->name, ide_xfer_verbose(xfer_mode), |
| 276 | 2000 / (unsigned long)ide_get_drivedata(drive)); | 276 | 2000 / (cycle_time ? cycle_time : (unsigned long) -1)); |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | static const struct ide_port_ops icside_v6_port_ops = { | 279 | static const struct ide_port_ops icside_v6_port_ops = { |
| @@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = { | |||
| 375 | .dma_test_irq = icside_dma_test_irq, | 375 | .dma_test_irq = icside_dma_test_irq, |
| 376 | .dma_lost_irq = ide_dma_lost_irq, | 376 | .dma_lost_irq = ide_dma_lost_irq, |
| 377 | }; | 377 | }; |
| 378 | #else | ||
| 379 | #define icside_v6_dma_ops NULL | ||
| 380 | #endif | 378 | #endif |
| 381 | 379 | ||
| 382 | static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) | 380 | static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) |
| @@ -456,7 +454,6 @@ err_free: | |||
| 456 | static const struct ide_port_info icside_v6_port_info __initdata = { | 454 | static const struct ide_port_info icside_v6_port_info __initdata = { |
| 457 | .init_dma = icside_dma_off_init, | 455 | .init_dma = icside_dma_off_init, |
| 458 | .port_ops = &icside_v6_no_dma_port_ops, | 456 | .port_ops = &icside_v6_no_dma_port_ops, |
| 459 | .dma_ops = &icside_v6_dma_ops, | ||
| 460 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, | 457 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, |
| 461 | .mwdma_mask = ATA_MWDMA2, | 458 | .mwdma_mask = ATA_MWDMA2, |
| 462 | .swdma_mask = ATA_SWDMA2, | 459 | .swdma_mask = ATA_SWDMA2, |
| @@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) | |||
| 518 | 515 | ||
| 519 | ecard_set_drvdata(ec, state); | 516 | ecard_set_drvdata(ec, state); |
| 520 | 517 | ||
| 518 | #ifdef CONFIG_BLK_DEV_IDEDMA_ICS | ||
| 521 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { | 519 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { |
| 522 | d.init_dma = icside_dma_init; | 520 | d.init_dma = icside_dma_init; |
| 523 | d.port_ops = &icside_v6_port_ops; | 521 | d.port_ops = &icside_v6_port_ops; |
| 524 | } else | 522 | d.dma_ops = &icside_v6_dma_ops; |
| 525 | d.dma_ops = NULL; | 523 | } |
| 524 | #endif | ||
| 526 | 525 | ||
| 527 | ret = ide_host_register(host, &d, hws); | 526 | ret = ide_host_register(host, &d, hws); |
| 528 | if (ret) | 527 | if (ret) |
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 28e344ea514c..f1e922e2479a 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
| @@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) | |||
| 167 | { | 167 | { |
| 168 | int *is_kme = priv_data; | 168 | int *is_kme = priv_data; |
| 169 | 169 | ||
| 170 | if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { | 170 | if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH) |
| 171 | != IO_DATA_PATH_WIDTH_8) { | ||
| 171 | pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; | 172 | pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; |
| 172 | pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; | 173 | pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; |
| 173 | } | 174 | } |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 55ab284e22f2..b18870c455ad 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
| @@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, | |||
| 1593 | struct net_device *pdev; | 1593 | struct net_device *pdev; |
| 1594 | 1594 | ||
| 1595 | pdev = ip_dev_find(&init_net, peer_ip); | 1595 | pdev = ip_dev_find(&init_net, peer_ip); |
| 1596 | if (!pdev) { | ||
| 1597 | err = -ENODEV; | ||
| 1598 | goto out; | ||
| 1599 | } | ||
| 1596 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, | 1600 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, |
| 1597 | n, pdev, 0); | 1601 | n, pdev, 0); |
| 1598 | if (!ep->l2t) | 1602 | if (!ep->l2t) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ee1c577238f7..3530c41fcd1f 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 140 | props->max_mr_size = ~0ull; | 140 | props->max_mr_size = ~0ull; |
| 141 | props->page_size_cap = dev->dev->caps.page_size_cap; | 141 | props->page_size_cap = dev->dev->caps.page_size_cap; |
| 142 | props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; | 142 | props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; |
| 143 | props->max_qp_wr = dev->dev->caps.max_wqes; | 143 | props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; |
| 144 | props->max_sge = min(dev->dev->caps.max_sq_sg, | 144 | props->max_sge = min(dev->dev->caps.max_sq_sg, |
| 145 | dev->dev->caps.max_rq_sg); | 145 | dev->dev->caps.max_rq_sg); |
| 146 | props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; | 146 | props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; |
| @@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
| 1084 | int total_eqs = 0; | 1084 | int total_eqs = 0; |
| 1085 | int i, j, eq; | 1085 | int i, j, eq; |
| 1086 | 1086 | ||
| 1087 | /* Init eq table */ | 1087 | /* Legacy mode or comp_pool is not large enough */ |
| 1088 | ibdev->eq_table = NULL; | 1088 | if (dev->caps.comp_pool == 0 || |
| 1089 | ibdev->eq_added = 0; | 1089 | dev->caps.num_ports > dev->caps.comp_pool) |
| 1090 | |||
| 1091 | /* Legacy mode? */ | ||
| 1092 | if (dev->caps.comp_pool == 0) | ||
| 1093 | return; | 1090 | return; |
| 1094 | 1091 | ||
| 1095 | eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ | 1092 | eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ |
| @@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
| 1135 | static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | 1132 | static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) |
| 1136 | { | 1133 | { |
| 1137 | int i; | 1134 | int i; |
| 1138 | int total_eqs; | 1135 | |
| 1136 | /* no additional eqs were added */ | ||
| 1137 | if (!ibdev->eq_table) | ||
| 1138 | return; | ||
| 1139 | 1139 | ||
| 1140 | /* Reset the advertised EQ number */ | 1140 | /* Reset the advertised EQ number */ |
| 1141 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; | 1141 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; |
| @@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
| 1148 | mlx4_release_eq(dev, ibdev->eq_table[i]); | 1148 | mlx4_release_eq(dev, ibdev->eq_table[i]); |
| 1149 | } | 1149 | } |
| 1150 | 1150 | ||
| 1151 | total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added; | ||
| 1152 | memset(ibdev->eq_table, 0, total_eqs * sizeof(int)); | ||
| 1153 | kfree(ibdev->eq_table); | 1151 | kfree(ibdev->eq_table); |
| 1154 | |||
| 1155 | ibdev->eq_table = NULL; | ||
| 1156 | ibdev->eq_added = 0; | ||
| 1157 | } | 1152 | } |
| 1158 | 1153 | ||
| 1159 | static void *mlx4_ib_add(struct mlx4_dev *dev) | 1154 | static void *mlx4_ib_add(struct mlx4_dev *dev) |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index e62297cc77cc..ff36655d23d3 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
| @@ -44,6 +44,14 @@ | |||
| 44 | #include <linux/mlx4/device.h> | 44 | #include <linux/mlx4/device.h> |
| 45 | #include <linux/mlx4/doorbell.h> | 45 | #include <linux/mlx4/doorbell.h> |
| 46 | 46 | ||
| 47 | enum { | ||
| 48 | MLX4_IB_SQ_MIN_WQE_SHIFT = 6, | ||
| 49 | MLX4_IB_MAX_HEADROOM = 2048 | ||
| 50 | }; | ||
| 51 | |||
| 52 | #define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1) | ||
| 53 | #define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT)) | ||
| 54 | |||
| 47 | struct mlx4_ib_ucontext { | 55 | struct mlx4_ib_ucontext { |
| 48 | struct ib_ucontext ibucontext; | 56 | struct ib_ucontext ibucontext; |
| 49 | struct mlx4_uar uar; | 57 | struct mlx4_uar uar; |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ceb33327091a..8d4ed24aef93 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 310 | int is_user, int has_rq, struct mlx4_ib_qp *qp) | 310 | int is_user, int has_rq, struct mlx4_ib_qp *qp) |
| 311 | { | 311 | { |
| 312 | /* Sanity check RQ size before proceeding */ | 312 | /* Sanity check RQ size before proceeding */ |
| 313 | if (cap->max_recv_wr > dev->dev->caps.max_wqes || | 313 | if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || |
| 314 | cap->max_recv_sge > dev->dev->caps.max_rq_sg) | 314 | cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) |
| 315 | return -EINVAL; | 315 | return -EINVAL; |
| 316 | 316 | ||
| 317 | if (!has_rq) { | 317 | if (!has_rq) { |
| @@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 329 | qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); | 329 | qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); |
| 330 | } | 330 | } |
| 331 | 331 | ||
| 332 | cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; | 332 | /* leave userspace return values as they were, so as not to break ABI */ |
| 333 | cap->max_recv_sge = qp->rq.max_gs; | 333 | if (is_user) { |
| 334 | cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; | ||
| 335 | cap->max_recv_sge = qp->rq.max_gs; | ||
| 336 | } else { | ||
| 337 | cap->max_recv_wr = qp->rq.max_post = | ||
| 338 | min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); | ||
| 339 | cap->max_recv_sge = min(qp->rq.max_gs, | ||
| 340 | min(dev->dev->caps.max_sq_sg, | ||
| 341 | dev->dev->caps.max_rq_sg)); | ||
| 342 | } | ||
| 334 | 343 | ||
| 335 | return 0; | 344 | return 0; |
| 336 | } | 345 | } |
| @@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 341 | int s; | 350 | int s; |
| 342 | 351 | ||
| 343 | /* Sanity check SQ size before proceeding */ | 352 | /* Sanity check SQ size before proceeding */ |
| 344 | if (cap->max_send_wr > dev->dev->caps.max_wqes || | 353 | if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || |
| 345 | cap->max_send_sge > dev->dev->caps.max_sq_sg || | 354 | cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || |
| 346 | cap->max_inline_data + send_wqe_overhead(type, qp->flags) + | 355 | cap->max_inline_data + send_wqe_overhead(type, qp->flags) + |
| 347 | sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) | 356 | sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) |
| 348 | return -EINVAL; | 357 | return -EINVAL; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 85a69c958559..037f5cea85bd 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h | |||
| @@ -231,7 +231,6 @@ struct ocrdma_qp_hwq_info { | |||
| 231 | u32 entry_size; | 231 | u32 entry_size; |
| 232 | u32 max_cnt; | 232 | u32 max_cnt; |
| 233 | u32 max_wqe_idx; | 233 | u32 max_wqe_idx; |
| 234 | u32 free_delta; | ||
| 235 | u16 dbid; /* qid, where to ring the doorbell. */ | 234 | u16 dbid; /* qid, where to ring the doorbell. */ |
| 236 | u32 len; | 235 | u32 len; |
| 237 | dma_addr_t pa; | 236 | dma_addr_t pa; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h index a411a4e3193d..517ab20b727c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h | |||
| @@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp { | |||
| 101 | u32 rsvd1; | 101 | u32 rsvd1; |
| 102 | u32 num_wqe_allocated; | 102 | u32 num_wqe_allocated; |
| 103 | u32 num_rqe_allocated; | 103 | u32 num_rqe_allocated; |
| 104 | u32 free_wqe_delta; | ||
| 105 | u32 free_rqe_delta; | ||
| 106 | u32 db_sq_offset; | 104 | u32 db_sq_offset; |
| 107 | u32 db_rq_offset; | 105 | u32 db_rq_offset; |
| 108 | u32 db_shift; | 106 | u32 db_shift; |
| @@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp { | |||
| 126 | u32 db_rq_offset; | 124 | u32 db_rq_offset; |
| 127 | u32 db_shift; | 125 | u32 db_shift; |
| 128 | 126 | ||
| 129 | u32 free_rqe_delta; | 127 | u64 rsvd2; |
| 130 | u32 rsvd2; | ||
| 131 | u64 rsvd3; | 128 | u64 rsvd3; |
| 132 | } __packed; | 129 | } __packed; |
| 133 | 130 | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 9b204b1ba336..9343a1522977 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
| @@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
| 732 | break; | 732 | break; |
| 733 | case OCRDMA_SRQ_LIMIT_EVENT: | 733 | case OCRDMA_SRQ_LIMIT_EVENT: |
| 734 | ib_evt.element.srq = &qp->srq->ibsrq; | 734 | ib_evt.element.srq = &qp->srq->ibsrq; |
| 735 | ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; | 735 | ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED; |
| 736 | srq_event = 1; | 736 | srq_event = 1; |
| 737 | qp_event = 0; | 737 | qp_event = 0; |
| 738 | break; | 738 | break; |
| @@ -1990,19 +1990,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, | |||
| 1990 | max_wqe_allocated = 1 << max_wqe_allocated; | 1990 | max_wqe_allocated = 1 << max_wqe_allocated; |
| 1991 | max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); | 1991 | max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); |
| 1992 | 1992 | ||
| 1993 | if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
| 1994 | qp->sq.free_delta = 0; | ||
| 1995 | qp->rq.free_delta = 1; | ||
| 1996 | } else | ||
| 1997 | qp->sq.free_delta = 1; | ||
| 1998 | |||
| 1999 | qp->sq.max_cnt = max_wqe_allocated; | 1993 | qp->sq.max_cnt = max_wqe_allocated; |
| 2000 | qp->sq.max_wqe_idx = max_wqe_allocated - 1; | 1994 | qp->sq.max_wqe_idx = max_wqe_allocated - 1; |
| 2001 | 1995 | ||
| 2002 | if (!attrs->srq) { | 1996 | if (!attrs->srq) { |
| 2003 | qp->rq.max_cnt = max_rqe_allocated; | 1997 | qp->rq.max_cnt = max_rqe_allocated; |
| 2004 | qp->rq.max_wqe_idx = max_rqe_allocated - 1; | 1998 | qp->rq.max_wqe_idx = max_rqe_allocated - 1; |
| 2005 | qp->rq.free_delta = 1; | ||
| 2006 | } | 1999 | } |
| 2007 | } | 2000 | } |
| 2008 | 2001 | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index a20d16eaae71..04fef3de6d75 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | *******************************************************************/ | 26 | *******************************************************************/ |
| 27 | 27 | ||
| 28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| 29 | #include <linux/version.h> | ||
| 30 | #include <linux/idr.h> | 29 | #include <linux/idr.h> |
| 31 | #include <rdma/ib_verbs.h> | 30 | #include <rdma/ib_verbs.h> |
| 32 | #include <rdma/ib_user_verbs.h> | 31 | #include <rdma/ib_user_verbs.h> |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index e9f74d1b48f6..d16d172b6b6b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, | |||
| 940 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; | 940 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; |
| 941 | uresp.db_shift = 16; | 941 | uresp.db_shift = 16; |
| 942 | } | 942 | } |
| 943 | uresp.free_wqe_delta = qp->sq.free_delta; | ||
| 944 | uresp.free_rqe_delta = qp->rq.free_delta; | ||
| 945 | 943 | ||
| 946 | if (qp->dpp_enabled) { | 944 | if (qp->dpp_enabled) { |
| 947 | uresp.dpp_credit = dpp_credit_lmt; | 945 | uresp.dpp_credit = dpp_credit_lmt; |
| @@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) | |||
| 1307 | free_cnt = (q->max_cnt - q->head) + q->tail; | 1305 | free_cnt = (q->max_cnt - q->head) + q->tail; |
| 1308 | else | 1306 | else |
| 1309 | free_cnt = q->tail - q->head; | 1307 | free_cnt = q->tail - q->head; |
| 1310 | if (q->free_delta) | ||
| 1311 | free_cnt -= q->free_delta; | ||
| 1312 | return free_cnt; | 1308 | return free_cnt; |
| 1313 | } | 1309 | } |
| 1314 | 1310 | ||
| @@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata) | |||
| 1501 | (srq->pd->id * srq->dev->nic_info.db_page_size); | 1497 | (srq->pd->id * srq->dev->nic_info.db_page_size); |
| 1502 | uresp.db_page_size = srq->dev->nic_info.db_page_size; | 1498 | uresp.db_page_size = srq->dev->nic_info.db_page_size; |
| 1503 | uresp.num_rqe_allocated = srq->rq.max_cnt; | 1499 | uresp.num_rqe_allocated = srq->rq.max_cnt; |
| 1504 | uresp.free_rqe_delta = 1; | ||
| 1505 | if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1500 | if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { |
| 1506 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; | 1501 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; |
| 1507 | uresp.db_shift = 24; | 1502 | uresp.db_shift = 24; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index e6483439f25f..633f03d80274 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | #ifndef __OCRDMA_VERBS_H__ | 28 | #ifndef __OCRDMA_VERBS_H__ |
| 29 | #define __OCRDMA_VERBS_H__ | 29 | #define __OCRDMA_VERBS_H__ |
| 30 | 30 | ||
| 31 | #include <linux/version.h> | ||
| 32 | int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, | 31 | int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, |
| 33 | struct ib_send_wr **bad_wr); | 32 | struct ib_send_wr **bad_wr); |
| 34 | int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, | 33 | int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d90a421e9cac..a2e418cba0ff 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
| 547 | spin_unlock_irqrestore(&iommu->lock, flags); | 547 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 548 | } | 548 | } |
| 549 | 549 | ||
| 550 | static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) | 550 | static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) |
| 551 | { | 551 | { |
| 552 | struct amd_iommu_fault fault; | 552 | struct amd_iommu_fault fault; |
| 553 | volatile u64 *raw; | ||
| 554 | int i; | ||
| 555 | 553 | ||
| 556 | INC_STATS_COUNTER(pri_requests); | 554 | INC_STATS_COUNTER(pri_requests); |
| 557 | 555 | ||
| 558 | raw = (u64 *)(iommu->ppr_log + head); | ||
| 559 | |||
| 560 | /* | ||
| 561 | * Hardware bug: Interrupt may arrive before the entry is written to | ||
| 562 | * memory. If this happens we need to wait for the entry to arrive. | ||
| 563 | */ | ||
| 564 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | ||
| 565 | if (PPR_REQ_TYPE(raw[0]) != 0) | ||
| 566 | break; | ||
| 567 | udelay(1); | ||
| 568 | } | ||
| 569 | |||
| 570 | if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { | 556 | if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { |
| 571 | pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); | 557 | pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); |
| 572 | return; | 558 | return; |
| @@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) | |||
| 578 | fault.tag = PPR_TAG(raw[0]); | 564 | fault.tag = PPR_TAG(raw[0]); |
| 579 | fault.flags = PPR_FLAGS(raw[0]); | 565 | fault.flags = PPR_FLAGS(raw[0]); |
| 580 | 566 | ||
| 581 | /* | ||
| 582 | * To detect the hardware bug we need to clear the entry | ||
| 583 | * to back to zero. | ||
| 584 | */ | ||
| 585 | raw[0] = raw[1] = 0; | ||
| 586 | |||
| 587 | atomic_notifier_call_chain(&ppr_notifier, 0, &fault); | 567 | atomic_notifier_call_chain(&ppr_notifier, 0, &fault); |
| 588 | } | 568 | } |
| 589 | 569 | ||
| @@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) | |||
| 595 | if (iommu->ppr_log == NULL) | 575 | if (iommu->ppr_log == NULL) |
| 596 | return; | 576 | return; |
| 597 | 577 | ||
| 578 | /* enable ppr interrupts again */ | ||
| 579 | writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
| 580 | |||
| 598 | spin_lock_irqsave(&iommu->lock, flags); | 581 | spin_lock_irqsave(&iommu->lock, flags); |
| 599 | 582 | ||
| 600 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | 583 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
| 601 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | 584 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
| 602 | 585 | ||
| 603 | while (head != tail) { | 586 | while (head != tail) { |
| 587 | volatile u64 *raw; | ||
| 588 | u64 entry[2]; | ||
| 589 | int i; | ||
| 604 | 590 | ||
| 605 | /* Handle PPR entry */ | 591 | raw = (u64 *)(iommu->ppr_log + head); |
| 606 | iommu_handle_ppr_entry(iommu, head); | 592 | |
| 593 | /* | ||
| 594 | * Hardware bug: Interrupt may arrive before the entry is | ||
| 595 | * written to memory. If this happens we need to wait for the | ||
| 596 | * entry to arrive. | ||
| 597 | */ | ||
| 598 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | ||
| 599 | if (PPR_REQ_TYPE(raw[0]) != 0) | ||
| 600 | break; | ||
| 601 | udelay(1); | ||
| 602 | } | ||
| 603 | |||
| 604 | /* Avoid memcpy function-call overhead */ | ||
| 605 | entry[0] = raw[0]; | ||
| 606 | entry[1] = raw[1]; | ||
| 607 | 607 | ||
| 608 | /* Update and refresh ring-buffer state*/ | 608 | /* |
| 609 | * To detect the hardware bug we need to clear the entry | ||
| 610 | * back to zero. | ||
| 611 | */ | ||
| 612 | raw[0] = raw[1] = 0UL; | ||
| 613 | |||
| 614 | /* Update head pointer of hardware ring-buffer */ | ||
| 609 | head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; | 615 | head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; |
| 610 | writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | 616 | writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
| 617 | |||
| 618 | /* | ||
| 619 | * Release iommu->lock because ppr-handling might need to | ||
| 620 | * re-aquire it | ||
| 621 | */ | ||
| 622 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
| 623 | |||
| 624 | /* Handle PPR entry */ | ||
| 625 | iommu_handle_ppr_entry(iommu, entry); | ||
| 626 | |||
| 627 | spin_lock_irqsave(&iommu->lock, flags); | ||
| 628 | |||
| 629 | /* Refresh ring-buffer information */ | ||
| 630 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | ||
| 611 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | 631 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
| 612 | } | 632 | } |
| 613 | 633 | ||
| 614 | /* enable ppr interrupts again */ | ||
| 615 | writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
| 616 | |||
| 617 | spin_unlock_irqrestore(&iommu->lock, flags); | 634 | spin_unlock_irqrestore(&iommu->lock, flags); |
| 618 | } | 635 | } |
| 619 | 636 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index c56790375e0f..542024ba6dba 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | |||
| 1029 | if (!iommu->dev) | 1029 | if (!iommu->dev) |
| 1030 | return 1; | 1030 | return 1; |
| 1031 | 1031 | ||
| 1032 | iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, | ||
| 1033 | PCI_DEVFN(0, 0)); | ||
| 1034 | |||
| 1032 | iommu->cap_ptr = h->cap_ptr; | 1035 | iommu->cap_ptr = h->cap_ptr; |
| 1033 | iommu->pci_seg = h->pci_seg; | 1036 | iommu->pci_seg = h->pci_seg; |
| 1034 | iommu->mmio_phys = h->mmio_phys; | 1037 | iommu->mmio_phys = h->mmio_phys; |
| @@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) | |||
| 1323 | { | 1326 | { |
| 1324 | int i, j; | 1327 | int i, j; |
| 1325 | u32 ioc_feature_control; | 1328 | u32 ioc_feature_control; |
| 1326 | struct pci_dev *pdev = NULL; | 1329 | struct pci_dev *pdev = iommu->root_pdev; |
| 1327 | 1330 | ||
| 1328 | /* RD890 BIOSes may not have completely reconfigured the iommu */ | 1331 | /* RD890 BIOSes may not have completely reconfigured the iommu */ |
| 1329 | if (!is_rd890_iommu(iommu->dev)) | 1332 | if (!is_rd890_iommu(iommu->dev) || !pdev) |
| 1330 | return; | 1333 | return; |
| 1331 | 1334 | ||
| 1332 | /* | 1335 | /* |
| 1333 | * First, we need to ensure that the iommu is enabled. This is | 1336 | * First, we need to ensure that the iommu is enabled. This is |
| 1334 | * controlled by a register in the northbridge | 1337 | * controlled by a register in the northbridge |
| 1335 | */ | 1338 | */ |
| 1336 | pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); | ||
| 1337 | |||
| 1338 | if (!pdev) | ||
| 1339 | return; | ||
| 1340 | 1339 | ||
| 1341 | /* Select Northbridge indirect register 0x75 and enable writing */ | 1340 | /* Select Northbridge indirect register 0x75 and enable writing */ |
| 1342 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); | 1341 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); |
| @@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) | |||
| 1346 | if (!(ioc_feature_control & 0x1)) | 1345 | if (!(ioc_feature_control & 0x1)) |
| 1347 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); | 1346 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); |
| 1348 | 1347 | ||
| 1349 | pci_dev_put(pdev); | ||
| 1350 | |||
| 1351 | /* Restore the iommu BAR */ | 1348 | /* Restore the iommu BAR */ |
| 1352 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, | 1349 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, |
| 1353 | iommu->stored_addr_lo); | 1350 | iommu->stored_addr_lo); |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 2452f3b71736..24355559a2ad 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -481,6 +481,9 @@ struct amd_iommu { | |||
| 481 | /* Pointer to PCI device of this IOMMU */ | 481 | /* Pointer to PCI device of this IOMMU */ |
| 482 | struct pci_dev *dev; | 482 | struct pci_dev *dev; |
| 483 | 483 | ||
| 484 | /* Cache pdev to root device for resume quirks */ | ||
| 485 | struct pci_dev *root_pdev; | ||
| 486 | |||
| 484 | /* physical address of MMIO space */ | 487 | /* physical address of MMIO space */ |
| 485 | u64 mmio_phys; | 488 | u64 mmio_phys; |
| 486 | /* virtual address of MMIO space */ | 489 | /* virtual address of MMIO space */ |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 04cb8c88d74b..12b2b55c519e 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
| @@ -379,7 +379,7 @@ config LEDS_NETXBIG | |||
| 379 | 379 | ||
| 380 | config LEDS_ASIC3 | 380 | config LEDS_ASIC3 |
| 381 | bool "LED support for the HTC ASIC3" | 381 | bool "LED support for the HTC ASIC3" |
| 382 | depends on LEDS_CLASS | 382 | depends on LEDS_CLASS=y |
| 383 | depends on MFD_ASIC3 | 383 | depends on MFD_ASIC3 |
| 384 | default y | 384 | default y |
| 385 | help | 385 | help |
| @@ -390,7 +390,7 @@ config LEDS_ASIC3 | |||
| 390 | 390 | ||
| 391 | config LEDS_RENESAS_TPU | 391 | config LEDS_RENESAS_TPU |
| 392 | bool "LED support for Renesas TPU" | 392 | bool "LED support for Renesas TPU" |
| 393 | depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO | 393 | depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO |
| 394 | help | 394 | help |
| 395 | This option enables build of the LED TPU platform driver, | 395 | This option enables build of the LED TPU platform driver, |
| 396 | suitable to drive any TPU channel on newer Renesas SoCs. | 396 | suitable to drive any TPU channel on newer Renesas SoCs. |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 8ee92c81aec2..e663e6f413e9 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
| @@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev) | |||
| 29 | led_cdev->brightness = led_cdev->brightness_get(led_cdev); | 29 | led_cdev->brightness = led_cdev->brightness_get(led_cdev); |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | static ssize_t led_brightness_show(struct device *dev, | 32 | static ssize_t led_brightness_show(struct device *dev, |
| 33 | struct device_attribute *attr, char *buf) | 33 | struct device_attribute *attr, char *buf) |
| 34 | { | 34 | { |
| 35 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | 35 | struct led_classdev *led_cdev = dev_get_drvdata(dev); |
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index d6860043f6f9..d65353d8d3fc 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c | |||
| @@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev, | |||
| 44 | if (!led_cdev->blink_brightness) | 44 | if (!led_cdev->blink_brightness) |
| 45 | led_cdev->blink_brightness = led_cdev->max_brightness; | 45 | led_cdev->blink_brightness = led_cdev->max_brightness; |
| 46 | 46 | ||
| 47 | if (led_get_trigger_data(led_cdev) && | ||
| 48 | delay_on == led_cdev->blink_delay_on && | ||
| 49 | delay_off == led_cdev->blink_delay_off) | ||
| 50 | return; | ||
| 51 | |||
| 52 | led_stop_software_blink(led_cdev); | ||
| 53 | |||
| 54 | led_cdev->blink_delay_on = delay_on; | 47 | led_cdev->blink_delay_on = delay_on; |
| 55 | led_cdev->blink_delay_off = delay_off; | 48 | led_cdev->blink_delay_off = delay_off; |
| 56 | 49 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 835de7168cd3..a9c7981ddd24 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -2550,6 +2550,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
| 2550 | err = -EINVAL; | 2550 | err = -EINVAL; |
| 2551 | spin_lock_init(&conf->device_lock); | 2551 | spin_lock_init(&conf->device_lock); |
| 2552 | rdev_for_each(rdev, mddev) { | 2552 | rdev_for_each(rdev, mddev) { |
| 2553 | struct request_queue *q; | ||
| 2553 | int disk_idx = rdev->raid_disk; | 2554 | int disk_idx = rdev->raid_disk; |
| 2554 | if (disk_idx >= mddev->raid_disks | 2555 | if (disk_idx >= mddev->raid_disks |
| 2555 | || disk_idx < 0) | 2556 | || disk_idx < 0) |
| @@ -2562,6 +2563,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
| 2562 | if (disk->rdev) | 2563 | if (disk->rdev) |
| 2563 | goto abort; | 2564 | goto abort; |
| 2564 | disk->rdev = rdev; | 2565 | disk->rdev = rdev; |
| 2566 | q = bdev_get_queue(rdev->bdev); | ||
| 2567 | if (q->merge_bvec_fn) | ||
| 2568 | mddev->merge_check_needed = 1; | ||
| 2565 | 2569 | ||
| 2566 | disk->head_position = 0; | 2570 | disk->head_position = 0; |
| 2567 | } | 2571 | } |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 987db37cb875..99ae6068e456 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -3475,6 +3475,7 @@ static int run(struct mddev *mddev) | |||
| 3475 | 3475 | ||
| 3476 | rdev_for_each(rdev, mddev) { | 3476 | rdev_for_each(rdev, mddev) { |
| 3477 | long long diff; | 3477 | long long diff; |
| 3478 | struct request_queue *q; | ||
| 3478 | 3479 | ||
| 3479 | disk_idx = rdev->raid_disk; | 3480 | disk_idx = rdev->raid_disk; |
| 3480 | if (disk_idx < 0) | 3481 | if (disk_idx < 0) |
| @@ -3493,6 +3494,9 @@ static int run(struct mddev *mddev) | |||
| 3493 | goto out_free_conf; | 3494 | goto out_free_conf; |
| 3494 | disk->rdev = rdev; | 3495 | disk->rdev = rdev; |
| 3495 | } | 3496 | } |
| 3497 | q = bdev_get_queue(rdev->bdev); | ||
| 3498 | if (q->merge_bvec_fn) | ||
| 3499 | mddev->merge_check_needed = 1; | ||
| 3496 | diff = (rdev->new_data_offset - rdev->data_offset); | 3500 | diff = (rdev->new_data_offset - rdev->data_offset); |
| 3497 | if (!mddev->reshape_backwards) | 3501 | if (!mddev->reshape_backwards) |
| 3498 | diff = -diff; | 3502 | diff = -diff; |
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 9f957c2d48e9..09d4f8d9d592 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c | |||
| @@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir; | |||
| 264 | */ | 264 | */ |
| 265 | int ubi_debugfs_init(void) | 265 | int ubi_debugfs_init(void) |
| 266 | { | 266 | { |
| 267 | if (!IS_ENABLED(DEBUG_FS)) | ||
| 268 | return 0; | ||
| 269 | |||
| 267 | dfs_rootdir = debugfs_create_dir("ubi", NULL); | 270 | dfs_rootdir = debugfs_create_dir("ubi", NULL); |
| 268 | if (IS_ERR_OR_NULL(dfs_rootdir)) { | 271 | if (IS_ERR_OR_NULL(dfs_rootdir)) { |
| 269 | int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); | 272 | int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); |
| @@ -281,7 +284,8 @@ int ubi_debugfs_init(void) | |||
| 281 | */ | 284 | */ |
| 282 | void ubi_debugfs_exit(void) | 285 | void ubi_debugfs_exit(void) |
| 283 | { | 286 | { |
| 284 | debugfs_remove(dfs_rootdir); | 287 | if (IS_ENABLED(DEBUG_FS)) |
| 288 | debugfs_remove(dfs_rootdir); | ||
| 285 | } | 289 | } |
| 286 | 290 | ||
| 287 | /* Read an UBI debugfs file */ | 291 | /* Read an UBI debugfs file */ |
| @@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) | |||
| 403 | struct dentry *dent; | 407 | struct dentry *dent; |
| 404 | struct ubi_debug_info *d = ubi->dbg; | 408 | struct ubi_debug_info *d = ubi->dbg; |
| 405 | 409 | ||
| 410 | if (!IS_ENABLED(DEBUG_FS)) | ||
| 411 | return 0; | ||
| 412 | |||
| 406 | n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, | 413 | n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, |
| 407 | ubi->ubi_num); | 414 | ubi->ubi_num); |
| 408 | if (n == UBI_DFS_DIR_LEN) { | 415 | if (n == UBI_DFS_DIR_LEN) { |
| @@ -470,5 +477,6 @@ out: | |||
| 470 | */ | 477 | */ |
| 471 | void ubi_debugfs_exit_dev(struct ubi_device *ubi) | 478 | void ubi_debugfs_exit_dev(struct ubi_device *ubi) |
| 472 | { | 479 | { |
| 473 | debugfs_remove_recursive(ubi->dbg->dfs_dir); | 480 | if (IS_ENABLED(DEBUG_FS)) |
| 481 | debugfs_remove_recursive(ubi->dbg->dfs_dir); | ||
| 474 | } | 482 | } |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 9df100a4ec38..b6be644e7b85 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
| @@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) | |||
| 1262 | dbg_wl("flush pending work for LEB %d:%d (%d pending works)", | 1262 | dbg_wl("flush pending work for LEB %d:%d (%d pending works)", |
| 1263 | vol_id, lnum, ubi->works_count); | 1263 | vol_id, lnum, ubi->works_count); |
| 1264 | 1264 | ||
| 1265 | down_write(&ubi->work_sem); | ||
| 1266 | while (found) { | 1265 | while (found) { |
| 1267 | struct ubi_work *wrk; | 1266 | struct ubi_work *wrk; |
| 1268 | found = 0; | 1267 | found = 0; |
| 1269 | 1268 | ||
| 1269 | down_read(&ubi->work_sem); | ||
| 1270 | spin_lock(&ubi->wl_lock); | 1270 | spin_lock(&ubi->wl_lock); |
| 1271 | list_for_each_entry(wrk, &ubi->works, list) { | 1271 | list_for_each_entry(wrk, &ubi->works, list) { |
| 1272 | if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && | 1272 | if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && |
| @@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) | |||
| 1277 | spin_unlock(&ubi->wl_lock); | 1277 | spin_unlock(&ubi->wl_lock); |
| 1278 | 1278 | ||
| 1279 | err = wrk->func(ubi, wrk, 0); | 1279 | err = wrk->func(ubi, wrk, 0); |
| 1280 | if (err) | 1280 | if (err) { |
| 1281 | goto out; | 1281 | up_read(&ubi->work_sem); |
| 1282 | return err; | ||
| 1283 | } | ||
| 1284 | |||
| 1282 | spin_lock(&ubi->wl_lock); | 1285 | spin_lock(&ubi->wl_lock); |
| 1283 | found = 1; | 1286 | found = 1; |
| 1284 | break; | 1287 | break; |
| 1285 | } | 1288 | } |
| 1286 | } | 1289 | } |
| 1287 | spin_unlock(&ubi->wl_lock); | 1290 | spin_unlock(&ubi->wl_lock); |
| 1291 | up_read(&ubi->work_sem); | ||
| 1288 | } | 1292 | } |
| 1289 | 1293 | ||
| 1290 | out: | 1294 | /* |
| 1295 | * Make sure all the works which have been done in parallel are | ||
| 1296 | * finished. | ||
| 1297 | */ | ||
| 1298 | down_write(&ubi->work_sem); | ||
| 1291 | up_write(&ubi->work_sem); | 1299 | up_write(&ubi->work_sem); |
| 1300 | |||
| 1292 | return err; | 1301 | return err; |
| 1293 | } | 1302 | } |
| 1294 | 1303 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2ee8cf9e8a3b..b9c2ae62166d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -76,6 +76,7 @@ | |||
| 76 | #include <net/route.h> | 76 | #include <net/route.h> |
| 77 | #include <net/net_namespace.h> | 77 | #include <net/net_namespace.h> |
| 78 | #include <net/netns/generic.h> | 78 | #include <net/netns/generic.h> |
| 79 | #include <net/pkt_sched.h> | ||
| 79 | #include "bonding.h" | 80 | #include "bonding.h" |
| 80 | #include "bond_3ad.h" | 81 | #include "bond_3ad.h" |
| 81 | #include "bond_alb.h" | 82 | #include "bond_alb.h" |
| @@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) | |||
| 381 | return next; | 382 | return next; |
| 382 | } | 383 | } |
| 383 | 384 | ||
| 384 | #define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) | ||
| 385 | |||
| 386 | /** | 385 | /** |
| 387 | * bond_dev_queue_xmit - Prepare skb for xmit. | 386 | * bond_dev_queue_xmit - Prepare skb for xmit. |
| 388 | * | 387 | * |
| @@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, | |||
| 395 | { | 394 | { |
| 396 | skb->dev = slave_dev; | 395 | skb->dev = slave_dev; |
| 397 | 396 | ||
| 398 | skb->queue_mapping = bond_queue_mapping(skb); | 397 | BUILD_BUG_ON(sizeof(skb->queue_mapping) != |
| 398 | sizeof(qdisc_skb_cb(skb)->bond_queue_mapping)); | ||
| 399 | skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping; | ||
| 399 | 400 | ||
| 400 | if (unlikely(netpoll_tx_running(slave_dev))) | 401 | if (unlikely(netpoll_tx_running(slave_dev))) |
| 401 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); | 402 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); |
| @@ -4171,7 +4172,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
| 4171 | /* | 4172 | /* |
| 4172 | * Save the original txq to restore before passing to the driver | 4173 | * Save the original txq to restore before passing to the driver |
| 4173 | */ | 4174 | */ |
| 4174 | bond_queue_mapping(skb) = skb->queue_mapping; | 4175 | qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping; |
| 4175 | 4176 | ||
| 4176 | if (unlikely(txq >= dev->real_num_tx_queues)) { | 4177 | if (unlikely(txq >= dev->real_num_tx_queues)) { |
| 4177 | do { | 4178 | do { |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index aef42f045320..485bedb8278c 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
| @@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d, | |||
| 1082 | } | 1082 | } |
| 1083 | } | 1083 | } |
| 1084 | 1084 | ||
| 1085 | pr_info("%s: Unable to set %.*s as primary slave.\n", | 1085 | strncpy(bond->params.primary, ifname, IFNAMSIZ); |
| 1086 | bond->dev->name, (int)strlen(buf) - 1, buf); | 1086 | bond->params.primary[IFNAMSIZ - 1] = 0; |
| 1087 | |||
| 1088 | pr_info("%s: Recording %s as primary, " | ||
| 1089 | "but it has not been enslaved to %s yet.\n", | ||
| 1090 | bond->dev->name, ifname, bond->dev->name); | ||
| 1087 | out: | 1091 | out: |
| 1088 | write_unlock_bh(&bond->curr_slave_lock); | 1092 | write_unlock_bh(&bond->curr_slave_lock); |
| 1089 | read_unlock(&bond->lock); | 1093 | read_unlock(&bond->lock); |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 536bda072a16..8dc84d66eea1 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
| @@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev, | |||
| 686 | * | 686 | * |
| 687 | * We iterate from priv->tx_echo to priv->tx_next and check if the | 687 | * We iterate from priv->tx_echo to priv->tx_next and check if the |
| 688 | * packet has been transmitted, echo it back to the CAN framework. | 688 | * packet has been transmitted, echo it back to the CAN framework. |
| 689 | * If we discover a not yet transmitted package, stop looking for more. | 689 | * If we discover a not yet transmitted packet, stop looking for more. |
| 690 | */ | 690 | */ |
| 691 | static void c_can_do_tx(struct net_device *dev) | 691 | static void c_can_do_tx(struct net_device *dev) |
| 692 | { | 692 | { |
| @@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev) | |||
| 698 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { | 698 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { |
| 699 | msg_obj_no = get_tx_echo_msg_obj(priv); | 699 | msg_obj_no = get_tx_echo_msg_obj(priv); |
| 700 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); | 700 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); |
| 701 | if (!(val & (1 << msg_obj_no))) { | 701 | if (!(val & (1 << (msg_obj_no - 1)))) { |
| 702 | can_get_echo_skb(dev, | 702 | can_get_echo_skb(dev, |
| 703 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); | 703 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); |
| 704 | stats->tx_bytes += priv->read_reg(priv, | 704 | stats->tx_bytes += priv->read_reg(priv, |
| @@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev) | |||
| 706 | & IF_MCONT_DLC_MASK; | 706 | & IF_MCONT_DLC_MASK; |
| 707 | stats->tx_packets++; | 707 | stats->tx_packets++; |
| 708 | c_can_inval_msg_object(dev, 0, msg_obj_no); | 708 | c_can_inval_msg_object(dev, 0, msg_obj_no); |
| 709 | } else { | ||
| 710 | break; | ||
| 709 | } | 711 | } |
| 710 | } | 712 | } |
| 711 | 713 | ||
| @@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota) | |||
| 950 | struct net_device *dev = napi->dev; | 952 | struct net_device *dev = napi->dev; |
| 951 | struct c_can_priv *priv = netdev_priv(dev); | 953 | struct c_can_priv *priv = netdev_priv(dev); |
| 952 | 954 | ||
| 953 | irqstatus = priv->read_reg(priv, &priv->regs->interrupt); | 955 | irqstatus = priv->irqstatus; |
| 954 | if (!irqstatus) | 956 | if (!irqstatus) |
| 955 | goto end; | 957 | goto end; |
| 956 | 958 | ||
| @@ -1028,12 +1030,11 @@ end: | |||
| 1028 | 1030 | ||
| 1029 | static irqreturn_t c_can_isr(int irq, void *dev_id) | 1031 | static irqreturn_t c_can_isr(int irq, void *dev_id) |
| 1030 | { | 1032 | { |
| 1031 | u16 irqstatus; | ||
| 1032 | struct net_device *dev = (struct net_device *)dev_id; | 1033 | struct net_device *dev = (struct net_device *)dev_id; |
| 1033 | struct c_can_priv *priv = netdev_priv(dev); | 1034 | struct c_can_priv *priv = netdev_priv(dev); |
| 1034 | 1035 | ||
| 1035 | irqstatus = priv->read_reg(priv, &priv->regs->interrupt); | 1036 | priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt); |
| 1036 | if (!irqstatus) | 1037 | if (!priv->irqstatus) |
| 1037 | return IRQ_NONE; | 1038 | return IRQ_NONE; |
| 1038 | 1039 | ||
| 1039 | /* disable all interrupts and schedule the NAPI */ | 1040 | /* disable all interrupts and schedule the NAPI */ |
| @@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev) | |||
| 1063 | goto exit_irq_fail; | 1064 | goto exit_irq_fail; |
| 1064 | } | 1065 | } |
| 1065 | 1066 | ||
| 1067 | napi_enable(&priv->napi); | ||
| 1068 | |||
| 1066 | /* start the c_can controller */ | 1069 | /* start the c_can controller */ |
| 1067 | c_can_start(dev); | 1070 | c_can_start(dev); |
| 1068 | 1071 | ||
| 1069 | napi_enable(&priv->napi); | ||
| 1070 | netif_start_queue(dev); | 1072 | netif_start_queue(dev); |
| 1071 | 1073 | ||
| 1072 | return 0; | 1074 | return 0; |
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 9b7fbef3d09a..5f32d34af507 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h | |||
| @@ -76,6 +76,7 @@ struct c_can_priv { | |||
| 76 | unsigned int tx_next; | 76 | unsigned int tx_next; |
| 77 | unsigned int tx_echo; | 77 | unsigned int tx_echo; |
| 78 | void *priv; /* for board-specific data */ | 78 | void *priv; /* for board-specific data */ |
| 79 | u16 irqstatus; | ||
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 81 | struct net_device *alloc_c_can_dev(void); | 82 | struct net_device *alloc_c_can_dev(void); |
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 53115eee8075..688371cda37a 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c | |||
| @@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev, | |||
| 154 | struct cc770_platform_data *pdata = pdev->dev.platform_data; | 154 | struct cc770_platform_data *pdata = pdev->dev.platform_data; |
| 155 | 155 | ||
| 156 | priv->can.clock.freq = pdata->osc_freq; | 156 | priv->can.clock.freq = pdata->osc_freq; |
| 157 | if (priv->cpu_interface | CPUIF_DSC) | 157 | if (priv->cpu_interface & CPUIF_DSC) |
| 158 | priv->can.clock.freq /= 2; | 158 | priv->can.clock.freq /= 2; |
| 159 | priv->clkout = pdata->cor; | 159 | priv->clkout = pdata->cor; |
| 160 | priv->bus_config = pdata->bcr; | 160 | priv->bus_config = pdata->bcr; |
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 442d91a2747b..bab0158f1cc3 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c | |||
| @@ -187,8 +187,10 @@ static int __init dummy_init_module(void) | |||
| 187 | rtnl_lock(); | 187 | rtnl_lock(); |
| 188 | err = __rtnl_link_register(&dummy_link_ops); | 188 | err = __rtnl_link_register(&dummy_link_ops); |
| 189 | 189 | ||
| 190 | for (i = 0; i < numdummies && !err; i++) | 190 | for (i = 0; i < numdummies && !err; i++) { |
| 191 | err = dummy_init_one(); | 191 | err = dummy_init_one(); |
| 192 | cond_resched(); | ||
| 193 | } | ||
| 192 | if (err < 0) | 194 | if (err < 0) |
| 193 | __rtnl_link_unregister(&dummy_link_ops); | 195 | __rtnl_link_unregister(&dummy_link_ops); |
| 194 | rtnl_unlock(); | 196 | rtnl_unlock(); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e30e2a2f354c..7de824184979 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
| @@ -747,21 +747,6 @@ struct bnx2x_fastpath { | |||
| 747 | 747 | ||
| 748 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | 748 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
| 749 | 749 | ||
| 750 | #define BNX2X_IP_CSUM_ERR(cqe) \ | ||
| 751 | (!((cqe)->fast_path_cqe.status_flags & \ | ||
| 752 | ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ | ||
| 753 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
| 754 | ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) | ||
| 755 | |||
| 756 | #define BNX2X_L4_CSUM_ERR(cqe) \ | ||
| 757 | (!((cqe)->fast_path_cqe.status_flags & \ | ||
| 758 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ | ||
| 759 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
| 760 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) | ||
| 761 | |||
| 762 | #define BNX2X_RX_CSUM_OK(cqe) \ | ||
| 763 | (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) | ||
| 764 | |||
| 765 | #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ | 750 | #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ |
| 766 | (((le16_to_cpu(flags) & \ | 751 | (((le16_to_cpu(flags) & \ |
| 767 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ | 752 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ad0743bf4bde..cbc56f274e0c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp, | |||
| 617 | return 0; | 617 | return 0; |
| 618 | } | 618 | } |
| 619 | 619 | ||
| 620 | static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, | ||
| 621 | struct bnx2x_fastpath *fp) | ||
| 622 | { | ||
| 623 | /* Do nothing if no IP/L4 csum validation was done */ | ||
| 624 | |||
| 625 | if (cqe->fast_path_cqe.status_flags & | ||
| 626 | (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | | ||
| 627 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) | ||
| 628 | return; | ||
| 629 | |||
| 630 | /* If both IP/L4 validation were done, check if an error was found. */ | ||
| 631 | |||
| 632 | if (cqe->fast_path_cqe.type_error_flags & | ||
| 633 | (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | | ||
| 634 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) | ||
| 635 | fp->eth_q_stats.hw_csum_err++; | ||
| 636 | else | ||
| 637 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 638 | } | ||
| 620 | 639 | ||
| 621 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | 640 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) |
| 622 | { | 641 | { |
| @@ -806,13 +825,9 @@ reuse_rx: | |||
| 806 | 825 | ||
| 807 | skb_checksum_none_assert(skb); | 826 | skb_checksum_none_assert(skb); |
| 808 | 827 | ||
| 809 | if (bp->dev->features & NETIF_F_RXCSUM) { | 828 | if (bp->dev->features & NETIF_F_RXCSUM) |
| 829 | bnx2x_csum_validate(skb, cqe, fp); | ||
| 810 | 830 | ||
| 811 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | ||
| 812 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 813 | else | ||
| 814 | fp->eth_q_stats.hw_csum_err++; | ||
| 815 | } | ||
| 816 | 831 | ||
| 817 | skb_record_rx_queue(skb, fp->rx_queue); | 832 | skb_record_rx_queue(skb, fp->rx_queue); |
| 818 | 833 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index edeeb516807a..e47ff8be1d7b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
| 14275 | } | 14275 | } |
| 14276 | } | 14276 | } |
| 14277 | 14277 | ||
| 14278 | if (tg3_flag(tp, 5755_PLUS)) | 14278 | if (tg3_flag(tp, 5755_PLUS) || |
| 14279 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | ||
| 14279 | tg3_flag_set(tp, SHORT_DMA_BUG); | 14280 | tg3_flag_set(tp, SHORT_DMA_BUG); |
| 14280 | 14281 | ||
| 14281 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | 14282 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08efd308d78a..fdb50cec6b51 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, | |||
| 736 | 736 | ||
| 737 | copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); | 737 | copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); |
| 738 | if (copied) { | 738 | if (copied) { |
| 739 | int gso_segs = skb_shinfo(skb)->gso_segs; | ||
| 740 | |||
| 739 | /* record the sent skb in the sent_skb table */ | 741 | /* record the sent skb in the sent_skb table */ |
| 740 | BUG_ON(txo->sent_skb_list[start]); | 742 | BUG_ON(txo->sent_skb_list[start]); |
| 741 | txo->sent_skb_list[start] = skb; | 743 | txo->sent_skb_list[start] = skb; |
| @@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, | |||
| 753 | 755 | ||
| 754 | be_txq_notify(adapter, txq->id, wrb_cnt); | 756 | be_txq_notify(adapter, txq->id, wrb_cnt); |
| 755 | 757 | ||
| 756 | be_tx_stats_update(txo, wrb_cnt, copied, | 758 | be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped); |
| 757 | skb_shinfo(skb)->gso_segs, stopped); | ||
| 758 | } else { | 759 | } else { |
| 759 | txq->head = start; | 760 | txq->head = start; |
| 760 | dev_kfree_skb_any(skb); | 761 | dev_kfree_skb_any(skb); |
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index d863075df7a4..905e2147d918 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c | |||
| @@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev, | |||
| 258 | * When SoL/IDER sessions are active, autoneg/speed/duplex | 258 | * When SoL/IDER sessions are active, autoneg/speed/duplex |
| 259 | * cannot be changed | 259 | * cannot be changed |
| 260 | */ | 260 | */ |
| 261 | if (hw->phy.ops.check_reset_block(hw)) { | 261 | if (hw->phy.ops.check_reset_block && |
| 262 | hw->phy.ops.check_reset_block(hw)) { | ||
| 262 | e_err("Cannot change link characteristics when SoL/IDER is active.\n"); | 263 | e_err("Cannot change link characteristics when SoL/IDER is active.\n"); |
| 263 | return -EINVAL; | 264 | return -EINVAL; |
| 264 | } | 265 | } |
| @@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | |||
| 1615 | * PHY loopback cannot be performed if SoL/IDER | 1616 | * PHY loopback cannot be performed if SoL/IDER |
| 1616 | * sessions are active | 1617 | * sessions are active |
| 1617 | */ | 1618 | */ |
| 1618 | if (hw->phy.ops.check_reset_block(hw)) { | 1619 | if (hw->phy.ops.check_reset_block && |
| 1620 | hw->phy.ops.check_reset_block(hw)) { | ||
| 1619 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); | 1621 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); |
| 1620 | *data = 0; | 1622 | *data = 0; |
| 1621 | goto out; | 1623 | goto out; |
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 026e8b3ab52e..a13439928488 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c | |||
| @@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw) | |||
| 709 | * In the case of the phy reset being blocked, we already have a link. | 709 | * In the case of the phy reset being blocked, we already have a link. |
| 710 | * We do not need to set it up again. | 710 | * We do not need to set it up again. |
| 711 | */ | 711 | */ |
| 712 | if (hw->phy.ops.check_reset_block(hw)) | 712 | if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) |
| 713 | return 0; | 713 | return 0; |
| 714 | 714 | ||
| 715 | /* | 715 | /* |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a4b0435b00dc..31d37a2b5ba8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
| @@ -6237,7 +6237,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
| 6237 | adapter->hw.phy.ms_type = e1000_ms_hw_default; | 6237 | adapter->hw.phy.ms_type = e1000_ms_hw_default; |
| 6238 | } | 6238 | } |
| 6239 | 6239 | ||
| 6240 | if (hw->phy.ops.check_reset_block(hw)) | 6240 | if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) |
| 6241 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); | 6241 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); |
| 6242 | 6242 | ||
| 6243 | /* Set initial default active device features */ | 6243 | /* Set initial default active device features */ |
| @@ -6404,7 +6404,7 @@ err_register: | |||
| 6404 | if (!(adapter->flags & FLAG_HAS_AMT)) | 6404 | if (!(adapter->flags & FLAG_HAS_AMT)) |
| 6405 | e1000e_release_hw_control(adapter); | 6405 | e1000e_release_hw_control(adapter); |
| 6406 | err_eeprom: | 6406 | err_eeprom: |
| 6407 | if (!hw->phy.ops.check_reset_block(hw)) | 6407 | if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) |
| 6408 | e1000_phy_hw_reset(&adapter->hw); | 6408 | e1000_phy_hw_reset(&adapter->hw); |
| 6409 | err_hw_init: | 6409 | err_hw_init: |
| 6410 | kfree(adapter->tx_ring); | 6410 | kfree(adapter->tx_ring); |
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 0334d013bc3c..b860d4f7ea2a 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c | |||
| @@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
| 2155 | s32 ret_val; | 2155 | s32 ret_val; |
| 2156 | u32 ctrl; | 2156 | u32 ctrl; |
| 2157 | 2157 | ||
| 2158 | ret_val = phy->ops.check_reset_block(hw); | 2158 | if (phy->ops.check_reset_block) { |
| 2159 | if (ret_val) | 2159 | ret_val = phy->ops.check_reset_block(hw); |
| 2160 | return 0; | 2160 | if (ret_val) |
| 2161 | return 0; | ||
| 2162 | } | ||
| 2161 | 2163 | ||
| 2162 | ret_val = phy->ops.acquire(hw); | 2164 | ret_val = phy->ops.acquire(hw); |
| 2163 | if (ret_val) | 2165 | if (ret_val) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bf20457ea23a..17ad6a3c1be1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, | |||
| 1390 | union ixgbe_adv_rx_desc *rx_desc, | 1390 | union ixgbe_adv_rx_desc *rx_desc, |
| 1391 | struct sk_buff *skb) | 1391 | struct sk_buff *skb) |
| 1392 | { | 1392 | { |
| 1393 | struct net_device *dev = rx_ring->netdev; | ||
| 1394 | |||
| 1393 | ixgbe_update_rsc_stats(rx_ring, skb); | 1395 | ixgbe_update_rsc_stats(rx_ring, skb); |
| 1394 | 1396 | ||
| 1395 | ixgbe_rx_hash(rx_ring, rx_desc, skb); | 1397 | ixgbe_rx_hash(rx_ring, rx_desc, skb); |
| @@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, | |||
| 1401 | ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); | 1403 | ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); |
| 1402 | #endif | 1404 | #endif |
| 1403 | 1405 | ||
| 1404 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { | 1406 | if ((dev->features & NETIF_F_HW_VLAN_RX) && |
| 1407 | ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { | ||
| 1405 | u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); | 1408 | u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); |
| 1406 | __vlan_hwaccel_put_tag(skb, vid); | 1409 | __vlan_hwaccel_put_tag(skb, vid); |
| 1407 | } | 1410 | } |
| 1408 | 1411 | ||
| 1409 | skb_record_rx_queue(skb, rx_ring->queue_index); | 1412 | skb_record_rx_queue(skb, rx_ring->queue_index); |
| 1410 | 1413 | ||
| 1411 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); | 1414 | skb->protocol = eth_type_trans(skb, dev); |
| 1412 | } | 1415 | } |
| 1413 | 1416 | ||
| 1414 | static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, | 1417 | static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, |
| @@ -3607,10 +3610,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
| 3607 | if (hw->mac.type == ixgbe_mac_82598EB) | 3610 | if (hw->mac.type == ixgbe_mac_82598EB) |
| 3608 | netif_set_gso_max_size(adapter->netdev, 32768); | 3611 | netif_set_gso_max_size(adapter->netdev, 32768); |
| 3609 | 3612 | ||
| 3610 | |||
| 3611 | /* Enable VLAN tag insert/strip */ | ||
| 3612 | adapter->netdev->features |= NETIF_F_HW_VLAN_RX; | ||
| 3613 | |||
| 3614 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); | 3613 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); |
| 3615 | 3614 | ||
| 3616 | #ifdef IXGBE_FCOE | 3615 | #ifdef IXGBE_FCOE |
| @@ -6701,11 +6700,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, | |||
| 6701 | { | 6700 | { |
| 6702 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6701 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 6703 | 6702 | ||
| 6704 | #ifdef CONFIG_DCB | ||
| 6705 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | ||
| 6706 | features &= ~NETIF_F_HW_VLAN_RX; | ||
| 6707 | #endif | ||
| 6708 | |||
| 6709 | /* return error if RXHASH is being enabled when RSS is not supported */ | 6703 | /* return error if RXHASH is being enabled when RSS is not supported */ |
| 6710 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | 6704 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) |
| 6711 | features &= ~NETIF_F_RXHASH; | 6705 | features &= ~NETIF_F_RXHASH; |
| @@ -6718,7 +6712,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, | |||
| 6718 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) | 6712 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) |
| 6719 | features &= ~NETIF_F_LRO; | 6713 | features &= ~NETIF_F_LRO; |
| 6720 | 6714 | ||
| 6721 | |||
| 6722 | return features; | 6715 | return features; |
| 6723 | } | 6716 | } |
| 6724 | 6717 | ||
| @@ -6766,6 +6759,11 @@ static int ixgbe_set_features(struct net_device *netdev, | |||
| 6766 | need_reset = true; | 6759 | need_reset = true; |
| 6767 | } | 6760 | } |
| 6768 | 6761 | ||
| 6762 | if (features & NETIF_F_HW_VLAN_RX) | ||
| 6763 | ixgbe_vlan_strip_enable(adapter); | ||
| 6764 | else | ||
| 6765 | ixgbe_vlan_strip_disable(adapter); | ||
| 6766 | |||
| 6769 | if (changed & NETIF_F_RXALL) | 6767 | if (changed & NETIF_F_RXALL) |
| 6770 | need_reset = true; | 6768 | need_reset = true; |
| 6771 | 6769 | ||
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 04d901d0ff63..f0f06b2bc28b 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
| @@ -436,7 +436,9 @@ struct mv643xx_eth_private { | |||
| 436 | /* | 436 | /* |
| 437 | * Hardware-specific parameters. | 437 | * Hardware-specific parameters. |
| 438 | */ | 438 | */ |
| 439 | #if defined(CONFIG_HAVE_CLK) | ||
| 439 | struct clk *clk; | 440 | struct clk *clk; |
| 441 | #endif | ||
| 440 | unsigned int t_clk; | 442 | unsigned int t_clk; |
| 441 | }; | 443 | }; |
| 442 | 444 | ||
| @@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
| 2895 | mp->dev = dev; | 2897 | mp->dev = dev; |
| 2896 | 2898 | ||
| 2897 | /* | 2899 | /* |
| 2898 | * Get the clk rate, if there is one, otherwise use the default. | 2900 | * Start with a default rate, and if there is a clock, allow |
| 2901 | * it to override the default. | ||
| 2899 | */ | 2902 | */ |
| 2903 | mp->t_clk = 133000000; | ||
| 2904 | #if defined(CONFIG_HAVE_CLK) | ||
| 2900 | mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); | 2905 | mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); |
| 2901 | if (!IS_ERR(mp->clk)) { | 2906 | if (!IS_ERR(mp->clk)) { |
| 2902 | clk_prepare_enable(mp->clk); | 2907 | clk_prepare_enable(mp->clk); |
| 2903 | mp->t_clk = clk_get_rate(mp->clk); | 2908 | mp->t_clk = clk_get_rate(mp->clk); |
| 2904 | } else { | ||
| 2905 | mp->t_clk = 133000000; | ||
| 2906 | printk(KERN_WARNING "Unable to get clock"); | ||
| 2907 | } | 2909 | } |
| 2908 | 2910 | #endif | |
| 2909 | set_params(mp, pd); | 2911 | set_params(mp, pd); |
| 2910 | netif_set_real_num_tx_queues(dev, mp->txq_count); | 2912 | netif_set_real_num_tx_queues(dev, mp->txq_count); |
| 2911 | netif_set_real_num_rx_queues(dev, mp->rxq_count); | 2913 | netif_set_real_num_rx_queues(dev, mp->rxq_count); |
| @@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev) | |||
| 2995 | phy_detach(mp->phy); | 2997 | phy_detach(mp->phy); |
| 2996 | cancel_work_sync(&mp->tx_timeout_task); | 2998 | cancel_work_sync(&mp->tx_timeout_task); |
| 2997 | 2999 | ||
| 3000 | #if defined(CONFIG_HAVE_CLK) | ||
| 2998 | if (!IS_ERR(mp->clk)) { | 3001 | if (!IS_ERR(mp->clk)) { |
| 2999 | clk_disable_unprepare(mp->clk); | 3002 | clk_disable_unprepare(mp->clk); |
| 3000 | clk_put(mp->clk); | 3003 | clk_put(mp->clk); |
| 3001 | } | 3004 | } |
| 3005 | #endif | ||
| 3006 | |||
| 3002 | free_netdev(mp->dev); | 3007 | free_netdev(mp->dev); |
| 3003 | 3008 | ||
| 3004 | platform_set_drvdata(pdev, NULL); | 3009 | platform_set_drvdata(pdev, NULL); |
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index cace36f2ab92..28a54451a3e5 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
| @@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) | |||
| 4381 | struct sky2_port *sky2 = netdev_priv(dev); | 4381 | struct sky2_port *sky2 = netdev_priv(dev); |
| 4382 | netdev_features_t changed = dev->features ^ features; | 4382 | netdev_features_t changed = dev->features ^ features; |
| 4383 | 4383 | ||
| 4384 | if (changed & NETIF_F_RXCSUM) { | 4384 | if ((changed & NETIF_F_RXCSUM) && |
| 4385 | bool on = features & NETIF_F_RXCSUM; | 4385 | !(sky2->hw->flags & SKY2_HW_NEW_LE)) { |
| 4386 | sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), | 4386 | sky2_write32(sky2->hw, |
| 4387 | on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | 4387 | Q_ADDR(rxqaddr[sky2->port], Q_CSR), |
| 4388 | (features & NETIF_F_RXCSUM) | ||
| 4389 | ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | ||
| 4388 | } | 4390 | } |
| 4389 | 4391 | ||
| 4390 | if (changed & NETIF_F_RXHASH) | 4392 | if (changed & NETIF_F_RXHASH) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 1fe2c7a8b40c..a8fb52992c64 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
| @@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, | |||
| 697 | if (slave != dev->caps.function) | 697 | if (slave != dev->caps.function) |
| 698 | memset(inbox->buf, 0, 256); | 698 | memset(inbox->buf, 0, 256); |
| 699 | if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { | 699 | if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { |
| 700 | *(u8 *) inbox->buf = !!reset_qkey_viols << 6; | 700 | *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; |
| 701 | ((__be32 *) inbox->buf)[2] = agg_cap_mask; | 701 | ((__be32 *) inbox->buf)[2] = agg_cap_mask; |
| 702 | } else { | 702 | } else { |
| 703 | ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; | 703 | ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; |
| 704 | ((__be32 *) inbox->buf)[1] = agg_cap_mask; | 704 | ((__be32 *) inbox->buf)[1] = agg_cap_mask; |
| 705 | } | 705 | } |
| 706 | 706 | ||
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8d2666fcffd7..083d6715335c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
| @@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev) | |||
| 946 | /* Update stats */ | 946 | /* Update stats */ |
| 947 | ndev->stats.tx_packets++; | 947 | ndev->stats.tx_packets++; |
| 948 | ndev->stats.tx_bytes += skb->len; | 948 | ndev->stats.tx_bytes += skb->len; |
| 949 | |||
| 950 | /* Free buffer */ | ||
| 951 | dev_kfree_skb_irq(skb); | ||
| 952 | } | 949 | } |
| 950 | dev_kfree_skb_irq(skb); | ||
| 953 | 951 | ||
| 954 | txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); | 952 | txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); |
| 955 | } | 953 | } |
| 956 | 954 | ||
| 957 | if (netif_queue_stopped(ndev)) | 955 | if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) { |
| 958 | netif_wake_queue(ndev); | 956 | if (netif_queue_stopped(ndev)) |
| 957 | netif_wake_queue(ndev); | ||
| 958 | } | ||
| 959 | } | 959 | } |
| 960 | 960 | ||
| 961 | static int __lpc_handle_recv(struct net_device *ndev, int budget) | 961 | static int __lpc_handle_recv(struct net_device *ndev, int budget) |
| @@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = { | |||
| 1320 | .ndo_set_rx_mode = lpc_eth_set_multicast_list, | 1320 | .ndo_set_rx_mode = lpc_eth_set_multicast_list, |
| 1321 | .ndo_do_ioctl = lpc_eth_ioctl, | 1321 | .ndo_do_ioctl = lpc_eth_ioctl, |
| 1322 | .ndo_set_mac_address = lpc_set_mac_address, | 1322 | .ndo_set_mac_address = lpc_set_mac_address, |
| 1323 | .ndo_change_mtu = eth_change_mtu, | ||
| 1323 | }; | 1324 | }; |
| 1324 | 1325 | ||
| 1325 | static int lpc_eth_drv_probe(struct platform_device *pdev) | 1326 | static int lpc_eth_drv_probe(struct platform_device *pdev) |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9757ce3543a0..7260aa79466a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -5889,11 +5889,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) | |||
| 5889 | if (status & LinkChg) | 5889 | if (status & LinkChg) |
| 5890 | __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); | 5890 | __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); |
| 5891 | 5891 | ||
| 5892 | napi_disable(&tp->napi); | 5892 | rtl_irq_enable_all(tp); |
| 5893 | rtl_irq_disable(tp); | ||
| 5894 | |||
| 5895 | napi_enable(&tp->napi); | ||
| 5896 | napi_schedule(&tp->napi); | ||
| 5897 | } | 5893 | } |
| 5898 | 5894 | ||
| 5899 | static void rtl_task(struct work_struct *work) | 5895 | static void rtl_task(struct work_struct *work) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 036428348faa..9f448279e12a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
| @@ -13,7 +13,7 @@ config STMMAC_ETH | |||
| 13 | if STMMAC_ETH | 13 | if STMMAC_ETH |
| 14 | 14 | ||
| 15 | config STMMAC_PLATFORM | 15 | config STMMAC_PLATFORM |
| 16 | tristate "STMMAC platform bus support" | 16 | bool "STMMAC Platform bus support" |
| 17 | depends on STMMAC_ETH | 17 | depends on STMMAC_ETH |
| 18 | default y | 18 | default y |
| 19 | ---help--- | 19 | ---help--- |
| @@ -26,7 +26,7 @@ config STMMAC_PLATFORM | |||
| 26 | If unsure, say N. | 26 | If unsure, say N. |
| 27 | 27 | ||
| 28 | config STMMAC_PCI | 28 | config STMMAC_PCI |
| 29 | tristate "STMMAC support on PCI bus (EXPERIMENTAL)" | 29 | bool "STMMAC PCI bus support (EXPERIMENTAL)" |
| 30 | depends on STMMAC_ETH && PCI && EXPERIMENTAL | 30 | depends on STMMAC_ETH && PCI && EXPERIMENTAL |
| 31 | ---help--- | 31 | ---help--- |
| 32 | This is to select the Synopsys DWMAC available on PCI devices, | 32 | This is to select the Synopsys DWMAC available on PCI devices, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 6b5d060ee9de..dc20c56efc9d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
| 27 | #include <linux/stmmac.h> | 27 | #include <linux/stmmac.h> |
| 28 | #include <linux/phy.h> | 28 | #include <linux/phy.h> |
| 29 | #include <linux/pci.h> | ||
| 29 | #include "common.h" | 30 | #include "common.h" |
| 30 | #ifdef CONFIG_STMMAC_TIMER | 31 | #ifdef CONFIG_STMMAC_TIMER |
| 31 | #include "stmmac_timer.h" | 32 | #include "stmmac_timer.h" |
| @@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev); | |||
| 95 | extern void stmmac_set_ethtool_ops(struct net_device *netdev); | 96 | extern void stmmac_set_ethtool_ops(struct net_device *netdev); |
| 96 | extern const struct stmmac_desc_ops enh_desc_ops; | 97 | extern const struct stmmac_desc_ops enh_desc_ops; |
| 97 | extern const struct stmmac_desc_ops ndesc_ops; | 98 | extern const struct stmmac_desc_ops ndesc_ops; |
| 98 | |||
| 99 | int stmmac_freeze(struct net_device *ndev); | 99 | int stmmac_freeze(struct net_device *ndev); |
| 100 | int stmmac_restore(struct net_device *ndev); | 100 | int stmmac_restore(struct net_device *ndev); |
| 101 | int stmmac_resume(struct net_device *ndev); | 101 | int stmmac_resume(struct net_device *ndev); |
| @@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
| 109 | static inline int stmmac_clk_enable(struct stmmac_priv *priv) | 109 | static inline int stmmac_clk_enable(struct stmmac_priv *priv) |
| 110 | { | 110 | { |
| 111 | if (!IS_ERR(priv->stmmac_clk)) | 111 | if (!IS_ERR(priv->stmmac_clk)) |
| 112 | return clk_enable(priv->stmmac_clk); | 112 | return clk_prepare_enable(priv->stmmac_clk); |
| 113 | 113 | ||
| 114 | return 0; | 114 | return 0; |
| 115 | } | 115 | } |
| @@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv) | |||
| 119 | if (IS_ERR(priv->stmmac_clk)) | 119 | if (IS_ERR(priv->stmmac_clk)) |
| 120 | return; | 120 | return; |
| 121 | 121 | ||
| 122 | clk_disable(priv->stmmac_clk); | 122 | clk_disable_unprepare(priv->stmmac_clk); |
| 123 | } | 123 | } |
| 124 | static inline int stmmac_clk_get(struct stmmac_priv *priv) | 124 | static inline int stmmac_clk_get(struct stmmac_priv *priv) |
| 125 | { | 125 | { |
| @@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv) | |||
| 143 | return 0; | 143 | return 0; |
| 144 | } | 144 | } |
| 145 | #endif /* CONFIG_HAVE_CLK */ | 145 | #endif /* CONFIG_HAVE_CLK */ |
| 146 | |||
| 147 | |||
| 148 | #ifdef CONFIG_STMMAC_PLATFORM | ||
| 149 | extern struct platform_driver stmmac_pltfr_driver; | ||
| 150 | static inline int stmmac_register_platform(void) | ||
| 151 | { | ||
| 152 | int err; | ||
| 153 | |||
| 154 | err = platform_driver_register(&stmmac_pltfr_driver); | ||
| 155 | if (err) | ||
| 156 | pr_err("stmmac: failed to register the platform driver\n"); | ||
| 157 | |||
| 158 | return err; | ||
| 159 | } | ||
| 160 | static inline void stmmac_unregister_platform(void) | ||
| 161 | { | ||
| 162 | platform_driver_register(&stmmac_pltfr_driver); | ||
| 163 | } | ||
| 164 | #else | ||
| 165 | static inline int stmmac_register_platform(void) | ||
| 166 | { | ||
| 167 | pr_debug("stmmac: do not register the platf driver\n"); | ||
| 168 | |||
| 169 | return -EINVAL; | ||
| 170 | } | ||
| 171 | static inline void stmmac_unregister_platform(void) | ||
| 172 | { | ||
| 173 | } | ||
| 174 | #endif /* CONFIG_STMMAC_PLATFORM */ | ||
| 175 | |||
| 176 | #ifdef CONFIG_STMMAC_PCI | ||
| 177 | extern struct pci_driver stmmac_pci_driver; | ||
| 178 | static inline int stmmac_register_pci(void) | ||
| 179 | { | ||
| 180 | int err; | ||
| 181 | |||
| 182 | err = pci_register_driver(&stmmac_pci_driver); | ||
| 183 | if (err) | ||
| 184 | pr_err("stmmac: failed to register the PCI driver\n"); | ||
| 185 | |||
| 186 | return err; | ||
| 187 | } | ||
| 188 | static inline void stmmac_unregister_pci(void) | ||
| 189 | { | ||
| 190 | pci_unregister_driver(&stmmac_pci_driver); | ||
| 191 | } | ||
| 192 | #else | ||
| 193 | static inline int stmmac_register_pci(void) | ||
| 194 | { | ||
| 195 | pr_debug("stmmac: do not register the PCI driver\n"); | ||
| 196 | |||
| 197 | return -EINVAL; | ||
| 198 | } | ||
| 199 | static inline void stmmac_unregister_pci(void) | ||
| 200 | { | ||
| 201 | } | ||
| 202 | #endif /* CONFIG_STMMAC_PCI */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 70966330f44e..51b3b68528ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) | |||
| 833 | 833 | ||
| 834 | /** | 834 | /** |
| 835 | * stmmac_selec_desc_mode | 835 | * stmmac_selec_desc_mode |
| 836 | * @dev : device pointer | 836 | * @priv : private structure |
| 837 | * Description: select the Enhanced/Alternate or Normal descriptors */ | 837 | * Description: select the Enhanced/Alternate or Normal descriptors |
| 838 | */ | ||
| 838 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) | 839 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) |
| 839 | { | 840 | { |
| 840 | if (priv->plat->enh_desc) { | 841 | if (priv->plat->enh_desc) { |
| @@ -1861,6 +1862,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
| 1861 | /** | 1862 | /** |
| 1862 | * stmmac_dvr_probe | 1863 | * stmmac_dvr_probe |
| 1863 | * @device: device pointer | 1864 | * @device: device pointer |
| 1865 | * @plat_dat: platform data pointer | ||
| 1866 | * @addr: iobase memory address | ||
| 1864 | * Description: this is the main probe function used to | 1867 | * Description: this is the main probe function used to |
| 1865 | * call the alloc_etherdev, allocate the priv structure. | 1868 | * call the alloc_etherdev, allocate the priv structure. |
| 1866 | */ | 1869 | */ |
| @@ -2090,6 +2093,34 @@ int stmmac_restore(struct net_device *ndev) | |||
| 2090 | } | 2093 | } |
| 2091 | #endif /* CONFIG_PM */ | 2094 | #endif /* CONFIG_PM */ |
| 2092 | 2095 | ||
| 2096 | /* Driver can be configured w/ and w/o both PCI and Platf drivers | ||
| 2097 | * depending on the configuration selected. | ||
| 2098 | */ | ||
| 2099 | static int __init stmmac_init(void) | ||
| 2100 | { | ||
| 2101 | int err_plt = 0; | ||
| 2102 | int err_pci = 0; | ||
| 2103 | |||
| 2104 | err_plt = stmmac_register_platform(); | ||
| 2105 | err_pci = stmmac_register_pci(); | ||
| 2106 | |||
| 2107 | if ((err_pci) && (err_plt)) { | ||
| 2108 | pr_err("stmmac: driver registration failed\n"); | ||
| 2109 | return -EINVAL; | ||
| 2110 | } | ||
| 2111 | |||
| 2112 | return 0; | ||
| 2113 | } | ||
| 2114 | |||
| 2115 | static void __exit stmmac_exit(void) | ||
| 2116 | { | ||
| 2117 | stmmac_unregister_platform(); | ||
| 2118 | stmmac_unregister_pci(); | ||
| 2119 | } | ||
| 2120 | |||
| 2121 | module_init(stmmac_init); | ||
| 2122 | module_exit(stmmac_exit); | ||
| 2123 | |||
| 2093 | #ifndef MODULE | 2124 | #ifndef MODULE |
| 2094 | static int __init stmmac_cmdline_opt(char *str) | 2125 | static int __init stmmac_cmdline_opt(char *str) |
| 2095 | { | 2126 | { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 58fab5303e9c..cf826e6b6aa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
| @@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { | |||
| 179 | 179 | ||
| 180 | MODULE_DEVICE_TABLE(pci, stmmac_id_table); | 180 | MODULE_DEVICE_TABLE(pci, stmmac_id_table); |
| 181 | 181 | ||
| 182 | static struct pci_driver stmmac_driver = { | 182 | struct pci_driver stmmac_pci_driver = { |
| 183 | .name = STMMAC_RESOURCE_NAME, | 183 | .name = STMMAC_RESOURCE_NAME, |
| 184 | .id_table = stmmac_id_table, | 184 | .id_table = stmmac_id_table, |
| 185 | .probe = stmmac_pci_probe, | 185 | .probe = stmmac_pci_probe, |
| @@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = { | |||
| 190 | #endif | 190 | #endif |
| 191 | }; | 191 | }; |
| 192 | 192 | ||
| 193 | /** | ||
| 194 | * stmmac_init_module - Entry point for the driver | ||
| 195 | * Description: This function is the entry point for the driver. | ||
| 196 | */ | ||
| 197 | static int __init stmmac_init_module(void) | ||
| 198 | { | ||
| 199 | int ret; | ||
| 200 | |||
| 201 | ret = pci_register_driver(&stmmac_driver); | ||
| 202 | if (ret < 0) | ||
| 203 | pr_err("%s: ERROR: driver registration failed\n", __func__); | ||
| 204 | |||
| 205 | return ret; | ||
| 206 | } | ||
| 207 | |||
| 208 | /** | ||
| 209 | * stmmac_cleanup_module - Cleanup routine for the driver | ||
| 210 | * Description: This function is the cleanup routine for the driver. | ||
| 211 | */ | ||
| 212 | static void __exit stmmac_cleanup_module(void) | ||
| 213 | { | ||
| 214 | pci_unregister_driver(&stmmac_driver); | ||
| 215 | } | ||
| 216 | |||
| 217 | module_init(stmmac_init_module); | ||
| 218 | module_exit(stmmac_cleanup_module); | ||
| 219 | |||
| 220 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); | 193 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); |
| 221 | MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); | 194 | MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); |
| 222 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 195 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3dd8f0803808..680d2b8dfe27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
| 255 | }; | 255 | }; |
| 256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); | 256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); |
| 257 | 257 | ||
| 258 | static struct platform_driver stmmac_driver = { | 258 | struct platform_driver stmmac_pltfr_driver = { |
| 259 | .probe = stmmac_pltfr_probe, | 259 | .probe = stmmac_pltfr_probe, |
| 260 | .remove = stmmac_pltfr_remove, | 260 | .remove = stmmac_pltfr_remove, |
| 261 | .driver = { | 261 | .driver = { |
| @@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = { | |||
| 266 | }, | 266 | }, |
| 267 | }; | 267 | }; |
| 268 | 268 | ||
| 269 | module_platform_driver(stmmac_driver); | ||
| 270 | |||
| 271 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); | 269 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); |
| 272 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 270 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
| 273 | MODULE_LICENSE("GPL"); | 271 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 703c8cce2a2c..8c726b7004d3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
| @@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) | |||
| 3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | 3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) |
| 3599 | { | 3599 | { |
| 3600 | struct netdev_queue *txq; | 3600 | struct netdev_queue *txq; |
| 3601 | unsigned int tx_bytes; | ||
| 3602 | u16 pkt_cnt, tmp; | 3601 | u16 pkt_cnt, tmp; |
| 3603 | int cons, index; | 3602 | int cons, index; |
| 3604 | u64 cs; | 3603 | u64 cs; |
| @@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | |||
| 3621 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, | 3620 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, |
| 3622 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); | 3621 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); |
| 3623 | 3622 | ||
| 3624 | tx_bytes = 0; | 3623 | while (pkt_cnt--) |
| 3625 | tmp = pkt_cnt; | ||
| 3626 | while (tmp--) { | ||
| 3627 | tx_bytes += rp->tx_buffs[cons].skb->len; | ||
| 3628 | cons = release_tx_packet(np, rp, cons); | 3624 | cons = release_tx_packet(np, rp, cons); |
| 3629 | } | ||
| 3630 | 3625 | ||
| 3631 | rp->cons = cons; | 3626 | rp->cons = cons; |
| 3632 | smp_mb(); | 3627 | smp_mb(); |
| 3633 | 3628 | ||
| 3634 | netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes); | ||
| 3635 | |||
| 3636 | out: | 3629 | out: |
| 3637 | if (unlikely(netif_tx_queue_stopped(txq) && | 3630 | if (unlikely(netif_tx_queue_stopped(txq) && |
| 3638 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { | 3631 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { |
| @@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np) | |||
| 4333 | struct tx_ring_info *rp = &np->tx_rings[i]; | 4326 | struct tx_ring_info *rp = &np->tx_rings[i]; |
| 4334 | 4327 | ||
| 4335 | niu_free_tx_ring_info(np, rp); | 4328 | niu_free_tx_ring_info(np, rp); |
| 4336 | netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i)); | ||
| 4337 | } | 4329 | } |
| 4338 | kfree(np->tx_rings); | 4330 | kfree(np->tx_rings); |
| 4339 | np->tx_rings = NULL; | 4331 | np->tx_rings = NULL; |
| @@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, | |||
| 6739 | prod = NEXT_TX(rp, prod); | 6731 | prod = NEXT_TX(rp, prod); |
| 6740 | } | 6732 | } |
| 6741 | 6733 | ||
| 6742 | netdev_tx_sent_queue(txq, skb->len); | ||
| 6743 | |||
| 6744 | if (prod < rp->prod) | 6734 | if (prod < rp->prod) |
| 6745 | rp->wrap_bit ^= TX_RING_KICK_WRAP; | 6735 | rp->wrap_bit ^= TX_RING_KICK_WRAP; |
| 6746 | rp->prod = prod; | 6736 | rp->prod = prod; |
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig index 2d9218f86bca..098b1c42b393 100644 --- a/drivers/net/ethernet/tile/Kconfig +++ b/drivers/net/ethernet/tile/Kconfig | |||
| @@ -7,6 +7,8 @@ config TILE_NET | |||
| 7 | depends on TILE | 7 | depends on TILE |
| 8 | default y | 8 | default y |
| 9 | select CRC32 | 9 | select CRC32 |
| 10 | select TILE_GXIO_MPIPE if TILEGX | ||
| 11 | select HIGH_RES_TIMERS if TILEGX | ||
| 10 | ---help--- | 12 | ---help--- |
| 11 | This is a standard Linux network device driver for the | 13 | This is a standard Linux network device driver for the |
| 12 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. | 14 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. |
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile index f634f142cab4..0ef9eefd3211 100644 --- a/drivers/net/ethernet/tile/Makefile +++ b/drivers/net/ethernet/tile/Makefile | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | obj-$(CONFIG_TILE_NET) += tile_net.o | 5 | obj-$(CONFIG_TILE_NET) += tile_net.o |
| 6 | ifdef CONFIG_TILEGX | 6 | ifdef CONFIG_TILEGX |
| 7 | tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o | 7 | tile_net-y := tilegx.o |
| 8 | else | 8 | else |
| 9 | tile_net-objs := tilepro.o | 9 | tile_net-y := tilepro.o |
| 10 | endif | 10 | endif |
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c new file mode 100644 index 000000000000..83b4b388ad49 --- /dev/null +++ b/drivers/net/ethernet/tile/tilegx.c | |||
| @@ -0,0 +1,1898 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of the GNU General Public License | ||
| 6 | * as published by the Free Software Foundation, version 2. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
| 11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | #include <linux/moduleparam.h> | ||
| 18 | #include <linux/sched.h> | ||
| 19 | #include <linux/kernel.h> /* printk() */ | ||
| 20 | #include <linux/slab.h> /* kmalloc() */ | ||
| 21 | #include <linux/errno.h> /* error codes */ | ||
| 22 | #include <linux/types.h> /* size_t */ | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/in.h> | ||
| 25 | #include <linux/irq.h> | ||
| 26 | #include <linux/netdevice.h> /* struct device, and other headers */ | ||
| 27 | #include <linux/etherdevice.h> /* eth_type_trans */ | ||
| 28 | #include <linux/skbuff.h> | ||
| 29 | #include <linux/ioctl.h> | ||
| 30 | #include <linux/cdev.h> | ||
| 31 | #include <linux/hugetlb.h> | ||
| 32 | #include <linux/in6.h> | ||
| 33 | #include <linux/timer.h> | ||
| 34 | #include <linux/hrtimer.h> | ||
| 35 | #include <linux/ktime.h> | ||
| 36 | #include <linux/io.h> | ||
| 37 | #include <linux/ctype.h> | ||
| 38 | #include <linux/ip.h> | ||
| 39 | #include <linux/tcp.h> | ||
| 40 | |||
| 41 | #include <asm/checksum.h> | ||
| 42 | #include <asm/homecache.h> | ||
| 43 | #include <gxio/mpipe.h> | ||
| 44 | #include <arch/sim.h> | ||
| 45 | |||
| 46 | /* Default transmit lockup timeout period, in jiffies. */ | ||
| 47 | #define TILE_NET_TIMEOUT (5 * HZ) | ||
| 48 | |||
| 49 | /* The maximum number of distinct channels (idesc.channel is 5 bits). */ | ||
| 50 | #define TILE_NET_CHANNELS 32 | ||
| 51 | |||
| 52 | /* Maximum number of idescs to handle per "poll". */ | ||
| 53 | #define TILE_NET_BATCH 128 | ||
| 54 | |||
| 55 | /* Maximum number of packets to handle per "poll". */ | ||
| 56 | #define TILE_NET_WEIGHT 64 | ||
| 57 | |||
| 58 | /* Number of entries in each iqueue. */ | ||
| 59 | #define IQUEUE_ENTRIES 512 | ||
| 60 | |||
| 61 | /* Number of entries in each equeue. */ | ||
| 62 | #define EQUEUE_ENTRIES 2048 | ||
| 63 | |||
| 64 | /* Total header bytes per equeue slot. Must be big enough for 2 bytes | ||
| 65 | * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to | ||
| 66 | * 60 bytes of actual TCP header. We round up to align to cache lines. | ||
| 67 | */ | ||
| 68 | #define HEADER_BYTES 128 | ||
| 69 | |||
| 70 | /* Maximum completions per cpu per device (must be a power of two). | ||
| 71 | * ISSUE: What is the right number here? If this is too small, then | ||
| 72 | * egress might block waiting for free space in a completions array. | ||
| 73 | * ISSUE: At the least, allocate these only for initialized echannels. | ||
| 74 | */ | ||
| 75 | #define TILE_NET_MAX_COMPS 64 | ||
| 76 | |||
| 77 | #define MAX_FRAGS (MAX_SKB_FRAGS + 1) | ||
| 78 | |||
| 79 | /* Size of completions data to allocate. | ||
| 80 | * ISSUE: Probably more than needed since we don't use all the channels. | ||
| 81 | */ | ||
| 82 | #define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) | ||
| 83 | |||
| 84 | /* Size of NotifRing data to allocate. */ | ||
| 85 | #define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) | ||
| 86 | |||
| 87 | /* Timeout to wake the per-device TX timer after we stop the queue. | ||
| 88 | * We don't want the timeout too short (adds overhead, and might end | ||
| 89 | * up causing stop/wake/stop/wake cycles) or too long (affects performance). | ||
| 90 | * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets. | ||
| 91 | */ | ||
| 92 | #define TX_TIMER_DELAY_USEC 30 | ||
| 93 | |||
| 94 | /* Timeout to wake the per-cpu egress timer to free completions. */ | ||
| 95 | #define EGRESS_TIMER_DELAY_USEC 1000 | ||
| 96 | |||
| 97 | MODULE_AUTHOR("Tilera Corporation"); | ||
| 98 | MODULE_LICENSE("GPL"); | ||
| 99 | |||
| 100 | /* A "packet fragment" (a chunk of memory). */ | ||
| 101 | struct frag { | ||
| 102 | void *buf; | ||
| 103 | size_t length; | ||
| 104 | }; | ||
| 105 | |||
| 106 | /* A single completion. */ | ||
| 107 | struct tile_net_comp { | ||
| 108 | /* The "complete_count" when the completion will be complete. */ | ||
| 109 | s64 when; | ||
| 110 | /* The buffer to be freed when the completion is complete. */ | ||
| 111 | struct sk_buff *skb; | ||
| 112 | }; | ||
| 113 | |||
| 114 | /* The completions for a given cpu and echannel. */ | ||
| 115 | struct tile_net_comps { | ||
| 116 | /* The completions. */ | ||
| 117 | struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; | ||
| 118 | /* The number of completions used. */ | ||
| 119 | unsigned long comp_next; | ||
| 120 | /* The number of completions freed. */ | ||
| 121 | unsigned long comp_last; | ||
| 122 | }; | ||
| 123 | |||
| 124 | /* The transmit wake timer for a given cpu and echannel. */ | ||
| 125 | struct tile_net_tx_wake { | ||
| 126 | struct hrtimer timer; | ||
| 127 | struct net_device *dev; | ||
| 128 | }; | ||
| 129 | |||
| 130 | /* Info for a specific cpu. */ | ||
| 131 | struct tile_net_info { | ||
| 132 | /* The NAPI struct. */ | ||
| 133 | struct napi_struct napi; | ||
| 134 | /* Packet queue. */ | ||
| 135 | gxio_mpipe_iqueue_t iqueue; | ||
| 136 | /* Our cpu. */ | ||
| 137 | int my_cpu; | ||
| 138 | /* True if iqueue is valid. */ | ||
| 139 | bool has_iqueue; | ||
| 140 | /* NAPI flags. */ | ||
| 141 | bool napi_added; | ||
| 142 | bool napi_enabled; | ||
| 143 | /* Number of small sk_buffs which must still be provided. */ | ||
| 144 | unsigned int num_needed_small_buffers; | ||
| 145 | /* Number of large sk_buffs which must still be provided. */ | ||
| 146 | unsigned int num_needed_large_buffers; | ||
| 147 | /* A timer for handling egress completions. */ | ||
| 148 | struct hrtimer egress_timer; | ||
| 149 | /* True if "egress_timer" is scheduled. */ | ||
| 150 | bool egress_timer_scheduled; | ||
| 151 | /* Comps for each egress channel. */ | ||
| 152 | struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; | ||
| 153 | /* Transmit wake timer for each egress channel. */ | ||
| 154 | struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; | ||
| 155 | }; | ||
| 156 | |||
| 157 | /* Info for egress on a particular egress channel. */ | ||
| 158 | struct tile_net_egress { | ||
| 159 | /* The "equeue". */ | ||
| 160 | gxio_mpipe_equeue_t *equeue; | ||
| 161 | /* The headers for TSO. */ | ||
| 162 | unsigned char *headers; | ||
| 163 | }; | ||
| 164 | |||
| 165 | /* Info for a specific device. */ | ||
| 166 | struct tile_net_priv { | ||
| 167 | /* Our network device. */ | ||
| 168 | struct net_device *dev; | ||
| 169 | /* The primary link. */ | ||
| 170 | gxio_mpipe_link_t link; | ||
| 171 | /* The primary channel, if open, else -1. */ | ||
| 172 | int channel; | ||
| 173 | /* The "loopify" egress link, if needed. */ | ||
| 174 | gxio_mpipe_link_t loopify_link; | ||
| 175 | /* The "loopify" egress channel, if open, else -1. */ | ||
| 176 | int loopify_channel; | ||
| 177 | /* The egress channel (channel or loopify_channel). */ | ||
| 178 | int echannel; | ||
| 179 | /* Total stats. */ | ||
| 180 | struct net_device_stats stats; | ||
| 181 | }; | ||
| 182 | |||
| 183 | /* Egress info, indexed by "priv->echannel" (lazily created as needed). */ | ||
| 184 | static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; | ||
| 185 | |||
| 186 | /* Devices currently associated with each channel. | ||
| 187 | * NOTE: The array entry can become NULL after ifconfig down, but | ||
| 188 | * we do not free the underlying net_device structures, so it is | ||
| 189 | * safe to use a pointer after reading it from this array. | ||
| 190 | */ | ||
| 191 | static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; | ||
| 192 | |||
| 193 | /* A mutex for "tile_net_devs_for_channel". */ | ||
| 194 | static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); | ||
| 195 | |||
| 196 | /* The per-cpu info. */ | ||
| 197 | static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); | ||
| 198 | |||
| 199 | /* The "context" for all devices. */ | ||
| 200 | static gxio_mpipe_context_t context; | ||
| 201 | |||
| 202 | /* Buffer sizes and mpipe enum codes for buffer stacks. | ||
| 203 | * See arch/tile/include/gxio/mpipe.h for the set of possible values. | ||
| 204 | */ | ||
| 205 | #define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 | ||
| 206 | #define BUFFER_SIZE_SMALL 128 | ||
| 207 | #define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 | ||
| 208 | #define BUFFER_SIZE_LARGE 1664 | ||
| 209 | |||
| 210 | /* The small/large "buffer stacks". */ | ||
| 211 | static int small_buffer_stack = -1; | ||
| 212 | static int large_buffer_stack = -1; | ||
| 213 | |||
| 214 | /* Amount of memory allocated for each buffer stack. */ | ||
| 215 | static size_t buffer_stack_size; | ||
| 216 | |||
| 217 | /* The actual memory allocated for the buffer stacks. */ | ||
| 218 | static void *small_buffer_stack_va; | ||
| 219 | static void *large_buffer_stack_va; | ||
| 220 | |||
| 221 | /* The buckets. */ | ||
| 222 | static int first_bucket = -1; | ||
| 223 | static int num_buckets = 1; | ||
| 224 | |||
| 225 | /* The ingress irq. */ | ||
| 226 | static int ingress_irq = -1; | ||
| 227 | |||
| 228 | /* Text value of tile_net.cpus if passed as a module parameter. */ | ||
| 229 | static char *network_cpus_string; | ||
| 230 | |||
| 231 | /* The actual cpus in "network_cpus". */ | ||
| 232 | static struct cpumask network_cpus_map; | ||
| 233 | |||
| 234 | /* If "loopify=LINK" was specified, this is "LINK". */ | ||
| 235 | static char *loopify_link_name; | ||
| 236 | |||
| 237 | /* If "tile_net.custom" was specified, this is non-NULL. */ | ||
| 238 | static char *custom_str; | ||
| 239 | |||
| 240 | /* The "tile_net.cpus" argument specifies the cpus that are dedicated | ||
| 241 | * to handle ingress packets. | ||
| 242 | * | ||
| 243 | * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where | ||
| 244 | * m, n, x, y are integer numbers that represent the cpus that can be | ||
| 245 | * neither a dedicated cpu nor a dataplane cpu. | ||
| 246 | */ | ||
| 247 | static bool network_cpus_init(void) | ||
| 248 | { | ||
| 249 | char buf[1024]; | ||
| 250 | int rc; | ||
| 251 | |||
| 252 | if (network_cpus_string == NULL) | ||
| 253 | return false; | ||
| 254 | |||
| 255 | rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); | ||
| 256 | if (rc != 0) { | ||
| 257 | pr_warn("tile_net.cpus=%s: malformed cpu list\n", | ||
| 258 | network_cpus_string); | ||
| 259 | return false; | ||
| 260 | } | ||
| 261 | |||
| 262 | /* Remove dedicated cpus. */ | ||
| 263 | cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); | ||
| 264 | |||
| 265 | if (cpumask_empty(&network_cpus_map)) { | ||
| 266 | pr_warn("Ignoring empty tile_net.cpus='%s'.\n", | ||
| 267 | network_cpus_string); | ||
| 268 | return false; | ||
| 269 | } | ||
| 270 | |||
| 271 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | ||
| 272 | pr_info("Linux network CPUs: %s\n", buf); | ||
| 273 | return true; | ||
| 274 | } | ||
| 275 | |||
| 276 | module_param_named(cpus, network_cpus_string, charp, 0444); | ||
| 277 | MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); | ||
| 278 | |||
| 279 | /* The "tile_net.loopify=LINK" argument causes the named device to | ||
| 280 | * actually use "loop0" for ingress, and "loop1" for egress. This | ||
| 281 | * allows an app to sit between the actual link and linux, passing | ||
| 282 | * (some) packets along to linux, and forwarding (some) packets sent | ||
| 283 | * out by linux. | ||
| 284 | */ | ||
| 285 | module_param_named(loopify, loopify_link_name, charp, 0444); | ||
| 286 | MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); | ||
| 287 | |||
| 288 | /* The "tile_net.custom" argument causes us to ignore the "conventional" | ||
| 289 | * classifier metadata, in particular, the "l2_offset". | ||
| 290 | */ | ||
| 291 | module_param_named(custom, custom_str, charp, 0444); | ||
| 292 | MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); | ||
| 293 | |||
| 294 | /* Atomically update a statistics field. | ||
| 295 | * Note that on TILE-Gx, this operation is fire-and-forget on the | ||
| 296 | * issuing core (single-cycle dispatch) and takes only a few cycles | ||
| 297 | * longer than a regular store when the request reaches the home cache. | ||
| 298 | * No expensive bus management overhead is required. | ||
| 299 | */ | ||
| 300 | static void tile_net_stats_add(unsigned long value, unsigned long *field) | ||
| 301 | { | ||
| 302 | BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); | ||
| 303 | atomic_long_add(value, (atomic_long_t *)field); | ||
| 304 | } | ||
| 305 | |||
| 306 | /* Allocate and push a buffer. */ | ||
| 307 | static bool tile_net_provide_buffer(bool small) | ||
| 308 | { | ||
| 309 | int stack = small ? small_buffer_stack : large_buffer_stack; | ||
| 310 | const unsigned long buffer_alignment = 128; | ||
| 311 | struct sk_buff *skb; | ||
| 312 | int len; | ||
| 313 | |||
| 314 | len = sizeof(struct sk_buff **) + buffer_alignment; | ||
| 315 | len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); | ||
| 316 | skb = dev_alloc_skb(len); | ||
| 317 | if (skb == NULL) | ||
| 318 | return false; | ||
| 319 | |||
| 320 | /* Make room for a back-pointer to 'skb' and guarantee alignment. */ | ||
| 321 | skb_reserve(skb, sizeof(struct sk_buff **)); | ||
| 322 | skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); | ||
| 323 | |||
| 324 | /* Save a back-pointer to 'skb'. */ | ||
| 325 | *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; | ||
| 326 | |||
| 327 | /* Make sure "skb" and the back-pointer have been flushed. */ | ||
| 328 | wmb(); | ||
| 329 | |||
| 330 | gxio_mpipe_push_buffer(&context, stack, | ||
| 331 | (void *)va_to_tile_io_addr(skb->data)); | ||
| 332 | |||
| 333 | return true; | ||
| 334 | } | ||
| 335 | |||
| 336 | /* Convert a raw mpipe buffer to its matching skb pointer. */ | ||
| 337 | static struct sk_buff *mpipe_buf_to_skb(void *va) | ||
| 338 | { | ||
| 339 | /* Acquire the associated "skb". */ | ||
| 340 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
| 341 | struct sk_buff *skb = *skb_ptr; | ||
| 342 | |||
| 343 | /* Paranoia. */ | ||
| 344 | if (skb->data != va) { | ||
| 345 | /* Panic here since there's a reasonable chance | ||
| 346 | * that corrupt buffers means generic memory | ||
| 347 | * corruption, with unpredictable system effects. | ||
| 348 | */ | ||
| 349 | panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", | ||
| 350 | va, skb, skb->data); | ||
| 351 | } | ||
| 352 | |||
| 353 | return skb; | ||
| 354 | } | ||
| 355 | |||
| 356 | static void tile_net_pop_all_buffers(int stack) | ||
| 357 | { | ||
| 358 | for (;;) { | ||
| 359 | tile_io_addr_t addr = | ||
| 360 | (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); | ||
| 361 | if (addr == 0) | ||
| 362 | break; | ||
| 363 | dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); | ||
| 364 | } | ||
| 365 | } | ||
| 366 | |||
| 367 | /* Provide linux buffers to mPIPE. */ | ||
| 368 | static void tile_net_provide_needed_buffers(void) | ||
| 369 | { | ||
| 370 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 371 | |||
| 372 | while (info->num_needed_small_buffers != 0) { | ||
| 373 | if (!tile_net_provide_buffer(true)) | ||
| 374 | goto oops; | ||
| 375 | info->num_needed_small_buffers--; | ||
| 376 | } | ||
| 377 | |||
| 378 | while (info->num_needed_large_buffers != 0) { | ||
| 379 | if (!tile_net_provide_buffer(false)) | ||
| 380 | goto oops; | ||
| 381 | info->num_needed_large_buffers--; | ||
| 382 | } | ||
| 383 | |||
| 384 | return; | ||
| 385 | |||
| 386 | oops: | ||
| 387 | /* Add a description to the page allocation failure dump. */ | ||
| 388 | pr_notice("Tile %d still needs some buffers\n", info->my_cpu); | ||
| 389 | } | ||
| 390 | |||
| 391 | static inline bool filter_packet(struct net_device *dev, void *buf) | ||
| 392 | { | ||
| 393 | /* Filter packets received before we're up. */ | ||
| 394 | if (dev == NULL || !(dev->flags & IFF_UP)) | ||
| 395 | return true; | ||
| 396 | |||
| 397 | /* Filter out packets that aren't for us. */ | ||
| 398 | if (!(dev->flags & IFF_PROMISC) && | ||
| 399 | !is_multicast_ether_addr(buf) && | ||
| 400 | compare_ether_addr(dev->dev_addr, buf) != 0) | ||
| 401 | return true; | ||
| 402 | |||
| 403 | return false; | ||
| 404 | } | ||
| 405 | |||
| 406 | static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, | ||
| 407 | gxio_mpipe_idesc_t *idesc, unsigned long len) | ||
| 408 | { | ||
| 409 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 410 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 411 | |||
| 412 | /* Encode the actual packet length. */ | ||
| 413 | skb_put(skb, len); | ||
| 414 | |||
| 415 | skb->protocol = eth_type_trans(skb, dev); | ||
| 416 | |||
| 417 | /* Acknowledge "good" hardware checksums. */ | ||
| 418 | if (idesc->cs && idesc->csum_seed_val == 0xFFFF) | ||
| 419 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 420 | |||
| 421 | netif_receive_skb(skb); | ||
| 422 | |||
| 423 | /* Update stats. */ | ||
| 424 | tile_net_stats_add(1, &priv->stats.rx_packets); | ||
| 425 | tile_net_stats_add(len, &priv->stats.rx_bytes); | ||
| 426 | |||
| 427 | /* Need a new buffer. */ | ||
| 428 | if (idesc->size == BUFFER_SIZE_SMALL_ENUM) | ||
| 429 | info->num_needed_small_buffers++; | ||
| 430 | else | ||
| 431 | info->num_needed_large_buffers++; | ||
| 432 | } | ||
| 433 | |||
| 434 | /* Handle a packet. Return true if "processed", false if "filtered". */ | ||
| 435 | static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) | ||
| 436 | { | ||
| 437 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 438 | struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; | ||
| 439 | uint8_t l2_offset; | ||
| 440 | void *va; | ||
| 441 | void *buf; | ||
| 442 | unsigned long len; | ||
| 443 | bool filter; | ||
| 444 | |||
| 445 | /* Drop packets for which no buffer was available. | ||
| 446 | * NOTE: This happens under heavy load. | ||
| 447 | */ | ||
| 448 | if (idesc->be) { | ||
| 449 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 450 | tile_net_stats_add(1, &priv->stats.rx_dropped); | ||
| 451 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
| 452 | if (net_ratelimit()) | ||
| 453 | pr_info("Dropping packet (insufficient buffers).\n"); | ||
| 454 | return false; | ||
| 455 | } | ||
| 456 | |||
| 457 | /* Get the "l2_offset", if allowed. */ | ||
| 458 | l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); | ||
| 459 | |||
| 460 | /* Get the raw buffer VA (includes "headroom"). */ | ||
| 461 | va = tile_io_addr_to_va((unsigned long)(long)idesc->va); | ||
| 462 | |||
| 463 | /* Get the actual packet start/length. */ | ||
| 464 | buf = va + l2_offset; | ||
| 465 | len = idesc->l2_size - l2_offset; | ||
| 466 | |||
| 467 | /* Point "va" at the raw buffer. */ | ||
| 468 | va -= NET_IP_ALIGN; | ||
| 469 | |||
| 470 | filter = filter_packet(dev, buf); | ||
| 471 | if (filter) { | ||
| 472 | gxio_mpipe_iqueue_drop(&info->iqueue, idesc); | ||
| 473 | } else { | ||
| 474 | struct sk_buff *skb = mpipe_buf_to_skb(va); | ||
| 475 | |||
| 476 | /* Skip headroom, and any custom header. */ | ||
| 477 | skb_reserve(skb, NET_IP_ALIGN + l2_offset); | ||
| 478 | |||
| 479 | tile_net_receive_skb(dev, skb, idesc, len); | ||
| 480 | } | ||
| 481 | |||
| 482 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
| 483 | return !filter; | ||
| 484 | } | ||
| 485 | |||
/* Handle some packets for the current CPU.
 *
 * This function handles up to TILE_NET_BATCH idescs per call.
 *
 * ISSUE: Since we do not provide new buffers until this function is
 * complete, we must initially provide enough buffers for each network
 * cpu to fill its iqueue and also its batched idescs.
 *
 * ISSUE: The "rotting packet" race condition occurs if a packet
 * arrives after the queue appears to be empty, and before the
 * hypervisor interrupt is re-enabled.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	unsigned int work = 0;	/* packets actually passed up the stack */
	gxio_mpipe_idesc_t *idesc;
	int i, n;

	/* Process packets in runs of up to "n" contiguous idescs,
	 * stopping early at TILE_NET_BATCH or when the budget is spent.
	 */
	while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
		for (i = 0; i < n; i++) {
			if (i == TILE_NET_BATCH)
				goto done;
			/* Only packets passed up the stack (i.e. not
			 * filtered/dropped) count against the budget.
			 */
			if (tile_net_handle_packet(idesc + i)) {
				if (++work >= budget)
					goto done;
			}
		}
	}

	/* There are no packets left. */
	napi_complete(&info->napi);

	/* Re-enable hypervisor interrupts. */
	gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);

	/* HACK: Avoid the "rotting packet" problem: re-check the ring
	 * once after re-enabling the interrupt, so a packet that
	 * arrived in the race window still gets a new poll scheduled.
	 */
	if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
		napi_schedule(&info->napi);

	/* ISSUE: Handle completions? */

done:
	/* Replenish buffers for everything consumed above. */
	tile_net_provide_needed_buffers();

	return work;
}
| 534 | |||
| 535 | /* Handle an ingress interrupt on the current cpu. */ | ||
| 536 | static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) | ||
| 537 | { | ||
| 538 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 539 | napi_schedule(&info->napi); | ||
| 540 | return IRQ_HANDLED; | ||
| 541 | } | ||
| 542 | |||
| 543 | /* Free some completions. This must be called with interrupts blocked. */ | ||
| 544 | static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, | ||
| 545 | struct tile_net_comps *comps, | ||
| 546 | int limit, bool force_update) | ||
| 547 | { | ||
| 548 | int n = 0; | ||
| 549 | while (comps->comp_last < comps->comp_next) { | ||
| 550 | unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; | ||
| 551 | struct tile_net_comp *comp = &comps->comp_queue[cid]; | ||
| 552 | if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, | ||
| 553 | force_update || n == 0)) | ||
| 554 | break; | ||
| 555 | dev_kfree_skb_irq(comp->skb); | ||
| 556 | comps->comp_last++; | ||
| 557 | if (++n == limit) | ||
| 558 | break; | ||
| 559 | } | ||
| 560 | return n; | ||
| 561 | } | ||
| 562 | |||
| 563 | /* Add a completion. This must be called with interrupts blocked. | ||
| 564 | * tile_net_equeue_try_reserve() will have ensured a free completion entry. | ||
| 565 | */ | ||
| 566 | static void add_comp(gxio_mpipe_equeue_t *equeue, | ||
| 567 | struct tile_net_comps *comps, | ||
| 568 | uint64_t when, struct sk_buff *skb) | ||
| 569 | { | ||
| 570 | int cid = comps->comp_next % TILE_NET_MAX_COMPS; | ||
| 571 | comps->comp_queue[cid].when = when; | ||
| 572 | comps->comp_queue[cid].skb = skb; | ||
| 573 | comps->comp_next++; | ||
| 574 | } | ||
| 575 | |||
| 576 | static void tile_net_schedule_tx_wake_timer(struct net_device *dev) | ||
| 577 | { | ||
| 578 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 579 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 580 | |||
| 581 | hrtimer_start(&info->tx_wake[priv->echannel].timer, | ||
| 582 | ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), | ||
| 583 | HRTIMER_MODE_REL_PINNED); | ||
| 584 | } | ||
| 585 | |||
| 586 | static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) | ||
| 587 | { | ||
| 588 | struct tile_net_tx_wake *tx_wake = | ||
| 589 | container_of(t, struct tile_net_tx_wake, timer); | ||
| 590 | netif_wake_subqueue(tx_wake->dev, smp_processor_id()); | ||
| 591 | return HRTIMER_NORESTART; | ||
| 592 | } | ||
| 593 | |||
| 594 | /* Make sure the egress timer is scheduled. */ | ||
| 595 | static void tile_net_schedule_egress_timer(void) | ||
| 596 | { | ||
| 597 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 598 | |||
| 599 | if (!info->egress_timer_scheduled) { | ||
| 600 | hrtimer_start(&info->egress_timer, | ||
| 601 | ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), | ||
| 602 | HRTIMER_MODE_REL_PINNED); | ||
| 603 | info->egress_timer_scheduled = true; | ||
| 604 | } | ||
| 605 | } | ||
| 606 | |||
/* The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected for this tile.
 */
static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	unsigned long irqflags;
	bool pending = false;
	int i;

	/* Completion bookkeeping must run with interrupts blocked
	 * (see tile_net_free_comps()).
	 */
	local_irq_save(irqflags);

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	/* Free all possible comps for this tile. */
	for (i = 0; i < TILE_NET_CHANNELS; i++) {
		struct tile_net_egress *egress = &egress_for_echannel[i];
		struct tile_net_comps *comps = info->comps_for_echannel[i];
		/* Skip channels with nothing outstanding. */
		if (comps->comp_last >= comps->comp_next)
			continue;
		/* A limit of -1 never matches "++n == limit", so this
		 * frees as many completions as the hardware allows.
		 */
		tile_net_free_comps(egress->equeue, comps, -1, true);
		pending = pending || (comps->comp_last < comps->comp_next);
	}

	/* Reschedule timer if needed. */
	if (pending)
		tile_net_schedule_egress_timer();

	local_irq_restore(irqflags);

	return HRTIMER_NORESTART;
}
| 642 | |||
/* Helper function for "tile_net_update()".
 * "dev" (i.e. arg) is the device being brought up or down,
 * or NULL if all devices are now down.
 * Runs on each cpu via smp_call_function_single().
 */
static void tile_net_update_cpu(void *arg)
{
	struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
	struct net_device *dev = arg;

	/* Only cpus with an iqueue (i.e. "network cpus") participate. */
	if (!info->has_iqueue)
		return;

	if (dev != NULL) {
		/* Bringing a device up: register NAPI once, enable it
		 * if needed, then unmask the per-cpu ingress interrupt.
		 */
		if (!info->napi_added) {
			netif_napi_add(dev, &info->napi,
				       tile_net_poll, TILE_NET_WEIGHT);
			info->napi_added = true;
		}
		if (!info->napi_enabled) {
			napi_enable(&info->napi);
			info->napi_enabled = true;
		}
		enable_percpu_irq(ingress_irq, 0);
	} else {
		/* All devices are down: quiesce this cpu. */
		disable_percpu_irq(ingress_irq);
		if (info->napi_enabled) {
			napi_disable(&info->napi);
			info->napi_enabled = false;
		}
		/* FIXME: Drain the iqueue. */
	}
}
| 675 | |||
/* Helper function for tile_net_open() and tile_net_stop().
 * Always called under tile_net_devs_for_channel_mutex.
 *
 * Rebuilds the mpipe classifier rules from the set of channels that
 * currently have an open device, then updates every cpu's NAPI/irq
 * state to match.
 */
static int tile_net_update(struct net_device *dev)
{
	static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
	bool saw_channel = false;
	int channel;
	int rc;
	int cpu;

	gxio_mpipe_rules_init(&rules, &context);

	/* Add a rule covering every channel with an open device. */
	for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
		if (tile_net_devs_for_channel[channel] == NULL)
			continue;
		if (!saw_channel) {
			saw_channel = true;
			gxio_mpipe_rules_begin(&rules, first_bucket,
					       num_buckets, NULL);
			/* NET_IP_ALIGN bytes of headroom, matched by the
			 * "va -= NET_IP_ALIGN" in the ingress path.
			 */
			gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
		}
		gxio_mpipe_rules_add_channel(&rules, channel);
	}

	/* NOTE: This can fail if there is no classifier.
	 * ISSUE: Can anything else cause it to fail?
	 */
	rc = gxio_mpipe_rules_commit(&rules);
	if (rc != 0) {
		netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
		return -EIO;
	}

	/* Update all cpus, sequentially (to protect "netif_napi_add()"). */
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, tile_net_update_cpu,
					 (saw_channel ? dev : NULL), 1);

	/* HACK: Allow packets to flow in the simulator. */
	if (saw_channel)
		sim_enable_mpipe_links(0, -1);

	return 0;
}
| 721 | |||
| 722 | /* Allocate and initialize mpipe buffer stacks, and register them in | ||
| 723 | * the mPIPE TLBs, for both small and large packet sizes. | ||
| 724 | * This routine supports tile_net_init_mpipe(), below. | ||
| 725 | */ | ||
| 726 | static int init_buffer_stacks(struct net_device *dev, int num_buffers) | ||
| 727 | { | ||
| 728 | pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); | ||
| 729 | int rc; | ||
| 730 | |||
| 731 | /* Compute stack bytes; we round up to 64KB and then use | ||
| 732 | * alloc_pages() so we get the required 64KB alignment as well. | ||
| 733 | */ | ||
| 734 | buffer_stack_size = | ||
| 735 | ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), | ||
| 736 | 64 * 1024); | ||
| 737 | |||
| 738 | /* Allocate two buffer stack indices. */ | ||
| 739 | rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); | ||
| 740 | if (rc < 0) { | ||
| 741 | netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", | ||
| 742 | rc); | ||
| 743 | return rc; | ||
| 744 | } | ||
| 745 | small_buffer_stack = rc; | ||
| 746 | large_buffer_stack = rc + 1; | ||
| 747 | |||
| 748 | /* Allocate the small memory stack. */ | ||
| 749 | small_buffer_stack_va = | ||
| 750 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
| 751 | if (small_buffer_stack_va == NULL) { | ||
| 752 | netdev_err(dev, | ||
| 753 | "Could not alloc %zd bytes for buffer stacks\n", | ||
| 754 | buffer_stack_size); | ||
| 755 | return -ENOMEM; | ||
| 756 | } | ||
| 757 | rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, | ||
| 758 | BUFFER_SIZE_SMALL_ENUM, | ||
| 759 | small_buffer_stack_va, | ||
| 760 | buffer_stack_size, 0); | ||
| 761 | if (rc != 0) { | ||
| 762 | netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); | ||
| 763 | return rc; | ||
| 764 | } | ||
| 765 | rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, | ||
| 766 | hash_pte, 0); | ||
| 767 | if (rc != 0) { | ||
| 768 | netdev_err(dev, | ||
| 769 | "gxio_mpipe_register_buffer_memory failed: %d\n", | ||
| 770 | rc); | ||
| 771 | return rc; | ||
| 772 | } | ||
| 773 | |||
| 774 | /* Allocate the large buffer stack. */ | ||
| 775 | large_buffer_stack_va = | ||
| 776 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
| 777 | if (large_buffer_stack_va == NULL) { | ||
| 778 | netdev_err(dev, | ||
| 779 | "Could not alloc %zd bytes for buffer stacks\n", | ||
| 780 | buffer_stack_size); | ||
| 781 | return -ENOMEM; | ||
| 782 | } | ||
| 783 | rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, | ||
| 784 | BUFFER_SIZE_LARGE_ENUM, | ||
| 785 | large_buffer_stack_va, | ||
| 786 | buffer_stack_size, 0); | ||
| 787 | if (rc != 0) { | ||
| 788 | netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", | ||
| 789 | rc); | ||
| 790 | return rc; | ||
| 791 | } | ||
| 792 | rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, | ||
| 793 | hash_pte, 0); | ||
| 794 | if (rc != 0) { | ||
| 795 | netdev_err(dev, | ||
| 796 | "gxio_mpipe_register_buffer_memory failed: %d\n", | ||
| 797 | rc); | ||
| 798 | return rc; | ||
| 799 | } | ||
| 800 | |||
| 801 | return 0; | ||
| 802 | } | ||
| 803 | |||
/* Allocate per-cpu resources (memory for completions and idescs).
 * This routine supports tile_net_init_mpipe(), below.
 *
 * Returns the next free NotifRing index (ring, possibly incremented)
 * on success, or a negative error code.
 */
static int alloc_percpu_mpipe_resources(struct net_device *dev,
					int cpu, int ring)
{
	struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
	int order, i, rc;
	struct page *page;
	void *addr;

	/* Allocate the "comps", homed on the cpu that will use them. */
	order = get_order(COMPS_SIZE);
	page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
	if (page == NULL) {
		netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
			   COMPS_SIZE);
		return -ENOMEM;
	}
	addr = pfn_to_kaddr(page_to_pfn(page));
	memset(addr, 0, COMPS_SIZE);
	/* Carve one tile_net_comps per channel out of the allocation. */
	for (i = 0; i < TILE_NET_CHANNELS; i++)
		info->comps_for_echannel[i] =
			addr + i * sizeof(struct tile_net_comps);

	/* If this is a network cpu, create an iqueue. */
	if (cpu_isset(cpu, network_cpus_map)) {
		order = get_order(NOTIF_RING_SIZE);
		page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
		if (page == NULL) {
			netdev_err(dev,
				   "Failed to alloc %zd bytes iqueue memory\n",
				   NOTIF_RING_SIZE);
			return -ENOMEM;
		}
		addr = pfn_to_kaddr(page_to_pfn(page));
		rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
					    addr, NOTIF_RING_SIZE, 0);
		if (rc < 0) {
			/* NOTE(review): "addr" appears to leak on this
			 * path -- tile_net_init_mpipe_fail() frees
			 * info->iqueue.idescs, which may not yet point
			 * at this allocation.  Confirm.
			 */
			netdev_err(dev,
				   "gxio_mpipe_iqueue_init failed: %d\n", rc);
			return rc;
		}
		info->has_iqueue = true;
	}

	return ring;
}
| 852 | |||
| 853 | /* Initialize NotifGroup and buckets. | ||
| 854 | * This routine supports tile_net_init_mpipe(), below. | ||
| 855 | */ | ||
| 856 | static int init_notif_group_and_buckets(struct net_device *dev, | ||
| 857 | int ring, int network_cpus_count) | ||
| 858 | { | ||
| 859 | int group, rc; | ||
| 860 | |||
| 861 | /* Allocate one NotifGroup. */ | ||
| 862 | rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); | ||
| 863 | if (rc < 0) { | ||
| 864 | netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", | ||
| 865 | rc); | ||
| 866 | return rc; | ||
| 867 | } | ||
| 868 | group = rc; | ||
| 869 | |||
| 870 | /* Initialize global num_buckets value. */ | ||
| 871 | if (network_cpus_count > 4) | ||
| 872 | num_buckets = 256; | ||
| 873 | else if (network_cpus_count > 1) | ||
| 874 | num_buckets = 16; | ||
| 875 | |||
| 876 | /* Allocate some buckets, and set global first_bucket value. */ | ||
| 877 | rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); | ||
| 878 | if (rc < 0) { | ||
| 879 | netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); | ||
| 880 | return rc; | ||
| 881 | } | ||
| 882 | first_bucket = rc; | ||
| 883 | |||
| 884 | /* Init group and buckets. */ | ||
| 885 | rc = gxio_mpipe_init_notif_group_and_buckets( | ||
| 886 | &context, group, ring, network_cpus_count, | ||
| 887 | first_bucket, num_buckets, | ||
| 888 | GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); | ||
| 889 | if (rc != 0) { | ||
| 890 | netdev_err( | ||
| 891 | dev, | ||
| 892 | "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", | ||
| 893 | rc); | ||
| 894 | return rc; | ||
| 895 | } | ||
| 896 | |||
| 897 | return 0; | ||
| 898 | } | ||
| 899 | |||
| 900 | /* Create an irq and register it, then activate the irq and request | ||
| 901 | * interrupts on all cores. Note that "ingress_irq" being initialized | ||
| 902 | * is how we know not to call tile_net_init_mpipe() again. | ||
| 903 | * This routine supports tile_net_init_mpipe(), below. | ||
| 904 | */ | ||
| 905 | static int tile_net_setup_interrupts(struct net_device *dev) | ||
| 906 | { | ||
| 907 | int cpu, rc; | ||
| 908 | |||
| 909 | rc = create_irq(); | ||
| 910 | if (rc < 0) { | ||
| 911 | netdev_err(dev, "create_irq failed: %d\n", rc); | ||
| 912 | return rc; | ||
| 913 | } | ||
| 914 | ingress_irq = rc; | ||
| 915 | tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); | ||
| 916 | rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, | ||
| 917 | 0, NULL, NULL); | ||
| 918 | if (rc != 0) { | ||
| 919 | netdev_err(dev, "request_irq failed: %d\n", rc); | ||
| 920 | destroy_irq(ingress_irq); | ||
| 921 | ingress_irq = -1; | ||
| 922 | return rc; | ||
| 923 | } | ||
| 924 | |||
| 925 | for_each_online_cpu(cpu) { | ||
| 926 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
| 927 | if (info->has_iqueue) { | ||
| 928 | gxio_mpipe_request_notif_ring_interrupt( | ||
| 929 | &context, cpu_x(cpu), cpu_y(cpu), | ||
| 930 | 1, ingress_irq, info->iqueue.ring); | ||
| 931 | } | ||
| 932 | } | ||
| 933 | |||
| 934 | return 0; | ||
| 935 | } | ||
| 936 | |||
/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
static void tile_net_init_mpipe_fail(void)
{
	int cpu;

	/* Do cleanups that require the mpipe context first. */
	if (small_buffer_stack >= 0)
		tile_net_pop_all_buffers(small_buffer_stack);
	if (large_buffer_stack >= 0)
		tile_net_pop_all_buffers(large_buffer_stack);

	/* Destroy mpipe context so the hardware no longer owns any memory. */
	gxio_mpipe_destroy(&context);

	/* Free each cpu's comps block and iqueue ring.  Note that
	 * comps_for_echannel[0] is the base of the single comps
	 * allocation (see alloc_percpu_mpipe_resources()).
	 */
	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		free_pages((unsigned long)(info->comps_for_echannel[0]),
			   get_order(COMPS_SIZE));
		info->comps_for_echannel[0] = NULL;
		free_pages((unsigned long)(info->iqueue.idescs),
			   get_order(NOTIF_RING_SIZE));
		info->iqueue.idescs = NULL;
	}

	/* Release the buffer stack memory, then reset the globals to
	 * their "uninitialized" values so a later retry starts clean.
	 */
	if (small_buffer_stack_va)
		free_pages_exact(small_buffer_stack_va, buffer_stack_size);
	if (large_buffer_stack_va)
		free_pages_exact(large_buffer_stack_va, buffer_stack_size);

	small_buffer_stack_va = NULL;
	large_buffer_stack_va = NULL;
	large_buffer_stack = -1;
	small_buffer_stack = -1;
	first_bucket = -1;
}
| 972 | |||
/* The first time any tilegx network device is opened, we initialize
 * the global mpipe state.  If this step fails, we fail to open the
 * device, but if it succeeds, we never need to do it again, and since
 * tile_net can't be unloaded, we never undo it.
 *
 * Note that some resources in this path (buffer stack indices,
 * bindings from init_buffer_stack, etc.) are hypervisor resources
 * that are freed implicitly by gxio_mpipe_destroy().
 */
static int tile_net_init_mpipe(struct net_device *dev)
{
	int i, num_buffers, rc;
	int cpu;
	int first_ring, ring;
	int network_cpus_count = cpus_weight(network_cpus_map);

	if (!hash_default) {
		netdev_err(dev, "Networking requires hash_default!\n");
		return -EIO;
	}

	rc = gxio_mpipe_init(&context, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
		return -EIO;
	}

	/* Set up the buffer stacks.  Each network cpu needs enough
	 * buffers to fill its iqueue plus one batch of idescs (see
	 * the ISSUE comment above tile_net_poll()).
	 */
	num_buffers =
		network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
	rc = init_buffer_stacks(dev, num_buffers);
	if (rc != 0)
		goto fail;

	/* Provide initial buffers: num_buffers small ones ("true"),
	 * then num_buffers large ones ("false").
	 */
	rc = -ENOMEM;
	for (i = 0; i < num_buffers; i++) {
		if (!tile_net_provide_buffer(true)) {
			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
			goto fail;
		}
	}
	for (i = 0; i < num_buffers; i++) {
		if (!tile_net_provide_buffer(false)) {
			netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
			goto fail;
		}
	}

	/* Allocate one NotifRing for each network cpu. */
	rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
	if (rc < 0) {
		netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
			   rc);
		goto fail;
	}

	/* Init NotifRings per-cpu; each network cpu consumes one ring
	 * index (alloc_percpu_mpipe_resources() returns the next one).
	 */
	first_ring = rc;
	ring = first_ring;
	for_each_online_cpu(cpu) {
		rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
		if (rc < 0)
			goto fail;
		ring = rc;
	}

	/* Initialize NotifGroup and buckets. */
	rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
	if (rc != 0)
		goto fail;

	/* Create and enable interrupts. */
	rc = tile_net_setup_interrupts(dev);
	if (rc != 0)
		goto fail;

	return 0;

fail:
	/* Unwind everything set up so far; a later open may retry. */
	tile_net_init_mpipe_fail();
	return rc;
}
| 1056 | |||
/* Create persistent egress info for a given egress channel.
 * Note that this may be shared between, say, "gbe0" and "xgbe0".
 * ISSUE: Defer header allocation until TSO is actually needed?
 *
 * Uses a goto-cleanup ladder: each failure label frees everything
 * allocated before it, in reverse order.
 */
static int tile_net_init_egress(struct net_device *dev, int echannel)
{
	struct page *headers_page, *edescs_page, *equeue_page;
	gxio_mpipe_edesc_t *edescs;
	gxio_mpipe_equeue_t *equeue;
	unsigned char *headers;
	int headers_order, edescs_order, equeue_order;
	size_t edescs_size;
	int edma;
	int rc = -ENOMEM;

	/* Only initialize once. */
	if (egress_for_echannel[echannel].equeue != NULL)
		return 0;

	/* Allocate memory for the "headers" (one per equeue entry). */
	headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
	headers_page = alloc_pages(GFP_KERNEL, headers_order);
	if (headers_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for TSO headers.\n",
			    PAGE_SIZE << headers_order);
		goto fail;
	}
	headers = pfn_to_kaddr(page_to_pfn(headers_page));

	/* Allocate memory for the "edescs". */
	edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
	edescs_order = get_order(edescs_size);
	edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
	if (edescs_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for eDMA ring.\n",
			    edescs_size);
		goto fail_headers;
	}
	edescs = pfn_to_kaddr(page_to_pfn(edescs_page));

	/* Allocate memory for the "equeue". */
	equeue_order = get_order(sizeof(*equeue));
	equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
	if (equeue_page == NULL) {
		netdev_warn(dev,
			    "Could not alloc %zd bytes for equeue info.\n",
			    PAGE_SIZE << equeue_order);
		goto fail_edescs;
	}
	equeue = pfn_to_kaddr(page_to_pfn(equeue_page));

	/* Allocate an edma ring.  Note that in practice this can't
	 * fail, which is good, because we will leak an edma ring if so.
	 */
	rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
	if (rc < 0) {
		netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
			    rc);
		goto fail_equeue;
	}
	edma = rc;

	/* Initialize the equeue. */
	rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
				    edescs, edescs_size, 0);
	if (rc != 0) {
		netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
		/* NOTE: the edma ring allocated above is leaked here;
		 * see the comment at gxio_mpipe_alloc_edma_rings().
		 */
		goto fail_equeue;
	}

	/* Done.  Publishing "equeue" marks this echannel initialized. */
	egress_for_echannel[echannel].equeue = equeue;
	egress_for_echannel[echannel].headers = headers;
	return 0;

fail_equeue:
	__free_pages(equeue_page, equeue_order);

fail_edescs:
	__free_pages(edescs_page, edescs_order);

fail_headers:
	__free_pages(headers_page, headers_order);

fail:
	return rc;
}
| 1146 | |||
| 1147 | /* Return channel number for a newly-opened link. */ | ||
| 1148 | static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, | ||
| 1149 | const char *link_name) | ||
| 1150 | { | ||
| 1151 | int rc = gxio_mpipe_link_open(link, &context, link_name, 0); | ||
| 1152 | if (rc < 0) { | ||
| 1153 | netdev_err(dev, "Failed to open '%s'\n", link_name); | ||
| 1154 | return rc; | ||
| 1155 | } | ||
| 1156 | rc = gxio_mpipe_link_channel(link); | ||
| 1157 | if (rc < 0 || rc >= TILE_NET_CHANNELS) { | ||
| 1158 | netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); | ||
| 1159 | gxio_mpipe_link_close(link); | ||
| 1160 | return -EINVAL; | ||
| 1161 | } | ||
| 1162 | return rc; | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | /* Help the kernel activate the given network interface. */ | ||
| 1166 | static int tile_net_open(struct net_device *dev) | ||
| 1167 | { | ||
| 1168 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 1169 | int cpu, rc; | ||
| 1170 | |||
| 1171 | mutex_lock(&tile_net_devs_for_channel_mutex); | ||
| 1172 | |||
| 1173 | /* Do one-time initialization the first time any device is opened. */ | ||
| 1174 | if (ingress_irq < 0) { | ||
| 1175 | rc = tile_net_init_mpipe(dev); | ||
| 1176 | if (rc != 0) | ||
| 1177 | goto fail; | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | /* Determine if this is the "loopify" device. */ | ||
| 1181 | if (unlikely((loopify_link_name != NULL) && | ||
| 1182 | !strcmp(dev->name, loopify_link_name))) { | ||
| 1183 | rc = tile_net_link_open(dev, &priv->link, "loop0"); | ||
| 1184 | if (rc < 0) | ||
| 1185 | goto fail; | ||
| 1186 | priv->channel = rc; | ||
| 1187 | rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); | ||
| 1188 | if (rc < 0) | ||
| 1189 | goto fail; | ||
| 1190 | priv->loopify_channel = rc; | ||
| 1191 | priv->echannel = rc; | ||
| 1192 | } else { | ||
| 1193 | rc = tile_net_link_open(dev, &priv->link, dev->name); | ||
| 1194 | if (rc < 0) | ||
| 1195 | goto fail; | ||
| 1196 | priv->channel = rc; | ||
| 1197 | priv->echannel = rc; | ||
| 1198 | } | ||
| 1199 | |||
| 1200 | /* Initialize egress info (if needed). Once ever, per echannel. */ | ||
| 1201 | rc = tile_net_init_egress(dev, priv->echannel); | ||
| 1202 | if (rc != 0) | ||
| 1203 | goto fail; | ||
| 1204 | |||
| 1205 | tile_net_devs_for_channel[priv->channel] = dev; | ||
| 1206 | |||
| 1207 | rc = tile_net_update(dev); | ||
| 1208 | if (rc != 0) | ||
| 1209 | goto fail; | ||
| 1210 | |||
| 1211 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
| 1212 | |||
| 1213 | /* Initialize the transmit wake timer for this device for each cpu. */ | ||
| 1214 | for_each_online_cpu(cpu) { | ||
| 1215 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
| 1216 | struct tile_net_tx_wake *tx_wake = | ||
| 1217 | &info->tx_wake[priv->echannel]; | ||
| 1218 | |||
| 1219 | hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, | ||
| 1220 | HRTIMER_MODE_REL); | ||
| 1221 | tx_wake->timer.function = tile_net_handle_tx_wake_timer; | ||
| 1222 | tx_wake->dev = dev; | ||
| 1223 | } | ||
| 1224 | |||
| 1225 | for_each_online_cpu(cpu) | ||
| 1226 | netif_start_subqueue(dev, cpu); | ||
| 1227 | netif_carrier_on(dev); | ||
| 1228 | return 0; | ||
| 1229 | |||
| 1230 | fail: | ||
| 1231 | if (priv->loopify_channel >= 0) { | ||
| 1232 | if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | ||
| 1233 | netdev_warn(dev, "Failed to close loopify link!\n"); | ||
| 1234 | priv->loopify_channel = -1; | ||
| 1235 | } | ||
| 1236 | if (priv->channel >= 0) { | ||
| 1237 | if (gxio_mpipe_link_close(&priv->link) != 0) | ||
| 1238 | netdev_warn(dev, "Failed to close link!\n"); | ||
| 1239 | priv->channel = -1; | ||
| 1240 | } | ||
| 1241 | priv->echannel = -1; | ||
| 1242 | tile_net_devs_for_channel[priv->channel] = NULL; | ||
| 1243 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
| 1244 | |||
| 1245 | /* Don't return raw gxio error codes to generic Linux. */ | ||
| 1246 | return (rc > -512) ? rc : -EIO; | ||
| 1247 | } | ||
| 1248 | |||
/* Help the kernel deactivate the given network interface. */
static int tile_net_stop(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int cpu;

	/* Quiesce transmit: cancel pending tx_wake timers and stop all
	 * per-cpu subqueues.
	 */
	for_each_online_cpu(cpu) {
		struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
		struct tile_net_tx_wake *tx_wake =
			&info->tx_wake[priv->echannel];

		hrtimer_cancel(&tx_wake->timer);
		netif_stop_subqueue(dev, cpu);
	}

	/* Remove this device from the channel table, rebuild the
	 * classifier rules without it, and close its link(s).
	 */
	mutex_lock(&tile_net_devs_for_channel_mutex);
	tile_net_devs_for_channel[priv->channel] = NULL;
	(void)tile_net_update(dev);
	if (priv->loopify_channel >= 0) {
		if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
			netdev_warn(dev, "Failed to close loopify link!\n");
		priv->loopify_channel = -1;
	}
	if (priv->channel >= 0) {
		if (gxio_mpipe_link_close(&priv->link) != 0)
			netdev_warn(dev, "Failed to close link!\n");
		priv->channel = -1;
	}
	priv->echannel = -1;
	mutex_unlock(&tile_net_devs_for_channel_mutex);

	return 0;
}
| 1282 | |||
| 1283 | /* Determine the VA for a fragment. */ | ||
| 1284 | static inline void *tile_net_frag_buf(skb_frag_t *f) | ||
| 1285 | { | ||
| 1286 | unsigned long pfn = page_to_pfn(skb_frag_page(f)); | ||
| 1287 | return pfn_to_kaddr(pfn) + f->page_offset; | ||
| 1288 | } | ||
| 1289 | |||
/* Acquire a completion entry and an egress slot, or if we can't,
 * stop the queue and schedule the tx_wake timer.
 *
 * Returns the reserved equeue slot (>= 0), or -1 if the subqueue was
 * stopped and the caller should give up for now.
 */
static s64 tile_net_equeue_try_reserve(struct net_device *dev,
				       struct tile_net_comps *comps,
				       gxio_mpipe_equeue_t *equeue,
				       int num_edescs)
{
	/* Try to acquire a completion entry: either the comps ring has
	 * room already, or freeing a few completions makes room.
	 */
	if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
	    tile_net_free_comps(equeue, comps, 32, false) != 0) {

		/* Try to acquire an egress slot. */
		s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;

		/* Freeing some completions gives the equeue time to drain. */
		tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);

		/* Second (and last) attempt. */
		slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
		if (slot >= 0)
			return slot;
	}

	/* Still nothing; give up and stop the queue for a short while. */
	netif_stop_subqueue(dev, smp_processor_id());
	tile_net_schedule_tx_wake_timer(dev);
	return -1;
}
| 1320 | |||
/* Determine how many edesc's are needed for TSO.
 *
 * Sometimes, if "sendfile()" requires copying, we will be called with
 * "data" containing the header and payload, with "frags" being empty.
 * Sometimes, for example when using NFS over TCP, a single segment can
 * span 3 fragments.  This requires special care.
 */
static int tso_count_edescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int data_len = skb->data_len;	/* payload bytes remaining */
	unsigned int p_len = sh->gso_size;	/* payload bytes per segment */
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {

		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {

			/* Advance to the next fragment as needed (the
			 * initial -1 values force this on first entry).
			 */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	return num_edescs;
}
| 1370 | |||
| 1371 | /* Prepare modified copies of the skbuff headers. | ||
| 1372 | * FIXME: add support for IPv6. | ||
| 1373 | */ | ||
| 1374 | static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, | ||
| 1375 | s64 slot) | ||
| 1376 | { | ||
| 1377 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
| 1378 | struct iphdr *ih; | ||
| 1379 | struct tcphdr *th; | ||
| 1380 | unsigned int data_len = skb->data_len; | ||
| 1381 | unsigned char *data = skb->data; | ||
| 1382 | unsigned int ih_off, th_off, sh_len, p_len; | ||
| 1383 | unsigned int isum_seed, tsum_seed, id, seq; | ||
| 1384 | long f_id = -1; /* id of the current fragment */ | ||
| 1385 | long f_size = -1; /* size of the current fragment */ | ||
| 1386 | long f_used = -1; /* bytes used from the current fragment */ | ||
| 1387 | long n; /* size of the current piece of payload */ | ||
| 1388 | int segment; | ||
| 1389 | |||
| 1390 | /* Locate original headers and compute various lengths. */ | ||
| 1391 | ih = ip_hdr(skb); | ||
| 1392 | th = tcp_hdr(skb); | ||
| 1393 | ih_off = skb_network_offset(skb); | ||
| 1394 | th_off = skb_transport_offset(skb); | ||
| 1395 | sh_len = th_off + tcp_hdrlen(skb); | ||
| 1396 | p_len = sh->gso_size; | ||
| 1397 | |||
| 1398 | /* Set up seed values for IP and TCP csum and initialize id and seq. */ | ||
| 1399 | isum_seed = ((0xFFFF - ih->check) + | ||
| 1400 | (0xFFFF - ih->tot_len) + | ||
| 1401 | (0xFFFF - ih->id)); | ||
| 1402 | tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); | ||
| 1403 | id = ntohs(ih->id); | ||
| 1404 | seq = ntohl(th->seq); | ||
| 1405 | |||
| 1406 | /* Prepare all the headers. */ | ||
| 1407 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
| 1408 | unsigned char *buf; | ||
| 1409 | unsigned int p_used = 0; | ||
| 1410 | |||
| 1411 | /* Copy to the header memory for this segment. */ | ||
| 1412 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
| 1413 | NET_IP_ALIGN; | ||
| 1414 | memcpy(buf, data, sh_len); | ||
| 1415 | |||
| 1416 | /* Update copied ip header. */ | ||
| 1417 | ih = (struct iphdr *)(buf + ih_off); | ||
| 1418 | ih->tot_len = htons(sh_len + p_len - ih_off); | ||
| 1419 | ih->id = htons(id); | ||
| 1420 | ih->check = csum_long(isum_seed + ih->tot_len + | ||
| 1421 | ih->id) ^ 0xffff; | ||
| 1422 | |||
| 1423 | /* Update copied tcp header. */ | ||
| 1424 | th = (struct tcphdr *)(buf + th_off); | ||
| 1425 | th->seq = htonl(seq); | ||
| 1426 | th->check = csum_long(tsum_seed + htons(sh_len + p_len)); | ||
| 1427 | if (segment != sh->gso_segs - 1) { | ||
| 1428 | th->fin = 0; | ||
| 1429 | th->psh = 0; | ||
| 1430 | } | ||
| 1431 | |||
| 1432 | /* Skip past the header. */ | ||
| 1433 | slot++; | ||
| 1434 | |||
| 1435 | /* Skip past the payload. */ | ||
| 1436 | while (p_used < p_len) { | ||
| 1437 | |||
| 1438 | /* Advance as needed. */ | ||
| 1439 | while (f_used >= f_size) { | ||
| 1440 | f_id++; | ||
| 1441 | f_size = sh->frags[f_id].size; | ||
| 1442 | f_used = 0; | ||
| 1443 | } | ||
| 1444 | |||
| 1445 | /* Use bytes from the current fragment. */ | ||
| 1446 | n = p_len - p_used; | ||
| 1447 | if (n > f_size - f_used) | ||
| 1448 | n = f_size - f_used; | ||
| 1449 | f_used += n; | ||
| 1450 | p_used += n; | ||
| 1451 | |||
| 1452 | slot++; | ||
| 1453 | } | ||
| 1454 | |||
| 1455 | id++; | ||
| 1456 | seq += p_len; | ||
| 1457 | |||
| 1458 | /* The last segment may be less than gso_size. */ | ||
| 1459 | data_len -= p_len; | ||
| 1460 | if (data_len < p_len) | ||
| 1461 | p_len = data_len; | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | /* Flush the headers so they are ready for hardware DMA. */ | ||
| 1465 | wmb(); | ||
| 1466 | } | ||
| 1467 | |||
| 1468 | /* Pass all the data to mpipe for egress. */ | ||
| 1469 | static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | ||
| 1470 | struct sk_buff *skb, unsigned char *headers, s64 slot) | ||
| 1471 | { | ||
| 1472 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 1473 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
| 1474 | unsigned int data_len = skb->data_len; | ||
| 1475 | unsigned int p_len = sh->gso_size; | ||
| 1476 | gxio_mpipe_edesc_t edesc_head = { { 0 } }; | ||
| 1477 | gxio_mpipe_edesc_t edesc_body = { { 0 } }; | ||
| 1478 | long f_id = -1; /* id of the current fragment */ | ||
| 1479 | long f_size = -1; /* size of the current fragment */ | ||
| 1480 | long f_used = -1; /* bytes used from the current fragment */ | ||
| 1481 | long n; /* size of the current piece of payload */ | ||
| 1482 | unsigned long tx_packets = 0, tx_bytes = 0; | ||
| 1483 | unsigned int csum_start, sh_len; | ||
| 1484 | int segment; | ||
| 1485 | |||
| 1486 | /* Prepare to egress the headers: set up header edesc. */ | ||
| 1487 | csum_start = skb_checksum_start_offset(skb); | ||
| 1488 | sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
| 1489 | edesc_head.csum = 1; | ||
| 1490 | edesc_head.csum_start = csum_start; | ||
| 1491 | edesc_head.csum_dest = csum_start + skb->csum_offset; | ||
| 1492 | edesc_head.xfer_size = sh_len; | ||
| 1493 | |||
| 1494 | /* This is only used to specify the TLB. */ | ||
| 1495 | edesc_head.stack_idx = large_buffer_stack; | ||
| 1496 | edesc_body.stack_idx = large_buffer_stack; | ||
| 1497 | |||
| 1498 | /* Egress all the edescs. */ | ||
| 1499 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
| 1500 | void *va; | ||
| 1501 | unsigned char *buf; | ||
| 1502 | unsigned int p_used = 0; | ||
| 1503 | |||
| 1504 | /* Egress the header. */ | ||
| 1505 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
| 1506 | NET_IP_ALIGN; | ||
| 1507 | edesc_head.va = va_to_tile_io_addr(buf); | ||
| 1508 | gxio_mpipe_equeue_put_at(equeue, edesc_head, slot); | ||
| 1509 | slot++; | ||
| 1510 | |||
| 1511 | /* Egress the payload. */ | ||
| 1512 | while (p_used < p_len) { | ||
| 1513 | |||
| 1514 | /* Advance as needed. */ | ||
| 1515 | while (f_used >= f_size) { | ||
| 1516 | f_id++; | ||
| 1517 | f_size = sh->frags[f_id].size; | ||
| 1518 | f_used = 0; | ||
| 1519 | } | ||
| 1520 | |||
| 1521 | va = tile_net_frag_buf(&sh->frags[f_id]) + f_used; | ||
| 1522 | |||
| 1523 | /* Use bytes from the current fragment. */ | ||
| 1524 | n = p_len - p_used; | ||
| 1525 | if (n > f_size - f_used) | ||
| 1526 | n = f_size - f_used; | ||
| 1527 | f_used += n; | ||
| 1528 | p_used += n; | ||
| 1529 | |||
| 1530 | /* Egress a piece of the payload. */ | ||
| 1531 | edesc_body.va = va_to_tile_io_addr(va); | ||
| 1532 | edesc_body.xfer_size = n; | ||
| 1533 | edesc_body.bound = !(p_used < p_len); | ||
| 1534 | gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); | ||
| 1535 | slot++; | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | tx_packets++; | ||
| 1539 | tx_bytes += sh_len + p_len; | ||
| 1540 | |||
| 1541 | /* The last segment may be less than gso_size. */ | ||
| 1542 | data_len -= p_len; | ||
| 1543 | if (data_len < p_len) | ||
| 1544 | p_len = data_len; | ||
| 1545 | } | ||
| 1546 | |||
| 1547 | /* Update stats. */ | ||
| 1548 | tile_net_stats_add(tx_packets, &priv->stats.tx_packets); | ||
| 1549 | tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); | ||
| 1550 | } | ||
| 1551 | |||
| 1552 | /* Do "TSO" handling for egress. | ||
| 1553 | * | ||
| 1554 | * Normally drivers set NETIF_F_TSO only to support hardware TSO; | ||
| 1555 | * otherwise the stack uses scatter-gather to implement GSO in software. | ||
| 1556 | * On our testing, enabling GSO support (via NETIF_F_SG) drops network | ||
| 1557 | * performance down to around 7.5 Gbps on the 10G interfaces, although | ||
| 1558 | * also dropping cpu utilization way down, to under 8%. But | ||
| 1559 | * implementing "TSO" in the driver brings performance back up to line | ||
| 1560 | * rate, while dropping cpu usage even further, to less than 4%. In | ||
| 1561 | * practice, profiling of GSO shows that skb_segment() is what causes | ||
| 1562 | * the performance overheads; we benefit in the driver from using | ||
| 1563 | * preallocated memory to duplicate the TCP/IP headers. | ||
| 1564 | */ | ||
| 1565 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | ||
| 1566 | { | ||
| 1567 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 1568 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 1569 | int channel = priv->echannel; | ||
| 1570 | struct tile_net_egress *egress = &egress_for_echannel[channel]; | ||
| 1571 | struct tile_net_comps *comps = info->comps_for_echannel[channel]; | ||
| 1572 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
| 1573 | unsigned long irqflags; | ||
| 1574 | int num_edescs; | ||
| 1575 | s64 slot; | ||
| 1576 | |||
| 1577 | /* Determine how many mpipe edesc's are needed. */ | ||
| 1578 | num_edescs = tso_count_edescs(skb); | ||
| 1579 | |||
| 1580 | local_irq_save(irqflags); | ||
| 1581 | |||
| 1582 | /* Try to acquire a completion entry and an egress slot. */ | ||
| 1583 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
| 1584 | if (slot < 0) { | ||
| 1585 | local_irq_restore(irqflags); | ||
| 1586 | return NETDEV_TX_BUSY; | ||
| 1587 | } | ||
| 1588 | |||
| 1589 | /* Set up copies of header data properly. */ | ||
| 1590 | tso_headers_prepare(skb, egress->headers, slot); | ||
| 1591 | |||
| 1592 | /* Actually pass the data to the network hardware. */ | ||
| 1593 | tso_egress(dev, equeue, skb, egress->headers, slot); | ||
| 1594 | |||
| 1595 | /* Add a completion record. */ | ||
| 1596 | add_comp(equeue, comps, slot + num_edescs - 1, skb); | ||
| 1597 | |||
| 1598 | local_irq_restore(irqflags); | ||
| 1599 | |||
| 1600 | /* Make sure the egress timer is scheduled. */ | ||
| 1601 | tile_net_schedule_egress_timer(); | ||
| 1602 | |||
| 1603 | return NETDEV_TX_OK; | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | /* Analyze the body and frags for a transmit request. */ | ||
| 1607 | static unsigned int tile_net_tx_frags(struct frag *frags, | ||
| 1608 | struct sk_buff *skb, | ||
| 1609 | void *b_data, unsigned int b_len) | ||
| 1610 | { | ||
| 1611 | unsigned int i, n = 0; | ||
| 1612 | |||
| 1613 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
| 1614 | |||
| 1615 | if (b_len != 0) { | ||
| 1616 | frags[n].buf = b_data; | ||
| 1617 | frags[n++].length = b_len; | ||
| 1618 | } | ||
| 1619 | |||
| 1620 | for (i = 0; i < sh->nr_frags; i++) { | ||
| 1621 | skb_frag_t *f = &sh->frags[i]; | ||
| 1622 | frags[n].buf = tile_net_frag_buf(f); | ||
| 1623 | frags[n++].length = skb_frag_size(f); | ||
| 1624 | } | ||
| 1625 | |||
| 1626 | return n; | ||
| 1627 | } | ||
| 1628 | |||
| 1629 | /* Help the kernel transmit a packet. */ | ||
| 1630 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | ||
| 1631 | { | ||
| 1632 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 1633 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 1634 | struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; | ||
| 1635 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
| 1636 | struct tile_net_comps *comps = | ||
| 1637 | info->comps_for_echannel[priv->echannel]; | ||
| 1638 | unsigned int len = skb->len; | ||
| 1639 | unsigned char *data = skb->data; | ||
| 1640 | unsigned int num_edescs; | ||
| 1641 | struct frag frags[MAX_FRAGS]; | ||
| 1642 | gxio_mpipe_edesc_t edescs[MAX_FRAGS]; | ||
| 1643 | unsigned long irqflags; | ||
| 1644 | gxio_mpipe_edesc_t edesc = { { 0 } }; | ||
| 1645 | unsigned int i; | ||
| 1646 | s64 slot; | ||
| 1647 | |||
| 1648 | if (skb_is_gso(skb)) | ||
| 1649 | return tile_net_tx_tso(skb, dev); | ||
| 1650 | |||
| 1651 | num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | ||
| 1652 | |||
| 1653 | /* This is only used to specify the TLB. */ | ||
| 1654 | edesc.stack_idx = large_buffer_stack; | ||
| 1655 | |||
| 1656 | /* Prepare the edescs. */ | ||
| 1657 | for (i = 0; i < num_edescs; i++) { | ||
| 1658 | edesc.xfer_size = frags[i].length; | ||
| 1659 | edesc.va = va_to_tile_io_addr(frags[i].buf); | ||
| 1660 | edescs[i] = edesc; | ||
| 1661 | } | ||
| 1662 | |||
| 1663 | /* Mark the final edesc. */ | ||
| 1664 | edescs[num_edescs - 1].bound = 1; | ||
| 1665 | |||
| 1666 | /* Add checksum info to the initial edesc, if needed. */ | ||
| 1667 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
| 1668 | unsigned int csum_start = skb_checksum_start_offset(skb); | ||
| 1669 | edescs[0].csum = 1; | ||
| 1670 | edescs[0].csum_start = csum_start; | ||
| 1671 | edescs[0].csum_dest = csum_start + skb->csum_offset; | ||
| 1672 | } | ||
| 1673 | |||
| 1674 | local_irq_save(irqflags); | ||
| 1675 | |||
| 1676 | /* Try to acquire a completion entry and an egress slot. */ | ||
| 1677 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
| 1678 | if (slot < 0) { | ||
| 1679 | local_irq_restore(irqflags); | ||
| 1680 | return NETDEV_TX_BUSY; | ||
| 1681 | } | ||
| 1682 | |||
| 1683 | for (i = 0; i < num_edescs; i++) | ||
| 1684 | gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); | ||
| 1685 | |||
| 1686 | /* Add a completion record. */ | ||
| 1687 | add_comp(equeue, comps, slot - 1, skb); | ||
| 1688 | |||
| 1689 | /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ | ||
| 1690 | tile_net_stats_add(1, &priv->stats.tx_packets); | ||
| 1691 | tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), | ||
| 1692 | &priv->stats.tx_bytes); | ||
| 1693 | |||
| 1694 | local_irq_restore(irqflags); | ||
| 1695 | |||
| 1696 | /* Make sure the egress timer is scheduled. */ | ||
| 1697 | tile_net_schedule_egress_timer(); | ||
| 1698 | |||
| 1699 | return NETDEV_TX_OK; | ||
| 1700 | } | ||
| 1701 | |||
| 1702 | /* Return subqueue id on this core (one per core). */ | ||
| 1703 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
| 1704 | { | ||
| 1705 | return smp_processor_id(); | ||
| 1706 | } | ||
| 1707 | |||
| 1708 | /* Deal with a transmit timeout. */ | ||
| 1709 | static void tile_net_tx_timeout(struct net_device *dev) | ||
| 1710 | { | ||
| 1711 | int cpu; | ||
| 1712 | |||
| 1713 | for_each_online_cpu(cpu) | ||
| 1714 | netif_wake_subqueue(dev, cpu); | ||
| 1715 | } | ||
| 1716 | |||
| 1717 | /* Ioctl commands. */ | ||
| 1718 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
| 1719 | { | ||
| 1720 | return -EOPNOTSUPP; | ||
| 1721 | } | ||
| 1722 | |||
| 1723 | /* Get system network statistics for device. */ | ||
| 1724 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | ||
| 1725 | { | ||
| 1726 | struct tile_net_priv *priv = netdev_priv(dev); | ||
| 1727 | return &priv->stats; | ||
| 1728 | } | ||
| 1729 | |||
| 1730 | /* Change the MTU. */ | ||
| 1731 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | ||
| 1732 | { | ||
| 1733 | if ((new_mtu < 68) || (new_mtu > 1500)) | ||
| 1734 | return -EINVAL; | ||
| 1735 | dev->mtu = new_mtu; | ||
| 1736 | return 0; | ||
| 1737 | } | ||
| 1738 | |||
| 1739 | /* Change the Ethernet address of the NIC. | ||
| 1740 | * | ||
| 1741 | * The hypervisor driver does not support changing MAC address. However, | ||
| 1742 | * the hardware does not do anything with the MAC address, so the address | ||
| 1743 | * which gets used on outgoing packets, and which is accepted on incoming | ||
| 1744 | * packets, is completely up to us. | ||
| 1745 | * | ||
| 1746 | * Returns 0 on success, negative on failure. | ||
| 1747 | */ | ||
| 1748 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | ||
| 1749 | { | ||
| 1750 | struct sockaddr *addr = p; | ||
| 1751 | |||
| 1752 | if (!is_valid_ether_addr(addr->sa_data)) | ||
| 1753 | return -EINVAL; | ||
| 1754 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
| 1755 | return 0; | ||
| 1756 | } | ||
| 1757 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void tile_net_netpoll(struct net_device *dev)
{
	/* Mask the ingress interrupt, drain pending packets by calling
	 * the handler directly, then unmask it again.
	 */
	disable_percpu_irq(ingress_irq);
	tile_net_handle_ingress_irq(ingress_irq, NULL);
	enable_percpu_irq(ingress_irq, 0);
}
#endif
| 1770 | |||
| 1771 | static const struct net_device_ops tile_net_ops = { | ||
| 1772 | .ndo_open = tile_net_open, | ||
| 1773 | .ndo_stop = tile_net_stop, | ||
| 1774 | .ndo_start_xmit = tile_net_tx, | ||
| 1775 | .ndo_select_queue = tile_net_select_queue, | ||
| 1776 | .ndo_do_ioctl = tile_net_ioctl, | ||
| 1777 | .ndo_get_stats = tile_net_get_stats, | ||
| 1778 | .ndo_change_mtu = tile_net_change_mtu, | ||
| 1779 | .ndo_tx_timeout = tile_net_tx_timeout, | ||
| 1780 | .ndo_set_mac_address = tile_net_set_mac_address, | ||
| 1781 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1782 | .ndo_poll_controller = tile_net_netpoll, | ||
| 1783 | #endif | ||
| 1784 | }; | ||
| 1785 | |||
| 1786 | /* The setup function. | ||
| 1787 | * | ||
| 1788 | * This uses ether_setup() to assign various fields in dev, including | ||
| 1789 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | ||
| 1790 | */ | ||
| 1791 | static void tile_net_setup(struct net_device *dev) | ||
| 1792 | { | ||
| 1793 | ether_setup(dev); | ||
| 1794 | dev->netdev_ops = &tile_net_ops; | ||
| 1795 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | ||
| 1796 | dev->features |= NETIF_F_LLTX; | ||
| 1797 | dev->features |= NETIF_F_HW_CSUM; | ||
| 1798 | dev->features |= NETIF_F_SG; | ||
| 1799 | dev->features |= NETIF_F_TSO; | ||
| 1800 | dev->mtu = 1500; | ||
| 1801 | } | ||
| 1802 | |||
| 1803 | /* Allocate the device structure, register the device, and obtain the | ||
| 1804 | * MAC address from the hypervisor. | ||
| 1805 | */ | ||
| 1806 | static void tile_net_dev_init(const char *name, const uint8_t *mac) | ||
| 1807 | { | ||
| 1808 | int ret; | ||
| 1809 | int i; | ||
| 1810 | int nz_addr = 0; | ||
| 1811 | struct net_device *dev; | ||
| 1812 | struct tile_net_priv *priv; | ||
| 1813 | |||
| 1814 | /* HACK: Ignore "loop" links. */ | ||
| 1815 | if (strncmp(name, "loop", 4) == 0) | ||
| 1816 | return; | ||
| 1817 | |||
| 1818 | /* Allocate the device structure. Normally, "name" is a | ||
| 1819 | * template, instantiated by register_netdev(), but not for us. | ||
| 1820 | */ | ||
| 1821 | dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup, | ||
| 1822 | NR_CPUS, 1); | ||
| 1823 | if (!dev) { | ||
| 1824 | pr_err("alloc_netdev_mqs(%s) failed\n", name); | ||
| 1825 | return; | ||
| 1826 | } | ||
| 1827 | |||
| 1828 | /* Initialize "priv". */ | ||
| 1829 | priv = netdev_priv(dev); | ||
| 1830 | memset(priv, 0, sizeof(*priv)); | ||
| 1831 | priv->dev = dev; | ||
| 1832 | priv->channel = -1; | ||
| 1833 | priv->loopify_channel = -1; | ||
| 1834 | priv->echannel = -1; | ||
| 1835 | |||
| 1836 | /* Get the MAC address and set it in the device struct; this must | ||
| 1837 | * be done before the device is opened. If the MAC is all zeroes, | ||
| 1838 | * we use a random address, since we're probably on the simulator. | ||
| 1839 | */ | ||
| 1840 | for (i = 0; i < 6; i++) | ||
| 1841 | nz_addr |= mac[i]; | ||
| 1842 | |||
| 1843 | if (nz_addr) { | ||
| 1844 | memcpy(dev->dev_addr, mac, 6); | ||
| 1845 | dev->addr_len = 6; | ||
| 1846 | } else { | ||
| 1847 | random_ether_addr(dev->dev_addr); | ||
| 1848 | } | ||
| 1849 | |||
| 1850 | /* Register the network device. */ | ||
| 1851 | ret = register_netdev(dev); | ||
| 1852 | if (ret) { | ||
| 1853 | netdev_err(dev, "register_netdev failed %d\n", ret); | ||
| 1854 | free_netdev(dev); | ||
| 1855 | return; | ||
| 1856 | } | ||
| 1857 | } | ||
| 1858 | |||
| 1859 | /* Per-cpu module initialization. */ | ||
| 1860 | static void tile_net_init_module_percpu(void *unused) | ||
| 1861 | { | ||
| 1862 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
| 1863 | int my_cpu = smp_processor_id(); | ||
| 1864 | |||
| 1865 | info->has_iqueue = false; | ||
| 1866 | |||
| 1867 | info->my_cpu = my_cpu; | ||
| 1868 | |||
| 1869 | /* Initialize the egress timer. */ | ||
| 1870 | hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
| 1871 | info->egress_timer.function = tile_net_handle_egress_timer; | ||
| 1872 | } | ||
| 1873 | |||
| 1874 | /* Module initialization. */ | ||
| 1875 | static int __init tile_net_init_module(void) | ||
| 1876 | { | ||
| 1877 | int i; | ||
| 1878 | char name[GXIO_MPIPE_LINK_NAME_LEN]; | ||
| 1879 | uint8_t mac[6]; | ||
| 1880 | |||
| 1881 | pr_info("Tilera Network Driver\n"); | ||
| 1882 | |||
| 1883 | mutex_init(&tile_net_devs_for_channel_mutex); | ||
| 1884 | |||
| 1885 | /* Initialize each CPU. */ | ||
| 1886 | on_each_cpu(tile_net_init_module_percpu, NULL, 1); | ||
| 1887 | |||
| 1888 | /* Find out what devices we have, and initialize them. */ | ||
| 1889 | for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) | ||
| 1890 | tile_net_dev_init(name, mac); | ||
| 1891 | |||
| 1892 | if (!network_cpus_init()) | ||
| 1893 | network_cpus_map = *cpu_online_mask; | ||
| 1894 | |||
| 1895 | return 0; | ||
| 1896 | } | ||
| 1897 | |||
| 1898 | module_init(tile_net_init_module); | ||
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4ffcd57b011b..2857ab078aac 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -478,6 +478,7 @@ struct netvsc_device { | |||
| 478 | u32 nvsp_version; | 478 | u32 nvsp_version; |
| 479 | 479 | ||
| 480 | atomic_t num_outstanding_sends; | 480 | atomic_t num_outstanding_sends; |
| 481 | wait_queue_head_t wait_drain; | ||
| 481 | bool start_remove; | 482 | bool start_remove; |
| 482 | bool destroy; | 483 | bool destroy; |
| 483 | /* | 484 | /* |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 8b919471472f..0c569831db5a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) | |||
| 42 | if (!net_device) | 42 | if (!net_device) |
| 43 | return NULL; | 43 | return NULL; |
| 44 | 44 | ||
| 45 | init_waitqueue_head(&net_device->wait_drain); | ||
| 45 | net_device->start_remove = false; | 46 | net_device->start_remove = false; |
| 46 | net_device->destroy = false; | 47 | net_device->destroy = false; |
| 47 | net_device->dev = device; | 48 | net_device->dev = device; |
| @@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device) | |||
| 387 | spin_unlock_irqrestore(&device->channel->inbound_lock, flags); | 388 | spin_unlock_irqrestore(&device->channel->inbound_lock, flags); |
| 388 | 389 | ||
| 389 | /* Wait for all send completions */ | 390 | /* Wait for all send completions */ |
| 390 | while (atomic_read(&net_device->num_outstanding_sends)) { | 391 | wait_event(net_device->wait_drain, |
| 391 | dev_info(&device->device, | 392 | atomic_read(&net_device->num_outstanding_sends) == 0); |
| 392 | "waiting for %d requests to complete...\n", | ||
| 393 | atomic_read(&net_device->num_outstanding_sends)); | ||
| 394 | udelay(100); | ||
| 395 | } | ||
| 396 | 393 | ||
| 397 | netvsc_disconnect_vsp(net_device); | 394 | netvsc_disconnect_vsp(net_device); |
| 398 | 395 | ||
| @@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device, | |||
| 486 | num_outstanding_sends = | 483 | num_outstanding_sends = |
| 487 | atomic_dec_return(&net_device->num_outstanding_sends); | 484 | atomic_dec_return(&net_device->num_outstanding_sends); |
| 488 | 485 | ||
| 486 | if (net_device->destroy && num_outstanding_sends == 0) | ||
| 487 | wake_up(&net_device->wait_drain); | ||
| 488 | |||
| 489 | if (netif_queue_stopped(ndev) && !net_device->start_remove && | 489 | if (netif_queue_stopped(ndev) && !net_device->start_remove && |
| 490 | (hv_ringbuf_avail_percent(&device->channel->outbound) | 490 | (hv_ringbuf_avail_percent(&device->channel->outbound) |
| 491 | > RING_AVAIL_PERCENT_HIWATER || | 491 | > RING_AVAIL_PERCENT_HIWATER || |
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 5ac46f5226f3..47f8e8939266 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c | |||
| @@ -41,6 +41,8 @@ MODULE_LICENSE("GPL"); | |||
| 41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ | 41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ |
| 42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ | 42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ |
| 43 | #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ | 43 | #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ |
| 44 | #define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */ | ||
| 45 | #define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED | ||
| 44 | 46 | ||
| 45 | static int ip175c_config_init(struct phy_device *phydev) | 47 | static int ip175c_config_init(struct phy_device *phydev) |
| 46 | { | 48 | { |
| @@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev) | |||
| 136 | if (c < 0) | 138 | if (c < 0) |
| 137 | return c; | 139 | return c; |
| 138 | 140 | ||
| 141 | /* INTR pin used: speed/link/duplex will cause an interrupt */ | ||
| 142 | c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); | ||
| 143 | if (c < 0) | ||
| 144 | return c; | ||
| 145 | |||
| 139 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { | 146 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { |
| 140 | /* Additional delay (2ns) used to adjust RX clock phase | 147 | /* Additional delay (2ns) used to adjust RX clock phase |
| 141 | * at RGMII interface */ | 148 | * at RGMII interface */ |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 683ef1ce5519..5061608f408c 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
| @@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np) | |||
| 96 | } | 96 | } |
| 97 | /** | 97 | /** |
| 98 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. | 98 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. |
| 99 | * @mdio_np: Pointer to the mii_bus. | 99 | * @mdio_bus_np: Pointer to the mii_bus. |
| 100 | * | 100 | * |
| 101 | * Returns a pointer to the mii_bus, or NULL if none found. | 101 | * Returns a pointer to the mii_bus, or NULL if none found. |
| 102 | * | 102 | * |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 3faef5670d1f..d75d1f56becf 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c | |||
| @@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
| 946 | } | 946 | } |
| 947 | 947 | ||
| 948 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; | 948 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; |
| 949 | static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | 949 | static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { |
| 950 | .rx_urb_size = 8 * 1024, | 950 | .rx_urb_size = 8 * 1024, |
| 951 | .whitelist = { | 951 | .whitelist = { |
| 952 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), | 952 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), |
| @@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | |||
| 954 | } | 954 | } |
| 955 | }; | 955 | }; |
| 956 | 956 | ||
| 957 | static const struct driver_info sierra_net_info_68A3 = { | 957 | static const struct driver_info sierra_net_info_direct_ip = { |
| 958 | .description = "Sierra Wireless USB-to-WWAN Modem", | 958 | .description = "Sierra Wireless USB-to-WWAN Modem", |
| 959 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, | 959 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, |
| 960 | .bind = sierra_net_bind, | 960 | .bind = sierra_net_bind, |
| @@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = { | |||
| 962 | .status = sierra_net_status, | 962 | .status = sierra_net_status, |
| 963 | .rx_fixup = sierra_net_rx_fixup, | 963 | .rx_fixup = sierra_net_rx_fixup, |
| 964 | .tx_fixup = sierra_net_tx_fixup, | 964 | .tx_fixup = sierra_net_tx_fixup, |
| 965 | .data = (unsigned long)&sierra_net_info_data_68A3, | 965 | .data = (unsigned long)&sierra_net_info_data_direct_ip, |
| 966 | }; | 966 | }; |
| 967 | 967 | ||
| 968 | static const struct usb_device_id products[] = { | 968 | static const struct usb_device_id products[] = { |
| 969 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ | 969 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ |
| 970 | .driver_info = (unsigned long) &sierra_net_info_68A3}, | 970 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, |
| 971 | {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ | ||
| 972 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
| 973 | {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ | ||
| 974 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
| 975 | {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ | ||
| 976 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
| 971 | 977 | ||
| 972 | {}, /* last item */ | 978 | {}, /* last item */ |
| 973 | }; | 979 | }; |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5214b1eceb95..f18149ae2588 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -42,7 +42,8 @@ module_param(gso, bool, 0444); | |||
| 42 | #define VIRTNET_DRIVER_VERSION "1.0.0" | 42 | #define VIRTNET_DRIVER_VERSION "1.0.0" |
| 43 | 43 | ||
| 44 | struct virtnet_stats { | 44 | struct virtnet_stats { |
| 45 | struct u64_stats_sync syncp; | 45 | struct u64_stats_sync tx_syncp; |
| 46 | struct u64_stats_sync rx_syncp; | ||
| 46 | u64 tx_bytes; | 47 | u64 tx_bytes; |
| 47 | u64 tx_packets; | 48 | u64 tx_packets; |
| 48 | 49 | ||
| @@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len) | |||
| 300 | 301 | ||
| 301 | hdr = skb_vnet_hdr(skb); | 302 | hdr = skb_vnet_hdr(skb); |
| 302 | 303 | ||
| 303 | u64_stats_update_begin(&stats->syncp); | 304 | u64_stats_update_begin(&stats->rx_syncp); |
| 304 | stats->rx_bytes += skb->len; | 305 | stats->rx_bytes += skb->len; |
| 305 | stats->rx_packets++; | 306 | stats->rx_packets++; |
| 306 | u64_stats_update_end(&stats->syncp); | 307 | u64_stats_update_end(&stats->rx_syncp); |
| 307 | 308 | ||
| 308 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 309 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
| 309 | pr_debug("Needs csum!\n"); | 310 | pr_debug("Needs csum!\n"); |
| @@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) | |||
| 565 | while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { | 566 | while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { |
| 566 | pr_debug("Sent skb %p\n", skb); | 567 | pr_debug("Sent skb %p\n", skb); |
| 567 | 568 | ||
| 568 | u64_stats_update_begin(&stats->syncp); | 569 | u64_stats_update_begin(&stats->tx_syncp); |
| 569 | stats->tx_bytes += skb->len; | 570 | stats->tx_bytes += skb->len; |
| 570 | stats->tx_packets++; | 571 | stats->tx_packets++; |
| 571 | u64_stats_update_end(&stats->syncp); | 572 | u64_stats_update_end(&stats->tx_syncp); |
| 572 | 573 | ||
| 573 | tot_sgs += skb_vnet_hdr(skb)->num_sg; | 574 | tot_sgs += skb_vnet_hdr(skb)->num_sg; |
| 574 | dev_kfree_skb_any(skb); | 575 | dev_kfree_skb_any(skb); |
| @@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, | |||
| 703 | u64 tpackets, tbytes, rpackets, rbytes; | 704 | u64 tpackets, tbytes, rpackets, rbytes; |
| 704 | 705 | ||
| 705 | do { | 706 | do { |
| 706 | start = u64_stats_fetch_begin(&stats->syncp); | 707 | start = u64_stats_fetch_begin(&stats->tx_syncp); |
| 707 | tpackets = stats->tx_packets; | 708 | tpackets = stats->tx_packets; |
| 708 | tbytes = stats->tx_bytes; | 709 | tbytes = stats->tx_bytes; |
| 710 | } while (u64_stats_fetch_retry(&stats->tx_syncp, start)); | ||
| 711 | |||
| 712 | do { | ||
| 713 | start = u64_stats_fetch_begin(&stats->rx_syncp); | ||
| 709 | rpackets = stats->rx_packets; | 714 | rpackets = stats->rx_packets; |
| 710 | rbytes = stats->rx_bytes; | 715 | rbytes = stats->rx_bytes; |
| 711 | } while (u64_stats_fetch_retry(&stats->syncp, start)); | 716 | } while (u64_stats_fetch_retry(&stats->rx_syncp, start)); |
| 712 | 717 | ||
| 713 | tot->rx_packets += rpackets; | 718 | tot->rx_packets += rpackets; |
| 714 | tot->tx_packets += tpackets; | 719 | tot->tx_packets += tpackets; |
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 67c13af6f206..c06b6cb5c91e 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
| @@ -877,6 +877,10 @@ struct b43_wl { | |||
| 877 | * from the mac80211 subsystem. */ | 877 | * from the mac80211 subsystem. */ |
| 878 | u16 mac80211_initially_registered_queues; | 878 | u16 mac80211_initially_registered_queues; |
| 879 | 879 | ||
| 880 | /* Set this if we call ieee80211_register_hw() and check if we call | ||
| 881 | * ieee80211_unregister_hw(). */ | ||
| 882 | bool hw_registred; | ||
| 883 | |||
| 880 | /* We can only have one operating interface (802.11 core) | 884 | /* We can only have one operating interface (802.11 core) |
| 881 | * at a time. General information about this interface follows. | 885 | * at a time. General information about this interface follows. |
| 882 | */ | 886 | */ |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 5a39b226b2e3..acd03a4f9730 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
| @@ -2437,6 +2437,7 @@ start_ieee80211: | |||
| 2437 | err = ieee80211_register_hw(wl->hw); | 2437 | err = ieee80211_register_hw(wl->hw); |
| 2438 | if (err) | 2438 | if (err) |
| 2439 | goto err_one_core_detach; | 2439 | goto err_one_core_detach; |
| 2440 | wl->hw_registred = true; | ||
| 2440 | b43_leds_register(wl->current_dev); | 2441 | b43_leds_register(wl->current_dev); |
| 2441 | goto out; | 2442 | goto out; |
| 2442 | 2443 | ||
| @@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) | |||
| 5299 | 5300 | ||
| 5300 | hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; | 5301 | hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; |
| 5301 | wl->mac80211_initially_registered_queues = hw->queues; | 5302 | wl->mac80211_initially_registered_queues = hw->queues; |
| 5303 | wl->hw_registred = false; | ||
| 5302 | hw->max_rates = 2; | 5304 | hw->max_rates = 2; |
| 5303 | SET_IEEE80211_DEV(hw, dev->dev); | 5305 | SET_IEEE80211_DEV(hw, dev->dev); |
| 5304 | if (is_valid_ether_addr(sprom->et1mac)) | 5306 | if (is_valid_ether_addr(sprom->et1mac)) |
| @@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core) | |||
| 5370 | * as the ieee80211 unreg will destroy the workqueue. */ | 5372 | * as the ieee80211 unreg will destroy the workqueue. */ |
| 5371 | cancel_work_sync(&wldev->restart_work); | 5373 | cancel_work_sync(&wldev->restart_work); |
| 5372 | 5374 | ||
| 5373 | /* Restore the queues count before unregistering, because firmware detect | 5375 | B43_WARN_ON(!wl); |
| 5374 | * might have modified it. Restoring is important, so the networking | 5376 | if (wl->current_dev == wldev && wl->hw_registred) { |
| 5375 | * stack can properly free resources. */ | 5377 | /* Restore the queues count before unregistering, because firmware detect |
| 5376 | wl->hw->queues = wl->mac80211_initially_registered_queues; | 5378 | * might have modified it. Restoring is important, so the networking |
| 5377 | b43_leds_stop(wldev); | 5379 | * stack can properly free resources. */ |
| 5378 | ieee80211_unregister_hw(wl->hw); | 5380 | wl->hw->queues = wl->mac80211_initially_registered_queues; |
| 5381 | b43_leds_stop(wldev); | ||
| 5382 | ieee80211_unregister_hw(wl->hw); | ||
| 5383 | } | ||
| 5379 | 5384 | ||
| 5380 | b43_one_core_detach(wldev->dev); | 5385 | b43_one_core_detach(wldev->dev); |
| 5381 | 5386 | ||
| @@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev) | |||
| 5446 | cancel_work_sync(&wldev->restart_work); | 5451 | cancel_work_sync(&wldev->restart_work); |
| 5447 | 5452 | ||
| 5448 | B43_WARN_ON(!wl); | 5453 | B43_WARN_ON(!wl); |
| 5449 | if (wl->current_dev == wldev) { | 5454 | if (wl->current_dev == wldev && wl->hw_registred) { |
| 5450 | /* Restore the queues count before unregistering, because firmware detect | 5455 | /* Restore the queues count before unregistering, because firmware detect |
| 5451 | * might have modified it. Restoring is important, so the networking | 5456 | * might have modified it. Restoring is important, so the networking |
| 5452 | * stack can properly free resources. */ | 5457 | * stack can properly free resources. */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index e2480d196276..8e7e6928c936 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | |||
| @@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) | |||
| 89 | data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; | 89 | data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; |
| 90 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); | 90 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); |
| 91 | 91 | ||
| 92 | /* redirect, configure ane enable io for interrupt signal */ | 92 | /* redirect, configure and enable io for interrupt signal */ |
| 93 | data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; | 93 | data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; |
| 94 | if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) | 94 | if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH) |
| 95 | data |= SDIO_SEPINT_ACT_HI; | 95 | data |= SDIO_SEPINT_ACT_HI; |
| 96 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); | 96 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); |
| 97 | 97 | ||
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 9cfae0c08707..95aa8e1683ec 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
| @@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
| 1903 | netif_stop_queue(priv->net_dev); | 1903 | netif_stop_queue(priv->net_dev); |
| 1904 | } | 1904 | } |
| 1905 | 1905 | ||
| 1906 | /* Called by register_netdev() */ | ||
| 1907 | static int ipw2100_net_init(struct net_device *dev) | ||
| 1908 | { | ||
| 1909 | struct ipw2100_priv *priv = libipw_priv(dev); | ||
| 1910 | |||
| 1911 | return ipw2100_up(priv, 1); | ||
| 1912 | } | ||
| 1913 | |||
| 1914 | static int ipw2100_wdev_init(struct net_device *dev) | 1906 | static int ipw2100_wdev_init(struct net_device *dev) |
| 1915 | { | 1907 | { |
| 1916 | struct ipw2100_priv *priv = libipw_priv(dev); | 1908 | struct ipw2100_priv *priv = libipw_priv(dev); |
| @@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = { | |||
| 6087 | .ndo_stop = ipw2100_close, | 6079 | .ndo_stop = ipw2100_close, |
| 6088 | .ndo_start_xmit = libipw_xmit, | 6080 | .ndo_start_xmit = libipw_xmit, |
| 6089 | .ndo_change_mtu = libipw_change_mtu, | 6081 | .ndo_change_mtu = libipw_change_mtu, |
| 6090 | .ndo_init = ipw2100_net_init, | ||
| 6091 | .ndo_tx_timeout = ipw2100_tx_timeout, | 6082 | .ndo_tx_timeout = ipw2100_tx_timeout, |
| 6092 | .ndo_set_mac_address = ipw2100_set_address, | 6083 | .ndo_set_mac_address = ipw2100_set_address, |
| 6093 | .ndo_validate_addr = eth_validate_addr, | 6084 | .ndo_validate_addr = eth_validate_addr, |
| @@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
| 6329 | printk(KERN_INFO DRV_NAME | 6320 | printk(KERN_INFO DRV_NAME |
| 6330 | ": Detected Intel PRO/Wireless 2100 Network Connection\n"); | 6321 | ": Detected Intel PRO/Wireless 2100 Network Connection\n"); |
| 6331 | 6322 | ||
| 6323 | err = ipw2100_up(priv, 1); | ||
| 6324 | if (err) | ||
| 6325 | goto fail; | ||
| 6326 | |||
| 6332 | err = ipw2100_wdev_init(dev); | 6327 | err = ipw2100_wdev_init(dev); |
| 6333 | if (err) | 6328 | if (err) |
| 6334 | goto fail; | 6329 | goto fail; |
| @@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
| 6338 | * network device we would call ipw2100_up. This introduced a race | 6333 | * network device we would call ipw2100_up. This introduced a race |
| 6339 | * condition with newer hotplug configurations (network was coming | 6334 | * condition with newer hotplug configurations (network was coming |
| 6340 | * up and making calls before the device was initialized). | 6335 | * up and making calls before the device was initialized). |
| 6341 | * | 6336 | */ |
| 6342 | * If we called ipw2100_up before we registered the device, then the | ||
| 6343 | * device name wasn't registered. So, we instead use the net_dev->init | ||
| 6344 | * member to call a function that then just turns and calls ipw2100_up. | ||
| 6345 | * net_dev->init is called after name allocation but before the | ||
| 6346 | * notifier chain is called */ | ||
| 6347 | err = register_netdev(dev); | 6337 | err = register_netdev(dev); |
| 6348 | if (err) { | 6338 | if (err) { |
| 6349 | printk(KERN_WARNING DRV_NAME | 6339 | printk(KERN_WARNING DRV_NAME |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 19f7ee84ae89..e5e8ada4aaf6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
| @@ -35,17 +35,20 @@ | |||
| 35 | #define IWL6000_UCODE_API_MAX 6 | 35 | #define IWL6000_UCODE_API_MAX 6 |
| 36 | #define IWL6050_UCODE_API_MAX 5 | 36 | #define IWL6050_UCODE_API_MAX 5 |
| 37 | #define IWL6000G2_UCODE_API_MAX 6 | 37 | #define IWL6000G2_UCODE_API_MAX 6 |
| 38 | #define IWL6035_UCODE_API_MAX 6 | ||
| 38 | 39 | ||
| 39 | /* Oldest version we won't warn about */ | 40 | /* Oldest version we won't warn about */ |
| 40 | #define IWL6000_UCODE_API_OK 4 | 41 | #define IWL6000_UCODE_API_OK 4 |
| 41 | #define IWL6000G2_UCODE_API_OK 5 | 42 | #define IWL6000G2_UCODE_API_OK 5 |
| 42 | #define IWL6050_UCODE_API_OK 5 | 43 | #define IWL6050_UCODE_API_OK 5 |
| 43 | #define IWL6000G2B_UCODE_API_OK 6 | 44 | #define IWL6000G2B_UCODE_API_OK 6 |
| 45 | #define IWL6035_UCODE_API_OK 6 | ||
| 44 | 46 | ||
| 45 | /* Lowest firmware API version supported */ | 47 | /* Lowest firmware API version supported */ |
| 46 | #define IWL6000_UCODE_API_MIN 4 | 48 | #define IWL6000_UCODE_API_MIN 4 |
| 47 | #define IWL6050_UCODE_API_MIN 4 | 49 | #define IWL6050_UCODE_API_MIN 4 |
| 48 | #define IWL6000G2_UCODE_API_MIN 4 | 50 | #define IWL6000G2_UCODE_API_MIN 5 |
| 51 | #define IWL6035_UCODE_API_MIN 6 | ||
| 49 | 52 | ||
| 50 | /* EEPROM versions */ | 53 | /* EEPROM versions */ |
| 51 | #define EEPROM_6000_TX_POWER_VERSION (4) | 54 | #define EEPROM_6000_TX_POWER_VERSION (4) |
| @@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = { | |||
| 227 | IWL_DEVICE_6030, | 230 | IWL_DEVICE_6030, |
| 228 | }; | 231 | }; |
| 229 | 232 | ||
| 233 | #define IWL_DEVICE_6035 \ | ||
| 234 | .fw_name_pre = IWL6030_FW_PRE, \ | ||
| 235 | .ucode_api_max = IWL6035_UCODE_API_MAX, \ | ||
| 236 | .ucode_api_ok = IWL6035_UCODE_API_OK, \ | ||
| 237 | .ucode_api_min = IWL6035_UCODE_API_MIN, \ | ||
| 238 | .device_family = IWL_DEVICE_FAMILY_6030, \ | ||
| 239 | .max_inst_size = IWL60_RTC_INST_SIZE, \ | ||
| 240 | .max_data_size = IWL60_RTC_DATA_SIZE, \ | ||
| 241 | .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ | ||
| 242 | .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ | ||
| 243 | .base_params = &iwl6000_g2_base_params, \ | ||
| 244 | .bt_params = &iwl6000_bt_params, \ | ||
| 245 | .need_temp_offset_calib = true, \ | ||
| 246 | .led_mode = IWL_LED_RF_STATE, \ | ||
| 247 | .adv_pm = true | ||
| 248 | |||
| 230 | const struct iwl_cfg iwl6035_2agn_cfg = { | 249 | const struct iwl_cfg iwl6035_2agn_cfg = { |
| 231 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", | 250 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", |
| 232 | IWL_DEVICE_6030, | 251 | IWL_DEVICE_6035, |
| 233 | .ht_params = &iwl6000_ht_params, | 252 | .ht_params = &iwl6000_ht_params, |
| 234 | }; | 253 | }; |
| 235 | 254 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index aea07aab3c9e..eb6a8eaf42fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c | |||
| @@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, | |||
| 1267 | key_flags |= STA_KEY_MULTICAST_MSK; | 1267 | key_flags |= STA_KEY_MULTICAST_MSK; |
| 1268 | 1268 | ||
| 1269 | sta_cmd.key.key_flags = key_flags; | 1269 | sta_cmd.key.key_flags = key_flags; |
| 1270 | sta_cmd.key.key_offset = WEP_INVALID_OFFSET; | 1270 | sta_cmd.key.key_offset = keyconf->hw_key_idx; |
| 1271 | sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; | 1271 | sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; |
| 1272 | sta_cmd.mode = STA_CONTROL_MODIFY_MSK; | 1272 | sta_cmd.mode = STA_CONTROL_MODIFY_MSK; |
| 1273 | 1273 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index d742900969ea..fac67a526a30 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
| @@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) | |||
| 861 | 861 | ||
| 862 | /* We have our copies now, allow OS release its copies */ | 862 | /* We have our copies now, allow OS release its copies */ |
| 863 | release_firmware(ucode_raw); | 863 | release_firmware(ucode_raw); |
| 864 | complete(&drv->request_firmware_complete); | ||
| 865 | 864 | ||
| 866 | drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); | 865 | drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); |
| 867 | 866 | ||
| 868 | if (!drv->op_mode) | 867 | if (!drv->op_mode) |
| 869 | goto out_free_fw; | 868 | goto out_unbind; |
| 870 | 869 | ||
| 870 | /* | ||
| 871 | * Complete the firmware request last so that | ||
| 872 | * a driver unbind (stop) doesn't run while we | ||
| 873 | * are doing the start() above. | ||
| 874 | */ | ||
| 875 | complete(&drv->request_firmware_complete); | ||
| 871 | return; | 876 | return; |
| 872 | 877 | ||
| 873 | try_again: | 878 | try_again: |
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 50c58911e718..b8e2b223ac36 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | |||
| @@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans, | |||
| 568 | * iwl_get_max_txpower_avg - get the highest tx power from all chains. | 568 | * iwl_get_max_txpower_avg - get the highest tx power from all chains. |
| 569 | * find the highest tx power from all chains for the channel | 569 | * find the highest tx power from all chains for the channel |
| 570 | */ | 570 | */ |
| 571 | static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg, | 571 | static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, |
| 572 | struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, | 572 | struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, |
| 573 | int element, s8 *max_txpower_in_half_dbm) | 573 | int element, s8 *max_txpower_in_half_dbm) |
| 574 | { | 574 | { |
| 575 | s8 max_txpower_avg = 0; /* (dBm) */ | 575 | s8 max_txpower_avg = 0; /* (dBm) */ |
| 576 | 576 | ||
| 577 | /* Take the highest tx power from any valid chains */ | 577 | /* Take the highest tx power from any valid chains */ |
| 578 | if ((cfg->valid_tx_ant & ANT_A) && | 578 | if ((priv->hw_params.valid_tx_ant & ANT_A) && |
| 579 | (enhanced_txpower[element].chain_a_max > max_txpower_avg)) | 579 | (enhanced_txpower[element].chain_a_max > max_txpower_avg)) |
| 580 | max_txpower_avg = enhanced_txpower[element].chain_a_max; | 580 | max_txpower_avg = enhanced_txpower[element].chain_a_max; |
| 581 | if ((cfg->valid_tx_ant & ANT_B) && | 581 | if ((priv->hw_params.valid_tx_ant & ANT_B) && |
| 582 | (enhanced_txpower[element].chain_b_max > max_txpower_avg)) | 582 | (enhanced_txpower[element].chain_b_max > max_txpower_avg)) |
| 583 | max_txpower_avg = enhanced_txpower[element].chain_b_max; | 583 | max_txpower_avg = enhanced_txpower[element].chain_b_max; |
| 584 | if ((cfg->valid_tx_ant & ANT_C) && | 584 | if ((priv->hw_params.valid_tx_ant & ANT_C) && |
| 585 | (enhanced_txpower[element].chain_c_max > max_txpower_avg)) | 585 | (enhanced_txpower[element].chain_c_max > max_txpower_avg)) |
| 586 | max_txpower_avg = enhanced_txpower[element].chain_c_max; | 586 | max_txpower_avg = enhanced_txpower[element].chain_c_max; |
| 587 | if (((cfg->valid_tx_ant == ANT_AB) | | 587 | if (((priv->hw_params.valid_tx_ant == ANT_AB) | |
| 588 | (cfg->valid_tx_ant == ANT_BC) | | 588 | (priv->hw_params.valid_tx_ant == ANT_BC) | |
| 589 | (cfg->valid_tx_ant == ANT_AC)) && | 589 | (priv->hw_params.valid_tx_ant == ANT_AC)) && |
| 590 | (enhanced_txpower[element].mimo2_max > max_txpower_avg)) | 590 | (enhanced_txpower[element].mimo2_max > max_txpower_avg)) |
| 591 | max_txpower_avg = enhanced_txpower[element].mimo2_max; | 591 | max_txpower_avg = enhanced_txpower[element].mimo2_max; |
| 592 | if ((cfg->valid_tx_ant == ANT_ABC) && | 592 | if ((priv->hw_params.valid_tx_ant == ANT_ABC) && |
| 593 | (enhanced_txpower[element].mimo3_max > max_txpower_avg)) | 593 | (enhanced_txpower[element].mimo3_max > max_txpower_avg)) |
| 594 | max_txpower_avg = enhanced_txpower[element].mimo3_max; | 594 | max_txpower_avg = enhanced_txpower[element].mimo3_max; |
| 595 | 595 | ||
| @@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) | |||
| 691 | ((txp->delta_20_in_40 & 0xf0) >> 4), | 691 | ((txp->delta_20_in_40 & 0xf0) >> 4), |
| 692 | (txp->delta_20_in_40 & 0x0f)); | 692 | (txp->delta_20_in_40 & 0x0f)); |
| 693 | 693 | ||
| 694 | max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx, | 694 | max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, |
| 695 | &max_txp_avg_halfdbm); | 695 | &max_txp_avg_halfdbm); |
| 696 | 696 | ||
| 697 | /* | 697 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c index ab2f4d7500a4..3ee23134c02b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c +++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c | |||
| @@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
| 199 | WIPHY_FLAG_DISABLE_BEACON_HINTS | | 199 | WIPHY_FLAG_DISABLE_BEACON_HINTS | |
| 200 | WIPHY_FLAG_IBSS_RSN; | 200 | WIPHY_FLAG_IBSS_RSN; |
| 201 | 201 | ||
| 202 | #ifdef CONFIG_PM_SLEEP | ||
| 202 | if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && | 203 | if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && |
| 203 | priv->trans->ops->wowlan_suspend && | 204 | priv->trans->ops->wowlan_suspend && |
| 204 | device_can_wakeup(priv->trans->dev)) { | 205 | device_can_wakeup(priv->trans->dev)) { |
| @@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
| 217 | hw->wiphy->wowlan.pattern_max_len = | 218 | hw->wiphy->wowlan.pattern_max_len = |
| 218 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; | 219 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; |
| 219 | } | 220 | } |
| 221 | #endif | ||
| 220 | 222 | ||
| 221 | if (iwlwifi_mod_params.power_save) | 223 | if (iwlwifi_mod_params.power_save) |
| 222 | hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; | 224 | hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; |
| @@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
| 249 | ret = ieee80211_register_hw(priv->hw); | 251 | ret = ieee80211_register_hw(priv->hw); |
| 250 | if (ret) { | 252 | if (ret) { |
| 251 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); | 253 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); |
| 254 | iwl_leds_exit(priv); | ||
| 252 | return ret; | 255 | return ret; |
| 253 | } | 256 | } |
| 254 | priv->mac80211_registered = 1; | 257 | priv->mac80211_registered = 1; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 3b1069290fa9..dfd54662e3e6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h | |||
| @@ -224,6 +224,7 @@ | |||
| 224 | #define SCD_TXFACT (SCD_BASE + 0x10) | 224 | #define SCD_TXFACT (SCD_BASE + 0x10) |
| 225 | #define SCD_ACTIVE (SCD_BASE + 0x14) | 225 | #define SCD_ACTIVE (SCD_BASE + 0x14) |
| 226 | #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) | 226 | #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) |
| 227 | #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) | ||
| 227 | #define SCD_AGGR_SEL (SCD_BASE + 0x248) | 228 | #define SCD_AGGR_SEL (SCD_BASE + 0x248) |
| 228 | #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) | 229 | #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) |
| 229 | 230 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index ec6fb395b84d..79c6b91417f9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | |||
| @@ -1058,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans) | |||
| 1058 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, | 1058 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, |
| 1059 | trans_pcie->scd_bc_tbls.dma >> 10); | 1059 | trans_pcie->scd_bc_tbls.dma >> 10); |
| 1060 | 1060 | ||
| 1061 | /* The chain extension of the SCD doesn't work well. This feature is | ||
| 1062 | * enabled by default by the HW, so we need to disable it manually. | ||
| 1063 | */ | ||
| 1064 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | ||
| 1065 | |||
| 1061 | /* Enable DMA channel */ | 1066 | /* Enable DMA channel */ |
| 1062 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) | 1067 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) |
| 1063 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | 1068 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index fb787df01666..a0b7cfd34685 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
| 1555 | hdr = (struct ieee80211_hdr *) skb->data; | 1555 | hdr = (struct ieee80211_hdr *) skb->data; |
| 1556 | mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); | 1556 | mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); |
| 1557 | } | 1557 | } |
| 1558 | txi->flags |= IEEE80211_TX_STAT_ACK; | ||
| 1558 | } | 1559 | } |
| 1559 | ieee80211_tx_status_irqsafe(data2->hw, skb); | 1560 | ieee80211_tx_status_irqsafe(data2->hw, skb); |
| 1560 | return 0; | 1561 | return 0; |
| @@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void) | |||
| 1721 | "unregister family %i\n", ret); | 1722 | "unregister family %i\n", ret); |
| 1722 | } | 1723 | } |
| 1723 | 1724 | ||
| 1725 | static const struct ieee80211_iface_limit hwsim_if_limits[] = { | ||
| 1726 | { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, | ||
| 1727 | { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | | ||
| 1728 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | ||
| 1729 | #ifdef CONFIG_MAC80211_MESH | ||
| 1730 | BIT(NL80211_IFTYPE_MESH_POINT) | | ||
| 1731 | #endif | ||
| 1732 | BIT(NL80211_IFTYPE_AP) | | ||
| 1733 | BIT(NL80211_IFTYPE_P2P_GO) }, | ||
| 1734 | }; | ||
| 1735 | |||
| 1736 | static const struct ieee80211_iface_combination hwsim_if_comb = { | ||
| 1737 | .limits = hwsim_if_limits, | ||
| 1738 | .n_limits = ARRAY_SIZE(hwsim_if_limits), | ||
| 1739 | .max_interfaces = 2048, | ||
| 1740 | .num_different_channels = 1, | ||
| 1741 | }; | ||
| 1742 | |||
| 1724 | static int __init init_mac80211_hwsim(void) | 1743 | static int __init init_mac80211_hwsim(void) |
| 1725 | { | 1744 | { |
| 1726 | int i, err = 0; | 1745 | int i, err = 0; |
| @@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void) | |||
| 1782 | hw->wiphy->n_addresses = 2; | 1801 | hw->wiphy->n_addresses = 2; |
| 1783 | hw->wiphy->addresses = data->addresses; | 1802 | hw->wiphy->addresses = data->addresses; |
| 1784 | 1803 | ||
| 1804 | hw->wiphy->iface_combinations = &hwsim_if_comb; | ||
| 1805 | hw->wiphy->n_iface_combinations = 1; | ||
| 1806 | |||
| 1785 | if (fake_hw_scan) { | 1807 | if (fake_hw_scan) { |
| 1786 | hw->wiphy->max_scan_ssids = 255; | 1808 | hw->wiphy->max_scan_ssids = 255; |
| 1787 | hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; | 1809 | hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 87671446e24b..015fec3371a0 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
| @@ -948,6 +948,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, | |||
| 948 | bss_cfg->ssid.ssid_len = params->ssid_len; | 948 | bss_cfg->ssid.ssid_len = params->ssid_len; |
| 949 | } | 949 | } |
| 950 | 950 | ||
| 951 | switch (params->hidden_ssid) { | ||
| 952 | case NL80211_HIDDEN_SSID_NOT_IN_USE: | ||
| 953 | bss_cfg->bcast_ssid_ctl = 1; | ||
| 954 | break; | ||
| 955 | case NL80211_HIDDEN_SSID_ZERO_LEN: | ||
| 956 | bss_cfg->bcast_ssid_ctl = 0; | ||
| 957 | break; | ||
| 958 | case NL80211_HIDDEN_SSID_ZERO_CONTENTS: | ||
| 959 | /* firmware doesn't support this type of hidden SSID */ | ||
| 960 | default: | ||
| 961 | return -EINVAL; | ||
| 962 | } | ||
| 963 | |||
| 951 | if (mwifiex_set_secure_params(priv, bss_cfg, params)) { | 964 | if (mwifiex_set_secure_params(priv, bss_cfg, params)) { |
| 952 | kfree(bss_cfg); | 965 | kfree(bss_cfg); |
| 953 | wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); | 966 | wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); |
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 9f674bbebe65..561452a5c818 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h | |||
| @@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { | |||
| 122 | #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) | 122 | #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) |
| 123 | #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) | 123 | #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) |
| 124 | #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) | 124 | #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) |
| 125 | #define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) | ||
| 125 | #define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) | 126 | #define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) |
| 126 | #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) | 127 | #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) |
| 127 | #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) | 128 | #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) |
| @@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid { | |||
| 1209 | u8 ssid[0]; | 1210 | u8 ssid[0]; |
| 1210 | } __packed; | 1211 | } __packed; |
| 1211 | 1212 | ||
| 1213 | struct host_cmd_tlv_bcast_ssid { | ||
| 1214 | struct host_cmd_tlv tlv; | ||
| 1215 | u8 bcast_ctl; | ||
| 1216 | } __packed; | ||
| 1217 | |||
| 1212 | struct host_cmd_tlv_beacon_period { | 1218 | struct host_cmd_tlv_beacon_period { |
| 1213 | struct host_cmd_tlv tlv; | 1219 | struct host_cmd_tlv tlv; |
| 1214 | __le16 period; | 1220 | __le16 period; |
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 76dfbc42a732..8173ab66066d 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c | |||
| @@ -132,6 +132,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) | |||
| 132 | struct host_cmd_tlv_dtim_period *dtim_period; | 132 | struct host_cmd_tlv_dtim_period *dtim_period; |
| 133 | struct host_cmd_tlv_beacon_period *beacon_period; | 133 | struct host_cmd_tlv_beacon_period *beacon_period; |
| 134 | struct host_cmd_tlv_ssid *ssid; | 134 | struct host_cmd_tlv_ssid *ssid; |
| 135 | struct host_cmd_tlv_bcast_ssid *bcast_ssid; | ||
| 135 | struct host_cmd_tlv_channel_band *chan_band; | 136 | struct host_cmd_tlv_channel_band *chan_band; |
| 136 | struct host_cmd_tlv_frag_threshold *frag_threshold; | 137 | struct host_cmd_tlv_frag_threshold *frag_threshold; |
| 137 | struct host_cmd_tlv_rts_threshold *rts_threshold; | 138 | struct host_cmd_tlv_rts_threshold *rts_threshold; |
| @@ -153,6 +154,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) | |||
| 153 | cmd_size += sizeof(struct host_cmd_tlv) + | 154 | cmd_size += sizeof(struct host_cmd_tlv) + |
| 154 | bss_cfg->ssid.ssid_len; | 155 | bss_cfg->ssid.ssid_len; |
| 155 | tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; | 156 | tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; |
| 157 | |||
| 158 | bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv; | ||
| 159 | bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID); | ||
| 160 | bcast_ssid->tlv.len = | ||
| 161 | cpu_to_le16(sizeof(bcast_ssid->bcast_ctl)); | ||
| 162 | bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl; | ||
| 163 | cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid); | ||
| 164 | tlv += sizeof(struct host_cmd_tlv_bcast_ssid); | ||
| 156 | } | 165 | } |
| 157 | if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { | 166 | if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { |
| 158 | chan_band = (struct host_cmd_tlv_channel_band *)tlv; | 167 | chan_band = (struct host_cmd_tlv_channel_band *)tlv; |
| @@ -416,6 +425,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel) | |||
| 416 | if (!bss_cfg) | 425 | if (!bss_cfg) |
| 417 | return -ENOMEM; | 426 | return -ENOMEM; |
| 418 | 427 | ||
| 428 | mwifiex_set_sys_config_invalid_data(bss_cfg); | ||
| 419 | bss_cfg->band_cfg = BAND_CONFIG_MANUAL; | 429 | bss_cfg->band_cfg = BAND_CONFIG_MANUAL; |
| 420 | bss_cfg->channel = channel; | 430 | bss_cfg->channel = channel; |
| 421 | 431 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index ca36cccaba31..8f754025b06e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h | |||
| @@ -396,8 +396,7 @@ struct rt2x00_intf { | |||
| 396 | * for hardware which doesn't support hardware | 396 | * for hardware which doesn't support hardware |
| 397 | * sequence counting. | 397 | * sequence counting. |
| 398 | */ | 398 | */ |
| 399 | spinlock_t seqlock; | 399 | atomic_t seqno; |
| 400 | u16 seqno; | ||
| 401 | }; | 400 | }; |
| 402 | 401 | ||
| 403 | static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) | 402 | static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index b49773ef72f2..dd24b2663b5e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c | |||
| @@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, | |||
| 277 | else | 277 | else |
| 278 | rt2x00dev->intf_sta_count++; | 278 | rt2x00dev->intf_sta_count++; |
| 279 | 279 | ||
| 280 | spin_lock_init(&intf->seqlock); | ||
| 281 | mutex_init(&intf->beacon_skb_mutex); | 280 | mutex_init(&intf->beacon_skb_mutex); |
| 282 | intf->beacon = entry; | 281 | intf->beacon = entry; |
| 283 | 282 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 4c662eccf53c..2fd830103415 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c | |||
| @@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
| 207 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 207 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
| 208 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 208 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
| 209 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); | 209 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); |
| 210 | u16 seqno; | ||
| 210 | 211 | ||
| 211 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) | 212 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) |
| 212 | return; | 213 | return; |
| @@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
| 238 | * sequence counting per-frame, since those will override the | 239 | * sequence counting per-frame, since those will override the |
| 239 | * sequence counter given by mac80211. | 240 | * sequence counter given by mac80211. |
| 240 | */ | 241 | */ |
| 241 | spin_lock(&intf->seqlock); | ||
| 242 | |||
| 243 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) | 242 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) |
| 244 | intf->seqno += 0x10; | 243 | seqno = atomic_add_return(0x10, &intf->seqno); |
| 245 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | 244 | else |
| 246 | hdr->seq_ctrl |= cpu_to_le16(intf->seqno); | 245 | seqno = atomic_read(&intf->seqno); |
| 247 | |||
| 248 | spin_unlock(&intf->seqlock); | ||
| 249 | 246 | ||
| 247 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
| 248 | hdr->seq_ctrl |= cpu_to_le16(seqno); | ||
| 250 | } | 249 | } |
| 251 | 250 | ||
| 252 | static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, | 251 | static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, |
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c index 2e0de2f5f0f9..c2d5b495c179 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c | |||
| @@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev, | |||
| 117 | radio_on = true; | 117 | radio_on = true; |
| 118 | } else if (radio_on) { | 118 | } else if (radio_on) { |
| 119 | radio_on = false; | 119 | radio_on = false; |
| 120 | cancel_delayed_work_sync(&priv->led_on); | 120 | cancel_delayed_work(&priv->led_on); |
| 121 | ieee80211_queue_delayed_work(hw, &priv->led_off, 0); | 121 | ieee80211_queue_delayed_work(hw, &priv->led_off, 0); |
| 122 | } | 122 | } |
| 123 | } else if (radio_on) { | 123 | } else if (radio_on) { |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 447e83472c01..77cb54a65cde 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -1744,6 +1744,11 @@ int pci_prepare_to_sleep(struct pci_dev *dev) | |||
| 1744 | if (target_state == PCI_POWER_ERROR) | 1744 | if (target_state == PCI_POWER_ERROR) |
| 1745 | return -EIO; | 1745 | return -EIO; |
| 1746 | 1746 | ||
| 1747 | /* Some devices mustn't be in D3 during system sleep */ | ||
| 1748 | if (target_state == PCI_D3hot && | ||
| 1749 | (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)) | ||
| 1750 | return 0; | ||
| 1751 | |||
| 1747 | pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); | 1752 | pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); |
| 1748 | 1753 | ||
| 1749 | error = pci_set_power_state(dev, target_state); | 1754 | error = pci_set_power_state(dev, target_state); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 2a7521677541..194b243a2817 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -2929,6 +2929,32 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev) | |||
| 2929 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); | 2929 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); |
| 2930 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); | 2930 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); |
| 2931 | 2931 | ||
| 2932 | /* | ||
| 2933 | * The Intel 6 Series/C200 Series chipset's EHCI controllers on many | ||
| 2934 | * ASUS motherboards will cause memory corruption or a system crash | ||
| 2935 | * if they are in D3 while the system is put into S3 sleep. | ||
| 2936 | */ | ||
| 2937 | static void __devinit asus_ehci_no_d3(struct pci_dev *dev) | ||
| 2938 | { | ||
| 2939 | const char *sys_info; | ||
| 2940 | static const char good_Asus_board[] = "P8Z68-V"; | ||
| 2941 | |||
| 2942 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP) | ||
| 2943 | return; | ||
| 2944 | if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK) | ||
| 2945 | return; | ||
| 2946 | sys_info = dmi_get_system_info(DMI_BOARD_NAME); | ||
| 2947 | if (sys_info && memcmp(sys_info, good_Asus_board, | ||
| 2948 | sizeof(good_Asus_board) - 1) == 0) | ||
| 2949 | return; | ||
| 2950 | |||
| 2951 | dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n"); | ||
| 2952 | dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP; | ||
| 2953 | device_set_wakeup_capable(&dev->dev, false); | ||
| 2954 | } | ||
| 2955 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3); | ||
| 2956 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3); | ||
| 2957 | |||
| 2932 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, | 2958 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, |
| 2933 | struct pci_fixup *end) | 2959 | struct pci_fixup *end) |
| 2934 | { | 2960 | { |
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index c3b331b74fa0..0cc053af70bd 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
| @@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps); | |||
| 61 | list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ | 61 | list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ |
| 62 | for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ | 62 | for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ |
| 63 | _i_ < _maps_node_->num_maps; \ | 63 | _i_ < _maps_node_->num_maps; \ |
| 64 | i++, _map_ = &_maps_node_->maps[_i_]) | 64 | _i_++, _map_ = &_maps_node_->maps[_i_]) |
| 65 | 65 | ||
| 66 | /** | 66 | /** |
| 67 | * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support | 67 | * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support |
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c index f6e7c670906c..dd6d93aa5334 100644 --- a/drivers/pinctrl/pinctrl-imx.c +++ b/drivers/pinctrl/pinctrl-imx.c | |||
| @@ -27,16 +27,16 @@ | |||
| 27 | #include "core.h" | 27 | #include "core.h" |
| 28 | #include "pinctrl-imx.h" | 28 | #include "pinctrl-imx.h" |
| 29 | 29 | ||
| 30 | #define IMX_PMX_DUMP(info, p, m, c, n) \ | 30 | #define IMX_PMX_DUMP(info, p, m, c, n) \ |
| 31 | { \ | 31 | { \ |
| 32 | int i, j; \ | 32 | int i, j; \ |
| 33 | printk("Format: Pin Mux Config\n"); \ | 33 | printk(KERN_DEBUG "Format: Pin Mux Config\n"); \ |
| 34 | for (i = 0; i < n; i++) { \ | 34 | for (i = 0; i < n; i++) { \ |
| 35 | j = p[i]; \ | 35 | j = p[i]; \ |
| 36 | printk("%s %d 0x%lx\n", \ | 36 | printk(KERN_DEBUG "%s %d 0x%lx\n", \ |
| 37 | info->pins[j].name, \ | 37 | info->pins[j].name, \ |
| 38 | m[i], c[i]); \ | 38 | m[i], c[i]); \ |
| 39 | } \ | 39 | } \ |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | /* The bits in CONFIG cell defined in binding doc*/ | 42 | /* The bits in CONFIG cell defined in binding doc*/ |
| @@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
| 173 | 173 | ||
| 174 | /* create mux map */ | 174 | /* create mux map */ |
| 175 | parent = of_get_parent(np); | 175 | parent = of_get_parent(np); |
| 176 | if (!parent) | 176 | if (!parent) { |
| 177 | kfree(new_map); | ||
| 177 | return -EINVAL; | 178 | return -EINVAL; |
| 179 | } | ||
| 178 | new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; | 180 | new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; |
| 179 | new_map[0].data.mux.function = parent->name; | 181 | new_map[0].data.mux.function = parent->name; |
| 180 | new_map[0].data.mux.group = np->name; | 182 | new_map[0].data.mux.group = np->name; |
| @@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
| 193 | } | 195 | } |
| 194 | 196 | ||
| 195 | dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", | 197 | dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", |
| 196 | new_map->data.mux.function, new_map->data.mux.group, map_num); | 198 | (*map)->data.mux.function, (*map)->data.mux.group, map_num); |
| 197 | 199 | ||
| 198 | return 0; | 200 | return 0; |
| 199 | } | 201 | } |
| @@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
| 201 | static void imx_dt_free_map(struct pinctrl_dev *pctldev, | 203 | static void imx_dt_free_map(struct pinctrl_dev *pctldev, |
| 202 | struct pinctrl_map *map, unsigned num_maps) | 204 | struct pinctrl_map *map, unsigned num_maps) |
| 203 | { | 205 | { |
| 204 | int i; | 206 | kfree(map); |
| 205 | |||
| 206 | for (i = 0; i < num_maps; i++) | ||
| 207 | kfree(map); | ||
| 208 | } | 207 | } |
| 209 | 208 | ||
| 210 | static struct pinctrl_ops imx_pctrl_ops = { | 209 | static struct pinctrl_ops imx_pctrl_ops = { |
| @@ -475,9 +474,8 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np, | |||
| 475 | grp->configs[j] = config & ~IMX_PAD_SION; | 474 | grp->configs[j] = config & ~IMX_PAD_SION; |
| 476 | } | 475 | } |
| 477 | 476 | ||
| 478 | #ifdef DEBUG | ||
| 479 | IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); | 477 | IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); |
| 480 | #endif | 478 | |
| 481 | return 0; | 479 | return 0; |
| 482 | } | 480 | } |
| 483 | 481 | ||
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c index 556e45a213eb..afb50ee64598 100644 --- a/drivers/pinctrl/pinctrl-mxs.c +++ b/drivers/pinctrl/pinctrl-mxs.c | |||
| @@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
| 107 | 107 | ||
| 108 | /* Compose group name */ | 108 | /* Compose group name */ |
| 109 | group = kzalloc(length, GFP_KERNEL); | 109 | group = kzalloc(length, GFP_KERNEL); |
| 110 | if (!group) | 110 | if (!group) { |
| 111 | return -ENOMEM; | 111 | ret = -ENOMEM; |
| 112 | goto free; | ||
| 113 | } | ||
| 112 | snprintf(group, length, "%s.%d", np->name, reg); | 114 | snprintf(group, length, "%s.%d", np->name, reg); |
| 113 | new_map[i].data.mux.group = group; | 115 | new_map[i].data.mux.group = group; |
| 114 | i++; | 116 | i++; |
| @@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
| 118 | pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); | 120 | pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); |
| 119 | if (!pconfig) { | 121 | if (!pconfig) { |
| 120 | ret = -ENOMEM; | 122 | ret = -ENOMEM; |
| 121 | goto free; | 123 | goto free_group; |
| 122 | } | 124 | } |
| 123 | 125 | ||
| 124 | new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; | 126 | new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; |
| @@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
| 133 | 135 | ||
| 134 | return 0; | 136 | return 0; |
| 135 | 137 | ||
| 138 | free_group: | ||
| 139 | if (!purecfg) | ||
| 140 | free(group); | ||
| 136 | free: | 141 | free: |
| 137 | kfree(new_map); | 142 | kfree(new_map); |
| 138 | return ret; | 143 | return ret; |
| @@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev, | |||
| 511 | return 0; | 516 | return 0; |
| 512 | 517 | ||
| 513 | err: | 518 | err: |
| 519 | platform_set_drvdata(pdev, NULL); | ||
| 514 | iounmap(d->base); | 520 | iounmap(d->base); |
| 515 | return ret; | 521 | return ret; |
| 516 | } | 522 | } |
| @@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev) | |||
| 520 | { | 526 | { |
| 521 | struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); | 527 | struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); |
| 522 | 528 | ||
| 529 | platform_set_drvdata(pdev, NULL); | ||
| 523 | pinctrl_unregister(d->pctl); | 530 | pinctrl_unregister(d->pctl); |
| 524 | iounmap(d->base); | 531 | iounmap(d->base); |
| 525 | 532 | ||
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index b26395d16347..e8937e7e4999 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c | |||
| @@ -673,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, | |||
| 673 | * wakeup is anyhow controlled by the RIMSC and FIMSC registers. | 673 | * wakeup is anyhow controlled by the RIMSC and FIMSC registers. |
| 674 | */ | 674 | */ |
| 675 | if (nmk_chip->sleepmode && on) { | 675 | if (nmk_chip->sleepmode && on) { |
| 676 | __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base, | 676 | __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, |
| 677 | NMK_GPIO_SLPM_WAKEUP_ENABLE); | 677 | NMK_GPIO_SLPM_WAKEUP_ENABLE); |
| 678 | } | 678 | } |
| 679 | 679 | ||
| @@ -1246,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) | |||
| 1246 | ret = PTR_ERR(clk); | 1246 | ret = PTR_ERR(clk); |
| 1247 | goto out_unmap; | 1247 | goto out_unmap; |
| 1248 | } | 1248 | } |
| 1249 | clk_prepare(clk); | ||
| 1249 | 1250 | ||
| 1250 | nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); | 1251 | nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); |
| 1251 | if (!nmk_chip) { | 1252 | if (!nmk_chip) { |
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index ba15b1a29e52..e9f8e7d11001 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c | |||
| @@ -1184,7 +1184,7 @@ out_no_gpio_remap: | |||
| 1184 | return ret; | 1184 | return ret; |
| 1185 | } | 1185 | } |
| 1186 | 1186 | ||
| 1187 | static const struct of_device_id pinmux_ids[] = { | 1187 | static const struct of_device_id pinmux_ids[] __devinitconst = { |
| 1188 | { .compatible = "sirf,prima2-gpio-pinmux" }, | 1188 | { .compatible = "sirf,prima2-gpio-pinmux" }, |
| 1189 | {} | 1189 | {} |
| 1190 | }; | 1190 | }; |
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 639db4d0aa76..2fd9d36acd15 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | * | 5 | * |
| 6 | * (C) 2009 - Peter Feuerer peter (a) piie.net | 6 | * (C) 2009 - Peter Feuerer peter (a) piie.net |
| 7 | * http://piie.net | 7 | * http://piie.net |
| 8 | * 2009 Borislav Petkov <petkovbb@gmail.com> | 8 | * 2009 Borislav Petkov bp (a) alien8.de |
| 9 | * | 9 | * |
| 10 | * Inspired by and many thanks to: | 10 | * Inspired by and many thanks to: |
| 11 | * o acerfand - Rachel Greenham | 11 | * o acerfand - Rachel Greenham |
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 3660bace123c..e82e7eaac0f1 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c | |||
| @@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = { | |||
| 224 | .of_match_table = of_anatop_regulator_match_tbl, | 224 | .of_match_table = of_anatop_regulator_match_tbl, |
| 225 | }, | 225 | }, |
| 226 | .probe = anatop_regulator_probe, | 226 | .probe = anatop_regulator_probe, |
| 227 | .remove = anatop_regulator_remove, | 227 | .remove = __devexit_p(anatop_regulator_remove), |
| 228 | }; | 228 | }; |
| 229 | 229 | ||
| 230 | static int __init anatop_regulator_init(void) | 230 | static int __init anatop_regulator_init(void) |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7584a74eec8a..09a737c868b5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev, | |||
| 2050 | return -EINVAL; | 2050 | return -EINVAL; |
| 2051 | } | 2051 | } |
| 2052 | 2052 | ||
| 2053 | if (min_uV < rdev->desc->min_uV) | ||
| 2054 | min_uV = rdev->desc->min_uV; | ||
| 2055 | |||
| 2053 | ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); | 2056 | ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); |
| 2054 | if (ret < 0) | 2057 | if (ret < 0) |
| 2055 | return ret; | 2058 | return ret; |
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 9997d7aaca84..242851a4c1a6 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c | |||
| @@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev) | |||
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static int gpio_regulator_set_value(struct regulator_dev *dev, | 103 | static int gpio_regulator_set_value(struct regulator_dev *dev, |
| 104 | int min, int max) | 104 | int min, int max, unsigned *selector) |
| 105 | { | 105 | { |
| 106 | struct gpio_regulator_data *data = rdev_get_drvdata(dev); | 106 | struct gpio_regulator_data *data = rdev_get_drvdata(dev); |
| 107 | int ptr, target, state, best_val = INT_MAX; | 107 | int ptr, target = 0, state, best_val = INT_MAX; |
| 108 | 108 | ||
| 109 | for (ptr = 0; ptr < data->nr_states; ptr++) | 109 | for (ptr = 0; ptr < data->nr_states; ptr++) |
| 110 | if (data->states[ptr].value < best_val && | 110 | if (data->states[ptr].value < best_val && |
| 111 | data->states[ptr].value >= min && | 111 | data->states[ptr].value >= min && |
| 112 | data->states[ptr].value <= max) | 112 | data->states[ptr].value <= max) { |
| 113 | target = data->states[ptr].gpios; | 113 | target = data->states[ptr].gpios; |
| 114 | best_val = data->states[ptr].value; | ||
| 115 | if (selector) | ||
| 116 | *selector = ptr; | ||
| 117 | } | ||
| 114 | 118 | ||
| 115 | if (best_val == INT_MAX) | 119 | if (best_val == INT_MAX) |
| 116 | return -EINVAL; | 120 | return -EINVAL; |
| @@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev, | |||
| 128 | int min_uV, int max_uV, | 132 | int min_uV, int max_uV, |
| 129 | unsigned *selector) | 133 | unsigned *selector) |
| 130 | { | 134 | { |
| 131 | return gpio_regulator_set_value(dev, min_uV, max_uV); | 135 | return gpio_regulator_set_value(dev, min_uV, max_uV, selector); |
| 132 | } | 136 | } |
| 133 | 137 | ||
| 134 | static int gpio_regulator_list_voltage(struct regulator_dev *dev, | 138 | static int gpio_regulator_list_voltage(struct regulator_dev *dev, |
| @@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev, | |||
| 145 | static int gpio_regulator_set_current_limit(struct regulator_dev *dev, | 149 | static int gpio_regulator_set_current_limit(struct regulator_dev *dev, |
| 146 | int min_uA, int max_uA) | 150 | int min_uA, int max_uA) |
| 147 | { | 151 | { |
| 148 | return gpio_regulator_set_value(dev, min_uA, max_uA); | 152 | return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); |
| 149 | } | 153 | } |
| 150 | 154 | ||
| 151 | static struct regulator_ops gpio_regulator_voltage_ops = { | 155 | static struct regulator_ops gpio_regulator_voltage_ops = { |
| @@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) | |||
| 286 | 290 | ||
| 287 | cfg.dev = &pdev->dev; | 291 | cfg.dev = &pdev->dev; |
| 288 | cfg.init_data = config->init_data; | 292 | cfg.init_data = config->init_data; |
| 289 | cfg.driver_data = &drvdata; | 293 | cfg.driver_data = drvdata; |
| 290 | 294 | ||
| 291 | drvdata->dev = regulator_register(&drvdata->desc, &cfg); | 295 | drvdata->dev = regulator_register(&drvdata->desc, &cfg); |
| 292 | if (IS_ERR(drvdata->dev)) { | 296 | if (IS_ERR(drvdata->dev)) { |
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 1f4bb80457b3..9d540cd02dab 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
| @@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, | |||
| 259 | config.dev = &client->dev; | 259 | config.dev = &client->dev; |
| 260 | config.init_data = pdata->regulator; | 260 | config.init_data = pdata->regulator; |
| 261 | config.driver_data = info; | 261 | config.driver_data = info; |
| 262 | config.regmap = info->regmap; | ||
| 262 | 263 | ||
| 263 | info->regulator = regulator_register(&dcdc_desc, &config); | 264 | info->regulator = regulator_register(&dcdc_desc, &config); |
| 264 | if (IS_ERR(info->regulator)) { | 265 | if (IS_ERR(info->regulator)) { |
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index c4435f608df7..9b7ca90057d5 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c | |||
| @@ -775,9 +775,6 @@ static __devinit int palmas_probe(struct platform_device *pdev) | |||
| 775 | err_unregister_regulator: | 775 | err_unregister_regulator: |
| 776 | while (--id >= 0) | 776 | while (--id >= 0) |
| 777 | regulator_unregister(pmic->rdev[id]); | 777 | regulator_unregister(pmic->rdev[id]); |
| 778 | kfree(pmic->rdev); | ||
| 779 | kfree(pmic->desc); | ||
| 780 | kfree(pmic); | ||
| 781 | return ret; | 778 | return ret; |
| 782 | } | 779 | } |
| 783 | 780 | ||
| @@ -788,10 +785,6 @@ static int __devexit palmas_remove(struct platform_device *pdev) | |||
| 788 | 785 | ||
| 789 | for (id = 0; id < PALMAS_NUM_REGS; id++) | 786 | for (id = 0; id < PALMAS_NUM_REGS; id++) |
| 790 | regulator_unregister(pmic->rdev[id]); | 787 | regulator_unregister(pmic->rdev[id]); |
| 791 | |||
| 792 | kfree(pmic->rdev); | ||
| 793 | kfree(pmic->desc); | ||
| 794 | kfree(pmic); | ||
| 795 | return 0; | 788 | return 0; |
| 796 | } | 789 | } |
| 797 | 790 | ||
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7d5f56edb8ef..4267789ca995 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
| @@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev) | |||
| 910 | 910 | ||
| 911 | static u32 rtc_handler(void *context) | 911 | static u32 rtc_handler(void *context) |
| 912 | { | 912 | { |
| 913 | struct device *dev = context; | ||
| 914 | |||
| 915 | pm_wakeup_event(dev, 0); | ||
| 913 | acpi_clear_event(ACPI_EVENT_RTC); | 916 | acpi_clear_event(ACPI_EVENT_RTC); |
| 914 | acpi_disable_event(ACPI_EVENT_RTC, 0); | 917 | acpi_disable_event(ACPI_EVENT_RTC, 0); |
| 915 | return ACPI_INTERRUPT_HANDLED; | 918 | return ACPI_INTERRUPT_HANDLED; |
| 916 | } | 919 | } |
| 917 | 920 | ||
| 918 | static inline void rtc_wake_setup(void) | 921 | static inline void rtc_wake_setup(struct device *dev) |
| 919 | { | 922 | { |
| 920 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); | 923 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); |
| 921 | /* | 924 | /* |
| 922 | * After the RTC handler is installed, the Fixed_RTC event should | 925 | * After the RTC handler is installed, the Fixed_RTC event should |
| 923 | * be disabled. Only when the RTC alarm is set will it be enabled. | 926 | * be disabled. Only when the RTC alarm is set will it be enabled. |
| @@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev) | |||
| 950 | if (acpi_disabled) | 953 | if (acpi_disabled) |
| 951 | return; | 954 | return; |
| 952 | 955 | ||
| 953 | rtc_wake_setup(); | 956 | rtc_wake_setup(dev); |
| 954 | acpi_rtc_info.wake_on = rtc_wake_on; | 957 | acpi_rtc_info.wake_on = rtc_wake_on; |
| 955 | acpi_rtc_info.wake_off = rtc_wake_off; | 958 | acpi_rtc_info.wake_off = rtc_wake_off; |
| 956 | 959 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 6102ef2cb2d8..9d46fcbe7755 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
| @@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr, | |||
| 1792 | static inline u8 | 1792 | static inline u8 |
| 1793 | _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) | 1793 | _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) |
| 1794 | { | 1794 | { |
| 1795 | return ioc->cpu_msix_table[smp_processor_id()]; | 1795 | return ioc->cpu_msix_table[raw_smp_processor_id()]; |
| 1796 | } | 1796 | } |
| 1797 | 1797 | ||
| 1798 | /** | 1798 | /** |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 04f80ebf09eb..6986552b47e6 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/version.h> | ||
| 30 | #include <linux/blkdev.h> | 29 | #include <linux/blkdev.h> |
| 31 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
| 32 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
| @@ -2477,11 +2476,9 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
| 2477 | } | 2476 | } |
| 2478 | 2477 | ||
| 2479 | cmd = qlt_ctio_to_cmd(vha, handle, ctio); | 2478 | cmd = qlt_ctio_to_cmd(vha, handle, ctio); |
| 2480 | if (cmd == NULL) { | 2479 | if (cmd == NULL) |
| 2481 | if (status != CTIO_SUCCESS) | ||
| 2482 | qlt_term_ctio_exchange(vha, ctio, NULL, status); | ||
| 2483 | return; | 2480 | return; |
| 2484 | } | 2481 | |
| 2485 | se_cmd = &cmd->se_cmd; | 2482 | se_cmd = &cmd->se_cmd; |
| 2486 | tfo = se_cmd->se_tfo; | 2483 | tfo = se_cmd->se_tfo; |
| 2487 | 2484 | ||
| @@ -2727,10 +2724,12 @@ static void qlt_do_work(struct work_struct *work) | |||
| 2727 | out_term: | 2724 | out_term: |
| 2728 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); | 2725 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); |
| 2729 | /* | 2726 | /* |
| 2730 | * cmd has not sent to target yet, so pass NULL as the second argument | 2727 | * cmd has not sent to target yet, so pass NULL as the second |
| 2728 | * argument to qlt_send_term_exchange() and free the memory here. | ||
| 2731 | */ | 2729 | */ |
| 2732 | spin_lock_irqsave(&ha->hardware_lock, flags); | 2730 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 2733 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); | 2731 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); |
| 2732 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); | ||
| 2734 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2733 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 2735 | if (sess) | 2734 | if (sess) |
| 2736 | ha->tgt.tgt_ops->put_sess(sess); | 2735 | ha->tgt.tgt_ops->put_sess(sess); |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 9ec19bc2f0fe..9f9ef1644fd9 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
| @@ -919,7 +919,6 @@ struct qla_tgt_srr_ctio { | |||
| 919 | #define QLA_TGT_XMIT_STATUS 2 | 919 | #define QLA_TGT_XMIT_STATUS 2 |
| 920 | #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) | 920 | #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) |
| 921 | 921 | ||
| 922 | #include <linux/version.h> | ||
| 923 | 922 | ||
| 924 | extern struct qla_tgt_data qla_target; | 923 | extern struct qla_tgt_data qla_target; |
| 925 | /* | 924 | /* |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 436598f57404..6e64314dbbb3 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -137,13 +137,15 @@ static char *tcm_qla2xxx_get_fabric_name(void) | |||
| 137 | */ | 137 | */ |
| 138 | static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) | 138 | static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) |
| 139 | { | 139 | { |
| 140 | unsigned int i, j, value; | 140 | unsigned int i, j; |
| 141 | u8 wwn[8]; | 141 | u8 wwn[8]; |
| 142 | 142 | ||
| 143 | memset(wwn, 0, sizeof(wwn)); | 143 | memset(wwn, 0, sizeof(wwn)); |
| 144 | 144 | ||
| 145 | /* Validate and store the new name */ | 145 | /* Validate and store the new name */ |
| 146 | for (i = 0, j = 0; i < 16; i++) { | 146 | for (i = 0, j = 0; i < 16; i++) { |
| 147 | int value; | ||
| 148 | |||
| 147 | value = hex_to_bin(*ns++); | 149 | value = hex_to_bin(*ns++); |
| 148 | if (value >= 0) | 150 | if (value >= 0) |
| 149 | j = (j << 4) | value; | 151 | j = (j << 4) | value; |
| @@ -652,8 +654,8 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) | |||
| 652 | /* | 654 | /* |
| 653 | * Called from qla_target.c:qlt_issue_task_mgmt() | 655 | * Called from qla_target.c:qlt_issue_task_mgmt() |
| 654 | */ | 656 | */ |
| 655 | int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, | 657 | static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, |
| 656 | uint8_t tmr_func, uint32_t tag) | 658 | uint8_t tmr_func, uint32_t tag) |
| 657 | { | 659 | { |
| 658 | struct qla_tgt_sess *sess = mcmd->sess; | 660 | struct qla_tgt_sess *sess = mcmd->sess; |
| 659 | struct se_cmd *se_cmd = &mcmd->se_cmd; | 661 | struct se_cmd *se_cmd = &mcmd->se_cmd; |
| @@ -762,65 +764,8 @@ static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd, | |||
| 762 | struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; | 764 | struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; |
| 763 | struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; | 765 | struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; |
| 764 | 766 | ||
| 765 | static int tcm_qla2xxx_setup_nacl_from_rport( | 767 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, |
| 766 | struct se_portal_group *se_tpg, | 768 | struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); |
| 767 | struct se_node_acl *se_nacl, | ||
| 768 | struct tcm_qla2xxx_lport *lport, | ||
| 769 | struct tcm_qla2xxx_nacl *nacl, | ||
| 770 | u64 rport_wwnn) | ||
| 771 | { | ||
| 772 | struct scsi_qla_host *vha = lport->qla_vha; | ||
| 773 | struct Scsi_Host *sh = vha->host; | ||
| 774 | struct fc_host_attrs *fc_host = shost_to_fc_host(sh); | ||
| 775 | struct fc_rport *rport; | ||
| 776 | unsigned long flags; | ||
| 777 | void *node; | ||
| 778 | int rc; | ||
| 779 | |||
| 780 | /* | ||
| 781 | * Scan the existing rports, and create a session for the | ||
| 782 | * explict NodeACL is an matching rport->node_name already | ||
| 783 | * exists. | ||
| 784 | */ | ||
| 785 | spin_lock_irqsave(sh->host_lock, flags); | ||
| 786 | list_for_each_entry(rport, &fc_host->rports, peers) { | ||
| 787 | if (rport_wwnn != rport->node_name) | ||
| 788 | continue; | ||
| 789 | |||
| 790 | pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n", | ||
| 791 | rport->node_name, rport->port_id); | ||
| 792 | nacl->nport_id = rport->port_id; | ||
| 793 | |||
| 794 | spin_unlock_irqrestore(sh->host_lock, flags); | ||
| 795 | |||
| 796 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); | ||
| 797 | node = btree_lookup32(&lport->lport_fcport_map, rport->port_id); | ||
| 798 | if (node) { | ||
| 799 | rc = btree_update32(&lport->lport_fcport_map, | ||
| 800 | rport->port_id, se_nacl); | ||
| 801 | } else { | ||
| 802 | rc = btree_insert32(&lport->lport_fcport_map, | ||
| 803 | rport->port_id, se_nacl, | ||
| 804 | GFP_ATOMIC); | ||
| 805 | } | ||
| 806 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | ||
| 807 | |||
| 808 | if (rc) { | ||
| 809 | pr_err("Unable to insert se_nacl into fcport_map"); | ||
| 810 | WARN_ON(rc > 0); | ||
| 811 | return rc; | ||
| 812 | } | ||
| 813 | |||
| 814 | pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n", | ||
| 815 | se_nacl, rport_wwnn, nacl->nport_id); | ||
| 816 | |||
| 817 | return 1; | ||
| 818 | } | ||
| 819 | spin_unlock_irqrestore(sh->host_lock, flags); | ||
| 820 | |||
| 821 | return 0; | ||
| 822 | } | ||
| 823 | |||
| 824 | /* | 769 | /* |
| 825 | * Expected to be called with struct qla_hw_data->hardware_lock held | 770 | * Expected to be called with struct qla_hw_data->hardware_lock held |
| 826 | */ | 771 | */ |
| @@ -842,11 +787,40 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) | |||
| 842 | 787 | ||
| 843 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", | 788 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", |
| 844 | se_nacl, nacl->nport_wwnn, nacl->nport_id); | 789 | se_nacl, nacl->nport_wwnn, nacl->nport_id); |
| 790 | /* | ||
| 791 | * Now clear the se_nacl and session pointers from our HW lport lookup | ||
| 792 | * table mapping for this initiator's fabric S_ID and LOOP_ID entries. | ||
| 793 | * | ||
| 794 | * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> | ||
| 795 | * target_wait_for_sess_cmds() before the session waits for outstanding | ||
| 796 | * I/O to complete, to avoid a race between session shutdown execution | ||
| 797 | * and incoming ATIOs or TMRs picking up a stale se_node_act reference. | ||
| 798 | */ | ||
| 799 | tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); | ||
| 800 | } | ||
| 801 | |||
| 802 | static void tcm_qla2xxx_release_session(struct kref *kref) | ||
| 803 | { | ||
| 804 | struct se_session *se_sess = container_of(kref, | ||
| 805 | struct se_session, sess_kref); | ||
| 806 | |||
| 807 | qlt_unreg_sess(se_sess->fabric_sess_ptr); | ||
| 808 | } | ||
| 809 | |||
| 810 | static void tcm_qla2xxx_put_session(struct se_session *se_sess) | ||
| 811 | { | ||
| 812 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
| 813 | struct qla_hw_data *ha = sess->vha->hw; | ||
| 814 | unsigned long flags; | ||
| 815 | |||
| 816 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
| 817 | kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); | ||
| 818 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
| 845 | } | 819 | } |
| 846 | 820 | ||
| 847 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) | 821 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) |
| 848 | { | 822 | { |
| 849 | target_put_session(sess->se_sess); | 823 | tcm_qla2xxx_put_session(sess->se_sess); |
| 850 | } | 824 | } |
| 851 | 825 | ||
| 852 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) | 826 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) |
| @@ -859,14 +833,10 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | |||
| 859 | struct config_group *group, | 833 | struct config_group *group, |
| 860 | const char *name) | 834 | const char *name) |
| 861 | { | 835 | { |
| 862 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; | ||
| 863 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, | ||
| 864 | struct tcm_qla2xxx_lport, lport_wwn); | ||
| 865 | struct se_node_acl *se_nacl, *se_nacl_new; | 836 | struct se_node_acl *se_nacl, *se_nacl_new; |
| 866 | struct tcm_qla2xxx_nacl *nacl; | 837 | struct tcm_qla2xxx_nacl *nacl; |
| 867 | u64 wwnn; | 838 | u64 wwnn; |
| 868 | u32 qla2xxx_nexus_depth; | 839 | u32 qla2xxx_nexus_depth; |
| 869 | int rc; | ||
| 870 | 840 | ||
| 871 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) | 841 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) |
| 872 | return ERR_PTR(-EINVAL); | 842 | return ERR_PTR(-EINVAL); |
| @@ -893,16 +863,6 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | |||
| 893 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | 863 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); |
| 894 | nacl->nport_wwnn = wwnn; | 864 | nacl->nport_wwnn = wwnn; |
| 895 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); | 865 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); |
| 896 | /* | ||
| 897 | * Setup a se_nacl handle based on an a matching struct fc_rport setup | ||
| 898 | * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() | ||
| 899 | */ | ||
| 900 | rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport, | ||
| 901 | nacl, wwnn); | ||
| 902 | if (rc < 0) { | ||
| 903 | tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); | ||
| 904 | return ERR_PTR(rc); | ||
| 905 | } | ||
| 906 | 866 | ||
| 907 | return se_nacl; | 867 | return se_nacl; |
| 908 | } | 868 | } |
| @@ -1390,6 +1350,25 @@ static void tcm_qla2xxx_set_sess_by_loop_id( | |||
| 1390 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); | 1350 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); |
| 1391 | } | 1351 | } |
| 1392 | 1352 | ||
| 1353 | /* | ||
| 1354 | * Should always be called with qla_hw_data->hardware_lock held. | ||
| 1355 | */ | ||
| 1356 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, | ||
| 1357 | struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) | ||
| 1358 | { | ||
| 1359 | struct se_session *se_sess = sess->se_sess; | ||
| 1360 | unsigned char be_sid[3]; | ||
| 1361 | |||
| 1362 | be_sid[0] = sess->s_id.b.domain; | ||
| 1363 | be_sid[1] = sess->s_id.b.area; | ||
| 1364 | be_sid[2] = sess->s_id.b.al_pa; | ||
| 1365 | |||
| 1366 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, | ||
| 1367 | sess, be_sid); | ||
| 1368 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, | ||
| 1369 | sess, sess->loop_id); | ||
| 1370 | } | ||
| 1371 | |||
| 1393 | static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | 1372 | static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) |
| 1394 | { | 1373 | { |
| 1395 | struct qla_tgt *tgt = sess->tgt; | 1374 | struct qla_tgt *tgt = sess->tgt; |
| @@ -1398,8 +1377,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | |||
| 1398 | struct se_node_acl *se_nacl; | 1377 | struct se_node_acl *se_nacl; |
| 1399 | struct tcm_qla2xxx_lport *lport; | 1378 | struct tcm_qla2xxx_lport *lport; |
| 1400 | struct tcm_qla2xxx_nacl *nacl; | 1379 | struct tcm_qla2xxx_nacl *nacl; |
| 1401 | unsigned char be_sid[3]; | ||
| 1402 | unsigned long flags; | ||
| 1403 | 1380 | ||
| 1404 | BUG_ON(in_interrupt()); | 1381 | BUG_ON(in_interrupt()); |
| 1405 | 1382 | ||
| @@ -1419,21 +1396,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | |||
| 1419 | return; | 1396 | return; |
| 1420 | } | 1397 | } |
| 1421 | target_wait_for_sess_cmds(se_sess, 0); | 1398 | target_wait_for_sess_cmds(se_sess, 0); |
| 1422 | /* | ||
| 1423 | * And now clear the se_nacl and session pointers from our HW lport | ||
| 1424 | * mappings for fabric S_ID and LOOP_ID. | ||
| 1425 | */ | ||
| 1426 | memset(&be_sid, 0, 3); | ||
| 1427 | be_sid[0] = sess->s_id.b.domain; | ||
| 1428 | be_sid[1] = sess->s_id.b.area; | ||
| 1429 | be_sid[2] = sess->s_id.b.al_pa; | ||
| 1430 | |||
| 1431 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
| 1432 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, | ||
| 1433 | sess, be_sid); | ||
| 1434 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, | ||
| 1435 | sess, sess->loop_id); | ||
| 1436 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
| 1437 | 1399 | ||
| 1438 | transport_deregister_session_configfs(sess->se_sess); | 1400 | transport_deregister_session_configfs(sess->se_sess); |
| 1439 | transport_deregister_session(sess->se_sess); | 1401 | transport_deregister_session(sess->se_sess); |
| @@ -1731,6 +1693,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
| 1731 | .new_cmd_map = NULL, | 1693 | .new_cmd_map = NULL, |
| 1732 | .check_stop_free = tcm_qla2xxx_check_stop_free, | 1694 | .check_stop_free = tcm_qla2xxx_check_stop_free, |
| 1733 | .release_cmd = tcm_qla2xxx_release_cmd, | 1695 | .release_cmd = tcm_qla2xxx_release_cmd, |
| 1696 | .put_session = tcm_qla2xxx_put_session, | ||
| 1734 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1697 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
| 1735 | .close_session = tcm_qla2xxx_close_session, | 1698 | .close_session = tcm_qla2xxx_close_session, |
| 1736 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1699 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
| @@ -1779,6 +1742,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | |||
| 1779 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | 1742 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, |
| 1780 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | 1743 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, |
| 1781 | .release_cmd = tcm_qla2xxx_release_cmd, | 1744 | .release_cmd = tcm_qla2xxx_release_cmd, |
| 1745 | .put_session = tcm_qla2xxx_put_session, | ||
| 1782 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1746 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
| 1783 | .close_session = tcm_qla2xxx_close_session, | 1747 | .close_session = tcm_qla2xxx_close_session, |
| 1784 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1748 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 61c82a345f82..bbbc9c918d4c 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -90,11 +90,9 @@ unsigned int scsi_logging_level; | |||
| 90 | EXPORT_SYMBOL(scsi_logging_level); | 90 | EXPORT_SYMBOL(scsi_logging_level); |
| 91 | #endif | 91 | #endif |
| 92 | 92 | ||
| 93 | #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD) | 93 | /* sd, scsi core and power management need to coordinate flushing async actions */ |
| 94 | /* sd and scsi_pm need to coordinate flushing async actions */ | ||
| 95 | LIST_HEAD(scsi_sd_probe_domain); | 94 | LIST_HEAD(scsi_sd_probe_domain); |
| 96 | EXPORT_SYMBOL(scsi_sd_probe_domain); | 95 | EXPORT_SYMBOL(scsi_sd_probe_domain); |
| 97 | #endif | ||
| 98 | 96 | ||
| 99 | /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. | 97 | /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. |
| 100 | * You may not alter any existing entry (although adding new ones is | 98 | * You may not alter any existing entry (although adding new ones is |
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c index 4e7ef0e6b79c..d46764b5aaba 100644 --- a/drivers/staging/ramster/zcache-main.c +++ b/drivers/staging/ramster/zcache-main.c | |||
| @@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
| 3002 | return oid; | 3002 | return oid; |
| 3003 | } | 3003 | } |
| 3004 | 3004 | ||
| 3005 | static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | 3005 | static int zcache_frontswap_store(unsigned type, pgoff_t offset, |
| 3006 | struct page *page) | 3006 | struct page *page) |
| 3007 | { | 3007 | { |
| 3008 | u64 ind64 = (u64)offset; | 3008 | u64 ind64 = (u64)offset; |
| @@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | |||
| 3025 | 3025 | ||
| 3026 | /* returns 0 if the page was successfully gotten from frontswap, -1 if | 3026 | /* returns 0 if the page was successfully gotten from frontswap, -1 if |
| 3027 | * was not present (should never happen!) */ | 3027 | * was not present (should never happen!) */ |
| 3028 | static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, | 3028 | static int zcache_frontswap_load(unsigned type, pgoff_t offset, |
| 3029 | struct page *page) | 3029 | struct page *page) |
| 3030 | { | 3030 | { |
| 3031 | u64 ind64 = (u64)offset; | 3031 | u64 ind64 = (u64)offset; |
| @@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored) | |||
| 3080 | } | 3080 | } |
| 3081 | 3081 | ||
| 3082 | static struct frontswap_ops zcache_frontswap_ops = { | 3082 | static struct frontswap_ops zcache_frontswap_ops = { |
| 3083 | .put_page = zcache_frontswap_put_page, | 3083 | .store = zcache_frontswap_store, |
| 3084 | .get_page = zcache_frontswap_get_page, | 3084 | .load = zcache_frontswap_load, |
| 3085 | .invalidate_page = zcache_frontswap_flush_page, | 3085 | .invalidate_page = zcache_frontswap_flush_page, |
| 3086 | .invalidate_area = zcache_frontswap_flush_area, | 3086 | .invalidate_area = zcache_frontswap_flush_area, |
| 3087 | .init = zcache_frontswap_init | 3087 | .init = zcache_frontswap_init |
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 2734dacacbaf..784c796b9848 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c | |||
| @@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1; | |||
| 1835 | * Swizzling increases objects per swaptype, increasing tmem concurrency | 1835 | * Swizzling increases objects per swaptype, increasing tmem concurrency |
| 1836 | * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS | 1836 | * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS |
| 1837 | * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from | 1837 | * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from |
| 1838 | * frontswap_get_page(), but has side-effects. Hence using 8. | 1838 | * frontswap_load(), but has side-effects. Hence using 8. |
| 1839 | */ | 1839 | */ |
| 1840 | #define SWIZ_BITS 8 | 1840 | #define SWIZ_BITS 8 |
| 1841 | #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) | 1841 | #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) |
| @@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
| 1849 | return oid; | 1849 | return oid; |
| 1850 | } | 1850 | } |
| 1851 | 1851 | ||
| 1852 | static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | 1852 | static int zcache_frontswap_store(unsigned type, pgoff_t offset, |
| 1853 | struct page *page) | 1853 | struct page *page) |
| 1854 | { | 1854 | { |
| 1855 | u64 ind64 = (u64)offset; | 1855 | u64 ind64 = (u64)offset; |
| @@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | |||
| 1870 | 1870 | ||
| 1871 | /* returns 0 if the page was successfully gotten from frontswap, -1 if | 1871 | /* returns 0 if the page was successfully gotten from frontswap, -1 if |
| 1872 | * was not present (should never happen!) */ | 1872 | * was not present (should never happen!) */ |
| 1873 | static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, | 1873 | static int zcache_frontswap_load(unsigned type, pgoff_t offset, |
| 1874 | struct page *page) | 1874 | struct page *page) |
| 1875 | { | 1875 | { |
| 1876 | u64 ind64 = (u64)offset; | 1876 | u64 ind64 = (u64)offset; |
| @@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored) | |||
| 1919 | } | 1919 | } |
| 1920 | 1920 | ||
| 1921 | static struct frontswap_ops zcache_frontswap_ops = { | 1921 | static struct frontswap_ops zcache_frontswap_ops = { |
| 1922 | .put_page = zcache_frontswap_put_page, | 1922 | .store = zcache_frontswap_store, |
| 1923 | .get_page = zcache_frontswap_get_page, | 1923 | .load = zcache_frontswap_load, |
| 1924 | .invalidate_page = zcache_frontswap_flush_page, | 1924 | .invalidate_page = zcache_frontswap_flush_page, |
| 1925 | .invalidate_area = zcache_frontswap_flush_area, | 1925 | .invalidate_area = zcache_frontswap_flush_area, |
| 1926 | .init = zcache_frontswap_init | 1926 | .init = zcache_frontswap_init |
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 37c609898f84..7e6136e2ce81 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
| @@ -587,14 +587,14 @@ static void sbp_management_request_logout( | |||
| 587 | { | 587 | { |
| 588 | struct sbp_tport *tport = agent->tport; | 588 | struct sbp_tport *tport = agent->tport; |
| 589 | struct sbp_tpg *tpg = tport->tpg; | 589 | struct sbp_tpg *tpg = tport->tpg; |
| 590 | int login_id; | 590 | int id; |
| 591 | struct sbp_login_descriptor *login; | 591 | struct sbp_login_descriptor *login; |
| 592 | 592 | ||
| 593 | login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); | 593 | id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); |
| 594 | 594 | ||
| 595 | login = sbp_login_find_by_id(tpg, login_id); | 595 | login = sbp_login_find_by_id(tpg, id); |
| 596 | if (!login) { | 596 | if (!login) { |
| 597 | pr_warn("cannot find login: %d\n", login_id); | 597 | pr_warn("cannot find login: %d\n", id); |
| 598 | 598 | ||
| 599 | req->status.status = cpu_to_be32( | 599 | req->status.status = cpu_to_be32( |
| 600 | STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | | 600 | STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index e624b836469c..91799973081a 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
| @@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
| 374 | 374 | ||
| 375 | out: | 375 | out: |
| 376 | transport_kunmap_data_sg(cmd); | 376 | transport_kunmap_data_sg(cmd); |
| 377 | target_complete_cmd(cmd, GOOD); | 377 | if (!rc) |
| 378 | return 0; | 378 | target_complete_cmd(cmd, GOOD); |
| 379 | return rc; | ||
| 379 | } | 380 | } |
| 380 | 381 | ||
| 381 | static inline int core_alua_state_nonoptimized( | 382 | static inline int core_alua_state_nonoptimized( |
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 686dba189f8e..9f99d0404908 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
| @@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice( | |||
| 133 | ret = PTR_ERR(dev_p); | 133 | ret = PTR_ERR(dev_p); |
| 134 | goto fail; | 134 | goto fail; |
| 135 | } | 135 | } |
| 136 | |||
| 137 | /* O_DIRECT too? */ | ||
| 138 | flags = O_RDWR | O_CREAT | O_LARGEFILE; | ||
| 139 | |||
| 140 | /* | 136 | /* |
| 141 | * If fd_buffered_io=1 has not been set explicitly (the default), | 137 | * Use O_DSYNC by default instead of O_SYNC to forgo syncing |
| 142 | * use O_SYNC to force FILEIO writes to disk. | 138 | * of pure timestamp updates. |
| 143 | */ | 139 | */ |
| 144 | if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) | 140 | flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; |
| 145 | flags |= O_SYNC; | ||
| 146 | 141 | ||
| 147 | file = filp_open(dev_p, flags, 0600); | 142 | file = filp_open(dev_p, flags, 0600); |
| 148 | if (IS_ERR(file)) { | 143 | if (IS_ERR(file)) { |
| @@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) | |||
| 380 | } | 375 | } |
| 381 | } | 376 | } |
| 382 | 377 | ||
| 383 | static void fd_emulate_write_fua(struct se_cmd *cmd) | ||
| 384 | { | ||
| 385 | struct se_device *dev = cmd->se_dev; | ||
| 386 | struct fd_dev *fd_dev = dev->dev_ptr; | ||
| 387 | loff_t start = cmd->t_task_lba * | ||
| 388 | dev->se_sub_dev->se_dev_attrib.block_size; | ||
| 389 | loff_t end = start + cmd->data_length; | ||
| 390 | int ret; | ||
| 391 | |||
| 392 | pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", | ||
| 393 | cmd->t_task_lba, cmd->data_length); | ||
| 394 | |||
| 395 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
| 396 | if (ret != 0) | ||
| 397 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); | ||
| 398 | } | ||
| 399 | |||
| 400 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | 378 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, |
| 401 | u32 sgl_nents, enum dma_data_direction data_direction) | 379 | u32 sgl_nents, enum dma_data_direction data_direction) |
| 402 | { | 380 | { |
| @@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | |||
| 411 | ret = fd_do_readv(cmd, sgl, sgl_nents); | 389 | ret = fd_do_readv(cmd, sgl, sgl_nents); |
| 412 | } else { | 390 | } else { |
| 413 | ret = fd_do_writev(cmd, sgl, sgl_nents); | 391 | ret = fd_do_writev(cmd, sgl, sgl_nents); |
| 414 | 392 | /* | |
| 393 | * Perform implict vfs_fsync_range() for fd_do_writev() ops | ||
| 394 | * for SCSI WRITEs with Forced Unit Access (FUA) set. | ||
| 395 | * Allow this to happen independent of WCE=0 setting. | ||
| 396 | */ | ||
| 415 | if (ret > 0 && | 397 | if (ret > 0 && |
| 416 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && | ||
| 417 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && | 398 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && |
| 418 | (cmd->se_cmd_flags & SCF_FUA)) { | 399 | (cmd->se_cmd_flags & SCF_FUA)) { |
| 419 | /* | 400 | struct fd_dev *fd_dev = dev->dev_ptr; |
| 420 | * We might need to be a bit smarter here | 401 | loff_t start = cmd->t_task_lba * |
| 421 | * and return some sense data to let the initiator | 402 | dev->se_sub_dev->se_dev_attrib.block_size; |
| 422 | * know the FUA WRITE cache sync failed..? | 403 | loff_t end = start + cmd->data_length; |
| 423 | */ | ||
| 424 | fd_emulate_write_fua(cmd); | ||
| 425 | } | ||
| 426 | 404 | ||
| 405 | vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
| 406 | } | ||
| 427 | } | 407 | } |
| 428 | 408 | ||
| 429 | if (ret < 0) { | 409 | if (ret < 0) { |
| @@ -442,7 +422,6 @@ enum { | |||
| 442 | static match_table_t tokens = { | 422 | static match_table_t tokens = { |
| 443 | {Opt_fd_dev_name, "fd_dev_name=%s"}, | 423 | {Opt_fd_dev_name, "fd_dev_name=%s"}, |
| 444 | {Opt_fd_dev_size, "fd_dev_size=%s"}, | 424 | {Opt_fd_dev_size, "fd_dev_size=%s"}, |
| 445 | {Opt_fd_buffered_io, "fd_buffered_io=%d"}, | ||
| 446 | {Opt_err, NULL} | 425 | {Opt_err, NULL} |
| 447 | }; | 426 | }; |
| 448 | 427 | ||
| @@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params( | |||
| 454 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; | 433 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; |
| 455 | char *orig, *ptr, *arg_p, *opts; | 434 | char *orig, *ptr, *arg_p, *opts; |
| 456 | substring_t args[MAX_OPT_ARGS]; | 435 | substring_t args[MAX_OPT_ARGS]; |
| 457 | int ret = 0, arg, token; | 436 | int ret = 0, token; |
| 458 | 437 | ||
| 459 | opts = kstrdup(page, GFP_KERNEL); | 438 | opts = kstrdup(page, GFP_KERNEL); |
| 460 | if (!opts) | 439 | if (!opts) |
| @@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params( | |||
| 498 | " bytes\n", fd_dev->fd_dev_size); | 477 | " bytes\n", fd_dev->fd_dev_size); |
| 499 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; | 478 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; |
| 500 | break; | 479 | break; |
| 501 | case Opt_fd_buffered_io: | ||
| 502 | match_int(args, &arg); | ||
| 503 | if (arg != 1) { | ||
| 504 | pr_err("bogus fd_buffered_io=%d value\n", arg); | ||
| 505 | ret = -EINVAL; | ||
| 506 | goto out; | ||
| 507 | } | ||
| 508 | |||
| 509 | pr_debug("FILEIO: Using buffered I/O" | ||
| 510 | " operations for struct fd_dev\n"); | ||
| 511 | |||
| 512 | fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; | ||
| 513 | break; | ||
| 514 | default: | 480 | default: |
| 515 | break; | 481 | break; |
| 516 | } | 482 | } |
| @@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params( | |||
| 542 | ssize_t bl = 0; | 508 | ssize_t bl = 0; |
| 543 | 509 | ||
| 544 | bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); | 510 | bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); |
| 545 | bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", | 511 | bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n", |
| 546 | fd_dev->fd_dev_name, fd_dev->fd_dev_size, | 512 | fd_dev->fd_dev_name, fd_dev->fd_dev_size); |
| 547 | (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? | ||
| 548 | "Buffered" : "Synchronous"); | ||
| 549 | return bl; | 513 | return bl; |
| 550 | } | 514 | } |
| 551 | 515 | ||
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index fbd59ef7d8be..70ce7fd7111d 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | #define FBDF_HAS_PATH 0x01 | 15 | #define FBDF_HAS_PATH 0x01 |
| 16 | #define FBDF_HAS_SIZE 0x02 | 16 | #define FBDF_HAS_SIZE 0x02 |
| 17 | #define FDBD_USE_BUFFERED_IO 0x04 | ||
| 18 | 17 | ||
| 19 | struct fd_dev { | 18 | struct fd_dev { |
| 20 | u32 fbd_flags; | 19 | u32 fbd_flags; |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b05fdc0c05d3..634d0f31a28c 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -315,7 +315,7 @@ void transport_register_session( | |||
| 315 | } | 315 | } |
| 316 | EXPORT_SYMBOL(transport_register_session); | 316 | EXPORT_SYMBOL(transport_register_session); |
| 317 | 317 | ||
| 318 | static void target_release_session(struct kref *kref) | 318 | void target_release_session(struct kref *kref) |
| 319 | { | 319 | { |
| 320 | struct se_session *se_sess = container_of(kref, | 320 | struct se_session *se_sess = container_of(kref, |
| 321 | struct se_session, sess_kref); | 321 | struct se_session, sess_kref); |
| @@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session); | |||
| 332 | 332 | ||
| 333 | void target_put_session(struct se_session *se_sess) | 333 | void target_put_session(struct se_session *se_sess) |
| 334 | { | 334 | { |
| 335 | struct se_portal_group *tpg = se_sess->se_tpg; | ||
| 336 | |||
| 337 | if (tpg->se_tpg_tfo->put_session != NULL) { | ||
| 338 | tpg->se_tpg_tfo->put_session(se_sess); | ||
| 339 | return; | ||
| 340 | } | ||
| 335 | kref_put(&se_sess->sess_kref, target_release_session); | 341 | kref_put(&se_sess->sess_kref, target_release_session); |
| 336 | } | 342 | } |
| 337 | EXPORT_SYMBOL(target_put_session); | 343 | EXPORT_SYMBOL(target_put_session); |
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index d3d91dae065c..944eaeb8e0cf 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
| @@ -214,24 +214,24 @@ static int xen_hvm_console_init(void) | |||
| 214 | /* already configured */ | 214 | /* already configured */ |
| 215 | if (info->intf != NULL) | 215 | if (info->intf != NULL) |
| 216 | return 0; | 216 | return 0; |
| 217 | 217 | /* | |
| 218 | * If the toolstack (or the hypervisor) hasn't set these values, the | ||
| 219 | * default value is 0. Even though mfn = 0 and evtchn = 0 are | ||
| 220 | * theoretically correct values, in practice they never are and they | ||
| 221 | * mean that a legacy toolstack hasn't initialized the pv console correctly. | ||
| 222 | */ | ||
| 218 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); | 223 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); |
| 219 | if (r < 0) { | 224 | if (r < 0 || v == 0) |
| 220 | kfree(info); | 225 | goto err; |
| 221 | return -ENODEV; | ||
| 222 | } | ||
| 223 | info->evtchn = v; | 226 | info->evtchn = v; |
| 224 | hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); | 227 | v = 0; |
| 225 | if (r < 0) { | 228 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); |
| 226 | kfree(info); | 229 | if (r < 0 || v == 0) |
| 227 | return -ENODEV; | 230 | goto err; |
| 228 | } | ||
| 229 | mfn = v; | 231 | mfn = v; |
| 230 | info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); | 232 | info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); |
| 231 | if (info->intf == NULL) { | 233 | if (info->intf == NULL) |
| 232 | kfree(info); | 234 | goto err; |
| 233 | return -ENODEV; | ||
| 234 | } | ||
| 235 | info->vtermno = HVC_COOKIE; | 235 | info->vtermno = HVC_COOKIE; |
| 236 | 236 | ||
| 237 | spin_lock(&xencons_lock); | 237 | spin_lock(&xencons_lock); |
| @@ -239,6 +239,9 @@ static int xen_hvm_console_init(void) | |||
| 239 | spin_unlock(&xencons_lock); | 239 | spin_unlock(&xencons_lock); |
| 240 | 240 | ||
| 241 | return 0; | 241 | return 0; |
| 242 | err: | ||
| 243 | kfree(info); | ||
| 244 | return -ENODEV; | ||
| 242 | } | 245 | } |
| 243 | 246 | ||
| 244 | static int xen_pv_console_init(void) | 247 | static int xen_pv_console_init(void) |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 4604153b7954..1bd9163bc118 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
| @@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
| 2179 | return 0; | 2179 | return 0; |
| 2180 | } | 2180 | } |
| 2181 | 2181 | ||
| 2182 | static void sci_cleanup_single(struct sci_port *port) | ||
| 2183 | { | ||
| 2184 | sci_free_gpios(port); | ||
| 2185 | |||
| 2186 | clk_put(port->iclk); | ||
| 2187 | clk_put(port->fclk); | ||
| 2188 | |||
| 2189 | pm_runtime_disable(port->port.dev); | ||
| 2190 | } | ||
| 2191 | |||
| 2182 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 2192 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
| 2183 | static void serial_console_putchar(struct uart_port *port, int ch) | 2193 | static void serial_console_putchar(struct uart_port *port, int ch) |
| 2184 | { | 2194 | { |
| @@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev) | |||
| 2360 | cpufreq_unregister_notifier(&port->freq_transition, | 2370 | cpufreq_unregister_notifier(&port->freq_transition, |
| 2361 | CPUFREQ_TRANSITION_NOTIFIER); | 2371 | CPUFREQ_TRANSITION_NOTIFIER); |
| 2362 | 2372 | ||
| 2363 | sci_free_gpios(port); | ||
| 2364 | |||
| 2365 | uart_remove_one_port(&sci_uart_driver, &port->port); | 2373 | uart_remove_one_port(&sci_uart_driver, &port->port); |
| 2366 | 2374 | ||
| 2367 | clk_put(port->iclk); | 2375 | sci_cleanup_single(port); |
| 2368 | clk_put(port->fclk); | ||
| 2369 | 2376 | ||
| 2370 | pm_runtime_disable(&dev->dev); | ||
| 2371 | return 0; | 2377 | return 0; |
| 2372 | } | 2378 | } |
| 2373 | 2379 | ||
| @@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev, | |||
| 2385 | index+1, SCI_NPORTS); | 2391 | index+1, SCI_NPORTS); |
| 2386 | dev_notice(&dev->dev, "Consider bumping " | 2392 | dev_notice(&dev->dev, "Consider bumping " |
| 2387 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); | 2393 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); |
| 2388 | return 0; | 2394 | return -EINVAL; |
| 2389 | } | 2395 | } |
| 2390 | 2396 | ||
| 2391 | ret = sci_init_single(dev, sciport, index, p); | 2397 | ret = sci_init_single(dev, sciport, index, p); |
| 2392 | if (ret) | 2398 | if (ret) |
| 2393 | return ret; | 2399 | return ret; |
| 2394 | 2400 | ||
| 2395 | return uart_add_one_port(&sci_uart_driver, &sciport->port); | 2401 | ret = uart_add_one_port(&sci_uart_driver, &sciport->port); |
| 2402 | if (ret) { | ||
| 2403 | sci_cleanup_single(sciport); | ||
| 2404 | return ret; | ||
| 2405 | } | ||
| 2406 | |||
| 2407 | return 0; | ||
| 2396 | } | 2408 | } |
| 2397 | 2409 | ||
| 2398 | static int __devinit sci_probe(struct platform_device *dev) | 2410 | static int __devinit sci_probe(struct platform_device *dev) |
| @@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev) | |||
| 2413 | 2425 | ||
| 2414 | ret = sci_probe_single(dev, dev->id, p, sp); | 2426 | ret = sci_probe_single(dev, dev->id, p, sp); |
| 2415 | if (ret) | 2427 | if (ret) |
| 2416 | goto err_unreg; | 2428 | return ret; |
| 2417 | 2429 | ||
| 2418 | sp->freq_transition.notifier_call = sci_notifier; | 2430 | sp->freq_transition.notifier_call = sci_notifier; |
| 2419 | 2431 | ||
| 2420 | ret = cpufreq_register_notifier(&sp->freq_transition, | 2432 | ret = cpufreq_register_notifier(&sp->freq_transition, |
| 2421 | CPUFREQ_TRANSITION_NOTIFIER); | 2433 | CPUFREQ_TRANSITION_NOTIFIER); |
| 2422 | if (unlikely(ret < 0)) | 2434 | if (unlikely(ret < 0)) { |
| 2423 | goto err_unreg; | 2435 | sci_cleanup_single(sp); |
| 2436 | return ret; | ||
| 2437 | } | ||
| 2424 | 2438 | ||
| 2425 | #ifdef CONFIG_SH_STANDARD_BIOS | 2439 | #ifdef CONFIG_SH_STANDARD_BIOS |
| 2426 | sh_bios_gdb_detach(); | 2440 | sh_bios_gdb_detach(); |
| 2427 | #endif | 2441 | #endif |
| 2428 | 2442 | ||
| 2429 | return 0; | 2443 | return 0; |
| 2430 | |||
| 2431 | err_unreg: | ||
| 2432 | sci_remove(dev); | ||
| 2433 | return ret; | ||
| 2434 | } | 2444 | } |
| 2435 | 2445 | ||
| 2436 | static int sci_suspend(struct device *dev) | 2446 | static int sci_suspend(struct device *dev) |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f2a120eea9d4..36a2a0b7b82c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) | |||
| 567 | 567 | ||
| 568 | usb_autopm_put_interface(acm->control); | 568 | usb_autopm_put_interface(acm->control); |
| 569 | 569 | ||
| 570 | /* | ||
| 571 | * Unthrottle device in case the TTY was closed while throttled. | ||
| 572 | */ | ||
| 573 | spin_lock_irq(&acm->read_lock); | ||
| 574 | acm->throttled = 0; | ||
| 575 | acm->throttle_req = 0; | ||
| 576 | spin_unlock_irq(&acm->read_lock); | ||
| 577 | |||
| 570 | if (acm_submit_read_urbs(acm, GFP_KERNEL)) | 578 | if (acm_submit_read_urbs(acm, GFP_KERNEL)) |
| 571 | goto error_submit_read_urbs; | 579 | goto error_submit_read_urbs; |
| 572 | 580 | ||
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index ea8b304f0e85..8fd398dffced 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
| @@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = { | |||
| 55 | .bInterfaceSubClass = 1, | 55 | .bInterfaceSubClass = 1, |
| 56 | .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */ | 56 | .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */ |
| 57 | }, | 57 | }, |
| 58 | { | ||
| 59 | /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ | ||
| 60 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | | ||
| 61 | USB_DEVICE_ID_MATCH_INT_INFO, | ||
| 62 | .idVendor = HUAWEI_VENDOR_ID, | ||
| 63 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC, | ||
| 64 | .bInterfaceSubClass = 1, | ||
| 65 | .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */ | ||
| 66 | }, | ||
| 58 | { } | 67 | { } |
| 59 | }; | 68 | }; |
| 60 | 69 | ||
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 57ed9e400c06..622b4a48e732 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c | |||
| @@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev) | |||
| 493 | 493 | ||
| 494 | pci_save_state(pci_dev); | 494 | pci_save_state(pci_dev); |
| 495 | 495 | ||
| 496 | /* | ||
| 497 | * Some systems crash if an EHCI controller is in D3 during | ||
| 498 | * a sleep transition. We have to leave such controllers in D0. | ||
| 499 | */ | ||
| 500 | if (hcd->broken_pci_sleep) { | ||
| 501 | dev_dbg(dev, "Staying in PCI D0\n"); | ||
| 502 | return retval; | ||
| 503 | } | ||
| 504 | |||
| 505 | /* If the root hub is dead rather than suspended, disallow remote | 496 | /* If the root hub is dead rather than suspended, disallow remote |
| 506 | * wakeup. usb_hc_died() should ensure that both hosts are marked as | 497 | * wakeup. usb_hc_died() should ensure that both hosts are marked as |
| 507 | * dying, so we only need to check the primary roothub. | 498 | * dying, so we only need to check the primary roothub. |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 04fb834c3fa1..25a7422ee657 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -3379,7 +3379,7 @@ int usb_disable_lpm(struct usb_device *udev) | |||
| 3379 | return 0; | 3379 | return 0; |
| 3380 | 3380 | ||
| 3381 | udev->lpm_disable_count++; | 3381 | udev->lpm_disable_count++; |
| 3382 | if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0)) | 3382 | if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) |
| 3383 | return 0; | 3383 | return 0; |
| 3384 | 3384 | ||
| 3385 | /* If LPM is enabled, attempt to disable it. */ | 3385 | /* If LPM is enabled, attempt to disable it. */ |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index b548cf1dbc62..bdd1c6749d88 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
| @@ -1838,7 +1838,6 @@ free_interfaces: | |||
| 1838 | intfc = cp->intf_cache[i]; | 1838 | intfc = cp->intf_cache[i]; |
| 1839 | intf->altsetting = intfc->altsetting; | 1839 | intf->altsetting = intfc->altsetting; |
| 1840 | intf->num_altsetting = intfc->num_altsetting; | 1840 | intf->num_altsetting = intfc->num_altsetting; |
| 1841 | intf->intf_assoc = find_iad(dev, cp, i); | ||
| 1842 | kref_get(&intfc->ref); | 1841 | kref_get(&intfc->ref); |
| 1843 | 1842 | ||
| 1844 | alt = usb_altnum_to_altsetting(intf, 0); | 1843 | alt = usb_altnum_to_altsetting(intf, 0); |
| @@ -1851,6 +1850,8 @@ free_interfaces: | |||
| 1851 | if (!alt) | 1850 | if (!alt) |
| 1852 | alt = &intf->altsetting[0]; | 1851 | alt = &intf->altsetting[0]; |
| 1853 | 1852 | ||
| 1853 | intf->intf_assoc = | ||
| 1854 | find_iad(dev, cp, alt->desc.bInterfaceNumber); | ||
| 1854 | intf->cur_altsetting = alt; | 1855 | intf->cur_altsetting = alt; |
| 1855 | usb_enable_interface(dev, intf, true); | 1856 | usb_enable_interface(dev, intf, true); |
| 1856 | intf->dev.parent = &dev->dev; | 1857 | intf->dev.parent = &dev->dev; |
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index e23bf7984aaf..9a9bced813ed 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c | |||
| @@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
| 599 | 599 | ||
| 600 | spin_lock_irqsave(&ep->udc->lock, flags); | 600 | spin_lock_irqsave(&ep->udc->lock, flags); |
| 601 | 601 | ||
| 602 | if (ep->ep.desc) { | ||
| 603 | spin_unlock_irqrestore(&ep->udc->lock, flags); | ||
| 604 | DBG(DBG_ERR, "ep%d already enabled\n", ep->index); | ||
| 605 | return -EBUSY; | ||
| 606 | } | ||
| 607 | |||
| 608 | ep->ep.desc = desc; | 602 | ep->ep.desc = desc; |
| 609 | ep->ep.maxpacket = maxpacket; | 603 | ep->ep.maxpacket = maxpacket; |
| 610 | 604 | ||
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 51881f3bd07a..b09452d6f33a 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c | |||
| @@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep, | |||
| 1596 | ep = container_of(_ep, struct qe_ep, ep); | 1596 | ep = container_of(_ep, struct qe_ep, ep); |
| 1597 | 1597 | ||
| 1598 | /* catch various bogus parameters */ | 1598 | /* catch various bogus parameters */ |
| 1599 | if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] || | 1599 | if (!_ep || !desc || _ep->name == ep_name[0] || |
| 1600 | (desc->bDescriptorType != USB_DT_ENDPOINT)) | 1600 | (desc->bDescriptorType != USB_DT_ENDPOINT)) |
| 1601 | return -EINVAL; | 1601 | return -EINVAL; |
| 1602 | 1602 | ||
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index 28316858208b..bc6f9bb9994a 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c | |||
| @@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep, | |||
| 567 | ep = container_of(_ep, struct fsl_ep, ep); | 567 | ep = container_of(_ep, struct fsl_ep, ep); |
| 568 | 568 | ||
| 569 | /* catch various bogus parameters */ | 569 | /* catch various bogus parameters */ |
| 570 | if (!_ep || !desc || ep->ep.desc | 570 | if (!_ep || !desc |
| 571 | || (desc->bDescriptorType != USB_DT_ENDPOINT)) | 571 | || (desc->bDescriptorType != USB_DT_ENDPOINT)) |
| 572 | return -EINVAL; | 572 | return -EINVAL; |
| 573 | 573 | ||
| @@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev) | |||
| 2575 | /* for ep0: the desc defined here; | 2575 | /* for ep0: the desc defined here; |
| 2576 | * for other eps, gadget layer called ep_enable with defined desc | 2576 | * for other eps, gadget layer called ep_enable with defined desc |
| 2577 | */ | 2577 | */ |
| 2578 | udc_controller->eps[0].desc = &fsl_ep0_desc; | 2578 | udc_controller->eps[0].ep.desc = &fsl_ep0_desc; |
| 2579 | udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD; | 2579 | udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD; |
| 2580 | 2580 | ||
| 2581 | /* setup the udc->eps[] for non-control endpoints and link | 2581 | /* setup the udc->eps[] for non-control endpoints and link |
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h index 5cd7b7e7ddb4..f61a967f7082 100644 --- a/drivers/usb/gadget/fsl_usb2_udc.h +++ b/drivers/usb/gadget/fsl_usb2_udc.h | |||
| @@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length) | |||
| 568 | /* | 568 | /* |
| 569 | * ### internal used help routines. | 569 | * ### internal used help routines. |
| 570 | */ | 570 | */ |
| 571 | #define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF) | 571 | #define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF) |
| 572 | #define ep_maxpacket(EP) ((EP)->ep.maxpacket) | 572 | #define ep_maxpacket(EP) ((EP)->ep.maxpacket) |
| 573 | #define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \ | 573 | #define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \ |
| 574 | USB_DIR_IN ):((EP)->desc->bEndpointAddress \ | 574 | USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \ |
| 575 | & USB_DIR_IN)==USB_DIR_IN) | 575 | & USB_DIR_IN)==USB_DIR_IN) |
| 576 | #define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \ | 576 | #define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \ |
| 577 | &udc->eps[pipe]) | 577 | &udc->eps[pipe]) |
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c index b241e6c6a7f2..3d28fb976c78 100644 --- a/drivers/usb/gadget/goku_udc.c +++ b/drivers/usb/gadget/goku_udc.c | |||
| @@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
| 102 | unsigned long flags; | 102 | unsigned long flags; |
| 103 | 103 | ||
| 104 | ep = container_of(_ep, struct goku_ep, ep); | 104 | ep = container_of(_ep, struct goku_ep, ep); |
| 105 | if (!_ep || !desc || ep->ep.desc | 105 | if (!_ep || !desc |
| 106 | || desc->bDescriptorType != USB_DT_ENDPOINT) | 106 | || desc->bDescriptorType != USB_DT_ENDPOINT) |
| 107 | return -EINVAL; | 107 | return -EINVAL; |
| 108 | dev = ep->dev; | 108 | dev = ep->dev; |
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c index dbcd1329495e..117a4bba1b8c 100644 --- a/drivers/usb/gadget/mv_udc_core.c +++ b/drivers/usb/gadget/mv_udc_core.c | |||
| @@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep, | |||
| 464 | ep = container_of(_ep, struct mv_ep, ep); | 464 | ep = container_of(_ep, struct mv_ep, ep); |
| 465 | udc = ep->udc; | 465 | udc = ep->udc; |
| 466 | 466 | ||
| 467 | if (!_ep || !desc || ep->ep.desc | 467 | if (!_ep || !desc |
| 468 | || desc->bDescriptorType != USB_DT_ENDPOINT) | 468 | || desc->bDescriptorType != USB_DT_ENDPOINT) |
| 469 | return -EINVAL; | 469 | return -EINVAL; |
| 470 | 470 | ||
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 7ba32469c5bd..a460e8c204f4 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
| @@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep, | |||
| 153 | u16 maxp; | 153 | u16 maxp; |
| 154 | 154 | ||
| 155 | /* catch various bogus parameters */ | 155 | /* catch various bogus parameters */ |
| 156 | if (!_ep || !desc || ep->ep.desc | 156 | if (!_ep || !desc |
| 157 | || desc->bDescriptorType != USB_DT_ENDPOINT | 157 | || desc->bDescriptorType != USB_DT_ENDPOINT |
| 158 | || ep->bEndpointAddress != desc->bEndpointAddress | 158 | || ep->bEndpointAddress != desc->bEndpointAddress |
| 159 | || ep->maxpacket < usb_endpoint_maxp(desc)) { | 159 | || ep->maxpacket < usb_endpoint_maxp(desc)) { |
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index d7c8cb3bf759..f7ff9e8e746a 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c | |||
| @@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep, | |||
| 218 | struct pxa25x_udc *dev; | 218 | struct pxa25x_udc *dev; |
| 219 | 219 | ||
| 220 | ep = container_of (_ep, struct pxa25x_ep, ep); | 220 | ep = container_of (_ep, struct pxa25x_ep, ep); |
| 221 | if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name | 221 | if (!_ep || !desc || _ep->name == ep0name |
| 222 | || desc->bDescriptorType != USB_DT_ENDPOINT | 222 | || desc->bDescriptorType != USB_DT_ENDPOINT |
| 223 | || ep->bEndpointAddress != desc->bEndpointAddress | 223 | || ep->bEndpointAddress != desc->bEndpointAddress |
| 224 | || ep->fifo_size < usb_endpoint_maxp (desc)) { | 224 | || ep->fifo_size < usb_endpoint_maxp (desc)) { |
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c index 36c6836eeb0f..236b271871a0 100644 --- a/drivers/usb/gadget/s3c-hsudc.c +++ b/drivers/usb/gadget/s3c-hsudc.c | |||
| @@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep, | |||
| 760 | u32 ecr = 0; | 760 | u32 ecr = 0; |
| 761 | 761 | ||
| 762 | hsep = our_ep(_ep); | 762 | hsep = our_ep(_ep); |
| 763 | if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name | 763 | if (!_ep || !desc || _ep->name == ep0name |
| 764 | || desc->bDescriptorType != USB_DT_ENDPOINT | 764 | || desc->bDescriptorType != USB_DT_ENDPOINT |
| 765 | || hsep->bEndpointAddress != desc->bEndpointAddress | 765 | || hsep->bEndpointAddress != desc->bEndpointAddress |
| 766 | || ep_maxpacket(hsep) < usb_endpoint_maxp(desc)) | 766 | || ep_maxpacket(hsep) < usb_endpoint_maxp(desc)) |
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index 3de71d37d75e..f2e51f50e528 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c | |||
| @@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep, | |||
| 1062 | 1062 | ||
| 1063 | ep = to_s3c2410_ep(_ep); | 1063 | ep = to_s3c2410_ep(_ep); |
| 1064 | 1064 | ||
| 1065 | if (!_ep || !desc || ep->ep.desc | 1065 | if (!_ep || !desc |
| 1066 | || _ep->name == ep0name | 1066 | || _ep->name == ep0name |
| 1067 | || desc->bDescriptorType != USB_DT_ENDPOINT) | 1067 | || desc->bDescriptorType != USB_DT_ENDPOINT) |
| 1068 | return -EINVAL; | 1068 | return -EINVAL; |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b100f5f9f4b6..800be38c78b4 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
| @@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd) | |||
| 671 | hw = ehci->async->hw; | 671 | hw = ehci->async->hw; |
| 672 | hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); | 672 | hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); |
| 673 | hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); | 673 | hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); |
| 674 | #if defined(CONFIG_PPC_PS3) | ||
| 674 | hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ | 675 | hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ |
| 676 | #endif | ||
| 675 | hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); | 677 | hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); |
| 676 | hw->hw_qtd_next = EHCI_LIST_END(ehci); | 678 | hw->hw_qtd_next = EHCI_LIST_END(ehci); |
| 677 | ehci->async->qh_state = QH_STATE_LINKED; | 679 | ehci->async->qh_state = QH_STATE_LINKED; |
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index a44294d13494..17cfb8a1131c 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <linux/regulator/consumer.h> | 43 | #include <linux/regulator/consumer.h> |
| 44 | #include <linux/pm_runtime.h> | 44 | #include <linux/pm_runtime.h> |
| 45 | #include <linux/gpio.h> | 45 | #include <linux/gpio.h> |
| 46 | #include <linux/clk.h> | ||
| 46 | 47 | ||
| 47 | /* EHCI Register Set */ | 48 | /* EHCI Register Set */ |
| 48 | #define EHCI_INSNREG04 (0xA0) | 49 | #define EHCI_INSNREG04 (0xA0) |
| @@ -55,6 +56,15 @@ | |||
| 55 | #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 | 56 | #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 |
| 56 | #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 | 57 | #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 |
| 57 | 58 | ||
| 59 | /* Errata i693 */ | ||
| 60 | static struct clk *utmi_p1_fck; | ||
| 61 | static struct clk *utmi_p2_fck; | ||
| 62 | static struct clk *xclk60mhsp1_ck; | ||
| 63 | static struct clk *xclk60mhsp2_ck; | ||
| 64 | static struct clk *usbhost_p1_fck; | ||
| 65 | static struct clk *usbhost_p2_fck; | ||
| 66 | static struct clk *init_60m_fclk; | ||
| 67 | |||
| 58 | /*-------------------------------------------------------------------------*/ | 68 | /*-------------------------------------------------------------------------*/ |
| 59 | 69 | ||
| 60 | static const struct hc_driver ehci_omap_hc_driver; | 70 | static const struct hc_driver ehci_omap_hc_driver; |
| @@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg) | |||
| 70 | return __raw_readl(base + reg); | 80 | return __raw_readl(base + reg); |
| 71 | } | 81 | } |
| 72 | 82 | ||
| 83 | /* Erratum i693 workaround sequence */ | ||
| 84 | static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) | ||
| 85 | { | ||
| 86 | int ret = 0; | ||
| 87 | |||
| 88 | /* Switch to the internal 60 MHz clock */ | ||
| 89 | ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); | ||
| 90 | if (ret != 0) | ||
| 91 | ehci_err(ehci, "init_60m_fclk set parent" | ||
| 92 | "failed error:%d\n", ret); | ||
| 93 | |||
| 94 | ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); | ||
| 95 | if (ret != 0) | ||
| 96 | ehci_err(ehci, "init_60m_fclk set parent" | ||
| 97 | "failed error:%d\n", ret); | ||
| 98 | |||
| 99 | clk_enable(usbhost_p1_fck); | ||
| 100 | clk_enable(usbhost_p2_fck); | ||
| 101 | |||
| 102 | /* Wait 1ms and switch back to the external clock */ | ||
| 103 | mdelay(1); | ||
| 104 | ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); | ||
| 105 | if (ret != 0) | ||
| 106 | ehci_err(ehci, "xclk60mhsp1_ck set parent" | ||
| 107 | "failed error:%d\n", ret); | ||
| 108 | |||
| 109 | ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); | ||
| 110 | if (ret != 0) | ||
| 111 | ehci_err(ehci, "xclk60mhsp2_ck set parent" | ||
| 112 | "failed error:%d\n", ret); | ||
| 113 | |||
| 114 | clk_disable(usbhost_p1_fck); | ||
| 115 | clk_disable(usbhost_p2_fck); | ||
| 116 | } | ||
| 117 | |||
| 73 | static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) | 118 | static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) |
| 74 | { | 119 | { |
| 75 | struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); | 120 | struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); |
| @@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) | |||
| 100 | } | 145 | } |
| 101 | } | 146 | } |
| 102 | 147 | ||
| 148 | static int omap_ehci_hub_control( | ||
| 149 | struct usb_hcd *hcd, | ||
| 150 | u16 typeReq, | ||
| 151 | u16 wValue, | ||
| 152 | u16 wIndex, | ||
| 153 | char *buf, | ||
| 154 | u16 wLength | ||
| 155 | ) | ||
| 156 | { | ||
| 157 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
| 158 | u32 __iomem *status_reg = &ehci->regs->port_status[ | ||
| 159 | (wIndex & 0xff) - 1]; | ||
| 160 | u32 temp; | ||
| 161 | unsigned long flags; | ||
| 162 | int retval = 0; | ||
| 163 | |||
| 164 | spin_lock_irqsave(&ehci->lock, flags); | ||
| 165 | |||
| 166 | if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { | ||
| 167 | temp = ehci_readl(ehci, status_reg); | ||
| 168 | if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { | ||
| 169 | retval = -EPIPE; | ||
| 170 | goto done; | ||
| 171 | } | ||
| 172 | |||
| 173 | temp &= ~PORT_WKCONN_E; | ||
| 174 | temp |= PORT_WKDISC_E | PORT_WKOC_E; | ||
| 175 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | ||
| 176 | |||
| 177 | omap_ehci_erratum_i693(ehci); | ||
| 178 | |||
| 179 | set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); | ||
| 180 | goto done; | ||
| 181 | } | ||
| 182 | |||
| 183 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
| 184 | |||
| 185 | /* Handle the hub control events here */ | ||
| 186 | return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); | ||
| 187 | done: | ||
| 188 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
| 189 | return retval; | ||
| 190 | } | ||
| 191 | |||
| 103 | static void disable_put_regulator( | 192 | static void disable_put_regulator( |
| 104 | struct ehci_hcd_omap_platform_data *pdata) | 193 | struct ehci_hcd_omap_platform_data *pdata) |
| 105 | { | 194 | { |
| @@ -264,8 +353,76 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
| 264 | /* root ports should always stay powered */ | 353 | /* root ports should always stay powered */ |
| 265 | ehci_port_power(omap_ehci, 1); | 354 | ehci_port_power(omap_ehci, 1); |
| 266 | 355 | ||
| 356 | /* get clocks */ | ||
| 357 | utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); | ||
| 358 | if (IS_ERR(utmi_p1_fck)) { | ||
| 359 | ret = PTR_ERR(utmi_p1_fck); | ||
| 360 | dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); | ||
| 361 | goto err_add_hcd; | ||
| 362 | } | ||
| 363 | |||
| 364 | xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); | ||
| 365 | if (IS_ERR(xclk60mhsp1_ck)) { | ||
| 366 | ret = PTR_ERR(xclk60mhsp1_ck); | ||
| 367 | dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); | ||
| 368 | goto err_utmi_p1_fck; | ||
| 369 | } | ||
| 370 | |||
| 371 | utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); | ||
| 372 | if (IS_ERR(utmi_p2_fck)) { | ||
| 373 | ret = PTR_ERR(utmi_p2_fck); | ||
| 374 | dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); | ||
| 375 | goto err_xclk60mhsp1_ck; | ||
| 376 | } | ||
| 377 | |||
| 378 | xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); | ||
| 379 | if (IS_ERR(xclk60mhsp2_ck)) { | ||
| 380 | ret = PTR_ERR(xclk60mhsp2_ck); | ||
| 381 | dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); | ||
| 382 | goto err_utmi_p2_fck; | ||
| 383 | } | ||
| 384 | |||
| 385 | usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); | ||
| 386 | if (IS_ERR(usbhost_p1_fck)) { | ||
| 387 | ret = PTR_ERR(usbhost_p1_fck); | ||
| 388 | dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); | ||
| 389 | goto err_xclk60mhsp2_ck; | ||
| 390 | } | ||
| 391 | |||
| 392 | usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); | ||
| 393 | if (IS_ERR(usbhost_p2_fck)) { | ||
| 394 | ret = PTR_ERR(usbhost_p2_fck); | ||
| 395 | dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); | ||
| 396 | goto err_usbhost_p1_fck; | ||
| 397 | } | ||
| 398 | |||
| 399 | init_60m_fclk = clk_get(dev, "init_60m_fclk"); | ||
| 400 | if (IS_ERR(init_60m_fclk)) { | ||
| 401 | ret = PTR_ERR(init_60m_fclk); | ||
| 402 | dev_err(dev, "init_60m_fclk failed error:%d\n", ret); | ||
| 403 | goto err_usbhost_p2_fck; | ||
| 404 | } | ||
| 405 | |||
| 267 | return 0; | 406 | return 0; |
| 268 | 407 | ||
| 408 | err_usbhost_p2_fck: | ||
| 409 | clk_put(usbhost_p2_fck); | ||
| 410 | |||
| 411 | err_usbhost_p1_fck: | ||
| 412 | clk_put(usbhost_p1_fck); | ||
| 413 | |||
| 414 | err_xclk60mhsp2_ck: | ||
| 415 | clk_put(xclk60mhsp2_ck); | ||
| 416 | |||
| 417 | err_utmi_p2_fck: | ||
| 418 | clk_put(utmi_p2_fck); | ||
| 419 | |||
| 420 | err_xclk60mhsp1_ck: | ||
| 421 | clk_put(xclk60mhsp1_ck); | ||
| 422 | |||
| 423 | err_utmi_p1_fck: | ||
| 424 | clk_put(utmi_p1_fck); | ||
| 425 | |||
| 269 | err_add_hcd: | 426 | err_add_hcd: |
| 270 | disable_put_regulator(pdata); | 427 | disable_put_regulator(pdata); |
| 271 | pm_runtime_put_sync(dev); | 428 | pm_runtime_put_sync(dev); |
| @@ -294,6 +451,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) | |||
| 294 | disable_put_regulator(dev->platform_data); | 451 | disable_put_regulator(dev->platform_data); |
| 295 | iounmap(hcd->regs); | 452 | iounmap(hcd->regs); |
| 296 | usb_put_hcd(hcd); | 453 | usb_put_hcd(hcd); |
| 454 | |||
| 455 | clk_put(utmi_p1_fck); | ||
| 456 | clk_put(utmi_p2_fck); | ||
| 457 | clk_put(xclk60mhsp1_ck); | ||
| 458 | clk_put(xclk60mhsp2_ck); | ||
| 459 | clk_put(usbhost_p1_fck); | ||
| 460 | clk_put(usbhost_p2_fck); | ||
| 461 | clk_put(init_60m_fclk); | ||
| 462 | |||
| 297 | pm_runtime_put_sync(dev); | 463 | pm_runtime_put_sync(dev); |
| 298 | pm_runtime_disable(dev); | 464 | pm_runtime_disable(dev); |
| 299 | 465 | ||
| @@ -364,7 +530,7 @@ static const struct hc_driver ehci_omap_hc_driver = { | |||
| 364 | * root hub support | 530 | * root hub support |
| 365 | */ | 531 | */ |
| 366 | .hub_status_data = ehci_hub_status_data, | 532 | .hub_status_data = ehci_hub_status_data, |
| 367 | .hub_control = ehci_hub_control, | 533 | .hub_control = omap_ehci_hub_control, |
| 368 | .bus_suspend = ehci_bus_suspend, | 534 | .bus_suspend = ehci_bus_suspend, |
| 369 | .bus_resume = ehci_bus_resume, | 535 | .bus_resume = ehci_bus_resume, |
| 370 | 536 | ||
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index bc94d7bf072d..123481793a47 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
| @@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
| 144 | hcd->has_tt = 1; | 144 | hcd->has_tt = 1; |
| 145 | tdi_reset(ehci); | 145 | tdi_reset(ehci); |
| 146 | } | 146 | } |
| 147 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) { | ||
| 148 | /* EHCI #1 or #2 on 6 Series/C200 Series chipset */ | ||
| 149 | if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) { | ||
| 150 | ehci_info(ehci, "broken D3 during system sleep on ASUS\n"); | ||
| 151 | hcd->broken_pci_sleep = 1; | ||
| 152 | device_set_wakeup_capable(&pdev->dev, false); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | break; | 147 | break; |
| 156 | case PCI_VENDOR_ID_TDI: | 148 | case PCI_VENDOR_ID_TDI: |
| 157 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { | 149 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { |
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index ca819cdd0c5e..e7cb3925abf8 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c | |||
| @@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) | |||
| 126 | goto fail_create_hcd; | 126 | goto fail_create_hcd; |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | if (pdev->dev.platform_data != NULL) | 129 | pdata = pdev->dev.platform_data; |
| 130 | pdata = pdev->dev.platform_data; | ||
| 131 | 130 | ||
| 132 | /* initialize hcd */ | 131 | /* initialize hcd */ |
| 133 | hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, | 132 | hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, |
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index 9c2cc4633894..e9713d589e30 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c | |||
| @@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op) | |||
| 270 | * | 270 | * |
| 271 | * Properly shutdown the hcd, call driver's shutdown routine. | 271 | * Properly shutdown the hcd, call driver's shutdown routine. |
| 272 | */ | 272 | */ |
| 273 | static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op) | 273 | static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op) |
| 274 | { | 274 | { |
| 275 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); | 275 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); |
| 276 | 276 | ||
| 277 | if (hcd->driver->shutdown) | 277 | if (hcd->driver->shutdown) |
| 278 | hcd->driver->shutdown(hcd); | 278 | hcd->driver->shutdown(hcd); |
| 279 | |||
| 280 | return 0; | ||
| 281 | } | 279 | } |
| 282 | 280 | ||
| 283 | 281 | ||
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 836772dfabd3..2f3619eefefa 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
| @@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd) | |||
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | /* Carry out the final steps of resuming the controller device */ | 319 | /* Carry out the final steps of resuming the controller device */ |
| 320 | static void ohci_finish_controller_resume(struct usb_hcd *hcd) | 320 | static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd) |
| 321 | { | 321 | { |
| 322 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | 322 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
| 323 | int port; | 323 | int port; |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ec4338eec826..77689bd64cac 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
| @@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci, | |||
| 793 | struct xhci_virt_device *virt_dev, | 793 | struct xhci_virt_device *virt_dev, |
| 794 | int slot_id) | 794 | int slot_id) |
| 795 | { | 795 | { |
| 796 | struct list_head *tt; | ||
| 797 | struct list_head *tt_list_head; | 796 | struct list_head *tt_list_head; |
| 798 | struct list_head *tt_next; | 797 | struct xhci_tt_bw_info *tt_info, *next; |
| 799 | struct xhci_tt_bw_info *tt_info; | 798 | bool slot_found = false; |
| 800 | 799 | ||
| 801 | /* If the device never made it past the Set Address stage, | 800 | /* If the device never made it past the Set Address stage, |
| 802 | * it may not have the real_port set correctly. | 801 | * it may not have the real_port set correctly. |
| @@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci, | |||
| 808 | } | 807 | } |
| 809 | 808 | ||
| 810 | tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); | 809 | tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); |
| 811 | if (list_empty(tt_list_head)) | 810 | list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { |
| 812 | return; | 811 | /* Multi-TT hubs will have more than one entry */ |
| 813 | 812 | if (tt_info->slot_id == slot_id) { | |
| 814 | list_for_each(tt, tt_list_head) { | 813 | slot_found = true; |
| 815 | tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); | 814 | list_del(&tt_info->tt_list); |
| 816 | if (tt_info->slot_id == slot_id) | 815 | kfree(tt_info); |
| 816 | } else if (slot_found) { | ||
| 817 | break; | 817 | break; |
| 818 | } | ||
| 818 | } | 819 | } |
| 819 | /* Cautionary measure in case the hub was disconnected before we | ||
| 820 | * stored the TT information. | ||
| 821 | */ | ||
| 822 | if (tt_info->slot_id != slot_id) | ||
| 823 | return; | ||
| 824 | |||
| 825 | tt_next = tt->next; | ||
| 826 | tt_info = list_entry(tt, struct xhci_tt_bw_info, | ||
| 827 | tt_list); | ||
| 828 | /* Multi-TT hubs will have more than one entry */ | ||
| 829 | do { | ||
| 830 | list_del(tt); | ||
| 831 | kfree(tt_info); | ||
| 832 | tt = tt_next; | ||
| 833 | if (list_empty(tt_list_head)) | ||
| 834 | break; | ||
| 835 | tt_next = tt->next; | ||
| 836 | tt_info = list_entry(tt, struct xhci_tt_bw_info, | ||
| 837 | tt_list); | ||
| 838 | } while (tt_info->slot_id == slot_id); | ||
| 839 | } | 820 | } |
| 840 | 821 | ||
| 841 | int xhci_alloc_tt_info(struct xhci_hcd *xhci, | 822 | int xhci_alloc_tt_info(struct xhci_hcd *xhci, |
| @@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
| 1791 | { | 1772 | { |
| 1792 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 1773 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
| 1793 | struct dev_info *dev_info, *next; | 1774 | struct dev_info *dev_info, *next; |
| 1794 | struct list_head *tt_list_head; | ||
| 1795 | struct list_head *tt; | ||
| 1796 | struct list_head *endpoints; | ||
| 1797 | struct list_head *ep, *q; | ||
| 1798 | struct xhci_tt_bw_info *tt_info; | ||
| 1799 | struct xhci_interval_bw_table *bwt; | ||
| 1800 | struct xhci_virt_ep *virt_ep; | ||
| 1801 | |||
| 1802 | unsigned long flags; | 1775 | unsigned long flags; |
| 1803 | int size; | 1776 | int size; |
| 1804 | int i; | 1777 | int i, j, num_ports; |
| 1805 | 1778 | ||
| 1806 | /* Free the Event Ring Segment Table and the actual Event Ring */ | 1779 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
| 1807 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | 1780 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
| @@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
| 1860 | } | 1833 | } |
| 1861 | spin_unlock_irqrestore(&xhci->lock, flags); | 1834 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1862 | 1835 | ||
| 1863 | bwt = &xhci->rh_bw->bw_table; | 1836 | num_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
| 1864 | for (i = 0; i < XHCI_MAX_INTERVAL; i++) { | 1837 | for (i = 0; i < num_ports; i++) { |
| 1865 | endpoints = &bwt->interval_bw[i].endpoints; | 1838 | struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; |
| 1866 | list_for_each_safe(ep, q, endpoints) { | 1839 | for (j = 0; j < XHCI_MAX_INTERVAL; j++) { |
| 1867 | virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list); | 1840 | struct list_head *ep = &bwt->interval_bw[j].endpoints; |
| 1868 | list_del(&virt_ep->bw_endpoint_list); | 1841 | while (!list_empty(ep)) |
| 1869 | kfree(virt_ep); | 1842 | list_del_init(ep->next); |
| 1870 | } | 1843 | } |
| 1871 | } | 1844 | } |
| 1872 | 1845 | ||
| 1873 | tt_list_head = &xhci->rh_bw->tts; | 1846 | for (i = 0; i < num_ports; i++) { |
| 1874 | list_for_each_safe(tt, q, tt_list_head) { | 1847 | struct xhci_tt_bw_info *tt, *n; |
| 1875 | tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); | 1848 | list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { |
| 1876 | list_del(tt); | 1849 | list_del(&tt->tt_list); |
| 1877 | kfree(tt_info); | 1850 | kfree(tt); |
| 1851 | } | ||
| 1878 | } | 1852 | } |
| 1879 | 1853 | ||
| 1880 | xhci->num_usb2_ports = 0; | 1854 | xhci->num_usb2_ports = 0; |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index afdc73ee84a6..a979cd0dbe0f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci) | |||
| 795 | command = xhci_readl(xhci, &xhci->op_regs->command); | 795 | command = xhci_readl(xhci, &xhci->op_regs->command); |
| 796 | command |= CMD_CSS; | 796 | command |= CMD_CSS; |
| 797 | xhci_writel(xhci, command, &xhci->op_regs->command); | 797 | xhci_writel(xhci, command, &xhci->op_regs->command); |
| 798 | if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) { | 798 | if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) { |
| 799 | xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n"); | 799 | xhci_warn(xhci, "WARN: xHC save state timeout\n"); |
| 800 | spin_unlock_irq(&xhci->lock); | 800 | spin_unlock_irq(&xhci->lock); |
| 801 | return -ETIMEDOUT; | 801 | return -ETIMEDOUT; |
| 802 | } | 802 | } |
| @@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
| 848 | command |= CMD_CRS; | 848 | command |= CMD_CRS; |
| 849 | xhci_writel(xhci, command, &xhci->op_regs->command); | 849 | xhci_writel(xhci, command, &xhci->op_regs->command); |
| 850 | if (handshake(xhci, &xhci->op_regs->status, | 850 | if (handshake(xhci, &xhci->op_regs->status, |
| 851 | STS_RESTORE, 0, 10*100)) { | 851 | STS_RESTORE, 0, 10 * 1000)) { |
| 852 | xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n"); | 852 | xhci_warn(xhci, "WARN: xHC restore state timeout\n"); |
| 853 | spin_unlock_irq(&xhci->lock); | 853 | spin_unlock_irq(&xhci->lock); |
| 854 | return -ETIMEDOUT; | 854 | return -ETIMEDOUT; |
| 855 | } | 855 | } |
| @@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, | |||
| 3906 | default: | 3906 | default: |
| 3907 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", | 3907 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", |
| 3908 | __func__); | 3908 | __func__); |
| 3909 | return -EINVAL; | 3909 | return USB3_LPM_DISABLED; |
| 3910 | } | 3910 | } |
| 3911 | 3911 | ||
| 3912 | if (sel <= max_sel_pel && pel <= max_sel_pel) | 3912 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 768b4b55c816..9d63ba4d10d6 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
| 35 | 35 | ||
| 36 | #include <mach/cputype.h> | 36 | #include <mach/cputype.h> |
| 37 | #include <mach/hardware.h> | ||
| 37 | 38 | ||
| 38 | #include <asm/mach-types.h> | 39 | #include <asm/mach-types.h> |
| 39 | 40 | ||
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h index 046c84433cad..371baa0ee509 100644 --- a/drivers/usb/musb/davinci.h +++ b/drivers/usb/musb/davinci.h | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | /* Integrated highspeed/otg PHY */ | 17 | /* Integrated highspeed/otg PHY */ |
| 18 | #define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) | 18 | #define USBPHY_CTL_PADDR 0x01c40034 |
| 19 | #define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ | 19 | #define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ |
| 20 | #define USBPHY_PHYCLKGD BIT(8) | 20 | #define USBPHY_PHYCLKGD BIT(8) |
| 21 | #define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ | 21 | #define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ |
| @@ -27,7 +27,7 @@ | |||
| 27 | #define USBPHY_OTGPDWN BIT(1) | 27 | #define USBPHY_OTGPDWN BIT(1) |
| 28 | #define USBPHY_PHYPDWN BIT(0) | 28 | #define USBPHY_PHYPDWN BIT(0) |
| 29 | 29 | ||
| 30 | #define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48) | 30 | #define DM355_DEEPSLEEP_PADDR 0x01c40048 |
| 31 | #define DRVVBUS_FORCE BIT(2) | 31 | #define DRVVBUS_FORCE BIT(2) |
| 32 | #define DRVVBUS_OVERRIDE BIT(1) | 32 | #define DRVVBUS_OVERRIDE BIT(1) |
| 33 | 33 | ||
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index f42c29b11f71..95918dacc99a 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
| @@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep) | |||
| 1232 | } | 1232 | } |
| 1233 | 1233 | ||
| 1234 | musb_ep->desc = NULL; | 1234 | musb_ep->desc = NULL; |
| 1235 | musb_ep->end_point.desc = NULL; | ||
| 1235 | 1236 | ||
| 1236 | /* abort all pending DMA and requests */ | 1237 | /* abort all pending DMA and requests */ |
| 1237 | nuke(musb_ep, -ESHUTDOWN); | 1238 | nuke(musb_ep, -ESHUTDOWN); |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 1b1926200ba7..73d25cd8cba5 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = { | |||
| 82 | { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ | 82 | { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ |
| 83 | { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ | 83 | { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ |
| 84 | { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ | 84 | { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ |
| 85 | { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */ | ||
| 85 | { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ | 86 | { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ |
| 86 | { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ | 87 | { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ |
| 87 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ | 88 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8c084ea34e26..bc912e5a3beb 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = { | |||
| 737 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, | 737 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, |
| 738 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, | 738 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, |
| 739 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, | 739 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, |
| 740 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, | ||
| 740 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, | 741 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, |
| 741 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, | 742 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, |
| 742 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, | 743 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index f3c7c78ede33..5661c7e2d415 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -784,6 +784,7 @@ | |||
| 784 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ | 784 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ |
| 785 | #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ | 785 | #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ |
| 786 | #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ | 786 | #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ |
| 787 | #define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ | ||
| 787 | 788 | ||
| 788 | 789 | ||
| 789 | /* | 790 | /* |
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 105a6d898ca4..9b026bf7afef 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
| @@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct"); | |||
| 39 | 39 | ||
| 40 | static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ | 40 | static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ |
| 41 | 41 | ||
| 42 | /* we want to look at all devices, as the vendor/product id can change | ||
| 43 | * depending on the command line argument */ | ||
| 44 | static const struct usb_device_id generic_serial_ids[] = { | ||
| 45 | {.driver_info = 42}, | ||
| 46 | {} | ||
| 47 | }; | ||
| 48 | |||
| 49 | /* All of the device info needed for the Generic Serial Converter */ | 42 | /* All of the device info needed for the Generic Serial Converter */ |
| 50 | struct usb_serial_driver usb_serial_generic_device = { | 43 | struct usb_serial_driver usb_serial_generic_device = { |
| 51 | .driver = { | 44 | .driver = { |
| @@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug) | |||
| 79 | USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; | 72 | USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; |
| 80 | 73 | ||
| 81 | /* register our generic driver with ourselves */ | 74 | /* register our generic driver with ourselves */ |
| 82 | retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids); | 75 | retval = usb_serial_register_drivers(serial_drivers, |
| 76 | "usbserial_generic", generic_device_ids); | ||
| 83 | #endif | 77 | #endif |
| 84 | return retval; | 78 | return retval; |
| 85 | } | 79 | } |
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index d0ec1aa52719..a71fa0aa0406 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c | |||
| @@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial, | |||
| 309 | MCT_U232_SET_REQUEST_TYPE, | 309 | MCT_U232_SET_REQUEST_TYPE, |
| 310 | 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, | 310 | 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, |
| 311 | WDR_TIMEOUT); | 311 | WDR_TIMEOUT); |
| 312 | if (rc < 0) | 312 | kfree(buf); |
| 313 | dev_err(&serial->dev->dev, | 313 | |
| 314 | "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); | ||
| 315 | dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); | 314 | dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); |
| 316 | 315 | ||
| 317 | kfree(buf); | 316 | if (rc < 0) { |
| 318 | return rc; | 317 | dev_err(&serial->dev->dev, |
| 318 | "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); | ||
| 319 | return rc; | ||
| 320 | } | ||
| 321 | return 0; | ||
| 319 | } /* mct_u232_set_modem_ctrl */ | 322 | } /* mct_u232_set_modem_ctrl */ |
| 320 | 323 | ||
| 321 | static int mct_u232_get_modem_stat(struct usb_serial *serial, | 324 | static int mct_u232_get_modem_stat(struct usb_serial *serial, |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 29160f8b5101..57eca2448424 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
| @@ -190,7 +190,7 @@ | |||
| 190 | 190 | ||
| 191 | static int device_type; | 191 | static int device_type; |
| 192 | 192 | ||
| 193 | static const struct usb_device_id id_table[] __devinitconst = { | 193 | static const struct usb_device_id id_table[] = { |
| 194 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 194 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
| 195 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 195 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
| 196 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)}, | 196 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)}, |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 1aae9028cd0b..e668a2460bd4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | /* Function prototypes */ | 47 | /* Function prototypes */ |
| 48 | static int option_probe(struct usb_serial *serial, | 48 | static int option_probe(struct usb_serial *serial, |
| 49 | const struct usb_device_id *id); | 49 | const struct usb_device_id *id); |
| 50 | static void option_release(struct usb_serial *serial); | ||
| 50 | static int option_send_setup(struct usb_serial_port *port); | 51 | static int option_send_setup(struct usb_serial_port *port); |
| 51 | static void option_instat_callback(struct urb *urb); | 52 | static void option_instat_callback(struct urb *urb); |
| 52 | 53 | ||
| @@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 150 | #define HUAWEI_PRODUCT_E14AC 0x14AC | 151 | #define HUAWEI_PRODUCT_E14AC 0x14AC |
| 151 | #define HUAWEI_PRODUCT_K3806 0x14AE | 152 | #define HUAWEI_PRODUCT_K3806 0x14AE |
| 152 | #define HUAWEI_PRODUCT_K4605 0x14C6 | 153 | #define HUAWEI_PRODUCT_K4605 0x14C6 |
| 154 | #define HUAWEI_PRODUCT_K5005 0x14C8 | ||
| 153 | #define HUAWEI_PRODUCT_K3770 0x14C9 | 155 | #define HUAWEI_PRODUCT_K3770 0x14C9 |
| 154 | #define HUAWEI_PRODUCT_K3771 0x14CA | 156 | #define HUAWEI_PRODUCT_K3771 0x14CA |
| 155 | #define HUAWEI_PRODUCT_K4510 0x14CB | 157 | #define HUAWEI_PRODUCT_K4510 0x14CB |
| @@ -425,7 +427,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 425 | #define SAMSUNG_VENDOR_ID 0x04e8 | 427 | #define SAMSUNG_VENDOR_ID 0x04e8 |
| 426 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 | 428 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 |
| 427 | 429 | ||
| 428 | /* YUGA products www.yuga-info.com*/ | 430 | /* YUGA products www.yuga-info.com gavin.kx@qq.com */ |
| 429 | #define YUGA_VENDOR_ID 0x257A | 431 | #define YUGA_VENDOR_ID 0x257A |
| 430 | #define YUGA_PRODUCT_CEM600 0x1601 | 432 | #define YUGA_PRODUCT_CEM600 0x1601 |
| 431 | #define YUGA_PRODUCT_CEM610 0x1602 | 433 | #define YUGA_PRODUCT_CEM610 0x1602 |
| @@ -442,6 +444,8 @@ static void option_instat_callback(struct urb *urb); | |||
| 442 | #define YUGA_PRODUCT_CEU516 0x160C | 444 | #define YUGA_PRODUCT_CEU516 0x160C |
| 443 | #define YUGA_PRODUCT_CEU528 0x160D | 445 | #define YUGA_PRODUCT_CEU528 0x160D |
| 444 | #define YUGA_PRODUCT_CEU526 0x160F | 446 | #define YUGA_PRODUCT_CEU526 0x160F |
| 447 | #define YUGA_PRODUCT_CEU881 0x161F | ||
| 448 | #define YUGA_PRODUCT_CEU882 0x162F | ||
| 445 | 449 | ||
| 446 | #define YUGA_PRODUCT_CWM600 0x2601 | 450 | #define YUGA_PRODUCT_CWM600 0x2601 |
| 447 | #define YUGA_PRODUCT_CWM610 0x2602 | 451 | #define YUGA_PRODUCT_CWM610 0x2602 |
| @@ -457,23 +461,26 @@ static void option_instat_callback(struct urb *urb); | |||
| 457 | #define YUGA_PRODUCT_CWU518 0x260B | 461 | #define YUGA_PRODUCT_CWU518 0x260B |
| 458 | #define YUGA_PRODUCT_CWU516 0x260C | 462 | #define YUGA_PRODUCT_CWU516 0x260C |
| 459 | #define YUGA_PRODUCT_CWU528 0x260D | 463 | #define YUGA_PRODUCT_CWU528 0x260D |
| 464 | #define YUGA_PRODUCT_CWU581 0x260E | ||
| 460 | #define YUGA_PRODUCT_CWU526 0x260F | 465 | #define YUGA_PRODUCT_CWU526 0x260F |
| 461 | 466 | #define YUGA_PRODUCT_CWU582 0x261F | |
| 462 | #define YUGA_PRODUCT_CLM600 0x2601 | 467 | #define YUGA_PRODUCT_CWU583 0x262F |
| 463 | #define YUGA_PRODUCT_CLM610 0x2602 | 468 | |
| 464 | #define YUGA_PRODUCT_CLM500 0x2603 | 469 | #define YUGA_PRODUCT_CLM600 0x3601 |
| 465 | #define YUGA_PRODUCT_CLM510 0x2604 | 470 | #define YUGA_PRODUCT_CLM610 0x3602 |
| 466 | #define YUGA_PRODUCT_CLM800 0x2605 | 471 | #define YUGA_PRODUCT_CLM500 0x3603 |
| 467 | #define YUGA_PRODUCT_CLM900 0x2606 | 472 | #define YUGA_PRODUCT_CLM510 0x3604 |
| 468 | 473 | #define YUGA_PRODUCT_CLM800 0x3605 | |
| 469 | #define YUGA_PRODUCT_CLU718 0x2607 | 474 | #define YUGA_PRODUCT_CLM900 0x3606 |
| 470 | #define YUGA_PRODUCT_CLU716 0x2608 | 475 | |
| 471 | #define YUGA_PRODUCT_CLU728 0x2609 | 476 | #define YUGA_PRODUCT_CLU718 0x3607 |
| 472 | #define YUGA_PRODUCT_CLU726 0x260A | 477 | #define YUGA_PRODUCT_CLU716 0x3608 |
| 473 | #define YUGA_PRODUCT_CLU518 0x260B | 478 | #define YUGA_PRODUCT_CLU728 0x3609 |
| 474 | #define YUGA_PRODUCT_CLU516 0x260C | 479 | #define YUGA_PRODUCT_CLU726 0x360A |
| 475 | #define YUGA_PRODUCT_CLU528 0x260D | 480 | #define YUGA_PRODUCT_CLU518 0x360B |
| 476 | #define YUGA_PRODUCT_CLU526 0x260F | 481 | #define YUGA_PRODUCT_CLU516 0x360C |
| 482 | #define YUGA_PRODUCT_CLU528 0x360D | ||
| 483 | #define YUGA_PRODUCT_CLU526 0x360F | ||
| 477 | 484 | ||
| 478 | /* Viettel products */ | 485 | /* Viettel products */ |
| 479 | #define VIETTEL_VENDOR_ID 0x2262 | 486 | #define VIETTEL_VENDOR_ID 0x2262 |
| @@ -666,6 +673,11 @@ static const struct usb_device_id option_ids[] = { | |||
| 666 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, | 673 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, |
| 667 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), | 674 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), |
| 668 | .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, | 675 | .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, |
| 676 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) }, | ||
| 677 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) }, | ||
| 678 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) }, | ||
| 679 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) }, | ||
| 680 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) }, | ||
| 669 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, | 681 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, |
| 670 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, | 682 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, |
| 671 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, | 683 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, |
| @@ -1209,6 +1221,11 @@ static const struct usb_device_id option_ids[] = { | |||
| 1209 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, | 1221 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, |
| 1210 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, | 1222 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, |
| 1211 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, | 1223 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, |
| 1224 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) }, | ||
| 1225 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) }, | ||
| 1226 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) }, | ||
| 1227 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) }, | ||
| 1228 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) }, | ||
| 1212 | { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, | 1229 | { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, |
| 1213 | { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, | 1230 | { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, |
| 1214 | { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ | 1231 | { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ |
| @@ -1245,7 +1262,7 @@ static struct usb_serial_driver option_1port_device = { | |||
| 1245 | .ioctl = usb_wwan_ioctl, | 1262 | .ioctl = usb_wwan_ioctl, |
| 1246 | .attach = usb_wwan_startup, | 1263 | .attach = usb_wwan_startup, |
| 1247 | .disconnect = usb_wwan_disconnect, | 1264 | .disconnect = usb_wwan_disconnect, |
| 1248 | .release = usb_wwan_release, | 1265 | .release = option_release, |
| 1249 | .read_int_callback = option_instat_callback, | 1266 | .read_int_callback = option_instat_callback, |
| 1250 | #ifdef CONFIG_PM | 1267 | #ifdef CONFIG_PM |
| 1251 | .suspend = usb_wwan_suspend, | 1268 | .suspend = usb_wwan_suspend, |
| @@ -1259,35 +1276,6 @@ static struct usb_serial_driver * const serial_drivers[] = { | |||
| 1259 | 1276 | ||
| 1260 | static bool debug; | 1277 | static bool debug; |
| 1261 | 1278 | ||
| 1262 | /* per port private data */ | ||
| 1263 | |||
| 1264 | #define N_IN_URB 4 | ||
| 1265 | #define N_OUT_URB 4 | ||
| 1266 | #define IN_BUFLEN 4096 | ||
| 1267 | #define OUT_BUFLEN 4096 | ||
| 1268 | |||
| 1269 | struct option_port_private { | ||
| 1270 | /* Input endpoints and buffer for this port */ | ||
| 1271 | struct urb *in_urbs[N_IN_URB]; | ||
| 1272 | u8 *in_buffer[N_IN_URB]; | ||
| 1273 | /* Output endpoints and buffer for this port */ | ||
| 1274 | struct urb *out_urbs[N_OUT_URB]; | ||
| 1275 | u8 *out_buffer[N_OUT_URB]; | ||
| 1276 | unsigned long out_busy; /* Bit vector of URBs in use */ | ||
| 1277 | int opened; | ||
| 1278 | struct usb_anchor delayed; | ||
| 1279 | |||
| 1280 | /* Settings for the port */ | ||
| 1281 | int rts_state; /* Handshaking pins (outputs) */ | ||
| 1282 | int dtr_state; | ||
| 1283 | int cts_state; /* Handshaking pins (inputs) */ | ||
| 1284 | int dsr_state; | ||
| 1285 | int dcd_state; | ||
| 1286 | int ri_state; | ||
| 1287 | |||
| 1288 | unsigned long tx_start_time[N_OUT_URB]; | ||
| 1289 | }; | ||
| 1290 | |||
| 1291 | module_usb_serial_driver(serial_drivers, option_ids); | 1279 | module_usb_serial_driver(serial_drivers, option_ids); |
| 1292 | 1280 | ||
| 1293 | static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason, | 1281 | static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason, |
| @@ -1356,12 +1344,22 @@ static int option_probe(struct usb_serial *serial, | |||
| 1356 | return 0; | 1344 | return 0; |
| 1357 | } | 1345 | } |
| 1358 | 1346 | ||
| 1347 | static void option_release(struct usb_serial *serial) | ||
| 1348 | { | ||
| 1349 | struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); | ||
| 1350 | |||
| 1351 | usb_wwan_release(serial); | ||
| 1352 | |||
| 1353 | kfree(priv); | ||
| 1354 | } | ||
| 1355 | |||
| 1359 | static void option_instat_callback(struct urb *urb) | 1356 | static void option_instat_callback(struct urb *urb) |
| 1360 | { | 1357 | { |
| 1361 | int err; | 1358 | int err; |
| 1362 | int status = urb->status; | 1359 | int status = urb->status; |
| 1363 | struct usb_serial_port *port = urb->context; | 1360 | struct usb_serial_port *port = urb->context; |
| 1364 | struct option_port_private *portdata = usb_get_serial_port_data(port); | 1361 | struct usb_wwan_port_private *portdata = |
| 1362 | usb_get_serial_port_data(port); | ||
| 1365 | 1363 | ||
| 1366 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); | 1364 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); |
| 1367 | 1365 | ||
| @@ -1421,7 +1419,7 @@ static int option_send_setup(struct usb_serial_port *port) | |||
| 1421 | struct usb_serial *serial = port->serial; | 1419 | struct usb_serial *serial = port->serial; |
| 1422 | struct usb_wwan_intf_private *intfdata = | 1420 | struct usb_wwan_intf_private *intfdata = |
| 1423 | (struct usb_wwan_intf_private *) serial->private; | 1421 | (struct usb_wwan_intf_private *) serial->private; |
| 1424 | struct option_port_private *portdata; | 1422 | struct usb_wwan_port_private *portdata; |
| 1425 | int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; | 1423 | int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; |
| 1426 | int val = 0; | 1424 | int val = 0; |
| 1427 | 1425 | ||
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 0d5fe59ebb9e..996015c5f1ac 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = { | |||
| 105 | {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ | 105 | {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ |
| 106 | {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ | 106 | {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ |
| 107 | {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ | 107 | {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ |
| 108 | {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
| 109 | {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
| 108 | {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ | 110 | {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ |
| 111 | {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
| 112 | {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ | ||
| 113 | {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
| 114 | {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ | ||
| 109 | {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ | 115 | {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ |
| 110 | {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ | 116 | {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ |
| 111 | { } /* Terminating entry */ | 117 | { } /* Terminating entry */ |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index ba54a0a8235c..d423d36acc04 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
| @@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = { | |||
| 294 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ | 294 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ |
| 295 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 295 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
| 296 | }, | 296 | }, |
| 297 | /* AT&T Direct IP LTE modems */ | ||
| 298 | { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), | ||
| 299 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | ||
| 300 | }, | ||
| 297 | { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ | 301 | { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ |
| 298 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 302 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
| 299 | }, | 303 | }, |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 6a1b609a0d94..27483f91a4a3 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
| @@ -659,12 +659,14 @@ exit: | |||
| 659 | static struct usb_serial_driver *search_serial_device( | 659 | static struct usb_serial_driver *search_serial_device( |
| 660 | struct usb_interface *iface) | 660 | struct usb_interface *iface) |
| 661 | { | 661 | { |
| 662 | const struct usb_device_id *id; | 662 | const struct usb_device_id *id = NULL; |
| 663 | struct usb_serial_driver *drv; | 663 | struct usb_serial_driver *drv; |
| 664 | struct usb_driver *driver = to_usb_driver(iface->dev.driver); | ||
| 664 | 665 | ||
| 665 | /* Check if the usb id matches a known device */ | 666 | /* Check if the usb id matches a known device */ |
| 666 | list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { | 667 | list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { |
| 667 | id = get_iface_id(drv, iface); | 668 | if (drv->usb_driver == driver) |
| 669 | id = get_iface_id(drv, iface); | ||
| 668 | if (id) | 670 | if (id) |
| 669 | return drv; | 671 | return drv; |
| 670 | } | 672 | } |
| @@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
| 755 | 757 | ||
| 756 | if (retval) { | 758 | if (retval) { |
| 757 | dbg("sub driver rejected device"); | 759 | dbg("sub driver rejected device"); |
| 758 | kfree(serial); | 760 | usb_serial_put(serial); |
| 759 | module_put(type->driver.owner); | 761 | module_put(type->driver.owner); |
| 760 | return retval; | 762 | return retval; |
| 761 | } | 763 | } |
| @@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
| 827 | */ | 829 | */ |
| 828 | if (num_bulk_in == 0 || num_bulk_out == 0) { | 830 | if (num_bulk_in == 0 || num_bulk_out == 0) { |
| 829 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); | 831 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); |
| 830 | kfree(serial); | 832 | usb_serial_put(serial); |
| 831 | module_put(type->driver.owner); | 833 | module_put(type->driver.owner); |
| 832 | return -ENODEV; | 834 | return -ENODEV; |
| 833 | } | 835 | } |
| @@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
| 841 | if (num_ports == 0) { | 843 | if (num_ports == 0) { |
| 842 | dev_err(&interface->dev, | 844 | dev_err(&interface->dev, |
| 843 | "Generic device with no bulk out, not allowed.\n"); | 845 | "Generic device with no bulk out, not allowed.\n"); |
| 844 | kfree(serial); | 846 | usb_serial_put(serial); |
| 845 | module_put(type->driver.owner); | 847 | module_put(type->driver.owner); |
| 846 | return -EIO; | 848 | return -EIO; |
| 847 | } | 849 | } |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1719886bb9be..caf22bf5f822 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -1107,6 +1107,13 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, | |||
| 1107 | USB_SC_RBC, USB_PR_BULK, NULL, | 1107 | USB_SC_RBC, USB_PR_BULK, NULL, |
| 1108 | 0 ), | 1108 | 0 ), |
| 1109 | 1109 | ||
| 1110 | /* Feiya QDI U2 DISK, reported by Hans de Goede <hdegoede@redhat.com> */ | ||
| 1111 | UNUSUAL_DEV( 0x090c, 0x1000, 0x0000, 0xffff, | ||
| 1112 | "Feiya", | ||
| 1113 | "QDI U2 DISK", | ||
| 1114 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 1115 | US_FL_NO_READ_CAPACITY_16 ), | ||
| 1116 | |||
| 1110 | /* aeb */ | 1117 | /* aeb */ |
| 1111 | UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, | 1118 | UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, |
| 1112 | "Feiya", | 1119 | "Feiya", |
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index fa2b03750316..2979292650d6 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig | |||
| @@ -88,7 +88,7 @@ config LCD_PLATFORM | |||
| 88 | 88 | ||
| 89 | config LCD_TOSA | 89 | config LCD_TOSA |
| 90 | tristate "Sharp SL-6000 LCD Driver" | 90 | tristate "Sharp SL-6000 LCD Driver" |
| 91 | depends on SPI && MACH_TOSA | 91 | depends on I2C && SPI && MACH_TOSA |
| 92 | help | 92 | help |
| 93 | If you have an Sharp SL-6000 Zaurus say Y to enable a driver | 93 | If you have an Sharp SL-6000 Zaurus say Y to enable a driver |
| 94 | for its LCD. | 94 | for its LCD. |
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c index 6c9399341bcf..9327cd1b3143 100644 --- a/drivers/video/backlight/ili9320.c +++ b/drivers/video/backlight/ili9320.c | |||
| @@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi, | |||
| 263 | 263 | ||
| 264 | EXPORT_SYMBOL_GPL(ili9320_probe_spi); | 264 | EXPORT_SYMBOL_GPL(ili9320_probe_spi); |
| 265 | 265 | ||
| 266 | int __devexit ili9320_remove(struct ili9320 *ili) | 266 | int ili9320_remove(struct ili9320 *ili) |
| 267 | { | 267 | { |
| 268 | ili9320_power(ili, FB_BLANK_POWERDOWN); | 268 | ili9320_power(ili, FB_BLANK_POWERDOWN); |
| 269 | 269 | ||
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c index 33ea874c87d2..9bdd4b0c18c8 100644 --- a/drivers/video/bfin_adv7393fb.c +++ b/drivers/video/bfin_adv7393fb.c | |||
| @@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off, | |||
| 353 | 353 | ||
| 354 | static int | 354 | static int |
| 355 | adv7393_write_proc(struct file *file, const char __user * buffer, | 355 | adv7393_write_proc(struct file *file, const char __user * buffer, |
| 356 | unsigned long count, void *data) | 356 | size_t count, void *data) |
| 357 | { | 357 | { |
| 358 | struct adv7393fb_device *fbdev = data; | 358 | struct adv7393fb_device *fbdev = data; |
| 359 | char line[8]; | ||
| 360 | unsigned int val; | 359 | unsigned int val; |
| 361 | int ret; | 360 | int ret; |
| 362 | 361 | ||
| 363 | ret = copy_from_user(line, buffer, count); | 362 | ret = kstrtouint_from_user(buffer, count, 0, &val); |
| 364 | if (ret) | 363 | if (ret) |
| 365 | return -EFAULT; | 364 | return -EFAULT; |
| 366 | 365 | ||
| 367 | val = simple_strtoul(line, NULL, 0); | ||
| 368 | adv7393_write(fbdev->client, val >> 8, val & 0xff); | 366 | adv7393_write(fbdev->client, val >> 8, val & 0xff); |
| 369 | 367 | ||
| 370 | return count; | 368 | return count; |
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c index 377dde3d5bfc..c95b417d0d41 100644 --- a/drivers/video/broadsheetfb.c +++ b/drivers/video/broadsheetfb.c | |||
| @@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev) | |||
| 1211 | 1211 | ||
| 1212 | static struct platform_driver broadsheetfb_driver = { | 1212 | static struct platform_driver broadsheetfb_driver = { |
| 1213 | .probe = broadsheetfb_probe, | 1213 | .probe = broadsheetfb_probe, |
| 1214 | .remove = broadsheetfb_remove, | 1214 | .remove = __devexit_p(broadsheetfb_remove), |
| 1215 | .driver = { | 1215 | .driver = { |
| 1216 | .owner = THIS_MODULE, | 1216 | .owner = THIS_MODULE, |
| 1217 | .name = "broadsheetfb", | 1217 | .name = "broadsheetfb", |
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index c2d11fef114b..e2c96d01d8f5 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig | |||
| @@ -224,5 +224,19 @@ config FONT_10x18 | |||
| 224 | big letters. It fits between the sun 12x22 and the normal 8x16 font. | 224 | big letters. It fits between the sun 12x22 and the normal 8x16 font. |
| 225 | If other fonts are too big or too small for you, say Y, otherwise say N. | 225 | If other fonts are too big or too small for you, say Y, otherwise say N. |
| 226 | 226 | ||
| 227 | config FONT_AUTOSELECT | ||
| 228 | def_bool y | ||
| 229 | depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON | ||
| 230 | depends on !FONT_8x8 | ||
| 231 | depends on !FONT_6x11 | ||
| 232 | depends on !FONT_7x14 | ||
| 233 | depends on !FONT_PEARL_8x8 | ||
| 234 | depends on !FONT_ACORN_8x8 | ||
| 235 | depends on !FONT_MINI_4x6 | ||
| 236 | depends on !FONT_SUN8x16 | ||
| 237 | depends on !FONT_SUN12x22 | ||
| 238 | depends on !FONT_10x18 | ||
| 239 | select FONT_8x16 | ||
| 240 | |||
| 227 | endmenu | 241 | endmenu |
| 228 | 242 | ||
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c index ab0a8e527333..85e4f44bfa61 100644 --- a/drivers/video/mbx/mbxfb.c +++ b/drivers/video/mbx/mbxfb.c | |||
| @@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev) | |||
| 1045 | 1045 | ||
| 1046 | static struct platform_driver mbxfb_driver = { | 1046 | static struct platform_driver mbxfb_driver = { |
| 1047 | .probe = mbxfb_probe, | 1047 | .probe = mbxfb_probe, |
| 1048 | .remove = mbxfb_remove, | 1048 | .remove = __devexit_p(mbxfb_remove), |
| 1049 | .suspend = mbxfb_suspend, | 1049 | .suspend = mbxfb_suspend, |
| 1050 | .resume = mbxfb_resume, | 1050 | .resume = mbxfb_resume, |
| 1051 | .driver = { | 1051 | .driver = { |
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index 2ce9992f403b..901576eb5a84 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c | |||
| @@ -526,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev, | |||
| 526 | { | 526 | { |
| 527 | struct omap_dss_device *dssdev = to_dss_device(dev); | 527 | struct omap_dss_device *dssdev = to_dss_device(dev); |
| 528 | struct taal_data *td = dev_get_drvdata(&dssdev->dev); | 528 | struct taal_data *td = dev_get_drvdata(&dssdev->dev); |
| 529 | u8 errors; | 529 | u8 errors = 0; |
| 530 | int r; | 530 | int r; |
| 531 | 531 | ||
| 532 | mutex_lock(&td->lock); | 532 | mutex_lock(&td->lock); |
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 72ded9cd2cb0..5066eee10ccf 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c | |||
| @@ -194,8 +194,7 @@ static inline int dss_initialize_debugfs(void) | |||
| 194 | static inline void dss_uninitialize_debugfs(void) | 194 | static inline void dss_uninitialize_debugfs(void) |
| 195 | { | 195 | { |
| 196 | } | 196 | } |
| 197 | static inline int dss_debugfs_create_file(const char *name, | 197 | int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) |
| 198 | void (*write)(struct seq_file *)) | ||
| 199 | { | 198 | { |
| 200 | return 0; | 199 | return 0; |
| 201 | } | 200 | } |
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index ec363d8390ed..ca8382d346e9 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c | |||
| @@ -3724,7 +3724,7 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs, | |||
| 3724 | /* CLKIN4DDR = 16 * TXBYTECLKHS */ | 3724 | /* CLKIN4DDR = 16 * TXBYTECLKHS */ |
| 3725 | tlp_avail = thsbyte_clk * (blank - trans_lp); | 3725 | tlp_avail = thsbyte_clk * (blank - trans_lp); |
| 3726 | 3726 | ||
| 3727 | ttxclkesc = tdsi_fclk / lp_clk_div; | 3727 | ttxclkesc = tdsi_fclk * lp_clk_div; |
| 3728 | 3728 | ||
| 3729 | lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - | 3729 | lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - |
| 3730 | 26) / 16; | 3730 | 26) / 16; |
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 6ea1ff149f6f..770632359a17 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
| @@ -731,7 +731,7 @@ static void dss_runtime_put(void) | |||
| 731 | DSSDBG("dss_runtime_put\n"); | 731 | DSSDBG("dss_runtime_put\n"); |
| 732 | 732 | ||
| 733 | r = pm_runtime_put_sync(&dss.pdev->dev); | 733 | r = pm_runtime_put_sync(&dss.pdev->dev); |
| 734 | WARN_ON(r < 0); | 734 | WARN_ON(r < 0 && r != -EBUSY); |
| 735 | } | 735 | } |
| 736 | 736 | ||
| 737 | /* DEBUGFS */ | 737 | /* DEBUGFS */ |
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index 5f9d8e69029e..ea7b661e7229 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c | |||
| @@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) | |||
| 361 | result = (unsigned int)tmp / 1000; | 361 | result = (unsigned int)tmp / 1000; |
| 362 | 362 | ||
| 363 | dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", | 363 | dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", |
| 364 | pixclk, clk, result, clk / result); | 364 | pixclk, clk, result, result ? clk / result : clk); |
| 365 | 365 | ||
| 366 | return result; | 366 | return result; |
| 367 | } | 367 | } |
| @@ -1348,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win) | |||
| 1348 | writel(0, regs + VIDOSD_A(win, sfb->variant)); | 1348 | writel(0, regs + VIDOSD_A(win, sfb->variant)); |
| 1349 | writel(0, regs + VIDOSD_B(win, sfb->variant)); | 1349 | writel(0, regs + VIDOSD_B(win, sfb->variant)); |
| 1350 | writel(0, regs + VIDOSD_C(win, sfb->variant)); | 1350 | writel(0, regs + VIDOSD_C(win, sfb->variant)); |
| 1351 | reg = readl(regs + SHADOWCON); | 1351 | |
| 1352 | writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON); | 1352 | if (sfb->variant.has_shadowcon) { |
| 1353 | reg = readl(sfb->regs + SHADOWCON); | ||
| 1354 | reg &= ~(SHADOWCON_WINx_PROTECT(win) | | ||
| 1355 | SHADOWCON_CHx_ENABLE(win) | | ||
| 1356 | SHADOWCON_CHx_LOCAL_ENABLE(win)); | ||
| 1357 | writel(reg, sfb->regs + SHADOWCON); | ||
| 1358 | } | ||
| 1353 | } | 1359 | } |
| 1354 | 1360 | ||
| 1355 | static int __devinit s3c_fb_probe(struct platform_device *pdev) | 1361 | static int __devinit s3c_fb_probe(struct platform_device *pdev) |
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c index cee7803a0a1c..f3d3b9ce4751 100644 --- a/drivers/video/savage/savagefb_driver.c +++ b/drivers/video/savage/savagefb_driver.c | |||
| @@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r | |||
| 1351 | /* following part not present in X11 driver */ | 1351 | /* following part not present in X11 driver */ |
| 1352 | cr67 = vga_in8(0x3d5, par) & 0xf; | 1352 | cr67 = vga_in8(0x3d5, par) & 0xf; |
| 1353 | vga_out8(0x3d5, 0x50 | cr67, par); | 1353 | vga_out8(0x3d5, 0x50 | cr67, par); |
| 1354 | udelay(10000); | 1354 | mdelay(10); |
| 1355 | vga_out8(0x3d4, 0x67, par); | 1355 | vga_out8(0x3d4, 0x67, par); |
| 1356 | /* end of part */ | 1356 | /* end of part */ |
| 1357 | vga_out8(0x3d5, reg->CR67 & ~0x0c, par); | 1357 | vga_out8(0x3d5, reg->CR67 & ~0x0c, par); |
| @@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
| 1904 | vga_out8(0x3d4, 0x66, par); | 1904 | vga_out8(0x3d4, 0x66, par); |
| 1905 | cr66 = vga_in8(0x3d5, par); | 1905 | cr66 = vga_in8(0x3d5, par); |
| 1906 | vga_out8(0x3d5, cr66 | 0x02, par); | 1906 | vga_out8(0x3d5, cr66 | 0x02, par); |
| 1907 | udelay(10000); | 1907 | mdelay(10); |
| 1908 | 1908 | ||
| 1909 | vga_out8(0x3d4, 0x66, par); | 1909 | vga_out8(0x3d4, 0x66, par); |
| 1910 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ | 1910 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ |
| 1911 | udelay(10000); | 1911 | mdelay(10); |
| 1912 | 1912 | ||
| 1913 | 1913 | ||
| 1914 | /* | 1914 | /* |
| @@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
| 1918 | vga_out8(0x3d4, 0x3f, par); | 1918 | vga_out8(0x3d4, 0x3f, par); |
| 1919 | cr3f = vga_in8(0x3d5, par); | 1919 | cr3f = vga_in8(0x3d5, par); |
| 1920 | vga_out8(0x3d5, cr3f | 0x08, par); | 1920 | vga_out8(0x3d5, cr3f | 0x08, par); |
| 1921 | udelay(10000); | 1921 | mdelay(10); |
| 1922 | 1922 | ||
| 1923 | vga_out8(0x3d4, 0x3f, par); | 1923 | vga_out8(0x3d4, 0x3f, par); |
| 1924 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ | 1924 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ |
| 1925 | udelay(10000); | 1925 | mdelay(10); |
| 1926 | 1926 | ||
| 1927 | /* Savage ramdac speeds */ | 1927 | /* Savage ramdac speeds */ |
| 1928 | par->numClocks = 4; | 1928 | par->numClocks = 4; |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 6908e4ce2a0d..7595581d032c 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
| 827 | handle_edge_irq, "event"); | 827 | handle_edge_irq, "event"); |
| 828 | 828 | ||
| 829 | xen_irq_info_evtchn_init(irq, evtchn); | 829 | xen_irq_info_evtchn_init(irq, evtchn); |
| 830 | } else { | ||
| 831 | struct irq_info *info = info_for_irq(irq); | ||
| 832 | WARN_ON(info == NULL || info->type != IRQT_EVTCHN); | ||
| 830 | } | 833 | } |
| 831 | 834 | ||
| 832 | out: | 835 | out: |
| @@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
| 862 | xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); | 865 | xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); |
| 863 | 866 | ||
| 864 | bind_evtchn_to_cpu(evtchn, cpu); | 867 | bind_evtchn_to_cpu(evtchn, cpu); |
| 868 | } else { | ||
| 869 | struct irq_info *info = info_for_irq(irq); | ||
| 870 | WARN_ON(info == NULL || info->type != IRQT_IPI); | ||
| 865 | } | 871 | } |
| 866 | 872 | ||
| 867 | out: | 873 | out: |
| @@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
| 939 | xen_irq_info_virq_init(cpu, irq, evtchn, virq); | 945 | xen_irq_info_virq_init(cpu, irq, evtchn, virq); |
| 940 | 946 | ||
| 941 | bind_evtchn_to_cpu(evtchn, cpu); | 947 | bind_evtchn_to_cpu(evtchn, cpu); |
| 948 | } else { | ||
| 949 | struct irq_info *info = info_for_irq(irq); | ||
| 950 | WARN_ON(info == NULL || info->type != IRQT_VIRQ); | ||
| 942 | } | 951 | } |
| 943 | 952 | ||
| 944 | out: | 953 | out: |
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index b84bf0b6cc34..18fff88254eb 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c | |||
| @@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev) | |||
| 59 | 59 | ||
| 60 | #ifdef CONFIG_ACPI | 60 | #ifdef CONFIG_ACPI |
| 61 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); | 61 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); |
| 62 | if (!handle) | 62 | if (!handle && pci_dev->bus->bridge) |
| 63 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); | 63 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); |
| 64 | #ifdef CONFIG_PCI_IOV | 64 | #ifdef CONFIG_PCI_IOV |
| 65 | if (!handle && pci_dev->is_virtfn) | 65 | if (!handle && pci_dev->is_virtfn) |
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index dcb79521e6c8..89f264c67420 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
| @@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | /* returns 0 if the page was successfully put into frontswap, -1 if not */ | 271 | /* returns 0 if the page was successfully put into frontswap, -1 if not */ |
| 272 | static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, | 272 | static int tmem_frontswap_store(unsigned type, pgoff_t offset, |
| 273 | struct page *page) | 273 | struct page *page) |
| 274 | { | 274 | { |
| 275 | u64 ind64 = (u64)offset; | 275 | u64 ind64 = (u64)offset; |
| @@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, | |||
| 295 | * returns 0 if the page was successfully gotten from frontswap, -1 if | 295 | * returns 0 if the page was successfully gotten from frontswap, -1 if |
| 296 | * was not present (should never happen!) | 296 | * was not present (should never happen!) |
| 297 | */ | 297 | */ |
| 298 | static int tmem_frontswap_get_page(unsigned type, pgoff_t offset, | 298 | static int tmem_frontswap_load(unsigned type, pgoff_t offset, |
| 299 | struct page *page) | 299 | struct page *page) |
| 300 | { | 300 | { |
| 301 | u64 ind64 = (u64)offset; | 301 | u64 ind64 = (u64)offset; |
| @@ -362,8 +362,8 @@ static int __init no_frontswap(char *s) | |||
| 362 | __setup("nofrontswap", no_frontswap); | 362 | __setup("nofrontswap", no_frontswap); |
| 363 | 363 | ||
| 364 | static struct frontswap_ops __initdata tmem_frontswap_ops = { | 364 | static struct frontswap_ops __initdata tmem_frontswap_ops = { |
| 365 | .put_page = tmem_frontswap_put_page, | 365 | .store = tmem_frontswap_store, |
| 366 | .get_page = tmem_frontswap_get_page, | 366 | .load = tmem_frontswap_load, |
| 367 | .invalidate_page = tmem_frontswap_flush_page, | 367 | .invalidate_page = tmem_frontswap_flush_page, |
| 368 | .invalidate_area = tmem_frontswap_flush_area, | 368 | .invalidate_area = tmem_frontswap_flush_area, |
| 369 | .init = tmem_frontswap_init | 369 | .init = tmem_frontswap_init |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 3f75895c919b..8f7d1237b7a0 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
| @@ -179,7 +179,8 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id, | |||
| 179 | 179 | ||
| 180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | 180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, |
| 181 | struct ulist *parents, int level, | 181 | struct ulist *parents, int level, |
| 182 | struct btrfs_key *key, u64 wanted_disk_byte, | 182 | struct btrfs_key *key, u64 time_seq, |
| 183 | u64 wanted_disk_byte, | ||
| 183 | const u64 *extent_item_pos) | 184 | const u64 *extent_item_pos) |
| 184 | { | 185 | { |
| 185 | int ret; | 186 | int ret; |
| @@ -212,7 +213,7 @@ add_parent: | |||
| 212 | */ | 213 | */ |
| 213 | while (1) { | 214 | while (1) { |
| 214 | eie = NULL; | 215 | eie = NULL; |
| 215 | ret = btrfs_next_leaf(root, path); | 216 | ret = btrfs_next_old_leaf(root, path, time_seq); |
| 216 | if (ret < 0) | 217 | if (ret < 0) |
| 217 | return ret; | 218 | return ret; |
| 218 | if (ret) | 219 | if (ret) |
| @@ -294,18 +295,10 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
| 294 | goto out; | 295 | goto out; |
| 295 | } | 296 | } |
| 296 | 297 | ||
| 297 | if (level == 0) { | 298 | if (level == 0) |
| 298 | if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) { | ||
| 299 | ret = btrfs_next_leaf(root, path); | ||
| 300 | if (ret) | ||
| 301 | goto out; | ||
| 302 | eb = path->nodes[0]; | ||
| 303 | } | ||
| 304 | |||
| 305 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); | 299 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); |
| 306 | } | ||
| 307 | 300 | ||
| 308 | ret = add_all_parents(root, path, parents, level, &key, | 301 | ret = add_all_parents(root, path, parents, level, &key, time_seq, |
| 309 | ref->wanted_disk_byte, extent_item_pos); | 302 | ref->wanted_disk_byte, extent_item_pos); |
| 310 | out: | 303 | out: |
| 311 | btrfs_free_path(path); | 304 | btrfs_free_path(path); |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index e616f8872e69..12394a90d60f 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #define BTRFS_INODE_IN_DEFRAG 3 | 37 | #define BTRFS_INODE_IN_DEFRAG 3 |
| 38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 | 38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 |
| 39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 | 39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 |
| 40 | #define BTRFS_INODE_HAS_ASYNC_EXTENT 6 | ||
| 40 | 41 | ||
| 41 | /* in memory btrfs inode */ | 42 | /* in memory btrfs inode */ |
| 42 | struct btrfs_inode { | 43 | struct btrfs_inode { |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 9cebb1fd6a3c..da6e9364a5e3 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
| @@ -93,6 +93,7 @@ | |||
| 93 | #include "print-tree.h" | 93 | #include "print-tree.h" |
| 94 | #include "locking.h" | 94 | #include "locking.h" |
| 95 | #include "check-integrity.h" | 95 | #include "check-integrity.h" |
| 96 | #include "rcu-string.h" | ||
| 96 | 97 | ||
| 97 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 | 98 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 |
| 98 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 | 99 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 |
| @@ -843,13 +844,14 @@ static int btrfsic_process_superblock_dev_mirror( | |||
| 843 | superblock_tmp->never_written = 0; | 844 | superblock_tmp->never_written = 0; |
| 844 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; | 845 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; |
| 845 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) | 846 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) |
| 846 | printk(KERN_INFO "New initial S-block (bdev %p, %s)" | 847 | printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)" |
| 847 | " @%llu (%s/%llu/%d)\n", | 848 | " @%llu (%s/%llu/%d)\n", |
| 848 | superblock_bdev, device->name, | 849 | superblock_bdev, |
| 849 | (unsigned long long)dev_bytenr, | 850 | rcu_str_deref(device->name), |
| 850 | dev_state->name, | 851 | (unsigned long long)dev_bytenr, |
| 851 | (unsigned long long)dev_bytenr, | 852 | dev_state->name, |
| 852 | superblock_mirror_num); | 853 | (unsigned long long)dev_bytenr, |
| 854 | superblock_mirror_num); | ||
| 853 | list_add(&superblock_tmp->all_blocks_node, | 855 | list_add(&superblock_tmp->all_blocks_node, |
| 854 | &state->all_blocks_list); | 856 | &state->all_blocks_list); |
| 855 | btrfsic_block_hashtable_add(superblock_tmp, | 857 | btrfsic_block_hashtable_add(superblock_tmp, |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d7a96cfdc50a..15cbc2bf4ff0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
| @@ -467,6 +467,15 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, | |||
| 467 | return 0; | 467 | return 0; |
| 468 | } | 468 | } |
| 469 | 469 | ||
| 470 | /* | ||
| 471 | * This allocates memory and gets a tree modification sequence number when | ||
| 472 | * needed. | ||
| 473 | * | ||
| 474 | * Returns 0 when no sequence number is needed, < 0 on error. | ||
| 475 | * Returns 1 when a sequence number was added. In this case, | ||
| 476 | * fs_info->tree_mod_seq_lock was acquired and must be released by the caller | ||
| 477 | * after inserting into the rb tree. | ||
| 478 | */ | ||
| 470 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | 479 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, |
| 471 | struct tree_mod_elem **tm_ret) | 480 | struct tree_mod_elem **tm_ret) |
| 472 | { | 481 | { |
| @@ -491,11 +500,11 @@ static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | |||
| 491 | */ | 500 | */ |
| 492 | kfree(tm); | 501 | kfree(tm); |
| 493 | seq = 0; | 502 | seq = 0; |
| 503 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
| 494 | } else { | 504 | } else { |
| 495 | __get_tree_mod_seq(fs_info, &tm->elem); | 505 | __get_tree_mod_seq(fs_info, &tm->elem); |
| 496 | seq = tm->elem.seq; | 506 | seq = tm->elem.seq; |
| 497 | } | 507 | } |
| 498 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
| 499 | 508 | ||
| 500 | return seq; | 509 | return seq; |
| 501 | } | 510 | } |
| @@ -521,7 +530,9 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info, | |||
| 521 | tm->slot = slot; | 530 | tm->slot = slot; |
| 522 | tm->generation = btrfs_node_ptr_generation(eb, slot); | 531 | tm->generation = btrfs_node_ptr_generation(eb, slot); |
| 523 | 532 | ||
| 524 | return __tree_mod_log_insert(fs_info, tm); | 533 | ret = __tree_mod_log_insert(fs_info, tm); |
| 534 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
| 535 | return ret; | ||
| 525 | } | 536 | } |
| 526 | 537 | ||
| 527 | static noinline int | 538 | static noinline int |
| @@ -559,7 +570,9 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, | |||
| 559 | tm->move.nr_items = nr_items; | 570 | tm->move.nr_items = nr_items; |
| 560 | tm->op = MOD_LOG_MOVE_KEYS; | 571 | tm->op = MOD_LOG_MOVE_KEYS; |
| 561 | 572 | ||
| 562 | return __tree_mod_log_insert(fs_info, tm); | 573 | ret = __tree_mod_log_insert(fs_info, tm); |
| 574 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
| 575 | return ret; | ||
| 563 | } | 576 | } |
| 564 | 577 | ||
| 565 | static noinline int | 578 | static noinline int |
| @@ -580,7 +593,9 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, | |||
| 580 | tm->generation = btrfs_header_generation(old_root); | 593 | tm->generation = btrfs_header_generation(old_root); |
| 581 | tm->op = MOD_LOG_ROOT_REPLACE; | 594 | tm->op = MOD_LOG_ROOT_REPLACE; |
| 582 | 595 | ||
| 583 | return __tree_mod_log_insert(fs_info, tm); | 596 | ret = __tree_mod_log_insert(fs_info, tm); |
| 597 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
| 598 | return ret; | ||
| 584 | } | 599 | } |
| 585 | 600 | ||
| 586 | static struct tree_mod_elem * | 601 | static struct tree_mod_elem * |
| @@ -1023,6 +1038,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info, | |||
| 1023 | looped = 1; | 1038 | looped = 1; |
| 1024 | } | 1039 | } |
| 1025 | 1040 | ||
| 1041 | /* if there's no old root to return, return what we found instead */ | ||
| 1042 | if (!found) | ||
| 1043 | found = tm; | ||
| 1044 | |||
| 1026 | return found; | 1045 | return found; |
| 1027 | } | 1046 | } |
| 1028 | 1047 | ||
| @@ -1143,22 +1162,36 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, | |||
| 1143 | return eb_rewin; | 1162 | return eb_rewin; |
| 1144 | } | 1163 | } |
| 1145 | 1164 | ||
| 1165 | /* | ||
| 1166 | * get_old_root() rewinds the state of @root's root node to the given @time_seq | ||
| 1167 | * value. If there are no changes, the current root->root_node is returned. If | ||
| 1168 | * anything changed in between, there's a fresh buffer allocated on which the | ||
| 1169 | * rewind operations are done. In any case, the returned buffer is read locked. | ||
| 1170 | * Returns NULL on error (with no locks held). | ||
| 1171 | */ | ||
| 1146 | static inline struct extent_buffer * | 1172 | static inline struct extent_buffer * |
| 1147 | get_old_root(struct btrfs_root *root, u64 time_seq) | 1173 | get_old_root(struct btrfs_root *root, u64 time_seq) |
| 1148 | { | 1174 | { |
| 1149 | struct tree_mod_elem *tm; | 1175 | struct tree_mod_elem *tm; |
| 1150 | struct extent_buffer *eb; | 1176 | struct extent_buffer *eb; |
| 1151 | struct tree_mod_root *old_root; | 1177 | struct tree_mod_root *old_root = NULL; |
| 1152 | u64 old_generation; | 1178 | u64 old_generation = 0; |
| 1179 | u64 logical; | ||
| 1153 | 1180 | ||
| 1181 | eb = btrfs_read_lock_root_node(root); | ||
| 1154 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); | 1182 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); |
| 1155 | if (!tm) | 1183 | if (!tm) |
| 1156 | return root->node; | 1184 | return root->node; |
| 1157 | 1185 | ||
| 1158 | old_root = &tm->old_root; | 1186 | if (tm->op == MOD_LOG_ROOT_REPLACE) { |
| 1159 | old_generation = tm->generation; | 1187 | old_root = &tm->old_root; |
| 1188 | old_generation = tm->generation; | ||
| 1189 | logical = old_root->logical; | ||
| 1190 | } else { | ||
| 1191 | logical = root->node->start; | ||
| 1192 | } | ||
| 1160 | 1193 | ||
| 1161 | tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq); | 1194 | tm = tree_mod_log_search(root->fs_info, logical, time_seq); |
| 1162 | /* | 1195 | /* |
| 1163 | * there was an item in the log when __tree_mod_log_oldest_root | 1196 | * there was an item in the log when __tree_mod_log_oldest_root |
| 1164 | * returned. this one must not go away, because the time_seq passed to | 1197 | * returned. this one must not go away, because the time_seq passed to |
| @@ -1166,22 +1199,25 @@ get_old_root(struct btrfs_root *root, u64 time_seq) | |||
| 1166 | */ | 1199 | */ |
| 1167 | BUG_ON(!tm); | 1200 | BUG_ON(!tm); |
| 1168 | 1201 | ||
| 1169 | if (old_root->logical == root->node->start) { | 1202 | if (old_root) |
| 1170 | /* there are logged operations for the current root */ | ||
| 1171 | eb = btrfs_clone_extent_buffer(root->node); | ||
| 1172 | } else { | ||
| 1173 | /* there's a root replace operation for the current root */ | ||
| 1174 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, | 1203 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, |
| 1175 | root->nodesize); | 1204 | root->nodesize); |
| 1205 | else | ||
| 1206 | eb = btrfs_clone_extent_buffer(root->node); | ||
| 1207 | btrfs_tree_read_unlock(root->node); | ||
| 1208 | free_extent_buffer(root->node); | ||
| 1209 | if (!eb) | ||
| 1210 | return NULL; | ||
| 1211 | btrfs_tree_read_lock(eb); | ||
| 1212 | if (old_root) { | ||
| 1176 | btrfs_set_header_bytenr(eb, eb->start); | 1213 | btrfs_set_header_bytenr(eb, eb->start); |
| 1177 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); | 1214 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); |
| 1178 | btrfs_set_header_owner(eb, root->root_key.objectid); | 1215 | btrfs_set_header_owner(eb, root->root_key.objectid); |
| 1216 | btrfs_set_header_level(eb, old_root->level); | ||
| 1217 | btrfs_set_header_generation(eb, old_generation); | ||
| 1179 | } | 1218 | } |
| 1180 | if (!eb) | ||
| 1181 | return NULL; | ||
| 1182 | btrfs_set_header_level(eb, old_root->level); | ||
| 1183 | btrfs_set_header_generation(eb, old_generation); | ||
| 1184 | __tree_mod_log_rewind(eb, time_seq, tm); | 1219 | __tree_mod_log_rewind(eb, time_seq, tm); |
| 1220 | extent_buffer_get(eb); | ||
| 1185 | 1221 | ||
| 1186 | return eb; | 1222 | return eb; |
| 1187 | } | 1223 | } |
| @@ -1650,8 +1686,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
| 1650 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) | 1686 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) |
| 1651 | return 0; | 1687 | return 0; |
| 1652 | 1688 | ||
| 1653 | btrfs_header_nritems(mid); | ||
| 1654 | |||
| 1655 | left = read_node_slot(root, parent, pslot - 1); | 1689 | left = read_node_slot(root, parent, pslot - 1); |
| 1656 | if (left) { | 1690 | if (left) { |
| 1657 | btrfs_tree_lock(left); | 1691 | btrfs_tree_lock(left); |
| @@ -1681,7 +1715,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
| 1681 | wret = push_node_left(trans, root, left, mid, 1); | 1715 | wret = push_node_left(trans, root, left, mid, 1); |
| 1682 | if (wret < 0) | 1716 | if (wret < 0) |
| 1683 | ret = wret; | 1717 | ret = wret; |
| 1684 | btrfs_header_nritems(mid); | ||
| 1685 | } | 1718 | } |
| 1686 | 1719 | ||
| 1687 | /* | 1720 | /* |
| @@ -2615,9 +2648,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, | |||
| 2615 | 2648 | ||
| 2616 | again: | 2649 | again: |
| 2617 | b = get_old_root(root, time_seq); | 2650 | b = get_old_root(root, time_seq); |
| 2618 | extent_buffer_get(b); | ||
| 2619 | level = btrfs_header_level(b); | 2651 | level = btrfs_header_level(b); |
| 2620 | btrfs_tree_read_lock(b); | ||
| 2621 | p->locks[level] = BTRFS_READ_LOCK; | 2652 | p->locks[level] = BTRFS_READ_LOCK; |
| 2622 | 2653 | ||
| 2623 | while (b) { | 2654 | while (b) { |
| @@ -5001,6 +5032,12 @@ next: | |||
| 5001 | */ | 5032 | */ |
| 5002 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) | 5033 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) |
| 5003 | { | 5034 | { |
| 5035 | return btrfs_next_old_leaf(root, path, 0); | ||
| 5036 | } | ||
| 5037 | |||
| 5038 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
| 5039 | u64 time_seq) | ||
| 5040 | { | ||
| 5004 | int slot; | 5041 | int slot; |
| 5005 | int level; | 5042 | int level; |
| 5006 | struct extent_buffer *c; | 5043 | struct extent_buffer *c; |
| @@ -5025,7 +5062,10 @@ again: | |||
| 5025 | path->keep_locks = 1; | 5062 | path->keep_locks = 1; |
| 5026 | path->leave_spinning = 1; | 5063 | path->leave_spinning = 1; |
| 5027 | 5064 | ||
| 5028 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 5065 | if (time_seq) |
| 5066 | ret = btrfs_search_old_slot(root, &key, path, time_seq); | ||
| 5067 | else | ||
| 5068 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
| 5029 | path->keep_locks = 0; | 5069 | path->keep_locks = 0; |
| 5030 | 5070 | ||
| 5031 | if (ret < 0) | 5071 | if (ret < 0) |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0236d03c6732..8b73b2d4deb7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -2753,6 +2753,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
| 2753 | } | 2753 | } |
| 2754 | 2754 | ||
| 2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
| 2756 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
| 2757 | u64 time_seq); | ||
| 2756 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) | 2758 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) |
| 2757 | { | 2759 | { |
| 2758 | ++p->slots[0]; | 2760 | ++p->slots[0]; |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index c18d0442ae6d..2399f4086915 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
| @@ -1879,3 +1879,21 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) | |||
| 1879 | } | 1879 | } |
| 1880 | } | 1880 | } |
| 1881 | } | 1881 | } |
| 1882 | |||
| 1883 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root) | ||
| 1884 | { | ||
| 1885 | struct btrfs_delayed_root *delayed_root; | ||
| 1886 | struct btrfs_delayed_node *curr_node, *prev_node; | ||
| 1887 | |||
| 1888 | delayed_root = btrfs_get_delayed_root(root); | ||
| 1889 | |||
| 1890 | curr_node = btrfs_first_delayed_node(delayed_root); | ||
| 1891 | while (curr_node) { | ||
| 1892 | __btrfs_kill_delayed_node(curr_node); | ||
| 1893 | |||
| 1894 | prev_node = curr_node; | ||
| 1895 | curr_node = btrfs_next_delayed_node(curr_node); | ||
| 1896 | btrfs_release_delayed_node(prev_node); | ||
| 1897 | } | ||
| 1898 | } | ||
| 1899 | |||
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 7083d08b2a21..f5aa4023d3e1 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h | |||
| @@ -124,6 +124,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev); | |||
| 124 | /* Used for drop dead root */ | 124 | /* Used for drop dead root */ |
| 125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); | 125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); |
| 126 | 126 | ||
| 127 | /* Used for clean the transaction */ | ||
| 128 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root); | ||
| 129 | |||
| 127 | /* Used for readdir() */ | 130 | /* Used for readdir() */ |
| 128 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 131 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, |
| 129 | struct list_head *del_list); | 132 | struct list_head *del_list); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7ae51decf6d3..e1890b1d3075 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -44,6 +44,7 @@ | |||
| 44 | #include "free-space-cache.h" | 44 | #include "free-space-cache.h" |
| 45 | #include "inode-map.h" | 45 | #include "inode-map.h" |
| 46 | #include "check-integrity.h" | 46 | #include "check-integrity.h" |
| 47 | #include "rcu-string.h" | ||
| 47 | 48 | ||
| 48 | static struct extent_io_ops btree_extent_io_ops; | 49 | static struct extent_io_ops btree_extent_io_ops; |
| 49 | static void end_workqueue_fn(struct btrfs_work *work); | 50 | static void end_workqueue_fn(struct btrfs_work *work); |
| @@ -2118,7 +2119,7 @@ int open_ctree(struct super_block *sb, | |||
| 2118 | 2119 | ||
| 2119 | features = btrfs_super_incompat_flags(disk_super); | 2120 | features = btrfs_super_incompat_flags(disk_super); |
| 2120 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; | 2121 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
| 2121 | if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) | 2122 | if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) |
| 2122 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; | 2123 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
| 2123 | 2124 | ||
| 2124 | /* | 2125 | /* |
| @@ -2575,8 +2576,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | |||
| 2575 | struct btrfs_device *device = (struct btrfs_device *) | 2576 | struct btrfs_device *device = (struct btrfs_device *) |
| 2576 | bh->b_private; | 2577 | bh->b_private; |
| 2577 | 2578 | ||
| 2578 | printk_ratelimited(KERN_WARNING "lost page write due to " | 2579 | printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to " |
| 2579 | "I/O error on %s\n", device->name); | 2580 | "I/O error on %s\n", |
| 2581 | rcu_str_deref(device->name)); | ||
| 2580 | /* note, we dont' set_buffer_write_io_error because we have | 2582 | /* note, we dont' set_buffer_write_io_error because we have |
| 2581 | * our own ways of dealing with the IO errors | 2583 | * our own ways of dealing with the IO errors |
| 2582 | */ | 2584 | */ |
| @@ -2749,8 +2751,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
| 2749 | wait_for_completion(&device->flush_wait); | 2751 | wait_for_completion(&device->flush_wait); |
| 2750 | 2752 | ||
| 2751 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { | 2753 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { |
| 2752 | printk("btrfs: disabling barriers on dev %s\n", | 2754 | printk_in_rcu("btrfs: disabling barriers on dev %s\n", |
| 2753 | device->name); | 2755 | rcu_str_deref(device->name)); |
| 2754 | device->nobarriers = 1; | 2756 | device->nobarriers = 1; |
| 2755 | } | 2757 | } |
| 2756 | if (!bio_flagged(bio, BIO_UPTODATE)) { | 2758 | if (!bio_flagged(bio, BIO_UPTODATE)) { |
| @@ -3400,7 +3402,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
| 3400 | 3402 | ||
| 3401 | delayed_refs = &trans->delayed_refs; | 3403 | delayed_refs = &trans->delayed_refs; |
| 3402 | 3404 | ||
| 3403 | again: | ||
| 3404 | spin_lock(&delayed_refs->lock); | 3405 | spin_lock(&delayed_refs->lock); |
| 3405 | if (delayed_refs->num_entries == 0) { | 3406 | if (delayed_refs->num_entries == 0) { |
| 3406 | spin_unlock(&delayed_refs->lock); | 3407 | spin_unlock(&delayed_refs->lock); |
| @@ -3408,31 +3409,36 @@ again: | |||
| 3408 | return ret; | 3409 | return ret; |
| 3409 | } | 3410 | } |
| 3410 | 3411 | ||
| 3411 | node = rb_first(&delayed_refs->root); | 3412 | while ((node = rb_first(&delayed_refs->root)) != NULL) { |
| 3412 | while (node) { | ||
| 3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | 3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); |
| 3414 | node = rb_next(node); | ||
| 3415 | |||
| 3416 | ref->in_tree = 0; | ||
| 3417 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
| 3418 | delayed_refs->num_entries--; | ||
| 3419 | 3414 | ||
| 3420 | atomic_set(&ref->refs, 1); | 3415 | atomic_set(&ref->refs, 1); |
| 3421 | if (btrfs_delayed_ref_is_head(ref)) { | 3416 | if (btrfs_delayed_ref_is_head(ref)) { |
| 3422 | struct btrfs_delayed_ref_head *head; | 3417 | struct btrfs_delayed_ref_head *head; |
| 3423 | 3418 | ||
| 3424 | head = btrfs_delayed_node_to_head(ref); | 3419 | head = btrfs_delayed_node_to_head(ref); |
| 3425 | spin_unlock(&delayed_refs->lock); | 3420 | if (!mutex_trylock(&head->mutex)) { |
| 3426 | mutex_lock(&head->mutex); | 3421 | atomic_inc(&ref->refs); |
| 3422 | spin_unlock(&delayed_refs->lock); | ||
| 3423 | |||
| 3424 | /* Need to wait for the delayed ref to run */ | ||
| 3425 | mutex_lock(&head->mutex); | ||
| 3426 | mutex_unlock(&head->mutex); | ||
| 3427 | btrfs_put_delayed_ref(ref); | ||
| 3428 | |||
| 3429 | continue; | ||
| 3430 | } | ||
| 3431 | |||
| 3427 | kfree(head->extent_op); | 3432 | kfree(head->extent_op); |
| 3428 | delayed_refs->num_heads--; | 3433 | delayed_refs->num_heads--; |
| 3429 | if (list_empty(&head->cluster)) | 3434 | if (list_empty(&head->cluster)) |
| 3430 | delayed_refs->num_heads_ready--; | 3435 | delayed_refs->num_heads_ready--; |
| 3431 | list_del_init(&head->cluster); | 3436 | list_del_init(&head->cluster); |
| 3432 | mutex_unlock(&head->mutex); | ||
| 3433 | btrfs_put_delayed_ref(ref); | ||
| 3434 | goto again; | ||
| 3435 | } | 3437 | } |
| 3438 | ref->in_tree = 0; | ||
| 3439 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
| 3440 | delayed_refs->num_entries--; | ||
| 3441 | |||
| 3436 | spin_unlock(&delayed_refs->lock); | 3442 | spin_unlock(&delayed_refs->lock); |
| 3437 | btrfs_put_delayed_ref(ref); | 3443 | btrfs_put_delayed_ref(ref); |
| 3438 | 3444 | ||
| @@ -3520,11 +3526,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
| 3520 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, | 3526 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, |
| 3521 | offset >> PAGE_CACHE_SHIFT); | 3527 | offset >> PAGE_CACHE_SHIFT); |
| 3522 | spin_unlock(&dirty_pages->buffer_lock); | 3528 | spin_unlock(&dirty_pages->buffer_lock); |
| 3523 | if (eb) { | 3529 | if (eb) |
| 3524 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, | 3530 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, |
| 3525 | &eb->bflags); | 3531 | &eb->bflags); |
| 3526 | atomic_set(&eb->refs, 1); | ||
| 3527 | } | ||
| 3528 | if (PageWriteback(page)) | 3532 | if (PageWriteback(page)) |
| 3529 | end_page_writeback(page); | 3533 | end_page_writeback(page); |
| 3530 | 3534 | ||
| @@ -3538,8 +3542,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
| 3538 | spin_unlock_irq(&page->mapping->tree_lock); | 3542 | spin_unlock_irq(&page->mapping->tree_lock); |
| 3539 | } | 3543 | } |
| 3540 | 3544 | ||
| 3541 | page->mapping->a_ops->invalidatepage(page, 0); | ||
| 3542 | unlock_page(page); | 3545 | unlock_page(page); |
| 3546 | page_cache_release(page); | ||
| 3543 | } | 3547 | } |
| 3544 | } | 3548 | } |
| 3545 | 3549 | ||
| @@ -3553,8 +3557,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
| 3553 | u64 start; | 3557 | u64 start; |
| 3554 | u64 end; | 3558 | u64 end; |
| 3555 | int ret; | 3559 | int ret; |
| 3560 | bool loop = true; | ||
| 3556 | 3561 | ||
| 3557 | unpin = pinned_extents; | 3562 | unpin = pinned_extents; |
| 3563 | again: | ||
| 3558 | while (1) { | 3564 | while (1) { |
| 3559 | ret = find_first_extent_bit(unpin, 0, &start, &end, | 3565 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
| 3560 | EXTENT_DIRTY); | 3566 | EXTENT_DIRTY); |
| @@ -3572,6 +3578,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
| 3572 | cond_resched(); | 3578 | cond_resched(); |
| 3573 | } | 3579 | } |
| 3574 | 3580 | ||
| 3581 | if (loop) { | ||
| 3582 | if (unpin == &root->fs_info->freed_extents[0]) | ||
| 3583 | unpin = &root->fs_info->freed_extents[1]; | ||
| 3584 | else | ||
| 3585 | unpin = &root->fs_info->freed_extents[0]; | ||
| 3586 | loop = false; | ||
| 3587 | goto again; | ||
| 3588 | } | ||
| 3589 | |||
| 3575 | return 0; | 3590 | return 0; |
| 3576 | } | 3591 | } |
| 3577 | 3592 | ||
| @@ -3585,21 +3600,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, | |||
| 3585 | /* FIXME: cleanup wait for commit */ | 3600 | /* FIXME: cleanup wait for commit */ |
| 3586 | cur_trans->in_commit = 1; | 3601 | cur_trans->in_commit = 1; |
| 3587 | cur_trans->blocked = 1; | 3602 | cur_trans->blocked = 1; |
| 3588 | if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) | 3603 | wake_up(&root->fs_info->transaction_blocked_wait); |
| 3589 | wake_up(&root->fs_info->transaction_blocked_wait); | ||
| 3590 | 3604 | ||
| 3591 | cur_trans->blocked = 0; | 3605 | cur_trans->blocked = 0; |
| 3592 | if (waitqueue_active(&root->fs_info->transaction_wait)) | 3606 | wake_up(&root->fs_info->transaction_wait); |
| 3593 | wake_up(&root->fs_info->transaction_wait); | ||
| 3594 | 3607 | ||
| 3595 | cur_trans->commit_done = 1; | 3608 | cur_trans->commit_done = 1; |
| 3596 | if (waitqueue_active(&cur_trans->commit_wait)) | 3609 | wake_up(&cur_trans->commit_wait); |
| 3597 | wake_up(&cur_trans->commit_wait); | 3610 | |
| 3611 | btrfs_destroy_delayed_inodes(root); | ||
| 3612 | btrfs_assert_delayed_root_empty(root); | ||
| 3598 | 3613 | ||
| 3599 | btrfs_destroy_pending_snapshots(cur_trans); | 3614 | btrfs_destroy_pending_snapshots(cur_trans); |
| 3600 | 3615 | ||
| 3601 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, | 3616 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, |
| 3602 | EXTENT_DIRTY); | 3617 | EXTENT_DIRTY); |
| 3618 | btrfs_destroy_pinned_extent(root, | ||
| 3619 | root->fs_info->pinned_extents); | ||
| 3603 | 3620 | ||
| 3604 | /* | 3621 | /* |
| 3605 | memset(cur_trans, 0, sizeof(*cur_trans)); | 3622 | memset(cur_trans, 0, sizeof(*cur_trans)); |
| @@ -3648,6 +3665,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
| 3648 | if (waitqueue_active(&t->commit_wait)) | 3665 | if (waitqueue_active(&t->commit_wait)) |
| 3649 | wake_up(&t->commit_wait); | 3666 | wake_up(&t->commit_wait); |
| 3650 | 3667 | ||
| 3668 | btrfs_destroy_delayed_inodes(root); | ||
| 3669 | btrfs_assert_delayed_root_empty(root); | ||
| 3670 | |||
| 3651 | btrfs_destroy_pending_snapshots(t); | 3671 | btrfs_destroy_pending_snapshots(t); |
| 3652 | 3672 | ||
| 3653 | btrfs_destroy_delalloc_inodes(root); | 3673 | btrfs_destroy_delalloc_inodes(root); |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2c8f7b204617..aaa12c1eb348 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include "volumes.h" | 20 | #include "volumes.h" |
| 21 | #include "check-integrity.h" | 21 | #include "check-integrity.h" |
| 22 | #include "locking.h" | 22 | #include "locking.h" |
| 23 | #include "rcu-string.h" | ||
| 23 | 24 | ||
| 24 | static struct kmem_cache *extent_state_cache; | 25 | static struct kmem_cache *extent_state_cache; |
| 25 | static struct kmem_cache *extent_buffer_cache; | 26 | static struct kmem_cache *extent_buffer_cache; |
| @@ -1917,9 +1918,9 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, | |||
| 1917 | return -EIO; | 1918 | return -EIO; |
| 1918 | } | 1919 | } |
| 1919 | 1920 | ||
| 1920 | printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s " | 1921 | printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu " |
| 1921 | "sector %llu)\n", page->mapping->host->i_ino, start, | 1922 | "(dev %s sector %llu)\n", page->mapping->host->i_ino, |
| 1922 | dev->name, sector); | 1923 | start, rcu_str_deref(dev->name), sector); |
| 1923 | 1924 | ||
| 1924 | bio_put(bio); | 1925 | bio_put(bio); |
| 1925 | return 0; | 1926 | return 0; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f6ab6f5e635a..a4f02501da40 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -830,7 +830,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
| 830 | if (IS_ERR(trans)) { | 830 | if (IS_ERR(trans)) { |
| 831 | extent_clear_unlock_delalloc(inode, | 831 | extent_clear_unlock_delalloc(inode, |
| 832 | &BTRFS_I(inode)->io_tree, | 832 | &BTRFS_I(inode)->io_tree, |
| 833 | start, end, NULL, | 833 | start, end, locked_page, |
| 834 | EXTENT_CLEAR_UNLOCK_PAGE | | 834 | EXTENT_CLEAR_UNLOCK_PAGE | |
| 835 | EXTENT_CLEAR_UNLOCK | | 835 | EXTENT_CLEAR_UNLOCK | |
| 836 | EXTENT_CLEAR_DELALLOC | | 836 | EXTENT_CLEAR_DELALLOC | |
| @@ -963,7 +963,7 @@ out: | |||
| 963 | out_unlock: | 963 | out_unlock: |
| 964 | extent_clear_unlock_delalloc(inode, | 964 | extent_clear_unlock_delalloc(inode, |
| 965 | &BTRFS_I(inode)->io_tree, | 965 | &BTRFS_I(inode)->io_tree, |
| 966 | start, end, NULL, | 966 | start, end, locked_page, |
| 967 | EXTENT_CLEAR_UNLOCK_PAGE | | 967 | EXTENT_CLEAR_UNLOCK_PAGE | |
| 968 | EXTENT_CLEAR_UNLOCK | | 968 | EXTENT_CLEAR_UNLOCK | |
| 969 | EXTENT_CLEAR_DELALLOC | | 969 | EXTENT_CLEAR_DELALLOC | |
| @@ -986,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work) | |||
| 986 | compress_file_range(async_cow->inode, async_cow->locked_page, | 986 | compress_file_range(async_cow->inode, async_cow->locked_page, |
| 987 | async_cow->start, async_cow->end, async_cow, | 987 | async_cow->start, async_cow->end, async_cow, |
| 988 | &num_added); | 988 | &num_added); |
| 989 | if (num_added == 0) | 989 | if (num_added == 0) { |
| 990 | iput(async_cow->inode); | ||
| 990 | async_cow->inode = NULL; | 991 | async_cow->inode = NULL; |
| 992 | } | ||
| 991 | } | 993 | } |
| 992 | 994 | ||
| 993 | /* | 995 | /* |
| @@ -1020,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work) | |||
| 1020 | { | 1022 | { |
| 1021 | struct async_cow *async_cow; | 1023 | struct async_cow *async_cow; |
| 1022 | async_cow = container_of(work, struct async_cow, work); | 1024 | async_cow = container_of(work, struct async_cow, work); |
| 1025 | if (async_cow->inode) | ||
| 1026 | iput(async_cow->inode); | ||
| 1023 | kfree(async_cow); | 1027 | kfree(async_cow); |
| 1024 | } | 1028 | } |
| 1025 | 1029 | ||
| @@ -1038,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, | |||
| 1038 | while (start < end) { | 1042 | while (start < end) { |
| 1039 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); | 1043 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); |
| 1040 | BUG_ON(!async_cow); /* -ENOMEM */ | 1044 | BUG_ON(!async_cow); /* -ENOMEM */ |
| 1041 | async_cow->inode = inode; | 1045 | async_cow->inode = igrab(inode); |
| 1042 | async_cow->root = root; | 1046 | async_cow->root = root; |
| 1043 | async_cow->locked_page = locked_page; | 1047 | async_cow->locked_page = locked_page; |
| 1044 | async_cow->start = start; | 1048 | async_cow->start = start; |
| @@ -1136,8 +1140,18 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
| 1136 | u64 ino = btrfs_ino(inode); | 1140 | u64 ino = btrfs_ino(inode); |
| 1137 | 1141 | ||
| 1138 | path = btrfs_alloc_path(); | 1142 | path = btrfs_alloc_path(); |
| 1139 | if (!path) | 1143 | if (!path) { |
| 1144 | extent_clear_unlock_delalloc(inode, | ||
| 1145 | &BTRFS_I(inode)->io_tree, | ||
| 1146 | start, end, locked_page, | ||
| 1147 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
| 1148 | EXTENT_CLEAR_UNLOCK | | ||
| 1149 | EXTENT_CLEAR_DELALLOC | | ||
| 1150 | EXTENT_CLEAR_DIRTY | | ||
| 1151 | EXTENT_SET_WRITEBACK | | ||
| 1152 | EXTENT_END_WRITEBACK); | ||
| 1140 | return -ENOMEM; | 1153 | return -ENOMEM; |
| 1154 | } | ||
| 1141 | 1155 | ||
| 1142 | nolock = btrfs_is_free_space_inode(root, inode); | 1156 | nolock = btrfs_is_free_space_inode(root, inode); |
| 1143 | 1157 | ||
| @@ -1147,6 +1161,15 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
| 1147 | trans = btrfs_join_transaction(root); | 1161 | trans = btrfs_join_transaction(root); |
| 1148 | 1162 | ||
| 1149 | if (IS_ERR(trans)) { | 1163 | if (IS_ERR(trans)) { |
| 1164 | extent_clear_unlock_delalloc(inode, | ||
| 1165 | &BTRFS_I(inode)->io_tree, | ||
| 1166 | start, end, locked_page, | ||
| 1167 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
| 1168 | EXTENT_CLEAR_UNLOCK | | ||
| 1169 | EXTENT_CLEAR_DELALLOC | | ||
| 1170 | EXTENT_CLEAR_DIRTY | | ||
| 1171 | EXTENT_SET_WRITEBACK | | ||
| 1172 | EXTENT_END_WRITEBACK); | ||
| 1150 | btrfs_free_path(path); | 1173 | btrfs_free_path(path); |
| 1151 | return PTR_ERR(trans); | 1174 | return PTR_ERR(trans); |
| 1152 | } | 1175 | } |
| @@ -1327,8 +1350,11 @@ out_check: | |||
| 1327 | } | 1350 | } |
| 1328 | btrfs_release_path(path); | 1351 | btrfs_release_path(path); |
| 1329 | 1352 | ||
| 1330 | if (cur_offset <= end && cow_start == (u64)-1) | 1353 | if (cur_offset <= end && cow_start == (u64)-1) { |
| 1331 | cow_start = cur_offset; | 1354 | cow_start = cur_offset; |
| 1355 | cur_offset = end; | ||
| 1356 | } | ||
| 1357 | |||
| 1332 | if (cow_start != (u64)-1) { | 1358 | if (cow_start != (u64)-1) { |
| 1333 | ret = cow_file_range(inode, locked_page, cow_start, end, | 1359 | ret = cow_file_range(inode, locked_page, cow_start, end, |
| 1334 | page_started, nr_written, 1); | 1360 | page_started, nr_written, 1); |
| @@ -1347,6 +1373,17 @@ error: | |||
| 1347 | if (!ret) | 1373 | if (!ret) |
| 1348 | ret = err; | 1374 | ret = err; |
| 1349 | 1375 | ||
| 1376 | if (ret && cur_offset < end) | ||
| 1377 | extent_clear_unlock_delalloc(inode, | ||
| 1378 | &BTRFS_I(inode)->io_tree, | ||
| 1379 | cur_offset, end, locked_page, | ||
| 1380 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
| 1381 | EXTENT_CLEAR_UNLOCK | | ||
| 1382 | EXTENT_CLEAR_DELALLOC | | ||
| 1383 | EXTENT_CLEAR_DIRTY | | ||
| 1384 | EXTENT_SET_WRITEBACK | | ||
| 1385 | EXTENT_END_WRITEBACK); | ||
| 1386 | |||
| 1350 | btrfs_free_path(path); | 1387 | btrfs_free_path(path); |
| 1351 | return ret; | 1388 | return ret; |
| 1352 | } | 1389 | } |
| @@ -1361,20 +1398,23 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, | |||
| 1361 | int ret; | 1398 | int ret; |
| 1362 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1399 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 1363 | 1400 | ||
| 1364 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) | 1401 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) { |
| 1365 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1402 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
| 1366 | page_started, 1, nr_written); | 1403 | page_started, 1, nr_written); |
| 1367 | else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) | 1404 | } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) { |
| 1368 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1405 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
| 1369 | page_started, 0, nr_written); | 1406 | page_started, 0, nr_written); |
| 1370 | else if (!btrfs_test_opt(root, COMPRESS) && | 1407 | } else if (!btrfs_test_opt(root, COMPRESS) && |
| 1371 | !(BTRFS_I(inode)->force_compress) && | 1408 | !(BTRFS_I(inode)->force_compress) && |
| 1372 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) | 1409 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) { |
| 1373 | ret = cow_file_range(inode, locked_page, start, end, | 1410 | ret = cow_file_range(inode, locked_page, start, end, |
| 1374 | page_started, nr_written, 1); | 1411 | page_started, nr_written, 1); |
| 1375 | else | 1412 | } else { |
| 1413 | set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
| 1414 | &BTRFS_I(inode)->runtime_flags); | ||
| 1376 | ret = cow_file_range_async(inode, locked_page, start, end, | 1415 | ret = cow_file_range_async(inode, locked_page, start, end, |
| 1377 | page_started, nr_written); | 1416 | page_started, nr_written); |
| 1417 | } | ||
| 1378 | return ret; | 1418 | return ret; |
| 1379 | } | 1419 | } |
| 1380 | 1420 | ||
| @@ -7054,10 +7094,13 @@ static void fixup_inode_flags(struct inode *dir, struct inode *inode) | |||
| 7054 | else | 7094 | else |
| 7055 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; | 7095 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; |
| 7056 | 7096 | ||
| 7057 | if (b_dir->flags & BTRFS_INODE_COMPRESS) | 7097 | if (b_dir->flags & BTRFS_INODE_COMPRESS) { |
| 7058 | b_inode->flags |= BTRFS_INODE_COMPRESS; | 7098 | b_inode->flags |= BTRFS_INODE_COMPRESS; |
| 7059 | else | 7099 | b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS; |
| 7060 | b_inode->flags &= ~BTRFS_INODE_COMPRESS; | 7100 | } else { |
| 7101 | b_inode->flags &= ~(BTRFS_INODE_COMPRESS | | ||
| 7102 | BTRFS_INODE_NOCOMPRESS); | ||
| 7103 | } | ||
| 7061 | } | 7104 | } |
| 7062 | 7105 | ||
| 7063 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | 7106 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 24b776c08d99..0e92e5763005 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #include "locking.h" | 52 | #include "locking.h" |
| 53 | #include "inode-map.h" | 53 | #include "inode-map.h" |
| 54 | #include "backref.h" | 54 | #include "backref.h" |
| 55 | #include "rcu-string.h" | ||
| 55 | 56 | ||
| 56 | /* Mask out flags that are inappropriate for the given type of inode. */ | 57 | /* Mask out flags that are inappropriate for the given type of inode. */ |
| 57 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) | 58 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
| @@ -785,39 +786,57 @@ none: | |||
| 785 | return -ENOENT; | 786 | return -ENOENT; |
| 786 | } | 787 | } |
| 787 | 788 | ||
| 788 | /* | 789 | static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) |
| 789 | * Validaty check of prev em and next em: | ||
| 790 | * 1) no prev/next em | ||
| 791 | * 2) prev/next em is an hole/inline extent | ||
| 792 | */ | ||
| 793 | static int check_adjacent_extents(struct inode *inode, struct extent_map *em) | ||
| 794 | { | 790 | { |
| 795 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 791 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| 796 | struct extent_map *prev = NULL, *next = NULL; | 792 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
| 797 | int ret = 0; | 793 | struct extent_map *em; |
| 794 | u64 len = PAGE_CACHE_SIZE; | ||
| 798 | 795 | ||
| 796 | /* | ||
| 797 | * hopefully we have this extent in the tree already, try without | ||
| 798 | * the full extent lock | ||
| 799 | */ | ||
| 799 | read_lock(&em_tree->lock); | 800 | read_lock(&em_tree->lock); |
| 800 | prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1); | 801 | em = lookup_extent_mapping(em_tree, start, len); |
| 801 | next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1); | ||
| 802 | read_unlock(&em_tree->lock); | 802 | read_unlock(&em_tree->lock); |
| 803 | 803 | ||
| 804 | if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) && | 804 | if (!em) { |
| 805 | (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)) | 805 | /* get the big lock and read metadata off disk */ |
| 806 | ret = 1; | 806 | lock_extent(io_tree, start, start + len - 1); |
| 807 | free_extent_map(prev); | 807 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); |
| 808 | free_extent_map(next); | 808 | unlock_extent(io_tree, start, start + len - 1); |
| 809 | |||
| 810 | if (IS_ERR(em)) | ||
| 811 | return NULL; | ||
| 812 | } | ||
| 813 | |||
| 814 | return em; | ||
| 815 | } | ||
| 816 | |||
| 817 | static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) | ||
| 818 | { | ||
| 819 | struct extent_map *next; | ||
| 820 | bool ret = true; | ||
| 809 | 821 | ||
| 822 | /* this is the last extent */ | ||
| 823 | if (em->start + em->len >= i_size_read(inode)) | ||
| 824 | return false; | ||
| 825 | |||
| 826 | next = defrag_lookup_extent(inode, em->start + em->len); | ||
| 827 | if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) | ||
| 828 | ret = false; | ||
| 829 | |||
| 830 | free_extent_map(next); | ||
| 810 | return ret; | 831 | return ret; |
| 811 | } | 832 | } |
| 812 | 833 | ||
| 813 | static int should_defrag_range(struct inode *inode, u64 start, u64 len, | 834 | static int should_defrag_range(struct inode *inode, u64 start, int thresh, |
| 814 | int thresh, u64 *last_len, u64 *skip, | 835 | u64 *last_len, u64 *skip, u64 *defrag_end) |
| 815 | u64 *defrag_end) | ||
| 816 | { | 836 | { |
| 817 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 837 | struct extent_map *em; |
| 818 | struct extent_map *em = NULL; | ||
| 819 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
| 820 | int ret = 1; | 838 | int ret = 1; |
| 839 | bool next_mergeable = true; | ||
| 821 | 840 | ||
| 822 | /* | 841 | /* |
| 823 | * make sure that once we start defragging an extent, we keep on | 842 | * make sure that once we start defragging an extent, we keep on |
| @@ -828,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
| 828 | 847 | ||
| 829 | *skip = 0; | 848 | *skip = 0; |
| 830 | 849 | ||
| 831 | /* | 850 | em = defrag_lookup_extent(inode, start); |
| 832 | * hopefully we have this extent in the tree already, try without | 851 | if (!em) |
| 833 | * the full extent lock | 852 | return 0; |
| 834 | */ | ||
| 835 | read_lock(&em_tree->lock); | ||
| 836 | em = lookup_extent_mapping(em_tree, start, len); | ||
| 837 | read_unlock(&em_tree->lock); | ||
| 838 | |||
| 839 | if (!em) { | ||
| 840 | /* get the big lock and read metadata off disk */ | ||
| 841 | lock_extent(io_tree, start, start + len - 1); | ||
| 842 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | ||
| 843 | unlock_extent(io_tree, start, start + len - 1); | ||
| 844 | |||
| 845 | if (IS_ERR(em)) | ||
| 846 | return 0; | ||
| 847 | } | ||
| 848 | 853 | ||
| 849 | /* this will cover holes, and inline extents */ | 854 | /* this will cover holes, and inline extents */ |
| 850 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { | 855 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { |
| @@ -852,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
| 852 | goto out; | 857 | goto out; |
| 853 | } | 858 | } |
| 854 | 859 | ||
| 855 | /* If we have nothing to merge with us, just skip. */ | 860 | next_mergeable = defrag_check_next_extent(inode, em); |
| 856 | if (check_adjacent_extents(inode, em)) { | ||
| 857 | ret = 0; | ||
| 858 | goto out; | ||
| 859 | } | ||
| 860 | 861 | ||
| 861 | /* | 862 | /* |
| 862 | * we hit a real extent, if it is big don't bother defragging it again | 863 | * we hit a real extent, if it is big or the next extent is not a |
| 864 | * real extent, don't bother defragging it | ||
| 863 | */ | 865 | */ |
| 864 | if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) | 866 | if ((*last_len == 0 || *last_len >= thresh) && |
| 867 | (em->len >= thresh || !next_mergeable)) | ||
| 865 | ret = 0; | 868 | ret = 0; |
| 866 | |||
| 867 | out: | 869 | out: |
| 868 | /* | 870 | /* |
| 869 | * last_len ends up being a counter of how many bytes we've defragged. | 871 | * last_len ends up being a counter of how many bytes we've defragged. |
| @@ -1142,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, | |||
| 1142 | break; | 1144 | break; |
| 1143 | 1145 | ||
| 1144 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, | 1146 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, |
| 1145 | PAGE_CACHE_SIZE, extent_thresh, | 1147 | extent_thresh, &last_len, &skip, |
| 1146 | &last_len, &skip, &defrag_end)) { | 1148 | &defrag_end)) { |
| 1147 | unsigned long next; | 1149 | unsigned long next; |
| 1148 | /* | 1150 | /* |
| 1149 | * the should_defrag function tells us how much to skip | 1151 | * the should_defrag function tells us how much to skip |
| @@ -1304,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
| 1304 | ret = -EINVAL; | 1306 | ret = -EINVAL; |
| 1305 | goto out_free; | 1307 | goto out_free; |
| 1306 | } | 1308 | } |
| 1309 | if (device->fs_devices && device->fs_devices->seeding) { | ||
| 1310 | printk(KERN_INFO "btrfs: resizer unable to apply on " | ||
| 1311 | "seeding device %llu\n", | ||
| 1312 | (unsigned long long)devid); | ||
| 1313 | ret = -EINVAL; | ||
| 1314 | goto out_free; | ||
| 1315 | } | ||
| 1316 | |||
| 1307 | if (!strcmp(sizestr, "max")) | 1317 | if (!strcmp(sizestr, "max")) |
| 1308 | new_size = device->bdev->bd_inode->i_size; | 1318 | new_size = device->bdev->bd_inode->i_size; |
| 1309 | else { | 1319 | else { |
| @@ -1345,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
| 1345 | do_div(new_size, root->sectorsize); | 1355 | do_div(new_size, root->sectorsize); |
| 1346 | new_size *= root->sectorsize; | 1356 | new_size *= root->sectorsize; |
| 1347 | 1357 | ||
| 1348 | printk(KERN_INFO "btrfs: new size for %s is %llu\n", | 1358 | printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n", |
| 1349 | device->name, (unsigned long long)new_size); | 1359 | rcu_str_deref(device->name), |
| 1360 | (unsigned long long)new_size); | ||
| 1350 | 1361 | ||
| 1351 | if (new_size > old_size) { | 1362 | if (new_size > old_size) { |
| 1352 | trans = btrfs_start_transaction(root, 0); | 1363 | trans = btrfs_start_transaction(root, 0); |
| @@ -2264,7 +2275,12 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) | |||
| 2264 | di_args->total_bytes = dev->total_bytes; | 2275 | di_args->total_bytes = dev->total_bytes; |
| 2265 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); | 2276 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); |
| 2266 | if (dev->name) { | 2277 | if (dev->name) { |
| 2267 | strncpy(di_args->path, dev->name, sizeof(di_args->path)); | 2278 | struct rcu_string *name; |
| 2279 | |||
| 2280 | rcu_read_lock(); | ||
| 2281 | name = rcu_dereference(dev->name); | ||
| 2282 | strncpy(di_args->path, name->str, sizeof(di_args->path)); | ||
| 2283 | rcu_read_unlock(); | ||
| 2268 | di_args->path[sizeof(di_args->path) - 1] = 0; | 2284 | di_args->path[sizeof(di_args->path) - 1] = 0; |
| 2269 | } else { | 2285 | } else { |
| 2270 | di_args->path[0] = '\0'; | 2286 | di_args->path[0] = '\0'; |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 9e138cdc36c5..643335a4fe3c 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
| @@ -627,7 +627,27 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) | |||
| 627 | /* start IO across the range first to instantiate any delalloc | 627 | /* start IO across the range first to instantiate any delalloc |
| 628 | * extents | 628 | * extents |
| 629 | */ | 629 | */ |
| 630 | filemap_write_and_wait_range(inode->i_mapping, start, orig_end); | 630 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); |
| 631 | |||
| 632 | /* | ||
| 633 | * So with compression we will find and lock a dirty page and clear the | ||
| 634 | * first one as dirty, setup an async extent, and immediately return | ||
| 635 | * with the entire range locked but with nobody actually marked with | ||
| 636 | * writeback. So we can't just filemap_write_and_wait_range() and | ||
| 637 | * expect it to work since it will just kick off a thread to do the | ||
| 638 | * actual work. So we need to call filemap_fdatawrite_range _again_ | ||
| 639 | * since it will wait on the page lock, which won't be unlocked until | ||
| 640 | * after the pages have been marked as writeback and so we're good to go | ||
| 641 | * from there. We have to do this otherwise we'll miss the ordered | ||
| 642 | * extents and that results in badness. Please Josef, do not think you | ||
| 643 | * know better and pull this out at some point in the future, it is | ||
| 644 | * right and you are wrong. | ||
| 645 | */ | ||
| 646 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
| 647 | &BTRFS_I(inode)->runtime_flags)) | ||
| 648 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); | ||
| 649 | |||
| 650 | filemap_fdatawait_range(inode->i_mapping, start, orig_end); | ||
| 631 | 651 | ||
| 632 | end = orig_end; | 652 | end = orig_end; |
| 633 | found = 0; | 653 | found = 0; |
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h new file mode 100644 index 000000000000..9e111e4576d4 --- /dev/null +++ b/fs/btrfs/rcu-string.h | |||
| @@ -0,0 +1,56 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2012 Red Hat. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of the GNU General Public | ||
| 6 | * License v2 as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public | ||
| 14 | * License along with this program; if not, write to the | ||
| 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
| 16 | * Boston, MA 021110-1307, USA. | ||
| 17 | */ | ||
| 18 | |||
| 19 | struct rcu_string { | ||
| 20 | struct rcu_head rcu; | ||
| 21 | char str[0]; | ||
| 22 | }; | ||
| 23 | |||
| 24 | static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) | ||
| 25 | { | ||
| 26 | size_t len = strlen(src) + 1; | ||
| 27 | struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) + | ||
| 28 | (len * sizeof(char)), mask); | ||
| 29 | if (!ret) | ||
| 30 | return ret; | ||
| 31 | strncpy(ret->str, src, len); | ||
| 32 | return ret; | ||
| 33 | } | ||
| 34 | |||
| 35 | static inline void rcu_string_free(struct rcu_string *str) | ||
| 36 | { | ||
| 37 | if (str) | ||
| 38 | kfree_rcu(str, rcu); | ||
| 39 | } | ||
| 40 | |||
| 41 | #define printk_in_rcu(fmt, ...) do { \ | ||
| 42 | rcu_read_lock(); \ | ||
| 43 | printk(fmt, __VA_ARGS__); \ | ||
| 44 | rcu_read_unlock(); \ | ||
| 45 | } while (0) | ||
| 46 | |||
| 47 | #define printk_ratelimited_in_rcu(fmt, ...) do { \ | ||
| 48 | rcu_read_lock(); \ | ||
| 49 | printk_ratelimited(fmt, __VA_ARGS__); \ | ||
| 50 | rcu_read_unlock(); \ | ||
| 51 | } while (0) | ||
| 52 | |||
| 53 | #define rcu_str_deref(rcu_str) ({ \ | ||
| 54 | struct rcu_string *__str = rcu_dereference(rcu_str); \ | ||
| 55 | __str->str; \ | ||
| 56 | }) | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index a38cfa4f251e..b223620cd5a6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include "backref.h" | 26 | #include "backref.h" |
| 27 | #include "extent_io.h" | 27 | #include "extent_io.h" |
| 28 | #include "check-integrity.h" | 28 | #include "check-integrity.h" |
| 29 | #include "rcu-string.h" | ||
| 29 | 30 | ||
| 30 | /* | 31 | /* |
| 31 | * This is only the first step towards a full-features scrub. It reads all | 32 | * This is only the first step towards a full-features scrub. It reads all |
| @@ -320,10 +321,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
| 320 | * hold all of the paths here | 321 | * hold all of the paths here |
| 321 | */ | 322 | */ |
| 322 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) | 323 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) |
| 323 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 324 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
| 324 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " | 325 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " |
| 325 | "length %llu, links %u (path: %s)\n", swarn->errstr, | 326 | "length %llu, links %u (path: %s)\n", swarn->errstr, |
| 326 | swarn->logical, swarn->dev->name, | 327 | swarn->logical, rcu_str_deref(swarn->dev->name), |
| 327 | (unsigned long long)swarn->sector, root, inum, offset, | 328 | (unsigned long long)swarn->sector, root, inum, offset, |
| 328 | min(isize - offset, (u64)PAGE_SIZE), nlink, | 329 | min(isize - offset, (u64)PAGE_SIZE), nlink, |
| 329 | (char *)(unsigned long)ipath->fspath->val[i]); | 330 | (char *)(unsigned long)ipath->fspath->val[i]); |
| @@ -332,10 +333,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
| 332 | return 0; | 333 | return 0; |
| 333 | 334 | ||
| 334 | err: | 335 | err: |
| 335 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 336 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
| 336 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " | 337 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " |
| 337 | "resolving failed with ret=%d\n", swarn->errstr, | 338 | "resolving failed with ret=%d\n", swarn->errstr, |
| 338 | swarn->logical, swarn->dev->name, | 339 | swarn->logical, rcu_str_deref(swarn->dev->name), |
| 339 | (unsigned long long)swarn->sector, root, inum, offset, ret); | 340 | (unsigned long long)swarn->sector, root, inum, offset, ret); |
| 340 | 341 | ||
| 341 | free_ipath(ipath); | 342 | free_ipath(ipath); |
| @@ -390,10 +391,11 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
| 390 | do { | 391 | do { |
| 391 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, | 392 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, |
| 392 | &ref_root, &ref_level); | 393 | &ref_root, &ref_level); |
| 393 | printk(KERN_WARNING | 394 | printk_in_rcu(KERN_WARNING |
| 394 | "btrfs: %s at logical %llu on dev %s, " | 395 | "btrfs: %s at logical %llu on dev %s, " |
| 395 | "sector %llu: metadata %s (level %d) in tree " | 396 | "sector %llu: metadata %s (level %d) in tree " |
| 396 | "%llu\n", errstr, swarn.logical, dev->name, | 397 | "%llu\n", errstr, swarn.logical, |
| 398 | rcu_str_deref(dev->name), | ||
| 397 | (unsigned long long)swarn.sector, | 399 | (unsigned long long)swarn.sector, |
| 398 | ref_level ? "node" : "leaf", | 400 | ref_level ? "node" : "leaf", |
| 399 | ret < 0 ? -1 : ref_level, | 401 | ret < 0 ? -1 : ref_level, |
| @@ -580,9 +582,11 @@ out: | |||
| 580 | spin_lock(&sdev->stat_lock); | 582 | spin_lock(&sdev->stat_lock); |
| 581 | ++sdev->stat.uncorrectable_errors; | 583 | ++sdev->stat.uncorrectable_errors; |
| 582 | spin_unlock(&sdev->stat_lock); | 584 | spin_unlock(&sdev->stat_lock); |
| 583 | printk_ratelimited(KERN_ERR | 585 | |
| 586 | printk_ratelimited_in_rcu(KERN_ERR | ||
| 584 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", | 587 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", |
| 585 | (unsigned long long)fixup->logical, sdev->dev->name); | 588 | (unsigned long long)fixup->logical, |
| 589 | rcu_str_deref(sdev->dev->name)); | ||
| 586 | } | 590 | } |
| 587 | 591 | ||
| 588 | btrfs_free_path(path); | 592 | btrfs_free_path(path); |
| @@ -936,18 +940,20 @@ corrected_error: | |||
| 936 | spin_lock(&sdev->stat_lock); | 940 | spin_lock(&sdev->stat_lock); |
| 937 | sdev->stat.corrected_errors++; | 941 | sdev->stat.corrected_errors++; |
| 938 | spin_unlock(&sdev->stat_lock); | 942 | spin_unlock(&sdev->stat_lock); |
| 939 | printk_ratelimited(KERN_ERR | 943 | printk_ratelimited_in_rcu(KERN_ERR |
| 940 | "btrfs: fixed up error at logical %llu on dev %s\n", | 944 | "btrfs: fixed up error at logical %llu on dev %s\n", |
| 941 | (unsigned long long)logical, sdev->dev->name); | 945 | (unsigned long long)logical, |
| 946 | rcu_str_deref(sdev->dev->name)); | ||
| 942 | } | 947 | } |
| 943 | } else { | 948 | } else { |
| 944 | did_not_correct_error: | 949 | did_not_correct_error: |
| 945 | spin_lock(&sdev->stat_lock); | 950 | spin_lock(&sdev->stat_lock); |
| 946 | sdev->stat.uncorrectable_errors++; | 951 | sdev->stat.uncorrectable_errors++; |
| 947 | spin_unlock(&sdev->stat_lock); | 952 | spin_unlock(&sdev->stat_lock); |
| 948 | printk_ratelimited(KERN_ERR | 953 | printk_ratelimited_in_rcu(KERN_ERR |
| 949 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", | 954 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", |
| 950 | (unsigned long long)logical, sdev->dev->name); | 955 | (unsigned long long)logical, |
| 956 | rcu_str_deref(sdev->dev->name)); | ||
| 951 | } | 957 | } |
| 952 | 958 | ||
| 953 | out: | 959 | out: |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 96eb9fef7bd2..0eb9a4da069e 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
| @@ -54,6 +54,7 @@ | |||
| 54 | #include "version.h" | 54 | #include "version.h" |
| 55 | #include "export.h" | 55 | #include "export.h" |
| 56 | #include "compression.h" | 56 | #include "compression.h" |
| 57 | #include "rcu-string.h" | ||
| 57 | 58 | ||
| 58 | #define CREATE_TRACE_POINTS | 59 | #define CREATE_TRACE_POINTS |
| 59 | #include <trace/events/btrfs.h> | 60 | #include <trace/events/btrfs.h> |
| @@ -1482,12 +1483,44 @@ static void btrfs_fs_dirty_inode(struct inode *inode, int flags) | |||
| 1482 | "error %d\n", btrfs_ino(inode), ret); | 1483 | "error %d\n", btrfs_ino(inode), ret); |
| 1483 | } | 1484 | } |
| 1484 | 1485 | ||
| 1486 | static int btrfs_show_devname(struct seq_file *m, struct dentry *root) | ||
| 1487 | { | ||
| 1488 | struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); | ||
| 1489 | struct btrfs_fs_devices *cur_devices; | ||
| 1490 | struct btrfs_device *dev, *first_dev = NULL; | ||
| 1491 | struct list_head *head; | ||
| 1492 | struct rcu_string *name; | ||
| 1493 | |||
| 1494 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | ||
| 1495 | cur_devices = fs_info->fs_devices; | ||
| 1496 | while (cur_devices) { | ||
| 1497 | head = &cur_devices->devices; | ||
| 1498 | list_for_each_entry(dev, head, dev_list) { | ||
| 1499 | if (!first_dev || dev->devid < first_dev->devid) | ||
| 1500 | first_dev = dev; | ||
| 1501 | } | ||
| 1502 | cur_devices = cur_devices->seed; | ||
| 1503 | } | ||
| 1504 | |||
| 1505 | if (first_dev) { | ||
| 1506 | rcu_read_lock(); | ||
| 1507 | name = rcu_dereference(first_dev->name); | ||
| 1508 | seq_escape(m, name->str, " \t\n\\"); | ||
| 1509 | rcu_read_unlock(); | ||
| 1510 | } else { | ||
| 1511 | WARN_ON(1); | ||
| 1512 | } | ||
| 1513 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
| 1514 | return 0; | ||
| 1515 | } | ||
| 1516 | |||
| 1485 | static const struct super_operations btrfs_super_ops = { | 1517 | static const struct super_operations btrfs_super_ops = { |
| 1486 | .drop_inode = btrfs_drop_inode, | 1518 | .drop_inode = btrfs_drop_inode, |
| 1487 | .evict_inode = btrfs_evict_inode, | 1519 | .evict_inode = btrfs_evict_inode, |
| 1488 | .put_super = btrfs_put_super, | 1520 | .put_super = btrfs_put_super, |
| 1489 | .sync_fs = btrfs_sync_fs, | 1521 | .sync_fs = btrfs_sync_fs, |
| 1490 | .show_options = btrfs_show_options, | 1522 | .show_options = btrfs_show_options, |
| 1523 | .show_devname = btrfs_show_devname, | ||
| 1491 | .write_inode = btrfs_write_inode, | 1524 | .write_inode = btrfs_write_inode, |
| 1492 | .dirty_inode = btrfs_fs_dirty_inode, | 1525 | .dirty_inode = btrfs_fs_dirty_inode, |
| 1493 | .alloc_inode = btrfs_alloc_inode, | 1526 | .alloc_inode = btrfs_alloc_inode, |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1791c6e3d834..b72b068183ec 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -100,6 +100,10 @@ loop: | |||
| 100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | 100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); |
| 101 | cur_trans = fs_info->running_transaction; | 101 | cur_trans = fs_info->running_transaction; |
| 102 | goto loop; | 102 | goto loop; |
| 103 | } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | ||
| 104 | spin_unlock(&root->fs_info->trans_lock); | ||
| 105 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | ||
| 106 | return -EROFS; | ||
| 103 | } | 107 | } |
| 104 | 108 | ||
| 105 | atomic_set(&cur_trans->num_writers, 1); | 109 | atomic_set(&cur_trans->num_writers, 1); |
| @@ -1213,14 +1217,20 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
| 1213 | 1217 | ||
| 1214 | 1218 | ||
| 1215 | static void cleanup_transaction(struct btrfs_trans_handle *trans, | 1219 | static void cleanup_transaction(struct btrfs_trans_handle *trans, |
| 1216 | struct btrfs_root *root) | 1220 | struct btrfs_root *root, int err) |
| 1217 | { | 1221 | { |
| 1218 | struct btrfs_transaction *cur_trans = trans->transaction; | 1222 | struct btrfs_transaction *cur_trans = trans->transaction; |
| 1219 | 1223 | ||
| 1220 | WARN_ON(trans->use_count > 1); | 1224 | WARN_ON(trans->use_count > 1); |
| 1221 | 1225 | ||
| 1226 | btrfs_abort_transaction(trans, root, err); | ||
| 1227 | |||
| 1222 | spin_lock(&root->fs_info->trans_lock); | 1228 | spin_lock(&root->fs_info->trans_lock); |
| 1223 | list_del_init(&cur_trans->list); | 1229 | list_del_init(&cur_trans->list); |
| 1230 | if (cur_trans == root->fs_info->running_transaction) { | ||
| 1231 | root->fs_info->running_transaction = NULL; | ||
| 1232 | root->fs_info->trans_no_join = 0; | ||
| 1233 | } | ||
| 1224 | spin_unlock(&root->fs_info->trans_lock); | 1234 | spin_unlock(&root->fs_info->trans_lock); |
| 1225 | 1235 | ||
| 1226 | btrfs_cleanup_one_transaction(trans->transaction, root); | 1236 | btrfs_cleanup_one_transaction(trans->transaction, root); |
| @@ -1526,7 +1536,7 @@ cleanup_transaction: | |||
| 1526 | // WARN_ON(1); | 1536 | // WARN_ON(1); |
| 1527 | if (current->journal_info == trans) | 1537 | if (current->journal_info == trans) |
| 1528 | current->journal_info = NULL; | 1538 | current->journal_info = NULL; |
| 1529 | cleanup_transaction(trans, root); | 1539 | cleanup_transaction(trans, root, ret); |
| 1530 | 1540 | ||
| 1531 | return ret; | 1541 | return ret; |
| 1532 | } | 1542 | } |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7782020996fe..8a3d2594b807 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include "volumes.h" | 35 | #include "volumes.h" |
| 36 | #include "async-thread.h" | 36 | #include "async-thread.h" |
| 37 | #include "check-integrity.h" | 37 | #include "check-integrity.h" |
| 38 | #include "rcu-string.h" | ||
| 38 | 39 | ||
| 39 | static int init_first_rw_device(struct btrfs_trans_handle *trans, | 40 | static int init_first_rw_device(struct btrfs_trans_handle *trans, |
| 40 | struct btrfs_root *root, | 41 | struct btrfs_root *root, |
| @@ -64,7 +65,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices) | |||
| 64 | device = list_entry(fs_devices->devices.next, | 65 | device = list_entry(fs_devices->devices.next, |
| 65 | struct btrfs_device, dev_list); | 66 | struct btrfs_device, dev_list); |
| 66 | list_del(&device->dev_list); | 67 | list_del(&device->dev_list); |
| 67 | kfree(device->name); | 68 | rcu_string_free(device->name); |
| 68 | kfree(device); | 69 | kfree(device); |
| 69 | } | 70 | } |
| 70 | kfree(fs_devices); | 71 | kfree(fs_devices); |
| @@ -334,8 +335,8 @@ static noinline int device_list_add(const char *path, | |||
| 334 | { | 335 | { |
| 335 | struct btrfs_device *device; | 336 | struct btrfs_device *device; |
| 336 | struct btrfs_fs_devices *fs_devices; | 337 | struct btrfs_fs_devices *fs_devices; |
| 338 | struct rcu_string *name; | ||
| 337 | u64 found_transid = btrfs_super_generation(disk_super); | 339 | u64 found_transid = btrfs_super_generation(disk_super); |
| 338 | char *name; | ||
| 339 | 340 | ||
| 340 | fs_devices = find_fsid(disk_super->fsid); | 341 | fs_devices = find_fsid(disk_super->fsid); |
| 341 | if (!fs_devices) { | 342 | if (!fs_devices) { |
| @@ -369,11 +370,13 @@ static noinline int device_list_add(const char *path, | |||
| 369 | memcpy(device->uuid, disk_super->dev_item.uuid, | 370 | memcpy(device->uuid, disk_super->dev_item.uuid, |
| 370 | BTRFS_UUID_SIZE); | 371 | BTRFS_UUID_SIZE); |
| 371 | spin_lock_init(&device->io_lock); | 372 | spin_lock_init(&device->io_lock); |
| 372 | device->name = kstrdup(path, GFP_NOFS); | 373 | |
| 373 | if (!device->name) { | 374 | name = rcu_string_strdup(path, GFP_NOFS); |
| 375 | if (!name) { | ||
| 374 | kfree(device); | 376 | kfree(device); |
| 375 | return -ENOMEM; | 377 | return -ENOMEM; |
| 376 | } | 378 | } |
| 379 | rcu_assign_pointer(device->name, name); | ||
| 377 | INIT_LIST_HEAD(&device->dev_alloc_list); | 380 | INIT_LIST_HEAD(&device->dev_alloc_list); |
| 378 | 381 | ||
| 379 | /* init readahead state */ | 382 | /* init readahead state */ |
| @@ -390,12 +393,12 @@ static noinline int device_list_add(const char *path, | |||
| 390 | 393 | ||
| 391 | device->fs_devices = fs_devices; | 394 | device->fs_devices = fs_devices; |
| 392 | fs_devices->num_devices++; | 395 | fs_devices->num_devices++; |
| 393 | } else if (!device->name || strcmp(device->name, path)) { | 396 | } else if (!device->name || strcmp(device->name->str, path)) { |
| 394 | name = kstrdup(path, GFP_NOFS); | 397 | name = rcu_string_strdup(path, GFP_NOFS); |
| 395 | if (!name) | 398 | if (!name) |
| 396 | return -ENOMEM; | 399 | return -ENOMEM; |
| 397 | kfree(device->name); | 400 | rcu_string_free(device->name); |
| 398 | device->name = name; | 401 | rcu_assign_pointer(device->name, name); |
| 399 | if (device->missing) { | 402 | if (device->missing) { |
| 400 | fs_devices->missing_devices--; | 403 | fs_devices->missing_devices--; |
| 401 | device->missing = 0; | 404 | device->missing = 0; |
| @@ -430,15 +433,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) | |||
| 430 | 433 | ||
| 431 | /* We have held the volume lock, it is safe to get the devices. */ | 434 | /* We have held the volume lock, it is safe to get the devices. */ |
| 432 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { | 435 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { |
| 436 | struct rcu_string *name; | ||
| 437 | |||
| 433 | device = kzalloc(sizeof(*device), GFP_NOFS); | 438 | device = kzalloc(sizeof(*device), GFP_NOFS); |
| 434 | if (!device) | 439 | if (!device) |
| 435 | goto error; | 440 | goto error; |
| 436 | 441 | ||
| 437 | device->name = kstrdup(orig_dev->name, GFP_NOFS); | 442 | /* |
| 438 | if (!device->name) { | 443 | * This is ok to do without rcu read locked because we hold the |
| 444 | * uuid mutex so nothing we touch in here is going to disappear. | ||
| 445 | */ | ||
| 446 | name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS); | ||
| 447 | if (!name) { | ||
| 439 | kfree(device); | 448 | kfree(device); |
| 440 | goto error; | 449 | goto error; |
| 441 | } | 450 | } |
| 451 | rcu_assign_pointer(device->name, name); | ||
| 442 | 452 | ||
| 443 | device->devid = orig_dev->devid; | 453 | device->devid = orig_dev->devid; |
| 444 | device->work.func = pending_bios_fn; | 454 | device->work.func = pending_bios_fn; |
| @@ -491,7 +501,7 @@ again: | |||
| 491 | } | 501 | } |
| 492 | list_del_init(&device->dev_list); | 502 | list_del_init(&device->dev_list); |
| 493 | fs_devices->num_devices--; | 503 | fs_devices->num_devices--; |
| 494 | kfree(device->name); | 504 | rcu_string_free(device->name); |
| 495 | kfree(device); | 505 | kfree(device); |
| 496 | } | 506 | } |
| 497 | 507 | ||
| @@ -516,7 +526,7 @@ static void __free_device(struct work_struct *work) | |||
| 516 | if (device->bdev) | 526 | if (device->bdev) |
| 517 | blkdev_put(device->bdev, device->mode); | 527 | blkdev_put(device->bdev, device->mode); |
| 518 | 528 | ||
| 519 | kfree(device->name); | 529 | rcu_string_free(device->name); |
| 520 | kfree(device); | 530 | kfree(device); |
| 521 | } | 531 | } |
| 522 | 532 | ||
| @@ -540,6 +550,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
| 540 | mutex_lock(&fs_devices->device_list_mutex); | 550 | mutex_lock(&fs_devices->device_list_mutex); |
| 541 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 551 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
| 542 | struct btrfs_device *new_device; | 552 | struct btrfs_device *new_device; |
| 553 | struct rcu_string *name; | ||
| 543 | 554 | ||
| 544 | if (device->bdev) | 555 | if (device->bdev) |
| 545 | fs_devices->open_devices--; | 556 | fs_devices->open_devices--; |
| @@ -555,8 +566,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
| 555 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); | 566 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); |
| 556 | BUG_ON(!new_device); /* -ENOMEM */ | 567 | BUG_ON(!new_device); /* -ENOMEM */ |
| 557 | memcpy(new_device, device, sizeof(*new_device)); | 568 | memcpy(new_device, device, sizeof(*new_device)); |
| 558 | new_device->name = kstrdup(device->name, GFP_NOFS); | 569 | |
| 559 | BUG_ON(device->name && !new_device->name); /* -ENOMEM */ | 570 | /* Safe because we are under uuid_mutex */ |
| 571 | name = rcu_string_strdup(device->name->str, GFP_NOFS); | ||
| 572 | BUG_ON(device->name && !name); /* -ENOMEM */ | ||
| 573 | rcu_assign_pointer(new_device->name, name); | ||
| 560 | new_device->bdev = NULL; | 574 | new_device->bdev = NULL; |
| 561 | new_device->writeable = 0; | 575 | new_device->writeable = 0; |
| 562 | new_device->in_fs_metadata = 0; | 576 | new_device->in_fs_metadata = 0; |
| @@ -621,9 +635,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
| 621 | if (!device->name) | 635 | if (!device->name) |
| 622 | continue; | 636 | continue; |
| 623 | 637 | ||
| 624 | bdev = blkdev_get_by_path(device->name, flags, holder); | 638 | bdev = blkdev_get_by_path(device->name->str, flags, holder); |
| 625 | if (IS_ERR(bdev)) { | 639 | if (IS_ERR(bdev)) { |
| 626 | printk(KERN_INFO "open %s failed\n", device->name); | 640 | printk(KERN_INFO "open %s failed\n", device->name->str); |
| 627 | goto error; | 641 | goto error; |
| 628 | } | 642 | } |
| 629 | filemap_write_and_wait(bdev->bd_inode->i_mapping); | 643 | filemap_write_and_wait(bdev->bd_inode->i_mapping); |
| @@ -1632,6 +1646,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
| 1632 | struct block_device *bdev; | 1646 | struct block_device *bdev; |
| 1633 | struct list_head *devices; | 1647 | struct list_head *devices; |
| 1634 | struct super_block *sb = root->fs_info->sb; | 1648 | struct super_block *sb = root->fs_info->sb; |
| 1649 | struct rcu_string *name; | ||
| 1635 | u64 total_bytes; | 1650 | u64 total_bytes; |
| 1636 | int seeding_dev = 0; | 1651 | int seeding_dev = 0; |
| 1637 | int ret = 0; | 1652 | int ret = 0; |
| @@ -1671,23 +1686,24 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
| 1671 | goto error; | 1686 | goto error; |
| 1672 | } | 1687 | } |
| 1673 | 1688 | ||
| 1674 | device->name = kstrdup(device_path, GFP_NOFS); | 1689 | name = rcu_string_strdup(device_path, GFP_NOFS); |
| 1675 | if (!device->name) { | 1690 | if (!name) { |
| 1676 | kfree(device); | 1691 | kfree(device); |
| 1677 | ret = -ENOMEM; | 1692 | ret = -ENOMEM; |
| 1678 | goto error; | 1693 | goto error; |
| 1679 | } | 1694 | } |
| 1695 | rcu_assign_pointer(device->name, name); | ||
| 1680 | 1696 | ||
| 1681 | ret = find_next_devid(root, &device->devid); | 1697 | ret = find_next_devid(root, &device->devid); |
| 1682 | if (ret) { | 1698 | if (ret) { |
| 1683 | kfree(device->name); | 1699 | rcu_string_free(device->name); |
| 1684 | kfree(device); | 1700 | kfree(device); |
| 1685 | goto error; | 1701 | goto error; |
| 1686 | } | 1702 | } |
| 1687 | 1703 | ||
| 1688 | trans = btrfs_start_transaction(root, 0); | 1704 | trans = btrfs_start_transaction(root, 0); |
| 1689 | if (IS_ERR(trans)) { | 1705 | if (IS_ERR(trans)) { |
| 1690 | kfree(device->name); | 1706 | rcu_string_free(device->name); |
| 1691 | kfree(device); | 1707 | kfree(device); |
| 1692 | ret = PTR_ERR(trans); | 1708 | ret = PTR_ERR(trans); |
| 1693 | goto error; | 1709 | goto error; |
| @@ -1796,7 +1812,7 @@ error_trans: | |||
| 1796 | unlock_chunks(root); | 1812 | unlock_chunks(root); |
| 1797 | btrfs_abort_transaction(trans, root, ret); | 1813 | btrfs_abort_transaction(trans, root, ret); |
| 1798 | btrfs_end_transaction(trans, root); | 1814 | btrfs_end_transaction(trans, root); |
| 1799 | kfree(device->name); | 1815 | rcu_string_free(device->name); |
| 1800 | kfree(device); | 1816 | kfree(device); |
| 1801 | error: | 1817 | error: |
| 1802 | blkdev_put(bdev, FMODE_EXCL); | 1818 | blkdev_put(bdev, FMODE_EXCL); |
| @@ -4204,10 +4220,17 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
| 4204 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; | 4220 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; |
| 4205 | dev = bbio->stripes[dev_nr].dev; | 4221 | dev = bbio->stripes[dev_nr].dev; |
| 4206 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { | 4222 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { |
| 4223 | #ifdef DEBUG | ||
| 4224 | struct rcu_string *name; | ||
| 4225 | |||
| 4226 | rcu_read_lock(); | ||
| 4227 | name = rcu_dereference(dev->name); | ||
| 4207 | pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " | 4228 | pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " |
| 4208 | "(%s id %llu), size=%u\n", rw, | 4229 | "(%s id %llu), size=%u\n", rw, |
| 4209 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, | 4230 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, |
| 4210 | dev->name, dev->devid, bio->bi_size); | 4231 | name->str, dev->devid, bio->bi_size); |
| 4232 | rcu_read_unlock(); | ||
| 4233 | #endif | ||
| 4211 | bio->bi_bdev = dev->bdev; | 4234 | bio->bi_bdev = dev->bdev; |
| 4212 | if (async_submit) | 4235 | if (async_submit) |
| 4213 | schedule_bio(root, dev, rw, bio); | 4236 | schedule_bio(root, dev, rw, bio); |
| @@ -4694,8 +4717,9 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) | |||
| 4694 | key.offset = device->devid; | 4717 | key.offset = device->devid; |
| 4695 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); | 4718 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); |
| 4696 | if (ret) { | 4719 | if (ret) { |
| 4697 | printk(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", | 4720 | printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", |
| 4698 | device->name, (unsigned long long)device->devid); | 4721 | rcu_str_deref(device->name), |
| 4722 | (unsigned long long)device->devid); | ||
| 4699 | __btrfs_reset_dev_stats(device); | 4723 | __btrfs_reset_dev_stats(device); |
| 4700 | device->dev_stats_valid = 1; | 4724 | device->dev_stats_valid = 1; |
| 4701 | btrfs_release_path(path); | 4725 | btrfs_release_path(path); |
| @@ -4747,8 +4771,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
| 4747 | BUG_ON(!path); | 4771 | BUG_ON(!path); |
| 4748 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); | 4772 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); |
| 4749 | if (ret < 0) { | 4773 | if (ret < 0) { |
| 4750 | printk(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", | 4774 | printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", |
| 4751 | ret, device->name); | 4775 | ret, rcu_str_deref(device->name)); |
| 4752 | goto out; | 4776 | goto out; |
| 4753 | } | 4777 | } |
| 4754 | 4778 | ||
| @@ -4757,8 +4781,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
| 4757 | /* need to delete old one and insert a new one */ | 4781 | /* need to delete old one and insert a new one */ |
| 4758 | ret = btrfs_del_item(trans, dev_root, path); | 4782 | ret = btrfs_del_item(trans, dev_root, path); |
| 4759 | if (ret != 0) { | 4783 | if (ret != 0) { |
| 4760 | printk(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", | 4784 | printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", |
| 4761 | device->name, ret); | 4785 | rcu_str_deref(device->name), ret); |
| 4762 | goto out; | 4786 | goto out; |
| 4763 | } | 4787 | } |
| 4764 | ret = 1; | 4788 | ret = 1; |
| @@ -4770,8 +4794,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
| 4770 | ret = btrfs_insert_empty_item(trans, dev_root, path, | 4794 | ret = btrfs_insert_empty_item(trans, dev_root, path, |
| 4771 | &key, sizeof(*ptr)); | 4795 | &key, sizeof(*ptr)); |
| 4772 | if (ret < 0) { | 4796 | if (ret < 0) { |
| 4773 | printk(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", | 4797 | printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", |
| 4774 | device->name, ret); | 4798 | rcu_str_deref(device->name), ret); |
| 4775 | goto out; | 4799 | goto out; |
| 4776 | } | 4800 | } |
| 4777 | } | 4801 | } |
| @@ -4823,9 +4847,9 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
| 4823 | { | 4847 | { |
| 4824 | if (!dev->dev_stats_valid) | 4848 | if (!dev->dev_stats_valid) |
| 4825 | return; | 4849 | return; |
| 4826 | printk_ratelimited(KERN_ERR | 4850 | printk_ratelimited_in_rcu(KERN_ERR |
| 4827 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4851 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
| 4828 | dev->name, | 4852 | rcu_str_deref(dev->name), |
| 4829 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4853 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
| 4830 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4854 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
| 4831 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4855 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
| @@ -4837,8 +4861,8 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
| 4837 | 4861 | ||
| 4838 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) | 4862 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) |
| 4839 | { | 4863 | { |
| 4840 | printk(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4864 | printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
| 4841 | dev->name, | 4865 | rcu_str_deref(dev->name), |
| 4842 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4866 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
| 4843 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4867 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
| 4844 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4868 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 3406a88ca83e..74366f27a76b 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
| @@ -58,7 +58,7 @@ struct btrfs_device { | |||
| 58 | /* the mode sent to blkdev_get */ | 58 | /* the mode sent to blkdev_get */ |
| 59 | fmode_t mode; | 59 | fmode_t mode; |
| 60 | 60 | ||
| 61 | char *name; | 61 | struct rcu_string *name; |
| 62 | 62 | ||
| 63 | /* the internal btrfs device id */ | 63 | /* the internal btrfs device id */ |
| 64 | u64 devid; | 64 | u64 devid; |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 20350a93ed99..6df0cbe1cbc9 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
| @@ -174,6 +174,7 @@ struct smb_version_operations { | |||
| 174 | void (*add_credits)(struct TCP_Server_Info *, const unsigned int); | 174 | void (*add_credits)(struct TCP_Server_Info *, const unsigned int); |
| 175 | void (*set_credits)(struct TCP_Server_Info *, const int); | 175 | void (*set_credits)(struct TCP_Server_Info *, const int); |
| 176 | int * (*get_credits_field)(struct TCP_Server_Info *); | 176 | int * (*get_credits_field)(struct TCP_Server_Info *); |
| 177 | __u64 (*get_next_mid)(struct TCP_Server_Info *); | ||
| 177 | /* data offset from read response message */ | 178 | /* data offset from read response message */ |
| 178 | unsigned int (*read_data_offset)(char *); | 179 | unsigned int (*read_data_offset)(char *); |
| 179 | /* data length from read response message */ | 180 | /* data length from read response message */ |
| @@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val) | |||
| 399 | server->ops->set_credits(server, val); | 400 | server->ops->set_credits(server, val); |
| 400 | } | 401 | } |
| 401 | 402 | ||
| 403 | static inline __u64 | ||
| 404 | get_next_mid(struct TCP_Server_Info *server) | ||
| 405 | { | ||
| 406 | return server->ops->get_next_mid(server); | ||
| 407 | } | ||
| 408 | |||
| 402 | /* | 409 | /* |
| 403 | * Macros to allow the TCP_Server_Info->net field and related code to drop out | 410 | * Macros to allow the TCP_Server_Info->net field and related code to drop out |
| 404 | * when CONFIG_NET_NS isn't set. | 411 | * when CONFIG_NET_NS isn't set. |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 5ec21ecf7980..0a6cbfe2761e 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
| @@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct, | |||
| 114 | void **request_buf); | 114 | void **request_buf); |
| 115 | extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, | 115 | extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, |
| 116 | const struct nls_table *nls_cp); | 116 | const struct nls_table *nls_cp); |
| 117 | extern __u64 GetNextMid(struct TCP_Server_Info *server); | ||
| 118 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); | 117 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); |
| 119 | extern u64 cifs_UnixTimeToNT(struct timespec); | 118 | extern u64 cifs_UnixTimeToNT(struct timespec); |
| 120 | extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, | 119 | extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b5ad716b2642..5b400730c213 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct, | |||
| 268 | return rc; | 268 | return rc; |
| 269 | 269 | ||
| 270 | buffer = (struct smb_hdr *)*request_buf; | 270 | buffer = (struct smb_hdr *)*request_buf; |
| 271 | buffer->Mid = GetNextMid(ses->server); | 271 | buffer->Mid = get_next_mid(ses->server); |
| 272 | if (ses->capabilities & CAP_UNICODE) | 272 | if (ses->capabilities & CAP_UNICODE) |
| 273 | buffer->Flags2 |= SMBFLG2_UNICODE; | 273 | buffer->Flags2 |= SMBFLG2_UNICODE; |
| 274 | if (ses->capabilities & CAP_STATUS32) | 274 | if (ses->capabilities & CAP_STATUS32) |
| @@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) | |||
| 402 | 402 | ||
| 403 | cFYI(1, "secFlags 0x%x", secFlags); | 403 | cFYI(1, "secFlags 0x%x", secFlags); |
| 404 | 404 | ||
| 405 | pSMB->hdr.Mid = GetNextMid(server); | 405 | pSMB->hdr.Mid = get_next_mid(server); |
| 406 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); | 406 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); |
| 407 | 407 | ||
| 408 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) | 408 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) |
| @@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses) | |||
| 782 | return rc; | 782 | return rc; |
| 783 | } | 783 | } |
| 784 | 784 | ||
| 785 | pSMB->hdr.Mid = GetNextMid(ses->server); | 785 | pSMB->hdr.Mid = get_next_mid(ses->server); |
| 786 | 786 | ||
| 787 | if (ses->server->sec_mode & | 787 | if (ses->server->sec_mode & |
| 788 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 788 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
| @@ -4762,7 +4762,7 @@ getDFSRetry: | |||
| 4762 | 4762 | ||
| 4763 | /* server pointer checked in called function, | 4763 | /* server pointer checked in called function, |
| 4764 | but should never be null here anyway */ | 4764 | but should never be null here anyway */ |
| 4765 | pSMB->hdr.Mid = GetNextMid(ses->server); | 4765 | pSMB->hdr.Mid = get_next_mid(ses->server); |
| 4766 | pSMB->hdr.Tid = ses->ipc_tid; | 4766 | pSMB->hdr.Tid = ses->ipc_tid; |
| 4767 | pSMB->hdr.Uid = ses->Suid; | 4767 | pSMB->hdr.Uid = ses->Suid; |
| 4768 | if (ses->capabilities & CAP_STATUS32) | 4768 | if (ses->capabilities & CAP_STATUS32) |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ccafdedd0dbc..78db68a5cf44 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p) | |||
| 1058 | if (mid_entry != NULL) { | 1058 | if (mid_entry != NULL) { |
| 1059 | if (!mid_entry->multiRsp || mid_entry->multiEnd) | 1059 | if (!mid_entry->multiRsp || mid_entry->multiEnd) |
| 1060 | mid_entry->callback(mid_entry); | 1060 | mid_entry->callback(mid_entry); |
| 1061 | } else if (!server->ops->is_oplock_break(buf, server)) { | 1061 | } else if (!server->ops->is_oplock_break || |
| 1062 | !server->ops->is_oplock_break(buf, server)) { | ||
| 1062 | cERROR(1, "No task to wake, unknown frame received! " | 1063 | cERROR(1, "No task to wake, unknown frame received! " |
| 1063 | "NumMids %d", atomic_read(&midCount)); | 1064 | "NumMids %d", atomic_read(&midCount)); |
| 1064 | cifs_dump_mem("Received Data is: ", buf, | 1065 | cifs_dump_mem("Received Data is: ", buf, |
| 1065 | HEADER_SIZE(server)); | 1066 | HEADER_SIZE(server)); |
| 1066 | #ifdef CONFIG_CIFS_DEBUG2 | 1067 | #ifdef CONFIG_CIFS_DEBUG2 |
| 1067 | server->ops->dump_detail(buf); | 1068 | if (server->ops->dump_detail) |
| 1069 | server->ops->dump_detail(buf); | ||
| 1068 | cifs_dump_mids(server); | 1070 | cifs_dump_mids(server); |
| 1069 | #endif /* CIFS_DEBUG2 */ | 1071 | #endif /* CIFS_DEBUG2 */ |
| 1070 | 1072 | ||
| @@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, | |||
| 3938 | header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, | 3940 | header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, |
| 3939 | NULL /*no tid */ , 4 /*wct */ ); | 3941 | NULL /*no tid */ , 4 /*wct */ ); |
| 3940 | 3942 | ||
| 3941 | smb_buffer->Mid = GetNextMid(ses->server); | 3943 | smb_buffer->Mid = get_next_mid(ses->server); |
| 3942 | smb_buffer->Uid = ses->Suid; | 3944 | smb_buffer->Uid = ses->Suid; |
| 3943 | pSMB = (TCONX_REQ *) smb_buffer; | 3945 | pSMB = (TCONX_REQ *) smb_buffer; |
| 3944 | pSMBr = (TCONX_RSP *) smb_buffer_response; | 3946 | pSMBr = (TCONX_RSP *) smb_buffer_response; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 253170dfa716..513adbc211d7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
| 876 | struct cifsLockInfo *li, *tmp; | 876 | struct cifsLockInfo *li, *tmp; |
| 877 | struct cifs_tcon *tcon; | 877 | struct cifs_tcon *tcon; |
| 878 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); | 878 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); |
| 879 | unsigned int num, max_num; | 879 | unsigned int num, max_num, max_buf; |
| 880 | LOCKING_ANDX_RANGE *buf, *cur; | 880 | LOCKING_ANDX_RANGE *buf, *cur; |
| 881 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 881 | int types[] = {LOCKING_ANDX_LARGE_FILES, |
| 882 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 882 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; |
| @@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
| 892 | return rc; | 892 | return rc; |
| 893 | } | 893 | } |
| 894 | 894 | ||
| 895 | max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / | 895 | /* |
| 896 | sizeof(LOCKING_ANDX_RANGE); | 896 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
| 897 | * and check it for zero before using. | ||
| 898 | */ | ||
| 899 | max_buf = tcon->ses->server->maxBuf; | ||
| 900 | if (!max_buf) { | ||
| 901 | mutex_unlock(&cinode->lock_mutex); | ||
| 902 | FreeXid(xid); | ||
| 903 | return -EINVAL; | ||
| 904 | } | ||
| 905 | |||
| 906 | max_num = (max_buf - sizeof(struct smb_hdr)) / | ||
| 907 | sizeof(LOCKING_ANDX_RANGE); | ||
| 897 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); | 908 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
| 898 | if (!buf) { | 909 | if (!buf) { |
| 899 | mutex_unlock(&cinode->lock_mutex); | 910 | mutex_unlock(&cinode->lock_mutex); |
| @@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
| 1218 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 1229 | int types[] = {LOCKING_ANDX_LARGE_FILES, |
| 1219 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 1230 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; |
| 1220 | unsigned int i; | 1231 | unsigned int i; |
| 1221 | unsigned int max_num, num; | 1232 | unsigned int max_num, num, max_buf; |
| 1222 | LOCKING_ANDX_RANGE *buf, *cur; | 1233 | LOCKING_ANDX_RANGE *buf, *cur; |
| 1223 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); | 1234 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
| 1224 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); | 1235 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); |
| @@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
| 1228 | 1239 | ||
| 1229 | INIT_LIST_HEAD(&tmp_llist); | 1240 | INIT_LIST_HEAD(&tmp_llist); |
| 1230 | 1241 | ||
| 1231 | max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / | 1242 | /* |
| 1232 | sizeof(LOCKING_ANDX_RANGE); | 1243 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
| 1244 | * and check it for zero before using. | ||
| 1245 | */ | ||
| 1246 | max_buf = tcon->ses->server->maxBuf; | ||
| 1247 | if (!max_buf) | ||
| 1248 | return -EINVAL; | ||
| 1249 | |||
| 1250 | max_num = (max_buf - sizeof(struct smb_hdr)) / | ||
| 1251 | sizeof(LOCKING_ANDX_RANGE); | ||
| 1233 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); | 1252 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
| 1234 | if (!buf) | 1253 | if (!buf) |
| 1235 | return -ENOMEM; | 1254 | return -ENOMEM; |
| @@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
| 1247 | continue; | 1266 | continue; |
| 1248 | if (types[i] != li->type) | 1267 | if (types[i] != li->type) |
| 1249 | continue; | 1268 | continue; |
| 1250 | if (!cinode->can_cache_brlcks) { | 1269 | if (cinode->can_cache_brlcks) { |
| 1251 | cur->Pid = cpu_to_le16(li->pid); | ||
| 1252 | cur->LengthLow = cpu_to_le32((u32)li->length); | ||
| 1253 | cur->LengthHigh = | ||
| 1254 | cpu_to_le32((u32)(li->length>>32)); | ||
| 1255 | cur->OffsetLow = cpu_to_le32((u32)li->offset); | ||
| 1256 | cur->OffsetHigh = | ||
| 1257 | cpu_to_le32((u32)(li->offset>>32)); | ||
| 1258 | /* | ||
| 1259 | * We need to save a lock here to let us add | ||
| 1260 | * it again to the file's list if the unlock | ||
| 1261 | * range request fails on the server. | ||
| 1262 | */ | ||
| 1263 | list_move(&li->llist, &tmp_llist); | ||
| 1264 | if (++num == max_num) { | ||
| 1265 | stored_rc = cifs_lockv(xid, tcon, | ||
| 1266 | cfile->netfid, | ||
| 1267 | li->type, num, | ||
| 1268 | 0, buf); | ||
| 1269 | if (stored_rc) { | ||
| 1270 | /* | ||
| 1271 | * We failed on the unlock range | ||
| 1272 | * request - add all locks from | ||
| 1273 | * the tmp list to the head of | ||
| 1274 | * the file's list. | ||
| 1275 | */ | ||
| 1276 | cifs_move_llist(&tmp_llist, | ||
| 1277 | &cfile->llist); | ||
| 1278 | rc = stored_rc; | ||
| 1279 | } else | ||
| 1280 | /* | ||
| 1281 | * The unlock range request | ||
| 1282 | * succeed - free the tmp list. | ||
| 1283 | */ | ||
| 1284 | cifs_free_llist(&tmp_llist); | ||
| 1285 | cur = buf; | ||
| 1286 | num = 0; | ||
| 1287 | } else | ||
| 1288 | cur++; | ||
| 1289 | } else { | ||
| 1290 | /* | 1270 | /* |
| 1291 | * We can cache brlock requests - simply remove | 1271 | * We can cache brlock requests - simply remove |
| 1292 | * a lock from the file's list. | 1272 | * a lock from the file's list. |
| @@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
| 1294 | list_del(&li->llist); | 1274 | list_del(&li->llist); |
| 1295 | cifs_del_lock_waiters(li); | 1275 | cifs_del_lock_waiters(li); |
| 1296 | kfree(li); | 1276 | kfree(li); |
| 1277 | continue; | ||
| 1297 | } | 1278 | } |
| 1279 | cur->Pid = cpu_to_le16(li->pid); | ||
| 1280 | cur->LengthLow = cpu_to_le32((u32)li->length); | ||
| 1281 | cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); | ||
| 1282 | cur->OffsetLow = cpu_to_le32((u32)li->offset); | ||
| 1283 | cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); | ||
| 1284 | /* | ||
| 1285 | * We need to save a lock here to let us add it again to | ||
| 1286 | * the file's list if the unlock range request fails on | ||
| 1287 | * the server. | ||
| 1288 | */ | ||
| 1289 | list_move(&li->llist, &tmp_llist); | ||
| 1290 | if (++num == max_num) { | ||
| 1291 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, | ||
| 1292 | li->type, num, 0, buf); | ||
| 1293 | if (stored_rc) { | ||
| 1294 | /* | ||
| 1295 | * We failed on the unlock range | ||
| 1296 | * request - add all locks from the tmp | ||
| 1297 | * list to the head of the file's list. | ||
| 1298 | */ | ||
| 1299 | cifs_move_llist(&tmp_llist, | ||
| 1300 | &cfile->llist); | ||
| 1301 | rc = stored_rc; | ||
| 1302 | } else | ||
| 1303 | /* | ||
| 1304 | * The unlock range request succeed - | ||
| 1305 | * free the tmp list. | ||
| 1306 | */ | ||
| 1307 | cifs_free_llist(&tmp_llist); | ||
| 1308 | cur = buf; | ||
| 1309 | num = 0; | ||
| 1310 | } else | ||
| 1311 | cur++; | ||
| 1298 | } | 1312 | } |
| 1299 | if (num) { | 1313 | if (num) { |
| 1300 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, | 1314 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e2552d2b2e42..557506ae1e2a 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
| @@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free) | |||
| 212 | return; | 212 | return; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | /* | ||
| 216 | * Find a free multiplex id (SMB mid). Otherwise there could be | ||
| 217 | * mid collisions which might cause problems, demultiplexing the | ||
| 218 | * wrong response to this request. Multiplex ids could collide if | ||
| 219 | * one of a series requests takes much longer than the others, or | ||
| 220 | * if a very large number of long lived requests (byte range | ||
| 221 | * locks or FindNotify requests) are pending. No more than | ||
| 222 | * 64K-1 requests can be outstanding at one time. If no | ||
| 223 | * mids are available, return zero. A future optimization | ||
| 224 | * could make the combination of mids and uid the key we use | ||
| 225 | * to demultiplex on (rather than mid alone). | ||
| 226 | * In addition to the above check, the cifs demultiplex | ||
| 227 | * code already used the command code as a secondary | ||
| 228 | * check of the frame and if signing is negotiated the | ||
| 229 | * response would be discarded if the mid were the same | ||
| 230 | * but the signature was wrong. Since the mid is not put in the | ||
| 231 | * pending queue until later (when it is about to be dispatched) | ||
| 232 | * we do have to limit the number of outstanding requests | ||
| 233 | * to somewhat less than 64K-1 although it is hard to imagine | ||
| 234 | * so many threads being in the vfs at one time. | ||
| 235 | */ | ||
| 236 | __u64 GetNextMid(struct TCP_Server_Info *server) | ||
| 237 | { | ||
| 238 | __u64 mid = 0; | ||
| 239 | __u16 last_mid, cur_mid; | ||
| 240 | bool collision; | ||
| 241 | |||
| 242 | spin_lock(&GlobalMid_Lock); | ||
| 243 | |||
| 244 | /* mid is 16 bit only for CIFS/SMB */ | ||
| 245 | cur_mid = (__u16)((server->CurrentMid) & 0xffff); | ||
| 246 | /* we do not want to loop forever */ | ||
| 247 | last_mid = cur_mid; | ||
| 248 | cur_mid++; | ||
| 249 | |||
| 250 | /* | ||
| 251 | * This nested loop looks more expensive than it is. | ||
| 252 | * In practice the list of pending requests is short, | ||
| 253 | * fewer than 50, and the mids are likely to be unique | ||
| 254 | * on the first pass through the loop unless some request | ||
| 255 | * takes longer than the 64 thousand requests before it | ||
| 256 | * (and it would also have to have been a request that | ||
| 257 | * did not time out). | ||
| 258 | */ | ||
| 259 | while (cur_mid != last_mid) { | ||
| 260 | struct mid_q_entry *mid_entry; | ||
| 261 | unsigned int num_mids; | ||
| 262 | |||
| 263 | collision = false; | ||
| 264 | if (cur_mid == 0) | ||
| 265 | cur_mid++; | ||
| 266 | |||
| 267 | num_mids = 0; | ||
| 268 | list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { | ||
| 269 | ++num_mids; | ||
| 270 | if (mid_entry->mid == cur_mid && | ||
| 271 | mid_entry->mid_state == MID_REQUEST_SUBMITTED) { | ||
| 272 | /* This mid is in use, try a different one */ | ||
| 273 | collision = true; | ||
| 274 | break; | ||
| 275 | } | ||
| 276 | } | ||
| 277 | |||
| 278 | /* | ||
| 279 | * if we have more than 32k mids in the list, then something | ||
| 280 | * is very wrong. Possibly a local user is trying to DoS the | ||
| 281 | * box by issuing long-running calls and SIGKILL'ing them. If | ||
| 282 | * we get to 2^16 mids then we're in big trouble as this | ||
| 283 | * function could loop forever. | ||
| 284 | * | ||
| 285 | * Go ahead and assign out the mid in this situation, but force | ||
| 286 | * an eventual reconnect to clean out the pending_mid_q. | ||
| 287 | */ | ||
| 288 | if (num_mids > 32768) | ||
| 289 | server->tcpStatus = CifsNeedReconnect; | ||
| 290 | |||
| 291 | if (!collision) { | ||
| 292 | mid = (__u64)cur_mid; | ||
| 293 | server->CurrentMid = mid; | ||
| 294 | break; | ||
| 295 | } | ||
| 296 | cur_mid++; | ||
| 297 | } | ||
| 298 | spin_unlock(&GlobalMid_Lock); | ||
| 299 | return mid; | ||
| 300 | } | ||
| 301 | |||
| 302 | /* NB: MID can not be set if treeCon not passed in, in that | 215 | /* NB: MID can not be set if treeCon not passed in, in that |
| 303 | case it is responsbility of caller to set the mid */ | 216 | case it is responsbility of caller to set the mid */ |
| 304 | void | 217 | void |
| @@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
| 334 | 247 | ||
| 335 | /* Uid is not converted */ | 248 | /* Uid is not converted */ |
| 336 | buffer->Uid = treeCon->ses->Suid; | 249 | buffer->Uid = treeCon->ses->Suid; |
| 337 | buffer->Mid = GetNextMid(treeCon->ses->server); | 250 | buffer->Mid = get_next_mid(treeCon->ses->server); |
| 338 | } | 251 | } |
| 339 | if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) | 252 | if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) |
| 340 | buffer->Flags2 |= SMBFLG2_DFS; | 253 | buffer->Flags2 |= SMBFLG2_DFS; |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index d9d615fbed3f..6dec38f5522d 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
| @@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server) | |||
| 125 | return &server->credits; | 125 | return &server->credits; |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | /* | ||
| 129 | * Find a free multiplex id (SMB mid). Otherwise there could be | ||
| 130 | * mid collisions which might cause problems, demultiplexing the | ||
| 131 | * wrong response to this request. Multiplex ids could collide if | ||
| 132 | * one of a series requests takes much longer than the others, or | ||
| 133 | * if a very large number of long lived requests (byte range | ||
| 134 | * locks or FindNotify requests) are pending. No more than | ||
| 135 | * 64K-1 requests can be outstanding at one time. If no | ||
| 136 | * mids are available, return zero. A future optimization | ||
| 137 | * could make the combination of mids and uid the key we use | ||
| 138 | * to demultiplex on (rather than mid alone). | ||
| 139 | * In addition to the above check, the cifs demultiplex | ||
| 140 | * code already used the command code as a secondary | ||
| 141 | * check of the frame and if signing is negotiated the | ||
| 142 | * response would be discarded if the mid were the same | ||
| 143 | * but the signature was wrong. Since the mid is not put in the | ||
| 144 | * pending queue until later (when it is about to be dispatched) | ||
| 145 | * we do have to limit the number of outstanding requests | ||
| 146 | * to somewhat less than 64K-1 although it is hard to imagine | ||
| 147 | * so many threads being in the vfs at one time. | ||
| 148 | */ | ||
| 149 | static __u64 | ||
| 150 | cifs_get_next_mid(struct TCP_Server_Info *server) | ||
| 151 | { | ||
| 152 | __u64 mid = 0; | ||
| 153 | __u16 last_mid, cur_mid; | ||
| 154 | bool collision; | ||
| 155 | |||
| 156 | spin_lock(&GlobalMid_Lock); | ||
| 157 | |||
| 158 | /* mid is 16 bit only for CIFS/SMB */ | ||
| 159 | cur_mid = (__u16)((server->CurrentMid) & 0xffff); | ||
| 160 | /* we do not want to loop forever */ | ||
| 161 | last_mid = cur_mid; | ||
| 162 | cur_mid++; | ||
| 163 | |||
| 164 | /* | ||
| 165 | * This nested loop looks more expensive than it is. | ||
| 166 | * In practice the list of pending requests is short, | ||
| 167 | * fewer than 50, and the mids are likely to be unique | ||
| 168 | * on the first pass through the loop unless some request | ||
| 169 | * takes longer than the 64 thousand requests before it | ||
| 170 | * (and it would also have to have been a request that | ||
| 171 | * did not time out). | ||
| 172 | */ | ||
| 173 | while (cur_mid != last_mid) { | ||
| 174 | struct mid_q_entry *mid_entry; | ||
| 175 | unsigned int num_mids; | ||
| 176 | |||
| 177 | collision = false; | ||
| 178 | if (cur_mid == 0) | ||
| 179 | cur_mid++; | ||
| 180 | |||
| 181 | num_mids = 0; | ||
| 182 | list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { | ||
| 183 | ++num_mids; | ||
| 184 | if (mid_entry->mid == cur_mid && | ||
| 185 | mid_entry->mid_state == MID_REQUEST_SUBMITTED) { | ||
| 186 | /* This mid is in use, try a different one */ | ||
| 187 | collision = true; | ||
| 188 | break; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
| 192 | /* | ||
| 193 | * if we have more than 32k mids in the list, then something | ||
| 194 | * is very wrong. Possibly a local user is trying to DoS the | ||
| 195 | * box by issuing long-running calls and SIGKILL'ing them. If | ||
| 196 | * we get to 2^16 mids then we're in big trouble as this | ||
| 197 | * function could loop forever. | ||
| 198 | * | ||
| 199 | * Go ahead and assign out the mid in this situation, but force | ||
| 200 | * an eventual reconnect to clean out the pending_mid_q. | ||
| 201 | */ | ||
| 202 | if (num_mids > 32768) | ||
| 203 | server->tcpStatus = CifsNeedReconnect; | ||
| 204 | |||
| 205 | if (!collision) { | ||
| 206 | mid = (__u64)cur_mid; | ||
| 207 | server->CurrentMid = mid; | ||
| 208 | break; | ||
| 209 | } | ||
| 210 | cur_mid++; | ||
| 211 | } | ||
| 212 | spin_unlock(&GlobalMid_Lock); | ||
| 213 | return mid; | ||
| 214 | } | ||
| 215 | |||
| 128 | struct smb_version_operations smb1_operations = { | 216 | struct smb_version_operations smb1_operations = { |
| 129 | .send_cancel = send_nt_cancel, | 217 | .send_cancel = send_nt_cancel, |
| 130 | .compare_fids = cifs_compare_fids, | 218 | .compare_fids = cifs_compare_fids, |
| @@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = { | |||
| 133 | .add_credits = cifs_add_credits, | 221 | .add_credits = cifs_add_credits, |
| 134 | .set_credits = cifs_set_credits, | 222 | .set_credits = cifs_set_credits, |
| 135 | .get_credits_field = cifs_get_credits_field, | 223 | .get_credits_field = cifs_get_credits_field, |
| 224 | .get_next_mid = cifs_get_next_mid, | ||
| 136 | .read_data_offset = cifs_read_data_offset, | 225 | .read_data_offset = cifs_read_data_offset, |
| 137 | .read_data_length = cifs_read_data_length, | 226 | .read_data_length = cifs_read_data_length, |
| 138 | .map_error = map_smb_to_linux_error, | 227 | .map_error = map_smb_to_linux_error, |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 1b36ffe6a47b..3097ee58fd7d 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
| @@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 779 | 779 | ||
| 780 | pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; | 780 | pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; |
| 781 | pSMB->Timeout = 0; | 781 | pSMB->Timeout = 0; |
| 782 | pSMB->hdr.Mid = GetNextMid(ses->server); | 782 | pSMB->hdr.Mid = get_next_mid(ses->server); |
| 783 | 783 | ||
| 784 | return SendReceive(xid, ses, in_buf, out_buf, | 784 | return SendReceive(xid, ses, in_buf, out_buf, |
| 785 | &bytes_returned, 0); | 785 | &bytes_returned, 0); |
diff --git a/fs/dcache.c b/fs/dcache.c index 85c9e2bff8e6..40469044088d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -683,6 +683,8 @@ EXPORT_SYMBOL(dget_parent); | |||
| 683 | /** | 683 | /** |
| 684 | * d_find_alias - grab a hashed alias of inode | 684 | * d_find_alias - grab a hashed alias of inode |
| 685 | * @inode: inode in question | 685 | * @inode: inode in question |
| 686 | * @want_discon: flag, used by d_splice_alias, to request | ||
| 687 | * that only a DISCONNECTED alias be returned. | ||
| 686 | * | 688 | * |
| 687 | * If inode has a hashed alias, or is a directory and has any alias, | 689 | * If inode has a hashed alias, or is a directory and has any alias, |
| 688 | * acquire the reference to alias and return it. Otherwise return NULL. | 690 | * acquire the reference to alias and return it. Otherwise return NULL. |
| @@ -691,9 +693,10 @@ EXPORT_SYMBOL(dget_parent); | |||
| 691 | * of a filesystem. | 693 | * of a filesystem. |
| 692 | * | 694 | * |
| 693 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer | 695 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer |
| 694 | * any other hashed alias over that. | 696 | * any other hashed alias over that one unless @want_discon is set, |
| 697 | * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. | ||
| 695 | */ | 698 | */ |
| 696 | static struct dentry *__d_find_alias(struct inode *inode) | 699 | static struct dentry *__d_find_alias(struct inode *inode, int want_discon) |
| 697 | { | 700 | { |
| 698 | struct dentry *alias, *discon_alias; | 701 | struct dentry *alias, *discon_alias; |
| 699 | 702 | ||
| @@ -705,7 +708,7 @@ again: | |||
| 705 | if (IS_ROOT(alias) && | 708 | if (IS_ROOT(alias) && |
| 706 | (alias->d_flags & DCACHE_DISCONNECTED)) { | 709 | (alias->d_flags & DCACHE_DISCONNECTED)) { |
| 707 | discon_alias = alias; | 710 | discon_alias = alias; |
| 708 | } else { | 711 | } else if (!want_discon) { |
| 709 | __dget_dlock(alias); | 712 | __dget_dlock(alias); |
| 710 | spin_unlock(&alias->d_lock); | 713 | spin_unlock(&alias->d_lock); |
| 711 | return alias; | 714 | return alias; |
| @@ -736,7 +739,7 @@ struct dentry *d_find_alias(struct inode *inode) | |||
| 736 | 739 | ||
| 737 | if (!list_empty(&inode->i_dentry)) { | 740 | if (!list_empty(&inode->i_dentry)) { |
| 738 | spin_lock(&inode->i_lock); | 741 | spin_lock(&inode->i_lock); |
| 739 | de = __d_find_alias(inode); | 742 | de = __d_find_alias(inode, 0); |
| 740 | spin_unlock(&inode->i_lock); | 743 | spin_unlock(&inode->i_lock); |
| 741 | } | 744 | } |
| 742 | return de; | 745 | return de; |
| @@ -1647,8 +1650,9 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | |||
| 1647 | 1650 | ||
| 1648 | if (inode && S_ISDIR(inode->i_mode)) { | 1651 | if (inode && S_ISDIR(inode->i_mode)) { |
| 1649 | spin_lock(&inode->i_lock); | 1652 | spin_lock(&inode->i_lock); |
| 1650 | new = __d_find_any_alias(inode); | 1653 | new = __d_find_alias(inode, 1); |
| 1651 | if (new) { | 1654 | if (new) { |
| 1655 | BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); | ||
| 1652 | spin_unlock(&inode->i_lock); | 1656 | spin_unlock(&inode->i_lock); |
| 1653 | security_d_instantiate(new, inode); | 1657 | security_d_instantiate(new, inode); |
| 1654 | d_move(new, dentry); | 1658 | d_move(new, dentry); |
| @@ -2478,7 +2482,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
| 2478 | struct dentry *alias; | 2482 | struct dentry *alias; |
| 2479 | 2483 | ||
| 2480 | /* Does an aliased dentry already exist? */ | 2484 | /* Does an aliased dentry already exist? */ |
| 2481 | alias = __d_find_alias(inode); | 2485 | alias = __d_find_alias(inode, 0); |
| 2482 | if (alias) { | 2486 | if (alias) { |
| 2483 | actual = alias; | 2487 | actual = alias; |
| 2484 | write_seqlock(&rename_lock); | 2488 | write_seqlock(&rename_lock); |
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c index e32bc919e4e3..5a7b691e748b 100644 --- a/fs/exofs/sys.c +++ b/fs/exofs/sys.c | |||
| @@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = { | |||
| 109 | static struct kobj_type uuid_ktype = { | 109 | static struct kobj_type uuid_ktype = { |
| 110 | }; | 110 | }; |
| 111 | 111 | ||
| 112 | void exofs_sysfs_dbg_print() | 112 | void exofs_sysfs_dbg_print(void) |
| 113 | { | 113 | { |
| 114 | #ifdef CONFIG_EXOFS_DEBUG | 114 | #ifdef CONFIG_EXOFS_DEBUG |
| 115 | struct kobject *k_name, *k_tmp; | 115 | struct kobject *k_name, *k_tmp; |
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 99b6324290db..cee7812cc3cf 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
| @@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
| 90 | * unusual file system layouts. | 90 | * unusual file system layouts. |
| 91 | */ | 91 | */ |
| 92 | if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { | 92 | if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { |
| 93 | block_cluster = EXT4_B2C(sbi, (start - | 93 | block_cluster = EXT4_B2C(sbi, |
| 94 | ext4_block_bitmap(sb, gdp))); | 94 | ext4_block_bitmap(sb, gdp) - start); |
| 95 | if (block_cluster < num_clusters) | 95 | if (block_cluster < num_clusters) |
| 96 | block_cluster = -1; | 96 | block_cluster = -1; |
| 97 | else if (block_cluster == num_clusters) { | 97 | else if (block_cluster == num_clusters) { |
| @@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
| 102 | 102 | ||
| 103 | if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { | 103 | if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { |
| 104 | inode_cluster = EXT4_B2C(sbi, | 104 | inode_cluster = EXT4_B2C(sbi, |
| 105 | start - ext4_inode_bitmap(sb, gdp)); | 105 | ext4_inode_bitmap(sb, gdp) - start); |
| 106 | if (inode_cluster < num_clusters) | 106 | if (inode_cluster < num_clusters) |
| 107 | inode_cluster = -1; | 107 | inode_cluster = -1; |
| 108 | else if (inode_cluster == num_clusters) { | 108 | else if (inode_cluster == num_clusters) { |
| @@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
| 114 | itbl_blk = ext4_inode_table(sb, gdp); | 114 | itbl_blk = ext4_inode_table(sb, gdp); |
| 115 | for (i = 0; i < sbi->s_itb_per_group; i++) { | 115 | for (i = 0; i < sbi->s_itb_per_group; i++) { |
| 116 | if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { | 116 | if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { |
| 117 | c = EXT4_B2C(sbi, start - itbl_blk + i); | 117 | c = EXT4_B2C(sbi, itbl_blk + i - start); |
| 118 | if ((c < num_clusters) || (c == inode_cluster) || | 118 | if ((c < num_clusters) || (c == inode_cluster) || |
| 119 | (c == block_cluster) || (c == itbl_cluster)) | 119 | (c == block_cluster) || (c == itbl_cluster)) |
| 120 | continue; | 120 | continue; |
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 8ad112ae0ade..e34deac3f366 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
| @@ -123,7 +123,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 123 | else | 123 | else |
| 124 | ext4_clear_inode_flag(inode, i); | 124 | ext4_clear_inode_flag(inode, i); |
| 125 | } | 125 | } |
| 126 | ei->i_flags = flags; | ||
| 127 | 126 | ||
| 128 | ext4_set_inode_flags(inode); | 127 | ext4_set_inode_flags(inode); |
| 129 | inode->i_ctime = ext4_current_time(inode); | 128 | inode->i_ctime = ext4_current_time(inode); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8d2fb8c88cf3..41a3ccff18d8 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb, | |||
| 664 | /* Wait for I_SYNC. This function drops i_lock... */ | 664 | /* Wait for I_SYNC. This function drops i_lock... */ |
| 665 | inode_sleep_on_writeback(inode); | 665 | inode_sleep_on_writeback(inode); |
| 666 | /* Inode may be gone, start again */ | 666 | /* Inode may be gone, start again */ |
| 667 | spin_lock(&wb->list_lock); | ||
| 667 | continue; | 668 | continue; |
| 668 | } | 669 | } |
| 669 | inode->i_state |= I_SYNC; | 670 | inode->i_state |= I_SYNC; |
diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 42593c587d48..03ff5b1eba93 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c | |||
| @@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf, | |||
| 75 | unsigned global_limit) | 75 | unsigned global_limit) |
| 76 | { | 76 | { |
| 77 | unsigned long t; | 77 | unsigned long t; |
| 78 | char tmp[32]; | ||
| 79 | unsigned limit = (1 << 16) - 1; | 78 | unsigned limit = (1 << 16) - 1; |
| 80 | int err; | 79 | int err; |
| 81 | 80 | ||
| 82 | if (*ppos || count >= sizeof(tmp) - 1) | 81 | if (*ppos) |
| 83 | return -EINVAL; | ||
| 84 | |||
| 85 | if (copy_from_user(tmp, buf, count)) | ||
| 86 | return -EINVAL; | 82 | return -EINVAL; |
| 87 | 83 | ||
| 88 | tmp[count] = '\0'; | 84 | err = kstrtoul_from_user(buf, count, 0, &t); |
| 89 | |||
| 90 | err = strict_strtoul(tmp, 0, &t); | ||
| 91 | if (err) | 85 | if (err) |
| 92 | return err; | 86 | return err; |
| 93 | 87 | ||
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index df5ac048dc74..334e0b18a014 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
| @@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, | |||
| 775 | static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, | 775 | static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, |
| 776 | struct kstat *stat) | 776 | struct kstat *stat) |
| 777 | { | 777 | { |
| 778 | unsigned int blkbits; | ||
| 779 | |||
| 778 | stat->dev = inode->i_sb->s_dev; | 780 | stat->dev = inode->i_sb->s_dev; |
| 779 | stat->ino = attr->ino; | 781 | stat->ino = attr->ino; |
| 780 | stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); | 782 | stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); |
| @@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, | |||
| 790 | stat->ctime.tv_nsec = attr->ctimensec; | 792 | stat->ctime.tv_nsec = attr->ctimensec; |
| 791 | stat->size = attr->size; | 793 | stat->size = attr->size; |
| 792 | stat->blocks = attr->blocks; | 794 | stat->blocks = attr->blocks; |
| 793 | stat->blksize = (1 << inode->i_blkbits); | 795 | |
| 796 | if (attr->blksize != 0) | ||
| 797 | blkbits = ilog2(attr->blksize); | ||
| 798 | else | ||
| 799 | blkbits = inode->i_sb->s_blocksize_bits; | ||
| 800 | |||
| 801 | stat->blksize = 1 << blkbits; | ||
| 794 | } | 802 | } |
| 795 | 803 | ||
| 796 | static int fuse_do_getattr(struct inode *inode, struct kstat *stat, | 804 | static int fuse_do_getattr(struct inode *inode, struct kstat *stat, |
| @@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, | |||
| 863 | if (stat) { | 871 | if (stat) { |
| 864 | generic_fillattr(inode, stat); | 872 | generic_fillattr(inode, stat); |
| 865 | stat->mode = fi->orig_i_mode; | 873 | stat->mode = fi->orig_i_mode; |
| 874 | stat->ino = fi->orig_ino; | ||
| 866 | } | 875 | } |
| 867 | } | 876 | } |
| 868 | 877 | ||
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9562109d3a87..b321a688cde7 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
| @@ -2173,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
| 2173 | return ret; | 2173 | return ret; |
| 2174 | } | 2174 | } |
| 2175 | 2175 | ||
| 2176 | long fuse_file_fallocate(struct file *file, int mode, loff_t offset, | ||
| 2177 | loff_t length) | ||
| 2178 | { | ||
| 2179 | struct fuse_file *ff = file->private_data; | ||
| 2180 | struct fuse_conn *fc = ff->fc; | ||
| 2181 | struct fuse_req *req; | ||
| 2182 | struct fuse_fallocate_in inarg = { | ||
| 2183 | .fh = ff->fh, | ||
| 2184 | .offset = offset, | ||
| 2185 | .length = length, | ||
| 2186 | .mode = mode | ||
| 2187 | }; | ||
| 2188 | int err; | ||
| 2189 | |||
| 2190 | if (fc->no_fallocate) | ||
| 2191 | return -EOPNOTSUPP; | ||
| 2192 | |||
| 2193 | req = fuse_get_req(fc); | ||
| 2194 | if (IS_ERR(req)) | ||
| 2195 | return PTR_ERR(req); | ||
| 2196 | |||
| 2197 | req->in.h.opcode = FUSE_FALLOCATE; | ||
| 2198 | req->in.h.nodeid = ff->nodeid; | ||
| 2199 | req->in.numargs = 1; | ||
| 2200 | req->in.args[0].size = sizeof(inarg); | ||
| 2201 | req->in.args[0].value = &inarg; | ||
| 2202 | fuse_request_send(fc, req); | ||
| 2203 | err = req->out.h.error; | ||
| 2204 | if (err == -ENOSYS) { | ||
| 2205 | fc->no_fallocate = 1; | ||
| 2206 | err = -EOPNOTSUPP; | ||
| 2207 | } | ||
| 2208 | fuse_put_request(fc, req); | ||
| 2209 | |||
| 2210 | return err; | ||
| 2211 | } | ||
| 2212 | EXPORT_SYMBOL_GPL(fuse_file_fallocate); | ||
| 2213 | |||
| 2176 | static const struct file_operations fuse_file_operations = { | 2214 | static const struct file_operations fuse_file_operations = { |
| 2177 | .llseek = fuse_file_llseek, | 2215 | .llseek = fuse_file_llseek, |
| 2178 | .read = do_sync_read, | 2216 | .read = do_sync_read, |
| @@ -2190,6 +2228,7 @@ static const struct file_operations fuse_file_operations = { | |||
| 2190 | .unlocked_ioctl = fuse_file_ioctl, | 2228 | .unlocked_ioctl = fuse_file_ioctl, |
| 2191 | .compat_ioctl = fuse_file_compat_ioctl, | 2229 | .compat_ioctl = fuse_file_compat_ioctl, |
| 2192 | .poll = fuse_file_poll, | 2230 | .poll = fuse_file_poll, |
| 2231 | .fallocate = fuse_file_fallocate, | ||
| 2193 | }; | 2232 | }; |
| 2194 | 2233 | ||
| 2195 | static const struct file_operations fuse_direct_io_file_operations = { | 2234 | static const struct file_operations fuse_direct_io_file_operations = { |
| @@ -2206,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = { | |||
| 2206 | .unlocked_ioctl = fuse_file_ioctl, | 2245 | .unlocked_ioctl = fuse_file_ioctl, |
| 2207 | .compat_ioctl = fuse_file_compat_ioctl, | 2246 | .compat_ioctl = fuse_file_compat_ioctl, |
| 2208 | .poll = fuse_file_poll, | 2247 | .poll = fuse_file_poll, |
| 2248 | .fallocate = fuse_file_fallocate, | ||
| 2209 | /* no splice_read */ | 2249 | /* no splice_read */ |
| 2210 | }; | 2250 | }; |
| 2211 | 2251 | ||
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 572cefc78012..771fb6322c07 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
| @@ -82,6 +82,9 @@ struct fuse_inode { | |||
| 82 | preserve the original mode */ | 82 | preserve the original mode */ |
| 83 | umode_t orig_i_mode; | 83 | umode_t orig_i_mode; |
| 84 | 84 | ||
| 85 | /** 64 bit inode number */ | ||
| 86 | u64 orig_ino; | ||
| 87 | |||
| 85 | /** Version of last attribute change */ | 88 | /** Version of last attribute change */ |
| 86 | u64 attr_version; | 89 | u64 attr_version; |
| 87 | 90 | ||
| @@ -478,6 +481,9 @@ struct fuse_conn { | |||
| 478 | /** Are BSD file locking primitives not implemented by fs? */ | 481 | /** Are BSD file locking primitives not implemented by fs? */ |
| 479 | unsigned no_flock:1; | 482 | unsigned no_flock:1; |
| 480 | 483 | ||
| 484 | /** Is fallocate not implemented by fs? */ | ||
| 485 | unsigned no_fallocate:1; | ||
| 486 | |||
| 481 | /** The number of requests waiting for completion */ | 487 | /** The number of requests waiting for completion */ |
| 482 | atomic_t num_waiting; | 488 | atomic_t num_waiting; |
| 483 | 489 | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 42678a33b7bb..1cd61652018c 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
| @@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) | |||
| 91 | fi->nlookup = 0; | 91 | fi->nlookup = 0; |
| 92 | fi->attr_version = 0; | 92 | fi->attr_version = 0; |
| 93 | fi->writectr = 0; | 93 | fi->writectr = 0; |
| 94 | fi->orig_ino = 0; | ||
| 94 | INIT_LIST_HEAD(&fi->write_files); | 95 | INIT_LIST_HEAD(&fi->write_files); |
| 95 | INIT_LIST_HEAD(&fi->queued_writes); | 96 | INIT_LIST_HEAD(&fi->queued_writes); |
| 96 | INIT_LIST_HEAD(&fi->writepages); | 97 | INIT_LIST_HEAD(&fi->writepages); |
| @@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) | |||
| 139 | return 0; | 140 | return 0; |
| 140 | } | 141 | } |
| 141 | 142 | ||
| 143 | /* | ||
| 144 | * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down | ||
| 145 | * so that it will fit. | ||
| 146 | */ | ||
| 147 | static ino_t fuse_squash_ino(u64 ino64) | ||
| 148 | { | ||
| 149 | ino_t ino = (ino_t) ino64; | ||
| 150 | if (sizeof(ino_t) < sizeof(u64)) | ||
| 151 | ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8; | ||
| 152 | return ino; | ||
| 153 | } | ||
| 154 | |||
| 142 | void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | 155 | void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, |
| 143 | u64 attr_valid) | 156 | u64 attr_valid) |
| 144 | { | 157 | { |
| @@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | |||
| 148 | fi->attr_version = ++fc->attr_version; | 161 | fi->attr_version = ++fc->attr_version; |
| 149 | fi->i_time = attr_valid; | 162 | fi->i_time = attr_valid; |
| 150 | 163 | ||
| 151 | inode->i_ino = attr->ino; | 164 | inode->i_ino = fuse_squash_ino(attr->ino); |
| 152 | inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); | 165 | inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); |
| 153 | set_nlink(inode, attr->nlink); | 166 | set_nlink(inode, attr->nlink); |
| 154 | inode->i_uid = attr->uid; | 167 | inode->i_uid = attr->uid; |
| @@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | |||
| 174 | fi->orig_i_mode = inode->i_mode; | 187 | fi->orig_i_mode = inode->i_mode; |
| 175 | if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) | 188 | if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) |
| 176 | inode->i_mode &= ~S_ISVTX; | 189 | inode->i_mode &= ~S_ISVTX; |
| 190 | |||
| 191 | fi->orig_ino = attr->ino; | ||
| 177 | } | 192 | } |
| 178 | 193 | ||
| 179 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, | 194 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, |
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 970659daa323..23ff18fe080a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
| 18 | #include <linux/sunrpc/svcauth_gss.h> | 18 | #include <linux/sunrpc/svcauth_gss.h> |
| 19 | #include <linux/sunrpc/bc_xprt.h> | 19 | #include <linux/sunrpc/bc_xprt.h> |
| 20 | #include <linux/nsproxy.h> | ||
| 21 | 20 | ||
| 22 | #include <net/inet_sock.h> | 21 | #include <net/inet_sock.h> |
| 23 | 22 | ||
| @@ -107,7 +106,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
| 107 | { | 106 | { |
| 108 | int ret; | 107 | int ret; |
| 109 | 108 | ||
| 110 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET, | 109 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET, |
| 111 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 110 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
| 112 | if (ret <= 0) | 111 | if (ret <= 0) |
| 113 | goto out_err; | 112 | goto out_err; |
| @@ -115,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
| 115 | dprintk("NFS: Callback listener port = %u (af %u)\n", | 114 | dprintk("NFS: Callback listener port = %u (af %u)\n", |
| 116 | nfs_callback_tcpport, PF_INET); | 115 | nfs_callback_tcpport, PF_INET); |
| 117 | 116 | ||
| 118 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6, | 117 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6, |
| 119 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 118 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
| 120 | if (ret > 0) { | 119 | if (ret > 0) { |
| 121 | nfs_callback_tcpport6 = ret; | 120 | nfs_callback_tcpport6 = ret; |
| @@ -184,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
| 184 | * fore channel connection. | 183 | * fore channel connection. |
| 185 | * Returns the input port (0) and sets the svc_serv bc_xprt on success | 184 | * Returns the input port (0) and sets the svc_serv bc_xprt on success |
| 186 | */ | 185 | */ |
| 187 | ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0, | 186 | ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0, |
| 188 | SVC_SOCK_ANONYMOUS); | 187 | SVC_SOCK_ANONYMOUS); |
| 189 | if (ret < 0) { | 188 | if (ret < 0) { |
| 190 | rqstp = ERR_PTR(ret); | 189 | rqstp = ERR_PTR(ret); |
| @@ -254,7 +253,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt) | |||
| 254 | char svc_name[12]; | 253 | char svc_name[12]; |
| 255 | int ret = 0; | 254 | int ret = 0; |
| 256 | int minorversion_setup; | 255 | int minorversion_setup; |
| 257 | struct net *net = current->nsproxy->net_ns; | 256 | struct net *net = &init_net; |
| 258 | 257 | ||
| 259 | mutex_lock(&nfs_callback_mutex); | 258 | mutex_lock(&nfs_callback_mutex); |
| 260 | if (cb_info->users++ || cb_info->task != NULL) { | 259 | if (cb_info->users++ || cb_info->task != NULL) { |
| @@ -330,7 +329,7 @@ void nfs_callback_down(int minorversion) | |||
| 330 | cb_info->users--; | 329 | cb_info->users--; |
| 331 | if (cb_info->users == 0 && cb_info->task != NULL) { | 330 | if (cb_info->users == 0 && cb_info->task != NULL) { |
| 332 | kthread_stop(cb_info->task); | 331 | kthread_stop(cb_info->task); |
| 333 | svc_shutdown_net(cb_info->serv, current->nsproxy->net_ns); | 332 | svc_shutdown_net(cb_info->serv, &init_net); |
| 334 | svc_exit_thread(cb_info->rqst); | 333 | svc_exit_thread(cb_info->rqst); |
| 335 | cb_info->serv = NULL; | 334 | cb_info->serv = NULL; |
| 336 | cb_info->rqst = NULL; | 335 | cb_info->rqst = NULL; |
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 95bfc243992c..e64b01d2a338 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c | |||
| @@ -455,9 +455,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, | |||
| 455 | args->csa_nrclists = ntohl(*p++); | 455 | args->csa_nrclists = ntohl(*p++); |
| 456 | args->csa_rclists = NULL; | 456 | args->csa_rclists = NULL; |
| 457 | if (args->csa_nrclists) { | 457 | if (args->csa_nrclists) { |
| 458 | args->csa_rclists = kmalloc(args->csa_nrclists * | 458 | args->csa_rclists = kmalloc_array(args->csa_nrclists, |
| 459 | sizeof(*args->csa_rclists), | 459 | sizeof(*args->csa_rclists), |
| 460 | GFP_KERNEL); | 460 | GFP_KERNEL); |
| 461 | if (unlikely(args->csa_rclists == NULL)) | 461 | if (unlikely(args->csa_rclists == NULL)) |
| 462 | goto out; | 462 | goto out; |
| 463 | 463 | ||
| @@ -696,7 +696,7 @@ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, | |||
| 696 | const struct cb_sequenceres *res) | 696 | const struct cb_sequenceres *res) |
| 697 | { | 697 | { |
| 698 | __be32 *p; | 698 | __be32 *p; |
| 699 | unsigned status = res->csr_status; | 699 | __be32 status = res->csr_status; |
| 700 | 700 | ||
| 701 | if (unlikely(status != 0)) | 701 | if (unlikely(status != 0)) |
| 702 | goto out; | 702 | goto out; |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 7d108753af81..17ba6b995659 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
| @@ -544,8 +544,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init, | |||
| 544 | 544 | ||
| 545 | smp_rmb(); | 545 | smp_rmb(); |
| 546 | 546 | ||
| 547 | BUG_ON(clp->cl_cons_state != NFS_CS_READY); | ||
| 548 | |||
| 549 | dprintk("<-- %s found nfs_client %p for %s\n", | 547 | dprintk("<-- %s found nfs_client %p for %s\n", |
| 550 | __func__, clp, cl_init->hostname ?: ""); | 548 | __func__, clp, cl_init->hostname ?: ""); |
| 551 | return clp; | 549 | return clp; |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index ad2775d3e219..3168f6e3d4d4 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
| @@ -523,9 +523,9 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) | |||
| 523 | nfs_list_remove_request(req); | 523 | nfs_list_remove_request(req); |
| 524 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { | 524 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { |
| 525 | /* Note the rewrite will go through mds */ | 525 | /* Note the rewrite will go through mds */ |
| 526 | kref_get(&req->wb_kref); | ||
| 527 | nfs_mark_request_commit(req, NULL, &cinfo); | 526 | nfs_mark_request_commit(req, NULL, &cinfo); |
| 528 | } | 527 | } else |
| 528 | nfs_release_request(req); | ||
| 529 | nfs_unlock_and_release_request(req); | 529 | nfs_unlock_and_release_request(req); |
| 530 | } | 530 | } |
| 531 | 531 | ||
| @@ -716,12 +716,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
| 716 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) | 716 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) |
| 717 | bit = NFS_IOHDR_NEED_RESCHED; | 717 | bit = NFS_IOHDR_NEED_RESCHED; |
| 718 | else if (dreq->flags == 0) { | 718 | else if (dreq->flags == 0) { |
| 719 | memcpy(&dreq->verf, &req->wb_verf, | 719 | memcpy(&dreq->verf, hdr->verf, |
| 720 | sizeof(dreq->verf)); | 720 | sizeof(dreq->verf)); |
| 721 | bit = NFS_IOHDR_NEED_COMMIT; | 721 | bit = NFS_IOHDR_NEED_COMMIT; |
| 722 | dreq->flags = NFS_ODIRECT_DO_COMMIT; | 722 | dreq->flags = NFS_ODIRECT_DO_COMMIT; |
| 723 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { | 723 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { |
| 724 | if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) { | 724 | if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) { |
| 725 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; | 725 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; |
| 726 | bit = NFS_IOHDR_NEED_RESCHED; | 726 | bit = NFS_IOHDR_NEED_RESCHED; |
| 727 | } else | 727 | } else |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index c6827f93ab57..cc5900ac61b5 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
| @@ -295,7 +295,7 @@ is_ds_client(struct nfs_client *clp) | |||
| 295 | 295 | ||
| 296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; | 296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; |
| 297 | 297 | ||
| 298 | extern const u32 nfs4_fattr_bitmap[2]; | 298 | extern const u32 nfs4_fattr_bitmap[3]; |
| 299 | extern const u32 nfs4_statfs_bitmap[2]; | 299 | extern const u32 nfs4_statfs_bitmap[2]; |
| 300 | extern const u32 nfs4_pathconf_bitmap[2]; | 300 | extern const u32 nfs4_pathconf_bitmap[2]; |
| 301 | extern const u32 nfs4_fsinfo_bitmap[3]; | 301 | extern const u32 nfs4_fsinfo_bitmap[3]; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d48dbefa0e71..15fc7e4664ed 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -105,6 +105,8 @@ static int nfs4_map_errors(int err) | |||
| 105 | return -EINVAL; | 105 | return -EINVAL; |
| 106 | case -NFS4ERR_SHARE_DENIED: | 106 | case -NFS4ERR_SHARE_DENIED: |
| 107 | return -EACCES; | 107 | return -EACCES; |
| 108 | case -NFS4ERR_MINOR_VERS_MISMATCH: | ||
| 109 | return -EPROTONOSUPPORT; | ||
| 108 | default: | 110 | default: |
| 109 | dprintk("%s could not handle NFSv4 error %d\n", | 111 | dprintk("%s could not handle NFSv4 error %d\n", |
| 110 | __func__, -err); | 112 | __func__, -err); |
| @@ -116,7 +118,7 @@ static int nfs4_map_errors(int err) | |||
| 116 | /* | 118 | /* |
| 117 | * This is our standard bitmap for GETATTR requests. | 119 | * This is our standard bitmap for GETATTR requests. |
| 118 | */ | 120 | */ |
| 119 | const u32 nfs4_fattr_bitmap[2] = { | 121 | const u32 nfs4_fattr_bitmap[3] = { |
| 120 | FATTR4_WORD0_TYPE | 122 | FATTR4_WORD0_TYPE |
| 121 | | FATTR4_WORD0_CHANGE | 123 | | FATTR4_WORD0_CHANGE |
| 122 | | FATTR4_WORD0_SIZE | 124 | | FATTR4_WORD0_SIZE |
| @@ -133,6 +135,24 @@ const u32 nfs4_fattr_bitmap[2] = { | |||
| 133 | | FATTR4_WORD1_TIME_MODIFY | 135 | | FATTR4_WORD1_TIME_MODIFY |
| 134 | }; | 136 | }; |
| 135 | 137 | ||
| 138 | static const u32 nfs4_pnfs_open_bitmap[3] = { | ||
| 139 | FATTR4_WORD0_TYPE | ||
| 140 | | FATTR4_WORD0_CHANGE | ||
| 141 | | FATTR4_WORD0_SIZE | ||
| 142 | | FATTR4_WORD0_FSID | ||
| 143 | | FATTR4_WORD0_FILEID, | ||
| 144 | FATTR4_WORD1_MODE | ||
| 145 | | FATTR4_WORD1_NUMLINKS | ||
| 146 | | FATTR4_WORD1_OWNER | ||
| 147 | | FATTR4_WORD1_OWNER_GROUP | ||
| 148 | | FATTR4_WORD1_RAWDEV | ||
| 149 | | FATTR4_WORD1_SPACE_USED | ||
| 150 | | FATTR4_WORD1_TIME_ACCESS | ||
| 151 | | FATTR4_WORD1_TIME_METADATA | ||
| 152 | | FATTR4_WORD1_TIME_MODIFY, | ||
| 153 | FATTR4_WORD2_MDSTHRESHOLD | ||
| 154 | }; | ||
| 155 | |||
| 136 | const u32 nfs4_statfs_bitmap[2] = { | 156 | const u32 nfs4_statfs_bitmap[2] = { |
| 137 | FATTR4_WORD0_FILES_AVAIL | 157 | FATTR4_WORD0_FILES_AVAIL |
| 138 | | FATTR4_WORD0_FILES_FREE | 158 | | FATTR4_WORD0_FILES_FREE |
| @@ -844,6 +864,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, | |||
| 844 | p->o_arg.name = &dentry->d_name; | 864 | p->o_arg.name = &dentry->d_name; |
| 845 | p->o_arg.server = server; | 865 | p->o_arg.server = server; |
| 846 | p->o_arg.bitmask = server->attr_bitmask; | 866 | p->o_arg.bitmask = server->attr_bitmask; |
| 867 | p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; | ||
| 847 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; | 868 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; |
| 848 | if (attrs != NULL && attrs->ia_valid != 0) { | 869 | if (attrs != NULL && attrs->ia_valid != 0) { |
| 849 | __be32 verf[2]; | 870 | __be32 verf[2]; |
| @@ -1820,6 +1841,7 @@ static int _nfs4_do_open(struct inode *dir, | |||
| 1820 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); | 1841 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); |
| 1821 | if (!opendata->f_attr.mdsthreshold) | 1842 | if (!opendata->f_attr.mdsthreshold) |
| 1822 | goto err_opendata_put; | 1843 | goto err_opendata_put; |
| 1844 | opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; | ||
| 1823 | } | 1845 | } |
| 1824 | if (dentry->d_inode != NULL) | 1846 | if (dentry->d_inode != NULL) |
| 1825 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); | 1847 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); |
| @@ -1880,6 +1902,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, | |||
| 1880 | struct nfs4_state *res; | 1902 | struct nfs4_state *res; |
| 1881 | int status; | 1903 | int status; |
| 1882 | 1904 | ||
| 1905 | fmode &= FMODE_READ|FMODE_WRITE; | ||
| 1883 | do { | 1906 | do { |
| 1884 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, | 1907 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, |
| 1885 | &res, ctx_th); | 1908 | &res, ctx_th); |
| @@ -2526,6 +2549,14 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
| 2526 | 2549 | ||
| 2527 | nfs_fattr_init(fattr); | 2550 | nfs_fattr_init(fattr); |
| 2528 | 2551 | ||
| 2552 | /* Deal with open(O_TRUNC) */ | ||
| 2553 | if (sattr->ia_valid & ATTR_OPEN) | ||
| 2554 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
| 2555 | |||
| 2556 | /* Optimization: if the end result is no change, don't RPC */ | ||
| 2557 | if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) | ||
| 2558 | return 0; | ||
| 2559 | |||
| 2529 | /* Search for an existing open(O_WRITE) file */ | 2560 | /* Search for an existing open(O_WRITE) file */ |
| 2530 | if (sattr->ia_valid & ATTR_FILE) { | 2561 | if (sattr->ia_valid & ATTR_FILE) { |
| 2531 | struct nfs_open_context *ctx; | 2562 | struct nfs_open_context *ctx; |
| @@ -2537,10 +2568,6 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
| 2537 | } | 2568 | } |
| 2538 | } | 2569 | } |
| 2539 | 2570 | ||
| 2540 | /* Deal with open(O_TRUNC) */ | ||
| 2541 | if (sattr->ia_valid & ATTR_OPEN) | ||
| 2542 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
| 2543 | |||
| 2544 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); | 2571 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); |
| 2545 | if (status == 0) | 2572 | if (status == 0) |
| 2546 | nfs_setattr_update_inode(inode, sattr); | 2573 | nfs_setattr_update_inode(inode, sattr); |
| @@ -5275,7 +5302,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, | |||
| 5275 | 5302 | ||
| 5276 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5303 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
| 5277 | if (status) | 5304 | if (status) |
| 5278 | pr_warn("NFS: Got error %d from the server %s on " | 5305 | dprintk("NFS: Got error %d from the server %s on " |
| 5279 | "DESTROY_CLIENTID.", status, clp->cl_hostname); | 5306 | "DESTROY_CLIENTID.", status, clp->cl_hostname); |
| 5280 | return status; | 5307 | return status; |
| 5281 | } | 5308 | } |
| @@ -5746,8 +5773,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session, | |||
| 5746 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5773 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
| 5747 | 5774 | ||
| 5748 | if (status) | 5775 | if (status) |
| 5749 | printk(KERN_WARNING | 5776 | dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " |
| 5750 | "NFS: Got error %d from the server on DESTROY_SESSION. " | ||
| 5751 | "Session has been destroyed regardless...\n", status); | 5777 | "Session has been destroyed regardless...\n", status); |
| 5752 | 5778 | ||
| 5753 | dprintk("<-- nfs4_proc_destroy_session\n"); | 5779 | dprintk("<-- nfs4_proc_destroy_session\n"); |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c679b9ecef63..f38300e9f171 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
| @@ -244,6 +244,16 @@ static int nfs4_begin_drain_session(struct nfs_client *clp) | |||
| 244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); | 244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); |
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | static void nfs41_finish_session_reset(struct nfs_client *clp) | ||
| 248 | { | ||
| 249 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | ||
| 250 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | ||
| 251 | /* create_session negotiated new slot table */ | ||
| 252 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
| 253 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
| 254 | nfs41_setup_state_renewal(clp); | ||
| 255 | } | ||
| 256 | |||
| 247 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) | 257 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) |
| 248 | { | 258 | { |
| 249 | int status; | 259 | int status; |
| @@ -259,8 +269,7 @@ do_confirm: | |||
| 259 | status = nfs4_proc_create_session(clp, cred); | 269 | status = nfs4_proc_create_session(clp, cred); |
| 260 | if (status != 0) | 270 | if (status != 0) |
| 261 | goto out; | 271 | goto out; |
| 262 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | 272 | nfs41_finish_session_reset(clp); |
| 263 | nfs41_setup_state_renewal(clp); | ||
| 264 | nfs_mark_client_ready(clp, NFS_CS_READY); | 273 | nfs_mark_client_ready(clp, NFS_CS_READY); |
| 265 | out: | 274 | out: |
| 266 | return status; | 275 | return status; |
| @@ -1772,16 +1781,9 @@ static int nfs4_reset_session(struct nfs_client *clp) | |||
| 1772 | status = nfs4_handle_reclaim_lease_error(clp, status); | 1781 | status = nfs4_handle_reclaim_lease_error(clp, status); |
| 1773 | goto out; | 1782 | goto out; |
| 1774 | } | 1783 | } |
| 1775 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | 1784 | nfs41_finish_session_reset(clp); |
| 1776 | /* create_session negotiated new slot table */ | ||
| 1777 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
| 1778 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
| 1779 | dprintk("%s: session reset was successful for server %s!\n", | 1785 | dprintk("%s: session reset was successful for server %s!\n", |
| 1780 | __func__, clp->cl_hostname); | 1786 | __func__, clp->cl_hostname); |
| 1781 | |||
| 1782 | /* Let the state manager reestablish state */ | ||
| 1783 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) | ||
| 1784 | nfs41_setup_state_renewal(clp); | ||
| 1785 | out: | 1787 | out: |
| 1786 | if (cred) | 1788 | if (cred) |
| 1787 | put_rpccred(cred); | 1789 | put_rpccred(cred); |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ee4a74db95d0..18fae29b0301 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
| @@ -1198,12 +1198,13 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c | |||
| 1198 | } | 1198 | } |
| 1199 | 1199 | ||
| 1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, | 1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, |
| 1201 | const u32 *open_bitmap, | ||
| 1201 | struct compound_hdr *hdr) | 1202 | struct compound_hdr *hdr) |
| 1202 | { | 1203 | { |
| 1203 | encode_getattr_three(xdr, | 1204 | encode_getattr_three(xdr, |
| 1204 | bitmask[0] & nfs4_fattr_bitmap[0], | 1205 | bitmask[0] & open_bitmap[0], |
| 1205 | bitmask[1] & nfs4_fattr_bitmap[1], | 1206 | bitmask[1] & open_bitmap[1], |
| 1206 | bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD, | 1207 | bitmask[2] & open_bitmap[2], |
| 1207 | hdr); | 1208 | hdr); |
| 1208 | } | 1209 | } |
| 1209 | 1210 | ||
| @@ -2221,7 +2222,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
| 2221 | encode_putfh(xdr, args->fh, &hdr); | 2222 | encode_putfh(xdr, args->fh, &hdr); |
| 2222 | encode_open(xdr, args, &hdr); | 2223 | encode_open(xdr, args, &hdr); |
| 2223 | encode_getfh(xdr, &hdr); | 2224 | encode_getfh(xdr, &hdr); |
| 2224 | encode_getfattr_open(xdr, args->bitmask, &hdr); | 2225 | encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr); |
| 2225 | encode_nops(&hdr); | 2226 | encode_nops(&hdr); |
| 2226 | } | 2227 | } |
| 2227 | 2228 | ||
| @@ -4359,7 +4360,10 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
| 4359 | 4360 | ||
| 4360 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) | 4361 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) |
| 4361 | return -EIO; | 4362 | return -EIO; |
| 4362 | if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) { | 4363 | if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) { |
| 4364 | /* Did the server return an unrequested attribute? */ | ||
| 4365 | if (unlikely(res == NULL)) | ||
| 4366 | return -EREMOTEIO; | ||
| 4363 | p = xdr_inline_decode(xdr, 4); | 4367 | p = xdr_inline_decode(xdr, 4); |
| 4364 | if (unlikely(!p)) | 4368 | if (unlikely(!p)) |
| 4365 | goto out_overflow; | 4369 | goto out_overflow; |
| @@ -4372,6 +4376,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
| 4372 | __func__); | 4376 | __func__); |
| 4373 | 4377 | ||
| 4374 | status = decode_first_threshold_item4(xdr, res); | 4378 | status = decode_first_threshold_item4(xdr, res); |
| 4379 | bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD; | ||
| 4375 | } | 4380 | } |
| 4376 | return status; | 4381 | return status; |
| 4377 | out_overflow: | 4382 | out_overflow: |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 29fd23c0efdc..64f90d845f6a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
| @@ -365,7 +365,7 @@ static inline bool | |||
| 365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, | 365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, |
| 366 | struct nfs_server *nfss) | 366 | struct nfs_server *nfss) |
| 367 | { | 367 | { |
| 368 | return (dst && src && src->bm != 0 && | 368 | return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld && |
| 369 | nfss->pnfs_curr_ld->id == src->l_type); | 369 | nfss->pnfs_curr_ld->id == src->l_type); |
| 370 | } | 370 | } |
| 371 | 371 | ||
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index a706b6bcc286..617c7419a08e 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
| @@ -651,7 +651,7 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) | |||
| 651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 | 651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 |
| 652 | * as it is guaranteed to always return the file attributes | 652 | * as it is guaranteed to always return the file attributes |
| 653 | */ | 653 | */ |
| 654 | if (data->args.offset + data->args.count >= data->res.fattr->size) | 654 | if (data->args.offset + data->res.count >= data->res.fattr->size) |
| 655 | data->res.eof = 1; | 655 | data->res.eof = 1; |
| 656 | } | 656 | } |
| 657 | return 0; | 657 | return 0; |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ff656c022684..906f09c7d842 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
| @@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options, | |||
| 1867 | if (data == NULL) | 1867 | if (data == NULL) |
| 1868 | goto out_no_data; | 1868 | goto out_no_data; |
| 1869 | 1869 | ||
| 1870 | args->version = NFS_DEFAULT_VERSION; | ||
| 1870 | switch (data->version) { | 1871 | switch (data->version) { |
| 1871 | case 1: | 1872 | case 1: |
| 1872 | data->namlen = 0; | 1873 | data->namlen = 0; |
| @@ -2637,6 +2638,8 @@ static int nfs4_validate_mount_data(void *options, | |||
| 2637 | if (data == NULL) | 2638 | if (data == NULL) |
| 2638 | goto out_no_data; | 2639 | goto out_no_data; |
| 2639 | 2640 | ||
| 2641 | args->version = 4; | ||
| 2642 | |||
| 2640 | switch (data->version) { | 2643 | switch (data->version) { |
| 2641 | case 1: | 2644 | case 1: |
| 2642 | if (data->host_addrlen > sizeof(args->nfs_server.address)) | 2645 | if (data->host_addrlen > sizeof(args->nfs_server.address)) |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e6fe3d69d14c..4d6861c0dc14 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -80,6 +80,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void) | |||
| 80 | INIT_LIST_HEAD(&hdr->rpc_list); | 80 | INIT_LIST_HEAD(&hdr->rpc_list); |
| 81 | spin_lock_init(&hdr->lock); | 81 | spin_lock_init(&hdr->lock); |
| 82 | atomic_set(&hdr->refcnt, 0); | 82 | atomic_set(&hdr->refcnt, 0); |
| 83 | hdr->verf = &p->verf; | ||
| 83 | } | 84 | } |
| 84 | return p; | 85 | return p; |
| 85 | } | 86 | } |
| @@ -619,6 +620,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
| 619 | goto next; | 620 | goto next; |
| 620 | } | 621 | } |
| 621 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { | 622 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { |
| 623 | memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf)); | ||
| 622 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); | 624 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); |
| 623 | goto next; | 625 | goto next; |
| 624 | } | 626 | } |
| @@ -1255,15 +1257,14 @@ static void nfs_writeback_release_common(void *calldata) | |||
| 1255 | struct nfs_write_data *data = calldata; | 1257 | struct nfs_write_data *data = calldata; |
| 1256 | struct nfs_pgio_header *hdr = data->header; | 1258 | struct nfs_pgio_header *hdr = data->header; |
| 1257 | int status = data->task.tk_status; | 1259 | int status = data->task.tk_status; |
| 1258 | struct nfs_page *req = hdr->req; | ||
| 1259 | 1260 | ||
| 1260 | if ((status >= 0) && nfs_write_need_commit(data)) { | 1261 | if ((status >= 0) && nfs_write_need_commit(data)) { |
| 1261 | spin_lock(&hdr->lock); | 1262 | spin_lock(&hdr->lock); |
| 1262 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) | 1263 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) |
| 1263 | ; /* Do nothing */ | 1264 | ; /* Do nothing */ |
| 1264 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) | 1265 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) |
| 1265 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1266 | memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf)); |
| 1266 | else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) | 1267 | else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf))) |
| 1267 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); | 1268 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); |
| 1268 | spin_unlock(&hdr->lock); | 1269 | spin_unlock(&hdr->lock); |
| 1269 | } | 1270 | } |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 8fdc9ec5c5d3..94effd5bc4a1 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -900,7 +900,7 @@ static void free_session(struct kref *kref) | |||
| 900 | struct nfsd4_session *ses; | 900 | struct nfsd4_session *ses; |
| 901 | int mem; | 901 | int mem; |
| 902 | 902 | ||
| 903 | BUG_ON(!spin_is_locked(&client_lock)); | 903 | lockdep_assert_held(&client_lock); |
| 904 | ses = container_of(kref, struct nfsd4_session, se_ref); | 904 | ses = container_of(kref, struct nfsd4_session, se_ref); |
| 905 | nfsd4_del_conns(ses); | 905 | nfsd4_del_conns(ses); |
| 906 | spin_lock(&nfsd_drc_lock); | 906 | spin_lock(&nfsd_drc_lock); |
| @@ -1080,7 +1080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) | |||
| 1080 | static inline void | 1080 | static inline void |
| 1081 | free_client(struct nfs4_client *clp) | 1081 | free_client(struct nfs4_client *clp) |
| 1082 | { | 1082 | { |
| 1083 | BUG_ON(!spin_is_locked(&client_lock)); | 1083 | lockdep_assert_held(&client_lock); |
| 1084 | while (!list_empty(&clp->cl_sessions)) { | 1084 | while (!list_empty(&clp->cl_sessions)) { |
| 1085 | struct nfsd4_session *ses; | 1085 | struct nfsd4_session *ses; |
| 1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | 1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 616f41a7cde6..437195f204e1 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
| 1803 | rcu_read_lock(); | 1803 | rcu_read_lock(); |
| 1804 | file = fcheck_files(files, fd); | 1804 | file = fcheck_files(files, fd); |
| 1805 | if (file) { | 1805 | if (file) { |
| 1806 | unsigned i_mode, f_mode = file->f_mode; | 1806 | unsigned f_mode = file->f_mode; |
| 1807 | 1807 | ||
| 1808 | rcu_read_unlock(); | 1808 | rcu_read_unlock(); |
| 1809 | put_files_struct(files); | 1809 | put_files_struct(files); |
| @@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
| 1819 | inode->i_gid = GLOBAL_ROOT_GID; | 1819 | inode->i_gid = GLOBAL_ROOT_GID; |
| 1820 | } | 1820 | } |
| 1821 | 1821 | ||
| 1822 | i_mode = S_IFLNK; | 1822 | if (S_ISLNK(inode->i_mode)) { |
| 1823 | if (f_mode & FMODE_READ) | 1823 | unsigned i_mode = S_IFLNK; |
| 1824 | i_mode |= S_IRUSR | S_IXUSR; | 1824 | if (f_mode & FMODE_READ) |
| 1825 | if (f_mode & FMODE_WRITE) | 1825 | i_mode |= S_IRUSR | S_IXUSR; |
| 1826 | i_mode |= S_IWUSR | S_IXUSR; | 1826 | if (f_mode & FMODE_WRITE) |
| 1827 | inode->i_mode = i_mode; | 1827 | i_mode |= S_IWUSR | S_IXUSR; |
| 1828 | inode->i_mode = i_mode; | ||
| 1829 | } | ||
| 1828 | 1830 | ||
| 1829 | security_task_to_inode(task, inode); | 1831 | security_task_to_inode(task, inode); |
| 1830 | put_task_struct(task); | 1832 | put_task_struct(task); |
| @@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, | |||
| 1859 | ei = PROC_I(inode); | 1861 | ei = PROC_I(inode); |
| 1860 | ei->fd = fd; | 1862 | ei->fd = fd; |
| 1861 | 1863 | ||
| 1864 | inode->i_mode = S_IFLNK; | ||
| 1862 | inode->i_op = &proc_pid_link_inode_operations; | 1865 | inode->i_op = &proc_pid_link_inode_operations; |
| 1863 | inode->i_size = 64; | 1866 | inode->i_size = 64; |
| 1864 | ei->op.proc_get_link = proc_fd_link; | 1867 | ei->op.proc_get_link = proc_fd_link; |
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 685a83756b2b..84a7e6f3c046 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
| @@ -2918,6 +2918,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) | |||
| 2918 | struct dentry *dent; | 2918 | struct dentry *dent; |
| 2919 | struct ubifs_debug_info *d = c->dbg; | 2919 | struct ubifs_debug_info *d = c->dbg; |
| 2920 | 2920 | ||
| 2921 | if (!IS_ENABLED(DEBUG_FS)) | ||
| 2922 | return 0; | ||
| 2923 | |||
| 2921 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, | 2924 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, |
| 2922 | c->vi.ubi_num, c->vi.vol_id); | 2925 | c->vi.ubi_num, c->vi.vol_id); |
| 2923 | if (n == UBIFS_DFS_DIR_LEN) { | 2926 | if (n == UBIFS_DFS_DIR_LEN) { |
| @@ -3010,7 +3013,8 @@ out: | |||
| 3010 | */ | 3013 | */ |
| 3011 | void dbg_debugfs_exit_fs(struct ubifs_info *c) | 3014 | void dbg_debugfs_exit_fs(struct ubifs_info *c) |
| 3012 | { | 3015 | { |
| 3013 | debugfs_remove_recursive(c->dbg->dfs_dir); | 3016 | if (IS_ENABLED(DEBUG_FS)) |
| 3017 | debugfs_remove_recursive(c->dbg->dfs_dir); | ||
| 3014 | } | 3018 | } |
| 3015 | 3019 | ||
| 3016 | struct ubifs_global_debug_info ubifs_dbg; | 3020 | struct ubifs_global_debug_info ubifs_dbg; |
| @@ -3095,6 +3099,9 @@ int dbg_debugfs_init(void) | |||
| 3095 | const char *fname; | 3099 | const char *fname; |
| 3096 | struct dentry *dent; | 3100 | struct dentry *dent; |
| 3097 | 3101 | ||
| 3102 | if (!IS_ENABLED(DEBUG_FS)) | ||
| 3103 | return 0; | ||
| 3104 | |||
| 3098 | fname = "ubifs"; | 3105 | fname = "ubifs"; |
| 3099 | dent = debugfs_create_dir(fname, NULL); | 3106 | dent = debugfs_create_dir(fname, NULL); |
| 3100 | if (IS_ERR_OR_NULL(dent)) | 3107 | if (IS_ERR_OR_NULL(dent)) |
| @@ -3159,7 +3166,8 @@ out: | |||
| 3159 | */ | 3166 | */ |
| 3160 | void dbg_debugfs_exit(void) | 3167 | void dbg_debugfs_exit(void) |
| 3161 | { | 3168 | { |
| 3162 | debugfs_remove_recursive(dfs_rootdir); | 3169 | if (IS_ENABLED(DEBUG_FS)) |
| 3170 | debugfs_remove_recursive(dfs_rootdir); | ||
| 3163 | } | 3171 | } |
| 3164 | 3172 | ||
| 3165 | /** | 3173 | /** |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index b0d62820ada1..9e6e1c6eb60a 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
| @@ -440,8 +440,8 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | |||
| 440 | 440 | ||
| 441 | #else /* CONFIG_ACPI */ | 441 | #else /* CONFIG_ACPI */ |
| 442 | 442 | ||
| 443 | static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } | 443 | static inline int register_acpi_bus_type(void *bus) { return 0; } |
| 444 | static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } | 444 | static inline int unregister_acpi_bus_type(void *bus) { return 0; } |
| 445 | 445 | ||
| 446 | #endif /* CONFIG_ACPI */ | 446 | #endif /* CONFIG_ACPI */ |
| 447 | 447 | ||
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 2520a6e241dc..9f02005f217a 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _ASM_GENERIC_BUG_H | 2 | #define _ASM_GENERIC_BUG_H |
| 3 | 3 | ||
| 4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
| 5 | #include <linux/kernel.h> | ||
| 5 | 6 | ||
| 6 | #ifdef CONFIG_BUG | 7 | #ifdef CONFIG_BUG |
| 7 | 8 | ||
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 73e45600f95d..bac55c215113 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
| @@ -54,7 +54,7 @@ struct drm_mode_object { | |||
| 54 | struct drm_object_properties *properties; | 54 | struct drm_object_properties *properties; |
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | #define DRM_OBJECT_MAX_PROPERTY 16 | 57 | #define DRM_OBJECT_MAX_PROPERTY 24 |
| 58 | struct drm_object_properties { | 58 | struct drm_object_properties { |
| 59 | int count; | 59 | int count; |
| 60 | uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; | 60 | uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 58d0bdab68dd..81368ab6c611 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
| @@ -181,6 +181,7 @@ | |||
| 181 | {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 181 | {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
| 182 | {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 182 | {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
| 183 | {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 183 | {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
| 184 | {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | ||
| 184 | {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 185 | {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
| 185 | {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 186 | {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
| 186 | {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 187 | {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
| @@ -198,6 +199,7 @@ | |||
| 198 | {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 199 | {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
| 199 | {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 200 | {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
| 200 | {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 201 | {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
| 202 | {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | ||
| 201 | {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 203 | {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
| 202 | {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 204 | {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
| 203 | {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 205 | {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
| @@ -229,10 +231,11 @@ | |||
| 229 | {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 231 | {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 230 | {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 232 | {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
| 231 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 233 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
| 234 | {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
| 232 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 235 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 233 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 236 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 234 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 237 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 235 | {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 238 | {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
| 236 | {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 239 | {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
| 237 | {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 240 | {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
| 238 | {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 241 | {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
| @@ -531,6 +534,7 @@ | |||
| 531 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 534 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 532 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 535 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
| 533 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 536 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
| 537 | {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | ||
| 534 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 538 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 535 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 539 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 536 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 540 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| @@ -550,6 +554,7 @@ | |||
| 550 | {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 554 | {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 551 | {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 555 | {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 552 | {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 556 | {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 557 | {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 553 | {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 558 | {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 554 | {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 559 | {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 555 | {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 560 | {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| @@ -561,11 +566,19 @@ | |||
| 561 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 566 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 562 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 567 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 563 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 568 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 569 | {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 570 | {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 571 | {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 572 | {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 573 | {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 564 | {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 574 | {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 565 | {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 575 | {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 566 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 576 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 567 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 577 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 568 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 578 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
| 579 | {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 580 | {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 581 | {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
| 569 | {0, 0, 0} | 582 | {0, 0, 0} |
| 570 | 583 | ||
| 571 | #define r128_PCI_IDS \ | 584 | #define r128_PCI_IDS \ |
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index b6d7ce92eadd..68733587e700 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h | |||
| @@ -64,6 +64,7 @@ struct drm_exynos_gem_map_off { | |||
| 64 | * A structure for mapping buffer. | 64 | * A structure for mapping buffer. |
| 65 | * | 65 | * |
| 66 | * @handle: a handle to gem object created. | 66 | * @handle: a handle to gem object created. |
| 67 | * @pad: just padding to be 64-bit aligned. | ||
| 67 | * @size: memory size to be mapped. | 68 | * @size: memory size to be mapped. |
| 68 | * @mapped: having user virtual address mmaped. | 69 | * @mapped: having user virtual address mmaped. |
| 69 | * - this variable would be filled by exynos gem module | 70 | * - this variable would be filled by exynos gem module |
| @@ -72,7 +73,8 @@ struct drm_exynos_gem_map_off { | |||
| 72 | */ | 73 | */ |
| 73 | struct drm_exynos_gem_mmap { | 74 | struct drm_exynos_gem_mmap { |
| 74 | unsigned int handle; | 75 | unsigned int handle; |
| 75 | unsigned int size; | 76 | unsigned int pad; |
| 77 | uint64_t size; | ||
| 76 | uint64_t mapped; | 78 | uint64_t mapped; |
| 77 | }; | 79 | }; |
| 78 | 80 | ||
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 81e803e90aa4..acba894374a1 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
| @@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch, | |||
| 132 | struct clock_event_device *evt); | 132 | struct clock_event_device *evt); |
| 133 | extern void clockevents_register_device(struct clock_event_device *dev); | 133 | extern void clockevents_register_device(struct clock_event_device *dev); |
| 134 | 134 | ||
| 135 | extern void clockevents_config(struct clock_event_device *dev, u32 freq); | ||
| 135 | extern void clockevents_config_and_register(struct clock_event_device *dev, | 136 | extern void clockevents_config_and_register(struct clock_event_device *dev, |
| 136 | u32 freq, unsigned long min_delta, | 137 | u32 freq, unsigned long min_delta, |
| 137 | unsigned long max_delta); | 138 | unsigned long max_delta); |
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index e988037abd2a..51a90b7f2d60 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
| @@ -1,8 +1,6 @@ | |||
| 1 | #ifndef _LINUX_COMPACTION_H | 1 | #ifndef _LINUX_COMPACTION_H |
| 2 | #define _LINUX_COMPACTION_H | 2 | #define _LINUX_COMPACTION_H |
| 3 | 3 | ||
| 4 | #include <linux/node.h> | ||
| 5 | |||
| 6 | /* Return values for compact_zone() and try_to_compact_pages() */ | 4 | /* Return values for compact_zone() and try_to_compact_pages() */ |
| 7 | /* compaction didn't start as it was not possible or direct reclaim was more suitable */ | 5 | /* compaction didn't start as it was not possible or direct reclaim was more suitable */ |
| 8 | #define COMPACT_SKIPPED 0 | 6 | #define COMPACT_SKIPPED 0 |
| @@ -13,23 +11,6 @@ | |||
| 13 | /* The full zone was compacted */ | 11 | /* The full zone was compacted */ |
| 14 | #define COMPACT_COMPLETE 3 | 12 | #define COMPACT_COMPLETE 3 |
| 15 | 13 | ||
| 16 | /* | ||
| 17 | * compaction supports three modes | ||
| 18 | * | ||
| 19 | * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans | ||
| 20 | * MIGRATE_MOVABLE pageblocks as migration sources and targets. | ||
| 21 | * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans | ||
| 22 | * MIGRATE_MOVABLE pageblocks as migration sources. | ||
| 23 | * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration | ||
| 24 | * targets and convers them to MIGRATE_MOVABLE if possible | ||
| 25 | * COMPACT_SYNC uses synchronous migration and scans all pageblocks | ||
| 26 | */ | ||
| 27 | enum compact_mode { | ||
| 28 | COMPACT_ASYNC_MOVABLE, | ||
| 29 | COMPACT_ASYNC_UNMOVABLE, | ||
| 30 | COMPACT_SYNC, | ||
| 31 | }; | ||
| 32 | |||
| 33 | #ifdef CONFIG_COMPACTION | 14 | #ifdef CONFIG_COMPACTION |
| 34 | extern int sysctl_compact_memory; | 15 | extern int sysctl_compact_memory; |
| 35 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, | 16 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, |
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h new file mode 100644 index 000000000000..0e4e2eec5c1d --- /dev/null +++ b/include/linux/frontswap.h | |||
| @@ -0,0 +1,127 @@ | |||
| 1 | #ifndef _LINUX_FRONTSWAP_H | ||
| 2 | #define _LINUX_FRONTSWAP_H | ||
| 3 | |||
| 4 | #include <linux/swap.h> | ||
| 5 | #include <linux/mm.h> | ||
| 6 | #include <linux/bitops.h> | ||
| 7 | |||
| 8 | struct frontswap_ops { | ||
| 9 | void (*init)(unsigned); | ||
| 10 | int (*store)(unsigned, pgoff_t, struct page *); | ||
| 11 | int (*load)(unsigned, pgoff_t, struct page *); | ||
| 12 | void (*invalidate_page)(unsigned, pgoff_t); | ||
| 13 | void (*invalidate_area)(unsigned); | ||
| 14 | }; | ||
| 15 | |||
| 16 | extern bool frontswap_enabled; | ||
| 17 | extern struct frontswap_ops | ||
| 18 | frontswap_register_ops(struct frontswap_ops *ops); | ||
| 19 | extern void frontswap_shrink(unsigned long); | ||
| 20 | extern unsigned long frontswap_curr_pages(void); | ||
| 21 | extern void frontswap_writethrough(bool); | ||
| 22 | |||
| 23 | extern void __frontswap_init(unsigned type); | ||
| 24 | extern int __frontswap_store(struct page *page); | ||
| 25 | extern int __frontswap_load(struct page *page); | ||
| 26 | extern void __frontswap_invalidate_page(unsigned, pgoff_t); | ||
| 27 | extern void __frontswap_invalidate_area(unsigned); | ||
| 28 | |||
| 29 | #ifdef CONFIG_FRONTSWAP | ||
| 30 | |||
| 31 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) | ||
| 32 | { | ||
| 33 | bool ret = false; | ||
| 34 | |||
| 35 | if (frontswap_enabled && sis->frontswap_map) | ||
| 36 | ret = test_bit(offset, sis->frontswap_map); | ||
| 37 | return ret; | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) | ||
| 41 | { | ||
| 42 | if (frontswap_enabled && sis->frontswap_map) | ||
| 43 | set_bit(offset, sis->frontswap_map); | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) | ||
| 47 | { | ||
| 48 | if (frontswap_enabled && sis->frontswap_map) | ||
| 49 | clear_bit(offset, sis->frontswap_map); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline void frontswap_map_set(struct swap_info_struct *p, | ||
| 53 | unsigned long *map) | ||
| 54 | { | ||
| 55 | p->frontswap_map = map; | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) | ||
| 59 | { | ||
| 60 | return p->frontswap_map; | ||
| 61 | } | ||
| 62 | #else | ||
| 63 | /* all inline routines become no-ops and all externs are ignored */ | ||
| 64 | |||
| 65 | #define frontswap_enabled (0) | ||
| 66 | |||
| 67 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) | ||
| 68 | { | ||
| 69 | return false; | ||
| 70 | } | ||
| 71 | |||
| 72 | static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) | ||
| 73 | { | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) | ||
| 77 | { | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline void frontswap_map_set(struct swap_info_struct *p, | ||
| 81 | unsigned long *map) | ||
| 82 | { | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) | ||
| 86 | { | ||
| 87 | return NULL; | ||
| 88 | } | ||
| 89 | #endif | ||
| 90 | |||
| 91 | static inline int frontswap_store(struct page *page) | ||
| 92 | { | ||
| 93 | int ret = -1; | ||
| 94 | |||
| 95 | if (frontswap_enabled) | ||
| 96 | ret = __frontswap_store(page); | ||
| 97 | return ret; | ||
| 98 | } | ||
| 99 | |||
| 100 | static inline int frontswap_load(struct page *page) | ||
| 101 | { | ||
| 102 | int ret = -1; | ||
| 103 | |||
| 104 | if (frontswap_enabled) | ||
| 105 | ret = __frontswap_load(page); | ||
| 106 | return ret; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) | ||
| 110 | { | ||
| 111 | if (frontswap_enabled) | ||
| 112 | __frontswap_invalidate_page(type, offset); | ||
| 113 | } | ||
| 114 | |||
| 115 | static inline void frontswap_invalidate_area(unsigned type) | ||
| 116 | { | ||
| 117 | if (frontswap_enabled) | ||
| 118 | __frontswap_invalidate_area(type); | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline void frontswap_init(unsigned type) | ||
| 122 | { | ||
| 123 | if (frontswap_enabled) | ||
| 124 | __frontswap_init(type); | ||
| 125 | } | ||
| 126 | |||
| 127 | #endif /* _LINUX_FRONTSWAP_H */ | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index 51978ed43e97..17fd887c798f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -802,13 +802,14 @@ struct inode { | |||
| 802 | unsigned int __i_nlink; | 802 | unsigned int __i_nlink; |
| 803 | }; | 803 | }; |
| 804 | dev_t i_rdev; | 804 | dev_t i_rdev; |
| 805 | loff_t i_size; | ||
| 805 | struct timespec i_atime; | 806 | struct timespec i_atime; |
| 806 | struct timespec i_mtime; | 807 | struct timespec i_mtime; |
| 807 | struct timespec i_ctime; | 808 | struct timespec i_ctime; |
| 808 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ | 809 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ |
| 809 | unsigned short i_bytes; | 810 | unsigned short i_bytes; |
| 811 | unsigned int i_blkbits; | ||
| 810 | blkcnt_t i_blocks; | 812 | blkcnt_t i_blocks; |
| 811 | loff_t i_size; | ||
| 812 | 813 | ||
| 813 | #ifdef __NEED_I_SIZE_ORDERED | 814 | #ifdef __NEED_I_SIZE_ORDERED |
| 814 | seqcount_t i_size_seqcount; | 815 | seqcount_t i_size_seqcount; |
| @@ -828,9 +829,8 @@ struct inode { | |||
| 828 | struct list_head i_dentry; | 829 | struct list_head i_dentry; |
| 829 | struct rcu_head i_rcu; | 830 | struct rcu_head i_rcu; |
| 830 | }; | 831 | }; |
| 831 | atomic_t i_count; | ||
| 832 | unsigned int i_blkbits; | ||
| 833 | u64 i_version; | 832 | u64 i_version; |
| 833 | atomic_t i_count; | ||
| 834 | atomic_t i_dio_count; | 834 | atomic_t i_dio_count; |
| 835 | atomic_t i_writecount; | 835 | atomic_t i_writecount; |
| 836 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ | 836 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ |
diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 8f2ab8fef929..9303348965fb 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h | |||
| @@ -54,6 +54,9 @@ | |||
| 54 | * 7.18 | 54 | * 7.18 |
| 55 | * - add FUSE_IOCTL_DIR flag | 55 | * - add FUSE_IOCTL_DIR flag |
| 56 | * - add FUSE_NOTIFY_DELETE | 56 | * - add FUSE_NOTIFY_DELETE |
| 57 | * | ||
| 58 | * 7.19 | ||
| 59 | * - add FUSE_FALLOCATE | ||
| 57 | */ | 60 | */ |
| 58 | 61 | ||
| 59 | #ifndef _LINUX_FUSE_H | 62 | #ifndef _LINUX_FUSE_H |
| @@ -85,7 +88,7 @@ | |||
| 85 | #define FUSE_KERNEL_VERSION 7 | 88 | #define FUSE_KERNEL_VERSION 7 |
| 86 | 89 | ||
| 87 | /** Minor version number of this interface */ | 90 | /** Minor version number of this interface */ |
| 88 | #define FUSE_KERNEL_MINOR_VERSION 18 | 91 | #define FUSE_KERNEL_MINOR_VERSION 19 |
| 89 | 92 | ||
| 90 | /** The node ID of the root inode */ | 93 | /** The node ID of the root inode */ |
| 91 | #define FUSE_ROOT_ID 1 | 94 | #define FUSE_ROOT_ID 1 |
| @@ -278,6 +281,7 @@ enum fuse_opcode { | |||
| 278 | FUSE_POLL = 40, | 281 | FUSE_POLL = 40, |
| 279 | FUSE_NOTIFY_REPLY = 41, | 282 | FUSE_NOTIFY_REPLY = 41, |
| 280 | FUSE_BATCH_FORGET = 42, | 283 | FUSE_BATCH_FORGET = 42, |
| 284 | FUSE_FALLOCATE = 43, | ||
| 281 | 285 | ||
| 282 | /* CUSE specific operations */ | 286 | /* CUSE specific operations */ |
| 283 | CUSE_INIT = 4096, | 287 | CUSE_INIT = 4096, |
| @@ -571,6 +575,14 @@ struct fuse_notify_poll_wakeup_out { | |||
| 571 | __u64 kh; | 575 | __u64 kh; |
| 572 | }; | 576 | }; |
| 573 | 577 | ||
| 578 | struct fuse_fallocate_in { | ||
| 579 | __u64 fh; | ||
| 580 | __u64 offset; | ||
| 581 | __u64 length; | ||
| 582 | __u32 mode; | ||
| 583 | __u32 padding; | ||
| 584 | }; | ||
| 585 | |||
| 574 | struct fuse_in_header { | 586 | struct fuse_in_header { |
| 575 | __u32 len; | 587 | __u32 len; |
| 576 | __u32 opcode; | 588 | __u32 opcode; |
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h new file mode 100644 index 000000000000..a65c86429e84 --- /dev/null +++ b/include/linux/i2c-mux-pinctrl.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | /* | ||
| 2 | * i2c-mux-pinctrl platform data | ||
| 3 | * | ||
| 4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef _LINUX_I2C_MUX_PINCTRL_H | ||
| 20 | #define _LINUX_I2C_MUX_PINCTRL_H | ||
| 21 | |||
| 22 | /** | ||
| 23 | * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl | ||
| 24 | * @parent_bus_num: Parent I2C bus number | ||
| 25 | * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic. | ||
| 26 | * @bus_count: Number of child busses. Also the number of elements in | ||
| 27 | * @pinctrl_states | ||
| 28 | * @pinctrl_states: The names of the pinctrl state to select for each child bus | ||
| 29 | * @pinctrl_state_idle: The pinctrl state to select when no child bus is being | ||
| 30 | * accessed. If NULL, the most recently used pinctrl state will be left | ||
| 31 | * selected. | ||
| 32 | */ | ||
| 33 | struct i2c_mux_pinctrl_platform_data { | ||
| 34 | int parent_bus_num; | ||
| 35 | int base_bus_num; | ||
| 36 | int bus_count; | ||
| 37 | const char **pinctrl_states; | ||
| 38 | const char *pinctrl_state_idle; | ||
| 39 | }; | ||
| 40 | |||
| 41 | #endif | ||
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e4baff5f7ff4..9e65eff6af3b 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -149,6 +149,7 @@ extern struct cred init_cred; | |||
| 149 | .normal_prio = MAX_PRIO-20, \ | 149 | .normal_prio = MAX_PRIO-20, \ |
| 150 | .policy = SCHED_NORMAL, \ | 150 | .policy = SCHED_NORMAL, \ |
| 151 | .cpus_allowed = CPU_MASK_ALL, \ | 151 | .cpus_allowed = CPU_MASK_ALL, \ |
| 152 | .nr_cpus_allowed= NR_CPUS, \ | ||
| 152 | .mm = NULL, \ | 153 | .mm = NULL, \ |
| 153 | .active_mm = &init_mm, \ | 154 | .active_mm = &init_mm, \ |
| 154 | .se = { \ | 155 | .se = { \ |
| @@ -157,7 +158,6 @@ extern struct cred init_cred; | |||
| 157 | .rt = { \ | 158 | .rt = { \ |
| 158 | .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ | 159 | .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ |
| 159 | .time_slice = RR_TIMESLICE, \ | 160 | .time_slice = RR_TIMESLICE, \ |
| 160 | .nr_cpus_allowed = NR_CPUS, \ | ||
| 161 | }, \ | 161 | }, \ |
| 162 | .tasks = LIST_HEAD_INIT(tsk.tasks), \ | 162 | .tasks = LIST_HEAD_INIT(tsk.tasks), \ |
| 163 | INIT_PUSHABLE_TASKS(tsk) \ | 163 | INIT_PUSHABLE_TASKS(tsk) \ |
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 1b14d25162cb..d6a58065c09c 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
| @@ -128,7 +128,7 @@ struct kparam_array | |||
| 128 | * The ops can have NULL set or get functions. | 128 | * The ops can have NULL set or get functions. |
| 129 | */ | 129 | */ |
| 130 | #define module_param_cb(name, ops, arg, perm) \ | 130 | #define module_param_cb(name, ops, arg, perm) \ |
| 131 | __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0) | 131 | __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) |
| 132 | 132 | ||
| 133 | /** | 133 | /** |
| 134 | * <level>_param_cb - general callback for a module/cmdline parameter | 134 | * <level>_param_cb - general callback for a module/cmdline parameter |
| @@ -192,7 +192,7 @@ struct kparam_array | |||
| 192 | { (void *)set, (void *)get }; \ | 192 | { (void *)set, (void *)get }; \ |
| 193 | __module_param_call(MODULE_PARAM_PREFIX, \ | 193 | __module_param_call(MODULE_PARAM_PREFIX, \ |
| 194 | name, &__param_ops_##name, arg, \ | 194 | name, &__param_ops_##name, arg, \ |
| 195 | (perm) + sizeof(__check_old_set_param(set))*0, 0) | 195 | (perm) + sizeof(__check_old_set_param(set))*0, -1) |
| 196 | 196 | ||
| 197 | /* We don't get oldget: it's often a new-style param_get_uint, etc. */ | 197 | /* We don't get oldget: it's often a new-style param_get_uint, etc. */ |
| 198 | static inline int | 198 | static inline int |
| @@ -272,7 +272,7 @@ static inline void __kernel_param_unlock(void) | |||
| 272 | */ | 272 | */ |
| 273 | #define core_param(name, var, type, perm) \ | 273 | #define core_param(name, var, type, perm) \ |
| 274 | param_check_##type(name, &(var)); \ | 274 | param_check_##type(name, &(var)); \ |
| 275 | __module_param_call("", name, ¶m_ops_##type, &var, perm, 0) | 275 | __module_param_call("", name, ¶m_ops_##type, &var, perm, -1) |
| 276 | #endif /* !MODULE */ | 276 | #endif /* !MODULE */ |
| 277 | 277 | ||
| 278 | /** | 278 | /** |
| @@ -290,7 +290,7 @@ static inline void __kernel_param_unlock(void) | |||
| 290 | = { len, string }; \ | 290 | = { len, string }; \ |
| 291 | __module_param_call(MODULE_PARAM_PREFIX, name, \ | 291 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
| 292 | ¶m_ops_string, \ | 292 | ¶m_ops_string, \ |
| 293 | .str = &__param_string_##name, perm, 0); \ | 293 | .str = &__param_string_##name, perm, -1); \ |
| 294 | __MODULE_PARM_TYPE(name, "string") | 294 | __MODULE_PARM_TYPE(name, "string") |
| 295 | 295 | ||
| 296 | /** | 296 | /** |
| @@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); | |||
| 432 | __module_param_call(MODULE_PARAM_PREFIX, name, \ | 432 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
| 433 | ¶m_array_ops, \ | 433 | ¶m_array_ops, \ |
| 434 | .arr = &__param_arr_##name, \ | 434 | .arr = &__param_arr_##name, \ |
| 435 | perm, 0); \ | 435 | perm, -1); \ |
| 436 | __MODULE_PARM_TYPE(name, "array of " #type) | 436 | __MODULE_PARM_TYPE(name, "array of " #type) |
| 437 | 437 | ||
| 438 | extern struct kernel_param_ops param_array_ops; | 438 | extern struct kernel_param_ops param_array_ops; |
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h index abb1650940d2..826fc5807577 100644 --- a/include/linux/netfilter/xt_HMARK.h +++ b/include/linux/netfilter/xt_HMARK.h | |||
| @@ -27,7 +27,12 @@ union hmark_ports { | |||
| 27 | __u16 src; | 27 | __u16 src; |
| 28 | __u16 dst; | 28 | __u16 dst; |
| 29 | } p16; | 29 | } p16; |
| 30 | struct { | ||
| 31 | __be16 src; | ||
| 32 | __be16 dst; | ||
| 33 | } b16; | ||
| 30 | __u32 v32; | 34 | __u32 v32; |
| 35 | __be32 b32; | ||
| 31 | }; | 36 | }; |
| 32 | 37 | ||
| 33 | struct xt_hmark_info { | 38 | struct xt_hmark_info { |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index d1a7bf51c326..8aadd90b808a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -348,6 +348,7 @@ struct nfs_openargs { | |||
| 348 | const struct qstr * name; | 348 | const struct qstr * name; |
| 349 | const struct nfs_server *server; /* Needed for ID mapping */ | 349 | const struct nfs_server *server; /* Needed for ID mapping */ |
| 350 | const u32 * bitmask; | 350 | const u32 * bitmask; |
| 351 | const u32 * open_bitmap; | ||
| 351 | __u32 claim; | 352 | __u32 claim; |
| 352 | struct nfs4_sequence_args seq_args; | 353 | struct nfs4_sequence_args seq_args; |
| 353 | }; | 354 | }; |
| @@ -1236,6 +1237,7 @@ struct nfs_pgio_header { | |||
| 1236 | struct list_head rpc_list; | 1237 | struct list_head rpc_list; |
| 1237 | atomic_t refcnt; | 1238 | atomic_t refcnt; |
| 1238 | struct nfs_page *req; | 1239 | struct nfs_page *req; |
| 1240 | struct nfs_writeverf *verf; | ||
| 1239 | struct pnfs_layout_segment *lseg; | 1241 | struct pnfs_layout_segment *lseg; |
| 1240 | loff_t io_start; | 1242 | loff_t io_start; |
| 1241 | const struct rpc_call_ops *mds_ops; | 1243 | const struct rpc_call_ops *mds_ops; |
| @@ -1273,6 +1275,7 @@ struct nfs_write_data { | |||
| 1273 | struct nfs_write_header { | 1275 | struct nfs_write_header { |
| 1274 | struct nfs_pgio_header header; | 1276 | struct nfs_pgio_header header; |
| 1275 | struct nfs_write_data rpc_data; | 1277 | struct nfs_write_data rpc_data; |
| 1278 | struct nfs_writeverf verf; | ||
| 1276 | }; | 1279 | }; |
| 1277 | 1280 | ||
| 1278 | struct nfs_mds_commit_info { | 1281 | struct nfs_mds_commit_info { |
diff --git a/include/linux/pci.h b/include/linux/pci.h index d8c379dba6ad..fefb4e19bf6a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -176,6 +176,8 @@ enum pci_dev_flags { | |||
| 176 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, | 176 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, |
| 177 | /* Provide indication device is assigned by a Virtual Machine Manager */ | 177 | /* Provide indication device is assigned by a Virtual Machine Manager */ |
| 178 | PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, | 178 | PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, |
| 179 | /* Device causes system crash if in D3 during S3 sleep */ | ||
| 180 | PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8, | ||
| 179 | }; | 181 | }; |
| 180 | 182 | ||
| 181 | enum pci_irq_reroute_variant { | 183 | enum pci_irq_reroute_variant { |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f32578634d9d..45db49f64bb4 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -555,6 +555,8 @@ enum perf_event_type { | |||
| 555 | PERF_RECORD_MAX, /* non-ABI */ | 555 | PERF_RECORD_MAX, /* non-ABI */ |
| 556 | }; | 556 | }; |
| 557 | 557 | ||
| 558 | #define PERF_MAX_STACK_DEPTH 127 | ||
| 559 | |||
| 558 | enum perf_callchain_context { | 560 | enum perf_callchain_context { |
| 559 | PERF_CONTEXT_HV = (__u64)-32, | 561 | PERF_CONTEXT_HV = (__u64)-32, |
| 560 | PERF_CONTEXT_KERNEL = (__u64)-128, | 562 | PERF_CONTEXT_KERNEL = (__u64)-128, |
| @@ -609,8 +611,6 @@ struct perf_guest_info_callbacks { | |||
| 609 | #include <linux/sysfs.h> | 611 | #include <linux/sysfs.h> |
| 610 | #include <asm/local.h> | 612 | #include <asm/local.h> |
| 611 | 613 | ||
| 612 | #define PERF_MAX_STACK_DEPTH 255 | ||
| 613 | |||
| 614 | struct perf_callchain_entry { | 614 | struct perf_callchain_entry { |
| 615 | __u64 nr; | 615 | __u64 nr; |
| 616 | __u64 ip[PERF_MAX_STACK_DEPTH]; | 616 | __u64 ip[PERF_MAX_STACK_DEPTH]; |
diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 711e0a30aacc..3988012255dc 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h | |||
| @@ -127,8 +127,8 @@ | |||
| 127 | #define PR_SET_PTRACER 0x59616d61 | 127 | #define PR_SET_PTRACER 0x59616d61 |
| 128 | # define PR_SET_PTRACER_ANY ((unsigned long)-1) | 128 | # define PR_SET_PTRACER_ANY ((unsigned long)-1) |
| 129 | 129 | ||
| 130 | #define PR_SET_CHILD_SUBREAPER 36 | 130 | #define PR_SET_CHILD_SUBREAPER 36 |
| 131 | #define PR_GET_CHILD_SUBREAPER 37 | 131 | #define PR_GET_CHILD_SUBREAPER 37 |
| 132 | 132 | ||
| 133 | /* | 133 | /* |
| 134 | * If no_new_privs is set, then operations that grant new privileges (i.e. | 134 | * If no_new_privs is set, then operations that grant new privileges (i.e. |
| @@ -142,7 +142,9 @@ | |||
| 142 | * asking selinux for a specific new context (e.g. with runcon) will result | 142 | * asking selinux for a specific new context (e.g. with runcon) will result |
| 143 | * in execve returning -EPERM. | 143 | * in execve returning -EPERM. |
| 144 | */ | 144 | */ |
| 145 | #define PR_SET_NO_NEW_PRIVS 38 | 145 | #define PR_SET_NO_NEW_PRIVS 38 |
| 146 | #define PR_GET_NO_NEW_PRIVS 39 | 146 | #define PR_GET_NO_NEW_PRIVS 39 |
| 147 | |||
| 148 | #define PR_GET_TID_ADDRESS 40 | ||
| 147 | 149 | ||
| 148 | #endif /* _LINUX_PRCTL_H */ | 150 | #endif /* _LINUX_PRCTL_H */ |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 0d04cd69ab9b..ffc444c38b0a 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) | |||
| 368 | iter->index++; | 368 | iter->index++; |
| 369 | if (likely(*slot)) | 369 | if (likely(*slot)) |
| 370 | return slot; | 370 | return slot; |
| 371 | if (flags & RADIX_TREE_ITER_CONTIG) | 371 | if (flags & RADIX_TREE_ITER_CONTIG) { |
| 372 | /* forbid switching to the next chunk */ | ||
| 373 | iter->next_index = 0; | ||
| 372 | break; | 374 | break; |
| 375 | } | ||
| 373 | } | 376 | } |
| 374 | } | 377 | } |
| 375 | return NULL; | 378 | return NULL; |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index adb5e5a38cae..854dc4c5c271 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -87,8 +87,9 @@ static inline void kfree_call_rcu(struct rcu_head *head, | |||
| 87 | 87 | ||
| 88 | #ifdef CONFIG_TINY_RCU | 88 | #ifdef CONFIG_TINY_RCU |
| 89 | 89 | ||
| 90 | static inline int rcu_needs_cpu(int cpu) | 90 | static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
| 91 | { | 91 | { |
| 92 | *delta_jiffies = ULONG_MAX; | ||
| 92 | return 0; | 93 | return 0; |
| 93 | } | 94 | } |
| 94 | 95 | ||
| @@ -96,8 +97,9 @@ static inline int rcu_needs_cpu(int cpu) | |||
| 96 | 97 | ||
| 97 | int rcu_preempt_needs_cpu(void); | 98 | int rcu_preempt_needs_cpu(void); |
| 98 | 99 | ||
| 99 | static inline int rcu_needs_cpu(int cpu) | 100 | static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
| 100 | { | 101 | { |
| 102 | *delta_jiffies = ULONG_MAX; | ||
| 101 | return rcu_preempt_needs_cpu(); | 103 | return rcu_preempt_needs_cpu(); |
| 102 | } | 104 | } |
| 103 | 105 | ||
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3c6083cde4fc..952b79339304 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | 32 | ||
| 33 | extern void rcu_init(void); | 33 | extern void rcu_init(void); |
| 34 | extern void rcu_note_context_switch(int cpu); | 34 | extern void rcu_note_context_switch(int cpu); |
| 35 | extern int rcu_needs_cpu(int cpu); | 35 | extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); |
| 36 | extern void rcu_cpu_stall_reset(void); | 36 | extern void rcu_cpu_stall_reset(void); |
| 37 | 37 | ||
| 38 | /* | 38 | /* |
diff --git a/include/linux/sched.h b/include/linux/sched.h index f34437e835a7..4059c0f33f07 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void); | |||
| 145 | 145 | ||
| 146 | 146 | ||
| 147 | extern void calc_global_load(unsigned long ticks); | 147 | extern void calc_global_load(unsigned long ticks); |
| 148 | extern void update_cpu_load_nohz(void); | ||
| 148 | 149 | ||
| 149 | extern unsigned long get_parent_ip(unsigned long addr); | 150 | extern unsigned long get_parent_ip(unsigned long addr); |
| 150 | 151 | ||
| @@ -438,6 +439,7 @@ extern int get_dumpable(struct mm_struct *mm); | |||
| 438 | /* leave room for more dump flags */ | 439 | /* leave room for more dump flags */ |
| 439 | #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ | 440 | #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ |
| 440 | #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ | 441 | #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ |
| 442 | #define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ | ||
| 441 | 443 | ||
| 442 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) | 444 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) |
| 443 | 445 | ||
| @@ -875,6 +877,8 @@ struct sched_group_power { | |||
| 875 | * Number of busy cpus in this group. | 877 | * Number of busy cpus in this group. |
| 876 | */ | 878 | */ |
| 877 | atomic_t nr_busy_cpus; | 879 | atomic_t nr_busy_cpus; |
| 880 | |||
| 881 | unsigned long cpumask[0]; /* iteration mask */ | ||
| 878 | }; | 882 | }; |
| 879 | 883 | ||
| 880 | struct sched_group { | 884 | struct sched_group { |
| @@ -899,6 +903,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | |||
| 899 | return to_cpumask(sg->cpumask); | 903 | return to_cpumask(sg->cpumask); |
| 900 | } | 904 | } |
| 901 | 905 | ||
| 906 | /* | ||
| 907 | * cpumask masking which cpus in the group are allowed to iterate up the domain | ||
| 908 | * tree. | ||
| 909 | */ | ||
| 910 | static inline struct cpumask *sched_group_mask(struct sched_group *sg) | ||
| 911 | { | ||
| 912 | return to_cpumask(sg->sgp->cpumask); | ||
| 913 | } | ||
| 914 | |||
| 902 | /** | 915 | /** |
| 903 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. | 916 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. |
| 904 | * @group: The group whose first cpu is to be returned. | 917 | * @group: The group whose first cpu is to be returned. |
| @@ -1187,7 +1200,6 @@ struct sched_rt_entity { | |||
| 1187 | struct list_head run_list; | 1200 | struct list_head run_list; |
| 1188 | unsigned long timeout; | 1201 | unsigned long timeout; |
| 1189 | unsigned int time_slice; | 1202 | unsigned int time_slice; |
| 1190 | int nr_cpus_allowed; | ||
| 1191 | 1203 | ||
| 1192 | struct sched_rt_entity *back; | 1204 | struct sched_rt_entity *back; |
| 1193 | #ifdef CONFIG_RT_GROUP_SCHED | 1205 | #ifdef CONFIG_RT_GROUP_SCHED |
| @@ -1252,6 +1264,7 @@ struct task_struct { | |||
| 1252 | #endif | 1264 | #endif |
| 1253 | 1265 | ||
| 1254 | unsigned int policy; | 1266 | unsigned int policy; |
| 1267 | int nr_cpus_allowed; | ||
| 1255 | cpumask_t cpus_allowed; | 1268 | cpumask_t cpus_allowed; |
| 1256 | 1269 | ||
| 1257 | #ifdef CONFIG_PREEMPT_RCU | 1270 | #ifdef CONFIG_PREEMPT_RCU |
diff --git a/include/linux/swap.h b/include/linux/swap.h index b6661933e252..c84ec68eaec9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -197,6 +197,10 @@ struct swap_info_struct { | |||
| 197 | struct block_device *bdev; /* swap device or bdev of swap file */ | 197 | struct block_device *bdev; /* swap device or bdev of swap file */ |
| 198 | struct file *swap_file; /* seldom referenced */ | 198 | struct file *swap_file; /* seldom referenced */ |
| 199 | unsigned int old_block_size; /* seldom referenced */ | 199 | unsigned int old_block_size; /* seldom referenced */ |
| 200 | #ifdef CONFIG_FRONTSWAP | ||
| 201 | unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ | ||
| 202 | atomic_t frontswap_pages; /* frontswap pages in-use counter */ | ||
| 203 | #endif | ||
| 200 | }; | 204 | }; |
| 201 | 205 | ||
| 202 | struct swap_list_t { | 206 | struct swap_list_t { |
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h new file mode 100644 index 000000000000..e282624e8c10 --- /dev/null +++ b/include/linux/swapfile.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | #ifndef _LINUX_SWAPFILE_H | ||
| 2 | #define _LINUX_SWAPFILE_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * these were static in swapfile.c but frontswap.c needs them and we don't | ||
| 6 | * want to expose them to the dozens of source files that include swap.h | ||
| 7 | */ | ||
| 8 | extern spinlock_t swap_lock; | ||
| 9 | extern struct swap_list_t swap_list; | ||
| 10 | extern struct swap_info_struct *swap_info[]; | ||
| 11 | extern int try_to_unuse(unsigned int, bool, unsigned long); | ||
| 12 | |||
| 13 | #endif /* _LINUX_SWAPFILE_H */ | ||
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 792d16d9cbc7..47ead515c811 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
| @@ -9,13 +9,15 @@ | |||
| 9 | * get good packing density in that tree, so the index should be dense in | 9 | * get good packing density in that tree, so the index should be dense in |
| 10 | * the low-order bits. | 10 | * the low-order bits. |
| 11 | * | 11 | * |
| 12 | * We arrange the `type' and `offset' fields so that `type' is at the five | 12 | * We arrange the `type' and `offset' fields so that `type' is at the seven |
| 13 | * high-order bits of the swp_entry_t and `offset' is right-aligned in the | 13 | * high-order bits of the swp_entry_t and `offset' is right-aligned in the |
| 14 | * remaining bits. | 14 | * remaining bits. Although `type' itself needs only five bits, we allow for |
| 15 | * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry(). | ||
| 15 | * | 16 | * |
| 16 | * swp_entry_t's are *never* stored anywhere in their arch-dependent format. | 17 | * swp_entry_t's are *never* stored anywhere in their arch-dependent format. |
| 17 | */ | 18 | */ |
| 18 | #define SWP_TYPE_SHIFT(e) (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT) | 19 | #define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ |
| 20 | (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) | ||
| 19 | #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) | 21 | #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) |
| 20 | 22 | ||
| 21 | /* | 23 | /* |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4c5b63283377..5f359dbfcdce 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -69,16 +69,16 @@ union tcp_word_hdr { | |||
| 69 | #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) | 69 | #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) |
| 70 | 70 | ||
| 71 | enum { | 71 | enum { |
| 72 | TCP_FLAG_CWR = __cpu_to_be32(0x00800000), | 72 | TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000), |
| 73 | TCP_FLAG_ECE = __cpu_to_be32(0x00400000), | 73 | TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000), |
| 74 | TCP_FLAG_URG = __cpu_to_be32(0x00200000), | 74 | TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000), |
| 75 | TCP_FLAG_ACK = __cpu_to_be32(0x00100000), | 75 | TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000), |
| 76 | TCP_FLAG_PSH = __cpu_to_be32(0x00080000), | 76 | TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000), |
| 77 | TCP_FLAG_RST = __cpu_to_be32(0x00040000), | 77 | TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000), |
| 78 | TCP_FLAG_SYN = __cpu_to_be32(0x00020000), | 78 | TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000), |
| 79 | TCP_FLAG_FIN = __cpu_to_be32(0x00010000), | 79 | TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000), |
| 80 | TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), | 80 | TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000), |
| 81 | TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) | 81 | TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000) |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 7f855d50cdf5..49b3ac29726a 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
| @@ -126,8 +126,6 @@ struct usb_hcd { | |||
| 126 | unsigned wireless:1; /* Wireless USB HCD */ | 126 | unsigned wireless:1; /* Wireless USB HCD */ |
| 127 | unsigned authorized_default:1; | 127 | unsigned authorized_default:1; |
| 128 | unsigned has_tt:1; /* Integrated TT in root hub */ | 128 | unsigned has_tt:1; /* Integrated TT in root hub */ |
| 129 | unsigned broken_pci_sleep:1; /* Don't put the | ||
| 130 | controller in PCI-D3 for system sleep */ | ||
| 131 | 129 | ||
| 132 | unsigned int irq; /* irq allocated */ | 130 | unsigned int irq; /* irq allocated */ |
| 133 | void __iomem *regs; /* device memory/io */ | 131 | void __iomem *regs; /* device memory/io */ |
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b455c7c212eb..60da41fe9dc2 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h | |||
| @@ -12,6 +12,9 @@ | |||
| 12 | enum vga_switcheroo_state { | 12 | enum vga_switcheroo_state { |
| 13 | VGA_SWITCHEROO_OFF, | 13 | VGA_SWITCHEROO_OFF, |
| 14 | VGA_SWITCHEROO_ON, | 14 | VGA_SWITCHEROO_ON, |
| 15 | /* below are referred only from vga_switcheroo_get_client_state() */ | ||
| 16 | VGA_SWITCHEROO_INIT, | ||
| 17 | VGA_SWITCHEROO_NOT_FOUND, | ||
| 15 | }; | 18 | }; |
| 16 | 19 | ||
| 17 | enum vga_switcheroo_client_id { | 20 | enum vga_switcheroo_client_id { |
| @@ -50,6 +53,8 @@ void vga_switcheroo_unregister_handler(void); | |||
| 50 | 53 | ||
| 51 | int vga_switcheroo_process_delayed_switch(void); | 54 | int vga_switcheroo_process_delayed_switch(void); |
| 52 | 55 | ||
| 56 | int vga_switcheroo_get_client_state(struct pci_dev *dev); | ||
| 57 | |||
| 53 | #else | 58 | #else |
| 54 | 59 | ||
| 55 | static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} | 60 | static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} |
| @@ -62,5 +67,7 @@ static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, | |||
| 62 | int id, bool active) { return 0; } | 67 | int id, bool active) { return 0; } |
| 63 | static inline void vga_switcheroo_unregister_handler(void) {} | 68 | static inline void vga_switcheroo_unregister_handler(void) {} |
| 64 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } | 69 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } |
| 70 | static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } | ||
| 71 | |||
| 65 | 72 | ||
| 66 | #endif | 73 | #endif |
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index b94765e38e80..2040bff945d4 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
| @@ -40,7 +40,10 @@ struct inet_peer { | |||
| 40 | u32 pmtu_orig; | 40 | u32 pmtu_orig; |
| 41 | u32 pmtu_learned; | 41 | u32 pmtu_learned; |
| 42 | struct inetpeer_addr_base redirect_learned; | 42 | struct inetpeer_addr_base redirect_learned; |
| 43 | struct list_head gc_list; | 43 | union { |
| 44 | struct list_head gc_list; | ||
| 45 | struct rcu_head gc_rcu; | ||
| 46 | }; | ||
| 44 | /* | 47 | /* |
| 45 | * Once inet_peer is queued for deletion (refcnt == -1), following fields | 48 | * Once inet_peer is queued for deletion (refcnt == -1), following fields |
| 46 | * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp | 49 | * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp |
diff --git a/include/net/route.h b/include/net/route.h index ed2b78e2375d..98705468ac03 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
| @@ -130,9 +130,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, | |||
| 130 | { | 130 | { |
| 131 | struct flowi4 fl4 = { | 131 | struct flowi4 fl4 = { |
| 132 | .flowi4_oif = oif, | 132 | .flowi4_oif = oif, |
| 133 | .flowi4_tos = tos, | ||
| 133 | .daddr = daddr, | 134 | .daddr = daddr, |
| 134 | .saddr = saddr, | 135 | .saddr = saddr, |
| 135 | .flowi4_tos = tos, | ||
| 136 | }; | 136 | }; |
| 137 | return ip_route_output_key(net, &fl4); | 137 | return ip_route_output_key(net, &fl4); |
| 138 | } | 138 | } |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 55ce96b53b09..9d7d54a00e63 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -220,13 +220,16 @@ struct tcf_proto { | |||
| 220 | 220 | ||
| 221 | struct qdisc_skb_cb { | 221 | struct qdisc_skb_cb { |
| 222 | unsigned int pkt_len; | 222 | unsigned int pkt_len; |
| 223 | unsigned char data[24]; | 223 | u16 bond_queue_mapping; |
| 224 | u16 _pad; | ||
| 225 | unsigned char data[20]; | ||
| 224 | }; | 226 | }; |
| 225 | 227 | ||
| 226 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) | 228 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
| 227 | { | 229 | { |
| 228 | struct qdisc_skb_cb *qcb; | 230 | struct qdisc_skb_cb *qcb; |
| 229 | BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz); | 231 | |
| 232 | BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); | ||
| 230 | BUILD_BUG_ON(sizeof(qcb->data) < sz); | 233 | BUILD_BUG_ON(sizeof(qcb->data) < sz); |
| 231 | } | 234 | } |
| 232 | 235 | ||
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 116959933f46..c78a23333c4f 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
| @@ -47,6 +47,7 @@ struct target_core_fabric_ops { | |||
| 47 | */ | 47 | */ |
| 48 | int (*check_stop_free)(struct se_cmd *); | 48 | int (*check_stop_free)(struct se_cmd *); |
| 49 | void (*release_cmd)(struct se_cmd *); | 49 | void (*release_cmd)(struct se_cmd *); |
| 50 | void (*put_session)(struct se_session *); | ||
| 50 | /* | 51 | /* |
| 51 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. | 52 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. |
| 52 | */ | 53 | */ |
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 1480900c511c..d274734b2aa4 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h | |||
| @@ -289,6 +289,7 @@ TRACE_EVENT(rcu_dyntick, | |||
| 289 | * "In holdoff": Nothing to do, holding off after unsuccessful attempt. | 289 | * "In holdoff": Nothing to do, holding off after unsuccessful attempt. |
| 290 | * "Begin holdoff": Attempt failed, don't retry until next jiffy. | 290 | * "Begin holdoff": Attempt failed, don't retry until next jiffy. |
| 291 | * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. | 291 | * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. |
| 292 | * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks. | ||
| 292 | * "More callbacks": Still more callbacks, try again to clear them out. | 293 | * "More callbacks": Still more callbacks, try again to clear them out. |
| 293 | * "Callbacks drained": All callbacks processed, off to dyntick idle! | 294 | * "Callbacks drained": All callbacks processed, off to dyntick idle! |
| 294 | * "Timer": Timer fired to cause CPU to continue processing callbacks. | 295 | * "Timer": Timer fired to cause CPU to continue processing callbacks. |
diff --git a/init/main.c b/init/main.c index 1ca6b32c4828..b5cc0a7c4708 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -508,7 +508,7 @@ asmlinkage void __init start_kernel(void) | |||
| 508 | parse_early_param(); | 508 | parse_early_param(); |
| 509 | parse_args("Booting kernel", static_command_line, __start___param, | 509 | parse_args("Booting kernel", static_command_line, __start___param, |
| 510 | __stop___param - __start___param, | 510 | __stop___param - __start___param, |
| 511 | 0, 0, &unknown_bootoption); | 511 | -1, -1, &unknown_bootoption); |
| 512 | 512 | ||
| 513 | jump_label_init(); | 513 | jump_label_init(); |
| 514 | 514 | ||
| @@ -755,13 +755,8 @@ static void __init do_initcalls(void) | |||
| 755 | { | 755 | { |
| 756 | int level; | 756 | int level; |
| 757 | 757 | ||
| 758 | for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) { | 758 | for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) |
| 759 | pr_info("initlevel:%d=%s, %d registered initcalls\n", | ||
| 760 | level, initcall_level_names[level], | ||
| 761 | (int) (initcall_levels[level+1] | ||
| 762 | - initcall_levels[level])); | ||
| 763 | do_initcall_level(level); | 759 | do_initcall_level(level); |
| 764 | } | ||
| 765 | } | 760 | } |
| 766 | 761 | ||
| 767 | /* | 762 | /* |
| @@ -393,6 +393,16 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) | |||
| 393 | return sfd->file->f_op->fsync(sfd->file, start, end, datasync); | 393 | return sfd->file->f_op->fsync(sfd->file, start, end, datasync); |
| 394 | } | 394 | } |
| 395 | 395 | ||
| 396 | static long shm_fallocate(struct file *file, int mode, loff_t offset, | ||
| 397 | loff_t len) | ||
| 398 | { | ||
| 399 | struct shm_file_data *sfd = shm_file_data(file); | ||
| 400 | |||
| 401 | if (!sfd->file->f_op->fallocate) | ||
| 402 | return -EOPNOTSUPP; | ||
| 403 | return sfd->file->f_op->fallocate(file, mode, offset, len); | ||
| 404 | } | ||
| 405 | |||
| 396 | static unsigned long shm_get_unmapped_area(struct file *file, | 406 | static unsigned long shm_get_unmapped_area(struct file *file, |
| 397 | unsigned long addr, unsigned long len, unsigned long pgoff, | 407 | unsigned long addr, unsigned long len, unsigned long pgoff, |
| 398 | unsigned long flags) | 408 | unsigned long flags) |
| @@ -410,6 +420,7 @@ static const struct file_operations shm_file_operations = { | |||
| 410 | .get_unmapped_area = shm_get_unmapped_area, | 420 | .get_unmapped_area = shm_get_unmapped_area, |
| 411 | #endif | 421 | #endif |
| 412 | .llseek = noop_llseek, | 422 | .llseek = noop_llseek, |
| 423 | .fallocate = shm_fallocate, | ||
| 413 | }; | 424 | }; |
| 414 | 425 | ||
| 415 | static const struct file_operations shm_file_operations_huge = { | 426 | static const struct file_operations shm_file_operations_huge = { |
| @@ -418,6 +429,7 @@ static const struct file_operations shm_file_operations_huge = { | |||
| 418 | .release = shm_release, | 429 | .release = shm_release, |
| 419 | .get_unmapped_area = shm_get_unmapped_area, | 430 | .get_unmapped_area = shm_get_unmapped_area, |
| 420 | .llseek = noop_llseek, | 431 | .llseek = noop_llseek, |
| 432 | .fallocate = shm_fallocate, | ||
| 421 | }; | 433 | }; |
| 422 | 434 | ||
| 423 | int is_file_shm_hugepages(struct file *file) | 435 | int is_file_shm_hugepages(struct file *file) |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0f3527d6184a..72fcd3069a90 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -896,10 +896,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) | |||
| 896 | mutex_unlock(&cgroup_mutex); | 896 | mutex_unlock(&cgroup_mutex); |
| 897 | 897 | ||
| 898 | /* | 898 | /* |
| 899 | * Drop the active superblock reference that we took when we | 899 | * We want to drop the active superblock reference from the |
| 900 | * created the cgroup | 900 | * cgroup creation after all the dentry refs are gone - |
| 901 | * kill_sb gets mighty unhappy otherwise. Mark | ||
| 902 | * dentry->d_fsdata with cgroup_diput() to tell | ||
| 903 | * cgroup_d_release() to call deactivate_super(). | ||
| 901 | */ | 904 | */ |
| 902 | deactivate_super(cgrp->root->sb); | 905 | dentry->d_fsdata = cgroup_diput; |
| 903 | 906 | ||
| 904 | /* | 907 | /* |
| 905 | * if we're getting rid of the cgroup, refcount should ensure | 908 | * if we're getting rid of the cgroup, refcount should ensure |
| @@ -925,6 +928,13 @@ static int cgroup_delete(const struct dentry *d) | |||
| 925 | return 1; | 928 | return 1; |
| 926 | } | 929 | } |
| 927 | 930 | ||
| 931 | static void cgroup_d_release(struct dentry *dentry) | ||
| 932 | { | ||
| 933 | /* did cgroup_diput() tell me to deactivate super? */ | ||
| 934 | if (dentry->d_fsdata == cgroup_diput) | ||
| 935 | deactivate_super(dentry->d_sb); | ||
| 936 | } | ||
| 937 | |||
| 928 | static void remove_dir(struct dentry *d) | 938 | static void remove_dir(struct dentry *d) |
| 929 | { | 939 | { |
| 930 | struct dentry *parent = dget(d->d_parent); | 940 | struct dentry *parent = dget(d->d_parent); |
| @@ -1532,6 +1542,7 @@ static int cgroup_get_rootdir(struct super_block *sb) | |||
| 1532 | static const struct dentry_operations cgroup_dops = { | 1542 | static const struct dentry_operations cgroup_dops = { |
| 1533 | .d_iput = cgroup_diput, | 1543 | .d_iput = cgroup_diput, |
| 1534 | .d_delete = cgroup_delete, | 1544 | .d_delete = cgroup_delete, |
| 1545 | .d_release = cgroup_d_release, | ||
| 1535 | }; | 1546 | }; |
| 1536 | 1547 | ||
| 1537 | struct inode *inode = | 1548 | struct inode *inode = |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 5b06cbbf6931..f85c0154b333 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -3181,7 +3181,6 @@ static void perf_event_for_each(struct perf_event *event, | |||
| 3181 | event = event->group_leader; | 3181 | event = event->group_leader; |
| 3182 | 3182 | ||
| 3183 | perf_event_for_each_child(event, func); | 3183 | perf_event_for_each_child(event, func); |
| 3184 | func(event); | ||
| 3185 | list_for_each_entry(sibling, &event->sibling_list, group_entry) | 3184 | list_for_each_entry(sibling, &event->sibling_list, group_entry) |
| 3186 | perf_event_for_each_child(sibling, func); | 3185 | perf_event_for_each_child(sibling, func); |
| 3187 | mutex_unlock(&ctx->mutex); | 3186 | mutex_unlock(&ctx->mutex); |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index fc275e4f629b..eebd6d5cfb44 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq) | |||
| 275 | kstat_incr_irqs_this_cpu(irq, desc); | 275 | kstat_incr_irqs_this_cpu(irq, desc); |
| 276 | 276 | ||
| 277 | action = desc->action; | 277 | action = desc->action; |
| 278 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) | 278 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { |
| 279 | desc->istate |= IRQS_PENDING; | ||
| 279 | goto out_unlock; | 280 | goto out_unlock; |
| 281 | } | ||
| 280 | 282 | ||
| 281 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); | 283 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
| 282 | raw_spin_unlock_irq(&desc->lock); | 284 | raw_spin_unlock_irq(&desc->lock); |
| @@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
| 324 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 326 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 325 | kstat_incr_irqs_this_cpu(irq, desc); | 327 | kstat_incr_irqs_this_cpu(irq, desc); |
| 326 | 328 | ||
| 327 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) | 329 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
| 330 | desc->istate |= IRQS_PENDING; | ||
| 328 | goto out_unlock; | 331 | goto out_unlock; |
| 332 | } | ||
| 329 | 333 | ||
| 330 | handle_irq_event(desc); | 334 | handle_irq_event(desc); |
| 331 | 335 | ||
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 8e5c56b3b7d9..001fa5bab490 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); | |||
| 101 | 101 | ||
| 102 | extern void irq_set_thread_affinity(struct irq_desc *desc); | 102 | extern void irq_set_thread_affinity(struct irq_desc *desc); |
| 103 | 103 | ||
| 104 | extern int irq_do_set_affinity(struct irq_data *data, | ||
| 105 | const struct cpumask *dest, bool force); | ||
| 106 | |||
| 104 | /* Inline functions for support of irq chips on slow busses */ | 107 | /* Inline functions for support of irq chips on slow busses */ |
| 105 | static inline void chip_bus_lock(struct irq_desc *desc) | 108 | static inline void chip_bus_lock(struct irq_desc *desc) |
| 106 | { | 109 | { |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ea0c6c2ae6f7..8c548232ba39 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -142,6 +142,25 @@ static inline void | |||
| 142 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | 142 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
| 143 | #endif | 143 | #endif |
| 144 | 144 | ||
| 145 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
| 146 | bool force) | ||
| 147 | { | ||
| 148 | struct irq_desc *desc = irq_data_to_desc(data); | ||
| 149 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
| 150 | int ret; | ||
| 151 | |||
| 152 | ret = chip->irq_set_affinity(data, mask, false); | ||
| 153 | switch (ret) { | ||
| 154 | case IRQ_SET_MASK_OK: | ||
| 155 | cpumask_copy(data->affinity, mask); | ||
| 156 | case IRQ_SET_MASK_OK_NOCOPY: | ||
| 157 | irq_set_thread_affinity(desc); | ||
| 158 | ret = 0; | ||
| 159 | } | ||
| 160 | |||
| 161 | return ret; | ||
| 162 | } | ||
| 163 | |||
| 145 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | 164 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) |
| 146 | { | 165 | { |
| 147 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 166 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
| @@ -152,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | |||
| 152 | return -EINVAL; | 171 | return -EINVAL; |
| 153 | 172 | ||
| 154 | if (irq_can_move_pcntxt(data)) { | 173 | if (irq_can_move_pcntxt(data)) { |
| 155 | ret = chip->irq_set_affinity(data, mask, false); | 174 | ret = irq_do_set_affinity(data, mask, false); |
| 156 | switch (ret) { | ||
| 157 | case IRQ_SET_MASK_OK: | ||
| 158 | cpumask_copy(data->affinity, mask); | ||
| 159 | case IRQ_SET_MASK_OK_NOCOPY: | ||
| 160 | irq_set_thread_affinity(desc); | ||
| 161 | ret = 0; | ||
| 162 | } | ||
| 163 | } else { | 175 | } else { |
| 164 | irqd_set_move_pending(data); | 176 | irqd_set_move_pending(data); |
| 165 | irq_copy_pending(desc, mask); | 177 | irq_copy_pending(desc, mask); |
| @@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | |||
| 283 | static int | 295 | static int |
| 284 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 296 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
| 285 | { | 297 | { |
| 286 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 287 | struct cpumask *set = irq_default_affinity; | 298 | struct cpumask *set = irq_default_affinity; |
| 288 | int ret, node = desc->irq_data.node; | 299 | int node = desc->irq_data.node; |
| 289 | 300 | ||
| 290 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | 301 | /* Excludes PER_CPU and NO_BALANCE interrupts */ |
| 291 | if (!irq_can_set_affinity(irq)) | 302 | if (!irq_can_set_affinity(irq)) |
| @@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |||
| 311 | if (cpumask_intersects(mask, nodemask)) | 322 | if (cpumask_intersects(mask, nodemask)) |
| 312 | cpumask_and(mask, mask, nodemask); | 323 | cpumask_and(mask, mask, nodemask); |
| 313 | } | 324 | } |
| 314 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); | 325 | irq_do_set_affinity(&desc->irq_data, mask, false); |
| 315 | switch (ret) { | ||
| 316 | case IRQ_SET_MASK_OK: | ||
| 317 | cpumask_copy(desc->irq_data.affinity, mask); | ||
| 318 | case IRQ_SET_MASK_OK_NOCOPY: | ||
| 319 | irq_set_thread_affinity(desc); | ||
| 320 | } | ||
| 321 | return 0; | 326 | return 0; |
| 322 | } | 327 | } |
| 323 | #else | 328 | #else |
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index c3c89751b327..ca3f4aaff707 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
| @@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata) | |||
| 42 | * For correct operation this depends on the caller | 42 | * For correct operation this depends on the caller |
| 43 | * masking the irqs. | 43 | * masking the irqs. |
| 44 | */ | 44 | */ |
| 45 | if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) | 45 | if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) |
| 46 | < nr_cpu_ids)) { | 46 | irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); |
| 47 | int ret = chip->irq_set_affinity(&desc->irq_data, | ||
| 48 | desc->pending_mask, false); | ||
| 49 | switch (ret) { | ||
| 50 | case IRQ_SET_MASK_OK: | ||
| 51 | cpumask_copy(desc->irq_data.affinity, desc->pending_mask); | ||
| 52 | case IRQ_SET_MASK_OK_NOCOPY: | ||
| 53 | irq_set_thread_affinity(desc); | ||
| 54 | } | ||
| 55 | } | ||
| 56 | 47 | ||
| 57 | cpumask_clear(desc->pending_mask); | 48 | cpumask_clear(desc->pending_mask); |
| 58 | } | 49 | } |
diff --git a/kernel/panic.c b/kernel/panic.c index 8ed89a175d79..d2a5f4ecc6dd 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | #define PANIC_TIMER_STEP 100 | 27 | #define PANIC_TIMER_STEP 100 |
| 28 | #define PANIC_BLINK_SPD 18 | 28 | #define PANIC_BLINK_SPD 18 |
| 29 | 29 | ||
| 30 | int panic_on_oops; | 30 | int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE; |
| 31 | static unsigned long tainted_mask; | 31 | static unsigned long tainted_mask; |
| 32 | static int pause_on_oops; | 32 | static int pause_on_oops; |
| 33 | static int pause_on_oops_flag; | 33 | static int pause_on_oops_flag; |
| @@ -108,8 +108,6 @@ void panic(const char *fmt, ...) | |||
| 108 | */ | 108 | */ |
| 109 | crash_kexec(NULL); | 109 | crash_kexec(NULL); |
| 110 | 110 | ||
| 111 | kmsg_dump(KMSG_DUMP_PANIC); | ||
| 112 | |||
| 113 | /* | 111 | /* |
| 114 | * Note smp_send_stop is the usual smp shutdown function, which | 112 | * Note smp_send_stop is the usual smp shutdown function, which |
| 115 | * unfortunately means it may not be hardened to work in a panic | 113 | * unfortunately means it may not be hardened to work in a panic |
| @@ -117,6 +115,8 @@ void panic(const char *fmt, ...) | |||
| 117 | */ | 115 | */ |
| 118 | smp_send_stop(); | 116 | smp_send_stop(); |
| 119 | 117 | ||
| 118 | kmsg_dump(KMSG_DUMP_PANIC); | ||
| 119 | |||
| 120 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); | 120 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); |
| 121 | 121 | ||
| 122 | bust_spinlocks(0); | 122 | bust_spinlocks(0); |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 0da7b88d92d0..3b0f1337f75b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
| @@ -1397,6 +1397,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
| 1397 | rdp->qlen_lazy += rsp->qlen_lazy; | 1397 | rdp->qlen_lazy += rsp->qlen_lazy; |
| 1398 | rdp->qlen += rsp->qlen; | 1398 | rdp->qlen += rsp->qlen; |
| 1399 | rdp->n_cbs_adopted += rsp->qlen; | 1399 | rdp->n_cbs_adopted += rsp->qlen; |
| 1400 | if (rsp->qlen_lazy != rsp->qlen) | ||
| 1401 | rcu_idle_count_callbacks_posted(); | ||
| 1400 | rsp->qlen_lazy = 0; | 1402 | rsp->qlen_lazy = 0; |
| 1401 | rsp->qlen = 0; | 1403 | rsp->qlen = 0; |
| 1402 | 1404 | ||
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 7f5d138dedf5..ea056495783e 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
| @@ -84,6 +84,20 @@ struct rcu_dynticks { | |||
| 84 | /* Process level is worth LLONG_MAX/2. */ | 84 | /* Process level is worth LLONG_MAX/2. */ |
| 85 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ | 85 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ |
| 86 | atomic_t dynticks; /* Even value for idle, else odd. */ | 86 | atomic_t dynticks; /* Even value for idle, else odd. */ |
| 87 | #ifdef CONFIG_RCU_FAST_NO_HZ | ||
| 88 | int dyntick_drain; /* Prepare-for-idle state variable. */ | ||
| 89 | unsigned long dyntick_holdoff; | ||
| 90 | /* No retries for the jiffy of failure. */ | ||
| 91 | struct timer_list idle_gp_timer; | ||
| 92 | /* Wake up CPU sleeping with callbacks. */ | ||
| 93 | unsigned long idle_gp_timer_expires; | ||
| 94 | /* When to wake up CPU (for repost). */ | ||
| 95 | bool idle_first_pass; /* First pass of attempt to go idle? */ | ||
| 96 | unsigned long nonlazy_posted; | ||
| 97 | /* # times non-lazy CBs posted to CPU. */ | ||
| 98 | unsigned long nonlazy_posted_snap; | ||
| 99 | /* idle-period nonlazy_posted snapshot. */ | ||
| 100 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | ||
| 87 | }; | 101 | }; |
| 88 | 102 | ||
| 89 | /* RCU's kthread states for tracing. */ | 103 | /* RCU's kthread states for tracing. */ |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 2411000d9869..5271a020887e 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
| @@ -1886,8 +1886,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu) | |||
| 1886 | * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs | 1886 | * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs |
| 1887 | * any flavor of RCU. | 1887 | * any flavor of RCU. |
| 1888 | */ | 1888 | */ |
| 1889 | int rcu_needs_cpu(int cpu) | 1889 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
| 1890 | { | 1890 | { |
| 1891 | *delta_jiffies = ULONG_MAX; | ||
| 1891 | return rcu_cpu_has_callbacks(cpu); | 1892 | return rcu_cpu_has_callbacks(cpu); |
| 1892 | } | 1893 | } |
| 1893 | 1894 | ||
| @@ -1962,41 +1963,6 @@ static void rcu_idle_count_callbacks_posted(void) | |||
| 1962 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ | 1963 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ |
| 1963 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ | 1964 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ |
| 1964 | 1965 | ||
| 1965 | /* Loop counter for rcu_prepare_for_idle(). */ | ||
| 1966 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); | ||
| 1967 | /* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */ | ||
| 1968 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | ||
| 1969 | /* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */ | ||
| 1970 | static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer); | ||
| 1971 | /* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */ | ||
| 1972 | static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires); | ||
| 1973 | /* Enable special processing on first attempt to enter dyntick-idle mode. */ | ||
| 1974 | static DEFINE_PER_CPU(bool, rcu_idle_first_pass); | ||
| 1975 | /* Running count of non-lazy callbacks posted, never decremented. */ | ||
| 1976 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted); | ||
| 1977 | /* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */ | ||
| 1978 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap); | ||
| 1979 | |||
| 1980 | /* | ||
| 1981 | * Allow the CPU to enter dyntick-idle mode if either: (1) There are no | ||
| 1982 | * callbacks on this CPU, (2) this CPU has not yet attempted to enter | ||
| 1983 | * dyntick-idle mode, or (3) this CPU is in the process of attempting to | ||
| 1984 | * enter dyntick-idle mode. Otherwise, if we have recently tried and failed | ||
| 1985 | * to enter dyntick-idle mode, we refuse to try to enter it. After all, | ||
| 1986 | * it is better to incur scheduling-clock interrupts than to spin | ||
| 1987 | * continuously for the same time duration! | ||
| 1988 | */ | ||
| 1989 | int rcu_needs_cpu(int cpu) | ||
| 1990 | { | ||
| 1991 | /* Flag a new idle sojourn to the idle-entry state machine. */ | ||
| 1992 | per_cpu(rcu_idle_first_pass, cpu) = 1; | ||
| 1993 | /* If no callbacks, RCU doesn't need the CPU. */ | ||
| 1994 | if (!rcu_cpu_has_callbacks(cpu)) | ||
| 1995 | return 0; | ||
| 1996 | /* Otherwise, RCU needs the CPU only if it recently tried and failed. */ | ||
| 1997 | return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies; | ||
| 1998 | } | ||
| 1999 | |||
| 2000 | /* | 1966 | /* |
| 2001 | * Does the specified flavor of RCU have non-lazy callbacks pending on | 1967 | * Does the specified flavor of RCU have non-lazy callbacks pending on |
| 2002 | * the specified CPU? Both RCU flavor and CPU are specified by the | 1968 | * the specified CPU? Both RCU flavor and CPU are specified by the |
| @@ -2040,6 +2006,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu) | |||
| 2040 | } | 2006 | } |
| 2041 | 2007 | ||
| 2042 | /* | 2008 | /* |
| 2009 | * Allow the CPU to enter dyntick-idle mode if either: (1) There are no | ||
| 2010 | * callbacks on this CPU, (2) this CPU has not yet attempted to enter | ||
| 2011 | * dyntick-idle mode, or (3) this CPU is in the process of attempting to | ||
| 2012 | * enter dyntick-idle mode. Otherwise, if we have recently tried and failed | ||
| 2013 | * to enter dyntick-idle mode, we refuse to try to enter it. After all, | ||
| 2014 | * it is better to incur scheduling-clock interrupts than to spin | ||
| 2015 | * continuously for the same time duration! | ||
| 2016 | * | ||
| 2017 | * The delta_jiffies argument is used to store the time when RCU is | ||
| 2018 | * going to need the CPU again if it still has callbacks. The reason | ||
| 2019 | * for this is that rcu_prepare_for_idle() might need to post a timer, | ||
| 2020 | * but if so, it will do so after tick_nohz_stop_sched_tick() has set | ||
| 2021 | * the wakeup time for this CPU. This means that RCU's timer can be | ||
| 2022 | * delayed until the wakeup time, which defeats the purpose of posting | ||
| 2023 | * a timer. | ||
| 2024 | */ | ||
| 2025 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) | ||
| 2026 | { | ||
| 2027 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
| 2028 | |||
| 2029 | /* Flag a new idle sojourn to the idle-entry state machine. */ | ||
| 2030 | rdtp->idle_first_pass = 1; | ||
| 2031 | /* If no callbacks, RCU doesn't need the CPU. */ | ||
| 2032 | if (!rcu_cpu_has_callbacks(cpu)) { | ||
| 2033 | *delta_jiffies = ULONG_MAX; | ||
| 2034 | return 0; | ||
| 2035 | } | ||
| 2036 | if (rdtp->dyntick_holdoff == jiffies) { | ||
| 2037 | /* RCU recently tried and failed, so don't try again. */ | ||
| 2038 | *delta_jiffies = 1; | ||
| 2039 | return 1; | ||
| 2040 | } | ||
| 2041 | /* Set up for the possibility that RCU will post a timer. */ | ||
| 2042 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) | ||
| 2043 | *delta_jiffies = RCU_IDLE_GP_DELAY; | ||
| 2044 | else | ||
| 2045 | *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY; | ||
| 2046 | return 0; | ||
| 2047 | } | ||
| 2048 | |||
| 2049 | /* | ||
| 2043 | * Handler for smp_call_function_single(). The only point of this | 2050 | * Handler for smp_call_function_single(). The only point of this |
| 2044 | * handler is to wake the CPU up, so the handler does only tracing. | 2051 | * handler is to wake the CPU up, so the handler does only tracing. |
| 2045 | */ | 2052 | */ |
| @@ -2075,21 +2082,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in) | |||
| 2075 | */ | 2082 | */ |
| 2076 | static void rcu_prepare_for_idle_init(int cpu) | 2083 | static void rcu_prepare_for_idle_init(int cpu) |
| 2077 | { | 2084 | { |
| 2078 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 2085 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
| 2079 | setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), | 2086 | |
| 2080 | rcu_idle_gp_timer_func, cpu); | 2087 | rdtp->dyntick_holdoff = jiffies - 1; |
| 2081 | per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1; | 2088 | setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu); |
| 2082 | per_cpu(rcu_idle_first_pass, cpu) = 1; | 2089 | rdtp->idle_gp_timer_expires = jiffies - 1; |
| 2090 | rdtp->idle_first_pass = 1; | ||
| 2083 | } | 2091 | } |
| 2084 | 2092 | ||
| 2085 | /* | 2093 | /* |
| 2086 | * Clean up for exit from idle. Because we are exiting from idle, there | 2094 | * Clean up for exit from idle. Because we are exiting from idle, there |
| 2087 | * is no longer any point to rcu_idle_gp_timer, so cancel it. This will | 2095 | * is no longer any point to ->idle_gp_timer, so cancel it. This will |
| 2088 | * do nothing if this timer is not active, so just cancel it unconditionally. | 2096 | * do nothing if this timer is not active, so just cancel it unconditionally. |
| 2089 | */ | 2097 | */ |
| 2090 | static void rcu_cleanup_after_idle(int cpu) | 2098 | static void rcu_cleanup_after_idle(int cpu) |
| 2091 | { | 2099 | { |
| 2092 | del_timer(&per_cpu(rcu_idle_gp_timer, cpu)); | 2100 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
| 2101 | |||
| 2102 | del_timer(&rdtp->idle_gp_timer); | ||
| 2093 | trace_rcu_prep_idle("Cleanup after idle"); | 2103 | trace_rcu_prep_idle("Cleanup after idle"); |
| 2094 | } | 2104 | } |
| 2095 | 2105 | ||
| @@ -2108,42 +2118,41 @@ static void rcu_cleanup_after_idle(int cpu) | |||
| 2108 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | 2118 | * Because it is not legal to invoke rcu_process_callbacks() with irqs |
| 2109 | * disabled, we do one pass of force_quiescent_state(), then do a | 2119 | * disabled, we do one pass of force_quiescent_state(), then do a |
| 2110 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked | 2120 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked |
| 2111 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. | 2121 | * later. The ->dyntick_drain field controls the sequencing. |
| 2112 | * | 2122 | * |
| 2113 | * The caller must have disabled interrupts. | 2123 | * The caller must have disabled interrupts. |
| 2114 | */ | 2124 | */ |
| 2115 | static void rcu_prepare_for_idle(int cpu) | 2125 | static void rcu_prepare_for_idle(int cpu) |
| 2116 | { | 2126 | { |
| 2117 | struct timer_list *tp; | 2127 | struct timer_list *tp; |
| 2128 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
| 2118 | 2129 | ||
| 2119 | /* | 2130 | /* |
| 2120 | * If this is an idle re-entry, for example, due to use of | 2131 | * If this is an idle re-entry, for example, due to use of |
| 2121 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle | 2132 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle |
| 2122 | * loop, then don't take any state-machine actions, unless the | 2133 | * loop, then don't take any state-machine actions, unless the |
| 2123 | * momentary exit from idle queued additional non-lazy callbacks. | 2134 | * momentary exit from idle queued additional non-lazy callbacks. |
| 2124 | * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks | 2135 | * Instead, repost the ->idle_gp_timer if this CPU has callbacks |
| 2125 | * pending. | 2136 | * pending. |
| 2126 | */ | 2137 | */ |
| 2127 | if (!per_cpu(rcu_idle_first_pass, cpu) && | 2138 | if (!rdtp->idle_first_pass && |
| 2128 | (per_cpu(rcu_nonlazy_posted, cpu) == | 2139 | (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) { |
| 2129 | per_cpu(rcu_nonlazy_posted_snap, cpu))) { | ||
| 2130 | if (rcu_cpu_has_callbacks(cpu)) { | 2140 | if (rcu_cpu_has_callbacks(cpu)) { |
| 2131 | tp = &per_cpu(rcu_idle_gp_timer, cpu); | 2141 | tp = &rdtp->idle_gp_timer; |
| 2132 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2142 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); |
| 2133 | } | 2143 | } |
| 2134 | return; | 2144 | return; |
| 2135 | } | 2145 | } |
| 2136 | per_cpu(rcu_idle_first_pass, cpu) = 0; | 2146 | rdtp->idle_first_pass = 0; |
| 2137 | per_cpu(rcu_nonlazy_posted_snap, cpu) = | 2147 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1; |
| 2138 | per_cpu(rcu_nonlazy_posted, cpu) - 1; | ||
| 2139 | 2148 | ||
| 2140 | /* | 2149 | /* |
| 2141 | * If there are no callbacks on this CPU, enter dyntick-idle mode. | 2150 | * If there are no callbacks on this CPU, enter dyntick-idle mode. |
| 2142 | * Also reset state to avoid prejudicing later attempts. | 2151 | * Also reset state to avoid prejudicing later attempts. |
| 2143 | */ | 2152 | */ |
| 2144 | if (!rcu_cpu_has_callbacks(cpu)) { | 2153 | if (!rcu_cpu_has_callbacks(cpu)) { |
| 2145 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 2154 | rdtp->dyntick_holdoff = jiffies - 1; |
| 2146 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 2155 | rdtp->dyntick_drain = 0; |
| 2147 | trace_rcu_prep_idle("No callbacks"); | 2156 | trace_rcu_prep_idle("No callbacks"); |
| 2148 | return; | 2157 | return; |
| 2149 | } | 2158 | } |
| @@ -2152,36 +2161,37 @@ static void rcu_prepare_for_idle(int cpu) | |||
| 2152 | * If in holdoff mode, just return. We will presumably have | 2161 | * If in holdoff mode, just return. We will presumably have |
| 2153 | * refrained from disabling the scheduling-clock tick. | 2162 | * refrained from disabling the scheduling-clock tick. |
| 2154 | */ | 2163 | */ |
| 2155 | if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { | 2164 | if (rdtp->dyntick_holdoff == jiffies) { |
| 2156 | trace_rcu_prep_idle("In holdoff"); | 2165 | trace_rcu_prep_idle("In holdoff"); |
| 2157 | return; | 2166 | return; |
| 2158 | } | 2167 | } |
| 2159 | 2168 | ||
| 2160 | /* Check and update the rcu_dyntick_drain sequencing. */ | 2169 | /* Check and update the ->dyntick_drain sequencing. */ |
| 2161 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 2170 | if (rdtp->dyntick_drain <= 0) { |
| 2162 | /* First time through, initialize the counter. */ | 2171 | /* First time through, initialize the counter. */ |
| 2163 | per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; | 2172 | rdtp->dyntick_drain = RCU_IDLE_FLUSHES; |
| 2164 | } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && | 2173 | } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES && |
| 2165 | !rcu_pending(cpu) && | 2174 | !rcu_pending(cpu) && |
| 2166 | !local_softirq_pending()) { | 2175 | !local_softirq_pending()) { |
| 2167 | /* Can we go dyntick-idle despite still having callbacks? */ | 2176 | /* Can we go dyntick-idle despite still having callbacks? */ |
| 2168 | trace_rcu_prep_idle("Dyntick with callbacks"); | 2177 | rdtp->dyntick_drain = 0; |
| 2169 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 2178 | rdtp->dyntick_holdoff = jiffies; |
| 2170 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; | 2179 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) { |
| 2171 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) | 2180 | trace_rcu_prep_idle("Dyntick with callbacks"); |
| 2172 | per_cpu(rcu_idle_gp_timer_expires, cpu) = | 2181 | rdtp->idle_gp_timer_expires = |
| 2173 | jiffies + RCU_IDLE_GP_DELAY; | 2182 | jiffies + RCU_IDLE_GP_DELAY; |
| 2174 | else | 2183 | } else { |
| 2175 | per_cpu(rcu_idle_gp_timer_expires, cpu) = | 2184 | rdtp->idle_gp_timer_expires = |
| 2176 | jiffies + RCU_IDLE_LAZY_GP_DELAY; | 2185 | jiffies + RCU_IDLE_LAZY_GP_DELAY; |
| 2177 | tp = &per_cpu(rcu_idle_gp_timer, cpu); | 2186 | trace_rcu_prep_idle("Dyntick with lazy callbacks"); |
| 2178 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2187 | } |
| 2179 | per_cpu(rcu_nonlazy_posted_snap, cpu) = | 2188 | tp = &rdtp->idle_gp_timer; |
| 2180 | per_cpu(rcu_nonlazy_posted, cpu); | 2189 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); |
| 2190 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; | ||
| 2181 | return; /* Nothing more to do immediately. */ | 2191 | return; /* Nothing more to do immediately. */ |
| 2182 | } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 2192 | } else if (--(rdtp->dyntick_drain) <= 0) { |
| 2183 | /* We have hit the limit, so time to give up. */ | 2193 | /* We have hit the limit, so time to give up. */ |
| 2184 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; | 2194 | rdtp->dyntick_holdoff = jiffies; |
| 2185 | trace_rcu_prep_idle("Begin holdoff"); | 2195 | trace_rcu_prep_idle("Begin holdoff"); |
| 2186 | invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ | 2196 | invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ |
| 2187 | return; | 2197 | return; |
| @@ -2227,7 +2237,7 @@ static void rcu_prepare_for_idle(int cpu) | |||
| 2227 | */ | 2237 | */ |
| 2228 | static void rcu_idle_count_callbacks_posted(void) | 2238 | static void rcu_idle_count_callbacks_posted(void) |
| 2229 | { | 2239 | { |
| 2230 | __this_cpu_add(rcu_nonlazy_posted, 1); | 2240 | __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); |
| 2231 | } | 2241 | } |
| 2232 | 2242 | ||
| 2233 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ | 2243 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
| @@ -2238,11 +2248,12 @@ static void rcu_idle_count_callbacks_posted(void) | |||
| 2238 | 2248 | ||
| 2239 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) | 2249 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) |
| 2240 | { | 2250 | { |
| 2241 | struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu); | 2251 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
| 2252 | struct timer_list *tltp = &rdtp->idle_gp_timer; | ||
| 2242 | 2253 | ||
| 2243 | sprintf(cp, "drain=%d %c timer=%lu", | 2254 | sprintf(cp, "drain=%d %c timer=%lu", |
| 2244 | per_cpu(rcu_dyntick_drain, cpu), | 2255 | rdtp->dyntick_drain, |
| 2245 | per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.', | 2256 | rdtp->dyntick_holdoff == jiffies ? 'H' : '.', |
| 2246 | timer_pending(tltp) ? tltp->expires - jiffies : -1); | 2257 | timer_pending(tltp) ? tltp->expires - jiffies : -1); |
| 2247 | } | 2258 | } |
| 2248 | 2259 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 39eb6011bc38..d5594a4268d4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -142,9 +142,8 @@ const_debug unsigned int sysctl_sched_features = | |||
| 142 | #define SCHED_FEAT(name, enabled) \ | 142 | #define SCHED_FEAT(name, enabled) \ |
| 143 | #name , | 143 | #name , |
| 144 | 144 | ||
| 145 | static __read_mostly char *sched_feat_names[] = { | 145 | static const char * const sched_feat_names[] = { |
| 146 | #include "features.h" | 146 | #include "features.h" |
| 147 | NULL | ||
| 148 | }; | 147 | }; |
| 149 | 148 | ||
| 150 | #undef SCHED_FEAT | 149 | #undef SCHED_FEAT |
| @@ -2517,25 +2516,32 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load, | |||
| 2517 | sched_avg_update(this_rq); | 2516 | sched_avg_update(this_rq); |
| 2518 | } | 2517 | } |
| 2519 | 2518 | ||
| 2519 | #ifdef CONFIG_NO_HZ | ||
| 2520 | /* | ||
| 2521 | * There is no sane way to deal with nohz on smp when using jiffies because the | ||
| 2522 | * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading | ||
| 2523 | * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. | ||
| 2524 | * | ||
| 2525 | * Therefore we cannot use the delta approach from the regular tick since that | ||
| 2526 | * would seriously skew the load calculation. However we'll make do for those | ||
| 2527 | * updates happening while idle (nohz_idle_balance) or coming out of idle | ||
| 2528 | * (tick_nohz_idle_exit). | ||
| 2529 | * | ||
| 2530 | * This means we might still be one tick off for nohz periods. | ||
| 2531 | */ | ||
| 2532 | |||
| 2520 | /* | 2533 | /* |
| 2521 | * Called from nohz_idle_balance() to update the load ratings before doing the | 2534 | * Called from nohz_idle_balance() to update the load ratings before doing the |
| 2522 | * idle balance. | 2535 | * idle balance. |
| 2523 | */ | 2536 | */ |
| 2524 | void update_idle_cpu_load(struct rq *this_rq) | 2537 | void update_idle_cpu_load(struct rq *this_rq) |
| 2525 | { | 2538 | { |
| 2526 | unsigned long curr_jiffies = jiffies; | 2539 | unsigned long curr_jiffies = ACCESS_ONCE(jiffies); |
| 2527 | unsigned long load = this_rq->load.weight; | 2540 | unsigned long load = this_rq->load.weight; |
| 2528 | unsigned long pending_updates; | 2541 | unsigned long pending_updates; |
| 2529 | 2542 | ||
| 2530 | /* | 2543 | /* |
| 2531 | * Bloody broken means of dealing with nohz, but better than nothing.. | 2544 | * bail if there's load or we're actually up-to-date. |
| 2532 | * jiffies is updated by one cpu, another cpu can drift wrt the jiffy | ||
| 2533 | * update and see 0 difference the one time and 2 the next, even though | ||
| 2534 | * we ticked at roughtly the same rate. | ||
| 2535 | * | ||
| 2536 | * Hence we only use this from nohz_idle_balance() and skip this | ||
| 2537 | * nonsense when called from the scheduler_tick() since that's | ||
| 2538 | * guaranteed a stable rate. | ||
| 2539 | */ | 2545 | */ |
| 2540 | if (load || curr_jiffies == this_rq->last_load_update_tick) | 2546 | if (load || curr_jiffies == this_rq->last_load_update_tick) |
| 2541 | return; | 2547 | return; |
| @@ -2547,12 +2553,38 @@ void update_idle_cpu_load(struct rq *this_rq) | |||
| 2547 | } | 2553 | } |
| 2548 | 2554 | ||
| 2549 | /* | 2555 | /* |
| 2556 | * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed. | ||
| 2557 | */ | ||
| 2558 | void update_cpu_load_nohz(void) | ||
| 2559 | { | ||
| 2560 | struct rq *this_rq = this_rq(); | ||
| 2561 | unsigned long curr_jiffies = ACCESS_ONCE(jiffies); | ||
| 2562 | unsigned long pending_updates; | ||
| 2563 | |||
| 2564 | if (curr_jiffies == this_rq->last_load_update_tick) | ||
| 2565 | return; | ||
| 2566 | |||
| 2567 | raw_spin_lock(&this_rq->lock); | ||
| 2568 | pending_updates = curr_jiffies - this_rq->last_load_update_tick; | ||
| 2569 | if (pending_updates) { | ||
| 2570 | this_rq->last_load_update_tick = curr_jiffies; | ||
| 2571 | /* | ||
| 2572 | * We were idle, this means load 0, the current load might be | ||
| 2573 | * !0 due to remote wakeups and the sort. | ||
| 2574 | */ | ||
| 2575 | __update_cpu_load(this_rq, 0, pending_updates); | ||
| 2576 | } | ||
| 2577 | raw_spin_unlock(&this_rq->lock); | ||
| 2578 | } | ||
| 2579 | #endif /* CONFIG_NO_HZ */ | ||
| 2580 | |||
| 2581 | /* | ||
| 2550 | * Called from scheduler_tick() | 2582 | * Called from scheduler_tick() |
| 2551 | */ | 2583 | */ |
| 2552 | static void update_cpu_load_active(struct rq *this_rq) | 2584 | static void update_cpu_load_active(struct rq *this_rq) |
| 2553 | { | 2585 | { |
| 2554 | /* | 2586 | /* |
| 2555 | * See the mess in update_idle_cpu_load(). | 2587 | * See the mess around update_idle_cpu_load() / update_cpu_load_nohz(). |
| 2556 | */ | 2588 | */ |
| 2557 | this_rq->last_load_update_tick = jiffies; | 2589 | this_rq->last_load_update_tick = jiffies; |
| 2558 | __update_cpu_load(this_rq, this_rq->load.weight, 1); | 2590 | __update_cpu_load(this_rq, this_rq->load.weight, 1); |
| @@ -4982,7 +5014,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) | |||
| 4982 | p->sched_class->set_cpus_allowed(p, new_mask); | 5014 | p->sched_class->set_cpus_allowed(p, new_mask); |
| 4983 | 5015 | ||
| 4984 | cpumask_copy(&p->cpus_allowed, new_mask); | 5016 | cpumask_copy(&p->cpus_allowed, new_mask); |
| 4985 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); | 5017 | p->nr_cpus_allowed = cpumask_weight(new_mask); |
| 4986 | } | 5018 | } |
| 4987 | 5019 | ||
| 4988 | /* | 5020 | /* |
| @@ -5524,15 +5556,20 @@ static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ | |||
| 5524 | 5556 | ||
| 5525 | #ifdef CONFIG_SCHED_DEBUG | 5557 | #ifdef CONFIG_SCHED_DEBUG |
| 5526 | 5558 | ||
| 5527 | static __read_mostly int sched_domain_debug_enabled; | 5559 | static __read_mostly int sched_debug_enabled; |
| 5528 | 5560 | ||
| 5529 | static int __init sched_domain_debug_setup(char *str) | 5561 | static int __init sched_debug_setup(char *str) |
| 5530 | { | 5562 | { |
| 5531 | sched_domain_debug_enabled = 1; | 5563 | sched_debug_enabled = 1; |
| 5532 | 5564 | ||
| 5533 | return 0; | 5565 | return 0; |
| 5534 | } | 5566 | } |
| 5535 | early_param("sched_debug", sched_domain_debug_setup); | 5567 | early_param("sched_debug", sched_debug_setup); |
| 5568 | |||
| 5569 | static inline bool sched_debug(void) | ||
| 5570 | { | ||
| 5571 | return sched_debug_enabled; | ||
| 5572 | } | ||
| 5536 | 5573 | ||
| 5537 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 5574 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
| 5538 | struct cpumask *groupmask) | 5575 | struct cpumask *groupmask) |
| @@ -5572,7 +5609,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
| 5572 | break; | 5609 | break; |
| 5573 | } | 5610 | } |
| 5574 | 5611 | ||
| 5575 | if (!group->sgp->power) { | 5612 | /* |
| 5613 | * Even though we initialize ->power to something semi-sane, | ||
| 5614 | * we leave power_orig unset. This allows us to detect if | ||
| 5615 | * domain iteration is still funny without causing /0 traps. | ||
| 5616 | */ | ||
| 5617 | if (!group->sgp->power_orig) { | ||
| 5576 | printk(KERN_CONT "\n"); | 5618 | printk(KERN_CONT "\n"); |
| 5577 | printk(KERN_ERR "ERROR: domain->cpu_power not " | 5619 | printk(KERN_ERR "ERROR: domain->cpu_power not " |
| 5578 | "set\n"); | 5620 | "set\n"); |
| @@ -5620,7 +5662,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
| 5620 | { | 5662 | { |
| 5621 | int level = 0; | 5663 | int level = 0; |
| 5622 | 5664 | ||
| 5623 | if (!sched_domain_debug_enabled) | 5665 | if (!sched_debug_enabled) |
| 5624 | return; | 5666 | return; |
| 5625 | 5667 | ||
| 5626 | if (!sd) { | 5668 | if (!sd) { |
| @@ -5641,6 +5683,10 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
| 5641 | } | 5683 | } |
| 5642 | #else /* !CONFIG_SCHED_DEBUG */ | 5684 | #else /* !CONFIG_SCHED_DEBUG */ |
| 5643 | # define sched_domain_debug(sd, cpu) do { } while (0) | 5685 | # define sched_domain_debug(sd, cpu) do { } while (0) |
| 5686 | static inline bool sched_debug(void) | ||
| 5687 | { | ||
| 5688 | return false; | ||
| 5689 | } | ||
| 5644 | #endif /* CONFIG_SCHED_DEBUG */ | 5690 | #endif /* CONFIG_SCHED_DEBUG */ |
| 5645 | 5691 | ||
| 5646 | static int sd_degenerate(struct sched_domain *sd) | 5692 | static int sd_degenerate(struct sched_domain *sd) |
| @@ -5962,6 +6008,44 @@ struct sched_domain_topology_level { | |||
| 5962 | struct sd_data data; | 6008 | struct sd_data data; |
| 5963 | }; | 6009 | }; |
| 5964 | 6010 | ||
| 6011 | /* | ||
| 6012 | * Build an iteration mask that can exclude certain CPUs from the upwards | ||
| 6013 | * domain traversal. | ||
| 6014 | * | ||
| 6015 | * Asymmetric node setups can result in situations where the domain tree is of | ||
| 6016 | * unequal depth, make sure to skip domains that already cover the entire | ||
| 6017 | * range. | ||
| 6018 | * | ||
| 6019 | * In that case build_sched_domains() will have terminated the iteration early | ||
| 6020 | * and our sibling sd spans will be empty. Domains should always include the | ||
| 6021 | * cpu they're built on, so check that. | ||
| 6022 | * | ||
| 6023 | */ | ||
| 6024 | static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) | ||
| 6025 | { | ||
| 6026 | const struct cpumask *span = sched_domain_span(sd); | ||
| 6027 | struct sd_data *sdd = sd->private; | ||
| 6028 | struct sched_domain *sibling; | ||
| 6029 | int i; | ||
| 6030 | |||
| 6031 | for_each_cpu(i, span) { | ||
| 6032 | sibling = *per_cpu_ptr(sdd->sd, i); | ||
| 6033 | if (!cpumask_test_cpu(i, sched_domain_span(sibling))) | ||
| 6034 | continue; | ||
| 6035 | |||
| 6036 | cpumask_set_cpu(i, sched_group_mask(sg)); | ||
| 6037 | } | ||
| 6038 | } | ||
| 6039 | |||
| 6040 | /* | ||
| 6041 | * Return the canonical balance cpu for this group, this is the first cpu | ||
| 6042 | * of this group that's also in the iteration mask. | ||
| 6043 | */ | ||
| 6044 | int group_balance_cpu(struct sched_group *sg) | ||
| 6045 | { | ||
| 6046 | return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); | ||
| 6047 | } | ||
| 6048 | |||
| 5965 | static int | 6049 | static int |
| 5966 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) | 6050 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) |
| 5967 | { | 6051 | { |
| @@ -5980,6 +6064,12 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
| 5980 | if (cpumask_test_cpu(i, covered)) | 6064 | if (cpumask_test_cpu(i, covered)) |
| 5981 | continue; | 6065 | continue; |
| 5982 | 6066 | ||
| 6067 | child = *per_cpu_ptr(sdd->sd, i); | ||
| 6068 | |||
| 6069 | /* See the comment near build_group_mask(). */ | ||
| 6070 | if (!cpumask_test_cpu(i, sched_domain_span(child))) | ||
| 6071 | continue; | ||
| 6072 | |||
| 5983 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), | 6073 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 5984 | GFP_KERNEL, cpu_to_node(cpu)); | 6074 | GFP_KERNEL, cpu_to_node(cpu)); |
| 5985 | 6075 | ||
| @@ -5987,8 +6077,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
| 5987 | goto fail; | 6077 | goto fail; |
| 5988 | 6078 | ||
| 5989 | sg_span = sched_group_cpus(sg); | 6079 | sg_span = sched_group_cpus(sg); |
| 5990 | |||
| 5991 | child = *per_cpu_ptr(sdd->sd, i); | ||
| 5992 | if (child->child) { | 6080 | if (child->child) { |
| 5993 | child = child->child; | 6081 | child = child->child; |
| 5994 | cpumask_copy(sg_span, sched_domain_span(child)); | 6082 | cpumask_copy(sg_span, sched_domain_span(child)); |
| @@ -5997,10 +6085,24 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
| 5997 | 6085 | ||
| 5998 | cpumask_or(covered, covered, sg_span); | 6086 | cpumask_or(covered, covered, sg_span); |
| 5999 | 6087 | ||
| 6000 | sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span)); | 6088 | sg->sgp = *per_cpu_ptr(sdd->sgp, i); |
| 6001 | atomic_inc(&sg->sgp->ref); | 6089 | if (atomic_inc_return(&sg->sgp->ref) == 1) |
| 6090 | build_group_mask(sd, sg); | ||
| 6002 | 6091 | ||
| 6003 | if (cpumask_test_cpu(cpu, sg_span)) | 6092 | /* |
| 6093 | * Initialize sgp->power such that even if we mess up the | ||
| 6094 | * domains and no possible iteration will get us here, we won't | ||
| 6095 | * die on a /0 trap. | ||
| 6096 | */ | ||
| 6097 | sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); | ||
| 6098 | |||
| 6099 | /* | ||
| 6100 | * Make sure the first group of this domain contains the | ||
| 6101 | * canonical balance cpu. Otherwise the sched_domain iteration | ||
| 6102 | * breaks. See update_sg_lb_stats(). | ||
| 6103 | */ | ||
| 6104 | if ((!groups && cpumask_test_cpu(cpu, sg_span)) || | ||
| 6105 | group_balance_cpu(sg) == cpu) | ||
| 6004 | groups = sg; | 6106 | groups = sg; |
| 6005 | 6107 | ||
| 6006 | if (!first) | 6108 | if (!first) |
| @@ -6074,6 +6176,7 @@ build_sched_groups(struct sched_domain *sd, int cpu) | |||
| 6074 | 6176 | ||
| 6075 | cpumask_clear(sched_group_cpus(sg)); | 6177 | cpumask_clear(sched_group_cpus(sg)); |
| 6076 | sg->sgp->power = 0; | 6178 | sg->sgp->power = 0; |
| 6179 | cpumask_setall(sched_group_mask(sg)); | ||
| 6077 | 6180 | ||
| 6078 | for_each_cpu(j, span) { | 6181 | for_each_cpu(j, span) { |
| 6079 | if (get_group(j, sdd, NULL) != group) | 6182 | if (get_group(j, sdd, NULL) != group) |
| @@ -6115,7 +6218,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
| 6115 | sg = sg->next; | 6218 | sg = sg->next; |
| 6116 | } while (sg != sd->groups); | 6219 | } while (sg != sd->groups); |
| 6117 | 6220 | ||
| 6118 | if (cpu != group_first_cpu(sg)) | 6221 | if (cpu != group_balance_cpu(sg)) |
| 6119 | return; | 6222 | return; |
| 6120 | 6223 | ||
| 6121 | update_group_power(sd, cpu); | 6224 | update_group_power(sd, cpu); |
| @@ -6165,11 +6268,8 @@ int sched_domain_level_max; | |||
| 6165 | 6268 | ||
| 6166 | static int __init setup_relax_domain_level(char *str) | 6269 | static int __init setup_relax_domain_level(char *str) |
| 6167 | { | 6270 | { |
| 6168 | unsigned long val; | 6271 | if (kstrtoint(str, 0, &default_relax_domain_level)) |
| 6169 | 6272 | pr_warn("Unable to set relax_domain_level\n"); | |
| 6170 | val = simple_strtoul(str, NULL, 0); | ||
| 6171 | if (val < sched_domain_level_max) | ||
| 6172 | default_relax_domain_level = val; | ||
| 6173 | 6273 | ||
| 6174 | return 1; | 6274 | return 1; |
| 6175 | } | 6275 | } |
| @@ -6279,14 +6379,13 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol | |||
| 6279 | #ifdef CONFIG_NUMA | 6379 | #ifdef CONFIG_NUMA |
| 6280 | 6380 | ||
| 6281 | static int sched_domains_numa_levels; | 6381 | static int sched_domains_numa_levels; |
| 6282 | static int sched_domains_numa_scale; | ||
| 6283 | static int *sched_domains_numa_distance; | 6382 | static int *sched_domains_numa_distance; |
| 6284 | static struct cpumask ***sched_domains_numa_masks; | 6383 | static struct cpumask ***sched_domains_numa_masks; |
| 6285 | static int sched_domains_curr_level; | 6384 | static int sched_domains_curr_level; |
| 6286 | 6385 | ||
| 6287 | static inline int sd_local_flags(int level) | 6386 | static inline int sd_local_flags(int level) |
| 6288 | { | 6387 | { |
| 6289 | if (sched_domains_numa_distance[level] > REMOTE_DISTANCE) | 6388 | if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE) |
| 6290 | return 0; | 6389 | return 0; |
| 6291 | 6390 | ||
| 6292 | return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; | 6391 | return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; |
| @@ -6344,6 +6443,42 @@ static const struct cpumask *sd_numa_mask(int cpu) | |||
| 6344 | return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; | 6443 | return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; |
| 6345 | } | 6444 | } |
| 6346 | 6445 | ||
| 6446 | static void sched_numa_warn(const char *str) | ||
| 6447 | { | ||
| 6448 | static int done = false; | ||
| 6449 | int i,j; | ||
| 6450 | |||
| 6451 | if (done) | ||
| 6452 | return; | ||
| 6453 | |||
| 6454 | done = true; | ||
| 6455 | |||
| 6456 | printk(KERN_WARNING "ERROR: %s\n\n", str); | ||
| 6457 | |||
| 6458 | for (i = 0; i < nr_node_ids; i++) { | ||
| 6459 | printk(KERN_WARNING " "); | ||
| 6460 | for (j = 0; j < nr_node_ids; j++) | ||
| 6461 | printk(KERN_CONT "%02d ", node_distance(i,j)); | ||
| 6462 | printk(KERN_CONT "\n"); | ||
| 6463 | } | ||
| 6464 | printk(KERN_WARNING "\n"); | ||
| 6465 | } | ||
| 6466 | |||
| 6467 | static bool find_numa_distance(int distance) | ||
| 6468 | { | ||
| 6469 | int i; | ||
| 6470 | |||
| 6471 | if (distance == node_distance(0, 0)) | ||
| 6472 | return true; | ||
| 6473 | |||
| 6474 | for (i = 0; i < sched_domains_numa_levels; i++) { | ||
| 6475 | if (sched_domains_numa_distance[i] == distance) | ||
| 6476 | return true; | ||
| 6477 | } | ||
| 6478 | |||
| 6479 | return false; | ||
| 6480 | } | ||
| 6481 | |||
| 6347 | static void sched_init_numa(void) | 6482 | static void sched_init_numa(void) |
| 6348 | { | 6483 | { |
| 6349 | int next_distance, curr_distance = node_distance(0, 0); | 6484 | int next_distance, curr_distance = node_distance(0, 0); |
| @@ -6351,7 +6486,6 @@ static void sched_init_numa(void) | |||
| 6351 | int level = 0; | 6486 | int level = 0; |
| 6352 | int i, j, k; | 6487 | int i, j, k; |
| 6353 | 6488 | ||
| 6354 | sched_domains_numa_scale = curr_distance; | ||
| 6355 | sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); | 6489 | sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); |
| 6356 | if (!sched_domains_numa_distance) | 6490 | if (!sched_domains_numa_distance) |
| 6357 | return; | 6491 | return; |
| @@ -6362,23 +6496,41 @@ static void sched_init_numa(void) | |||
| 6362 | * | 6496 | * |
| 6363 | * Assumes node_distance(0,j) includes all distances in | 6497 | * Assumes node_distance(0,j) includes all distances in |
| 6364 | * node_distance(i,j) in order to avoid cubic time. | 6498 | * node_distance(i,j) in order to avoid cubic time. |
| 6365 | * | ||
| 6366 | * XXX: could be optimized to O(n log n) by using sort() | ||
| 6367 | */ | 6499 | */ |
| 6368 | next_distance = curr_distance; | 6500 | next_distance = curr_distance; |
| 6369 | for (i = 0; i < nr_node_ids; i++) { | 6501 | for (i = 0; i < nr_node_ids; i++) { |
| 6370 | for (j = 0; j < nr_node_ids; j++) { | 6502 | for (j = 0; j < nr_node_ids; j++) { |
| 6371 | int distance = node_distance(0, j); | 6503 | for (k = 0; k < nr_node_ids; k++) { |
| 6372 | if (distance > curr_distance && | 6504 | int distance = node_distance(i, k); |
| 6373 | (distance < next_distance || | 6505 | |
| 6374 | next_distance == curr_distance)) | 6506 | if (distance > curr_distance && |
| 6375 | next_distance = distance; | 6507 | (distance < next_distance || |
| 6508 | next_distance == curr_distance)) | ||
| 6509 | next_distance = distance; | ||
| 6510 | |||
| 6511 | /* | ||
| 6512 | * While not a strong assumption it would be nice to know | ||
| 6513 | * about cases where if node A is connected to B, B is not | ||
| 6514 | * equally connected to A. | ||
| 6515 | */ | ||
| 6516 | if (sched_debug() && node_distance(k, i) != distance) | ||
| 6517 | sched_numa_warn("Node-distance not symmetric"); | ||
| 6518 | |||
| 6519 | if (sched_debug() && i && !find_numa_distance(distance)) | ||
| 6520 | sched_numa_warn("Node-0 not representative"); | ||
| 6521 | } | ||
| 6522 | if (next_distance != curr_distance) { | ||
| 6523 | sched_domains_numa_distance[level++] = next_distance; | ||
| 6524 | sched_domains_numa_levels = level; | ||
| 6525 | curr_distance = next_distance; | ||
| 6526 | } else break; | ||
| 6376 | } | 6527 | } |
| 6377 | if (next_distance != curr_distance) { | 6528 | |
| 6378 | sched_domains_numa_distance[level++] = next_distance; | 6529 | /* |
| 6379 | sched_domains_numa_levels = level; | 6530 | * In case of sched_debug() we verify the above assumption. |
| 6380 | curr_distance = next_distance; | 6531 | */ |
| 6381 | } else break; | 6532 | if (!sched_debug()) |
| 6533 | break; | ||
| 6382 | } | 6534 | } |
| 6383 | /* | 6535 | /* |
| 6384 | * 'level' contains the number of unique distances, excluding the | 6536 | * 'level' contains the number of unique distances, excluding the |
| @@ -6403,7 +6555,7 @@ static void sched_init_numa(void) | |||
| 6403 | return; | 6555 | return; |
| 6404 | 6556 | ||
| 6405 | for (j = 0; j < nr_node_ids; j++) { | 6557 | for (j = 0; j < nr_node_ids; j++) { |
| 6406 | struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j); | 6558 | struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); |
| 6407 | if (!mask) | 6559 | if (!mask) |
| 6408 | return; | 6560 | return; |
| 6409 | 6561 | ||
| @@ -6490,7 +6642,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map) | |||
| 6490 | 6642 | ||
| 6491 | *per_cpu_ptr(sdd->sg, j) = sg; | 6643 | *per_cpu_ptr(sdd->sg, j) = sg; |
| 6492 | 6644 | ||
| 6493 | sgp = kzalloc_node(sizeof(struct sched_group_power), | 6645 | sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(), |
| 6494 | GFP_KERNEL, cpu_to_node(j)); | 6646 | GFP_KERNEL, cpu_to_node(j)); |
| 6495 | if (!sgp) | 6647 | if (!sgp) |
| 6496 | return -ENOMEM; | 6648 | return -ENOMEM; |
| @@ -6543,7 +6695,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, | |||
| 6543 | if (!sd) | 6695 | if (!sd) |
| 6544 | return child; | 6696 | return child; |
| 6545 | 6697 | ||
| 6546 | set_domain_attribute(sd, attr); | ||
| 6547 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); | 6698 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); |
| 6548 | if (child) { | 6699 | if (child) { |
| 6549 | sd->level = child->level + 1; | 6700 | sd->level = child->level + 1; |
| @@ -6551,6 +6702,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, | |||
| 6551 | child->parent = sd; | 6702 | child->parent = sd; |
| 6552 | } | 6703 | } |
| 6553 | sd->child = child; | 6704 | sd->child = child; |
| 6705 | set_domain_attribute(sd, attr); | ||
| 6554 | 6706 | ||
| 6555 | return sd; | 6707 | return sd; |
| 6556 | } | 6708 | } |
| @@ -6691,7 +6843,6 @@ static int init_sched_domains(const struct cpumask *cpu_map) | |||
| 6691 | if (!doms_cur) | 6843 | if (!doms_cur) |
| 6692 | doms_cur = &fallback_doms; | 6844 | doms_cur = &fallback_doms; |
| 6693 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); | 6845 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
| 6694 | dattr_cur = NULL; | ||
| 6695 | err = build_sched_domains(doms_cur[0], NULL); | 6846 | err = build_sched_domains(doms_cur[0], NULL); |
| 6696 | register_sched_domain_sysctl(); | 6847 | register_sched_domain_sysctl(); |
| 6697 | 6848 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 940e6d17cf96..c099cc6eebe3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | |||
| 2703 | int want_sd = 1; | 2703 | int want_sd = 1; |
| 2704 | int sync = wake_flags & WF_SYNC; | 2704 | int sync = wake_flags & WF_SYNC; |
| 2705 | 2705 | ||
| 2706 | if (p->rt.nr_cpus_allowed == 1) | 2706 | if (p->nr_cpus_allowed == 1) |
| 2707 | return prev_cpu; | 2707 | return prev_cpu; |
| 2708 | 2708 | ||
| 2709 | if (sd_flag & SD_BALANCE_WAKE) { | 2709 | if (sd_flag & SD_BALANCE_WAKE) { |
| @@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) | |||
| 3503 | unsigned long scale_rt_power(int cpu) | 3503 | unsigned long scale_rt_power(int cpu) |
| 3504 | { | 3504 | { |
| 3505 | struct rq *rq = cpu_rq(cpu); | 3505 | struct rq *rq = cpu_rq(cpu); |
| 3506 | u64 total, available; | 3506 | u64 total, available, age_stamp, avg; |
| 3507 | 3507 | ||
| 3508 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 3508 | /* |
| 3509 | * Since we're reading these variables without serialization make sure | ||
| 3510 | * we read them once before doing sanity checks on them. | ||
| 3511 | */ | ||
| 3512 | age_stamp = ACCESS_ONCE(rq->age_stamp); | ||
| 3513 | avg = ACCESS_ONCE(rq->rt_avg); | ||
| 3514 | |||
| 3515 | total = sched_avg_period() + (rq->clock - age_stamp); | ||
| 3509 | 3516 | ||
| 3510 | if (unlikely(total < rq->rt_avg)) { | 3517 | if (unlikely(total < avg)) { |
| 3511 | /* Ensures that power won't end up being negative */ | 3518 | /* Ensures that power won't end up being negative */ |
| 3512 | available = 0; | 3519 | available = 0; |
| 3513 | } else { | 3520 | } else { |
| 3514 | available = total - rq->rt_avg; | 3521 | available = total - avg; |
| 3515 | } | 3522 | } |
| 3516 | 3523 | ||
| 3517 | if (unlikely((s64)total < SCHED_POWER_SCALE)) | 3524 | if (unlikely((s64)total < SCHED_POWER_SCALE)) |
| @@ -3574,13 +3581,28 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
| 3574 | 3581 | ||
| 3575 | power = 0; | 3582 | power = 0; |
| 3576 | 3583 | ||
| 3577 | group = child->groups; | 3584 | if (child->flags & SD_OVERLAP) { |
| 3578 | do { | 3585 | /* |
| 3579 | power += group->sgp->power; | 3586 | * SD_OVERLAP domains cannot assume that child groups |
| 3580 | group = group->next; | 3587 | * span the current group. |
| 3581 | } while (group != child->groups); | 3588 | */ |
| 3582 | 3589 | ||
| 3583 | sdg->sgp->power = power; | 3590 | for_each_cpu(cpu, sched_group_cpus(sdg)) |
| 3591 | power += power_of(cpu); | ||
| 3592 | } else { | ||
| 3593 | /* | ||
| 3594 | * !SD_OVERLAP domains can assume that child groups | ||
| 3595 | * span the current group. | ||
| 3596 | */ | ||
| 3597 | |||
| 3598 | group = child->groups; | ||
| 3599 | do { | ||
| 3600 | power += group->sgp->power; | ||
| 3601 | group = group->next; | ||
| 3602 | } while (group != child->groups); | ||
| 3603 | } | ||
| 3604 | |||
| 3605 | sdg->sgp->power_orig = sdg->sgp->power = power; | ||
| 3584 | } | 3606 | } |
| 3585 | 3607 | ||
| 3586 | /* | 3608 | /* |
| @@ -3610,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | |||
| 3610 | 3632 | ||
| 3611 | /** | 3633 | /** |
| 3612 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. | 3634 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. |
| 3613 | * @sd: The sched_domain whose statistics are to be updated. | 3635 | * @env: The load balancing environment. |
| 3614 | * @group: sched_group whose statistics are to be updated. | 3636 | * @group: sched_group whose statistics are to be updated. |
| 3615 | * @load_idx: Load index of sched_domain of this_cpu for load calc. | 3637 | * @load_idx: Load index of sched_domain of this_cpu for load calc. |
| 3616 | * @local_group: Does group contain this_cpu. | 3638 | * @local_group: Does group contain this_cpu. |
| @@ -3630,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
| 3630 | int i; | 3652 | int i; |
| 3631 | 3653 | ||
| 3632 | if (local_group) | 3654 | if (local_group) |
| 3633 | balance_cpu = group_first_cpu(group); | 3655 | balance_cpu = group_balance_cpu(group); |
| 3634 | 3656 | ||
| 3635 | /* Tally up the load of all CPUs in the group */ | 3657 | /* Tally up the load of all CPUs in the group */ |
| 3636 | max_cpu_load = 0; | 3658 | max_cpu_load = 0; |
| @@ -3645,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
| 3645 | 3667 | ||
| 3646 | /* Bias balancing toward cpus of our domain */ | 3668 | /* Bias balancing toward cpus of our domain */ |
| 3647 | if (local_group) { | 3669 | if (local_group) { |
| 3648 | if (idle_cpu(i) && !first_idle_cpu) { | 3670 | if (idle_cpu(i) && !first_idle_cpu && |
| 3671 | cpumask_test_cpu(i, sched_group_mask(group))) { | ||
| 3649 | first_idle_cpu = 1; | 3672 | first_idle_cpu = 1; |
| 3650 | balance_cpu = i; | 3673 | balance_cpu = i; |
| 3651 | } | 3674 | } |
| @@ -3719,11 +3742,10 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
| 3719 | 3742 | ||
| 3720 | /** | 3743 | /** |
| 3721 | * update_sd_pick_busiest - return 1 on busiest group | 3744 | * update_sd_pick_busiest - return 1 on busiest group |
| 3722 | * @sd: sched_domain whose statistics are to be checked | 3745 | * @env: The load balancing environment. |
| 3723 | * @sds: sched_domain statistics | 3746 | * @sds: sched_domain statistics |
| 3724 | * @sg: sched_group candidate to be checked for being the busiest | 3747 | * @sg: sched_group candidate to be checked for being the busiest |
| 3725 | * @sgs: sched_group statistics | 3748 | * @sgs: sched_group statistics |
| 3726 | * @this_cpu: the current cpu | ||
| 3727 | * | 3749 | * |
| 3728 | * Determine if @sg is a busier group than the previously selected | 3750 | * Determine if @sg is a busier group than the previously selected |
| 3729 | * busiest group. | 3751 | * busiest group. |
| @@ -3761,9 +3783,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, | |||
| 3761 | 3783 | ||
| 3762 | /** | 3784 | /** |
| 3763 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. | 3785 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. |
| 3764 | * @sd: sched_domain whose statistics are to be updated. | 3786 | * @env: The load balancing environment. |
| 3765 | * @this_cpu: Cpu for which load balance is currently performed. | ||
| 3766 | * @idle: Idle status of this_cpu | ||
| 3767 | * @cpus: Set of cpus considered for load balancing. | 3787 | * @cpus: Set of cpus considered for load balancing. |
| 3768 | * @balance: Should we balance. | 3788 | * @balance: Should we balance. |
| 3769 | * @sds: variable to hold the statistics for this sched_domain. | 3789 | * @sds: variable to hold the statistics for this sched_domain. |
| @@ -3852,10 +3872,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, | |||
| 3852 | * Returns 1 when packing is required and a task should be moved to | 3872 | * Returns 1 when packing is required and a task should be moved to |
| 3853 | * this CPU. The amount of the imbalance is returned in *imbalance. | 3873 | * this CPU. The amount of the imbalance is returned in *imbalance. |
| 3854 | * | 3874 | * |
| 3855 | * @sd: The sched_domain whose packing is to be checked. | 3875 | * @env: The load balancing environment. |
| 3856 | * @sds: Statistics of the sched_domain which is to be packed | 3876 | * @sds: Statistics of the sched_domain which is to be packed |
| 3857 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | ||
| 3858 | * @imbalance: returns amount of imbalanced due to packing. | ||
| 3859 | */ | 3877 | */ |
| 3860 | static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) | 3878 | static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) |
| 3861 | { | 3879 | { |
| @@ -3881,9 +3899,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) | |||
| 3881 | * fix_small_imbalance - Calculate the minor imbalance that exists | 3899 | * fix_small_imbalance - Calculate the minor imbalance that exists |
| 3882 | * amongst the groups of a sched_domain, during | 3900 | * amongst the groups of a sched_domain, during |
| 3883 | * load balancing. | 3901 | * load balancing. |
| 3902 | * @env: The load balancing environment. | ||
| 3884 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. | 3903 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. |
| 3885 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | ||
| 3886 | * @imbalance: Variable to store the imbalance. | ||
| 3887 | */ | 3904 | */ |
| 3888 | static inline | 3905 | static inline |
| 3889 | void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) | 3906 | void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) |
| @@ -4026,11 +4043,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s | |||
| 4026 | * Also calculates the amount of weighted load which should be moved | 4043 | * Also calculates the amount of weighted load which should be moved |
| 4027 | * to restore balance. | 4044 | * to restore balance. |
| 4028 | * | 4045 | * |
| 4029 | * @sd: The sched_domain whose busiest group is to be returned. | 4046 | * @env: The load balancing environment. |
| 4030 | * @this_cpu: The cpu for which load balancing is currently being performed. | ||
| 4031 | * @imbalance: Variable which stores amount of weighted load which should | ||
| 4032 | * be moved to restore balance/put a group to idle. | ||
| 4033 | * @idle: The idle status of this_cpu. | ||
| 4034 | * @cpus: The set of CPUs under consideration for load-balancing. | 4047 | * @cpus: The set of CPUs under consideration for load-balancing. |
| 4035 | * @balance: Pointer to a variable indicating if this_cpu | 4048 | * @balance: Pointer to a variable indicating if this_cpu |
| 4036 | * is the appropriate cpu to perform load balancing at this_level. | 4049 | * is the appropriate cpu to perform load balancing at this_level. |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index c5565c3c515f..573e1ca01102 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq) | |||
| 274 | 274 | ||
| 275 | static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | 275 | static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) |
| 276 | { | 276 | { |
| 277 | struct task_struct *p; | ||
| 278 | |||
| 277 | if (!rt_entity_is_task(rt_se)) | 279 | if (!rt_entity_is_task(rt_se)) |
| 278 | return; | 280 | return; |
| 279 | 281 | ||
| 282 | p = rt_task_of(rt_se); | ||
| 280 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; | 283 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; |
| 281 | 284 | ||
| 282 | rt_rq->rt_nr_total++; | 285 | rt_rq->rt_nr_total++; |
| 283 | if (rt_se->nr_cpus_allowed > 1) | 286 | if (p->nr_cpus_allowed > 1) |
| 284 | rt_rq->rt_nr_migratory++; | 287 | rt_rq->rt_nr_migratory++; |
| 285 | 288 | ||
| 286 | update_rt_migration(rt_rq); | 289 | update_rt_migration(rt_rq); |
| @@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | |||
| 288 | 291 | ||
| 289 | static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | 292 | static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) |
| 290 | { | 293 | { |
| 294 | struct task_struct *p; | ||
| 295 | |||
| 291 | if (!rt_entity_is_task(rt_se)) | 296 | if (!rt_entity_is_task(rt_se)) |
| 292 | return; | 297 | return; |
| 293 | 298 | ||
| 299 | p = rt_task_of(rt_se); | ||
| 294 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; | 300 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; |
| 295 | 301 | ||
| 296 | rt_rq->rt_nr_total--; | 302 | rt_rq->rt_nr_total--; |
| 297 | if (rt_se->nr_cpus_allowed > 1) | 303 | if (p->nr_cpus_allowed > 1) |
| 298 | rt_rq->rt_nr_migratory--; | 304 | rt_rq->rt_nr_migratory--; |
| 299 | 305 | ||
| 300 | update_rt_migration(rt_rq); | 306 | update_rt_migration(rt_rq); |
| @@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) | |||
| 1161 | 1167 | ||
| 1162 | enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); | 1168 | enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); |
| 1163 | 1169 | ||
| 1164 | if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) | 1170 | if (!task_current(rq, p) && p->nr_cpus_allowed > 1) |
| 1165 | enqueue_pushable_task(rq, p); | 1171 | enqueue_pushable_task(rq, p); |
| 1166 | 1172 | ||
| 1167 | inc_nr_running(rq); | 1173 | inc_nr_running(rq); |
| @@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | |||
| 1225 | 1231 | ||
| 1226 | cpu = task_cpu(p); | 1232 | cpu = task_cpu(p); |
| 1227 | 1233 | ||
| 1228 | if (p->rt.nr_cpus_allowed == 1) | 1234 | if (p->nr_cpus_allowed == 1) |
| 1229 | goto out; | 1235 | goto out; |
| 1230 | 1236 | ||
| 1231 | /* For anything but wake ups, just return the task_cpu */ | 1237 | /* For anything but wake ups, just return the task_cpu */ |
| @@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | |||
| 1260 | * will have to sort it out. | 1266 | * will have to sort it out. |
| 1261 | */ | 1267 | */ |
| 1262 | if (curr && unlikely(rt_task(curr)) && | 1268 | if (curr && unlikely(rt_task(curr)) && |
| 1263 | (curr->rt.nr_cpus_allowed < 2 || | 1269 | (curr->nr_cpus_allowed < 2 || |
| 1264 | curr->prio <= p->prio) && | 1270 | curr->prio <= p->prio) && |
| 1265 | (p->rt.nr_cpus_allowed > 1)) { | 1271 | (p->nr_cpus_allowed > 1)) { |
| 1266 | int target = find_lowest_rq(p); | 1272 | int target = find_lowest_rq(p); |
| 1267 | 1273 | ||
| 1268 | if (target != -1) | 1274 | if (target != -1) |
| @@ -1276,10 +1282,10 @@ out: | |||
| 1276 | 1282 | ||
| 1277 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | 1283 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) |
| 1278 | { | 1284 | { |
| 1279 | if (rq->curr->rt.nr_cpus_allowed == 1) | 1285 | if (rq->curr->nr_cpus_allowed == 1) |
| 1280 | return; | 1286 | return; |
| 1281 | 1287 | ||
| 1282 | if (p->rt.nr_cpus_allowed != 1 | 1288 | if (p->nr_cpus_allowed != 1 |
| 1283 | && cpupri_find(&rq->rd->cpupri, p, NULL)) | 1289 | && cpupri_find(&rq->rd->cpupri, p, NULL)) |
| 1284 | return; | 1290 | return; |
| 1285 | 1291 | ||
| @@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
| 1395 | * The previous task needs to be made eligible for pushing | 1401 | * The previous task needs to be made eligible for pushing |
| 1396 | * if it is still active | 1402 | * if it is still active |
| 1397 | */ | 1403 | */ |
| 1398 | if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1) | 1404 | if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) |
| 1399 | enqueue_pushable_task(rq, p); | 1405 | enqueue_pushable_task(rq, p); |
| 1400 | } | 1406 | } |
| 1401 | 1407 | ||
| @@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | |||
| 1408 | { | 1414 | { |
| 1409 | if (!task_running(rq, p) && | 1415 | if (!task_running(rq, p) && |
| 1410 | (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && | 1416 | (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && |
| 1411 | (p->rt.nr_cpus_allowed > 1)) | 1417 | (p->nr_cpus_allowed > 1)) |
| 1412 | return 1; | 1418 | return 1; |
| 1413 | return 0; | 1419 | return 0; |
| 1414 | } | 1420 | } |
| @@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task) | |||
| 1464 | if (unlikely(!lowest_mask)) | 1470 | if (unlikely(!lowest_mask)) |
| 1465 | return -1; | 1471 | return -1; |
| 1466 | 1472 | ||
| 1467 | if (task->rt.nr_cpus_allowed == 1) | 1473 | if (task->nr_cpus_allowed == 1) |
| 1468 | return -1; /* No other targets possible */ | 1474 | return -1; /* No other targets possible */ |
| 1469 | 1475 | ||
| 1470 | if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) | 1476 | if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) |
| @@ -1556,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
| 1556 | task_running(rq, task) || | 1562 | task_running(rq, task) || |
| 1557 | !task->on_rq)) { | 1563 | !task->on_rq)) { |
| 1558 | 1564 | ||
| 1559 | raw_spin_unlock(&lowest_rq->lock); | 1565 | double_unlock_balance(rq, lowest_rq); |
| 1560 | lowest_rq = NULL; | 1566 | lowest_rq = NULL; |
| 1561 | break; | 1567 | break; |
| 1562 | } | 1568 | } |
| @@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq) | |||
| 1586 | 1592 | ||
| 1587 | BUG_ON(rq->cpu != task_cpu(p)); | 1593 | BUG_ON(rq->cpu != task_cpu(p)); |
| 1588 | BUG_ON(task_current(rq, p)); | 1594 | BUG_ON(task_current(rq, p)); |
| 1589 | BUG_ON(p->rt.nr_cpus_allowed <= 1); | 1595 | BUG_ON(p->nr_cpus_allowed <= 1); |
| 1590 | 1596 | ||
| 1591 | BUG_ON(!p->on_rq); | 1597 | BUG_ON(!p->on_rq); |
| 1592 | BUG_ON(!rt_task(p)); | 1598 | BUG_ON(!rt_task(p)); |
| @@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) | |||
| 1793 | if (!task_running(rq, p) && | 1799 | if (!task_running(rq, p) && |
| 1794 | !test_tsk_need_resched(rq->curr) && | 1800 | !test_tsk_need_resched(rq->curr) && |
| 1795 | has_pushable_tasks(rq) && | 1801 | has_pushable_tasks(rq) && |
| 1796 | p->rt.nr_cpus_allowed > 1 && | 1802 | p->nr_cpus_allowed > 1 && |
| 1797 | rt_task(rq->curr) && | 1803 | rt_task(rq->curr) && |
| 1798 | (rq->curr->rt.nr_cpus_allowed < 2 || | 1804 | (rq->curr->nr_cpus_allowed < 2 || |
| 1799 | rq->curr->prio <= p->prio)) | 1805 | rq->curr->prio <= p->prio)) |
| 1800 | push_rt_tasks(rq); | 1806 | push_rt_tasks(rq); |
| 1801 | } | 1807 | } |
| @@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, | |||
| 1817 | * Only update if the process changes its state from whether it | 1823 | * Only update if the process changes its state from whether it |
| 1818 | * can migrate or not. | 1824 | * can migrate or not. |
| 1819 | */ | 1825 | */ |
| 1820 | if ((p->rt.nr_cpus_allowed > 1) == (weight > 1)) | 1826 | if ((p->nr_cpus_allowed > 1) == (weight > 1)) |
| 1821 | return; | 1827 | return; |
| 1822 | 1828 | ||
| 1823 | rq = task_rq(p); | 1829 | rq = task_rq(p); |
| @@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p) | |||
| 1979 | 1985 | ||
| 1980 | static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) | 1986 | static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) |
| 1981 | { | 1987 | { |
| 1988 | struct sched_rt_entity *rt_se = &p->rt; | ||
| 1989 | |||
| 1982 | update_curr_rt(rq); | 1990 | update_curr_rt(rq); |
| 1983 | 1991 | ||
| 1984 | watchdog(rq, p); | 1992 | watchdog(rq, p); |
| @@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) | |||
| 1996 | p->rt.time_slice = RR_TIMESLICE; | 2004 | p->rt.time_slice = RR_TIMESLICE; |
| 1997 | 2005 | ||
| 1998 | /* | 2006 | /* |
| 1999 | * Requeue to the end of queue if we are not the only element | 2007 | * Requeue to the end of queue if we (and all of our ancestors) are the |
| 2000 | * on the queue: | 2008 | * only element on the queue |
| 2001 | */ | 2009 | */ |
| 2002 | if (p->rt.run_list.prev != p->rt.run_list.next) { | 2010 | for_each_sched_rt_entity(rt_se) { |
| 2003 | requeue_task_rt(rq, p, 0); | 2011 | if (rt_se->run_list.prev != rt_se->run_list.next) { |
| 2004 | set_tsk_need_resched(p); | 2012 | requeue_task_rt(rq, p, 0); |
| 2013 | set_tsk_need_resched(p); | ||
| 2014 | return; | ||
| 2015 | } | ||
| 2005 | } | 2016 | } |
| 2006 | } | 2017 | } |
| 2007 | 2018 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ba9dccfd24ce..6d52cea7f33d 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
| @@ -526,6 +526,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag) | |||
| 526 | DECLARE_PER_CPU(struct sched_domain *, sd_llc); | 526 | DECLARE_PER_CPU(struct sched_domain *, sd_llc); |
| 527 | DECLARE_PER_CPU(int, sd_llc_id); | 527 | DECLARE_PER_CPU(int, sd_llc_id); |
| 528 | 528 | ||
| 529 | extern int group_balance_cpu(struct sched_group *sg); | ||
| 530 | |||
| 529 | #endif /* CONFIG_SMP */ | 531 | #endif /* CONFIG_SMP */ |
| 530 | 532 | ||
| 531 | #include "stats.h" | 533 | #include "stats.h" |
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index e1a797e028a3..98f60c5caa1b 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
| @@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void) | |||
| 31 | per_cpu(idle_threads, smp_processor_id()) = current; | 31 | per_cpu(idle_threads, smp_processor_id()) = current; |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | /** | ||
| 35 | * idle_init - Initialize the idle thread for a cpu | ||
| 36 | * @cpu: The cpu for which the idle thread should be initialized | ||
| 37 | * | ||
| 38 | * Creates the thread if it does not exist. | ||
| 39 | */ | ||
| 34 | static inline void idle_init(unsigned int cpu) | 40 | static inline void idle_init(unsigned int cpu) |
| 35 | { | 41 | { |
| 36 | struct task_struct *tsk = per_cpu(idle_threads, cpu); | 42 | struct task_struct *tsk = per_cpu(idle_threads, cpu); |
| @@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu) | |||
| 45 | } | 51 | } |
| 46 | 52 | ||
| 47 | /** | 53 | /** |
| 48 | * idle_thread_init - Initialize the idle thread for a cpu | 54 | * idle_threads_init - Initialize idle threads for all cpus |
| 49 | * @cpu: The cpu for which the idle thread should be initialized | ||
| 50 | * | ||
| 51 | * Creates the thread if it does not exist. | ||
| 52 | */ | 55 | */ |
| 53 | void __init idle_threads_init(void) | 56 | void __init idle_threads_init(void) |
| 54 | { | 57 | { |
| 55 | unsigned int cpu; | 58 | unsigned int cpu, boot_cpu; |
| 59 | |||
| 60 | boot_cpu = smp_processor_id(); | ||
| 56 | 61 | ||
| 57 | for_each_possible_cpu(cpu) { | 62 | for_each_possible_cpu(cpu) { |
| 58 | if (cpu != smp_processor_id()) | 63 | if (cpu != boot_cpu) |
| 59 | idle_init(cpu); | 64 | idle_init(cpu); |
| 60 | } | 65 | } |
| 61 | } | 66 | } |
diff --git a/kernel/sys.c b/kernel/sys.c index 9ff89cb9657a..f0ec44dcd415 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -1786,27 +1786,13 @@ SYSCALL_DEFINE1(umask, int, mask) | |||
| 1786 | } | 1786 | } |
| 1787 | 1787 | ||
| 1788 | #ifdef CONFIG_CHECKPOINT_RESTORE | 1788 | #ifdef CONFIG_CHECKPOINT_RESTORE |
| 1789 | static bool vma_flags_mismatch(struct vm_area_struct *vma, | ||
| 1790 | unsigned long required, | ||
| 1791 | unsigned long banned) | ||
| 1792 | { | ||
| 1793 | return (vma->vm_flags & required) != required || | ||
| 1794 | (vma->vm_flags & banned); | ||
| 1795 | } | ||
| 1796 | |||
| 1797 | static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) | 1789 | static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) |
| 1798 | { | 1790 | { |
| 1791 | struct vm_area_struct *vma; | ||
| 1799 | struct file *exe_file; | 1792 | struct file *exe_file; |
| 1800 | struct dentry *dentry; | 1793 | struct dentry *dentry; |
| 1801 | int err; | 1794 | int err; |
| 1802 | 1795 | ||
| 1803 | /* | ||
| 1804 | * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's | ||
| 1805 | * remain. So perform a quick test first. | ||
| 1806 | */ | ||
| 1807 | if (mm->num_exe_file_vmas) | ||
| 1808 | return -EBUSY; | ||
| 1809 | |||
| 1810 | exe_file = fget(fd); | 1796 | exe_file = fget(fd); |
| 1811 | if (!exe_file) | 1797 | if (!exe_file) |
| 1812 | return -EBADF; | 1798 | return -EBADF; |
| @@ -1827,17 +1813,30 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) | |||
| 1827 | if (err) | 1813 | if (err) |
| 1828 | goto exit; | 1814 | goto exit; |
| 1829 | 1815 | ||
| 1816 | down_write(&mm->mmap_sem); | ||
| 1817 | |||
| 1818 | /* | ||
| 1819 | * Forbid mm->exe_file change if there are mapped other files. | ||
| 1820 | */ | ||
| 1821 | err = -EBUSY; | ||
| 1822 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | ||
| 1823 | if (vma->vm_file && !path_equal(&vma->vm_file->f_path, | ||
| 1824 | &exe_file->f_path)) | ||
| 1825 | goto exit_unlock; | ||
| 1826 | } | ||
| 1827 | |||
| 1830 | /* | 1828 | /* |
| 1831 | * The symlink can be changed only once, just to disallow arbitrary | 1829 | * The symlink can be changed only once, just to disallow arbitrary |
| 1832 | * transitions malicious software might bring in. This means one | 1830 | * transitions malicious software might bring in. This means one |
| 1833 | * could make a snapshot over all processes running and monitor | 1831 | * could make a snapshot over all processes running and monitor |
| 1834 | * /proc/pid/exe changes to notice unusual activity if needed. | 1832 | * /proc/pid/exe changes to notice unusual activity if needed. |
| 1835 | */ | 1833 | */ |
| 1836 | down_write(&mm->mmap_sem); | 1834 | err = -EPERM; |
| 1837 | if (likely(!mm->exe_file)) | 1835 | if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags)) |
| 1838 | set_mm_exe_file(mm, exe_file); | 1836 | goto exit_unlock; |
| 1839 | else | 1837 | |
| 1840 | err = -EBUSY; | 1838 | set_mm_exe_file(mm, exe_file); |
| 1839 | exit_unlock: | ||
| 1841 | up_write(&mm->mmap_sem); | 1840 | up_write(&mm->mmap_sem); |
| 1842 | 1841 | ||
| 1843 | exit: | 1842 | exit: |
| @@ -1862,7 +1861,7 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
| 1862 | if (opt == PR_SET_MM_EXE_FILE) | 1861 | if (opt == PR_SET_MM_EXE_FILE) |
| 1863 | return prctl_set_mm_exe_file(mm, (unsigned int)addr); | 1862 | return prctl_set_mm_exe_file(mm, (unsigned int)addr); |
| 1864 | 1863 | ||
| 1865 | if (addr >= TASK_SIZE) | 1864 | if (addr >= TASK_SIZE || addr < mmap_min_addr) |
| 1866 | return -EINVAL; | 1865 | return -EINVAL; |
| 1867 | 1866 | ||
| 1868 | error = -EINVAL; | 1867 | error = -EINVAL; |
| @@ -1924,12 +1923,6 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
| 1924 | error = -EFAULT; | 1923 | error = -EFAULT; |
| 1925 | goto out; | 1924 | goto out; |
| 1926 | } | 1925 | } |
| 1927 | #ifdef CONFIG_STACK_GROWSUP | ||
| 1928 | if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0)) | ||
| 1929 | #else | ||
| 1930 | if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0)) | ||
| 1931 | #endif | ||
| 1932 | goto out; | ||
| 1933 | if (opt == PR_SET_MM_START_STACK) | 1926 | if (opt == PR_SET_MM_START_STACK) |
| 1934 | mm->start_stack = addr; | 1927 | mm->start_stack = addr; |
| 1935 | else if (opt == PR_SET_MM_ARG_START) | 1928 | else if (opt == PR_SET_MM_ARG_START) |
| @@ -1981,12 +1974,22 @@ out: | |||
| 1981 | up_read(&mm->mmap_sem); | 1974 | up_read(&mm->mmap_sem); |
| 1982 | return error; | 1975 | return error; |
| 1983 | } | 1976 | } |
| 1977 | |||
| 1978 | static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) | ||
| 1979 | { | ||
| 1980 | return put_user(me->clear_child_tid, tid_addr); | ||
| 1981 | } | ||
| 1982 | |||
| 1984 | #else /* CONFIG_CHECKPOINT_RESTORE */ | 1983 | #else /* CONFIG_CHECKPOINT_RESTORE */ |
| 1985 | static int prctl_set_mm(int opt, unsigned long addr, | 1984 | static int prctl_set_mm(int opt, unsigned long addr, |
| 1986 | unsigned long arg4, unsigned long arg5) | 1985 | unsigned long arg4, unsigned long arg5) |
| 1987 | { | 1986 | { |
| 1988 | return -EINVAL; | 1987 | return -EINVAL; |
| 1989 | } | 1988 | } |
| 1989 | static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) | ||
| 1990 | { | ||
| 1991 | return -EINVAL; | ||
| 1992 | } | ||
| 1990 | #endif | 1993 | #endif |
| 1991 | 1994 | ||
| 1992 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | 1995 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
| @@ -2124,6 +2127,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
| 2124 | else | 2127 | else |
| 2125 | return -EINVAL; | 2128 | return -EINVAL; |
| 2126 | break; | 2129 | break; |
| 2130 | case PR_GET_TID_ADDRESS: | ||
| 2131 | error = prctl_get_tid_address(me, (int __user **)arg2); | ||
| 2132 | break; | ||
| 2127 | default: | 2133 | default: |
| 2128 | return -EINVAL; | 2134 | return -EINVAL; |
| 2129 | } | 2135 | } |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 9cd928f7a7c6..7e1ce012a851 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
| @@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
| 297 | } | 297 | } |
| 298 | EXPORT_SYMBOL_GPL(clockevents_register_device); | 298 | EXPORT_SYMBOL_GPL(clockevents_register_device); |
| 299 | 299 | ||
| 300 | static void clockevents_config(struct clock_event_device *dev, | 300 | void clockevents_config(struct clock_event_device *dev, u32 freq) |
| 301 | u32 freq) | ||
| 302 | { | 301 | { |
| 303 | u64 sec; | 302 | u64 sec; |
| 304 | 303 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6a3a5b9ff561..869997833928 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -274,6 +274,7 @@ EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); | |||
| 274 | static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | 274 | static void tick_nohz_stop_sched_tick(struct tick_sched *ts) |
| 275 | { | 275 | { |
| 276 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; | 276 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; |
| 277 | unsigned long rcu_delta_jiffies; | ||
| 277 | ktime_t last_update, expires, now; | 278 | ktime_t last_update, expires, now; |
| 278 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | 279 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; |
| 279 | u64 time_delta; | 280 | u64 time_delta; |
| @@ -322,7 +323,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | |||
| 322 | time_delta = timekeeping_max_deferment(); | 323 | time_delta = timekeeping_max_deferment(); |
| 323 | } while (read_seqretry(&xtime_lock, seq)); | 324 | } while (read_seqretry(&xtime_lock, seq)); |
| 324 | 325 | ||
| 325 | if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || | 326 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || |
| 326 | arch_needs_cpu(cpu)) { | 327 | arch_needs_cpu(cpu)) { |
| 327 | next_jiffies = last_jiffies + 1; | 328 | next_jiffies = last_jiffies + 1; |
| 328 | delta_jiffies = 1; | 329 | delta_jiffies = 1; |
| @@ -330,6 +331,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | |||
| 330 | /* Get the next timer wheel timer */ | 331 | /* Get the next timer wheel timer */ |
| 331 | next_jiffies = get_next_timer_interrupt(last_jiffies); | 332 | next_jiffies = get_next_timer_interrupt(last_jiffies); |
| 332 | delta_jiffies = next_jiffies - last_jiffies; | 333 | delta_jiffies = next_jiffies - last_jiffies; |
| 334 | if (rcu_delta_jiffies < delta_jiffies) { | ||
| 335 | next_jiffies = last_jiffies + rcu_delta_jiffies; | ||
| 336 | delta_jiffies = rcu_delta_jiffies; | ||
| 337 | } | ||
| 333 | } | 338 | } |
| 334 | /* | 339 | /* |
| 335 | * Do not stop the tick, if we are only one off | 340 | * Do not stop the tick, if we are only one off |
| @@ -576,6 +581,7 @@ void tick_nohz_idle_exit(void) | |||
| 576 | /* Update jiffies first */ | 581 | /* Update jiffies first */ |
| 577 | select_nohz_load_balancer(0); | 582 | select_nohz_load_balancer(0); |
| 578 | tick_do_update_jiffies64(now); | 583 | tick_do_update_jiffies64(now); |
| 584 | update_cpu_load_nohz(); | ||
| 579 | 585 | ||
| 580 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 586 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
| 581 | /* | 587 | /* |
| @@ -814,6 +820,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |||
| 814 | return HRTIMER_RESTART; | 820 | return HRTIMER_RESTART; |
| 815 | } | 821 | } |
| 816 | 822 | ||
| 823 | static int sched_skew_tick; | ||
| 824 | |||
| 825 | static int __init skew_tick(char *str) | ||
| 826 | { | ||
| 827 | get_option(&str, &sched_skew_tick); | ||
| 828 | |||
| 829 | return 0; | ||
| 830 | } | ||
| 831 | early_param("skew_tick", skew_tick); | ||
| 832 | |||
| 817 | /** | 833 | /** |
| 818 | * tick_setup_sched_timer - setup the tick emulation timer | 834 | * tick_setup_sched_timer - setup the tick emulation timer |
| 819 | */ | 835 | */ |
| @@ -831,6 +847,14 @@ void tick_setup_sched_timer(void) | |||
| 831 | /* Get the next period (per cpu) */ | 847 | /* Get the next period (per cpu) */ |
| 832 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); | 848 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
| 833 | 849 | ||
| 850 | /* Offset the tick to avert xtime_lock contention. */ | ||
| 851 | if (sched_skew_tick) { | ||
| 852 | u64 offset = ktime_to_ns(tick_period) >> 1; | ||
| 853 | do_div(offset, num_possible_cpus()); | ||
| 854 | offset *= smp_processor_id(); | ||
| 855 | hrtimer_add_expires_ns(&ts->sched_timer, offset); | ||
| 856 | } | ||
| 857 | |||
| 834 | for (;;) { | 858 | for (;;) { |
| 835 | hrtimer_forward(&ts->sched_timer, now, tick_period); | 859 | hrtimer_forward(&ts->sched_timer, now, tick_period); |
| 836 | hrtimer_start_expires(&ts->sched_timer, | 860 | hrtimer_start_expires(&ts->sched_timer, |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 6e46cacf5969..6f46a00a1e8a 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -962,6 +962,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) | |||
| 962 | timekeeper.xtime.tv_sec++; | 962 | timekeeper.xtime.tv_sec++; |
| 963 | leap = second_overflow(timekeeper.xtime.tv_sec); | 963 | leap = second_overflow(timekeeper.xtime.tv_sec); |
| 964 | timekeeper.xtime.tv_sec += leap; | 964 | timekeeper.xtime.tv_sec += leap; |
| 965 | timekeeper.wall_to_monotonic.tv_sec -= leap; | ||
| 965 | } | 966 | } |
| 966 | 967 | ||
| 967 | /* Accumulate raw time */ | 968 | /* Accumulate raw time */ |
| @@ -1077,6 +1078,7 @@ static void update_wall_time(void) | |||
| 1077 | timekeeper.xtime.tv_sec++; | 1078 | timekeeper.xtime.tv_sec++; |
| 1078 | leap = second_overflow(timekeeper.xtime.tv_sec); | 1079 | leap = second_overflow(timekeeper.xtime.tv_sec); |
| 1079 | timekeeper.xtime.tv_sec += leap; | 1080 | timekeeper.xtime.tv_sec += leap; |
| 1081 | timekeeper.wall_to_monotonic.tv_sec -= leap; | ||
| 1080 | } | 1082 | } |
| 1081 | 1083 | ||
| 1082 | timekeeping_update(false); | 1084 | timekeeping_update(false); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 68032c6177db..49249c28690d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(tracing_on); | |||
| 371 | void tracing_off(void) | 371 | void tracing_off(void) |
| 372 | { | 372 | { |
| 373 | if (global_trace.buffer) | 373 | if (global_trace.buffer) |
| 374 | ring_buffer_record_on(global_trace.buffer); | 374 | ring_buffer_record_off(global_trace.buffer); |
| 375 | /* | 375 | /* |
| 376 | * This flag is only looked at when buffers haven't been | 376 | * This flag is only looked at when buffers haven't been |
| 377 | * allocated yet. We don't really care about the race | 377 | * allocated yet. We don't really care about the race |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index e5e1d85b8c7c..4b1dfba70f7c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -372,6 +372,13 @@ static int watchdog(void *unused) | |||
| 372 | 372 | ||
| 373 | 373 | ||
| 374 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 374 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 375 | /* | ||
| 376 | * People like the simple clean cpu node info on boot. | ||
| 377 | * Reduce the watchdog noise by only printing messages | ||
| 378 | * that are different from what cpu0 displayed. | ||
| 379 | */ | ||
| 380 | static unsigned long cpu0_err; | ||
| 381 | |||
| 375 | static int watchdog_nmi_enable(int cpu) | 382 | static int watchdog_nmi_enable(int cpu) |
| 376 | { | 383 | { |
| 377 | struct perf_event_attr *wd_attr; | 384 | struct perf_event_attr *wd_attr; |
| @@ -390,11 +397,21 @@ static int watchdog_nmi_enable(int cpu) | |||
| 390 | 397 | ||
| 391 | /* Try to register using hardware perf events */ | 398 | /* Try to register using hardware perf events */ |
| 392 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | 399 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); |
| 400 | |||
| 401 | /* save cpu0 error for future comparision */ | ||
| 402 | if (cpu == 0 && IS_ERR(event)) | ||
| 403 | cpu0_err = PTR_ERR(event); | ||
| 404 | |||
| 393 | if (!IS_ERR(event)) { | 405 | if (!IS_ERR(event)) { |
| 394 | pr_info("enabled, takes one hw-pmu counter.\n"); | 406 | /* only print for cpu0 or different than cpu0 */ |
| 407 | if (cpu == 0 || cpu0_err) | ||
| 408 | pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); | ||
| 395 | goto out_save; | 409 | goto out_save; |
| 396 | } | 410 | } |
| 397 | 411 | ||
| 412 | /* skip displaying the same error again */ | ||
| 413 | if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) | ||
| 414 | return PTR_ERR(event); | ||
| 398 | 415 | ||
| 399 | /* vary the KERN level based on the returned errno */ | 416 | /* vary the KERN level based on the returned errno */ |
| 400 | if (PTR_ERR(event) == -EOPNOTSUPP) | 417 | if (PTR_ERR(event) == -EOPNOTSUPP) |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a42d3ae39648..ff5bdee4716d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -241,6 +241,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | |||
| 241 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | 241 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC |
| 242 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | 242 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC |
| 243 | 243 | ||
| 244 | config PANIC_ON_OOPS | ||
| 245 | bool "Panic on Oops" if EXPERT | ||
| 246 | default n | ||
| 247 | help | ||
| 248 | Say Y here to enable the kernel to panic when it oopses. This | ||
| 249 | has the same effect as setting oops=panic on the kernel command | ||
| 250 | line. | ||
| 251 | |||
| 252 | This feature is useful to ensure that the kernel does not do | ||
| 253 | anything erroneous after an oops which could result in data | ||
| 254 | corruption or other issues. | ||
| 255 | |||
| 256 | Say N if unsure. | ||
| 257 | |||
| 258 | config PANIC_ON_OOPS_VALUE | ||
| 259 | int | ||
| 260 | range 0 1 | ||
| 261 | default 0 if !PANIC_ON_OOPS | ||
| 262 | default 1 if PANIC_ON_OOPS | ||
| 263 | |||
| 244 | config DETECT_HUNG_TASK | 264 | config DETECT_HUNG_TASK |
| 245 | bool "Detect Hung Tasks" | 265 | bool "Detect Hung Tasks" |
| 246 | depends on DEBUG_KERNEL | 266 | depends on DEBUG_KERNEL |
diff --git a/lib/btree.c b/lib/btree.c index e5ec1e9c1aa5..f9a484676cb6 100644 --- a/lib/btree.c +++ b/lib/btree.c | |||
| @@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, | |||
| 319 | 319 | ||
| 320 | if (head->height == 0) | 320 | if (head->height == 0) |
| 321 | return NULL; | 321 | return NULL; |
| 322 | retry: | ||
| 323 | longcpy(key, __key, geo->keylen); | 322 | longcpy(key, __key, geo->keylen); |
| 323 | retry: | ||
| 324 | dec_key(geo, key); | 324 | dec_key(geo, key); |
| 325 | 325 | ||
| 326 | node = head->node; | 326 | node = head->node; |
| @@ -351,7 +351,7 @@ retry: | |||
| 351 | } | 351 | } |
| 352 | miss: | 352 | miss: |
| 353 | if (retry_key) { | 353 | if (retry_key) { |
| 354 | __key = retry_key; | 354 | longcpy(key, retry_key, geo->keylen); |
| 355 | retry_key = NULL; | 355 | retry_key = NULL; |
| 356 | goto retry; | 356 | goto retry; |
| 357 | } | 357 | } |
| @@ -509,6 +509,7 @@ retry: | |||
| 509 | int btree_insert(struct btree_head *head, struct btree_geo *geo, | 509 | int btree_insert(struct btree_head *head, struct btree_geo *geo, |
| 510 | unsigned long *key, void *val, gfp_t gfp) | 510 | unsigned long *key, void *val, gfp_t gfp) |
| 511 | { | 511 | { |
| 512 | BUG_ON(!val); | ||
| 512 | return btree_insert_level(head, geo, key, val, 1, gfp); | 513 | return btree_insert_level(head, geo, key, val, 1, gfp); |
| 513 | } | 514 | } |
| 514 | EXPORT_SYMBOL_GPL(btree_insert); | 515 | EXPORT_SYMBOL_GPL(btree_insert); |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index d7c878cc006c..e7964296fd50 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -686,6 +686,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, | |||
| 686 | * during iterating; it can be zero only at the beginning. | 686 | * during iterating; it can be zero only at the beginning. |
| 687 | * And we cannot overflow iter->next_index in a single step, | 687 | * And we cannot overflow iter->next_index in a single step, |
| 688 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. | 688 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. |
| 689 | * | ||
| 690 | * This condition also used by radix_tree_next_slot() to stop | ||
| 691 | * contiguous iterating, and forbid swithing to the next chunk. | ||
| 689 | */ | 692 | */ |
| 690 | index = iter->next_index; | 693 | index = iter->next_index; |
| 691 | if (!index && iter->index) | 694 | if (!index && iter->index) |
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c index 1805a5cc5daa..a95bccb8497d 100644 --- a/lib/raid6/recov.c +++ b/lib/raid6/recov.c | |||
| @@ -22,8 +22,8 @@ | |||
| 22 | #include <linux/raid/pq.h> | 22 | #include <linux/raid/pq.h> |
| 23 | 23 | ||
| 24 | /* Recover two failed data blocks. */ | 24 | /* Recover two failed data blocks. */ |
| 25 | void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, | 25 | static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, |
| 26 | void **ptrs) | 26 | int failb, void **ptrs) |
| 27 | { | 27 | { |
| 28 | u8 *p, *q, *dp, *dq; | 28 | u8 *p, *q, *dp, *dq; |
| 29 | u8 px, qx, db; | 29 | u8 px, qx, db; |
| @@ -66,7 +66,8 @@ void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, | |||
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | /* Recover failure of one data block plus the P block */ | 68 | /* Recover failure of one data block plus the P block */ |
| 69 | void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs) | 69 | static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, |
| 70 | void **ptrs) | ||
| 70 | { | 71 | { |
| 71 | u8 *p, *q, *dq; | 72 | u8 *p, *q, *dq; |
| 72 | const u8 *qmul; /* Q multiplier table */ | 73 | const u8 *qmul; /* Q multiplier table */ |
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c index 37ae61930559..ecb710c0b4d9 100644 --- a/lib/raid6/recov_ssse3.c +++ b/lib/raid6/recov_ssse3.c | |||
| @@ -19,8 +19,8 @@ static int raid6_has_ssse3(void) | |||
| 19 | boot_cpu_has(X86_FEATURE_SSSE3); | 19 | boot_cpu_has(X86_FEATURE_SSSE3); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, | 22 | static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, |
| 23 | void **ptrs) | 23 | int failb, void **ptrs) |
| 24 | { | 24 | { |
| 25 | u8 *p, *q, *dp, *dq; | 25 | u8 *p, *q, *dp, *dq; |
| 26 | const u8 *pbmul; /* P multiplier table for B data */ | 26 | const u8 *pbmul; /* P multiplier table for B data */ |
| @@ -194,7 +194,8 @@ void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, | |||
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | 196 | ||
| 197 | void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs) | 197 | static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, |
| 198 | void **ptrs) | ||
| 198 | { | 199 | { |
| 199 | u8 *p, *q, *dq; | 200 | u8 *p, *q, *dq; |
| 200 | const u8 *qmul; /* Q multiplier table */ | 201 | const u8 *qmul; /* Q multiplier table */ |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index d0ec4f3d1593..e91fbc23fff1 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
| @@ -118,7 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock) | |||
| 118 | /* lockup suspected: */ | 118 | /* lockup suspected: */ |
| 119 | if (print_once) { | 119 | if (print_once) { |
| 120 | print_once = 0; | 120 | print_once = 0; |
| 121 | spin_dump(lock, "lockup"); | 121 | spin_dump(lock, "lockup suspected"); |
| 122 | #ifdef CONFIG_SMP | 122 | #ifdef CONFIG_SMP |
| 123 | trigger_all_cpu_backtrace(); | 123 | trigger_all_cpu_backtrace(); |
| 124 | #endif | 124 | #endif |
diff --git a/mm/Kconfig b/mm/Kconfig index b2176374b98e..82fed4eb2b6f 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -389,3 +389,20 @@ config CLEANCACHE | |||
| 389 | in a negligible performance hit. | 389 | in a negligible performance hit. |
| 390 | 390 | ||
| 391 | If unsure, say Y to enable cleancache | 391 | If unsure, say Y to enable cleancache |
| 392 | |||
| 393 | config FRONTSWAP | ||
| 394 | bool "Enable frontswap to cache swap pages if tmem is present" | ||
| 395 | depends on SWAP | ||
| 396 | default n | ||
| 397 | help | ||
| 398 | Frontswap is so named because it can be thought of as the opposite | ||
| 399 | of a "backing" store for a swap device. The data is stored into | ||
| 400 | "transcendent memory", memory that is not directly accessible or | ||
| 401 | addressable by the kernel and is of unknown and possibly | ||
| 402 | time-varying size. When space in transcendent memory is available, | ||
| 403 | a significant swap I/O reduction may be achieved. When none is | ||
| 404 | available, all frontswap calls are reduced to a single pointer- | ||
| 405 | compare-against-NULL resulting in a negligible performance hit | ||
| 406 | and swap data is stored as normal on the matching swap device. | ||
| 407 | |||
| 408 | If unsure, say Y to enable frontswap. | ||
diff --git a/mm/Makefile b/mm/Makefile index a156285ce88d..2e2fbbefb99f 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
| @@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o | |||
| 29 | 29 | ||
| 30 | obj-$(CONFIG_BOUNCE) += bounce.o | 30 | obj-$(CONFIG_BOUNCE) += bounce.o |
| 31 | obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o | 31 | obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o |
| 32 | obj-$(CONFIG_FRONTSWAP) += frontswap.o | ||
| 32 | obj-$(CONFIG_HAS_DMA) += dmapool.o | 33 | obj-$(CONFIG_HAS_DMA) += dmapool.o |
| 33 | obj-$(CONFIG_HUGETLBFS) += hugetlb.o | 34 | obj-$(CONFIG_HUGETLBFS) += hugetlb.o |
| 34 | obj-$(CONFIG_NUMA) += mempolicy.o | 35 | obj-$(CONFIG_NUMA) += mempolicy.o |
diff --git a/mm/compaction.c b/mm/compaction.c index 4ac338af5120..7ea259d82a99 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
| 236 | */ | 236 | */ |
| 237 | while (unlikely(too_many_isolated(zone))) { | 237 | while (unlikely(too_many_isolated(zone))) { |
| 238 | /* async migration should just abort */ | 238 | /* async migration should just abort */ |
| 239 | if (cc->mode != COMPACT_SYNC) | 239 | if (!cc->sync) |
| 240 | return 0; | 240 | return 0; |
| 241 | 241 | ||
| 242 | congestion_wait(BLK_RW_ASYNC, HZ/10); | 242 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| @@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
| 304 | * satisfies the allocation | 304 | * satisfies the allocation |
| 305 | */ | 305 | */ |
| 306 | pageblock_nr = low_pfn >> pageblock_order; | 306 | pageblock_nr = low_pfn >> pageblock_order; |
| 307 | if (cc->mode != COMPACT_SYNC && | 307 | if (!cc->sync && last_pageblock_nr != pageblock_nr && |
| 308 | last_pageblock_nr != pageblock_nr && | ||
| 309 | !migrate_async_suitable(get_pageblock_migratetype(page))) { | 308 | !migrate_async_suitable(get_pageblock_migratetype(page))) { |
| 310 | low_pfn += pageblock_nr_pages; | 309 | low_pfn += pageblock_nr_pages; |
| 311 | low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; | 310 | low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; |
| @@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
| 326 | continue; | 325 | continue; |
| 327 | } | 326 | } |
| 328 | 327 | ||
| 329 | if (cc->mode != COMPACT_SYNC) | 328 | if (!cc->sync) |
| 330 | mode |= ISOLATE_ASYNC_MIGRATE; | 329 | mode |= ISOLATE_ASYNC_MIGRATE; |
| 331 | 330 | ||
| 332 | lruvec = mem_cgroup_page_lruvec(page, zone); | 331 | lruvec = mem_cgroup_page_lruvec(page, zone); |
| @@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
| 361 | 360 | ||
| 362 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ | 361 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ |
| 363 | #ifdef CONFIG_COMPACTION | 362 | #ifdef CONFIG_COMPACTION |
| 364 | /* | ||
| 365 | * Returns true if MIGRATE_UNMOVABLE pageblock was successfully | ||
| 366 | * converted to MIGRATE_MOVABLE type, false otherwise. | ||
| 367 | */ | ||
| 368 | static bool rescue_unmovable_pageblock(struct page *page) | ||
| 369 | { | ||
| 370 | unsigned long pfn, start_pfn, end_pfn; | ||
| 371 | struct page *start_page, *end_page; | ||
| 372 | |||
| 373 | pfn = page_to_pfn(page); | ||
| 374 | start_pfn = pfn & ~(pageblock_nr_pages - 1); | ||
| 375 | end_pfn = start_pfn + pageblock_nr_pages; | ||
| 376 | |||
| 377 | start_page = pfn_to_page(start_pfn); | ||
| 378 | end_page = pfn_to_page(end_pfn); | ||
| 379 | |||
| 380 | /* Do not deal with pageblocks that overlap zones */ | ||
| 381 | if (page_zone(start_page) != page_zone(end_page)) | ||
| 382 | return false; | ||
| 383 | |||
| 384 | for (page = start_page, pfn = start_pfn; page < end_page; pfn++, | ||
| 385 | page++) { | ||
| 386 | if (!pfn_valid_within(pfn)) | ||
| 387 | continue; | ||
| 388 | |||
| 389 | if (PageBuddy(page)) { | ||
| 390 | int order = page_order(page); | ||
| 391 | |||
| 392 | pfn += (1 << order) - 1; | ||
| 393 | page += (1 << order) - 1; | ||
| 394 | |||
| 395 | continue; | ||
| 396 | } else if (page_count(page) == 0 || PageLRU(page)) | ||
| 397 | continue; | ||
| 398 | |||
| 399 | return false; | ||
| 400 | } | ||
| 401 | |||
| 402 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); | ||
| 403 | move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE); | ||
| 404 | return true; | ||
| 405 | } | ||
| 406 | 363 | ||
| 407 | enum smt_result { | 364 | /* Returns true if the page is within a block suitable for migration to */ |
| 408 | GOOD_AS_MIGRATION_TARGET, | 365 | static bool suitable_migration_target(struct page *page) |
| 409 | FAIL_UNMOVABLE_TARGET, | ||
| 410 | FAIL_BAD_TARGET, | ||
| 411 | }; | ||
| 412 | |||
| 413 | /* | ||
| 414 | * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block | ||
| 415 | * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page | ||
| 416 | * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise. | ||
| 417 | */ | ||
| 418 | static enum smt_result suitable_migration_target(struct page *page, | ||
| 419 | struct compact_control *cc) | ||
| 420 | { | 366 | { |
| 421 | 367 | ||
| 422 | int migratetype = get_pageblock_migratetype(page); | 368 | int migratetype = get_pageblock_migratetype(page); |
| 423 | 369 | ||
| 424 | /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ | 370 | /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ |
| 425 | if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) | 371 | if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) |
| 426 | return FAIL_BAD_TARGET; | 372 | return false; |
| 427 | 373 | ||
| 428 | /* If the page is a large free page, then allow migration */ | 374 | /* If the page is a large free page, then allow migration */ |
| 429 | if (PageBuddy(page) && page_order(page) >= pageblock_order) | 375 | if (PageBuddy(page) && page_order(page) >= pageblock_order) |
| 430 | return GOOD_AS_MIGRATION_TARGET; | 376 | return true; |
| 431 | 377 | ||
| 432 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ | 378 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ |
| 433 | if (cc->mode != COMPACT_ASYNC_UNMOVABLE && | 379 | if (migrate_async_suitable(migratetype)) |
| 434 | migrate_async_suitable(migratetype)) | 380 | return true; |
| 435 | return GOOD_AS_MIGRATION_TARGET; | ||
| 436 | |||
| 437 | if (cc->mode == COMPACT_ASYNC_MOVABLE && | ||
| 438 | migratetype == MIGRATE_UNMOVABLE) | ||
| 439 | return FAIL_UNMOVABLE_TARGET; | ||
| 440 | |||
| 441 | if (cc->mode != COMPACT_ASYNC_MOVABLE && | ||
| 442 | migratetype == MIGRATE_UNMOVABLE && | ||
| 443 | rescue_unmovable_pageblock(page)) | ||
| 444 | return GOOD_AS_MIGRATION_TARGET; | ||
| 445 | 381 | ||
| 446 | /* Otherwise skip the block */ | 382 | /* Otherwise skip the block */ |
| 447 | return FAIL_BAD_TARGET; | 383 | return false; |
| 448 | } | 384 | } |
| 449 | 385 | ||
| 450 | /* | 386 | /* |
| @@ -478,13 +414,6 @@ static void isolate_freepages(struct zone *zone, | |||
| 478 | zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; | 414 | zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; |
| 479 | 415 | ||
| 480 | /* | 416 | /* |
| 481 | * isolate_freepages() may be called more than once during | ||
| 482 | * compact_zone_order() run and we want only the most recent | ||
| 483 | * count. | ||
| 484 | */ | ||
| 485 | cc->nr_pageblocks_skipped = 0; | ||
| 486 | |||
| 487 | /* | ||
| 488 | * Isolate free pages until enough are available to migrate the | 417 | * Isolate free pages until enough are available to migrate the |
| 489 | * pages on cc->migratepages. We stop searching if the migrate | 418 | * pages on cc->migratepages. We stop searching if the migrate |
| 490 | * and free page scanners meet or enough free pages are isolated. | 419 | * and free page scanners meet or enough free pages are isolated. |
| @@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone, | |||
| 492 | for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; | 421 | for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; |
| 493 | pfn -= pageblock_nr_pages) { | 422 | pfn -= pageblock_nr_pages) { |
| 494 | unsigned long isolated; | 423 | unsigned long isolated; |
| 495 | enum smt_result ret; | ||
| 496 | 424 | ||
| 497 | if (!pfn_valid(pfn)) | 425 | if (!pfn_valid(pfn)) |
| 498 | continue; | 426 | continue; |
| @@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone, | |||
| 509 | continue; | 437 | continue; |
| 510 | 438 | ||
| 511 | /* Check the block is suitable for migration */ | 439 | /* Check the block is suitable for migration */ |
| 512 | ret = suitable_migration_target(page, cc); | 440 | if (!suitable_migration_target(page)) |
| 513 | if (ret != GOOD_AS_MIGRATION_TARGET) { | ||
| 514 | if (ret == FAIL_UNMOVABLE_TARGET) | ||
| 515 | cc->nr_pageblocks_skipped++; | ||
| 516 | continue; | 441 | continue; |
| 517 | } | 442 | |
| 518 | /* | 443 | /* |
| 519 | * Found a block suitable for isolating free pages from. Now | 444 | * Found a block suitable for isolating free pages from. Now |
| 520 | * we disabled interrupts, double check things are ok and | 445 | * we disabled interrupts, double check things are ok and |
| @@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone, | |||
| 523 | */ | 448 | */ |
| 524 | isolated = 0; | 449 | isolated = 0; |
| 525 | spin_lock_irqsave(&zone->lock, flags); | 450 | spin_lock_irqsave(&zone->lock, flags); |
| 526 | ret = suitable_migration_target(page, cc); | 451 | if (suitable_migration_target(page)) { |
| 527 | if (ret == GOOD_AS_MIGRATION_TARGET) { | ||
| 528 | end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); | 452 | end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); |
| 529 | isolated = isolate_freepages_block(pfn, end_pfn, | 453 | isolated = isolate_freepages_block(pfn, end_pfn, |
| 530 | freelist, false); | 454 | freelist, false); |
| 531 | nr_freepages += isolated; | 455 | nr_freepages += isolated; |
| 532 | } else if (ret == FAIL_UNMOVABLE_TARGET) | 456 | } |
| 533 | cc->nr_pageblocks_skipped++; | ||
| 534 | spin_unlock_irqrestore(&zone->lock, flags); | 457 | spin_unlock_irqrestore(&zone->lock, flags); |
| 535 | 458 | ||
| 536 | /* | 459 | /* |
| @@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
| 762 | 685 | ||
| 763 | nr_migrate = cc->nr_migratepages; | 686 | nr_migrate = cc->nr_migratepages; |
| 764 | err = migrate_pages(&cc->migratepages, compaction_alloc, | 687 | err = migrate_pages(&cc->migratepages, compaction_alloc, |
| 765 | (unsigned long)&cc->freepages, false, | 688 | (unsigned long)cc, false, |
| 766 | (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT | 689 | cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC); |
| 767 | : MIGRATE_ASYNC); | ||
| 768 | update_nr_listpages(cc); | 690 | update_nr_listpages(cc); |
| 769 | nr_remaining = cc->nr_migratepages; | 691 | nr_remaining = cc->nr_migratepages; |
| 770 | 692 | ||
| @@ -793,8 +715,7 @@ out: | |||
| 793 | 715 | ||
| 794 | static unsigned long compact_zone_order(struct zone *zone, | 716 | static unsigned long compact_zone_order(struct zone *zone, |
| 795 | int order, gfp_t gfp_mask, | 717 | int order, gfp_t gfp_mask, |
| 796 | enum compact_mode mode, | 718 | bool sync) |
| 797 | unsigned long *nr_pageblocks_skipped) | ||
| 798 | { | 719 | { |
| 799 | struct compact_control cc = { | 720 | struct compact_control cc = { |
| 800 | .nr_freepages = 0, | 721 | .nr_freepages = 0, |
| @@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone, | |||
| 802 | .order = order, | 723 | .order = order, |
| 803 | .migratetype = allocflags_to_migratetype(gfp_mask), | 724 | .migratetype = allocflags_to_migratetype(gfp_mask), |
| 804 | .zone = zone, | 725 | .zone = zone, |
| 805 | .mode = mode, | 726 | .sync = sync, |
| 806 | }; | 727 | }; |
| 807 | unsigned long rc; | ||
| 808 | |||
| 809 | INIT_LIST_HEAD(&cc.freepages); | 728 | INIT_LIST_HEAD(&cc.freepages); |
| 810 | INIT_LIST_HEAD(&cc.migratepages); | 729 | INIT_LIST_HEAD(&cc.migratepages); |
| 811 | 730 | ||
| 812 | rc = compact_zone(zone, &cc); | 731 | return compact_zone(zone, &cc); |
| 813 | *nr_pageblocks_skipped = cc.nr_pageblocks_skipped; | ||
| 814 | |||
| 815 | return rc; | ||
| 816 | } | 732 | } |
| 817 | 733 | ||
| 818 | int sysctl_extfrag_threshold = 500; | 734 | int sysctl_extfrag_threshold = 500; |
| @@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
| 837 | struct zoneref *z; | 753 | struct zoneref *z; |
| 838 | struct zone *zone; | 754 | struct zone *zone; |
| 839 | int rc = COMPACT_SKIPPED; | 755 | int rc = COMPACT_SKIPPED; |
| 840 | unsigned long nr_pageblocks_skipped; | ||
| 841 | enum compact_mode mode; | ||
| 842 | 756 | ||
| 843 | /* | 757 | /* |
| 844 | * Check whether it is worth even starting compaction. The order check is | 758 | * Check whether it is worth even starting compaction. The order check is |
| @@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
| 855 | nodemask) { | 769 | nodemask) { |
| 856 | int status; | 770 | int status; |
| 857 | 771 | ||
| 858 | mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE; | 772 | status = compact_zone_order(zone, order, gfp_mask, sync); |
| 859 | retry: | ||
| 860 | status = compact_zone_order(zone, order, gfp_mask, mode, | ||
| 861 | &nr_pageblocks_skipped); | ||
| 862 | rc = max(status, rc); | 773 | rc = max(status, rc); |
| 863 | 774 | ||
| 864 | /* If a normal allocation would succeed, stop compacting */ | 775 | /* If a normal allocation would succeed, stop compacting */ |
| 865 | if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) | 776 | if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) |
| 866 | break; | 777 | break; |
| 867 | |||
| 868 | if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) { | ||
| 869 | if (nr_pageblocks_skipped) { | ||
| 870 | mode = COMPACT_ASYNC_UNMOVABLE; | ||
| 871 | goto retry; | ||
| 872 | } | ||
| 873 | } | ||
| 874 | } | 778 | } |
| 875 | 779 | ||
| 876 | return rc; | 780 | return rc; |
| @@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) | |||
| 904 | if (ok && cc->order > zone->compact_order_failed) | 808 | if (ok && cc->order > zone->compact_order_failed) |
| 905 | zone->compact_order_failed = cc->order + 1; | 809 | zone->compact_order_failed = cc->order + 1; |
| 906 | /* Currently async compaction is never deferred. */ | 810 | /* Currently async compaction is never deferred. */ |
| 907 | else if (!ok && cc->mode == COMPACT_SYNC) | 811 | else if (!ok && cc->sync) |
| 908 | defer_compaction(zone, cc->order); | 812 | defer_compaction(zone, cc->order); |
| 909 | } | 813 | } |
| 910 | 814 | ||
| @@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order) | |||
| 919 | { | 823 | { |
| 920 | struct compact_control cc = { | 824 | struct compact_control cc = { |
| 921 | .order = order, | 825 | .order = order, |
| 922 | .mode = COMPACT_ASYNC_MOVABLE, | 826 | .sync = false, |
| 923 | }; | 827 | }; |
| 924 | 828 | ||
| 925 | return __compact_pgdat(pgdat, &cc); | 829 | return __compact_pgdat(pgdat, &cc); |
| @@ -929,7 +833,7 @@ static int compact_node(int nid) | |||
| 929 | { | 833 | { |
| 930 | struct compact_control cc = { | 834 | struct compact_control cc = { |
| 931 | .order = -1, | 835 | .order = -1, |
| 932 | .mode = COMPACT_SYNC, | 836 | .sync = true, |
| 933 | }; | 837 | }; |
| 934 | 838 | ||
| 935 | return __compact_pgdat(NODE_DATA(nid), &cc); | 839 | return __compact_pgdat(NODE_DATA(nid), &cc); |
diff --git a/mm/frontswap.c b/mm/frontswap.c new file mode 100644 index 000000000000..e25025574a02 --- /dev/null +++ b/mm/frontswap.c | |||
| @@ -0,0 +1,314 @@ | |||
| 1 | /* | ||
| 2 | * Frontswap frontend | ||
| 3 | * | ||
| 4 | * This code provides the generic "frontend" layer to call a matching | ||
| 5 | * "backend" driver implementation of frontswap. See | ||
| 6 | * Documentation/vm/frontswap.txt for more information. | ||
| 7 | * | ||
| 8 | * Copyright (C) 2009-2012 Oracle Corp. All rights reserved. | ||
| 9 | * Author: Dan Magenheimer | ||
| 10 | * | ||
| 11 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/mm.h> | ||
| 15 | #include <linux/mman.h> | ||
| 16 | #include <linux/swap.h> | ||
| 17 | #include <linux/swapops.h> | ||
| 18 | #include <linux/proc_fs.h> | ||
| 19 | #include <linux/security.h> | ||
| 20 | #include <linux/capability.h> | ||
| 21 | #include <linux/module.h> | ||
| 22 | #include <linux/uaccess.h> | ||
| 23 | #include <linux/debugfs.h> | ||
| 24 | #include <linux/frontswap.h> | ||
| 25 | #include <linux/swapfile.h> | ||
| 26 | |||
| 27 | /* | ||
| 28 | * frontswap_ops is set by frontswap_register_ops to contain the pointers | ||
| 29 | * to the frontswap "backend" implementation functions. | ||
| 30 | */ | ||
| 31 | static struct frontswap_ops frontswap_ops __read_mostly; | ||
| 32 | |||
| 33 | /* | ||
| 34 | * This global enablement flag reduces overhead on systems where frontswap_ops | ||
| 35 | * has not been registered, so is preferred to the slower alternative: a | ||
| 36 | * function call that checks a non-global. | ||
| 37 | */ | ||
| 38 | bool frontswap_enabled __read_mostly; | ||
| 39 | EXPORT_SYMBOL(frontswap_enabled); | ||
| 40 | |||
| 41 | /* | ||
| 42 | * If enabled, frontswap_store will return failure even on success. As | ||
| 43 | * a result, the swap subsystem will always write the page to swap, in | ||
| 44 | * effect converting frontswap into a writethrough cache. In this mode, | ||
| 45 | * there is no direct reduction in swap writes, but a frontswap backend | ||
| 46 | * can unilaterally "reclaim" any pages in use with no data loss, thus | ||
| 47 | * providing increases control over maximum memory usage due to frontswap. | ||
| 48 | */ | ||
| 49 | static bool frontswap_writethrough_enabled __read_mostly; | ||
| 50 | |||
#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured).  These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

/* Unlocked increment helpers; see the racy-by-design note above. */
static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
/* No-op stubs so callers need not test CONFIG_DEBUG_FS themselves. */
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif
/*
 * Register operations for frontswap, returning previous thus allowing
 * detection of multiple backends and possible nesting.
 *
 * @ops: backend-supplied operation table; copied by value into the
 *       file-local frontswap_ops.
 *
 * Returns the previously registered operations (all-zero if this is the
 * first registration).  Also sets the global frontswap_enabled flag so
 * the hooks start calling into the backend.
 *
 * NOTE(review): no locking here — presumably registration happens once
 * during backend init before concurrent use; confirm with callers.
 */
struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
{
	struct frontswap_ops old = frontswap_ops;

	frontswap_ops = *ops;
	frontswap_enabled = true;
	return old;
}
EXPORT_SYMBOL(frontswap_register_ops);
| 93 | |||
/*
 * Called when a swap device is swapon'd.  Gives the registered backend
 * (if any) a chance to set up per-device state.  A device without a
 * frontswap_map bitmap does not participate in frontswap and is skipped.
 *
 * @type: index into swap_info[] for the device being enabled.
 */
void __frontswap_init(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	if (sis->frontswap_map == NULL)
		return;
	if (frontswap_enabled)
		(*frontswap_ops.init)(type);
}
EXPORT_SYMBOL(__frontswap_init);
| 117 | |||
/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset.  Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 *
 * Returns 0 on success, -1 on failure; in writethrough mode the return is
 * forced to -1 so the swap subsystem also writes the page to the device.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1, dup = 0;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	/* Remember whether this (type, offset) was already in frontswap. */
	if (frontswap_test(sis, offset))
		dup = 1;
	ret = (*frontswap_ops.store)(type, offset, page);
	if (ret == 0) {
		frontswap_set(sis, offset);
		inc_frontswap_succ_stores();
		/* Only a first-time store grows the per-device page count. */
		if (!dup)
			atomic_inc(&sis->frontswap_pages);
	} else if (dup) {
		/*
		 * failed dup always results in automatic invalidate of
		 * the (older) page from frontswap
		 */
		frontswap_clear(sis, offset);
		atomic_dec(&sis->frontswap_pages);
		inc_frontswap_failed_stores();
	} else
		inc_frontswap_failed_stores();
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);
| 159 | |||
| 160 | /* | ||
| 161 | * "Get" data from frontswap associated with swaptype and offset that were | ||
| 162 | * specified when the data was put to frontswap and use it to fill the | ||
| 163 | * specified page with data. Page must be locked and in the swap cache. | ||
| 164 | */ | ||
| 165 | int __frontswap_load(struct page *page) | ||
| 166 | { | ||
| 167 | int ret = -1; | ||
| 168 | swp_entry_t entry = { .val = page_private(page), }; | ||
| 169 | int type = swp_type(entry); | ||
| 170 | struct swap_info_struct *sis = swap_info[type]; | ||
| 171 | pgoff_t offset = swp_offset(entry); | ||
| 172 | |||
| 173 | BUG_ON(!PageLocked(page)); | ||
| 174 | BUG_ON(sis == NULL); | ||
| 175 | if (frontswap_test(sis, offset)) | ||
| 176 | ret = (*frontswap_ops.load)(type, offset, page); | ||
| 177 | if (ret == 0) | ||
| 178 | inc_frontswap_loads(); | ||
| 179 | return ret; | ||
| 180 | } | ||
| 181 | EXPORT_SYMBOL(__frontswap_load); | ||
| 182 | |||
| 183 | /* | ||
| 184 | * Invalidate any data from frontswap associated with the specified swaptype | ||
| 185 | * and offset so that a subsequent "get" will fail. | ||
| 186 | */ | ||
| 187 | void __frontswap_invalidate_page(unsigned type, pgoff_t offset) | ||
| 188 | { | ||
| 189 | struct swap_info_struct *sis = swap_info[type]; | ||
| 190 | |||
| 191 | BUG_ON(sis == NULL); | ||
| 192 | if (frontswap_test(sis, offset)) { | ||
| 193 | (*frontswap_ops.invalidate_page)(type, offset); | ||
| 194 | atomic_dec(&sis->frontswap_pages); | ||
| 195 | frontswap_clear(sis, offset); | ||
| 196 | inc_frontswap_invalidates(); | ||
| 197 | } | ||
| 198 | } | ||
| 199 | EXPORT_SYMBOL(__frontswap_invalidate_page); | ||
| 200 | |||
/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	if (sis->frontswap_map == NULL)
		return;
	(*frontswap_ops.invalidate_area)(type);
	atomic_set(&sis->frontswap_pages, 0);
	/*
	 * NOTE(review): sis->max is a page count, i.e. a number of BITS in
	 * the frontswap_map bitmap, but this zeroes max / sizeof(long)
	 * BYTES — which covers exactly max bits only when sizeof(long) == 8.
	 * This size must match the frontswap_map allocation at the swapon
	 * site (not visible here); verify both against
	 * BITS_TO_LONGS(sis->max) * sizeof(long) — TODO confirm.
	 */
	memset(sis->frontswap_map, 0, sis->max / sizeof(long));
}
EXPORT_SYMBOL(__frontswap_invalidate_area);
| 217 | |||
/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages to attempt to -- subject to memory
 * constraints -- reduce the number of pages in frontswap to the
 * number given in the parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages = 0, total_pages_to_unuse;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;
	bool locked = false;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_list.head each time
	 */
	spin_lock(&swap_lock);
	locked = true;
	total_pages = 0;
	/* First pass: total frontswap pages across all active swap devices. */
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		total_pages += atomic_read(&si->frontswap_pages);
	}
	if (total_pages <= target_pages)
		goto out; /* already at or below the target */
	total_pages_to_unuse = total_pages - target_pages;
	/*
	 * Second pass: pick the first device for which we can account
	 * enough memory to bring its pages back in from frontswap.
	 */
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages)
			pages = pages_to_unuse = total_pages_to_unuse;
		else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages))
			continue; /* can't account this many; try next device */
		vm_unacct_memory(pages);
		break;
	}
	if (type < 0)
		goto out; /* scanned every device without a successful pick */
	/* Drop the lock before the potentially lengthy unuse operation. */
	locked = false;
	spin_unlock(&swap_lock);
	try_to_unuse(type, true, pages_to_unuse);
out:
	if (locked)
		spin_unlock(&swap_lock);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
| 276 | |||
| 277 | /* | ||
| 278 | * Count and return the number of frontswap pages across all | ||
| 279 | * swap devices. This is exported so that backend drivers can | ||
| 280 | * determine current usage without reading debugfs. | ||
| 281 | */ | ||
| 282 | unsigned long frontswap_curr_pages(void) | ||
| 283 | { | ||
| 284 | int type; | ||
| 285 | unsigned long totalpages = 0; | ||
| 286 | struct swap_info_struct *si = NULL; | ||
| 287 | |||
| 288 | spin_lock(&swap_lock); | ||
| 289 | for (type = swap_list.head; type >= 0; type = si->next) { | ||
| 290 | si = swap_info[type]; | ||
| 291 | totalpages += atomic_read(&si->frontswap_pages); | ||
| 292 | } | ||
| 293 | spin_unlock(&swap_lock); | ||
| 294 | return totalpages; | ||
| 295 | } | ||
| 296 | EXPORT_SYMBOL(frontswap_curr_pages); | ||
| 297 | |||
| 298 | static int __init init_frontswap(void) | ||
| 299 | { | ||
| 300 | #ifdef CONFIG_DEBUG_FS | ||
| 301 | struct dentry *root = debugfs_create_dir("frontswap", NULL); | ||
| 302 | if (root == NULL) | ||
| 303 | return -ENXIO; | ||
| 304 | debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads); | ||
| 305 | debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores); | ||
| 306 | debugfs_create_u64("failed_stores", S_IRUGO, root, | ||
| 307 | &frontswap_failed_stores); | ||
| 308 | debugfs_create_u64("invalidates", S_IRUGO, | ||
| 309 | root, &frontswap_invalidates); | ||
| 310 | #endif | ||
| 311 | return 0; | ||
| 312 | } | ||
| 313 | |||
| 314 | module_init(init_frontswap); | ||
diff --git a/mm/internal.h b/mm/internal.h index 5cbb78190041..2ba87fbfb75b 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
| @@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page); | |||
| 94 | /* | 94 | /* |
| 95 | * in mm/page_alloc.c | 95 | * in mm/page_alloc.c |
| 96 | */ | 96 | */ |
| 97 | extern void set_pageblock_migratetype(struct page *page, int migratetype); | ||
| 98 | extern int move_freepages_block(struct zone *zone, struct page *page, | ||
| 99 | int migratetype); | ||
| 100 | extern void __free_pages_bootmem(struct page *page, unsigned int order); | 97 | extern void __free_pages_bootmem(struct page *page, unsigned int order); |
| 101 | extern void prep_compound_page(struct page *page, unsigned long order); | 98 | extern void prep_compound_page(struct page *page, unsigned long order); |
| 102 | #ifdef CONFIG_MEMORY_FAILURE | 99 | #ifdef CONFIG_MEMORY_FAILURE |
| @@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page); | |||
| 104 | #endif | 101 | #endif |
| 105 | 102 | ||
| 106 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA | 103 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
| 107 | #include <linux/compaction.h> | ||
| 108 | 104 | ||
| 109 | /* | 105 | /* |
| 110 | * in mm/compaction.c | 106 | * in mm/compaction.c |
| @@ -123,14 +119,11 @@ struct compact_control { | |||
| 123 | unsigned long nr_migratepages; /* Number of pages to migrate */ | 119 | unsigned long nr_migratepages; /* Number of pages to migrate */ |
| 124 | unsigned long free_pfn; /* isolate_freepages search base */ | 120 | unsigned long free_pfn; /* isolate_freepages search base */ |
| 125 | unsigned long migrate_pfn; /* isolate_migratepages search base */ | 121 | unsigned long migrate_pfn; /* isolate_migratepages search base */ |
| 126 | enum compact_mode mode; /* Compaction mode */ | 122 | bool sync; /* Synchronous migration */ |
| 127 | 123 | ||
| 128 | int order; /* order a direct compactor needs */ | 124 | int order; /* order a direct compactor needs */ |
| 129 | int migratetype; /* MOVABLE, RECLAIMABLE etc */ | 125 | int migratetype; /* MOVABLE, RECLAIMABLE etc */ |
| 130 | struct zone *zone; | 126 | struct zone *zone; |
| 131 | |||
| 132 | /* Number of UNMOVABLE destination pageblocks skipped during scan */ | ||
| 133 | unsigned long nr_pageblocks_skipped; | ||
| 134 | }; | 127 | }; |
| 135 | 128 | ||
| 136 | unsigned long | 129 | unsigned long |
diff --git a/mm/memblock.c b/mm/memblock.c index 952123eba433..32a0a5e4d79d 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
| @@ -867,6 +867,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr) | |||
| 867 | return memblock_search(&memblock.memory, addr) != -1; | 867 | return memblock_search(&memblock.memory, addr) != -1; |
| 868 | } | 868 | } |
| 869 | 869 | ||
| 870 | /** | ||
| 871 | * memblock_is_region_memory - check if a region is a subset of memory | ||
| 872 | * @base: base of region to check | ||
| 873 | * @size: size of region to check | ||
| 874 | * | ||
| 875 | * Check if the region [@base, @base+@size) is a subset of a memory block. | ||
| 876 | * | ||
| 877 | * RETURNS: | ||
| 878 | * 0 if false, non-zero if true | ||
| 879 | */ | ||
| 870 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) | 880 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
| 871 | { | 881 | { |
| 872 | int idx = memblock_search(&memblock.memory, base); | 882 | int idx = memblock_search(&memblock.memory, base); |
| @@ -879,6 +889,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size | |||
| 879 | memblock.memory.regions[idx].size) >= end; | 889 | memblock.memory.regions[idx].size) >= end; |
| 880 | } | 890 | } |
| 881 | 891 | ||
| 892 | /** | ||
| 893 | * memblock_is_region_reserved - check if a region intersects reserved memory | ||
| 894 | * @base: base of region to check | ||
| 895 | * @size: size of region to check | ||
| 896 | * | ||
| 897 | * Check if the region [@base, @base+@size) intersects a reserved memory block. | ||
| 898 | * | ||
| 899 | * RETURNS: | ||
| 900 | * 0 if false, non-zero if true | ||
| 901 | */ | ||
| 882 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) | 902 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
| 883 | { | 903 | { |
| 884 | memblock_cap_size(base, &size); | 904 | memblock_cap_size(base, &size); |
diff --git a/mm/migrate.c b/mm/migrate.c index ab81d482ae6f..be26d5cbe56b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page) | |||
| 436 | * is actually a signal that all of the page has become dirty. | 436 | * is actually a signal that all of the page has become dirty. |
| 437 | * Whereas only part of our page may be dirty. | 437 | * Whereas only part of our page may be dirty. |
| 438 | */ | 438 | */ |
| 439 | __set_page_dirty_nobuffers(newpage); | 439 | if (PageSwapBacked(page)) |
| 440 | SetPageDirty(newpage); | ||
| 441 | else | ||
| 442 | __set_page_dirty_nobuffers(newpage); | ||
| 440 | } | 443 | } |
| 441 | 444 | ||
| 442 | mlock_migrate_page(newpage, page); | 445 | mlock_migrate_page(newpage, page); |
diff --git a/mm/nommu.c b/mm/nommu.c index c4acfbc09972..d4b0c10872de 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
| @@ -1486,7 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, | |||
| 1486 | 1486 | ||
| 1487 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | 1487 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); |
| 1488 | 1488 | ||
| 1489 | ret = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); | 1489 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
| 1490 | 1490 | ||
| 1491 | if (file) | 1491 | if (file) |
| 1492 | fput(file); | 1492 | fput(file); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ed0e19677360..416637f0e924 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -183,7 +183,7 @@ static bool oom_unkillable_task(struct task_struct *p, | |||
| 183 | unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, | 183 | unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, |
| 184 | const nodemask_t *nodemask, unsigned long totalpages) | 184 | const nodemask_t *nodemask, unsigned long totalpages) |
| 185 | { | 185 | { |
| 186 | unsigned long points; | 186 | long points; |
| 187 | 187 | ||
| 188 | if (oom_unkillable_task(p, memcg, nodemask)) | 188 | if (oom_unkillable_task(p, memcg, nodemask)) |
| 189 | return 0; | 189 | return 0; |
| @@ -223,7 +223,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, | |||
| 223 | * Never return 0 for an eligible task regardless of the root bonus and | 223 | * Never return 0 for an eligible task regardless of the root bonus and |
| 224 | * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). | 224 | * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). |
| 225 | */ | 225 | */ |
| 226 | return points ? points : 1; | 226 | return points > 0 ? points : 1; |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | /* | 229 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6092f331b32e..44030096da63 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes); | |||
| 219 | 219 | ||
| 220 | int page_group_by_mobility_disabled __read_mostly; | 220 | int page_group_by_mobility_disabled __read_mostly; |
| 221 | 221 | ||
| 222 | void set_pageblock_migratetype(struct page *page, int migratetype) | 222 | static void set_pageblock_migratetype(struct page *page, int migratetype) |
| 223 | { | 223 | { |
| 224 | 224 | ||
| 225 | if (unlikely(page_group_by_mobility_disabled)) | 225 | if (unlikely(page_group_by_mobility_disabled)) |
| @@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone, | |||
| 954 | return pages_moved; | 954 | return pages_moved; |
| 955 | } | 955 | } |
| 956 | 956 | ||
| 957 | int move_freepages_block(struct zone *zone, struct page *page, | 957 | static int move_freepages_block(struct zone *zone, struct page *page, |
| 958 | int migratetype) | 958 | int migratetype) |
| 959 | { | 959 | { |
| 960 | unsigned long start_pfn, end_pfn; | 960 | unsigned long start_pfn, end_pfn; |
| 961 | struct page *start_page, *end_page; | 961 | struct page *start_page, *end_page; |
| @@ -5651,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) | |||
| 5651 | .nr_migratepages = 0, | 5651 | .nr_migratepages = 0, |
| 5652 | .order = -1, | 5652 | .order = -1, |
| 5653 | .zone = page_zone(pfn_to_page(start)), | 5653 | .zone = page_zone(pfn_to_page(start)), |
| 5654 | .mode = COMPACT_SYNC, | 5654 | .sync = true, |
| 5655 | }; | 5655 | }; |
| 5656 | INIT_LIST_HEAD(&cc.migratepages); | 5656 | INIT_LIST_HEAD(&cc.migratepages); |
| 5657 | 5657 | ||
diff --git a/mm/page_io.c b/mm/page_io.c index dc76b4d0611e..34f02923744c 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/bio.h> | 18 | #include <linux/bio.h> |
| 19 | #include <linux/swapops.h> | 19 | #include <linux/swapops.h> |
| 20 | #include <linux/writeback.h> | 20 | #include <linux/writeback.h> |
| 21 | #include <linux/frontswap.h> | ||
| 21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
| 22 | 23 | ||
| 23 | static struct bio *get_swap_bio(gfp_t gfp_flags, | 24 | static struct bio *get_swap_bio(gfp_t gfp_flags, |
| @@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) | |||
| 98 | unlock_page(page); | 99 | unlock_page(page); |
| 99 | goto out; | 100 | goto out; |
| 100 | } | 101 | } |
| 102 | if (frontswap_store(page) == 0) { | ||
| 103 | set_page_writeback(page); | ||
| 104 | unlock_page(page); | ||
| 105 | end_page_writeback(page); | ||
| 106 | goto out; | ||
| 107 | } | ||
| 101 | bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); | 108 | bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); |
| 102 | if (bio == NULL) { | 109 | if (bio == NULL) { |
| 103 | set_page_dirty(page); | 110 | set_page_dirty(page); |
| @@ -122,6 +129,11 @@ int swap_readpage(struct page *page) | |||
| 122 | 129 | ||
| 123 | VM_BUG_ON(!PageLocked(page)); | 130 | VM_BUG_ON(!PageLocked(page)); |
| 124 | VM_BUG_ON(PageUptodate(page)); | 131 | VM_BUG_ON(PageUptodate(page)); |
| 132 | if (frontswap_load(page) == 0) { | ||
| 133 | SetPageUptodate(page); | ||
| 134 | unlock_page(page); | ||
| 135 | goto out; | ||
| 136 | } | ||
| 125 | bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); | 137 | bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); |
| 126 | if (bio == NULL) { | 138 | if (bio == NULL) { |
| 127 | unlock_page(page); | 139 | unlock_page(page); |
diff --git a/mm/shmem.c b/mm/shmem.c index 585bd220a21e..a15a466d0d1d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, | |||
| 683 | mutex_lock(&shmem_swaplist_mutex); | 683 | mutex_lock(&shmem_swaplist_mutex); |
| 684 | /* | 684 | /* |
| 685 | * We needed to drop mutex to make that restrictive page | 685 | * We needed to drop mutex to make that restrictive page |
| 686 | * allocation; but the inode might already be freed by now, | 686 | * allocation, but the inode might have been freed while we |
| 687 | * and we cannot refer to inode or mapping or info to check. | 687 | * dropped it: although a racing shmem_evict_inode() cannot |
| 688 | * However, we do hold page lock on the PageSwapCache page, | 688 | * complete without emptying the radix_tree, our page lock |
| 689 | * so can check if that still has our reference remaining. | 689 | * on this swapcache page is not enough to prevent that - |
| 690 | * free_swap_and_cache() of our swap entry will only | ||
| 691 | * trylock_page(), removing swap from radix_tree whatever. | ||
| 692 | * | ||
| 693 | * We must not proceed to shmem_add_to_page_cache() if the | ||
| 694 | * inode has been freed, but of course we cannot rely on | ||
| 695 | * inode or mapping or info to check that. However, we can | ||
| 696 | * safely check if our swap entry is still in use (and here | ||
| 697 | * it can't have got reused for another page): if it's still | ||
| 698 | * in use, then the inode cannot have been freed yet, and we | ||
| 699 | * can safely proceed (if it's no longer in use, that tells | ||
| 700 | * nothing about the inode, but we don't need to unuse swap). | ||
| 690 | */ | 701 | */ |
| 691 | if (!page_swapcount(*pagep)) | 702 | if (!page_swapcount(*pagep)) |
| 692 | error = -ENOENT; | 703 | error = -ENOENT; |
| @@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page) | |||
| 730 | 741 | ||
| 731 | /* | 742 | /* |
| 732 | * There's a faint possibility that swap page was replaced before | 743 | * There's a faint possibility that swap page was replaced before |
| 733 | * caller locked it: it will come back later with the right page. | 744 | * caller locked it: caller will come back later with the right page. |
| 734 | */ | 745 | */ |
| 735 | if (unlikely(!PageSwapCache(page))) | 746 | if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) |
| 736 | goto out; | 747 | goto out; |
| 737 | 748 | ||
| 738 | /* | 749 | /* |
| @@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, | |||
| 995 | newpage = shmem_alloc_page(gfp, info, index); | 1006 | newpage = shmem_alloc_page(gfp, info, index); |
| 996 | if (!newpage) | 1007 | if (!newpage) |
| 997 | return -ENOMEM; | 1008 | return -ENOMEM; |
| 998 | VM_BUG_ON(shmem_should_replace_page(newpage, gfp)); | ||
| 999 | 1009 | ||
| 1000 | *pagep = newpage; | ||
| 1001 | page_cache_get(newpage); | 1010 | page_cache_get(newpage); |
| 1002 | copy_highpage(newpage, oldpage); | 1011 | copy_highpage(newpage, oldpage); |
| 1012 | flush_dcache_page(newpage); | ||
| 1003 | 1013 | ||
| 1004 | VM_BUG_ON(!PageLocked(oldpage)); | ||
| 1005 | __set_page_locked(newpage); | 1014 | __set_page_locked(newpage); |
| 1006 | VM_BUG_ON(!PageUptodate(oldpage)); | ||
| 1007 | SetPageUptodate(newpage); | 1015 | SetPageUptodate(newpage); |
| 1008 | VM_BUG_ON(!PageSwapBacked(oldpage)); | ||
| 1009 | SetPageSwapBacked(newpage); | 1016 | SetPageSwapBacked(newpage); |
| 1010 | VM_BUG_ON(!swap_index); | ||
| 1011 | set_page_private(newpage, swap_index); | 1017 | set_page_private(newpage, swap_index); |
| 1012 | VM_BUG_ON(!PageSwapCache(oldpage)); | ||
| 1013 | SetPageSwapCache(newpage); | 1018 | SetPageSwapCache(newpage); |
| 1014 | 1019 | ||
| 1015 | /* | 1020 | /* |
| @@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, | |||
| 1019 | spin_lock_irq(&swap_mapping->tree_lock); | 1024 | spin_lock_irq(&swap_mapping->tree_lock); |
| 1020 | error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, | 1025 | error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, |
| 1021 | newpage); | 1026 | newpage); |
| 1022 | __inc_zone_page_state(newpage, NR_FILE_PAGES); | 1027 | if (!error) { |
| 1023 | __dec_zone_page_state(oldpage, NR_FILE_PAGES); | 1028 | __inc_zone_page_state(newpage, NR_FILE_PAGES); |
| 1029 | __dec_zone_page_state(oldpage, NR_FILE_PAGES); | ||
| 1030 | } | ||
| 1024 | spin_unlock_irq(&swap_mapping->tree_lock); | 1031 | spin_unlock_irq(&swap_mapping->tree_lock); |
| 1025 | BUG_ON(error); | ||
| 1026 | 1032 | ||
| 1027 | mem_cgroup_replace_page_cache(oldpage, newpage); | 1033 | if (unlikely(error)) { |
| 1028 | lru_cache_add_anon(newpage); | 1034 | /* |
| 1035 | * Is this possible? I think not, now that our callers check | ||
| 1036 | * both PageSwapCache and page_private after getting page lock; | ||
| 1037 | * but be defensive. Reverse old to newpage for clear and free. | ||
| 1038 | */ | ||
| 1039 | oldpage = newpage; | ||
| 1040 | } else { | ||
| 1041 | mem_cgroup_replace_page_cache(oldpage, newpage); | ||
| 1042 | lru_cache_add_anon(newpage); | ||
| 1043 | *pagep = newpage; | ||
| 1044 | } | ||
| 1029 | 1045 | ||
| 1030 | ClearPageSwapCache(oldpage); | 1046 | ClearPageSwapCache(oldpage); |
| 1031 | set_page_private(oldpage, 0); | 1047 | set_page_private(oldpage, 0); |
| @@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, | |||
| 1033 | unlock_page(oldpage); | 1049 | unlock_page(oldpage); |
| 1034 | page_cache_release(oldpage); | 1050 | page_cache_release(oldpage); |
| 1035 | page_cache_release(oldpage); | 1051 | page_cache_release(oldpage); |
| 1036 | return 0; | 1052 | return error; |
| 1037 | } | 1053 | } |
| 1038 | 1054 | ||
| 1039 | /* | 1055 | /* |
| @@ -1107,7 +1123,8 @@ repeat: | |||
| 1107 | 1123 | ||
| 1108 | /* We have to do this with page locked to prevent races */ | 1124 | /* We have to do this with page locked to prevent races */ |
| 1109 | lock_page(page); | 1125 | lock_page(page); |
| 1110 | if (!PageSwapCache(page) || page->mapping) { | 1126 | if (!PageSwapCache(page) || page_private(page) != swap.val || |
| 1127 | page->mapping) { | ||
| 1111 | error = -EEXIST; /* try again */ | 1128 | error = -EEXIST; /* try again */ |
| 1112 | goto failed; | 1129 | goto failed; |
| 1113 | } | 1130 | } |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 457b10baef59..71373d03fcee 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -31,6 +31,8 @@ | |||
| 31 | #include <linux/memcontrol.h> | 31 | #include <linux/memcontrol.h> |
| 32 | #include <linux/poll.h> | 32 | #include <linux/poll.h> |
| 33 | #include <linux/oom.h> | 33 | #include <linux/oom.h> |
| 34 | #include <linux/frontswap.h> | ||
| 35 | #include <linux/swapfile.h> | ||
| 34 | 36 | ||
| 35 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
| 36 | #include <asm/tlbflush.h> | 38 | #include <asm/tlbflush.h> |
| @@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, | |||
| 42 | static void free_swap_count_continuations(struct swap_info_struct *); | 44 | static void free_swap_count_continuations(struct swap_info_struct *); |
| 43 | static sector_t map_swap_entry(swp_entry_t, struct block_device**); | 45 | static sector_t map_swap_entry(swp_entry_t, struct block_device**); |
| 44 | 46 | ||
| 45 | static DEFINE_SPINLOCK(swap_lock); | 47 | DEFINE_SPINLOCK(swap_lock); |
| 46 | static unsigned int nr_swapfiles; | 48 | static unsigned int nr_swapfiles; |
| 47 | long nr_swap_pages; | 49 | long nr_swap_pages; |
| 48 | long total_swap_pages; | 50 | long total_swap_pages; |
| @@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry "; | |||
| 53 | static const char Bad_offset[] = "Bad swap offset entry "; | 55 | static const char Bad_offset[] = "Bad swap offset entry "; |
| 54 | static const char Unused_offset[] = "Unused swap offset entry "; | 56 | static const char Unused_offset[] = "Unused swap offset entry "; |
| 55 | 57 | ||
| 56 | static struct swap_list_t swap_list = {-1, -1}; | 58 | struct swap_list_t swap_list = {-1, -1}; |
| 57 | 59 | ||
| 58 | static struct swap_info_struct *swap_info[MAX_SWAPFILES]; | 60 | struct swap_info_struct *swap_info[MAX_SWAPFILES]; |
| 59 | 61 | ||
| 60 | static DEFINE_MUTEX(swapon_mutex); | 62 | static DEFINE_MUTEX(swapon_mutex); |
| 61 | 63 | ||
| @@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, | |||
| 556 | swap_list.next = p->type; | 558 | swap_list.next = p->type; |
| 557 | nr_swap_pages++; | 559 | nr_swap_pages++; |
| 558 | p->inuse_pages--; | 560 | p->inuse_pages--; |
| 561 | frontswap_invalidate_page(p->type, offset); | ||
| 559 | if ((p->flags & SWP_BLKDEV) && | 562 | if ((p->flags & SWP_BLKDEV) && |
| 560 | disk->fops->swap_slot_free_notify) | 563 | disk->fops->swap_slot_free_notify) |
| 561 | disk->fops->swap_slot_free_notify(p->bdev, offset); | 564 | disk->fops->swap_slot_free_notify(p->bdev, offset); |
| @@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm, | |||
| 985 | } | 988 | } |
| 986 | 989 | ||
| 987 | /* | 990 | /* |
| 988 | * Scan swap_map from current position to next entry still in use. | 991 | * Scan swap_map (or frontswap_map if frontswap parameter is true) |
| 992 | * from current position to next entry still in use. | ||
| 989 | * Recycle to start on reaching the end, returning 0 when empty. | 993 | * Recycle to start on reaching the end, returning 0 when empty. |
| 990 | */ | 994 | */ |
| 991 | static unsigned int find_next_to_unuse(struct swap_info_struct *si, | 995 | static unsigned int find_next_to_unuse(struct swap_info_struct *si, |
| 992 | unsigned int prev) | 996 | unsigned int prev, bool frontswap) |
| 993 | { | 997 | { |
| 994 | unsigned int max = si->max; | 998 | unsigned int max = si->max; |
| 995 | unsigned int i = prev; | 999 | unsigned int i = prev; |
| @@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, | |||
| 1015 | prev = 0; | 1019 | prev = 0; |
| 1016 | i = 1; | 1020 | i = 1; |
| 1017 | } | 1021 | } |
| 1022 | if (frontswap) { | ||
| 1023 | if (frontswap_test(si, i)) | ||
| 1024 | break; | ||
| 1025 | else | ||
| 1026 | continue; | ||
| 1027 | } | ||
| 1018 | count = si->swap_map[i]; | 1028 | count = si->swap_map[i]; |
| 1019 | if (count && swap_count(count) != SWAP_MAP_BAD) | 1029 | if (count && swap_count(count) != SWAP_MAP_BAD) |
| 1020 | break; | 1030 | break; |
| @@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, | |||
| 1026 | * We completely avoid races by reading each swap page in advance, | 1036 | * We completely avoid races by reading each swap page in advance, |
| 1027 | * and then search for the process using it. All the necessary | 1037 | * and then search for the process using it. All the necessary |
| 1028 | * page table adjustments can then be made atomically. | 1038 | * page table adjustments can then be made atomically. |
| 1039 | * | ||
| 1040 | * if the boolean frontswap is true, only unuse pages_to_unuse pages; | ||
| 1041 | * pages_to_unuse==0 means all pages; ignored if frontswap is false | ||
| 1029 | */ | 1042 | */ |
| 1030 | static int try_to_unuse(unsigned int type) | 1043 | int try_to_unuse(unsigned int type, bool frontswap, |
| 1044 | unsigned long pages_to_unuse) | ||
| 1031 | { | 1045 | { |
| 1032 | struct swap_info_struct *si = swap_info[type]; | 1046 | struct swap_info_struct *si = swap_info[type]; |
| 1033 | struct mm_struct *start_mm; | 1047 | struct mm_struct *start_mm; |
| @@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type) | |||
| 1060 | * one pass through swap_map is enough, but not necessarily: | 1074 | * one pass through swap_map is enough, but not necessarily: |
| 1061 | * there are races when an instance of an entry might be missed. | 1075 | * there are races when an instance of an entry might be missed. |
| 1062 | */ | 1076 | */ |
| 1063 | while ((i = find_next_to_unuse(si, i)) != 0) { | 1077 | while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { |
| 1064 | if (signal_pending(current)) { | 1078 | if (signal_pending(current)) { |
| 1065 | retval = -EINTR; | 1079 | retval = -EINTR; |
| 1066 | break; | 1080 | break; |
| @@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type) | |||
| 1227 | * interactive performance. | 1241 | * interactive performance. |
| 1228 | */ | 1242 | */ |
| 1229 | cond_resched(); | 1243 | cond_resched(); |
| 1244 | if (frontswap && pages_to_unuse > 0) { | ||
| 1245 | if (!--pages_to_unuse) | ||
| 1246 | break; | ||
| 1247 | } | ||
| 1230 | } | 1248 | } |
| 1231 | 1249 | ||
| 1232 | mmput(start_mm); | 1250 | mmput(start_mm); |
| @@ -1486,7 +1504,8 @@ bad_bmap: | |||
| 1486 | } | 1504 | } |
| 1487 | 1505 | ||
| 1488 | static void enable_swap_info(struct swap_info_struct *p, int prio, | 1506 | static void enable_swap_info(struct swap_info_struct *p, int prio, |
| 1489 | unsigned char *swap_map) | 1507 | unsigned char *swap_map, |
| 1508 | unsigned long *frontswap_map) | ||
| 1490 | { | 1509 | { |
| 1491 | int i, prev; | 1510 | int i, prev; |
| 1492 | 1511 | ||
| @@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, | |||
| 1496 | else | 1515 | else |
| 1497 | p->prio = --least_priority; | 1516 | p->prio = --least_priority; |
| 1498 | p->swap_map = swap_map; | 1517 | p->swap_map = swap_map; |
| 1518 | frontswap_map_set(p, frontswap_map); | ||
| 1499 | p->flags |= SWP_WRITEOK; | 1519 | p->flags |= SWP_WRITEOK; |
| 1500 | nr_swap_pages += p->pages; | 1520 | nr_swap_pages += p->pages; |
| 1501 | total_swap_pages += p->pages; | 1521 | total_swap_pages += p->pages; |
| @@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, | |||
| 1512 | swap_list.head = swap_list.next = p->type; | 1532 | swap_list.head = swap_list.next = p->type; |
| 1513 | else | 1533 | else |
| 1514 | swap_info[prev]->next = p->type; | 1534 | swap_info[prev]->next = p->type; |
| 1535 | frontswap_init(p->type); | ||
| 1515 | spin_unlock(&swap_lock); | 1536 | spin_unlock(&swap_lock); |
| 1516 | } | 1537 | } |
| 1517 | 1538 | ||
| @@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
| 1585 | spin_unlock(&swap_lock); | 1606 | spin_unlock(&swap_lock); |
| 1586 | 1607 | ||
| 1587 | oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); | 1608 | oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); |
| 1588 | err = try_to_unuse(type); | 1609 | err = try_to_unuse(type, false, 0); /* force all pages to be unused */ |
| 1589 | compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj); | 1610 | compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj); |
| 1590 | 1611 | ||
| 1591 | if (err) { | 1612 | if (err) { |
| @@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
| 1596 | * sys_swapoff for this swap_info_struct at this point. | 1617 | * sys_swapoff for this swap_info_struct at this point. |
| 1597 | */ | 1618 | */ |
| 1598 | /* re-insert swap space back into swap_list */ | 1619 | /* re-insert swap space back into swap_list */ |
| 1599 | enable_swap_info(p, p->prio, p->swap_map); | 1620 | enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p)); |
| 1600 | goto out_dput; | 1621 | goto out_dput; |
| 1601 | } | 1622 | } |
| 1602 | 1623 | ||
| @@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
| 1622 | swap_map = p->swap_map; | 1643 | swap_map = p->swap_map; |
| 1623 | p->swap_map = NULL; | 1644 | p->swap_map = NULL; |
| 1624 | p->flags = 0; | 1645 | p->flags = 0; |
| 1646 | frontswap_invalidate_area(type); | ||
| 1625 | spin_unlock(&swap_lock); | 1647 | spin_unlock(&swap_lock); |
| 1626 | mutex_unlock(&swapon_mutex); | 1648 | mutex_unlock(&swapon_mutex); |
| 1627 | vfree(swap_map); | 1649 | vfree(swap_map); |
| 1650 | vfree(frontswap_map_get(p)); | ||
| 1628 | /* Destroy swap account informatin */ | 1651 | /* Destroy swap account informatin */ |
| 1629 | swap_cgroup_swapoff(type); | 1652 | swap_cgroup_swapoff(type); |
| 1630 | 1653 | ||
| @@ -1893,24 +1916,20 @@ static unsigned long read_swap_header(struct swap_info_struct *p, | |||
| 1893 | 1916 | ||
| 1894 | /* | 1917 | /* |
| 1895 | * Find out how many pages are allowed for a single swap | 1918 | * Find out how many pages are allowed for a single swap |
| 1896 | * device. There are three limiting factors: 1) the number | 1919 | * device. There are two limiting factors: 1) the number |
| 1897 | * of bits for the swap offset in the swp_entry_t type, and | 1920 | * of bits for the swap offset in the swp_entry_t type, and |
| 1898 | * 2) the number of bits in the swap pte as defined by the | 1921 | * 2) the number of bits in the swap pte as defined by the |
| 1899 | * the different architectures, and 3) the number of free bits | 1922 | * different architectures. In order to find the |
| 1900 | * in an exceptional radix_tree entry. In order to find the | ||
| 1901 | * largest possible bit mask, a swap entry with swap type 0 | 1923 | * largest possible bit mask, a swap entry with swap type 0 |
| 1902 | * and swap offset ~0UL is created, encoded to a swap pte, | 1924 | * and swap offset ~0UL is created, encoded to a swap pte, |
| 1903 | * decoded to a swp_entry_t again, and finally the swap | 1925 | * decoded to a swp_entry_t again, and finally the swap |
| 1904 | * offset is extracted. This will mask all the bits from | 1926 | * offset is extracted. This will mask all the bits from |
| 1905 | * the initial ~0UL mask that can't be encoded in either | 1927 | * the initial ~0UL mask that can't be encoded in either |
| 1906 | * the swp_entry_t or the architecture definition of a | 1928 | * the swp_entry_t or the architecture definition of a |
| 1907 | * swap pte. Then the same is done for a radix_tree entry. | 1929 | * swap pte. |
| 1908 | */ | 1930 | */ |
| 1909 | maxpages = swp_offset(pte_to_swp_entry( | 1931 | maxpages = swp_offset(pte_to_swp_entry( |
| 1910 | swp_entry_to_pte(swp_entry(0, ~0UL)))); | 1932 | swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; |
| 1911 | maxpages = swp_offset(radix_to_swp_entry( | ||
| 1912 | swp_to_radix_entry(swp_entry(0, maxpages)))) + 1; | ||
| 1913 | |||
| 1914 | if (maxpages > swap_header->info.last_page) { | 1933 | if (maxpages > swap_header->info.last_page) { |
| 1915 | maxpages = swap_header->info.last_page + 1; | 1934 | maxpages = swap_header->info.last_page + 1; |
| 1916 | /* p->max is an unsigned int: don't overflow it */ | 1935 | /* p->max is an unsigned int: don't overflow it */ |
| @@ -1988,6 +2007,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 1988 | sector_t span; | 2007 | sector_t span; |
| 1989 | unsigned long maxpages; | 2008 | unsigned long maxpages; |
| 1990 | unsigned char *swap_map = NULL; | 2009 | unsigned char *swap_map = NULL; |
| 2010 | unsigned long *frontswap_map = NULL; | ||
| 1991 | struct page *page = NULL; | 2011 | struct page *page = NULL; |
| 1992 | struct inode *inode = NULL; | 2012 | struct inode *inode = NULL; |
| 1993 | 2013 | ||
| @@ -2071,6 +2091,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 2071 | error = nr_extents; | 2091 | error = nr_extents; |
| 2072 | goto bad_swap; | 2092 | goto bad_swap; |
| 2073 | } | 2093 | } |
| 2094 | /* frontswap enabled? set up bit-per-page map for frontswap */ | ||
| 2095 | if (frontswap_enabled) | ||
| 2096 | frontswap_map = vzalloc(maxpages / sizeof(long)); | ||
| 2074 | 2097 | ||
| 2075 | if (p->bdev) { | 2098 | if (p->bdev) { |
| 2076 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { | 2099 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { |
| @@ -2086,14 +2109,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 2086 | if (swap_flags & SWAP_FLAG_PREFER) | 2109 | if (swap_flags & SWAP_FLAG_PREFER) |
| 2087 | prio = | 2110 | prio = |
| 2088 | (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; | 2111 | (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; |
| 2089 | enable_swap_info(p, prio, swap_map); | 2112 | enable_swap_info(p, prio, swap_map, frontswap_map); |
| 2090 | 2113 | ||
| 2091 | printk(KERN_INFO "Adding %uk swap on %s. " | 2114 | printk(KERN_INFO "Adding %uk swap on %s. " |
| 2092 | "Priority:%d extents:%d across:%lluk %s%s\n", | 2115 | "Priority:%d extents:%d across:%lluk %s%s%s\n", |
| 2093 | p->pages<<(PAGE_SHIFT-10), name, p->prio, | 2116 | p->pages<<(PAGE_SHIFT-10), name, p->prio, |
| 2094 | nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), | 2117 | nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), |
| 2095 | (p->flags & SWP_SOLIDSTATE) ? "SS" : "", | 2118 | (p->flags & SWP_SOLIDSTATE) ? "SS" : "", |
| 2096 | (p->flags & SWP_DISCARDABLE) ? "D" : ""); | 2119 | (p->flags & SWP_DISCARDABLE) ? "D" : "", |
| 2120 | (frontswap_map) ? "FS" : ""); | ||
| 2097 | 2121 | ||
| 2098 | mutex_unlock(&swapon_mutex); | 2122 | mutex_unlock(&swapon_mutex); |
| 2099 | atomic_inc(&proc_poll_event); | 2123 | atomic_inc(&proc_poll_event); |
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 0301b328cf0f..86852963b7f7 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
| @@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr, | |||
| 1208 | if (addr->sat_addr.s_node == ATADDR_BCAST && | 1208 | if (addr->sat_addr.s_node == ATADDR_BCAST && |
| 1209 | !sock_flag(sk, SOCK_BROADCAST)) { | 1209 | !sock_flag(sk, SOCK_BROADCAST)) { |
| 1210 | #if 1 | 1210 | #if 1 |
| 1211 | printk(KERN_WARNING "%s is broken and did not set " | 1211 | pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n", |
| 1212 | "SO_BROADCAST. It will break when 2.2 is " | ||
| 1213 | "released.\n", | ||
| 1214 | current->comm); | 1212 | current->comm); |
| 1215 | #else | 1213 | #else |
| 1216 | return -EACCES; | 1214 | return -EACCES; |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 46e7f86acfc9..3e18af4dadc4 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
| @@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) | |||
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | if (sk->sk_state == BT_CONNECTED || !newsock || | 212 | if (sk->sk_state == BT_CONNECTED || !newsock || |
| 213 | test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) { | 213 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { |
| 214 | bt_accept_unlink(sk); | 214 | bt_accept_unlink(sk); |
| 215 | if (newsock) | 215 | if (newsock) |
| 216 | sock_graft(sk, newsock); | 216 | sock_graft(sk, newsock); |
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index ea5fb9fcc3f5..d23b6682f4e9 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
| @@ -36,9 +36,6 @@ | |||
| 36 | #define TRACE_ON 1 | 36 | #define TRACE_ON 1 |
| 37 | #define TRACE_OFF 0 | 37 | #define TRACE_OFF 0 |
| 38 | 38 | ||
| 39 | static void send_dm_alert(struct work_struct *unused); | ||
| 40 | |||
| 41 | |||
| 42 | /* | 39 | /* |
| 43 | * Globals, our netlink socket pointer | 40 | * Globals, our netlink socket pointer |
| 44 | * and the work handle that will send up | 41 | * and the work handle that will send up |
| @@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF; | |||
| 48 | static DEFINE_MUTEX(trace_state_mutex); | 45 | static DEFINE_MUTEX(trace_state_mutex); |
| 49 | 46 | ||
| 50 | struct per_cpu_dm_data { | 47 | struct per_cpu_dm_data { |
| 51 | struct work_struct dm_alert_work; | 48 | spinlock_t lock; |
| 52 | struct sk_buff __rcu *skb; | 49 | struct sk_buff *skb; |
| 53 | atomic_t dm_hit_count; | 50 | struct work_struct dm_alert_work; |
| 54 | struct timer_list send_timer; | 51 | struct timer_list send_timer; |
| 55 | int cpu; | ||
| 56 | }; | 52 | }; |
| 57 | 53 | ||
| 58 | struct dm_hw_stat_delta { | 54 | struct dm_hw_stat_delta { |
| @@ -78,13 +74,13 @@ static int dm_delay = 1; | |||
| 78 | static unsigned long dm_hw_check_delta = 2*HZ; | 74 | static unsigned long dm_hw_check_delta = 2*HZ; |
| 79 | static LIST_HEAD(hw_stats_list); | 75 | static LIST_HEAD(hw_stats_list); |
| 80 | 76 | ||
| 81 | static void reset_per_cpu_data(struct per_cpu_dm_data *data) | 77 | static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) |
| 82 | { | 78 | { |
| 83 | size_t al; | 79 | size_t al; |
| 84 | struct net_dm_alert_msg *msg; | 80 | struct net_dm_alert_msg *msg; |
| 85 | struct nlattr *nla; | 81 | struct nlattr *nla; |
| 86 | struct sk_buff *skb; | 82 | struct sk_buff *skb; |
| 87 | struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1); | 83 | unsigned long flags; |
| 88 | 84 | ||
| 89 | al = sizeof(struct net_dm_alert_msg); | 85 | al = sizeof(struct net_dm_alert_msg); |
| 90 | al += dm_hit_limit * sizeof(struct net_dm_drop_point); | 86 | al += dm_hit_limit * sizeof(struct net_dm_drop_point); |
| @@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data) | |||
| 99 | sizeof(struct net_dm_alert_msg)); | 95 | sizeof(struct net_dm_alert_msg)); |
| 100 | msg = nla_data(nla); | 96 | msg = nla_data(nla); |
| 101 | memset(msg, 0, al); | 97 | memset(msg, 0, al); |
| 102 | } else | 98 | } else { |
| 103 | schedule_work_on(data->cpu, &data->dm_alert_work); | 99 | mod_timer(&data->send_timer, jiffies + HZ / 10); |
| 104 | |||
| 105 | /* | ||
| 106 | * Don't need to lock this, since we are guaranteed to only | ||
| 107 | * run this on a single cpu at a time. | ||
| 108 | * Note also that we only update data->skb if the old and new skb | ||
| 109 | * pointers don't match. This ensures that we don't continually call | ||
| 110 | * synchornize_rcu if we repeatedly fail to alloc a new netlink message. | ||
| 111 | */ | ||
| 112 | if (skb != oskb) { | ||
| 113 | rcu_assign_pointer(data->skb, skb); | ||
| 114 | |||
| 115 | synchronize_rcu(); | ||
| 116 | |||
| 117 | atomic_set(&data->dm_hit_count, dm_hit_limit); | ||
| 118 | } | 100 | } |
| 119 | 101 | ||
| 102 | spin_lock_irqsave(&data->lock, flags); | ||
| 103 | swap(data->skb, skb); | ||
| 104 | spin_unlock_irqrestore(&data->lock, flags); | ||
| 105 | |||
| 106 | return skb; | ||
| 120 | } | 107 | } |
| 121 | 108 | ||
| 122 | static void send_dm_alert(struct work_struct *unused) | 109 | static void send_dm_alert(struct work_struct *work) |
| 123 | { | 110 | { |
| 124 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
| 125 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 112 | struct per_cpu_dm_data *data; |
| 126 | 113 | ||
| 127 | WARN_ON_ONCE(data->cpu != smp_processor_id()); | 114 | data = container_of(work, struct per_cpu_dm_data, dm_alert_work); |
| 128 | 115 | ||
| 129 | /* | 116 | skb = reset_per_cpu_data(data); |
| 130 | * Grab the skb we're about to send | ||
| 131 | */ | ||
| 132 | skb = rcu_dereference_protected(data->skb, 1); | ||
| 133 | |||
| 134 | /* | ||
| 135 | * Replace it with a new one | ||
| 136 | */ | ||
| 137 | reset_per_cpu_data(data); | ||
| 138 | 117 | ||
| 139 | /* | ||
| 140 | * Ship it! | ||
| 141 | */ | ||
| 142 | if (skb) | 118 | if (skb) |
| 143 | genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); | 119 | genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); |
| 144 | |||
| 145 | put_cpu_var(dm_cpu_data); | ||
| 146 | } | 120 | } |
| 147 | 121 | ||
| 148 | /* | 122 | /* |
| 149 | * This is the timer function to delay the sending of an alert | 123 | * This is the timer function to delay the sending of an alert |
| 150 | * in the event that more drops will arrive during the | 124 | * in the event that more drops will arrive during the |
| 151 | * hysteresis period. Note that it operates under the timer interrupt | 125 | * hysteresis period. |
| 152 | * so we don't need to disable preemption here | ||
| 153 | */ | 126 | */ |
| 154 | static void sched_send_work(unsigned long unused) | 127 | static void sched_send_work(unsigned long _data) |
| 155 | { | 128 | { |
| 156 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 129 | struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data; |
| 157 | |||
| 158 | schedule_work_on(smp_processor_id(), &data->dm_alert_work); | ||
| 159 | 130 | ||
| 160 | put_cpu_var(dm_cpu_data); | 131 | schedule_work(&data->dm_alert_work); |
| 161 | } | 132 | } |
| 162 | 133 | ||
| 163 | static void trace_drop_common(struct sk_buff *skb, void *location) | 134 | static void trace_drop_common(struct sk_buff *skb, void *location) |
| @@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location) | |||
| 167 | struct nlattr *nla; | 138 | struct nlattr *nla; |
| 168 | int i; | 139 | int i; |
| 169 | struct sk_buff *dskb; | 140 | struct sk_buff *dskb; |
| 170 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 141 | struct per_cpu_dm_data *data; |
| 171 | 142 | unsigned long flags; | |
| 172 | 143 | ||
| 173 | rcu_read_lock(); | 144 | local_irq_save(flags); |
| 174 | dskb = rcu_dereference(data->skb); | 145 | data = &__get_cpu_var(dm_cpu_data); |
| 146 | spin_lock(&data->lock); | ||
| 147 | dskb = data->skb; | ||
| 175 | 148 | ||
| 176 | if (!dskb) | 149 | if (!dskb) |
| 177 | goto out; | 150 | goto out; |
| 178 | 151 | ||
| 179 | if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { | ||
| 180 | /* | ||
| 181 | * we're already at zero, discard this hit | ||
| 182 | */ | ||
| 183 | goto out; | ||
| 184 | } | ||
| 185 | |||
| 186 | nlh = (struct nlmsghdr *)dskb->data; | 152 | nlh = (struct nlmsghdr *)dskb->data; |
| 187 | nla = genlmsg_data(nlmsg_data(nlh)); | 153 | nla = genlmsg_data(nlmsg_data(nlh)); |
| 188 | msg = nla_data(nla); | 154 | msg = nla_data(nla); |
| 189 | for (i = 0; i < msg->entries; i++) { | 155 | for (i = 0; i < msg->entries; i++) { |
| 190 | if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { | 156 | if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { |
| 191 | msg->points[i].count++; | 157 | msg->points[i].count++; |
| 192 | atomic_inc(&data->dm_hit_count); | ||
| 193 | goto out; | 158 | goto out; |
| 194 | } | 159 | } |
| 195 | } | 160 | } |
| 196 | 161 | if (msg->entries == dm_hit_limit) | |
| 162 | goto out; | ||
| 197 | /* | 163 | /* |
| 198 | * We need to create a new entry | 164 | * We need to create a new entry |
| 199 | */ | 165 | */ |
| @@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location) | |||
| 205 | 171 | ||
| 206 | if (!timer_pending(&data->send_timer)) { | 172 | if (!timer_pending(&data->send_timer)) { |
| 207 | data->send_timer.expires = jiffies + dm_delay * HZ; | 173 | data->send_timer.expires = jiffies + dm_delay * HZ; |
| 208 | add_timer_on(&data->send_timer, smp_processor_id()); | 174 | add_timer(&data->send_timer); |
| 209 | } | 175 | } |
| 210 | 176 | ||
| 211 | out: | 177 | out: |
| 212 | rcu_read_unlock(); | 178 | spin_unlock_irqrestore(&data->lock, flags); |
| 213 | put_cpu_var(dm_cpu_data); | ||
| 214 | return; | ||
| 215 | } | 179 | } |
| 216 | 180 | ||
| 217 | static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) | 181 | static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) |
| @@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void) | |||
| 418 | 382 | ||
| 419 | for_each_possible_cpu(cpu) { | 383 | for_each_possible_cpu(cpu) { |
| 420 | data = &per_cpu(dm_cpu_data, cpu); | 384 | data = &per_cpu(dm_cpu_data, cpu); |
| 421 | data->cpu = cpu; | ||
| 422 | INIT_WORK(&data->dm_alert_work, send_dm_alert); | 385 | INIT_WORK(&data->dm_alert_work, send_dm_alert); |
| 423 | init_timer(&data->send_timer); | 386 | init_timer(&data->send_timer); |
| 424 | data->send_timer.data = cpu; | 387 | data->send_timer.data = (unsigned long)data; |
| 425 | data->send_timer.function = sched_send_work; | 388 | data->send_timer.function = sched_send_work; |
| 389 | spin_lock_init(&data->lock); | ||
| 426 | reset_per_cpu_data(data); | 390 | reset_per_cpu_data(data); |
| 427 | } | 391 | } |
| 428 | 392 | ||
diff --git a/net/core/filter.c b/net/core/filter.c index a3eddb515d1b..d4ce2dc712e3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp) | |||
| 616 | /** | 616 | /** |
| 617 | * sk_unattached_filter_create - create an unattached filter | 617 | * sk_unattached_filter_create - create an unattached filter |
| 618 | * @fprog: the filter program | 618 | * @fprog: the filter program |
| 619 | * @sk: the socket to use | 619 | * @pfp: the unattached filter that is created |
| 620 | * | 620 | * |
| 621 | * Create a filter independent ofr any socket. We first run some | 621 | * Create a filter independent of any socket. We first run some |
| 622 | * sanity checks on it to make sure it does not explode on us later. | 622 | * sanity checks on it to make sure it does not explode on us later. |
| 623 | * If an error occurs or there is insufficient memory for the filter | 623 | * If an error occurs or there is insufficient memory for the filter |
| 624 | * a negative errno code is returned. On success the return is zero. | 624 | * a negative errno code is returned. On success the return is zero. |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index eb09f8bbbf07..d81d026138f0 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
| 2219 | rcu_read_lock_bh(); | 2219 | rcu_read_lock_bh(); |
| 2220 | nht = rcu_dereference_bh(tbl->nht); | 2220 | nht = rcu_dereference_bh(tbl->nht); |
| 2221 | 2221 | ||
| 2222 | for (h = 0; h < (1 << nht->hash_shift); h++) { | 2222 | for (h = s_h; h < (1 << nht->hash_shift); h++) { |
| 2223 | if (h < s_h) | ||
| 2224 | continue; | ||
| 2225 | if (h > s_h) | 2223 | if (h > s_h) |
| 2226 | s_idx = 0; | 2224 | s_idx = 0; |
| 2227 | for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; | 2225 | for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; |
| @@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
| 2260 | 2258 | ||
| 2261 | read_lock_bh(&tbl->lock); | 2259 | read_lock_bh(&tbl->lock); |
| 2262 | 2260 | ||
| 2263 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { | 2261 | for (h = s_h; h <= PNEIGH_HASHMASK; h++) { |
| 2264 | if (h < s_h) | ||
| 2265 | continue; | ||
| 2266 | if (h > s_h) | 2262 | if (h > s_h) |
| 2267 | s_idx = 0; | 2263 | s_idx = 0; |
| 2268 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { | 2264 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { |
| @@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2297 | struct neigh_table *tbl; | 2293 | struct neigh_table *tbl; |
| 2298 | int t, family, s_t; | 2294 | int t, family, s_t; |
| 2299 | int proxy = 0; | 2295 | int proxy = 0; |
| 2300 | int err = 0; | 2296 | int err; |
| 2301 | 2297 | ||
| 2302 | read_lock(&neigh_tbl_lock); | 2298 | read_lock(&neigh_tbl_lock); |
| 2303 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; | 2299 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; |
| @@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2311 | 2307 | ||
| 2312 | s_t = cb->args[0]; | 2308 | s_t = cb->args[0]; |
| 2313 | 2309 | ||
| 2314 | for (tbl = neigh_tables, t = 0; tbl && (err >= 0); | 2310 | for (tbl = neigh_tables, t = 0; tbl; |
| 2315 | tbl = tbl->next, t++) { | 2311 | tbl = tbl->next, t++) { |
| 2316 | if (t < s_t || (family && tbl->family != family)) | 2312 | if (t < s_t || (family && tbl->family != family)) |
| 2317 | continue; | 2313 | continue; |
| @@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2322 | err = pneigh_dump_table(tbl, skb, cb); | 2318 | err = pneigh_dump_table(tbl, skb, cb); |
| 2323 | else | 2319 | else |
| 2324 | err = neigh_dump_table(tbl, skb, cb); | 2320 | err = neigh_dump_table(tbl, skb, cb); |
| 2321 | if (err < 0) | ||
| 2322 | break; | ||
| 2325 | } | 2323 | } |
| 2326 | read_unlock(&neigh_tbl_lock); | 2324 | read_unlock(&neigh_tbl_lock); |
| 2327 | 2325 | ||
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 3d84fb9d8873..f9f40b932e4b 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev); | |||
| 362 | 362 | ||
| 363 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | 363 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) |
| 364 | { | 364 | { |
| 365 | int total_len, eth_len, ip_len, udp_len; | 365 | int total_len, ip_len, udp_len; |
| 366 | struct sk_buff *skb; | 366 | struct sk_buff *skb; |
| 367 | struct udphdr *udph; | 367 | struct udphdr *udph; |
| 368 | struct iphdr *iph; | 368 | struct iphdr *iph; |
| 369 | struct ethhdr *eth; | 369 | struct ethhdr *eth; |
| 370 | 370 | ||
| 371 | udp_len = len + sizeof(*udph); | 371 | udp_len = len + sizeof(*udph); |
| 372 | ip_len = eth_len = udp_len + sizeof(*iph); | 372 | ip_len = udp_len + sizeof(*iph); |
| 373 | total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; | 373 | total_len = ip_len + LL_RESERVED_SPACE(np->dev); |
| 374 | 374 | ||
| 375 | skb = find_skb(np, total_len, total_len - len); | 375 | skb = find_skb(np, total_len + np->dev->needed_tailroom, |
| 376 | total_len - len); | ||
| 376 | if (!skb) | 377 | if (!skb) |
| 377 | return; | 378 | return; |
| 378 | 379 | ||
| 379 | skb_copy_to_linear_data(skb, msg, len); | 380 | skb_copy_to_linear_data(skb, msg, len); |
| 380 | skb->len += len; | 381 | skb_put(skb, len); |
| 381 | 382 | ||
| 382 | skb_push(skb, sizeof(*udph)); | 383 | skb_push(skb, sizeof(*udph)); |
| 383 | skb_reset_transport_header(skb); | 384 | skb_reset_transport_header(skb); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 016694d62484..d78671e9d545 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial); | |||
| 3361 | * @to: prior buffer | 3361 | * @to: prior buffer |
| 3362 | * @from: buffer to add | 3362 | * @from: buffer to add |
| 3363 | * @fragstolen: pointer to boolean | 3363 | * @fragstolen: pointer to boolean |
| 3364 | * | 3364 | * @delta_truesize: how much more was allocated than was requested |
| 3365 | */ | 3365 | */ |
| 3366 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, | 3366 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
| 3367 | bool *fragstolen, int *delta_truesize) | 3367 | bool *fragstolen, int *delta_truesize) |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index d4d61b694fab..dfba343b2509 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
| @@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) | |||
| 560 | } | 560 | } |
| 561 | EXPORT_SYMBOL(inet_peer_xrlim_allow); | 561 | EXPORT_SYMBOL(inet_peer_xrlim_allow); |
| 562 | 562 | ||
| 563 | static void inetpeer_inval_rcu(struct rcu_head *head) | ||
| 564 | { | ||
| 565 | struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu); | ||
| 566 | |||
| 567 | spin_lock_bh(&gc_lock); | ||
| 568 | list_add_tail(&p->gc_list, &gc_list); | ||
| 569 | spin_unlock_bh(&gc_lock); | ||
| 570 | |||
| 571 | schedule_delayed_work(&gc_work, gc_delay); | ||
| 572 | } | ||
| 573 | |||
| 563 | void inetpeer_invalidate_tree(int family) | 574 | void inetpeer_invalidate_tree(int family) |
| 564 | { | 575 | { |
| 565 | struct inet_peer *old, *new, *prev; | 576 | struct inet_peer *old, *new, *prev; |
| @@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family) | |||
| 576 | prev = cmpxchg(&base->root, old, new); | 587 | prev = cmpxchg(&base->root, old, new); |
| 577 | if (prev == old) { | 588 | if (prev == old) { |
| 578 | base->total = 0; | 589 | base->total = 0; |
| 579 | spin_lock(&gc_lock); | 590 | call_rcu(&prev->gc_rcu, inetpeer_inval_rcu); |
| 580 | list_add_tail(&prev->gc_list, &gc_list); | ||
| 581 | spin_unlock(&gc_lock); | ||
| 582 | schedule_delayed_work(&gc_work, gc_delay); | ||
| 583 | } | 591 | } |
| 584 | 592 | ||
| 585 | out: | 593 | out: |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index e5c44fc586ab..ab09b126423c 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
| @@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
| 44 | struct ip_options *opt = &(IPCB(skb)->opt); | 44 | struct ip_options *opt = &(IPCB(skb)->opt); |
| 45 | 45 | ||
| 46 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 46 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
| 47 | IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
| 47 | 48 | ||
| 48 | if (unlikely(opt->optlen)) | 49 | if (unlikely(opt->optlen)) |
| 49 | ip_forward_options(skb); | 50 | ip_forward_options(skb); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index a9e519ad6db5..c94bbc6f2ba3 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
| @@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
| 1574 | struct ip_options *opt = &(IPCB(skb)->opt); | 1574 | struct ip_options *opt = &(IPCB(skb)->opt); |
| 1575 | 1575 | ||
| 1576 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 1576 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
| 1577 | IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
| 1577 | 1578 | ||
| 1578 | if (unlikely(opt->optlen)) | 1579 | if (unlikely(opt->optlen)) |
| 1579 | ip_forward_options(skb); | 1580 | ip_forward_options(skb); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 0c220a416626..74c21b924a79 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
| @@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg) | |||
| 1561 | neigh_flags = neigh->flags; | 1561 | neigh_flags = neigh->flags; |
| 1562 | neigh_release(neigh); | 1562 | neigh_release(neigh); |
| 1563 | } | 1563 | } |
| 1564 | if (neigh_flags & NTF_ROUTER) { | 1564 | if (!(neigh_flags & NTF_ROUTER)) { |
| 1565 | RT6_TRACE("purging route %p via non-router but gateway\n", | 1565 | RT6_TRACE("purging route %p via non-router but gateway\n", |
| 1566 | rt); | 1566 | rt); |
| 1567 | return -1; | 1567 | return -1; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 17b8c67998bb..decc21d19c53 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb) | |||
| 526 | hdr->hop_limit--; | 526 | hdr->hop_limit--; |
| 527 | 527 | ||
| 528 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); | 528 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
| 529 | IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
| 529 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, | 530 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, |
| 530 | ip6_forward_finish); | 531 | ip6_forward_finish); |
| 531 | 532 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index b15dc08643a4..461e47c8e956 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
| @@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) | |||
| 1886 | { | 1886 | { |
| 1887 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | 1887 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), |
| 1888 | IPSTATS_MIB_OUTFORWDATAGRAMS); | 1888 | IPSTATS_MIB_OUTFORWDATAGRAMS); |
| 1889 | IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | ||
| 1890 | IPSTATS_MIB_OUTOCTETS, skb->len); | ||
| 1889 | return dst_output(skb); | 1891 | return dst_output(skb); |
| 1890 | } | 1892 | } |
| 1891 | 1893 | ||
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 443591d629ca..185f12f4a5fa 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
| @@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session) | |||
| 162 | if (dev) { | 162 | if (dev) { |
| 163 | unregister_netdev(dev); | 163 | unregister_netdev(dev); |
| 164 | spriv->dev = NULL; | 164 | spriv->dev = NULL; |
| 165 | module_put(THIS_MODULE); | ||
| 165 | } | 166 | } |
| 166 | } | 167 | } |
| 167 | } | 168 | } |
| @@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p | |||
| 249 | if (rc < 0) | 250 | if (rc < 0) |
| 250 | goto out_del_dev; | 251 | goto out_del_dev; |
| 251 | 252 | ||
| 253 | __module_get(THIS_MODULE); | ||
| 252 | /* Must be done after register_netdev() */ | 254 | /* Must be done after register_netdev() */ |
| 253 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | 255 | strlcpy(session->ifname, dev->name, IFNAMSIZ); |
| 254 | 256 | ||
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 70614e7affab..61d8b75d2686 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
| @@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
| 464 | sk->sk_bound_dev_if); | 464 | sk->sk_bound_dev_if); |
| 465 | if (IS_ERR(rt)) | 465 | if (IS_ERR(rt)) |
| 466 | goto no_route; | 466 | goto no_route; |
| 467 | if (connected) | 467 | if (connected) { |
| 468 | sk_setup_caps(sk, &rt->dst); | 468 | sk_setup_caps(sk, &rt->dst); |
| 469 | else | 469 | } else { |
| 470 | dst_release(&rt->dst); /* safe since we hold rcu_read_lock */ | 470 | skb_dst_set(skb, &rt->dst); |
| 471 | goto xmit; | ||
| 472 | } | ||
| 471 | } | 473 | } |
| 472 | 474 | ||
| 473 | /* We dont need to clone dst here, it is guaranteed to not disappear. | 475 | /* We dont need to clone dst here, it is guaranteed to not disappear. |
| @@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
| 475 | */ | 477 | */ |
| 476 | skb_dst_set_noref(skb, &rt->dst); | 478 | skb_dst_set_noref(skb, &rt->dst); |
| 477 | 479 | ||
| 480 | xmit: | ||
| 478 | /* Queue the packet to IP for output */ | 481 | /* Queue the packet to IP for output */ |
| 479 | rc = ip_queue_xmit(skb, &inet->cork.fl); | 482 | rc = ip_queue_xmit(skb, &inet->cork.fl); |
| 480 | rcu_read_unlock(); | 483 | rcu_read_unlock(); |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 26ddb699d693..c649188314cc 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
| @@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) | |||
| 145 | struct tid_ampdu_rx *tid_rx; | 145 | struct tid_ampdu_rx *tid_rx; |
| 146 | unsigned long timeout; | 146 | unsigned long timeout; |
| 147 | 147 | ||
| 148 | rcu_read_lock(); | ||
| 148 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); | 149 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); |
| 149 | if (!tid_rx) | 150 | if (!tid_rx) { |
| 151 | rcu_read_unlock(); | ||
| 150 | return; | 152 | return; |
| 153 | } | ||
| 151 | 154 | ||
| 152 | timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); | 155 | timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); |
| 153 | if (time_is_after_jiffies(timeout)) { | 156 | if (time_is_after_jiffies(timeout)) { |
| 154 | mod_timer(&tid_rx->session_timer, timeout); | 157 | mod_timer(&tid_rx->session_timer, timeout); |
| 158 | rcu_read_unlock(); | ||
| 155 | return; | 159 | return; |
| 156 | } | 160 | } |
| 161 | rcu_read_unlock(); | ||
| 157 | 162 | ||
| 158 | #ifdef CONFIG_MAC80211_HT_DEBUG | 163 | #ifdef CONFIG_MAC80211_HT_DEBUG |
| 159 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 164 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 495831ee48f1..e9cecca5c44d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, | |||
| 533 | sinfo.filled = 0; | 533 | sinfo.filled = 0; |
| 534 | sta_set_sinfo(sta, &sinfo); | 534 | sta_set_sinfo(sta, &sinfo); |
| 535 | 535 | ||
| 536 | if (sinfo.filled | STATION_INFO_TX_BITRATE) | 536 | if (sinfo.filled & STATION_INFO_TX_BITRATE) |
| 537 | data[i] = 100000 * | 537 | data[i] = 100000 * |
| 538 | cfg80211_calculate_bitrate(&sinfo.txrate); | 538 | cfg80211_calculate_bitrate(&sinfo.txrate); |
| 539 | i++; | 539 | i++; |
| 540 | if (sinfo.filled | STATION_INFO_RX_BITRATE) | 540 | if (sinfo.filled & STATION_INFO_RX_BITRATE) |
| 541 | data[i] = 100000 * | 541 | data[i] = 100000 * |
| 542 | cfg80211_calculate_bitrate(&sinfo.rxrate); | 542 | cfg80211_calculate_bitrate(&sinfo.rxrate); |
| 543 | i++; | 543 | i++; |
| 544 | 544 | ||
| 545 | if (sinfo.filled | STATION_INFO_SIGNAL_AVG) | 545 | if (sinfo.filled & STATION_INFO_SIGNAL_AVG) |
| 546 | data[i] = (u8)sinfo.signal_avg; | 546 | data[i] = (u8)sinfo.signal_avg; |
| 547 | i++; | 547 | i++; |
| 548 | } else { | 548 | } else { |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d4c19a7773db..8664111d0566 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
| @@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 637 | ieee80211_configure_filter(local); | 637 | ieee80211_configure_filter(local); |
| 638 | break; | 638 | break; |
| 639 | default: | 639 | default: |
| 640 | mutex_lock(&local->mtx); | ||
| 641 | if (local->hw_roc_dev == sdata->dev && | ||
| 642 | local->hw_roc_channel) { | ||
| 643 | /* ignore return value since this is racy */ | ||
| 644 | drv_cancel_remain_on_channel(local); | ||
| 645 | ieee80211_queue_work(&local->hw, &local->hw_roc_done); | ||
| 646 | } | ||
| 647 | mutex_unlock(&local->mtx); | ||
| 648 | |||
| 649 | flush_work(&local->hw_roc_start); | ||
| 650 | flush_work(&local->hw_roc_done); | ||
| 651 | |||
| 640 | flush_work(&sdata->work); | 652 | flush_work(&sdata->work); |
| 641 | /* | 653 | /* |
| 642 | * When we get here, the interface is marked down. | 654 | * When we get here, the interface is marked down. |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 04c306308987..91d84cc77bbf 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
| @@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |||
| 1220 | sdata->vif.bss_conf.qos = true; | 1220 | sdata->vif.bss_conf.qos = true; |
| 1221 | } | 1221 | } |
| 1222 | 1222 | ||
| 1223 | static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) | ||
| 1224 | { | ||
| 1225 | lockdep_assert_held(&sdata->local->mtx); | ||
| 1226 | |||
| 1227 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | ||
| 1228 | IEEE80211_STA_BEACON_POLL); | ||
| 1229 | ieee80211_run_deferred_scan(sdata->local); | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) | ||
| 1233 | { | ||
| 1234 | mutex_lock(&sdata->local->mtx); | ||
| 1235 | __ieee80211_stop_poll(sdata); | ||
| 1236 | mutex_unlock(&sdata->local->mtx); | ||
| 1237 | } | ||
| 1238 | |||
| 1223 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | 1239 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, |
| 1224 | u16 capab, bool erp_valid, u8 erp) | 1240 | u16 capab, bool erp_valid, u8 erp) |
| 1225 | { | 1241 | { |
| @@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
| 1285 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; | 1301 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; |
| 1286 | 1302 | ||
| 1287 | /* just to be sure */ | 1303 | /* just to be sure */ |
| 1288 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 1304 | ieee80211_stop_poll(sdata); |
| 1289 | IEEE80211_STA_BEACON_POLL); | ||
| 1290 | 1305 | ||
| 1291 | ieee80211_led_assoc(local, 1); | 1306 | ieee80211_led_assoc(local, 1); |
| 1292 | 1307 | ||
| @@ -1456,8 +1471,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) | |||
| 1456 | return; | 1471 | return; |
| 1457 | } | 1472 | } |
| 1458 | 1473 | ||
| 1459 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 1474 | __ieee80211_stop_poll(sdata); |
| 1460 | IEEE80211_STA_BEACON_POLL); | ||
| 1461 | 1475 | ||
| 1462 | mutex_lock(&local->iflist_mtx); | 1476 | mutex_lock(&local->iflist_mtx); |
| 1463 | ieee80211_recalc_ps(local, -1); | 1477 | ieee80211_recalc_ps(local, -1); |
| @@ -1477,7 +1491,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) | |||
| 1477 | round_jiffies_up(jiffies + | 1491 | round_jiffies_up(jiffies + |
| 1478 | IEEE80211_CONNECTION_IDLE_TIME)); | 1492 | IEEE80211_CONNECTION_IDLE_TIME)); |
| 1479 | out: | 1493 | out: |
| 1480 | ieee80211_run_deferred_scan(local); | ||
| 1481 | mutex_unlock(&local->mtx); | 1494 | mutex_unlock(&local->mtx); |
| 1482 | } | 1495 | } |
| 1483 | 1496 | ||
| @@ -2408,7 +2421,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
| 2408 | net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", | 2421 | net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", |
| 2409 | sdata->name); | 2422 | sdata->name); |
| 2410 | #endif | 2423 | #endif |
| 2424 | mutex_lock(&local->mtx); | ||
| 2411 | ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; | 2425 | ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; |
| 2426 | ieee80211_run_deferred_scan(local); | ||
| 2427 | mutex_unlock(&local->mtx); | ||
| 2428 | |||
| 2412 | mutex_lock(&local->iflist_mtx); | 2429 | mutex_lock(&local->iflist_mtx); |
| 2413 | ieee80211_recalc_ps(local, -1); | 2430 | ieee80211_recalc_ps(local, -1); |
| 2414 | mutex_unlock(&local->iflist_mtx); | 2431 | mutex_unlock(&local->iflist_mtx); |
| @@ -2595,8 +2612,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, | |||
| 2595 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2612 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
| 2596 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; | 2613 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; |
| 2597 | 2614 | ||
| 2598 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 2615 | ieee80211_stop_poll(sdata); |
| 2599 | IEEE80211_STA_BEACON_POLL); | ||
| 2600 | 2616 | ||
| 2601 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, | 2617 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, |
| 2602 | false, frame_buf); | 2618 | false, frame_buf); |
| @@ -2874,8 +2890,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | |||
| 2874 | u32 flags; | 2890 | u32 flags; |
| 2875 | 2891 | ||
| 2876 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 2892 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
| 2877 | sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | | 2893 | __ieee80211_stop_poll(sdata); |
| 2878 | IEEE80211_STA_CONNECTION_POLL); | ||
| 2879 | 2894 | ||
| 2880 | /* let's probe the connection once */ | 2895 | /* let's probe the connection once */ |
| 2881 | flags = sdata->local->hw.flags; | 2896 | flags = sdata->local->hw.flags; |
| @@ -2944,7 +2959,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) | |||
| 2944 | if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) | 2959 | if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) |
| 2945 | add_timer(&ifmgd->chswitch_timer); | 2960 | add_timer(&ifmgd->chswitch_timer); |
| 2946 | ieee80211_sta_reset_beacon_monitor(sdata); | 2961 | ieee80211_sta_reset_beacon_monitor(sdata); |
| 2962 | |||
| 2963 | mutex_lock(&sdata->local->mtx); | ||
| 2947 | ieee80211_restart_sta_timer(sdata); | 2964 | ieee80211_restart_sta_timer(sdata); |
| 2965 | mutex_unlock(&sdata->local->mtx); | ||
| 2948 | } | 2966 | } |
| 2949 | #endif | 2967 | #endif |
| 2950 | 2968 | ||
| @@ -3106,7 +3124,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
| 3106 | } | 3124 | } |
| 3107 | 3125 | ||
| 3108 | local->oper_channel = cbss->channel; | 3126 | local->oper_channel = cbss->channel; |
| 3109 | ieee80211_hw_config(local, 0); | 3127 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); |
| 3110 | 3128 | ||
| 3111 | if (!have_sta) { | 3129 | if (!have_sta) { |
| 3112 | u32 rates = 0, basic_rates = 0; | 3130 | u32 rates = 0, basic_rates = 0; |
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index f054e94901a2..935aa4b6deee 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
| @@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work) | |||
| 234 | return; | 234 | return; |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | /* was never transmitted */ | ||
| 238 | if (local->hw_roc_skb) { | ||
| 239 | u64 cookie; | ||
| 240 | |||
| 241 | cookie = local->hw_roc_cookie ^ 2; | ||
| 242 | |||
| 243 | cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie, | ||
| 244 | local->hw_roc_skb->data, | ||
| 245 | local->hw_roc_skb->len, false, | ||
| 246 | GFP_KERNEL); | ||
| 247 | |||
| 248 | kfree_skb(local->hw_roc_skb); | ||
| 249 | local->hw_roc_skb = NULL; | ||
| 250 | local->hw_roc_skb_for_status = NULL; | ||
| 251 | } | ||
| 252 | |||
| 237 | if (!local->hw_roc_for_tx) | 253 | if (!local->hw_roc_for_tx) |
| 238 | cfg80211_remain_on_channel_expired(local->hw_roc_dev, | 254 | cfg80211_remain_on_channel_expired(local->hw_roc_dev, |
| 239 | local->hw_roc_cookie, | 255 | local->hw_roc_cookie, |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index f5b1638fbf80..de455f8bbb91 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
| @@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) | |||
| 378 | /* make the station visible */ | 378 | /* make the station visible */ |
| 379 | sta_info_hash_add(local, sta); | 379 | sta_info_hash_add(local, sta); |
| 380 | 380 | ||
| 381 | list_add(&sta->list, &local->sta_list); | 381 | list_add_rcu(&sta->list, &local->sta_list); |
| 382 | 382 | ||
| 383 | set_sta_flag(sta, WLAN_STA_INSERTED); | 383 | set_sta_flag(sta, WLAN_STA_INSERTED); |
| 384 | 384 | ||
| @@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
| 688 | if (ret) | 688 | if (ret) |
| 689 | return ret; | 689 | return ret; |
| 690 | 690 | ||
| 691 | list_del(&sta->list); | 691 | list_del_rcu(&sta->list); |
| 692 | 692 | ||
| 693 | mutex_lock(&local->key_mtx); | 693 | mutex_lock(&local->key_mtx); |
| 694 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) | 694 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 847215bb2a6f..e453212fa17f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
| 1737 | __le16 fc; | 1737 | __le16 fc; |
| 1738 | struct ieee80211_hdr hdr; | 1738 | struct ieee80211_hdr hdr; |
| 1739 | struct ieee80211s_hdr mesh_hdr __maybe_unused; | 1739 | struct ieee80211s_hdr mesh_hdr __maybe_unused; |
| 1740 | struct mesh_path __maybe_unused *mppath = NULL; | 1740 | struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; |
| 1741 | const u8 *encaps_data; | 1741 | const u8 *encaps_data; |
| 1742 | int encaps_len, skip_header_bytes; | 1742 | int encaps_len, skip_header_bytes; |
| 1743 | int nh_pos, h_pos; | 1743 | int nh_pos, h_pos; |
| @@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
| 1803 | goto fail; | 1803 | goto fail; |
| 1804 | } | 1804 | } |
| 1805 | rcu_read_lock(); | 1805 | rcu_read_lock(); |
| 1806 | if (!is_multicast_ether_addr(skb->data)) | 1806 | if (!is_multicast_ether_addr(skb->data)) { |
| 1807 | mppath = mpp_path_lookup(skb->data, sdata); | 1807 | mpath = mesh_path_lookup(skb->data, sdata); |
| 1808 | if (!mpath) | ||
| 1809 | mppath = mpp_path_lookup(skb->data, sdata); | ||
| 1810 | } | ||
| 1808 | 1811 | ||
| 1809 | /* | 1812 | /* |
| 1810 | * Use address extension if it is a packet from | 1813 | * Use address extension if it is a packet from |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a44c6807df01..8dd4712620ff 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
| @@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
| 1271 | enum ieee80211_sta_state state; | 1271 | enum ieee80211_sta_state state; |
| 1272 | 1272 | ||
| 1273 | for (state = IEEE80211_STA_NOTEXIST; | 1273 | for (state = IEEE80211_STA_NOTEXIST; |
| 1274 | state < sta->sta_state - 1; state++) | 1274 | state < sta->sta_state; state++) |
| 1275 | WARN_ON(drv_sta_state(local, sta->sdata, sta, | 1275 | WARN_ON(drv_sta_state(local, sta->sdata, sta, |
| 1276 | state, state + 1)); | 1276 | state, state + 1)); |
| 1277 | } | 1277 | } |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 46d69d7f1bb4..31f50bc3a312 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
| @@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, | |||
| 270 | return 0; | 270 | return 0; |
| 271 | 271 | ||
| 272 | /* RTP port is even */ | 272 | /* RTP port is even */ |
| 273 | port &= htons(~1); | 273 | rtp_port = port & ~htons(1); |
| 274 | rtp_port = port; | 274 | rtcp_port = port | htons(1); |
| 275 | rtcp_port = htons(ntohs(port) + 1); | ||
| 276 | 275 | ||
| 277 | /* Create expect for RTP */ | 276 | /* Create expect for RTP */ |
| 278 | if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) | 277 | if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) |
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 0a96a43108ed..1686ca1b53a1 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c | |||
| @@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK"); | |||
| 32 | MODULE_ALIAS("ip6t_HMARK"); | 32 | MODULE_ALIAS("ip6t_HMARK"); |
| 33 | 33 | ||
| 34 | struct hmark_tuple { | 34 | struct hmark_tuple { |
| 35 | u32 src; | 35 | __be32 src; |
| 36 | u32 dst; | 36 | __be32 dst; |
| 37 | union hmark_ports uports; | 37 | union hmark_ports uports; |
| 38 | uint8_t proto; | 38 | u8 proto; |
| 39 | }; | 39 | }; |
| 40 | 40 | ||
| 41 | static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) | 41 | static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask) |
| 42 | { | 42 | { |
| 43 | return (addr32[0] & mask[0]) ^ | 43 | return (addr32[0] & mask[0]) ^ |
| 44 | (addr32[1] & mask[1]) ^ | 44 | (addr32[1] & mask[1]) ^ |
| @@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) | |||
| 46 | (addr32[3] & mask[3]); | 46 | (addr32[3] & mask[3]); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static inline u32 | 49 | static inline __be32 |
| 50 | hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) | 50 | hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask) |
| 51 | { | 51 | { |
| 52 | switch (l3num) { | 52 | switch (l3num) { |
| 53 | case AF_INET: | 53 | case AF_INET: |
| @@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) | |||
| 58 | return 0; | 58 | return 0; |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | static inline void hmark_swap_ports(union hmark_ports *uports, | ||
| 62 | const struct xt_hmark_info *info) | ||
| 63 | { | ||
| 64 | union hmark_ports hp; | ||
| 65 | u16 src, dst; | ||
| 66 | |||
| 67 | hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32; | ||
| 68 | src = ntohs(hp.b16.src); | ||
| 69 | dst = ntohs(hp.b16.dst); | ||
| 70 | |||
| 71 | if (dst > src) | ||
| 72 | uports->v32 = (dst << 16) | src; | ||
| 73 | else | ||
| 74 | uports->v32 = (src << 16) | dst; | ||
| 75 | } | ||
| 76 | |||
| 61 | static int | 77 | static int |
| 62 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | 78 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, |
| 63 | const struct xt_hmark_info *info) | 79 | const struct xt_hmark_info *info) |
| @@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | |||
| 74 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | 90 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
| 75 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 91 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
| 76 | 92 | ||
| 77 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, | 93 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6, |
| 78 | info->src_mask.all); | 94 | info->src_mask.ip6); |
| 79 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, | 95 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6, |
| 80 | info->dst_mask.all); | 96 | info->dst_mask.ip6); |
| 81 | 97 | ||
| 82 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 98 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
| 83 | return 0; | 99 | return 0; |
| 84 | 100 | ||
| 85 | t->proto = nf_ct_protonum(ct); | 101 | t->proto = nf_ct_protonum(ct); |
| 86 | if (t->proto != IPPROTO_ICMP) { | 102 | if (t->proto != IPPROTO_ICMP) { |
| 87 | t->uports.p16.src = otuple->src.u.all; | 103 | t->uports.b16.src = otuple->src.u.all; |
| 88 | t->uports.p16.dst = rtuple->src.u.all; | 104 | t->uports.b16.dst = rtuple->src.u.all; |
| 89 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 105 | hmark_swap_ports(&t->uports, info); |
| 90 | info->port_set.v32; | ||
| 91 | if (t->uports.p16.dst < t->uports.p16.src) | ||
| 92 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
| 93 | } | 106 | } |
| 94 | 107 | ||
| 95 | return 0; | 108 | return 0; |
| @@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | |||
| 98 | #endif | 111 | #endif |
| 99 | } | 112 | } |
| 100 | 113 | ||
| 114 | /* This hash function is endian independent, to ensure consistent hashing if | ||
| 115 | * the cluster is composed of big and little endian systems. */ | ||
| 101 | static inline u32 | 116 | static inline u32 |
| 102 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) | 117 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) |
| 103 | { | 118 | { |
| 104 | u32 hash; | 119 | u32 hash; |
| 120 | u32 src = ntohl(t->src); | ||
| 121 | u32 dst = ntohl(t->dst); | ||
| 105 | 122 | ||
| 106 | if (t->dst < t->src) | 123 | if (dst < src) |
| 107 | swap(t->src, t->dst); | 124 | swap(src, dst); |
| 108 | 125 | ||
| 109 | hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); | 126 | hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); |
| 110 | hash = hash ^ (t->proto & info->proto_mask); | 127 | hash = hash ^ (t->proto & info->proto_mask); |
| 111 | 128 | ||
| 112 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; | 129 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; |
| @@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, | |||
| 126 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) | 143 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) |
| 127 | return; | 144 | return; |
| 128 | 145 | ||
| 129 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 146 | hmark_swap_ports(&t->uports, info); |
| 130 | info->port_set.v32; | ||
| 131 | |||
| 132 | if (t->uports.p16.dst < t->uports.p16.src) | ||
| 133 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
| 134 | } | 147 | } |
| 135 | 148 | ||
| 136 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 149 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
| @@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, | |||
| 178 | return -1; | 191 | return -1; |
| 179 | } | 192 | } |
| 180 | noicmp: | 193 | noicmp: |
| 181 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); | 194 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6); |
| 182 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); | 195 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6); |
| 183 | 196 | ||
| 184 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 197 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
| 185 | return 0; | 198 | return 0; |
| @@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, | |||
| 255 | } | 268 | } |
| 256 | } | 269 | } |
| 257 | 270 | ||
| 258 | t->src = (__force u32) ip->saddr; | 271 | t->src = ip->saddr & info->src_mask.ip; |
| 259 | t->dst = (__force u32) ip->daddr; | 272 | t->dst = ip->daddr & info->dst_mask.ip; |
| 260 | |||
| 261 | t->src &= info->src_mask.ip; | ||
| 262 | t->dst &= info->dst_mask.ip; | ||
| 263 | 273 | ||
| 264 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 274 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
| 265 | return 0; | 275 | return 0; |
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 3f339b19d140..17a707db40eb 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
| @@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, | |||
| 292 | 292 | ||
| 293 | pr_debug("%p\n", sk); | 293 | pr_debug("%p\n", sk); |
| 294 | 294 | ||
| 295 | if (llcp_sock == NULL) | ||
| 296 | return -EBADFD; | ||
| 297 | |||
| 295 | addr->sa_family = AF_NFC; | 298 | addr->sa_family = AF_NFC; |
| 296 | *len = sizeof(struct sockaddr_nfc_llcp); | 299 | *len = sizeof(struct sockaddr_nfc_llcp); |
| 297 | 300 | ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 04040476082e..21fde99e5c56 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -71,7 +71,9 @@ static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head, | |||
| 71 | msg->errno = err; | 71 | msg->errno = err; |
| 72 | destroy_msg(msg); | 72 | destroy_msg(msg); |
| 73 | } while (!list_empty(head)); | 73 | } while (!list_empty(head)); |
| 74 | wake_up(waitq); | 74 | |
| 75 | if (waitq) | ||
| 76 | wake_up(waitq); | ||
| 75 | } | 77 | } |
| 76 | 78 | ||
| 77 | static void | 79 | static void |
| @@ -91,11 +93,9 @@ rpc_timeout_upcall_queue(struct work_struct *work) | |||
| 91 | } | 93 | } |
| 92 | dentry = dget(pipe->dentry); | 94 | dentry = dget(pipe->dentry); |
| 93 | spin_unlock(&pipe->lock); | 95 | spin_unlock(&pipe->lock); |
| 94 | if (dentry) { | 96 | rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL, |
| 95 | rpc_purge_list(&RPC_I(dentry->d_inode)->waitq, | 97 | &free_list, destroy_msg, -ETIMEDOUT); |
| 96 | &free_list, destroy_msg, -ETIMEDOUT); | 98 | dput(dentry); |
| 97 | dput(dentry); | ||
| 98 | } | ||
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, | 101 | ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 7e9baaa1e543..3ee7461926d8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
| @@ -1374,7 +1374,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
| 1374 | sizeof(req->rq_snd_buf)); | 1374 | sizeof(req->rq_snd_buf)); |
| 1375 | return bc_send(req); | 1375 | return bc_send(req); |
| 1376 | } else { | 1376 | } else { |
| 1377 | /* Nothing to do to drop request */ | 1377 | /* drop request */ |
| 1378 | xprt_free_bc_request(req); | ||
| 1378 | return 0; | 1379 | return 0; |
| 1379 | } | 1380 | } |
| 1380 | } | 1381 | } |
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index d2a19b0ff71f..89baa3328411 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
| @@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
| 42 | cfg80211_hold_bss(bss_from_pub(bss)); | 42 | cfg80211_hold_bss(bss_from_pub(bss)); |
| 43 | wdev->current_bss = bss_from_pub(bss); | 43 | wdev->current_bss = bss_from_pub(bss); |
| 44 | 44 | ||
| 45 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
| 45 | cfg80211_upload_connect_keys(wdev); | 46 | cfg80211_upload_connect_keys(wdev); |
| 46 | 47 | ||
| 47 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, | 48 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, |
| @@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | |||
| 60 | struct cfg80211_event *ev; | 61 | struct cfg80211_event *ev; |
| 61 | unsigned long flags; | 62 | unsigned long flags; |
| 62 | 63 | ||
| 63 | CFG80211_DEV_WARN_ON(!wdev->ssid_len); | 64 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); |
| 64 | 65 | ||
| 65 | ev = kzalloc(sizeof(*ev), gfp); | 66 | ev = kzalloc(sizeof(*ev), gfp); |
| 66 | if (!ev) | 67 | if (!ev) |
| @@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
| 115 | #ifdef CONFIG_CFG80211_WEXT | 116 | #ifdef CONFIG_CFG80211_WEXT |
| 116 | wdev->wext.ibss.channel = params->channel; | 117 | wdev->wext.ibss.channel = params->channel; |
| 117 | #endif | 118 | #endif |
| 119 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
| 118 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | 120 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); |
| 119 | if (err) { | 121 | if (err) { |
| 120 | wdev->connect_keys = NULL; | 122 | wdev->connect_keys = NULL; |
| 123 | wdev->sme_state = CFG80211_SME_IDLE; | ||
| 121 | return err; | 124 | return err; |
| 122 | } | 125 | } |
| 123 | 126 | ||
| @@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | |||
| 169 | } | 172 | } |
| 170 | 173 | ||
| 171 | wdev->current_bss = NULL; | 174 | wdev->current_bss = NULL; |
| 175 | wdev->sme_state = CFG80211_SME_IDLE; | ||
| 172 | wdev->ssid_len = 0; | 176 | wdev->ssid_len = 0; |
| 173 | #ifdef CONFIG_CFG80211_WEXT | 177 | #ifdef CONFIG_CFG80211_WEXT |
| 174 | if (!nowext) | 178 | if (!nowext) |
diff --git a/net/wireless/util.c b/net/wireless/util.c index 55d99466babb..8f2d68fc3a44 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
| @@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
| 935 | enum nl80211_iftype iftype) | 935 | enum nl80211_iftype iftype) |
| 936 | { | 936 | { |
| 937 | struct wireless_dev *wdev_iter; | 937 | struct wireless_dev *wdev_iter; |
| 938 | u32 used_iftypes = BIT(iftype); | ||
| 938 | int num[NUM_NL80211_IFTYPES]; | 939 | int num[NUM_NL80211_IFTYPES]; |
| 939 | int total = 1; | 940 | int total = 1; |
| 940 | int i, j; | 941 | int i, j; |
| @@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
| 961 | 962 | ||
| 962 | num[wdev_iter->iftype]++; | 963 | num[wdev_iter->iftype]++; |
| 963 | total++; | 964 | total++; |
| 965 | used_iftypes |= BIT(wdev_iter->iftype); | ||
| 964 | } | 966 | } |
| 965 | mutex_unlock(&rdev->devlist_mtx); | 967 | mutex_unlock(&rdev->devlist_mtx); |
| 966 | 968 | ||
| @@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
| 970 | for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { | 972 | for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { |
| 971 | const struct ieee80211_iface_combination *c; | 973 | const struct ieee80211_iface_combination *c; |
| 972 | struct ieee80211_iface_limit *limits; | 974 | struct ieee80211_iface_limit *limits; |
| 975 | u32 all_iftypes = 0; | ||
| 973 | 976 | ||
| 974 | c = &rdev->wiphy.iface_combinations[i]; | 977 | c = &rdev->wiphy.iface_combinations[i]; |
| 975 | 978 | ||
| @@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
| 984 | if (rdev->wiphy.software_iftypes & BIT(iftype)) | 987 | if (rdev->wiphy.software_iftypes & BIT(iftype)) |
| 985 | continue; | 988 | continue; |
| 986 | for (j = 0; j < c->n_limits; j++) { | 989 | for (j = 0; j < c->n_limits; j++) { |
| 990 | all_iftypes |= limits[j].types; | ||
| 987 | if (!(limits[j].types & BIT(iftype))) | 991 | if (!(limits[j].types & BIT(iftype))) |
| 988 | continue; | 992 | continue; |
| 989 | if (limits[j].max < num[iftype]) | 993 | if (limits[j].max < num[iftype]) |
| @@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
| 991 | limits[j].max -= num[iftype]; | 995 | limits[j].max -= num[iftype]; |
| 992 | } | 996 | } |
| 993 | } | 997 | } |
| 994 | /* yay, it fits */ | 998 | |
| 999 | /* | ||
| 1000 | * Finally check that all iftypes that we're currently | ||
| 1001 | * using are actually part of this combination. If they | ||
| 1002 | * aren't then we can't use this combination and have | ||
| 1003 | * to continue to the next. | ||
| 1004 | */ | ||
| 1005 | if ((all_iftypes & used_iftypes) != used_iftypes) | ||
| 1006 | goto cont; | ||
| 1007 | |||
| 1008 | /* | ||
| 1009 | * This combination covered all interface types and | ||
| 1010 | * supported the requested numbers, so we're good. | ||
| 1011 | */ | ||
| 995 | kfree(limits); | 1012 | kfree(limits); |
| 996 | return 0; | 1013 | return 0; |
| 997 | cont: | 1014 | cont: |
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index a68aed7fce02..ec2118d0e27a 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
| @@ -502,10 +502,8 @@ static int snd_compr_pause(struct snd_compr_stream *stream) | |||
| 502 | if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) | 502 | if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) |
| 503 | return -EPERM; | 503 | return -EPERM; |
| 504 | retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); | 504 | retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); |
| 505 | if (!retval) { | 505 | if (!retval) |
| 506 | stream->runtime->state = SNDRV_PCM_STATE_PAUSED; | 506 | stream->runtime->state = SNDRV_PCM_STATE_PAUSED; |
| 507 | wake_up(&stream->runtime->sleep); | ||
| 508 | } | ||
| 509 | return retval; | 507 | return retval; |
| 510 | } | 508 | } |
| 511 | 509 | ||
| @@ -544,6 +542,10 @@ static int snd_compr_stop(struct snd_compr_stream *stream) | |||
| 544 | if (!retval) { | 542 | if (!retval) { |
| 545 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; | 543 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; |
| 546 | wake_up(&stream->runtime->sleep); | 544 | wake_up(&stream->runtime->sleep); |
| 545 | stream->runtime->hw_pointer = 0; | ||
| 546 | stream->runtime->app_pointer = 0; | ||
| 547 | stream->runtime->total_bytes_available = 0; | ||
| 548 | stream->runtime->total_bytes_transferred = 0; | ||
| 547 | } | 549 | } |
| 548 | return retval; | 550 | return retval; |
| 549 | } | 551 | } |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2b6392be451c..02763827dde0 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -2484,9 +2484,9 @@ static void azx_notifier_unregister(struct azx *chip) | |||
| 2484 | static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); | 2484 | static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); |
| 2485 | static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); | 2485 | static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); |
| 2486 | 2486 | ||
| 2487 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
| 2487 | static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); | 2488 | static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); |
| 2488 | 2489 | ||
| 2489 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
| 2490 | static void azx_vs_set_state(struct pci_dev *pci, | 2490 | static void azx_vs_set_state(struct pci_dev *pci, |
| 2491 | enum vga_switcheroo_state state) | 2491 | enum vga_switcheroo_state state) |
| 2492 | { | 2492 | { |
| @@ -2578,6 +2578,7 @@ static int __devinit register_vga_switcheroo(struct azx *chip) | |||
| 2578 | #else | 2578 | #else |
| 2579 | #define init_vga_switcheroo(chip) /* NOP */ | 2579 | #define init_vga_switcheroo(chip) /* NOP */ |
| 2580 | #define register_vga_switcheroo(chip) 0 | 2580 | #define register_vga_switcheroo(chip) 0 |
| 2581 | #define check_hdmi_disabled(pci) false | ||
| 2581 | #endif /* SUPPORT_VGA_SWITCHER */ | 2582 | #endif /* SUPPORT_VGA_SWITCHER */ |
| 2582 | 2583 | ||
| 2583 | /* | 2584 | /* |
| @@ -2638,6 +2639,7 @@ static int azx_dev_free(struct snd_device *device) | |||
| 2638 | return azx_free(device->device_data); | 2639 | return azx_free(device->device_data); |
| 2639 | } | 2640 | } |
| 2640 | 2641 | ||
| 2642 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
| 2641 | /* | 2643 | /* |
| 2642 | * Check of disabled HDMI controller by vga-switcheroo | 2644 | * Check of disabled HDMI controller by vga-switcheroo |
| 2643 | */ | 2645 | */ |
| @@ -2670,12 +2672,13 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci) | |||
| 2670 | struct pci_dev *p = get_bound_vga(pci); | 2672 | struct pci_dev *p = get_bound_vga(pci); |
| 2671 | 2673 | ||
| 2672 | if (p) { | 2674 | if (p) { |
| 2673 | if (vga_default_device() && p != vga_default_device()) | 2675 | if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF) |
| 2674 | vga_inactive = true; | 2676 | vga_inactive = true; |
| 2675 | pci_dev_put(p); | 2677 | pci_dev_put(p); |
| 2676 | } | 2678 | } |
| 2677 | return vga_inactive; | 2679 | return vga_inactive; |
| 2678 | } | 2680 | } |
| 2681 | #endif /* SUPPORT_VGA_SWITCHEROO */ | ||
| 2679 | 2682 | ||
| 2680 | /* | 2683 | /* |
| 2681 | * white/black-listing for position_fix | 2684 | * white/black-listing for position_fix |
| @@ -3351,6 +3354,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
| 3351 | { PCI_DEVICE(0x6549, 0x1200), | 3354 | { PCI_DEVICE(0x6549, 0x1200), |
| 3352 | .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, | 3355 | .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, |
| 3353 | /* Creative X-Fi (CA0110-IBG) */ | 3356 | /* Creative X-Fi (CA0110-IBG) */ |
| 3357 | /* CTHDA chips */ | ||
| 3358 | { PCI_DEVICE(0x1102, 0x0010), | ||
| 3359 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
| 3360 | { PCI_DEVICE(0x1102, 0x0012), | ||
| 3361 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
| 3354 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) | 3362 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) |
| 3355 | /* the following entry conflicts with snd-ctxfi driver, | 3363 | /* the following entry conflicts with snd-ctxfi driver, |
| 3356 | * as ctxfi driver mutates from HD-audio to native mode with | 3364 | * as ctxfi driver mutates from HD-audio to native mode with |
| @@ -3367,11 +3375,6 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
| 3367 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | | 3375 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | |
| 3368 | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, | 3376 | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, |
| 3369 | #endif | 3377 | #endif |
| 3370 | /* CTHDA chips */ | ||
| 3371 | { PCI_DEVICE(0x1102, 0x0010), | ||
| 3372 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
| 3373 | { PCI_DEVICE(0x1102, 0x0012), | ||
| 3374 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
| 3375 | /* Vortex86MX */ | 3378 | /* Vortex86MX */ |
| 3376 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, | 3379 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, |
| 3377 | /* VMware HDAudio */ | 3380 | /* VMware HDAudio */ |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 3acb5824ad39..172370b3793b 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -4061,7 +4061,7 @@ static void cx_auto_init_digital(struct hda_codec *codec) | |||
| 4061 | static int cx_auto_init(struct hda_codec *codec) | 4061 | static int cx_auto_init(struct hda_codec *codec) |
| 4062 | { | 4062 | { |
| 4063 | struct conexant_spec *spec = codec->spec; | 4063 | struct conexant_spec *spec = codec->spec; |
| 4064 | /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/ | 4064 | snd_hda_gen_apply_verbs(codec); |
| 4065 | cx_auto_init_output(codec); | 4065 | cx_auto_init_output(codec); |
| 4066 | cx_auto_init_input(codec); | 4066 | cx_auto_init_input(codec); |
| 4067 | cx_auto_init_digital(codec); | 4067 | cx_auto_init_digital(codec); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 224410e8e9e7..f8f4906e498d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -1896,6 +1896,7 @@ static int alc_init(struct hda_codec *codec) | |||
| 1896 | alc_fix_pll(codec); | 1896 | alc_fix_pll(codec); |
| 1897 | alc_auto_init_amp(codec, spec->init_amp); | 1897 | alc_auto_init_amp(codec, spec->init_amp); |
| 1898 | 1898 | ||
| 1899 | snd_hda_gen_apply_verbs(codec); | ||
| 1899 | alc_init_special_input_src(codec); | 1900 | alc_init_special_input_src(codec); |
| 1900 | alc_auto_init_std(codec); | 1901 | alc_auto_init_std(codec); |
| 1901 | 1902 | ||
| @@ -6439,6 +6440,7 @@ enum { | |||
| 6439 | ALC662_FIXUP_ASUS_MODE7, | 6440 | ALC662_FIXUP_ASUS_MODE7, |
| 6440 | ALC662_FIXUP_ASUS_MODE8, | 6441 | ALC662_FIXUP_ASUS_MODE8, |
| 6441 | ALC662_FIXUP_NO_JACK_DETECT, | 6442 | ALC662_FIXUP_NO_JACK_DETECT, |
| 6443 | ALC662_FIXUP_ZOTAC_Z68, | ||
| 6442 | }; | 6444 | }; |
| 6443 | 6445 | ||
| 6444 | static const struct alc_fixup alc662_fixups[] = { | 6446 | static const struct alc_fixup alc662_fixups[] = { |
| @@ -6588,6 +6590,13 @@ static const struct alc_fixup alc662_fixups[] = { | |||
| 6588 | .type = ALC_FIXUP_FUNC, | 6590 | .type = ALC_FIXUP_FUNC, |
| 6589 | .v.func = alc_fixup_no_jack_detect, | 6591 | .v.func = alc_fixup_no_jack_detect, |
| 6590 | }, | 6592 | }, |
| 6593 | [ALC662_FIXUP_ZOTAC_Z68] = { | ||
| 6594 | .type = ALC_FIXUP_PINS, | ||
| 6595 | .v.pins = (const struct alc_pincfg[]) { | ||
| 6596 | { 0x1b, 0x02214020 }, /* Front HP */ | ||
| 6597 | { } | ||
| 6598 | } | ||
| 6599 | }, | ||
| 6591 | }; | 6600 | }; |
| 6592 | 6601 | ||
| 6593 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { | 6602 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
| @@ -6601,6 +6610,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { | |||
| 6601 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), | 6610 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), |
| 6602 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), | 6611 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
| 6603 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), | 6612 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), |
| 6613 | SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), | ||
| 6604 | SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), | 6614 | SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), |
| 6605 | 6615 | ||
| 6606 | #if 0 | 6616 | #if 0 |
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a75c3766aede..0418fa11e6bd 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c | |||
| @@ -99,8 +99,9 @@ static void wm2000_reset(struct wm2000_priv *wm2000) | |||
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static int wm2000_poll_bit(struct i2c_client *i2c, | 101 | static int wm2000_poll_bit(struct i2c_client *i2c, |
| 102 | unsigned int reg, u8 mask, int timeout) | 102 | unsigned int reg, u8 mask) |
| 103 | { | 103 | { |
| 104 | int timeout = 4000; | ||
| 104 | int val; | 105 | int val; |
| 105 | 106 | ||
| 106 | val = wm2000_read(i2c, reg); | 107 | val = wm2000_read(i2c, reg); |
| @@ -119,7 +120,7 @@ static int wm2000_poll_bit(struct i2c_client *i2c, | |||
| 119 | static int wm2000_power_up(struct i2c_client *i2c, int analogue) | 120 | static int wm2000_power_up(struct i2c_client *i2c, int analogue) |
| 120 | { | 121 | { |
| 121 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 122 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
| 122 | int ret, timeout; | 123 | int ret; |
| 123 | 124 | ||
| 124 | BUG_ON(wm2000->anc_mode != ANC_OFF); | 125 | BUG_ON(wm2000->anc_mode != ANC_OFF); |
| 125 | 126 | ||
| @@ -140,13 +141,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
| 140 | 141 | ||
| 141 | /* Wait for ANC engine to become ready */ | 142 | /* Wait for ANC engine to become ready */ |
| 142 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 143 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
| 143 | WM2000_ANC_ENG_IDLE, 1)) { | 144 | WM2000_ANC_ENG_IDLE)) { |
| 144 | dev_err(&i2c->dev, "ANC engine failed to reset\n"); | 145 | dev_err(&i2c->dev, "ANC engine failed to reset\n"); |
| 145 | return -ETIMEDOUT; | 146 | return -ETIMEDOUT; |
| 146 | } | 147 | } |
| 147 | 148 | ||
| 148 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 149 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
| 149 | WM2000_STATUS_BOOT_COMPLETE, 1)) { | 150 | WM2000_STATUS_BOOT_COMPLETE)) { |
| 150 | dev_err(&i2c->dev, "ANC engine failed to initialise\n"); | 151 | dev_err(&i2c->dev, "ANC engine failed to initialise\n"); |
| 151 | return -ETIMEDOUT; | 152 | return -ETIMEDOUT; |
| 152 | } | 153 | } |
| @@ -173,16 +174,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
| 173 | dev_dbg(&i2c->dev, "Download complete\n"); | 174 | dev_dbg(&i2c->dev, "Download complete\n"); |
| 174 | 175 | ||
| 175 | if (analogue) { | 176 | if (analogue) { |
| 176 | timeout = 248; | 177 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); |
| 177 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); | ||
| 178 | 178 | ||
| 179 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 179 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 180 | WM2000_MODE_ANA_SEQ_INCLUDE | | 180 | WM2000_MODE_ANA_SEQ_INCLUDE | |
| 181 | WM2000_MODE_MOUSE_ENABLE | | 181 | WM2000_MODE_MOUSE_ENABLE | |
| 182 | WM2000_MODE_THERMAL_ENABLE); | 182 | WM2000_MODE_THERMAL_ENABLE); |
| 183 | } else { | 183 | } else { |
| 184 | timeout = 10; | ||
| 185 | |||
| 186 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 184 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 187 | WM2000_MODE_MOUSE_ENABLE | | 185 | WM2000_MODE_MOUSE_ENABLE | |
| 188 | WM2000_MODE_THERMAL_ENABLE); | 186 | WM2000_MODE_THERMAL_ENABLE); |
| @@ -201,9 +199,8 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
| 201 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 199 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
| 202 | 200 | ||
| 203 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 201 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
| 204 | WM2000_STATUS_MOUSE_ACTIVE, timeout)) { | 202 | WM2000_STATUS_MOUSE_ACTIVE)) { |
| 205 | dev_err(&i2c->dev, "Timed out waiting for device after %dms\n", | 203 | dev_err(&i2c->dev, "Timed out waiting for device\n"); |
| 206 | timeout * 10); | ||
| 207 | return -ETIMEDOUT; | 204 | return -ETIMEDOUT; |
| 208 | } | 205 | } |
| 209 | 206 | ||
| @@ -218,28 +215,25 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
| 218 | static int wm2000_power_down(struct i2c_client *i2c, int analogue) | 215 | static int wm2000_power_down(struct i2c_client *i2c, int analogue) |
| 219 | { | 216 | { |
| 220 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 217 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
| 221 | int timeout; | ||
| 222 | 218 | ||
| 223 | if (analogue) { | 219 | if (analogue) { |
| 224 | timeout = 248; | 220 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); |
| 225 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); | ||
| 226 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 221 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 227 | WM2000_MODE_ANA_SEQ_INCLUDE | | 222 | WM2000_MODE_ANA_SEQ_INCLUDE | |
| 228 | WM2000_MODE_POWER_DOWN); | 223 | WM2000_MODE_POWER_DOWN); |
| 229 | } else { | 224 | } else { |
| 230 | timeout = 10; | ||
| 231 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 225 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 232 | WM2000_MODE_POWER_DOWN); | 226 | WM2000_MODE_POWER_DOWN); |
| 233 | } | 227 | } |
| 234 | 228 | ||
| 235 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 229 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
| 236 | WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) { | 230 | WM2000_STATUS_POWER_DOWN_COMPLETE)) { |
| 237 | dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); | 231 | dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); |
| 238 | return -ETIMEDOUT; | 232 | return -ETIMEDOUT; |
| 239 | } | 233 | } |
| 240 | 234 | ||
| 241 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 235 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
| 242 | WM2000_ANC_ENG_IDLE, 1)) { | 236 | WM2000_ANC_ENG_IDLE)) { |
| 243 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); | 237 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); |
| 244 | return -ETIMEDOUT; | 238 | return -ETIMEDOUT; |
| 245 | } | 239 | } |
| @@ -268,13 +262,13 @@ static int wm2000_enter_bypass(struct i2c_client *i2c, int analogue) | |||
| 268 | } | 262 | } |
| 269 | 263 | ||
| 270 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 264 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
| 271 | WM2000_STATUS_ANC_DISABLED, 10)) { | 265 | WM2000_STATUS_ANC_DISABLED)) { |
| 272 | dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); | 266 | dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); |
| 273 | return -ETIMEDOUT; | 267 | return -ETIMEDOUT; |
| 274 | } | 268 | } |
| 275 | 269 | ||
| 276 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 270 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
| 277 | WM2000_ANC_ENG_IDLE, 1)) { | 271 | WM2000_ANC_ENG_IDLE)) { |
| 278 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); | 272 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); |
| 279 | return -ETIMEDOUT; | 273 | return -ETIMEDOUT; |
| 280 | } | 274 | } |
| @@ -311,7 +305,7 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) | |||
| 311 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 305 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
| 312 | 306 | ||
| 313 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 307 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
| 314 | WM2000_STATUS_MOUSE_ACTIVE, 10)) { | 308 | WM2000_STATUS_MOUSE_ACTIVE)) { |
| 315 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); | 309 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); |
| 316 | return -ETIMEDOUT; | 310 | return -ETIMEDOUT; |
| 317 | } | 311 | } |
| @@ -325,38 +319,32 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) | |||
| 325 | static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) | 319 | static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) |
| 326 | { | 320 | { |
| 327 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 321 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
| 328 | int timeout; | ||
| 329 | 322 | ||
| 330 | BUG_ON(wm2000->anc_mode != ANC_ACTIVE); | 323 | BUG_ON(wm2000->anc_mode != ANC_ACTIVE); |
| 331 | 324 | ||
| 332 | if (analogue) { | 325 | if (analogue) { |
| 333 | timeout = 248; | 326 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); |
| 334 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); | ||
| 335 | 327 | ||
| 336 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 328 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 337 | WM2000_MODE_ANA_SEQ_INCLUDE | | 329 | WM2000_MODE_ANA_SEQ_INCLUDE | |
| 338 | WM2000_MODE_THERMAL_ENABLE | | 330 | WM2000_MODE_THERMAL_ENABLE | |
| 339 | WM2000_MODE_STANDBY_ENTRY); | 331 | WM2000_MODE_STANDBY_ENTRY); |
| 340 | } else { | 332 | } else { |
| 341 | timeout = 10; | ||
| 342 | |||
| 343 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 333 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 344 | WM2000_MODE_THERMAL_ENABLE | | 334 | WM2000_MODE_THERMAL_ENABLE | |
| 345 | WM2000_MODE_STANDBY_ENTRY); | 335 | WM2000_MODE_STANDBY_ENTRY); |
| 346 | } | 336 | } |
| 347 | 337 | ||
| 348 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 338 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
| 349 | WM2000_STATUS_ANC_DISABLED, timeout)) { | 339 | WM2000_STATUS_ANC_DISABLED)) { |
| 350 | dev_err(&i2c->dev, | 340 | dev_err(&i2c->dev, |
| 351 | "Timed out waiting for ANC disable after 1ms\n"); | 341 | "Timed out waiting for ANC disable after 1ms\n"); |
| 352 | return -ETIMEDOUT; | 342 | return -ETIMEDOUT; |
| 353 | } | 343 | } |
| 354 | 344 | ||
| 355 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE, | 345 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) { |
| 356 | 1)) { | ||
| 357 | dev_err(&i2c->dev, | 346 | dev_err(&i2c->dev, |
| 358 | "Timed out waiting for standby after %dms\n", | 347 | "Timed out waiting for standby\n"); |
| 359 | timeout * 10); | ||
| 360 | return -ETIMEDOUT; | 348 | return -ETIMEDOUT; |
| 361 | } | 349 | } |
| 362 | 350 | ||
| @@ -374,23 +362,19 @@ static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) | |||
| 374 | static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) | 362 | static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) |
| 375 | { | 363 | { |
| 376 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 364 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
| 377 | int timeout; | ||
| 378 | 365 | ||
| 379 | BUG_ON(wm2000->anc_mode != ANC_STANDBY); | 366 | BUG_ON(wm2000->anc_mode != ANC_STANDBY); |
| 380 | 367 | ||
| 381 | wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); | 368 | wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); |
| 382 | 369 | ||
| 383 | if (analogue) { | 370 | if (analogue) { |
| 384 | timeout = 248; | 371 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); |
| 385 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); | ||
| 386 | 372 | ||
| 387 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 373 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 388 | WM2000_MODE_ANA_SEQ_INCLUDE | | 374 | WM2000_MODE_ANA_SEQ_INCLUDE | |
| 389 | WM2000_MODE_THERMAL_ENABLE | | 375 | WM2000_MODE_THERMAL_ENABLE | |
| 390 | WM2000_MODE_MOUSE_ENABLE); | 376 | WM2000_MODE_MOUSE_ENABLE); |
| 391 | } else { | 377 | } else { |
| 392 | timeout = 10; | ||
| 393 | |||
| 394 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 378 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
| 395 | WM2000_MODE_THERMAL_ENABLE | | 379 | WM2000_MODE_THERMAL_ENABLE | |
| 396 | WM2000_MODE_MOUSE_ENABLE); | 380 | WM2000_MODE_MOUSE_ENABLE); |
| @@ -400,9 +384,8 @@ static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) | |||
| 400 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 384 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
| 401 | 385 | ||
| 402 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 386 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
| 403 | WM2000_STATUS_MOUSE_ACTIVE, timeout)) { | 387 | WM2000_STATUS_MOUSE_ACTIVE)) { |
| 404 | dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n", | 388 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); |
| 405 | timeout * 10); | ||
| 406 | return -ETIMEDOUT; | 389 | return -ETIMEDOUT; |
| 407 | } | 390 | } |
| 408 | 391 | ||
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 993639d694ce..aa8c98b628da 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
| @@ -46,6 +46,39 @@ | |||
| 46 | #define WM8994_NUM_DRC 3 | 46 | #define WM8994_NUM_DRC 3 |
| 47 | #define WM8994_NUM_EQ 3 | 47 | #define WM8994_NUM_EQ 3 |
| 48 | 48 | ||
| 49 | static struct { | ||
| 50 | unsigned int reg; | ||
| 51 | unsigned int mask; | ||
| 52 | } wm8994_vu_bits[] = { | ||
| 53 | { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, | ||
| 54 | { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, | ||
| 55 | { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, | ||
| 56 | { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, | ||
| 57 | { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU }, | ||
| 58 | { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU }, | ||
| 59 | { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, | ||
| 60 | { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, | ||
| 61 | { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU }, | ||
| 62 | { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU }, | ||
| 63 | |||
| 64 | { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU }, | ||
| 65 | { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU }, | ||
| 66 | { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU }, | ||
| 67 | { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU }, | ||
| 68 | { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU }, | ||
| 69 | { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU }, | ||
| 70 | { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU }, | ||
| 71 | { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU }, | ||
| 72 | { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
| 73 | { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
| 74 | { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU }, | ||
| 75 | { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
| 76 | { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU }, | ||
| 77 | { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU }, | ||
| 78 | { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU }, | ||
| 79 | { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU }, | ||
| 80 | }; | ||
| 81 | |||
| 49 | static int wm8994_drc_base[] = { | 82 | static int wm8994_drc_base[] = { |
| 50 | WM8994_AIF1_DRC1_1, | 83 | WM8994_AIF1_DRC1_1, |
| 51 | WM8994_AIF1_DRC2_1, | 84 | WM8994_AIF1_DRC2_1, |
| @@ -989,6 +1022,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, | |||
| 989 | struct snd_soc_codec *codec = w->codec; | 1022 | struct snd_soc_codec *codec = w->codec; |
| 990 | struct wm8994 *control = codec->control_data; | 1023 | struct wm8994 *control = codec->control_data; |
| 991 | int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; | 1024 | int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; |
| 1025 | int i; | ||
| 992 | int dac; | 1026 | int dac; |
| 993 | int adc; | 1027 | int adc; |
| 994 | int val; | 1028 | int val; |
| @@ -1047,6 +1081,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, | |||
| 1047 | WM8994_AIF1DAC2L_ENA); | 1081 | WM8994_AIF1DAC2L_ENA); |
| 1048 | break; | 1082 | break; |
| 1049 | 1083 | ||
| 1084 | case SND_SOC_DAPM_POST_PMU: | ||
| 1085 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) | ||
| 1086 | snd_soc_write(codec, wm8994_vu_bits[i].reg, | ||
| 1087 | snd_soc_read(codec, | ||
| 1088 | wm8994_vu_bits[i].reg)); | ||
| 1089 | break; | ||
| 1090 | |||
| 1050 | case SND_SOC_DAPM_PRE_PMD: | 1091 | case SND_SOC_DAPM_PRE_PMD: |
| 1051 | case SND_SOC_DAPM_POST_PMD: | 1092 | case SND_SOC_DAPM_POST_PMD: |
| 1052 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | 1093 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, |
| @@ -1072,6 +1113,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, | |||
| 1072 | struct snd_kcontrol *kcontrol, int event) | 1113 | struct snd_kcontrol *kcontrol, int event) |
| 1073 | { | 1114 | { |
| 1074 | struct snd_soc_codec *codec = w->codec; | 1115 | struct snd_soc_codec *codec = w->codec; |
| 1116 | int i; | ||
| 1075 | int dac; | 1117 | int dac; |
| 1076 | int adc; | 1118 | int adc; |
| 1077 | int val; | 1119 | int val; |
| @@ -1122,6 +1164,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, | |||
| 1122 | WM8994_AIF2DACR_ENA); | 1164 | WM8994_AIF2DACR_ENA); |
| 1123 | break; | 1165 | break; |
| 1124 | 1166 | ||
| 1167 | case SND_SOC_DAPM_POST_PMU: | ||
| 1168 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) | ||
| 1169 | snd_soc_write(codec, wm8994_vu_bits[i].reg, | ||
| 1170 | snd_soc_read(codec, | ||
| 1171 | wm8994_vu_bits[i].reg)); | ||
| 1172 | break; | ||
| 1173 | |||
| 1125 | case SND_SOC_DAPM_PRE_PMD: | 1174 | case SND_SOC_DAPM_PRE_PMD: |
| 1126 | case SND_SOC_DAPM_POST_PMD: | 1175 | case SND_SOC_DAPM_POST_PMD: |
| 1127 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | 1176 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, |
| @@ -1190,17 +1239,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w, | |||
| 1190 | switch (event) { | 1239 | switch (event) { |
| 1191 | case SND_SOC_DAPM_PRE_PMU: | 1240 | case SND_SOC_DAPM_PRE_PMU: |
| 1192 | if (wm8994->aif1clk_enable) { | 1241 | if (wm8994->aif1clk_enable) { |
| 1193 | aif1clk_ev(w, kcontrol, event); | 1242 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); |
| 1194 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1243 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
| 1195 | WM8994_AIF1CLK_ENA_MASK, | 1244 | WM8994_AIF1CLK_ENA_MASK, |
| 1196 | WM8994_AIF1CLK_ENA); | 1245 | WM8994_AIF1CLK_ENA); |
| 1246 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); | ||
| 1197 | wm8994->aif1clk_enable = 0; | 1247 | wm8994->aif1clk_enable = 0; |
| 1198 | } | 1248 | } |
| 1199 | if (wm8994->aif2clk_enable) { | 1249 | if (wm8994->aif2clk_enable) { |
| 1200 | aif2clk_ev(w, kcontrol, event); | 1250 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); |
| 1201 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1251 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
| 1202 | WM8994_AIF2CLK_ENA_MASK, | 1252 | WM8994_AIF2CLK_ENA_MASK, |
| 1203 | WM8994_AIF2CLK_ENA); | 1253 | WM8994_AIF2CLK_ENA); |
| 1254 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); | ||
| 1204 | wm8994->aif2clk_enable = 0; | 1255 | wm8994->aif2clk_enable = 0; |
| 1205 | } | 1256 | } |
| 1206 | break; | 1257 | break; |
| @@ -1221,15 +1272,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w, | |||
| 1221 | switch (event) { | 1272 | switch (event) { |
| 1222 | case SND_SOC_DAPM_POST_PMD: | 1273 | case SND_SOC_DAPM_POST_PMD: |
| 1223 | if (wm8994->aif1clk_disable) { | 1274 | if (wm8994->aif1clk_disable) { |
| 1275 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); | ||
| 1224 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1276 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
| 1225 | WM8994_AIF1CLK_ENA_MASK, 0); | 1277 | WM8994_AIF1CLK_ENA_MASK, 0); |
| 1226 | aif1clk_ev(w, kcontrol, event); | 1278 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); |
| 1227 | wm8994->aif1clk_disable = 0; | 1279 | wm8994->aif1clk_disable = 0; |
| 1228 | } | 1280 | } |
| 1229 | if (wm8994->aif2clk_disable) { | 1281 | if (wm8994->aif2clk_disable) { |
| 1282 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); | ||
| 1230 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1283 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
| 1231 | WM8994_AIF2CLK_ENA_MASK, 0); | 1284 | WM8994_AIF2CLK_ENA_MASK, 0); |
| 1232 | aif2clk_ev(w, kcontrol, event); | 1285 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); |
| 1233 | wm8994->aif2clk_disable = 0; | 1286 | wm8994->aif2clk_disable = 0; |
| 1234 | } | 1287 | } |
| 1235 | break; | 1288 | break; |
| @@ -1527,9 +1580,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) | |||
| 1527 | 1580 | ||
| 1528 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { | 1581 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { |
| 1529 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, | 1582 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, |
| 1530 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), | 1583 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | |
| 1584 | SND_SOC_DAPM_PRE_PMD), | ||
| 1531 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, | 1585 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, |
| 1532 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), | 1586 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | |
| 1587 | SND_SOC_DAPM_PRE_PMD), | ||
| 1533 | SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), | 1588 | SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), |
| 1534 | SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, | 1589 | SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, |
| 1535 | left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), | 1590 | left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), |
| @@ -3879,39 +3934,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
| 3879 | 3934 | ||
| 3880 | pm_runtime_put(codec->dev); | 3935 | pm_runtime_put(codec->dev); |
| 3881 | 3936 | ||
| 3882 | /* Latch volume updates (right only; we always do left then right). */ | 3937 | /* Latch volume update bits */ |
| 3883 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME, | 3938 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) |
| 3884 | WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); | 3939 | snd_soc_update_bits(codec, wm8994_vu_bits[i].reg, |
| 3885 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME, | 3940 | wm8994_vu_bits[i].mask, |
| 3886 | WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); | 3941 | wm8994_vu_bits[i].mask); |
| 3887 | snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME, | ||
| 3888 | WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); | ||
| 3889 | snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME, | ||
| 3890 | WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); | ||
| 3891 | snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME, | ||
| 3892 | WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); | ||
| 3893 | snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME, | ||
| 3894 | WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); | ||
| 3895 | snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME, | ||
| 3896 | WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); | ||
| 3897 | snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME, | ||
| 3898 | WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); | ||
| 3899 | snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME, | ||
| 3900 | WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); | ||
| 3901 | snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME, | ||
| 3902 | WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); | ||
| 3903 | snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME, | ||
| 3904 | WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); | ||
| 3905 | snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME, | ||
| 3906 | WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); | ||
| 3907 | snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME, | ||
| 3908 | WM8994_DAC1_VU, WM8994_DAC1_VU); | ||
| 3909 | snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME, | ||
| 3910 | WM8994_DAC1_VU, WM8994_DAC1_VU); | ||
| 3911 | snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME, | ||
| 3912 | WM8994_DAC2_VU, WM8994_DAC2_VU); | ||
| 3913 | snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME, | ||
| 3914 | WM8994_DAC2_VU, WM8994_DAC2_VU); | ||
| 3915 | 3942 | ||
| 3916 | /* Set the low bit of the 3D stereo depth so TLV matches */ | 3943 | /* Set the low bit of the 3D stereo depth so TLV matches */ |
| 3917 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2, | 3944 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2, |
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c index f23700359c67..080327414c6b 100644 --- a/sound/soc/fsl/imx-audmux.c +++ b/sound/soc/fsl/imx-audmux.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
| 27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/pinctrl/consumer.h> | ||
| 29 | 30 | ||
| 30 | #include "imx-audmux.h" | 31 | #include "imx-audmux.h" |
| 31 | 32 | ||
| @@ -249,6 +250,7 @@ EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port); | |||
| 249 | static int __devinit imx_audmux_probe(struct platform_device *pdev) | 250 | static int __devinit imx_audmux_probe(struct platform_device *pdev) |
| 250 | { | 251 | { |
| 251 | struct resource *res; | 252 | struct resource *res; |
| 253 | struct pinctrl *pinctrl; | ||
| 252 | const struct of_device_id *of_id = | 254 | const struct of_device_id *of_id = |
| 253 | of_match_device(imx_audmux_dt_ids, &pdev->dev); | 255 | of_match_device(imx_audmux_dt_ids, &pdev->dev); |
| 254 | 256 | ||
| @@ -257,6 +259,12 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev) | |||
| 257 | if (!audmux_base) | 259 | if (!audmux_base) |
| 258 | return -EADDRNOTAVAIL; | 260 | return -EADDRNOTAVAIL; |
| 259 | 261 | ||
| 262 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
| 263 | if (IS_ERR(pinctrl)) { | ||
| 264 | dev_err(&pdev->dev, "setup pinctrl failed!"); | ||
| 265 | return PTR_ERR(pinctrl); | ||
| 266 | } | ||
| 267 | |||
| 260 | audmux_clk = clk_get(&pdev->dev, "audmux"); | 268 | audmux_clk = clk_get(&pdev->dev, "audmux"); |
| 261 | if (IS_ERR(audmux_clk)) { | 269 | if (IS_ERR(audmux_clk)) { |
| 262 | dev_dbg(&pdev->dev, "cannot get clock: %ld\n", | 270 | dev_dbg(&pdev->dev, "cannot get clock: %ld\n", |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 90ee77d2409d..89eae93445cf 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
| @@ -913,7 +913,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
| 913 | /* do we need to add this widget to the list ? */ | 913 | /* do we need to add this widget to the list ? */ |
| 914 | if (list) { | 914 | if (list) { |
| 915 | int err; | 915 | int err; |
| 916 | err = dapm_list_add_widget(list, path->sink); | 916 | err = dapm_list_add_widget(list, path->source); |
| 917 | if (err < 0) { | 917 | if (err < 0) { |
| 918 | dev_err(widget->dapm->dev, "could not add widget %s\n", | 918 | dev_err(widget->dapm->dev, "could not add widget %s\n", |
| 919 | widget->name); | 919 | widget->name); |
| @@ -954,7 +954,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, | |||
| 954 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) | 954 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) |
| 955 | paths = is_connected_output_ep(dai->playback_widget, list); | 955 | paths = is_connected_output_ep(dai->playback_widget, list); |
| 956 | else | 956 | else |
| 957 | paths = is_connected_input_ep(dai->playback_widget, list); | 957 | paths = is_connected_input_ep(dai->capture_widget, list); |
| 958 | 958 | ||
| 959 | trace_snd_soc_dapm_connected(paths, stream); | 959 | trace_snd_soc_dapm_connected(paths, stream); |
| 960 | dapm_clear_walk(&card->dapm); | 960 | dapm_clear_walk(&card->dapm); |
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index bedd1717a373..48fd15b312c1 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
| @@ -794,6 +794,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, | |||
| 794 | for (i = 0; i < card->num_links; i++) { | 794 | for (i = 0; i < card->num_links; i++) { |
| 795 | be = &card->rtd[i]; | 795 | be = &card->rtd[i]; |
| 796 | 796 | ||
| 797 | if (!be->dai_link->no_pcm) | ||
| 798 | continue; | ||
| 799 | |||
| 797 | if (be->cpu_dai->playback_widget == widget || | 800 | if (be->cpu_dai->playback_widget == widget || |
| 798 | be->codec_dai->playback_widget == widget) | 801 | be->codec_dai->playback_widget == widget) |
| 799 | return be; | 802 | return be; |
| @@ -803,6 +806,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, | |||
| 803 | for (i = 0; i < card->num_links; i++) { | 806 | for (i = 0; i < card->num_links; i++) { |
| 804 | be = &card->rtd[i]; | 807 | be = &card->rtd[i]; |
| 805 | 808 | ||
| 809 | if (!be->dai_link->no_pcm) | ||
| 810 | continue; | ||
| 811 | |||
| 806 | if (be->cpu_dai->capture_widget == widget || | 812 | if (be->cpu_dai->capture_widget == widget || |
| 807 | be->codec_dai->capture_widget == widget) | 813 | be->codec_dai->capture_widget == widget) |
| 808 | return be; | 814 | return be; |
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 57cd419f743e..f43edb364a18 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c | |||
| @@ -629,3 +629,4 @@ MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); | |||
| 629 | MODULE_DESCRIPTION("Tegra30 AHUB driver"); | 629 | MODULE_DESCRIPTION("Tegra30 AHUB driver"); |
| 630 | MODULE_LICENSE("GPL v2"); | 630 | MODULE_LICENSE("GPL v2"); |
| 631 | MODULE_ALIAS("platform:" DRV_NAME); | 631 | MODULE_ALIAS("platform:" DRV_NAME); |
| 632 | MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match); | ||
diff --git a/sound/usb/card.h b/sound/usb/card.h index 0d37238b8457..2b9fffff23b6 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h | |||
| @@ -119,6 +119,7 @@ struct snd_usb_substream { | |||
| 119 | unsigned long unlink_mask; /* bitmask of unlinked urbs */ | 119 | unsigned long unlink_mask; /* bitmask of unlinked urbs */ |
| 120 | 120 | ||
| 121 | /* data and sync endpoints for this stream */ | 121 | /* data and sync endpoints for this stream */ |
| 122 | unsigned int ep_num; /* the endpoint number */ | ||
| 122 | struct snd_usb_endpoint *data_endpoint; | 123 | struct snd_usb_endpoint *data_endpoint; |
| 123 | struct snd_usb_endpoint *sync_endpoint; | 124 | struct snd_usb_endpoint *sync_endpoint; |
| 124 | unsigned long flags; | 125 | unsigned long flags; |
diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 6b7d7a2b7baa..083ed81160e5 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c | |||
| @@ -97,6 +97,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, | |||
| 97 | subs->formats |= fp->formats; | 97 | subs->formats |= fp->formats; |
| 98 | subs->num_formats++; | 98 | subs->num_formats++; |
| 99 | subs->fmt_type = fp->fmt_type; | 99 | subs->fmt_type = fp->fmt_type; |
| 100 | subs->ep_num = fp->endpoint; | ||
| 100 | } | 101 | } |
| 101 | 102 | ||
| 102 | /* | 103 | /* |
| @@ -119,9 +120,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, | |||
| 119 | if (as->fmt_type != fp->fmt_type) | 120 | if (as->fmt_type != fp->fmt_type) |
| 120 | continue; | 121 | continue; |
| 121 | subs = &as->substream[stream]; | 122 | subs = &as->substream[stream]; |
| 122 | if (!subs->data_endpoint) | 123 | if (subs->ep_num == fp->endpoint) { |
| 123 | continue; | ||
| 124 | if (subs->data_endpoint->ep_num == fp->endpoint) { | ||
| 125 | list_add_tail(&fp->list, &subs->fmt_list); | 124 | list_add_tail(&fp->list, &subs->fmt_list); |
| 126 | subs->num_formats++; | 125 | subs->num_formats++; |
| 127 | subs->formats |= fp->formats; | 126 | subs->formats |= fp->formats; |
| @@ -134,7 +133,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, | |||
| 134 | if (as->fmt_type != fp->fmt_type) | 133 | if (as->fmt_type != fp->fmt_type) |
| 135 | continue; | 134 | continue; |
| 136 | subs = &as->substream[stream]; | 135 | subs = &as->substream[stream]; |
| 137 | if (subs->data_endpoint) | 136 | if (subs->ep_num) |
| 138 | continue; | 137 | continue; |
| 139 | err = snd_pcm_new_stream(as->pcm, stream, 1); | 138 | err = snd_pcm_new_stream(as->pcm, stream, 1); |
| 140 | if (err < 0) | 139 | if (err < 0) |
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 5476bc0a1eac..b4b572e8c100 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST | |||
| @@ -1,4 +1,6 @@ | |||
| 1 | tools/perf | 1 | tools/perf |
| 2 | tools/scripts | ||
| 3 | tools/lib/traceevent | ||
| 2 | include/linux/const.h | 4 | include/linux/const.h |
| 3 | include/linux/perf_event.h | 5 | include/linux/perf_event.h |
| 4 | include/linux/rbtree.h | 6 | include/linux/rbtree.h |
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 8c767c6bca91..25249f76329d 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
| @@ -152,7 +152,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, | |||
| 152 | 152 | ||
| 153 | if (symbol_conf.use_callchain) { | 153 | if (symbol_conf.use_callchain) { |
| 154 | err = callchain_append(he->callchain, | 154 | err = callchain_append(he->callchain, |
| 155 | &evsel->hists.callchain_cursor, | 155 | &callchain_cursor, |
| 156 | sample->period); | 156 | sample->period); |
| 157 | if (err) | 157 | if (err) |
| 158 | return err; | 158 | return err; |
| @@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, | |||
| 162 | * so we don't allocated the extra space needed because the stdio | 162 | * so we don't allocated the extra space needed because the stdio |
| 163 | * code will not use it. | 163 | * code will not use it. |
| 164 | */ | 164 | */ |
| 165 | if (al->sym != NULL && use_browser > 0) { | 165 | if (he->ms.sym != NULL && use_browser > 0) { |
| 166 | struct annotation *notes = symbol__annotation(he->ms.sym); | 166 | struct annotation *notes = symbol__annotation(he->ms.sym); |
| 167 | 167 | ||
| 168 | assert(evsel != NULL); | 168 | assert(evsel != NULL); |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 62ae30d34fa6..262589991ea4 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
| @@ -1129,7 +1129,7 @@ static int add_default_attributes(void) | |||
| 1129 | return 0; | 1129 | return 0; |
| 1130 | 1130 | ||
| 1131 | if (!evsel_list->nr_entries) { | 1131 | if (!evsel_list->nr_entries) { |
| 1132 | if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) | 1132 | if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) |
| 1133 | return -1; | 1133 | return -1; |
| 1134 | } | 1134 | } |
| 1135 | 1135 | ||
| @@ -1139,21 +1139,21 @@ static int add_default_attributes(void) | |||
| 1139 | return 0; | 1139 | return 0; |
| 1140 | 1140 | ||
| 1141 | /* Append detailed run extra attributes: */ | 1141 | /* Append detailed run extra attributes: */ |
| 1142 | if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) | 1142 | if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) |
| 1143 | return -1; | 1143 | return -1; |
| 1144 | 1144 | ||
| 1145 | if (detailed_run < 2) | 1145 | if (detailed_run < 2) |
| 1146 | return 0; | 1146 | return 0; |
| 1147 | 1147 | ||
| 1148 | /* Append very detailed run extra attributes: */ | 1148 | /* Append very detailed run extra attributes: */ |
| 1149 | if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) | 1149 | if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) |
| 1150 | return -1; | 1150 | return -1; |
| 1151 | 1151 | ||
| 1152 | if (detailed_run < 3) | 1152 | if (detailed_run < 3) |
| 1153 | return 0; | 1153 | return 0; |
| 1154 | 1154 | ||
| 1155 | /* Append very, very detailed run extra attributes: */ | 1155 | /* Append very, very detailed run extra attributes: */ |
| 1156 | return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); | 1156 | return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); |
| 1157 | } | 1157 | } |
| 1158 | 1158 | ||
| 1159 | int cmd_stat(int argc, const char **argv, const char *prefix __used) | 1159 | int cmd_stat(int argc, const char **argv, const char *prefix __used) |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 871b540293e1..6bb0277b7dfe 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
| @@ -787,7 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool, | |||
| 787 | } | 787 | } |
| 788 | 788 | ||
| 789 | if (symbol_conf.use_callchain) { | 789 | if (symbol_conf.use_callchain) { |
| 790 | err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, | 790 | err = callchain_append(he->callchain, &callchain_cursor, |
| 791 | sample->period); | 791 | sample->period); |
| 792 | if (err) | 792 | if (err) |
| 793 | return; | 793 | return; |
diff --git a/tools/perf/design.txt b/tools/perf/design.txt index bd0bb1b1279b..67e5d0cace85 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt | |||
| @@ -409,14 +409,15 @@ Counters can be enabled and disabled in two ways: via ioctl and via | |||
| 409 | prctl. When a counter is disabled, it doesn't count or generate | 409 | prctl. When a counter is disabled, it doesn't count or generate |
| 410 | events but does continue to exist and maintain its count value. | 410 | events but does continue to exist and maintain its count value. |
| 411 | 411 | ||
| 412 | An individual counter or counter group can be enabled with | 412 | An individual counter can be enabled with |
| 413 | 413 | ||
| 414 | ioctl(fd, PERF_EVENT_IOC_ENABLE); | 414 | ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); |
| 415 | 415 | ||
| 416 | or disabled with | 416 | or disabled with |
| 417 | 417 | ||
| 418 | ioctl(fd, PERF_EVENT_IOC_DISABLE); | 418 | ioctl(fd, PERF_EVENT_IOC_DISABLE, 0); |
| 419 | 419 | ||
| 420 | For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument. | ||
| 420 | Enabling or disabling the leader of a group enables or disables the | 421 | Enabling or disabling the leader of a group enables or disables the |
| 421 | whole group; that is, while the group leader is disabled, none of the | 422 | whole group; that is, while the group leader is disabled, none of the |
| 422 | counters in the group will count. Enabling or disabling a member of a | 423 | counters in the group will count. Enabling or disabling a member of a |
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 4deea6aaf927..34b1c46eaf42 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c | |||
| @@ -668,7 +668,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx, | |||
| 668 | "q/ESC/CTRL+C Exit\n\n" | 668 | "q/ESC/CTRL+C Exit\n\n" |
| 669 | "-> Go to target\n" | 669 | "-> Go to target\n" |
| 670 | "<- Exit\n" | 670 | "<- Exit\n" |
| 671 | "h Cycle thru hottest instructions\n" | 671 | "H Cycle thru hottest instructions\n" |
| 672 | "j Toggle showing jump to target arrows\n" | 672 | "j Toggle showing jump to target arrows\n" |
| 673 | "J Toggle showing number of jump sources on targets\n" | 673 | "J Toggle showing number of jump sources on targets\n" |
| 674 | "n Search next string\n" | 674 | "n Search next string\n" |
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN index ad73300f7bac..95264f304179 100755 --- a/tools/perf/util/PERF-VERSION-GEN +++ b/tools/perf/util/PERF-VERSION-GEN | |||
| @@ -12,7 +12,7 @@ LF=' | |||
| 12 | # First check if there is a .git to get the version from git describe | 12 | # First check if there is a .git to get the version from git describe |
| 13 | # otherwise try to get the version from the kernel makefile | 13 | # otherwise try to get the version from the kernel makefile |
| 14 | if test -d ../../.git -o -f ../../.git && | 14 | if test -d ../../.git -o -f ../../.git && |
| 15 | VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && | 15 | VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && |
| 16 | case "$VN" in | 16 | case "$VN" in |
| 17 | *$LF*) (exit 1) ;; | 17 | *$LF*) (exit 1) ;; |
| 18 | v[0-9]*) | 18 | v[0-9]*) |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 9f7106a8d9a4..3a6bff47614f 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | #include "util.h" | 18 | #include "util.h" |
| 19 | #include "callchain.h" | 19 | #include "callchain.h" |
| 20 | 20 | ||
| 21 | __thread struct callchain_cursor callchain_cursor; | ||
| 22 | |||
| 21 | bool ip_callchain__valid(struct ip_callchain *chain, | 23 | bool ip_callchain__valid(struct ip_callchain *chain, |
| 22 | const union perf_event *event) | 24 | const union perf_event *event) |
| 23 | { | 25 | { |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 7f9c0f1ae3a9..3bdb407f9cd9 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
| @@ -76,6 +76,8 @@ struct callchain_cursor { | |||
| 76 | struct callchain_cursor_node *curr; | 76 | struct callchain_cursor_node *curr; |
| 77 | }; | 77 | }; |
| 78 | 78 | ||
| 79 | extern __thread struct callchain_cursor callchain_cursor; | ||
| 80 | |||
| 79 | static inline void callchain_init(struct callchain_root *root) | 81 | static inline void callchain_init(struct callchain_root *root) |
| 80 | { | 82 | { |
| 81 | INIT_LIST_HEAD(&root->node.siblings); | 83 | INIT_LIST_HEAD(&root->node.siblings); |
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 4ac5f5ae4ce9..7400fb3fc50c 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
| @@ -159,6 +159,17 @@ out_delete_partial_list: | |||
| 159 | return -1; | 159 | return -1; |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, | ||
| 163 | struct perf_event_attr *attrs, size_t nr_attrs) | ||
| 164 | { | ||
| 165 | size_t i; | ||
| 166 | |||
| 167 | for (i = 0; i < nr_attrs; i++) | ||
| 168 | event_attr_init(attrs + i); | ||
| 169 | |||
| 170 | return perf_evlist__add_attrs(evlist, attrs, nr_attrs); | ||
| 171 | } | ||
| 172 | |||
| 162 | static int trace_event__id(const char *evname) | 173 | static int trace_event__id(const char *evname) |
| 163 | { | 174 | { |
| 164 | char *filename, *colon; | 175 | char *filename, *colon; |
| @@ -263,7 +274,8 @@ void perf_evlist__disable(struct perf_evlist *evlist) | |||
| 263 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 274 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { |
| 264 | list_for_each_entry(pos, &evlist->entries, node) { | 275 | list_for_each_entry(pos, &evlist->entries, node) { |
| 265 | for (thread = 0; thread < evlist->threads->nr; thread++) | 276 | for (thread = 0; thread < evlist->threads->nr; thread++) |
| 266 | ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); | 277 | ioctl(FD(pos, cpu, thread), |
| 278 | PERF_EVENT_IOC_DISABLE, 0); | ||
| 267 | } | 279 | } |
| 268 | } | 280 | } |
| 269 | } | 281 | } |
| @@ -276,7 +288,8 @@ void perf_evlist__enable(struct perf_evlist *evlist) | |||
| 276 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 288 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { |
| 277 | list_for_each_entry(pos, &evlist->entries, node) { | 289 | list_for_each_entry(pos, &evlist->entries, node) { |
| 278 | for (thread = 0; thread < evlist->threads->nr; thread++) | 290 | for (thread = 0; thread < evlist->threads->nr; thread++) |
| 279 | ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); | 291 | ioctl(FD(pos, cpu, thread), |
| 292 | PERF_EVENT_IOC_ENABLE, 0); | ||
| 280 | } | 293 | } |
| 281 | } | 294 | } |
| 282 | } | 295 | } |
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 58abb63ac13a..989bee9624c2 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
| @@ -54,6 +54,8 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); | |||
| 54 | int perf_evlist__add_default(struct perf_evlist *evlist); | 54 | int perf_evlist__add_default(struct perf_evlist *evlist); |
| 55 | int perf_evlist__add_attrs(struct perf_evlist *evlist, | 55 | int perf_evlist__add_attrs(struct perf_evlist *evlist, |
| 56 | struct perf_event_attr *attrs, size_t nr_attrs); | 56 | struct perf_event_attr *attrs, size_t nr_attrs); |
| 57 | int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, | ||
| 58 | struct perf_event_attr *attrs, size_t nr_attrs); | ||
| 57 | int perf_evlist__add_tracepoints(struct perf_evlist *evlist, | 59 | int perf_evlist__add_tracepoints(struct perf_evlist *evlist, |
| 58 | const char *tracepoints[], size_t nr_tracepoints); | 60 | const char *tracepoints[], size_t nr_tracepoints); |
| 59 | int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, | 61 | int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, |
| @@ -62,6 +64,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, | |||
| 62 | 64 | ||
| 63 | #define perf_evlist__add_attrs_array(evlist, array) \ | 65 | #define perf_evlist__add_attrs_array(evlist, array) \ |
| 64 | perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) | 66 | perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) |
| 67 | #define perf_evlist__add_default_attrs(evlist, array) \ | ||
| 68 | __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) | ||
| 65 | 69 | ||
| 66 | #define perf_evlist__add_tracepoints_array(evlist, array) \ | 70 | #define perf_evlist__add_tracepoints_array(evlist, array) \ |
| 67 | perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) | 71 | perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 91d19138f3ec..9f6cebd798ee 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
| @@ -494,16 +494,24 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel, | |||
| 494 | } | 494 | } |
| 495 | 495 | ||
| 496 | static int perf_event__parse_id_sample(const union perf_event *event, u64 type, | 496 | static int perf_event__parse_id_sample(const union perf_event *event, u64 type, |
| 497 | struct perf_sample *sample) | 497 | struct perf_sample *sample, |
| 498 | bool swapped) | ||
| 498 | { | 499 | { |
| 499 | const u64 *array = event->sample.array; | 500 | const u64 *array = event->sample.array; |
| 501 | union u64_swap u; | ||
| 500 | 502 | ||
| 501 | array += ((event->header.size - | 503 | array += ((event->header.size - |
| 502 | sizeof(event->header)) / sizeof(u64)) - 1; | 504 | sizeof(event->header)) / sizeof(u64)) - 1; |
| 503 | 505 | ||
| 504 | if (type & PERF_SAMPLE_CPU) { | 506 | if (type & PERF_SAMPLE_CPU) { |
| 505 | u32 *p = (u32 *)array; | 507 | u.val64 = *array; |
| 506 | sample->cpu = *p; | 508 | if (swapped) { |
| 509 | /* undo swap of u64, then swap on individual u32s */ | ||
| 510 | u.val64 = bswap_64(u.val64); | ||
| 511 | u.val32[0] = bswap_32(u.val32[0]); | ||
| 512 | } | ||
| 513 | |||
| 514 | sample->cpu = u.val32[0]; | ||
| 507 | array--; | 515 | array--; |
| 508 | } | 516 | } |
| 509 | 517 | ||
| @@ -523,9 +531,16 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type, | |||
| 523 | } | 531 | } |
| 524 | 532 | ||
| 525 | if (type & PERF_SAMPLE_TID) { | 533 | if (type & PERF_SAMPLE_TID) { |
| 526 | u32 *p = (u32 *)array; | 534 | u.val64 = *array; |
| 527 | sample->pid = p[0]; | 535 | if (swapped) { |
| 528 | sample->tid = p[1]; | 536 | /* undo swap of u64, then swap on individual u32s */ |
| 537 | u.val64 = bswap_64(u.val64); | ||
| 538 | u.val32[0] = bswap_32(u.val32[0]); | ||
| 539 | u.val32[1] = bswap_32(u.val32[1]); | ||
| 540 | } | ||
| 541 | |||
| 542 | sample->pid = u.val32[0]; | ||
| 543 | sample->tid = u.val32[1]; | ||
| 529 | } | 544 | } |
| 530 | 545 | ||
| 531 | return 0; | 546 | return 0; |
| @@ -562,7 +577,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
| 562 | if (event->header.type != PERF_RECORD_SAMPLE) { | 577 | if (event->header.type != PERF_RECORD_SAMPLE) { |
| 563 | if (!sample_id_all) | 578 | if (!sample_id_all) |
| 564 | return 0; | 579 | return 0; |
| 565 | return perf_event__parse_id_sample(event, type, data); | 580 | return perf_event__parse_id_sample(event, type, data, swapped); |
| 566 | } | 581 | } |
| 567 | 582 | ||
| 568 | array = event->sample.array; | 583 | array = event->sample.array; |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 1293b5ebea4d..514e2a4b367d 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
| @@ -378,7 +378,7 @@ void hist_entry__free(struct hist_entry *he) | |||
| 378 | * collapse the histogram | 378 | * collapse the histogram |
| 379 | */ | 379 | */ |
| 380 | 380 | ||
| 381 | static bool hists__collapse_insert_entry(struct hists *hists, | 381 | static bool hists__collapse_insert_entry(struct hists *hists __used, |
| 382 | struct rb_root *root, | 382 | struct rb_root *root, |
| 383 | struct hist_entry *he) | 383 | struct hist_entry *he) |
| 384 | { | 384 | { |
| @@ -397,8 +397,9 @@ static bool hists__collapse_insert_entry(struct hists *hists, | |||
| 397 | iter->period += he->period; | 397 | iter->period += he->period; |
| 398 | iter->nr_events += he->nr_events; | 398 | iter->nr_events += he->nr_events; |
| 399 | if (symbol_conf.use_callchain) { | 399 | if (symbol_conf.use_callchain) { |
| 400 | callchain_cursor_reset(&hists->callchain_cursor); | 400 | callchain_cursor_reset(&callchain_cursor); |
| 401 | callchain_merge(&hists->callchain_cursor, iter->callchain, | 401 | callchain_merge(&callchain_cursor, |
| 402 | iter->callchain, | ||
| 402 | he->callchain); | 403 | he->callchain); |
| 403 | } | 404 | } |
| 404 | hist_entry__free(he); | 405 | hist_entry__free(he); |
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index cfc64e293f90..34bb556d6219 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
| @@ -67,8 +67,6 @@ struct hists { | |||
| 67 | struct events_stats stats; | 67 | struct events_stats stats; |
| 68 | u64 event_stream; | 68 | u64 event_stream; |
| 69 | u16 col_len[HISTC_NR_COLS]; | 69 | u16 col_len[HISTC_NR_COLS]; |
| 70 | /* Best would be to reuse the session callchain cursor */ | ||
| 71 | struct callchain_cursor callchain_cursor; | ||
| 72 | }; | 70 | }; |
| 73 | 71 | ||
| 74 | struct hist_entry *__hists__add_entry(struct hists *self, | 72 | struct hist_entry *__hists__add_entry(struct hists *self, |
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c index 1915de20dcac..3322b8446e89 100644 --- a/tools/perf/util/pager.c +++ b/tools/perf/util/pager.c | |||
| @@ -57,6 +57,10 @@ void setup_pager(void) | |||
| 57 | } | 57 | } |
| 58 | if (!pager) | 58 | if (!pager) |
| 59 | pager = getenv("PAGER"); | 59 | pager = getenv("PAGER"); |
| 60 | if (!pager) { | ||
| 61 | if (!access("/usr/bin/pager", X_OK)) | ||
| 62 | pager = "/usr/bin/pager"; | ||
| 63 | } | ||
| 60 | if (!pager) | 64 | if (!pager) |
| 61 | pager = "less"; | 65 | pager = "less"; |
| 62 | else if (!*pager || !strcmp(pager, "cat")) | 66 | else if (!*pager || !strcmp(pager, "cat")) |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 59dccc98b554..0dda25d82d06 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
| @@ -2164,16 +2164,12 @@ int del_perf_probe_events(struct strlist *dellist) | |||
| 2164 | 2164 | ||
| 2165 | error: | 2165 | error: |
| 2166 | if (kfd >= 0) { | 2166 | if (kfd >= 0) { |
| 2167 | if (namelist) | 2167 | strlist__delete(namelist); |
| 2168 | strlist__delete(namelist); | ||
| 2169 | |||
| 2170 | close(kfd); | 2168 | close(kfd); |
| 2171 | } | 2169 | } |
| 2172 | 2170 | ||
| 2173 | if (ufd >= 0) { | 2171 | if (ufd >= 0) { |
| 2174 | if (unamelist) | 2172 | strlist__delete(unamelist); |
| 2175 | strlist__delete(unamelist); | ||
| 2176 | |||
| 2177 | close(ufd); | 2173 | close(ufd); |
| 2178 | } | 2174 | } |
| 2179 | 2175 | ||
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 93d355d27109..2600916efa83 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
| @@ -288,7 +288,8 @@ struct branch_info *machine__resolve_bstack(struct machine *self, | |||
| 288 | return bi; | 288 | return bi; |
| 289 | } | 289 | } |
| 290 | 290 | ||
| 291 | int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | 291 | int machine__resolve_callchain(struct machine *self, |
| 292 | struct perf_evsel *evsel __used, | ||
| 292 | struct thread *thread, | 293 | struct thread *thread, |
| 293 | struct ip_callchain *chain, | 294 | struct ip_callchain *chain, |
| 294 | struct symbol **parent) | 295 | struct symbol **parent) |
| @@ -297,7 +298,12 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | |||
| 297 | unsigned int i; | 298 | unsigned int i; |
| 298 | int err; | 299 | int err; |
| 299 | 300 | ||
| 300 | callchain_cursor_reset(&evsel->hists.callchain_cursor); | 301 | callchain_cursor_reset(&callchain_cursor); |
| 302 | |||
| 303 | if (chain->nr > PERF_MAX_STACK_DEPTH) { | ||
| 304 | pr_warning("corrupted callchain. skipping...\n"); | ||
| 305 | return 0; | ||
| 306 | } | ||
| 301 | 307 | ||
| 302 | for (i = 0; i < chain->nr; i++) { | 308 | for (i = 0; i < chain->nr; i++) { |
| 303 | u64 ip; | 309 | u64 ip; |
| @@ -317,7 +323,14 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | |||
| 317 | case PERF_CONTEXT_USER: | 323 | case PERF_CONTEXT_USER: |
| 318 | cpumode = PERF_RECORD_MISC_USER; break; | 324 | cpumode = PERF_RECORD_MISC_USER; break; |
| 319 | default: | 325 | default: |
| 320 | break; | 326 | pr_debug("invalid callchain context: " |
| 327 | "%"PRId64"\n", (s64) ip); | ||
| 328 | /* | ||
| 329 | * It seems the callchain is corrupted. | ||
| 330 | * Discard all. | ||
| 331 | */ | ||
| 332 | callchain_cursor_reset(&callchain_cursor); | ||
| 333 | return 0; | ||
| 321 | } | 334 | } |
| 322 | continue; | 335 | continue; |
| 323 | } | 336 | } |
| @@ -333,7 +346,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | |||
| 333 | break; | 346 | break; |
| 334 | } | 347 | } |
| 335 | 348 | ||
| 336 | err = callchain_cursor_append(&evsel->hists.callchain_cursor, | 349 | err = callchain_cursor_append(&callchain_cursor, |
| 337 | ip, al.map, al.sym); | 350 | ip, al.map, al.sym); |
| 338 | if (err) | 351 | if (err) |
| 339 | return err; | 352 | return err; |
| @@ -441,37 +454,65 @@ void mem_bswap_64(void *src, int byte_size) | |||
| 441 | } | 454 | } |
| 442 | } | 455 | } |
| 443 | 456 | ||
| 444 | static void perf_event__all64_swap(union perf_event *event) | 457 | static void swap_sample_id_all(union perf_event *event, void *data) |
| 458 | { | ||
| 459 | void *end = (void *) event + event->header.size; | ||
| 460 | int size = end - data; | ||
| 461 | |||
| 462 | BUG_ON(size % sizeof(u64)); | ||
| 463 | mem_bswap_64(data, size); | ||
| 464 | } | ||
| 465 | |||
| 466 | static void perf_event__all64_swap(union perf_event *event, | ||
| 467 | bool sample_id_all __used) | ||
| 445 | { | 468 | { |
| 446 | struct perf_event_header *hdr = &event->header; | 469 | struct perf_event_header *hdr = &event->header; |
| 447 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); | 470 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); |
| 448 | } | 471 | } |
| 449 | 472 | ||
| 450 | static void perf_event__comm_swap(union perf_event *event) | 473 | static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) |
| 451 | { | 474 | { |
| 452 | event->comm.pid = bswap_32(event->comm.pid); | 475 | event->comm.pid = bswap_32(event->comm.pid); |
| 453 | event->comm.tid = bswap_32(event->comm.tid); | 476 | event->comm.tid = bswap_32(event->comm.tid); |
| 477 | |||
| 478 | if (sample_id_all) { | ||
| 479 | void *data = &event->comm.comm; | ||
| 480 | |||
| 481 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | ||
| 482 | swap_sample_id_all(event, data); | ||
| 483 | } | ||
| 454 | } | 484 | } |
| 455 | 485 | ||
| 456 | static void perf_event__mmap_swap(union perf_event *event) | 486 | static void perf_event__mmap_swap(union perf_event *event, |
| 487 | bool sample_id_all) | ||
| 457 | { | 488 | { |
| 458 | event->mmap.pid = bswap_32(event->mmap.pid); | 489 | event->mmap.pid = bswap_32(event->mmap.pid); |
| 459 | event->mmap.tid = bswap_32(event->mmap.tid); | 490 | event->mmap.tid = bswap_32(event->mmap.tid); |
| 460 | event->mmap.start = bswap_64(event->mmap.start); | 491 | event->mmap.start = bswap_64(event->mmap.start); |
| 461 | event->mmap.len = bswap_64(event->mmap.len); | 492 | event->mmap.len = bswap_64(event->mmap.len); |
| 462 | event->mmap.pgoff = bswap_64(event->mmap.pgoff); | 493 | event->mmap.pgoff = bswap_64(event->mmap.pgoff); |
| 494 | |||
| 495 | if (sample_id_all) { | ||
| 496 | void *data = &event->mmap.filename; | ||
| 497 | |||
| 498 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | ||
| 499 | swap_sample_id_all(event, data); | ||
| 500 | } | ||
| 463 | } | 501 | } |
| 464 | 502 | ||
| 465 | static void perf_event__task_swap(union perf_event *event) | 503 | static void perf_event__task_swap(union perf_event *event, bool sample_id_all) |
| 466 | { | 504 | { |
| 467 | event->fork.pid = bswap_32(event->fork.pid); | 505 | event->fork.pid = bswap_32(event->fork.pid); |
| 468 | event->fork.tid = bswap_32(event->fork.tid); | 506 | event->fork.tid = bswap_32(event->fork.tid); |
| 469 | event->fork.ppid = bswap_32(event->fork.ppid); | 507 | event->fork.ppid = bswap_32(event->fork.ppid); |
| 470 | event->fork.ptid = bswap_32(event->fork.ptid); | 508 | event->fork.ptid = bswap_32(event->fork.ptid); |
| 471 | event->fork.time = bswap_64(event->fork.time); | 509 | event->fork.time = bswap_64(event->fork.time); |
| 510 | |||
| 511 | if (sample_id_all) | ||
| 512 | swap_sample_id_all(event, &event->fork + 1); | ||
| 472 | } | 513 | } |
| 473 | 514 | ||
| 474 | static void perf_event__read_swap(union perf_event *event) | 515 | static void perf_event__read_swap(union perf_event *event, bool sample_id_all) |
| 475 | { | 516 | { |
| 476 | event->read.pid = bswap_32(event->read.pid); | 517 | event->read.pid = bswap_32(event->read.pid); |
| 477 | event->read.tid = bswap_32(event->read.tid); | 518 | event->read.tid = bswap_32(event->read.tid); |
| @@ -479,6 +520,9 @@ static void perf_event__read_swap(union perf_event *event) | |||
| 479 | event->read.time_enabled = bswap_64(event->read.time_enabled); | 520 | event->read.time_enabled = bswap_64(event->read.time_enabled); |
| 480 | event->read.time_running = bswap_64(event->read.time_running); | 521 | event->read.time_running = bswap_64(event->read.time_running); |
| 481 | event->read.id = bswap_64(event->read.id); | 522 | event->read.id = bswap_64(event->read.id); |
| 523 | |||
| 524 | if (sample_id_all) | ||
| 525 | swap_sample_id_all(event, &event->read + 1); | ||
| 482 | } | 526 | } |
| 483 | 527 | ||
| 484 | static u8 revbyte(u8 b) | 528 | static u8 revbyte(u8 b) |
| @@ -530,7 +574,8 @@ void perf_event__attr_swap(struct perf_event_attr *attr) | |||
| 530 | swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); | 574 | swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); |
| 531 | } | 575 | } |
| 532 | 576 | ||
| 533 | static void perf_event__hdr_attr_swap(union perf_event *event) | 577 | static void perf_event__hdr_attr_swap(union perf_event *event, |
| 578 | bool sample_id_all __used) | ||
| 534 | { | 579 | { |
| 535 | size_t size; | 580 | size_t size; |
| 536 | 581 | ||
| @@ -541,18 +586,21 @@ static void perf_event__hdr_attr_swap(union perf_event *event) | |||
| 541 | mem_bswap_64(event->attr.id, size); | 586 | mem_bswap_64(event->attr.id, size); |
| 542 | } | 587 | } |
| 543 | 588 | ||
| 544 | static void perf_event__event_type_swap(union perf_event *event) | 589 | static void perf_event__event_type_swap(union perf_event *event, |
| 590 | bool sample_id_all __used) | ||
| 545 | { | 591 | { |
| 546 | event->event_type.event_type.event_id = | 592 | event->event_type.event_type.event_id = |
| 547 | bswap_64(event->event_type.event_type.event_id); | 593 | bswap_64(event->event_type.event_type.event_id); |
| 548 | } | 594 | } |
| 549 | 595 | ||
| 550 | static void perf_event__tracing_data_swap(union perf_event *event) | 596 | static void perf_event__tracing_data_swap(union perf_event *event, |
| 597 | bool sample_id_all __used) | ||
| 551 | { | 598 | { |
| 552 | event->tracing_data.size = bswap_32(event->tracing_data.size); | 599 | event->tracing_data.size = bswap_32(event->tracing_data.size); |
| 553 | } | 600 | } |
| 554 | 601 | ||
| 555 | typedef void (*perf_event__swap_op)(union perf_event *event); | 602 | typedef void (*perf_event__swap_op)(union perf_event *event, |
| 603 | bool sample_id_all); | ||
| 556 | 604 | ||
| 557 | static perf_event__swap_op perf_event__swap_ops[] = { | 605 | static perf_event__swap_op perf_event__swap_ops[] = { |
| 558 | [PERF_RECORD_MMAP] = perf_event__mmap_swap, | 606 | [PERF_RECORD_MMAP] = perf_event__mmap_swap, |
| @@ -986,6 +1034,15 @@ static int perf_session__process_user_event(struct perf_session *session, union | |||
| 986 | } | 1034 | } |
| 987 | } | 1035 | } |
| 988 | 1036 | ||
| 1037 | static void event_swap(union perf_event *event, bool sample_id_all) | ||
| 1038 | { | ||
| 1039 | perf_event__swap_op swap; | ||
| 1040 | |||
| 1041 | swap = perf_event__swap_ops[event->header.type]; | ||
| 1042 | if (swap) | ||
| 1043 | swap(event, sample_id_all); | ||
| 1044 | } | ||
| 1045 | |||
| 989 | static int perf_session__process_event(struct perf_session *session, | 1046 | static int perf_session__process_event(struct perf_session *session, |
| 990 | union perf_event *event, | 1047 | union perf_event *event, |
| 991 | struct perf_tool *tool, | 1048 | struct perf_tool *tool, |
| @@ -994,9 +1051,8 @@ static int perf_session__process_event(struct perf_session *session, | |||
| 994 | struct perf_sample sample; | 1051 | struct perf_sample sample; |
| 995 | int ret; | 1052 | int ret; |
| 996 | 1053 | ||
| 997 | if (session->header.needs_swap && | 1054 | if (session->header.needs_swap) |
| 998 | perf_event__swap_ops[event->header.type]) | 1055 | event_swap(event, session->sample_id_all); |
| 999 | perf_event__swap_ops[event->header.type](event); | ||
| 1000 | 1056 | ||
| 1001 | if (event->header.type >= PERF_RECORD_HEADER_MAX) | 1057 | if (event->header.type >= PERF_RECORD_HEADER_MAX) |
| 1002 | return -EINVAL; | 1058 | return -EINVAL; |
| @@ -1428,7 +1484,6 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | |||
| 1428 | int print_sym, int print_dso, int print_symoffset) | 1484 | int print_sym, int print_dso, int print_symoffset) |
| 1429 | { | 1485 | { |
| 1430 | struct addr_location al; | 1486 | struct addr_location al; |
| 1431 | struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; | ||
| 1432 | struct callchain_cursor_node *node; | 1487 | struct callchain_cursor_node *node; |
| 1433 | 1488 | ||
| 1434 | if (perf_event__preprocess_sample(event, machine, &al, sample, | 1489 | if (perf_event__preprocess_sample(event, machine, &al, sample, |
| @@ -1446,10 +1501,10 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | |||
| 1446 | error("Failed to resolve callchain. Skipping\n"); | 1501 | error("Failed to resolve callchain. Skipping\n"); |
| 1447 | return; | 1502 | return; |
| 1448 | } | 1503 | } |
| 1449 | callchain_cursor_commit(cursor); | 1504 | callchain_cursor_commit(&callchain_cursor); |
| 1450 | 1505 | ||
| 1451 | while (1) { | 1506 | while (1) { |
| 1452 | node = callchain_cursor_current(cursor); | 1507 | node = callchain_cursor_current(&callchain_cursor); |
| 1453 | if (!node) | 1508 | if (!node) |
| 1454 | break; | 1509 | break; |
| 1455 | 1510 | ||
| @@ -1460,12 +1515,12 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | |||
| 1460 | } | 1515 | } |
| 1461 | if (print_dso) { | 1516 | if (print_dso) { |
| 1462 | printf(" ("); | 1517 | printf(" ("); |
| 1463 | map__fprintf_dsoname(al.map, stdout); | 1518 | map__fprintf_dsoname(node->map, stdout); |
| 1464 | printf(")"); | 1519 | printf(")"); |
| 1465 | } | 1520 | } |
| 1466 | printf("\n"); | 1521 | printf("\n"); |
| 1467 | 1522 | ||
| 1468 | callchain_cursor_advance(cursor); | 1523 | callchain_cursor_advance(&callchain_cursor); |
| 1469 | } | 1524 | } |
| 1470 | 1525 | ||
| 1471 | } else { | 1526 | } else { |
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e2ba8858f3e1..3e2e5ea0f03f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
| @@ -323,6 +323,7 @@ struct dso *dso__new(const char *name) | |||
| 323 | dso->sorted_by_name = 0; | 323 | dso->sorted_by_name = 0; |
| 324 | dso->has_build_id = 0; | 324 | dso->has_build_id = 0; |
| 325 | dso->kernel = DSO_TYPE_USER; | 325 | dso->kernel = DSO_TYPE_USER; |
| 326 | dso->needs_swap = DSO_SWAP__UNSET; | ||
| 326 | INIT_LIST_HEAD(&dso->node); | 327 | INIT_LIST_HEAD(&dso->node); |
| 327 | } | 328 | } |
| 328 | 329 | ||
| @@ -1156,6 +1157,33 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) | |||
| 1156 | return -1; | 1157 | return -1; |
| 1157 | } | 1158 | } |
| 1158 | 1159 | ||
| 1160 | static int dso__swap_init(struct dso *dso, unsigned char eidata) | ||
| 1161 | { | ||
| 1162 | static unsigned int const endian = 1; | ||
| 1163 | |||
| 1164 | dso->needs_swap = DSO_SWAP__NO; | ||
| 1165 | |||
| 1166 | switch (eidata) { | ||
| 1167 | case ELFDATA2LSB: | ||
| 1168 | /* We are big endian, DSO is little endian. */ | ||
| 1169 | if (*(unsigned char const *)&endian != 1) | ||
| 1170 | dso->needs_swap = DSO_SWAP__YES; | ||
| 1171 | break; | ||
| 1172 | |||
| 1173 | case ELFDATA2MSB: | ||
| 1174 | /* We are little endian, DSO is big endian. */ | ||
| 1175 | if (*(unsigned char const *)&endian != 0) | ||
| 1176 | dso->needs_swap = DSO_SWAP__YES; | ||
| 1177 | break; | ||
| 1178 | |||
| 1179 | default: | ||
| 1180 | pr_err("unrecognized DSO data encoding %d\n", eidata); | ||
| 1181 | return -EINVAL; | ||
| 1182 | } | ||
| 1183 | |||
| 1184 | return 0; | ||
| 1185 | } | ||
| 1186 | |||
| 1159 | static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | 1187 | static int dso__load_sym(struct dso *dso, struct map *map, const char *name, |
| 1160 | int fd, symbol_filter_t filter, int kmodule, | 1188 | int fd, symbol_filter_t filter, int kmodule, |
| 1161 | int want_symtab) | 1189 | int want_symtab) |
| @@ -1187,6 +1215,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | |||
| 1187 | goto out_elf_end; | 1215 | goto out_elf_end; |
| 1188 | } | 1216 | } |
| 1189 | 1217 | ||
| 1218 | if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) | ||
| 1219 | goto out_elf_end; | ||
| 1220 | |||
| 1190 | /* Always reject images with a mismatched build-id: */ | 1221 | /* Always reject images with a mismatched build-id: */ |
| 1191 | if (dso->has_build_id) { | 1222 | if (dso->has_build_id) { |
| 1192 | u8 build_id[BUILD_ID_SIZE]; | 1223 | u8 build_id[BUILD_ID_SIZE]; |
| @@ -1272,7 +1303,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | |||
| 1272 | if (opdsec && sym.st_shndx == opdidx) { | 1303 | if (opdsec && sym.st_shndx == opdidx) { |
| 1273 | u32 offset = sym.st_value - opdshdr.sh_addr; | 1304 | u32 offset = sym.st_value - opdshdr.sh_addr; |
| 1274 | u64 *opd = opddata->d_buf + offset; | 1305 | u64 *opd = opddata->d_buf + offset; |
| 1275 | sym.st_value = *opd; | 1306 | sym.st_value = DSO__SWAP(dso, u64, *opd); |
| 1276 | sym.st_shndx = elf_addr_to_index(elf, sym.st_value); | 1307 | sym.st_shndx = elf_addr_to_index(elf, sym.st_value); |
| 1277 | } | 1308 | } |
| 1278 | 1309 | ||
| @@ -2786,8 +2817,11 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type, | |||
| 2786 | 2817 | ||
| 2787 | struct map *dso__new_map(const char *name) | 2818 | struct map *dso__new_map(const char *name) |
| 2788 | { | 2819 | { |
| 2820 | struct map *map = NULL; | ||
| 2789 | struct dso *dso = dso__new(name); | 2821 | struct dso *dso = dso__new(name); |
| 2790 | struct map *map = map__new2(0, dso, MAP__FUNCTION); | 2822 | |
| 2823 | if (dso) | ||
| 2824 | map = map__new2(0, dso, MAP__FUNCTION); | ||
| 2791 | 2825 | ||
| 2792 | return map; | 2826 | return map; |
| 2793 | } | 2827 | } |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5649d63798cb..af0752b1aca1 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
| 10 | #include <linux/rbtree.h> | 10 | #include <linux/rbtree.h> |
| 11 | #include <stdio.h> | 11 | #include <stdio.h> |
| 12 | #include <byteswap.h> | ||
| 12 | 13 | ||
| 13 | #ifdef HAVE_CPLUS_DEMANGLE | 14 | #ifdef HAVE_CPLUS_DEMANGLE |
| 14 | extern char *cplus_demangle(const char *, int); | 15 | extern char *cplus_demangle(const char *, int); |
| @@ -160,11 +161,18 @@ enum dso_kernel_type { | |||
| 160 | DSO_TYPE_GUEST_KERNEL | 161 | DSO_TYPE_GUEST_KERNEL |
| 161 | }; | 162 | }; |
| 162 | 163 | ||
| 164 | enum dso_swap_type { | ||
| 165 | DSO_SWAP__UNSET, | ||
| 166 | DSO_SWAP__NO, | ||
| 167 | DSO_SWAP__YES, | ||
| 168 | }; | ||
| 169 | |||
| 163 | struct dso { | 170 | struct dso { |
| 164 | struct list_head node; | 171 | struct list_head node; |
| 165 | struct rb_root symbols[MAP__NR_TYPES]; | 172 | struct rb_root symbols[MAP__NR_TYPES]; |
| 166 | struct rb_root symbol_names[MAP__NR_TYPES]; | 173 | struct rb_root symbol_names[MAP__NR_TYPES]; |
| 167 | enum dso_kernel_type kernel; | 174 | enum dso_kernel_type kernel; |
| 175 | enum dso_swap_type needs_swap; | ||
| 168 | u8 adjust_symbols:1; | 176 | u8 adjust_symbols:1; |
| 169 | u8 has_build_id:1; | 177 | u8 has_build_id:1; |
| 170 | u8 hit:1; | 178 | u8 hit:1; |
| @@ -182,6 +190,28 @@ struct dso { | |||
| 182 | char name[0]; | 190 | char name[0]; |
| 183 | }; | 191 | }; |
| 184 | 192 | ||
| 193 | #define DSO__SWAP(dso, type, val) \ | ||
| 194 | ({ \ | ||
| 195 | type ____r = val; \ | ||
| 196 | BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ | ||
| 197 | if (dso->needs_swap == DSO_SWAP__YES) { \ | ||
| 198 | switch (sizeof(____r)) { \ | ||
| 199 | case 2: \ | ||
| 200 | ____r = bswap_16(val); \ | ||
| 201 | break; \ | ||
| 202 | case 4: \ | ||
| 203 | ____r = bswap_32(val); \ | ||
| 204 | break; \ | ||
| 205 | case 8: \ | ||
| 206 | ____r = bswap_64(val); \ | ||
| 207 | break; \ | ||
| 208 | default: \ | ||
| 209 | BUG_ON(1); \ | ||
| 210 | } \ | ||
| 211 | } \ | ||
| 212 | ____r; \ | ||
| 213 | }) | ||
| 214 | |||
| 185 | struct dso *dso__new(const char *name); | 215 | struct dso *dso__new(const char *name); |
| 186 | void dso__delete(struct dso *dso); | 216 | void dso__delete(struct dso *dso); |
| 187 | 217 | ||
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index ab2f682fd44c..16de7ad4850f 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -73,8 +73,8 @@ int backwards_count; | |||
| 73 | char *progname; | 73 | char *progname; |
| 74 | 74 | ||
| 75 | int num_cpus; | 75 | int num_cpus; |
| 76 | cpu_set_t *cpu_mask; | 76 | cpu_set_t *cpu_present_set, *cpu_mask; |
| 77 | size_t cpu_mask_size; | 77 | size_t cpu_present_setsize, cpu_mask_size; |
| 78 | 78 | ||
| 79 | struct counters { | 79 | struct counters { |
| 80 | unsigned long long tsc; /* per thread */ | 80 | unsigned long long tsc; /* per thread */ |
| @@ -103,6 +103,12 @@ struct timeval tv_even; | |||
| 103 | struct timeval tv_odd; | 103 | struct timeval tv_odd; |
| 104 | struct timeval tv_delta; | 104 | struct timeval tv_delta; |
| 105 | 105 | ||
| 106 | int mark_cpu_present(int pkg, int core, int cpu) | ||
| 107 | { | ||
| 108 | CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); | ||
| 109 | return 0; | ||
| 110 | } | ||
| 111 | |||
| 106 | /* | 112 | /* |
| 107 | * cpu_mask_init(ncpus) | 113 | * cpu_mask_init(ncpus) |
| 108 | * | 114 | * |
| @@ -118,6 +124,18 @@ void cpu_mask_init(int ncpus) | |||
| 118 | } | 124 | } |
| 119 | cpu_mask_size = CPU_ALLOC_SIZE(ncpus); | 125 | cpu_mask_size = CPU_ALLOC_SIZE(ncpus); |
| 120 | CPU_ZERO_S(cpu_mask_size, cpu_mask); | 126 | CPU_ZERO_S(cpu_mask_size, cpu_mask); |
| 127 | |||
| 128 | /* | ||
| 129 | * Allocate and initialize cpu_present_set | ||
| 130 | */ | ||
| 131 | cpu_present_set = CPU_ALLOC(ncpus); | ||
| 132 | if (cpu_present_set == NULL) { | ||
| 133 | perror("CPU_ALLOC"); | ||
| 134 | exit(3); | ||
| 135 | } | ||
| 136 | cpu_present_setsize = CPU_ALLOC_SIZE(ncpus); | ||
| 137 | CPU_ZERO_S(cpu_present_setsize, cpu_present_set); | ||
| 138 | for_all_cpus(mark_cpu_present); | ||
| 121 | } | 139 | } |
| 122 | 140 | ||
| 123 | void cpu_mask_uninit() | 141 | void cpu_mask_uninit() |
| @@ -125,6 +143,9 @@ void cpu_mask_uninit() | |||
| 125 | CPU_FREE(cpu_mask); | 143 | CPU_FREE(cpu_mask); |
| 126 | cpu_mask = NULL; | 144 | cpu_mask = NULL; |
| 127 | cpu_mask_size = 0; | 145 | cpu_mask_size = 0; |
| 146 | CPU_FREE(cpu_present_set); | ||
| 147 | cpu_present_set = NULL; | ||
| 148 | cpu_present_setsize = 0; | ||
| 128 | } | 149 | } |
| 129 | 150 | ||
| 130 | int cpu_migrate(int cpu) | 151 | int cpu_migrate(int cpu) |
| @@ -912,6 +933,8 @@ int is_snb(unsigned int family, unsigned int model) | |||
| 912 | switch (model) { | 933 | switch (model) { |
| 913 | case 0x2A: | 934 | case 0x2A: |
| 914 | case 0x2D: | 935 | case 0x2D: |
| 936 | case 0x3A: /* IVB */ | ||
| 937 | case 0x3D: /* IVB Xeon */ | ||
| 915 | return 1; | 938 | return 1; |
| 916 | } | 939 | } |
| 917 | return 0; | 940 | return 0; |
| @@ -1047,6 +1070,9 @@ int fork_it(char **argv) | |||
| 1047 | int retval; | 1070 | int retval; |
| 1048 | pid_t child_pid; | 1071 | pid_t child_pid; |
| 1049 | get_counters(cnt_even); | 1072 | get_counters(cnt_even); |
| 1073 | |||
| 1074 | /* clear affinity side-effect of get_counters() */ | ||
| 1075 | sched_setaffinity(0, cpu_present_setsize, cpu_present_set); | ||
| 1050 | gettimeofday(&tv_even, (struct timezone *)NULL); | 1076 | gettimeofday(&tv_even, (struct timezone *)NULL); |
| 1051 | 1077 | ||
| 1052 | child_pid = fork(); | 1078 | child_pid = fork(); |
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index a6a0365475ed..5afb43114020 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c | |||
| @@ -332,6 +332,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, | |||
| 332 | */ | 332 | */ |
| 333 | hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) | 333 | hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) |
| 334 | if (ei->type == KVM_IRQ_ROUTING_MSI || | 334 | if (ei->type == KVM_IRQ_ROUTING_MSI || |
| 335 | ue->type == KVM_IRQ_ROUTING_MSI || | ||
| 335 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) | 336 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) |
| 336 | return r; | 337 | return r; |
| 337 | 338 | ||
