author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-06-25 12:31:00 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-06-25 12:31:00 -0400
commit		bcc66c0b8881f88459f9ac21038455bcafacdc6e (patch)
tree		b402e677253c3fc1038ca4a52fc54fc223261133
parent		1c1b86215730ef07d8851c2247b9ecf73038d05d (diff)
parent		6b16351acbd415e66ba16bf7d473ece1574cf0bc (diff)
Merge 3.5-rc4 into staging-next
This picks up the staging changes made in 3.5-rc4 so that everyone can sync up
properly.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
787 files changed, 10328 insertions, 4477 deletions
@@ -111,6 +111,7 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de> | |||
111 | Uwe Kleine-König <ukl@pengutronix.de> | 111 | Uwe Kleine-König <ukl@pengutronix.de> |
112 | Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com> | 112 | Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com> |
113 | Valdis Kletnieks <Valdis.Kletnieks@vt.edu> | 113 | Valdis Kletnieks <Valdis.Kletnieks@vt.edu> |
114 | Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com> | ||
114 | Takashi YOSHII <takashi.yoshii.zj@renesas.com> | 115 | Takashi YOSHII <takashi.yoshii.zj@renesas.com> |
115 | Yusuke Goda <goda.yusuke@renesas.com> | 116 | Yusuke Goda <goda.yusuke@renesas.com> |
116 | Gustavo Padovan <gustavo@las.ic.unicamp.br> | 117 | Gustavo Padovan <gustavo@las.ic.unicamp.br> |
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt index 57aae7765c74..65610bf52ebf 100644 --- a/Documentation/arm/SPEAr/overview.txt +++ b/Documentation/arm/SPEAr/overview.txt | |||
@@ -60,4 +60,4 @@ Introduction | |||
60 | Document Author | 60 | Document Author |
61 | --------------- | 61 | --------------- |
62 | 62 | ||
63 | Viresh Kumar <viresh.kumar@st.com>, (c) 2010-2012 ST Microelectronics | 63 | Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt new file mode 100644 index 000000000000..ae8af1694e95 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt | |||
@@ -0,0 +1,93 @@ | |||
1 | Pinctrl-based I2C Bus Mux | ||
2 | |||
3 | This binding describes an I2C bus multiplexer that uses pin multiplexing to | ||
4 | route the I2C signals, and represents the pin multiplexing configuration | ||
5 | using the pinctrl device tree bindings. | ||
6 | |||
7 | +-----+ +-----+ | ||
8 | | dev | | dev | | ||
9 | +------------------------+ +-----+ +-----+ | ||
10 | | SoC | | | | ||
11 | | /----|------+--------+ | ||
12 | | +---+ +------+ | child bus A, on first set of pins | ||
13 | | |I2C|---|Pinmux| | | ||
14 | | +---+ +------+ | child bus B, on second set of pins | ||
15 | | \----|------+--------+--------+ | ||
16 | | | | | | | ||
17 | +------------------------+ +-----+ +-----+ +-----+ | ||
18 | | dev | | dev | | dev | | ||
19 | +-----+ +-----+ +-----+ | ||
20 | |||
21 | Required properties: | ||
22 | - compatible: i2c-mux-pinctrl | ||
23 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side | ||
24 | port is connected to. | ||
25 | |||
26 | Also required are: | ||
27 | |||
28 | * Standard pinctrl properties that specify the pin mux state for each child | ||
29 | bus. See ../pinctrl/pinctrl-bindings.txt. | ||
30 | |||
31 | * Standard I2C mux properties. See mux.txt in this directory. | ||
32 | |||
33 | * I2C child bus nodes. See mux.txt in this directory. | ||
34 | |||
35 | For each named state defined in the pinctrl-names property, an I2C child bus | ||
36 | will be created. I2C child bus numbers are assigned based on the index into | ||
37 | the pinctrl-names property. | ||
38 | |||
39 | The only exception is that no bus will be created for a state named "idle". If | ||
40 | such a state is defined, it must be the last entry in pinctrl-names. For | ||
41 | example: | ||
42 | |||
43 | pinctrl-names = "ddc", "pta", "idle" -> ddc = bus 0, pta = bus 1 | ||
44 | pinctrl-names = "ddc", "idle", "pta" -> Invalid ("idle" not last) | ||
45 | pinctrl-names = "idle", "ddc", "pta" -> Invalid ("idle" not last) | ||
46 | |||
47 | Whenever an access is made to a device on a child bus, the relevant pinctrl | ||
48 | state will be programmed into hardware. | ||
49 | |||
50 | If an idle state is defined, whenever an access is not being made to a device | ||
51 | on a child bus, the idle pinctrl state will be programmed into hardware. | ||
52 | |||
53 | If an idle state is not defined, the most recently used pinctrl state will be | ||
54 | left programmed into hardware whenever no access is being made of a device on | ||
55 | a child bus. | ||
56 | |||
57 | Example: | ||
58 | |||
59 | i2cmux { | ||
60 | compatible = "i2c-mux-pinctrl"; | ||
61 | #address-cells = <1>; | ||
62 | #size-cells = <0>; | ||
63 | |||
64 | i2c-parent = <&i2c1>; | ||
65 | |||
66 | pinctrl-names = "ddc", "pta", "idle"; | ||
67 | pinctrl-0 = <&state_i2cmux_ddc>; | ||
68 | pinctrl-1 = <&state_i2cmux_pta>; | ||
69 | pinctrl-2 = <&state_i2cmux_idle>; | ||
70 | |||
71 | i2c@0 { | ||
72 | reg = <0>; | ||
73 | #address-cells = <1>; | ||
74 | #size-cells = <0>; | ||
75 | |||
76 | eeprom { | ||
77 | compatible = "eeprom"; | ||
78 | reg = <0x50>; | ||
79 | }; | ||
80 | }; | ||
81 | |||
82 | i2c@1 { | ||
83 | reg = <1>; | ||
84 | #address-cells = <1>; | ||
85 | #size-cells = <0>; | ||
86 | |||
87 | eeprom { | ||
88 | compatible = "eeprom"; | ||
89 | reg = <0x50>; | ||
90 | }; | ||
91 | }; | ||
92 | }; | ||
93 | |||
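As an illustration of the numbering rule above (one child bus per pinctrl-names entry, with a trailing "idle" state excluded), a driver-side sketch might count the child buses as shown below. This is not the actual i2c-mux-pinctrl driver code; my_count_child_buses is a placeholder name.

	/*
	 * Illustrative sketch only: derive the number of child buses from
	 * pinctrl-names, skipping a trailing "idle" state.
	 */
	#include <linux/of.h>
	#include <linux/string.h>

	static int my_count_child_buses(struct device_node *np)
	{
		int n = of_property_count_strings(np, "pinctrl-names");
		const char *last;

		if (n <= 0)
			return n;
		if (!of_property_read_string_index(np, "pinctrl-names",
						   n - 1, &last) &&
		    !strcmp(last, "idle"))
			n--;	/* "idle" gets no child bus; it must be last */
		return n;	/* child bus i uses the i-th pinctrl state */
	}

When a device on child bus i is accessed, the corresponding state is then programmed with pinctrl_select_state(), and the "idle" state (if defined) is selected whenever no access is in progress.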
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp index 84d46c0c71a3..c86b50c03ea8 100644 --- a/Documentation/hwmon/coretemp +++ b/Documentation/hwmon/coretemp | |||
@@ -6,7 +6,9 @@ Supported chips: | |||
6 | Prefix: 'coretemp' | 6 | Prefix: 'coretemp' |
7 | CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm), | 7 | CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm), |
8 | 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm), | 8 | 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm), |
9 | 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield) | 9 | 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield), |
10 | 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom), | ||
11 | 0x36 (Cedar Trail Atom) | ||
10 | Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual | 12 | Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual |
11 | Volume 3A: System Programming Guide | 13 | Volume 3A: System Programming Guide |
12 | http://softwarecommunity.intel.com/Wiki/Mobility/720.htm | 14 | http://softwarecommunity.intel.com/Wiki/Mobility/720.htm |
@@ -52,6 +54,17 @@ Some information comes from ark.intel.com | |||
52 | 54 | ||
53 | Process Processor TjMax(C) | 55 | Process Processor TjMax(C) |
54 | 56 | ||
57 | 22nm Core i5/i7 Processors | ||
58 | i7 3920XM, 3820QM, 3720QM, 3667U, 3520M 105 | ||
59 | i5 3427U, 3360M/3320M 105 | ||
60 | i7 3770/3770K 105 | ||
61 | i5 3570/3570K, 3550, 3470/3450 105 | ||
62 | i7 3770S 103 | ||
63 | i5 3570S/3550S, 3475S/3470S/3450S 103 | ||
64 | i7 3770T 94 | ||
65 | i5 3570T 94 | ||
66 | i5 3470T 91 | ||
67 | |||
55 | 32nm Core i3/i5/i7 Processors | 68 | 32nm Core i3/i5/i7 Processors |
56 | i7 660UM/640/620, 640LM/620, 620M, 610E 105 | 69 | i7 660UM/640/620, 640LM/620, 620M, 610E 105 |
57 | i5 540UM/520/430, 540M/520/450/430 105 | 70 | i5 540UM/520/430, 540M/520/450/430 105 |
@@ -65,6 +78,11 @@ Process Processor TjMax(C) | |||
65 | U3400 105 | 78 | U3400 105 |
66 | P4505/P4500 90 | 79 | P4505/P4500 90 |
67 | 80 | ||
81 | 32nm Atom Processors | ||
82 | Z2460 90 | ||
83 | D2700/2550/2500 100 | ||
84 | N2850/2800/2650/2600 100 | ||
85 | |||
68 | 45nm Xeon Processors 5400 Quad-Core | 86 | 45nm Xeon Processors 5400 Quad-Core |
69 | X5492, X5482, X5472, X5470, X5460, X5450 85 | 87 | X5492, X5482, X5472, X5470, X5460, X5450 85 |
70 | E5472, E5462, E5450/40/30/20/10/05 85 | 88 | E5472, E5462, E5450/40/30/20/10/05 85 |
@@ -85,6 +103,8 @@ Process Processor TjMax(C) | |||
85 | N475/470/455/450 100 | 103 | N475/470/455/450 100 |
86 | N280/270 90 | 104 | N280/270 90 |
87 | 330/230 125 | 105 | 330/230 125 |
106 | E680/660/640/620 90 | ||
107 | E680T/660T/640T/620T 110 | ||
88 | 108 | ||
89 | 45nm Core2 Processors | 109 | 45nm Core2 Processors |
90 | Solo ULV SU3500/3300 100 | 110 | Solo ULV SU3500/3300 100 |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c45513d806ab..a92c5ebf373e 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2543 | 2543 | ||
2544 | sched_debug [KNL] Enables verbose scheduler debug messages. | 2544 | sched_debug [KNL] Enables verbose scheduler debug messages. |
2545 | 2545 | ||
2546 | skew_tick= [KNL] Offset the periodic timer tick per cpu to mitigate | ||
2547 | xtime_lock contention on larger systems, and/or RCU lock | ||
2548 | contention on all systems with CONFIG_MAXSMP set. | ||
2549 | Format: { "0" | "1" } | ||
2550 | 0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1") | ||
2551 | 1 -- enable. | ||
2552 | Note: increases power consumption, thus should only be | ||
2553 | enabled if running jitter sensitive (HPC/RT) workloads. | ||
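A minimal sketch of the underlying idea, assuming a per-cpu offset of a share of the tick period (the real implementation is in kernel/time/tick-sched.c and may differ in detail):

	/*
	 * Sketch only: stagger each CPU's periodic tick by a per-cpu share
	 * of the tick period so the CPUs do not all take the tick at the
	 * same instant.  The half-tick base offset is an assumption.
	 */
	#include <linux/ktime.h>
	#include <linux/cpumask.h>
	#include <asm/div64.h>

	static u64 my_tick_skew_ns(ktime_t tick_period, unsigned int cpu)
	{
		u64 offset = ktime_to_ns(tick_period) >> 1;

		do_div(offset, num_possible_cpus());	/* one share per CPU */
		return offset * cpu;			/* CPU n skewed by n shares */
	}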
2554 | |||
2546 | security= [SECURITY] Choose a security module to enable at boot. | 2555 | security= [SECURITY] Choose a security module to enable at boot. |
2547 | If this boot parameter is not specified, only the first | 2556 | If this boot parameter is not specified, only the first |
2548 | security module asking for security registration will be | 2557 | security module asking for security registration will be |
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt index ab1e8d7004c5..5cb9a1972460 100644 --- a/Documentation/networking/stmmac.txt +++ b/Documentation/networking/stmmac.txt | |||
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC | |||
10 | (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 | 10 | (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 |
11 | FF1152AMT0221 D1215994A VIRTEX FPGA board. | 11 | FF1152AMT0221 D1215994A VIRTEX FPGA board. |
12 | 12 | ||
13 | DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100 | 13 | DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether |
14 | Universal version 4.0 have been used for developing this driver. | 14 | MAC 10/100 Universal version 4.0 have been used for developing this driver. |
15 | 15 | ||
16 | This driver supports both the platform bus and PCI. | 16 | This driver supports both the platform bus and PCI. |
17 | 17 | ||
@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature. | |||
54 | When one or more packets are received, an interrupt happens. The interrupts | 54 | When one or more packets are received, an interrupt happens. The interrupts |
55 | are not queued so the driver has to scan all the descriptors in the ring during | 55 | are not queued so the driver has to scan all the descriptors in the ring during |
56 | the receive process. | 56 | the receive process. |
57 | This is based on NAPI so the interrupt handler signals only if there is work to be | 57 | This is based on NAPI so the interrupt handler signals only if there is work |
58 | done, and it exits. | 58 | to be done, and it exits. |
59 | Then the poll method will be scheduled at some future point. | 59 | Then the poll method will be scheduled at some future point. |
60 | The incoming packets are stored, by the DMA, in a list of pre-allocated socket | 60 | The incoming packets are stored, by the DMA, in a list of pre-allocated socket |
61 | buffers in order to avoid the memcpy (Zero-copy). | 61 | buffers in order to avoid the memcpy (Zero-copy). |
62 | 62 | ||
63 | 4.3) Timer-Driver Interrupt | 63 | 4.3) Timer-Driver Interrupt |
64 | Instead of having the device that asynchronously notifies the frame receptions, the | 64 | Instead of having the device that asynchronously notifies the frame receptions, |
65 | driver configures a timer to generate an interrupt at regular intervals. | 65 | the driver configures a timer to generate an interrupt at regular intervals. |
66 | Based on the granularity of the timer, the frames that are received by the device | 66 | Based on the granularity of the timer, the frames that are received by the |
67 | will experience different levels of latency. Some NICs have dedicated timer | 67 | device will experience different levels of latency. Some NICs have dedicated |
68 | device to perform this task. STMMAC can use either the RTC device or the TMU | 68 | timer device to perform this task. STMMAC can use either the RTC device or the |
69 | channel 2 on STLinux platforms. | 69 | TMU channel 2 on STLinux platforms. |
70 | The timer's frequency can be passed to the driver as a parameter; when changing it, | 70 | The timer's frequency can be passed to the driver as a parameter; when changing it, |
71 | take care of both hardware capability and network stability/performance impact. | 71 | take care of both hardware capability and network stability/performance impact. |
72 | Several performance tests on STM platforms showed this optimisation allows to spare | 72 | Several performance tests on STM platforms showed this optimisation allows to |
73 | the CPU while having the maximum throughput. | 73 | spare the CPU while having the maximum throughput. |
74 | 74 | ||
75 | 4.4) WOL | 75 | 4.4) WOL |
76 | The Wake-on-LAN feature through Magic and Unicast frames is supported for the GMAC | 76 | The Wake-on-LAN feature through Magic and Unicast frames is supported for the |
77 | core. | 77 | GMAC core. |
78 | 78 | ||
79 | 4.5) DMA descriptors | 79 | 4.5) DMA descriptors |
80 | Driver handles both normal and enhanced descriptors. The latter has been only | 80 | Driver handles both normal and enhanced descriptors. The latter has been only |
@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform | |||
106 | These are included in the include/linux/stmmac.h header file | 106 | These are included in the include/linux/stmmac.h header file |
107 | and detailed below as well: | 107 | and detailed below as well: |
108 | 108 | ||
109 | struct plat_stmmacenet_data { | 109 | struct plat_stmmacenet_data { |
110 | char *phy_bus_name; | ||
110 | int bus_id; | 111 | int bus_id; |
111 | int phy_addr; | 112 | int phy_addr; |
112 | int interface; | 113 | int interface; |
@@ -124,19 +125,24 @@ and detailed below as well: | |||
124 | void (*bus_setup)(void __iomem *ioaddr); | 125 | void (*bus_setup)(void __iomem *ioaddr); |
125 | int (*init)(struct platform_device *pdev); | 126 | int (*init)(struct platform_device *pdev); |
126 | void (*exit)(struct platform_device *pdev); | 127 | void (*exit)(struct platform_device *pdev); |
128 | void *custom_cfg; | ||
129 | void *custom_data; | ||
127 | void *bsp_priv; | 130 | void *bsp_priv; |
128 | }; | 131 | }; |
129 | 132 | ||
130 | Where: | 133 | Where: |
134 | o phy_bus_name: phy bus name to attach to the stmmac. | ||
131 | o bus_id: bus identifier. | 135 | o bus_id: bus identifier. |
132 | o phy_addr: the physical address can be passed from the platform. | 136 | o phy_addr: the physical address can be passed from the platform. |
133 | If it is set to -1 the driver will automatically | 137 | If it is set to -1 the driver will automatically |
134 | detect it at run-time by probing all the 32 addresses. | 138 | detect it at run-time by probing all the 32 addresses. |
135 | o interface: PHY device's interface. | 139 | o interface: PHY device's interface. |
136 | o mdio_bus_data: specific platform fields for the MDIO bus. | 140 | o mdio_bus_data: specific platform fields for the MDIO bus. |
137 | o pbl: the Programmable Burst Length is maximum number of beats to | 141 | o dma_cfg: internal DMA parameters |
142 | o pbl: the Programmable Burst Length is maximum number of beats to | ||
138 | be transferred in one DMA transaction. | 143 | be transferred in one DMA transaction. |
139 | GMAC also enables the 4xPBL by default. | 144 | GMAC also enables the 4xPBL by default. |
145 | o fixed_burst/mixed_burst/burst_len | ||
140 | o clk_csr: fixed CSR Clock range selection. | 146 | o clk_csr: fixed CSR Clock range selection. |
141 | o has_gmac: uses the GMAC core. | 147 | o has_gmac: uses the GMAC core. |
142 | o enh_desc: if sets the MAC will use the enhanced descriptor structure. | 148 | o enh_desc: if sets the MAC will use the enhanced descriptor structure. |
@@ -160,8 +166,9 @@ Where: | |||
160 | this is sometime necessary on some platforms (e.g. ST boxes) | 166 | this is sometime necessary on some platforms (e.g. ST boxes) |
161 | where the HW needs to have set some PIO lines or system cfg | 167 | where the HW needs to have set some PIO lines or system cfg |
162 | registers. | 168 | registers. |
163 | o custom_cfg: this is a custom configuration that can be passed while | 169 | o custom_cfg/custom_data: this is a custom configuration that can be passed |
164 | initialising the resources. | 170 | while initialising the resources. |
171 | o bsp_priv: another private pointer. | ||
165 | 172 | ||
166 | For the MDIO bus we have: | 173 | For the MDIO bus we have: |
167 | 174 | ||
@@ -180,7 +187,6 @@ Where: | |||
180 | o irqs: list of IRQs, one per PHY. | 187 | o irqs: list of IRQs, one per PHY. |
181 | o probed_phy_irq: if irqs is NULL, use this for probed PHY. | 188 | o probed_phy_irq: if irqs is NULL, use this for probed PHY. |
182 | 189 | ||
183 | |||
184 | For DMA engine we have the following internal fields that should be | 190 | For DMA engine we have the following internal fields that should be |
185 | tuned according to the HW capabilities. | 191 | tuned according to the HW capabilities. |
186 | 192 | ||
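A hedged sketch of how a board file might fill in a few of the plat_stmmacenet_data fields described above; the values are illustrative only and my_stmmac_pdata is a placeholder name.

	/*
	 * Illustrative board-level platform data; the struct is declared
	 * in include/linux/stmmac.h.
	 */
	#include <linux/phy.h>
	#include <linux/stmmac.h>

	static struct plat_stmmacenet_data my_stmmac_pdata = {
		.bus_id    = 0,
		.phy_addr  = -1,	/* probe all 32 MDIO addresses at run time */
		.interface = PHY_INTERFACE_MODE_RGMII,
		.clk_csr   = 0,		/* fixed CSR clock range selection */
		.has_gmac  = 1,		/* the core is a GMAC */
		.enh_desc  = 1,		/* use the enhanced descriptor structure */
	};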
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt new file mode 100644 index 000000000000..37067cf455f4 --- /dev/null +++ b/Documentation/vm/frontswap.txt | |||
@@ -0,0 +1,278 @@ | |||
1 | Frontswap provides a "transcendent memory" interface for swap pages. | ||
2 | In some environments, dramatic performance savings may be obtained because | ||
3 | swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk. | ||
4 | |||
5 | (Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends" | ||
6 | and the only necessary changes to the core kernel for transcendent memory; | ||
7 | all other supporting code -- the "backends" -- is implemented as drivers. | ||
8 | See the LWN.net article "Transcendent memory in a nutshell" for a detailed | ||
9 | overview of frontswap and related kernel parts: | ||
10 | https://lwn.net/Articles/454795/ ) | ||
11 | |||
12 | Frontswap is so named because it can be thought of as the opposite of | ||
13 | a "backing" store for a swap device. The storage is assumed to be | ||
14 | a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming | ||
15 | to the requirements of transcendent memory (such as Xen's "tmem", or | ||
16 | in-kernel compressed memory, aka "zcache", or future RAM-like devices); | ||
17 | this pseudo-RAM device is not directly accessible or addressable by the | ||
18 | kernel and is of unknown and possibly time-varying size. The driver | ||
19 | links itself to frontswap by calling frontswap_register_ops to set the | ||
20 | frontswap_ops funcs appropriately and the functions it provides must | ||
21 | conform to certain policies as follows: | ||
22 | |||
23 | An "init" prepares the device to receive frontswap pages associated | ||
24 | with the specified swap device number (aka "type"). A "store" will | ||
25 | copy the page to transcendent memory and associate it with the type and | ||
26 | offset associated with the page. A "load" will copy the page, if found, | ||
27 | from transcendent memory into kernel memory, but will NOT remove the page | ||
28 | from transcendent memory. An "invalidate_page" will remove the page | ||
29 | from transcendent memory and an "invalidate_area" will remove ALL pages | ||
30 | associated with the swap type (e.g., like swapoff) and notify the "device" | ||
31 | to refuse further stores with that swap type. | ||
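A rough sketch of a backend registration following the descriptions above; the my_* names are placeholders and the prototypes are assumptions, with the authoritative definitions in include/linux/frontswap.h.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/frontswap.h>

	static void my_init(unsigned type)
	{
		/* prepare to receive pages for swap device "type" */
	}

	static int my_store(unsigned type, pgoff_t offset, struct page *page)
	{
		return -1;	/* reject: the kernel writes the page to disk instead */
	}

	static int my_load(unsigned type, pgoff_t offset, struct page *page)
	{
		return -1;	/* page not present in this backend */
	}

	static void my_invalidate_page(unsigned type, pgoff_t offset)
	{
	}

	static void my_invalidate_area(unsigned type)
	{
	}

	static struct frontswap_ops my_ops = {
		.init            = my_init,
		.store           = my_store,
		.load            = my_load,
		.invalidate_page = my_invalidate_page,
		.invalidate_area = my_invalidate_area,
	};

	static int __init my_backend_init(void)
	{
		frontswap_register_ops(&my_ops);
		return 0;
	}
	module_init(my_backend_init);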
32 | |||
33 | Once a page is successfully stored, a matching load on the page will normally | ||
34 | succeed. So when the kernel finds itself in a situation where it needs | ||
35 | to swap out a page, it first attempts to use frontswap. If the store returns | ||
36 | success, the data has been successfully saved to transcendent memory and | ||
37 | a disk write and, if the data is later read back, a disk read are avoided. | ||
38 | If a store returns failure, transcendent memory has rejected the data, and the | ||
39 | page can be written to swap as usual. | ||
40 | |||
41 | If a backend chooses, frontswap can be configured as a "writethrough | ||
42 | cache" by calling frontswap_writethrough(). In this mode, the reduction | ||
43 | in swap device writes is lost (and also a non-trivial performance advantage) | ||
44 | in order to allow the backend to arbitrarily "reclaim" space used to | ||
45 | store frontswap pages to more completely manage its memory usage. | ||
46 | |||
47 | Note that if a page is stored and the page already exists in transcendent memory | ||
48 | (a "duplicate" store), either the store succeeds and the data is overwritten, | ||
49 | or the store fails AND the page is invalidated. This ensures stale data may | ||
50 | never be obtained from frontswap. | ||
51 | |||
52 | If properly configured, monitoring of frontswap is done via debugfs in | ||
53 | the /sys/kernel/debug/frontswap directory. The effectiveness of | ||
54 | frontswap can be measured (across all swap devices) with: | ||
55 | |||
56 | failed_stores - how many store attempts have failed | ||
57 | loads - how many loads were attempted (all should succeed) | ||
58 | succ_stores - how many store attempts have succeeded | ||
59 | invalidates - how many invalidates were attempted | ||
60 | |||
61 | A backend implementation may provide additional metrics. | ||
62 | |||
63 | FAQ | ||
64 | |||
65 | 1) Where's the value? | ||
66 | |||
67 | When a workload starts swapping, performance falls through the floor. | ||
68 | Frontswap significantly increases performance in many such workloads by | ||
69 | providing a clean, dynamic interface to read and write swap pages to | ||
70 | "transcendent memory" that is otherwise not directly addressable to the kernel. | ||
71 | This interface is ideal when data is transformed to a different form | ||
72 | and size (such as with compression) or secretly moved (as might be | ||
73 | useful for write-balancing for some RAM-like devices). Swap pages (and | ||
74 | evicted page-cache pages) are a great use for this kind of slower-than-RAM- | ||
75 | but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and | ||
76 | cleancache) interface to transcendent memory provides a nice way to read | ||
77 | and write -- and indirectly "name" -- the pages. | ||
78 | |||
79 | Frontswap -- and cleancache -- with a fairly small impact on the kernel, | ||
80 | provides a huge amount of flexibility for more dynamic, flexible RAM | ||
81 | utilization in various system configurations: | ||
82 | |||
83 | In the single kernel case, aka "zcache", pages are compressed and | ||
84 | stored in local memory, thus increasing the total anonymous pages | ||
85 | that can be safely kept in RAM. Zcache essentially trades off CPU | ||
86 | cycles used in compression/decompression for better memory utilization. | ||
87 | Benchmarks have shown little or no impact when memory pressure is | ||
88 | low while providing a significant performance improvement (25%+) | ||
89 | on some workloads under high memory pressure. | ||
90 | |||
91 | "RAMster" builds on zcache by adding "peer-to-peer" transcendent memory | ||
92 | support for clustered systems. Frontswap pages are locally compressed | ||
93 | as in zcache, but then "remotified" to another system's RAM. This | ||
94 | allows RAM to be dynamically load-balanced back-and-forth as needed, | ||
95 | i.e. when system A is overcommitted, it can swap to system B, and | ||
96 | vice versa. RAMster can also be configured as a memory server so | ||
97 | many servers in a cluster can swap, dynamically as needed, to a single | ||
98 | server configured with a large amount of RAM... without pre-configuring | ||
99 | how much of the RAM is available for each of the clients! | ||
100 | |||
101 | In the virtual case, the whole point of virtualization is to statistically | ||
102 | multiplex physical resources across the varying demands of multiple | ||
103 | virtual machines. This is really hard to do with RAM and efforts to do | ||
104 | it well with no kernel changes have essentially failed (except in some | ||
105 | well-publicized special-case workloads). | ||
106 | Specifically, the Xen Transcendent Memory backend allows otherwise | ||
107 | "fallow" hypervisor-owned RAM to not only be "time-shared" between multiple | ||
108 | virtual machines, but the pages can be compressed and deduplicated to | ||
109 | optimize RAM utilization. And when guest OS's are induced to surrender | ||
110 | underutilized RAM (e.g. with "selfballooning"), sudden unexpected | ||
111 | memory pressure may result in swapping; frontswap allows those pages | ||
112 | to be swapped to and from hypervisor RAM (if overall host system memory | ||
113 | conditions allow), thus mitigating the potentially awful performance impact | ||
114 | of unplanned swapping. | ||
115 | |||
116 | A KVM implementation is underway and has been RFC'ed to lkml. And, | ||
117 | using frontswap, investigation is also underway on the use of NVM as | ||
118 | a memory extension technology. | ||
119 | |||
120 | 2) Sure there may be performance advantages in some situations, but | ||
121 | what's the space/time overhead of frontswap? | ||
122 | |||
123 | If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into | ||
124 | nothingness and the only overhead is a few extra bytes per swapon'ed | ||
125 | swap device. If CONFIG_FRONTSWAP is enabled but no frontswap "backend" | ||
126 | registers, there is one extra global variable compared to zero for | ||
127 | every swap page read or written. If CONFIG_FRONTSWAP is enabled | ||
128 | AND a frontswap backend registers AND the backend fails every "store" | ||
129 | request (i.e. provides no memory despite claiming it might), | ||
130 | CPU overhead is still negligible -- and since every frontswap fail | ||
131 | precedes a swap page write-to-disk, the system is highly likely | ||
132 | to be I/O bound and using a small fraction of a percent of a CPU | ||
133 | will be irrelevant anyway. | ||
134 | |||
135 | As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend | ||
136 | registers, one bit is allocated for every swap page for every swap | ||
137 | device that is swapon'd. This is added to the EIGHT bits (which | ||
138 | was sixteen until about 2.6.34) that the kernel already allocates | ||
139 | for every swap page for every swap device that is swapon'd. (Hugh | ||
140 | Dickins has observed that frontswap could probably steal one of | ||
141 | the existing eight bits, but let's worry about that minor optimization | ||
142 | later.) For very large swap disks (which are rare) on a standard | ||
143 | 4K pagesize, this is 1MB per 32GB swap. | ||
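(Worked through: a 32GB swap device at a 4K page size holds 8M pages; one bit per page is 8M bits, i.e. 1MB.)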
144 | |||
145 | When swap pages are stored in transcendent memory instead of written | ||
146 | out to disk, there is a side effect that this may create more memory | ||
147 | pressure that can potentially outweigh the other advantages. A | ||
148 | backend, such as zcache, must implement policies to carefully (but | ||
149 | dynamically) manage memory limits to ensure this doesn't happen. | ||
150 | |||
151 | 3) OK, how about a quick overview of what this frontswap patch does | ||
152 | in terms that a kernel hacker can grok? | ||
153 | |||
154 | Let's assume that a frontswap "backend" has registered during | ||
155 | kernel initialization; this registration indicates that this | ||
156 | frontswap backend has access to some "memory" that is not directly | ||
157 | accessible by the kernel. Exactly how much memory it provides is | ||
158 | entirely dynamic and random. | ||
159 | |||
160 | Whenever a swap-device is swapon'd frontswap_init() is called, | ||
161 | passing the swap device number (aka "type") as a parameter. | ||
162 | This notifies frontswap to expect attempts to "store" swap pages | ||
163 | associated with that number. | ||
164 | |||
165 | Whenever the swap subsystem is readying a page to write to a swap | ||
166 | device (cf. swap_writepage()), frontswap_store is called. Frontswap | ||
167 | consults with the frontswap backend and if the backend says it does NOT | ||
168 | have room, frontswap_store returns -1 and the kernel swaps the page | ||
169 | to the swap device as normal. Note that the response from the frontswap | ||
170 | backend is unpredictable to the kernel; it may choose to never accept a | ||
171 | page, it could accept every ninth page, or it might accept every | ||
172 | page. But if the backend does accept a page, the data from the page | ||
173 | has already been copied and associated with the type and offset, | ||
174 | and the backend guarantees the persistence of the data. In this case, | ||
175 | frontswap sets a bit in the "frontswap_map" for the swap device | ||
176 | corresponding to the page offset on the swap device to which it would | ||
177 | otherwise have written the data. | ||
178 | |||
179 | When the swap subsystem needs to swap-in a page (swap_readpage()), | ||
180 | it first calls frontswap_load() which checks the frontswap_map to | ||
181 | see if the page was earlier accepted by the frontswap backend. If | ||
182 | it was, the page of data is filled from the frontswap backend and | ||
183 | the swap-in is complete. If not, the normal swap-in code is | ||
184 | executed to obtain the page of data from the real swap device. | ||
185 | |||
186 | So every time the frontswap backend accepts a page, a swap device read | ||
187 | and (potentially) a swap device write are replaced by a "frontswap backend | ||
188 | store" and (possibly) a "frontswap backend loads", which are presumably much | ||
189 | faster. | ||
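The pattern can be sketched as follows (illustrative only; the real logic is in mm/page_io.c and mm/frontswap.c, and __my_write_to_swap stands in for the normal block-I/O swap-out path, not a real kernel symbol):

	#include <linux/frontswap.h>

	extern int __my_write_to_swap(struct page *page);

	static int my_swap_writepage(struct page *page)
	{
		if (frontswap_store(page) == 0)
			return 0;	/* backend accepted it: no disk write needed */

		return __my_write_to_swap(page);	/* rejected: use the swap device */
	}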
190 | |||
191 | 4) Can't frontswap be configured as a "special" swap device that is | ||
192 | just higher priority than any real swap device (e.g. like zswap, | ||
193 | or maybe swap-over-nbd/NFS)? | ||
194 | |||
195 | No. First, the existing swap subsystem doesn't allow for any kind of | ||
196 | swap hierarchy. Perhaps it could be rewritten to accommodate a hierarchy, | ||
197 | but this would require fairly drastic changes. Even if it were | ||
198 | rewritten, the existing swap subsystem uses the block I/O layer which | ||
199 | assumes a swap device is fixed size and any page in it is linearly | ||
200 | addressable. Frontswap barely touches the existing swap subsystem, | ||
201 | and works around the constraints of the block I/O subsystem to provide | ||
202 | a great deal of flexibility and dynamicity. | ||
203 | |||
204 | For example, the acceptance of any swap page by the frontswap backend is | ||
205 | entirely unpredictable. This is critical to the definition of frontswap | ||
206 | backends because it grants completely dynamic discretion to the | ||
207 | backend. In zcache, one cannot know a priori how compressible a page is. | ||
208 | "Poorly" compressible pages can be rejected, and "poorly" can itself be | ||
209 | defined dynamically depending on current memory constraints. | ||
210 | |||
211 | Further, frontswap is entirely synchronous whereas a real swap | ||
212 | device is, by definition, asynchronous and uses block I/O. The | ||
213 | block I/O layer is not only unnecessary, but may perform "optimizations" | ||
214 | that are inappropriate for a RAM-oriented device including delaying | ||
215 | the write of some pages for a significant amount of time. Synchrony is | ||
216 | required to ensure the dynamicity of the backend and to avoid thorny race | ||
217 | conditions that would unnecessarily and greatly complicate frontswap | ||
218 | and/or the block I/O subsystem. That said, only the initial "store" | ||
219 | and "load" operations need be synchronous. A separate asynchronous thread | ||
220 | is free to manipulate the pages stored by frontswap. For example, | ||
221 | the "remotification" thread in RAMster uses standard asynchronous | ||
222 | kernel sockets to move compressed frontswap pages to a remote machine. | ||
223 | Similarly, a KVM guest-side implementation could do in-guest compression | ||
224 | and use "batched" hypercalls. | ||
225 | |||
226 | In a virtualized environment, the dynamicity allows the hypervisor | ||
227 | (or host OS) to do "intelligent overcommit". For example, it can | ||
228 | choose to accept pages only until host-swapping might be imminent, | ||
229 | then force guests to do their own swapping. | ||
230 | |||
231 | There is a downside to the transcendent memory specifications for | ||
232 | frontswap: Since any "store" might fail, there must always be a real | ||
233 | slot on a real swap device to swap the page. Thus frontswap must be | ||
234 | implemented as a "shadow" to every swapon'd device with the potential | ||
235 | capability of holding every page that the swap device might have held | ||
236 | and the possibility that it might hold no pages at all. This means | ||
237 | that frontswap cannot contain more pages than the total of swapon'd | ||
238 | swap devices. For example, if NO swap device is configured on some | ||
239 | installation, frontswap is useless. Swapless portable devices | ||
240 | can still use frontswap but a backend for such devices must configure | ||
241 | some kind of "ghost" swap device and ensure that it is never used. | ||
242 | |||
243 | 5) Why this weird definition about "duplicate stores"? If a page | ||
244 | has been previously successfully stored, can't it always be | ||
245 | successfully overwritten? | ||
246 | |||
247 | Nearly always it can, but no, sometimes it cannot. Consider an example | ||
248 | where data is compressed and the original 4K page has been compressed | ||
249 | to 1K. Now an attempt is made to overwrite the page with data that | ||
250 | is non-compressible and so would take the entire 4K. But the backend | ||
251 | has no more space. In this case, the store must be rejected. Whenever | ||
252 | frontswap rejects a store that would overwrite, it also must invalidate | ||
253 | the old data and ensure that it is no longer accessible. Since the | ||
254 | swap subsystem then writes the new data to the real swap device, | ||
255 | this is the correct course of action to ensure coherency. | ||
256 | |||
257 | 6) What is frontswap_shrink for? | ||
258 | |||
259 | When the (non-frontswap) swap subsystem swaps out a page to a real | ||
260 | swap device, that page is only taking up low-value pre-allocated disk | ||
261 | space. But if frontswap has placed a page in transcendent memory, that | ||
262 | page may be taking up valuable real estate. The frontswap_shrink | ||
263 | routine allows code outside of the swap subsystem to force pages out | ||
264 | of the memory managed by frontswap and back into kernel-addressable memory. | ||
265 | For example, in RAMster, a "suction driver" thread will attempt | ||
266 | to "repatriate" pages sent to a remote machine back to the local machine; | ||
267 | this is driven using the frontswap_shrink mechanism when memory pressure | ||
268 | subsides. | ||
269 | |||
270 | 7) Why does the frontswap patch create the new include file swapfile.h? | ||
271 | |||
272 | The frontswap code depends on some swap-subsystem-internal data | ||
273 | structures that have, over the years, moved back and forth between | ||
274 | static and global. This seemed a reasonable compromise: Define | ||
275 | them as global but declare them in a new include file that isn't | ||
276 | included by the large number of source files that include swap.h. | ||
277 | |||
278 | Dan Magenheimer, last updated April 9, 2012 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 55f0fda602ec..eb22272b2116 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -579,7 +579,7 @@ F: drivers/net/appletalk/ | |||
579 | F: net/appletalk/ | 579 | F: net/appletalk/ |
580 | 580 | ||
581 | ARASAN COMPACT FLASH PATA CONTROLLER | 581 | ARASAN COMPACT FLASH PATA CONTROLLER |
582 | M: Viresh Kumar <viresh.kumar@st.com> | 582 | M: Viresh Kumar <viresh.linux@gmail.com> |
583 | L: linux-ide@vger.kernel.org | 583 | L: linux-ide@vger.kernel.org |
584 | S: Maintained | 584 | S: Maintained |
585 | F: include/linux/pata_arasan_cf_data.h | 585 | F: include/linux/pata_arasan_cf_data.h |
@@ -1077,7 +1077,7 @@ F: drivers/media/video/s5p-fimc/ | |||
1077 | ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT | 1077 | ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT |
1078 | M: Kyungmin Park <kyungmin.park@samsung.com> | 1078 | M: Kyungmin Park <kyungmin.park@samsung.com> |
1079 | M: Kamil Debski <k.debski@samsung.com> | 1079 | M: Kamil Debski <k.debski@samsung.com> |
1080 | M: Jeongtae Park <jtp.park@samsung.com> | 1080 | M: Jeongtae Park <jtp.park@samsung.com> |
1081 | L: linux-arm-kernel@lists.infradead.org | 1081 | L: linux-arm-kernel@lists.infradead.org |
1082 | L: linux-media@vger.kernel.org | 1082 | L: linux-media@vger.kernel.org |
1083 | S: Maintained | 1083 | S: Maintained |
@@ -1646,11 +1646,11 @@ S: Maintained | |||
1646 | F: drivers/gpio/gpio-bt8xx.c | 1646 | F: drivers/gpio/gpio-bt8xx.c |
1647 | 1647 | ||
1648 | BTRFS FILE SYSTEM | 1648 | BTRFS FILE SYSTEM |
1649 | M: Chris Mason <chris.mason@oracle.com> | 1649 | M: Chris Mason <chris.mason@fusionio.com> |
1650 | L: linux-btrfs@vger.kernel.org | 1650 | L: linux-btrfs@vger.kernel.org |
1651 | W: http://btrfs.wiki.kernel.org/ | 1651 | W: http://btrfs.wiki.kernel.org/ |
1652 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ | 1652 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ |
1653 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git | 1653 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git |
1654 | S: Maintained | 1654 | S: Maintained |
1655 | F: Documentation/filesystems/btrfs.txt | 1655 | F: Documentation/filesystems/btrfs.txt |
1656 | F: fs/btrfs/ | 1656 | F: fs/btrfs/ |
@@ -1743,10 +1743,10 @@ F: include/linux/can/platform/ | |||
1743 | CAPABILITIES | 1743 | CAPABILITIES |
1744 | M: Serge Hallyn <serge.hallyn@canonical.com> | 1744 | M: Serge Hallyn <serge.hallyn@canonical.com> |
1745 | L: linux-security-module@vger.kernel.org | 1745 | L: linux-security-module@vger.kernel.org |
1746 | S: Supported | 1746 | S: Supported |
1747 | F: include/linux/capability.h | 1747 | F: include/linux/capability.h |
1748 | F: security/capability.c | 1748 | F: security/capability.c |
1749 | F: security/commoncap.c | 1749 | F: security/commoncap.c |
1750 | F: kernel/capability.c | 1750 | F: kernel/capability.c |
1751 | 1751 | ||
1752 | CELL BROADBAND ENGINE ARCHITECTURE | 1752 | CELL BROADBAND ENGINE ARCHITECTURE |
@@ -1800,6 +1800,9 @@ F: include/linux/cfag12864b.h | |||
1800 | CFG80211 and NL80211 | 1800 | CFG80211 and NL80211 |
1801 | M: Johannes Berg <johannes@sipsolutions.net> | 1801 | M: Johannes Berg <johannes@sipsolutions.net> |
1802 | L: linux-wireless@vger.kernel.org | 1802 | L: linux-wireless@vger.kernel.org |
1803 | W: http://wireless.kernel.org/ | ||
1804 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git | ||
1805 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
1803 | S: Maintained | 1806 | S: Maintained |
1804 | F: include/linux/nl80211.h | 1807 | F: include/linux/nl80211.h |
1805 | F: include/net/cfg80211.h | 1808 | F: include/net/cfg80211.h |
@@ -2146,11 +2149,11 @@ S: Orphan | |||
2146 | F: drivers/net/wan/pc300* | 2149 | F: drivers/net/wan/pc300* |
2147 | 2150 | ||
2148 | CYTTSP TOUCHSCREEN DRIVER | 2151 | CYTTSP TOUCHSCREEN DRIVER |
2149 | M: Javier Martinez Canillas <javier@dowhile0.org> | 2152 | M: Javier Martinez Canillas <javier@dowhile0.org> |
2150 | L: linux-input@vger.kernel.org | 2153 | L: linux-input@vger.kernel.org |
2151 | S: Maintained | 2154 | S: Maintained |
2152 | F: drivers/input/touchscreen/cyttsp* | 2155 | F: drivers/input/touchscreen/cyttsp* |
2153 | F: include/linux/input/cyttsp.h | 2156 | F: include/linux/input/cyttsp.h |
2154 | 2157 | ||
2155 | DAMA SLAVE for AX.25 | 2158 | DAMA SLAVE for AX.25 |
2156 | M: Joerg Reuter <jreuter@yaina.de> | 2159 | M: Joerg Reuter <jreuter@yaina.de> |
@@ -2270,7 +2273,7 @@ F: include/linux/device-mapper.h | |||
2270 | F: include/linux/dm-*.h | 2273 | F: include/linux/dm-*.h |
2271 | 2274 | ||
2272 | DIOLAN U2C-12 I2C DRIVER | 2275 | DIOLAN U2C-12 I2C DRIVER |
2273 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 2276 | M: Guenter Roeck <linux@roeck-us.net> |
2274 | L: linux-i2c@vger.kernel.org | 2277 | L: linux-i2c@vger.kernel.org |
2275 | S: Maintained | 2278 | S: Maintained |
2276 | F: drivers/i2c/busses/i2c-diolan-u2c.c | 2279 | F: drivers/i2c/busses/i2c-diolan-u2c.c |
@@ -2930,6 +2933,13 @@ F: Documentation/power/freezing-of-tasks.txt | |||
2930 | F: include/linux/freezer.h | 2933 | F: include/linux/freezer.h |
2931 | F: kernel/freezer.c | 2934 | F: kernel/freezer.c |
2932 | 2935 | ||
2936 | FRONTSWAP API | ||
2937 | M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | ||
2938 | L: linux-kernel@vger.kernel.org | ||
2939 | S: Maintained | ||
2940 | F: mm/frontswap.c | ||
2941 | F: include/linux/frontswap.h | ||
2942 | |||
2933 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS | 2943 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS |
2934 | M: David Howells <dhowells@redhat.com> | 2944 | M: David Howells <dhowells@redhat.com> |
2935 | L: linux-cachefs@redhat.com | 2945 | L: linux-cachefs@redhat.com |
@@ -3138,7 +3148,7 @@ F: drivers/tty/hvc/ | |||
3138 | 3148 | ||
3139 | HARDWARE MONITORING | 3149 | HARDWARE MONITORING |
3140 | M: Jean Delvare <khali@linux-fr.org> | 3150 | M: Jean Delvare <khali@linux-fr.org> |
3141 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 3151 | M: Guenter Roeck <linux@roeck-us.net> |
3142 | L: lm-sensors@lm-sensors.org | 3152 | L: lm-sensors@lm-sensors.org |
3143 | W: http://www.lm-sensors.org/ | 3153 | W: http://www.lm-sensors.org/ |
3144 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ | 3154 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ |
@@ -4096,6 +4106,8 @@ F: drivers/scsi/53c700* | |||
4096 | LED SUBSYSTEM | 4106 | LED SUBSYSTEM |
4097 | M: Bryan Wu <bryan.wu@canonical.com> | 4107 | M: Bryan Wu <bryan.wu@canonical.com> |
4098 | M: Richard Purdie <rpurdie@rpsys.net> | 4108 | M: Richard Purdie <rpurdie@rpsys.net> |
4109 | L: linux-leds@vger.kernel.org | ||
4110 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git | ||
4099 | S: Maintained | 4111 | S: Maintained |
4100 | F: drivers/leds/ | 4112 | F: drivers/leds/ |
4101 | F: include/linux/leds.h | 4113 | F: include/linux/leds.h |
@@ -4340,7 +4352,8 @@ MAC80211 | |||
4340 | M: Johannes Berg <johannes@sipsolutions.net> | 4352 | M: Johannes Berg <johannes@sipsolutions.net> |
4341 | L: linux-wireless@vger.kernel.org | 4353 | L: linux-wireless@vger.kernel.org |
4342 | W: http://linuxwireless.org/ | 4354 | W: http://linuxwireless.org/ |
4343 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git | 4355 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git |
4356 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
4344 | S: Maintained | 4357 | S: Maintained |
4345 | F: Documentation/networking/mac80211-injection.txt | 4358 | F: Documentation/networking/mac80211-injection.txt |
4346 | F: include/net/mac80211.h | 4359 | F: include/net/mac80211.h |
@@ -4351,7 +4364,8 @@ M: Stefano Brivio <stefano.brivio@polimi.it> | |||
4351 | M: Mattias Nissler <mattias.nissler@gmx.de> | 4364 | M: Mattias Nissler <mattias.nissler@gmx.de> |
4352 | L: linux-wireless@vger.kernel.org | 4365 | L: linux-wireless@vger.kernel.org |
4353 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID | 4366 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID |
4354 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git | 4367 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git |
4368 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
4355 | S: Maintained | 4369 | S: Maintained |
4356 | F: net/mac80211/rc80211_pid* | 4370 | F: net/mac80211/rc80211_pid* |
4357 | 4371 | ||
@@ -4411,6 +4425,13 @@ S: Orphan | |||
4411 | F: drivers/video/matrox/matroxfb_* | 4425 | F: drivers/video/matrox/matroxfb_* |
4412 | F: include/linux/matroxfb.h | 4426 | F: include/linux/matroxfb.h |
4413 | 4427 | ||
4428 | MAX16065 HARDWARE MONITOR DRIVER | ||
4429 | M: Guenter Roeck <linux@roeck-us.net> | ||
4430 | L: lm-sensors@lm-sensors.org | ||
4431 | S: Maintained | ||
4432 | F: Documentation/hwmon/max16065 | ||
4433 | F: drivers/hwmon/max16065.c | ||
4434 | |||
4414 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER | 4435 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER |
4415 | M: "Hans J. Koch" <hjk@hansjkoch.de> | 4436 | M: "Hans J. Koch" <hjk@hansjkoch.de> |
4416 | L: lm-sensors@lm-sensors.org | 4437 | L: lm-sensors@lm-sensors.org |
@@ -5149,7 +5170,7 @@ F: drivers/leds/leds-pca9532.c | |||
5149 | F: include/linux/leds-pca9532.h | 5170 | F: include/linux/leds-pca9532.h |
5150 | 5171 | ||
5151 | PCA9541 I2C BUS MASTER SELECTOR DRIVER | 5172 | PCA9541 I2C BUS MASTER SELECTOR DRIVER |
5152 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 5173 | M: Guenter Roeck <linux@roeck-us.net> |
5153 | L: linux-i2c@vger.kernel.org | 5174 | L: linux-i2c@vger.kernel.org |
5154 | S: Maintained | 5175 | S: Maintained |
5155 | F: drivers/i2c/muxes/i2c-mux-pca9541.c | 5176 | F: drivers/i2c/muxes/i2c-mux-pca9541.c |
@@ -5169,7 +5190,7 @@ S: Maintained | |||
5169 | F: drivers/firmware/pcdp.* | 5190 | F: drivers/firmware/pcdp.* |
5170 | 5191 | ||
5171 | PCI ERROR RECOVERY | 5192 | PCI ERROR RECOVERY |
5172 | M: Linas Vepstas <linasvepstas@gmail.com> | 5193 | M: Linas Vepstas <linasvepstas@gmail.com> |
5173 | L: linux-pci@vger.kernel.org | 5194 | L: linux-pci@vger.kernel.org |
5174 | S: Supported | 5195 | S: Supported |
5175 | F: Documentation/PCI/pci-error-recovery.txt | 5196 | F: Documentation/PCI/pci-error-recovery.txt |
@@ -5275,7 +5296,7 @@ S: Maintained | |||
5275 | F: drivers/pinctrl/ | 5296 | F: drivers/pinctrl/ |
5276 | 5297 | ||
5277 | PIN CONTROLLER - ST SPEAR | 5298 | PIN CONTROLLER - ST SPEAR |
5278 | M: Viresh Kumar <viresh.kumar@st.com> | 5299 | M: Viresh Kumar <viresh.linux@gmail.com> |
5279 | L: spear-devel@list.st.com | 5300 | L: spear-devel@list.st.com |
5280 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 5301 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
5281 | W: http://www.st.com/spear | 5302 | W: http://www.st.com/spear |
@@ -5299,7 +5320,7 @@ F: drivers/video/fb-puv3.c | |||
5299 | F: drivers/rtc/rtc-puv3.c | 5320 | F: drivers/rtc/rtc-puv3.c |
5300 | 5321 | ||
5301 | PMBUS HARDWARE MONITORING DRIVERS | 5322 | PMBUS HARDWARE MONITORING DRIVERS |
5302 | M: Guenter Roeck <guenter.roeck@ericsson.com> | 5323 | M: Guenter Roeck <linux@roeck-us.net> |
5303 | L: lm-sensors@lm-sensors.org | 5324 | L: lm-sensors@lm-sensors.org |
5304 | W: http://www.lm-sensors.org/ | 5325 | W: http://www.lm-sensors.org/ |
5305 | W: http://www.roeck-us.net/linux/drivers/ | 5326 | W: http://www.roeck-us.net/linux/drivers/ |
@@ -5695,6 +5716,9 @@ F: include/linux/remoteproc.h | |||
5695 | RFKILL | 5716 | RFKILL |
5696 | M: Johannes Berg <johannes@sipsolutions.net> | 5717 | M: Johannes Berg <johannes@sipsolutions.net> |
5697 | L: linux-wireless@vger.kernel.org | 5718 | L: linux-wireless@vger.kernel.org |
5719 | W: http://wireless.kernel.org/ | ||
5720 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git | ||
5721 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
5698 | S: Maintained | 5722 | S: Maintained |
5699 | F: Documentation/rfkill.txt | 5723 | F: Documentation/rfkill.txt |
5700 | F: net/rfkill/ | 5724 | F: net/rfkill/ |
@@ -5849,7 +5873,7 @@ S: Maintained | |||
5849 | F: drivers/tty/serial | 5873 | F: drivers/tty/serial |
5850 | 5874 | ||
5851 | SYNOPSYS DESIGNWARE DMAC DRIVER | 5875 | SYNOPSYS DESIGNWARE DMAC DRIVER |
5852 | M: Viresh Kumar <viresh.kumar@st.com> | 5876 | M: Viresh Kumar <viresh.linux@gmail.com> |
5853 | S: Maintained | 5877 | S: Maintained |
5854 | F: include/linux/dw_dmac.h | 5878 | F: include/linux/dw_dmac.h |
5855 | F: drivers/dma/dw_dmac_regs.h | 5879 | F: drivers/dma/dw_dmac_regs.h |
@@ -5997,7 +6021,7 @@ S: Maintained | |||
5997 | F: drivers/mmc/host/sdhci-s3c.c | 6021 | F: drivers/mmc/host/sdhci-s3c.c |
5998 | 6022 | ||
5999 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER | 6023 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER |
6000 | M: Viresh Kumar <viresh.kumar@st.com> | 6024 | M: Viresh Kumar <viresh.linux@gmail.com> |
6001 | L: spear-devel@list.st.com | 6025 | L: spear-devel@list.st.com |
6002 | L: linux-mmc@vger.kernel.org | 6026 | L: linux-mmc@vger.kernel.org |
6003 | S: Maintained | 6027 | S: Maintained |
@@ -6353,7 +6377,7 @@ S: Maintained | |||
6353 | F: include/linux/compiler.h | 6377 | F: include/linux/compiler.h |
6354 | 6378 | ||
6355 | SPEAR PLATFORM SUPPORT | 6379 | SPEAR PLATFORM SUPPORT |
6356 | M: Viresh Kumar <viresh.kumar@st.com> | 6380 | M: Viresh Kumar <viresh.linux@gmail.com> |
6357 | M: Shiraz Hashim <shiraz.hashim@st.com> | 6381 | M: Shiraz Hashim <shiraz.hashim@st.com> |
6358 | L: spear-devel@list.st.com | 6382 | L: spear-devel@list.st.com |
6359 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 6383 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -6362,7 +6386,7 @@ S: Maintained | |||
6362 | F: arch/arm/plat-spear/ | 6386 | F: arch/arm/plat-spear/ |
6363 | 6387 | ||
6364 | SPEAR13XX MACHINE SUPPORT | 6388 | SPEAR13XX MACHINE SUPPORT |
6365 | M: Viresh Kumar <viresh.kumar@st.com> | 6389 | M: Viresh Kumar <viresh.linux@gmail.com> |
6366 | M: Shiraz Hashim <shiraz.hashim@st.com> | 6390 | M: Shiraz Hashim <shiraz.hashim@st.com> |
6367 | L: spear-devel@list.st.com | 6391 | L: spear-devel@list.st.com |
6368 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 6392 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -6371,7 +6395,7 @@ S: Maintained | |||
6371 | F: arch/arm/mach-spear13xx/ | 6395 | F: arch/arm/mach-spear13xx/ |
6372 | 6396 | ||
6373 | SPEAR3XX MACHINE SUPPORT | 6397 | SPEAR3XX MACHINE SUPPORT |
6374 | M: Viresh Kumar <viresh.kumar@st.com> | 6398 | M: Viresh Kumar <viresh.linux@gmail.com> |
6375 | M: Shiraz Hashim <shiraz.hashim@st.com> | 6399 | M: Shiraz Hashim <shiraz.hashim@st.com> |
6376 | L: spear-devel@list.st.com | 6400 | L: spear-devel@list.st.com |
6377 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 6401 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -6382,7 +6406,7 @@ F: arch/arm/mach-spear3xx/ | |||
6382 | SPEAR6XX MACHINE SUPPORT | 6406 | SPEAR6XX MACHINE SUPPORT |
6383 | M: Rajeev Kumar <rajeev-dlh.kumar@st.com> | 6407 | M: Rajeev Kumar <rajeev-dlh.kumar@st.com> |
6384 | M: Shiraz Hashim <shiraz.hashim@st.com> | 6408 | M: Shiraz Hashim <shiraz.hashim@st.com> |
6385 | M: Viresh Kumar <viresh.kumar@st.com> | 6409 | M: Viresh Kumar <viresh.linux@gmail.com> |
6386 | L: spear-devel@list.st.com | 6410 | L: spear-devel@list.st.com |
6387 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 6411 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
6388 | W: http://www.st.com/spear | 6412 | W: http://www.st.com/spear |
@@ -6390,7 +6414,7 @@ S: Maintained | |||
6390 | F: arch/arm/mach-spear6xx/ | 6414 | F: arch/arm/mach-spear6xx/ |
6391 | 6415 | ||
6392 | SPEAR CLOCK FRAMEWORK SUPPORT | 6416 | SPEAR CLOCK FRAMEWORK SUPPORT |
6393 | M: Viresh Kumar <viresh.kumar@st.com> | 6417 | M: Viresh Kumar <viresh.linux@gmail.com> |
6394 | L: spear-devel@list.st.com | 6418 | L: spear-devel@list.st.com |
6395 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 6419 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
6396 | W: http://www.st.com/spear | 6420 | W: http://www.st.com/spear |
@@ -7291,11 +7315,11 @@ F: Documentation/DocBook/uio-howto.tmpl | |||
7291 | F: drivers/uio/ | 7315 | F: drivers/uio/ |
7292 | F: include/linux/uio*.h | 7316 | F: include/linux/uio*.h |
7293 | 7317 | ||
7294 | UTIL-LINUX-NG PACKAGE | 7318 | UTIL-LINUX PACKAGE |
7295 | M: Karel Zak <kzak@redhat.com> | 7319 | M: Karel Zak <kzak@redhat.com> |
7296 | L: util-linux-ng@vger.kernel.org | 7320 | L: util-linux@vger.kernel.org |
7297 | W: http://kernel.org/~kzak/util-linux-ng/ | 7321 | W: http://en.wikipedia.org/wiki/Util-linux |
7298 | T: git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git | 7322 | T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git |
7299 | S: Maintained | 7323 | S: Maintained |
7300 | 7324 | ||
7301 | UVESAFB DRIVER | 7325 | UVESAFB DRIVER |
@@ -7397,7 +7421,7 @@ F: include/linux/vlynq.h | |||
7397 | 7421 | ||
7398 | VME SUBSYSTEM | 7422 | VME SUBSYSTEM |
7399 | M: Martyn Welch <martyn.welch@ge.com> | 7423 | M: Martyn Welch <martyn.welch@ge.com> |
7400 | M: Manohar Vanga <manohar.vanga@cern.ch> | 7424 | M: Manohar Vanga <manohar.vanga@gmail.com> |
7401 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 7425 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
7402 | L: devel@driverdev.osuosl.org | 7426 | L: devel@driverdev.osuosl.org |
7403 | S: Maintained | 7427 | S: Maintained |
diff --git a/Makefile b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 5 | 2 | PATCHLEVEL = 5 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc4 |
5 | NAME = Saber-toothed Squirrel | 5 | NAME = Saber-toothed Squirrel |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -561,6 +561,8 @@ else | |||
561 | KBUILD_CFLAGS += -O2 | 561 | KBUILD_CFLAGS += -O2 |
562 | endif | 562 | endif |
563 | 563 | ||
564 | include $(srctree)/arch/$(SRCARCH)/Makefile | ||
565 | |||
564 | ifdef CONFIG_READABLE_ASM | 566 | ifdef CONFIG_READABLE_ASM |
565 | # Disable optimizations that make assembler listings hard to read. | 567 | # Disable optimizations that make assembler listings hard to read. |
566 | # reorder blocks reorders the control in the function | 568 | # reorder blocks reorders the control in the function |
@@ -571,8 +573,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-reorder-blocks,) \ | |||
571 | $(call cc-option,-fno-partial-inlining) | 573 | $(call cc-option,-fno-partial-inlining) |
572 | endif | 574 | endif |
573 | 575 | ||
574 | include $(srctree)/arch/$(SRCARCH)/Makefile | ||
575 | |||
576 | ifneq ($(CONFIG_FRAME_WARN),0) | 576 | ifneq ($(CONFIG_FRAME_WARN),0) |
577 | KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) | 577 | KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) |
578 | endif | 578 | endif |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b649c5904a4f..a91009c61870 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -7,7 +7,6 @@ config ARM | |||
7 | select HAVE_IDE if PCI || ISA || PCMCIA | 7 | select HAVE_IDE if PCI || ISA || PCMCIA |
8 | select HAVE_DMA_ATTRS | 8 | select HAVE_DMA_ATTRS |
9 | select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) | 9 | select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) |
10 | select CMA if (CPU_V6 || CPU_V6K || CPU_V7) | ||
11 | select HAVE_MEMBLOCK | 10 | select HAVE_MEMBLOCK |
12 | select RTC_LIB | 11 | select RTC_LIB |
13 | select SYS_SUPPORTS_APM_EMULATION | 12 | select SYS_SUPPORTS_APM_EMULATION |
@@ -294,6 +293,7 @@ config ARCH_VERSATILE | |||
294 | select ICST | 293 | select ICST |
295 | select GENERIC_CLOCKEVENTS | 294 | select GENERIC_CLOCKEVENTS |
296 | select ARCH_WANT_OPTIONAL_GPIOLIB | 295 | select ARCH_WANT_OPTIONAL_GPIOLIB |
296 | select NEED_MACH_IO_H if PCI | ||
297 | select PLAT_VERSATILE | 297 | select PLAT_VERSATILE |
298 | select PLAT_VERSATILE_CLCD | 298 | select PLAT_VERSATILE_CLCD |
299 | select PLAT_VERSATILE_FPGA_IRQ | 299 | select PLAT_VERSATILE_FPGA_IRQ |
@@ -589,6 +589,7 @@ config ARCH_ORION5X | |||
589 | select PCI | 589 | select PCI |
590 | select ARCH_REQUIRE_GPIOLIB | 590 | select ARCH_REQUIRE_GPIOLIB |
591 | select GENERIC_CLOCKEVENTS | 591 | select GENERIC_CLOCKEVENTS |
592 | select NEED_MACH_IO_H | ||
592 | select PLAT_ORION | 593 | select PLAT_ORION |
593 | help | 594 | help |
594 | Support for the following Marvell Orion 5x series SoCs: | 595 | Support for the following Marvell Orion 5x series SoCs: |
diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts index 153a4b2d12b5..c9b4f27d191e 100644 --- a/arch/arm/boot/dts/mmp2-brownstone.dts +++ b/arch/arm/boot/dts/mmp2-brownstone.dts | |||
@@ -11,7 +11,7 @@ | |||
11 | /include/ "mmp2.dtsi" | 11 | /include/ "mmp2.dtsi" |
12 | 12 | ||
13 | / { | 13 | / { |
14 | model = "Marvell MMP2 Aspenite Development Board"; | 14 | model = "Marvell MMP2 Brownstone Development Board"; |
15 | compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2"; | 15 | compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2"; |
16 | 16 | ||
17 | chosen { | 17 | chosen { |
@@ -19,7 +19,7 @@ | |||
19 | }; | 19 | }; |
20 | 20 | ||
21 | memory { | 21 | memory { |
22 | reg = <0x00000000 0x04000000>; | 22 | reg = <0x00000000 0x08000000>; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | soc { | 25 | soc { |
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index f2ab4ea7cc0e..581cb081cb0f 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi | |||
@@ -44,6 +44,8 @@ | |||
44 | compatible = "ti,omap2-intc"; | 44 | compatible = "ti,omap2-intc"; |
45 | interrupt-controller; | 45 | interrupt-controller; |
46 | #interrupt-cells = <1>; | 46 | #interrupt-cells = <1>; |
47 | ti,intc-size = <96>; | ||
48 | reg = <0x480FE000 0x1000>; | ||
47 | }; | 49 | }; |
48 | 50 | ||
49 | uart1: serial@4806a000 { | 51 | uart1: serial@4806a000 { |
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts index 8314e4171884..dd4358bc26e2 100644 --- a/arch/arm/boot/dts/spear1310-evb.dts +++ b/arch/arm/boot/dts/spear1310-evb.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr1310 Evaluation Board | 2 | * DTS file for SPEAr1310 Evaluation Board |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi index 9e61da404d57..419ea7413d23 100644 --- a/arch/arm/boot/dts/spear1310.dtsi +++ b/arch/arm/boot/dts/spear1310.dtsi | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for all SPEAr1310 SoCs | 2 | * DTS file for all SPEAr1310 SoCs |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts index 0d8472e5ab9f..c9a54e06fb68 100644 --- a/arch/arm/boot/dts/spear1340-evb.dts +++ b/arch/arm/boot/dts/spear1340-evb.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr1340 Evaluation Board | 2 | * DTS file for SPEAr1340 Evaluation Board |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index a26fc47a55e8..d71fe2a68f09 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for all SPEAr1340 SoCs | 2 | * DTS file for all SPEAr1340 SoCs |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index 1f8e1e1481df..10dcec7e7321 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for all SPEAr13xx SoCs | 2 | * DTS file for all SPEAr13xx SoCs |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts index fc82b1a26458..d71b8d581e3d 100644 --- a/arch/arm/boot/dts/spear300-evb.dts +++ b/arch/arm/boot/dts/spear300-evb.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr300 Evaluation Board | 2 | * DTS file for SPEAr300 Evaluation Board |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi index 01c5e358fdb2..ed3627c116cc 100644 --- a/arch/arm/boot/dts/spear300.dtsi +++ b/arch/arm/boot/dts/spear300.dtsi | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr300 SoC | 2 | * DTS file for SPEAr300 SoC |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts index dc5e2d445a93..b00544e0cd5d 100644 --- a/arch/arm/boot/dts/spear310-evb.dts +++ b/arch/arm/boot/dts/spear310-evb.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr310 Evaluation Board | 2 | * DTS file for SPEAr310 Evaluation Board |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi index e47081c494d9..62fc4fb3e5f9 100644 --- a/arch/arm/boot/dts/spear310.dtsi +++ b/arch/arm/boot/dts/spear310.dtsi | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr310 SoC | 2 | * DTS file for SPEAr310 SoC |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts index 6308fa3bec1e..c13fd1f3b09f 100644 --- a/arch/arm/boot/dts/spear320-evb.dts +++ b/arch/arm/boot/dts/spear320-evb.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr320 Evaluation Board | 2 | * DTS file for SPEAr320 Evaluation Board |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi index 5372ca399b1f..1f49d69595a0 100644 --- a/arch/arm/boot/dts/spear320.dtsi +++ b/arch/arm/boot/dts/spear320.dtsi | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for SPEAr320 SoC | 2 | * DTS file for SPEAr320 SoC |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi index 91072553963f..3a8bb5736928 100644 --- a/arch/arm/boot/dts/spear3xx.dtsi +++ b/arch/arm/boot/dts/spear3xx.dtsi | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * DTS file for all SPEAr3xx SoCs | 2 | * DTS file for all SPEAr3xx SoCs |
3 | * | 3 | * |
4 | * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> | 4 | * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> |
5 | * | 5 | * |
6 | * The code contained herein is licensed under the GNU General Public | 6 | * The code contained herein is licensed under the GNU General Public |
7 | * License. You may obtain a copy of the GNU General Public License | 7 | * License. You may obtain a copy of the GNU General Public License |
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 9d7eb530f95f..aa07f5938f05 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
366 | struct safe_buffer *buf; | 366 | struct safe_buffer *buf; |
367 | unsigned long off; | 367 | unsigned long off; |
368 | 368 | ||
369 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 369 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
370 | __func__, addr, off, sz, dir); | 370 | __func__, addr, sz, dir); |
371 | 371 | ||
372 | buf = find_safe_buffer_dev(dev, addr, __func__); | 372 | buf = find_safe_buffer_dev(dev, addr, __func__); |
373 | if (!buf) | 373 | if (!buf) |
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
377 | 377 | ||
378 | BUG_ON(buf->direction != dir); | 378 | BUG_ON(buf->direction != dir); |
379 | 379 | ||
380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
382 | buf->safe, buf->safe_dma_addr); | 382 | buf->safe, buf->safe_dma_addr); |
383 | 383 | ||
384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
406 | struct safe_buffer *buf; | 406 | struct safe_buffer *buf; |
407 | unsigned long off; | 407 | unsigned long off; |
408 | 408 | ||
409 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 409 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
410 | __func__, addr, off, sz, dir); | 410 | __func__, addr, sz, dir); |
411 | 411 | ||
412 | buf = find_safe_buffer_dev(dev, addr, __func__); | 412 | buf = find_safe_buffer_dev(dev, addr, __func__); |
413 | if (!buf) | 413 | if (!buf) |
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
417 | 417 | ||
418 | BUG_ON(buf->direction != dir); | 418 | BUG_ON(buf->direction != dir); |
419 | 419 | ||
420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
422 | buf->safe, buf->safe_dma_addr); | 422 | buf->safe, buf->safe_dma_addr); |
423 | 423 | ||
424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 7be54690aeec..e42cf597f6e6 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h | |||
@@ -19,6 +19,7 @@ | |||
19 | " .long 1b, 4f, 2b, 4f\n" \ | 19 | " .long 1b, 4f, 2b, 4f\n" \ |
20 | " .popsection\n" \ | 20 | " .popsection\n" \ |
21 | " .pushsection .fixup,\"ax\"\n" \ | 21 | " .pushsection .fixup,\"ax\"\n" \ |
22 | " .align 2\n" \ | ||
22 | "4: mov %0, " err_reg "\n" \ | 23 | "4: mov %0, " err_reg "\n" \ |
23 | " b 3b\n" \ | 24 | " b 3b\n" \ |
24 | " .popsection" | 25 | " .popsection" |
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index e0d1c0cfa548..6b9b077d86b3 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * ARM PrimeXsys System Controller SP810 header file | 4 | * ARM PrimeXsys System Controller SP810 header file |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 437f0c426517..0d1851ca6eb9 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -495,6 +495,7 @@ ENDPROC(__und_usr) | |||
495 | * The out of line fixup for the ldrt above. | 495 | * The out of line fixup for the ldrt above. |
496 | */ | 496 | */ |
497 | .pushsection .fixup, "ax" | 497 | .pushsection .fixup, "ax" |
498 | .align 2 | ||
498 | 4: mov pc, r9 | 499 | 4: mov pc, r9 |
499 | .popsection | 500 | .popsection |
500 | .pushsection __ex_table,"a" | 501 | .pushsection __ex_table,"a" |
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c index 8f96ec778e8d..6123daf397a7 100644 --- a/arch/arm/kernel/kprobes-thumb.c +++ b/arch/arm/kernel/kprobes-thumb.c | |||
@@ -660,7 +660,7 @@ static const union decode_item t32_table_1111_100x[] = { | |||
660 | /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */ | 660 | /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */ |
661 | /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */ | 661 | /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */ |
662 | /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */ | 662 | /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */ |
663 | DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal, | 663 | DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal, |
664 | REGS(PC, NOSPPCX, 0, 0, 0)), | 664 | REGS(PC, NOSPPCX, 0, 0, 0)), |
665 | 665 | ||
666 | /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */ | 666 | /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */ |
diff --git a/arch/arm/mach-highbank/Makefile b/arch/arm/mach-highbank/Makefile index f8437dd238c2..ded4652ada80 100644 --- a/arch/arm/mach-highbank/Makefile +++ b/arch/arm/mach-highbank/Makefile | |||
@@ -1,4 +1,8 @@ | |||
1 | obj-y := clock.o highbank.o system.o | 1 | obj-y := clock.o highbank.o system.o smc.o |
2 | |||
3 | plus_sec := $(call as-instr,.arch_extension sec,+sec) | ||
4 | AFLAGS_smc.o :=-Wa,-march=armv7-a$(plus_sec) | ||
5 | |||
2 | obj-$(CONFIG_DEBUG_HIGHBANK_UART) += lluart.o | 6 | obj-$(CONFIG_DEBUG_HIGHBANK_UART) += lluart.o |
3 | obj-$(CONFIG_SMP) += platsmp.o | 7 | obj-$(CONFIG_SMP) += platsmp.o |
4 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o | 8 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o |
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h index d8e2d0be64ac..141ed5171826 100644 --- a/arch/arm/mach-highbank/core.h +++ b/arch/arm/mach-highbank/core.h | |||
@@ -8,3 +8,4 @@ extern void highbank_lluart_map_io(void); | |||
8 | static inline void highbank_lluart_map_io(void) {} | 8 | static inline void highbank_lluart_map_io(void) {} |
9 | #endif | 9 | #endif |
10 | 10 | ||
11 | extern void highbank_smc1(int fn, int arg); | ||
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 410a112bb52e..8777612b1a42 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
@@ -85,10 +85,24 @@ const static struct of_device_id irq_match[] = { | |||
85 | {} | 85 | {} |
86 | }; | 86 | }; |
87 | 87 | ||
88 | #ifdef CONFIG_CACHE_L2X0 | ||
89 | static void highbank_l2x0_disable(void) | ||
90 | { | ||
91 | /* Disable PL310 L2 Cache controller */ | ||
92 | highbank_smc1(0x102, 0x0); | ||
93 | } | ||
94 | #endif | ||
95 | |||
88 | static void __init highbank_init_irq(void) | 96 | static void __init highbank_init_irq(void) |
89 | { | 97 | { |
90 | of_irq_init(irq_match); | 98 | of_irq_init(irq_match); |
99 | |||
100 | #ifdef CONFIG_CACHE_L2X0 | ||
101 | /* Enable PL310 L2 Cache controller */ | ||
102 | highbank_smc1(0x102, 0x1); | ||
91 | l2x0_of_init(0, ~0UL); | 103 | l2x0_of_init(0, ~0UL); |
104 | outer_cache.disable = highbank_l2x0_disable; | ||
105 | #endif | ||
92 | } | 106 | } |
93 | 107 | ||
94 | static void __init highbank_timer_init(void) | 108 | static void __init highbank_timer_init(void) |
diff --git a/arch/arm/mach-highbank/smc.S b/arch/arm/mach-highbank/smc.S new file mode 100644 index 000000000000..407d17baaaa9 --- /dev/null +++ b/arch/arm/mach-highbank/smc.S | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * Copied from omap44xx-smc.S Copyright (C) 2010 Texas Instruments, Inc. | ||
3 | * Copyright 2012 Calxeda, Inc. | ||
4 | * | ||
5 | * This program is free software, you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/linkage.h> | ||
11 | |||
12 | /* | ||
13 | * This is a common routine to manage the secure monitor API | ||
14 | * used to modify the PL310 secure registers. | ||
15 | * 'r0' contains the value to be modified and 'r12' contains | ||
16 | * the monitor API number. | ||
17 | * Function signature : void highbank_smc1(u32 fn, u32 arg) | ||
18 | */ | ||
19 | |||
20 | ENTRY(highbank_smc1) | ||
21 | stmfd sp!, {r4-r11, lr} | ||
22 | mov r12, r0 | ||
23 | mov r0, r1 | ||
24 | dsb | ||
25 | smc #0 | ||
26 | ldmfd sp!, {r4-r11, pc} | ||
27 | ENDPROC(highbank_smc1) | ||
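The three Highbank hunks above belong together: core.h exports the new helper, highbank.c routes PL310 enable/disable through it, and smc.S performs the actual secure-monitor call (the function ID moves from r0 into r12, the argument from r1 into r0, and r4-r11/lr are preserved around smc #0). For readers skimming the diff, here is a condensed plain-C view of the calling side, assembled from the hunks above rather than written fresh:

    /* arch/arm/mach-highbank/core.h */
    extern void highbank_smc1(int fn, int arg);

    /* arch/arm/mach-highbank/highbank.c */
    #ifdef CONFIG_CACHE_L2X0
    static void highbank_l2x0_disable(void)
    {
            /* ask the secure monitor to switch the PL310 off */
            highbank_smc1(0x102, 0x0);
    }
    #endif

    static void __init highbank_init_irq(void)
    {
            of_irq_init(irq_match);
    #ifdef CONFIG_CACHE_L2X0
            /* switch the PL310 on via the monitor, then set it up */
            highbank_smc1(0x102, 0x1);
            l2x0_of_init(0, ~0UL);
            outer_cache.disable = highbank_l2x0_disable;
    #endif
    }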
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 0021f726b153..eff4db5de0dd 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig | |||
@@ -477,6 +477,7 @@ config MACH_MX31_3DS | |||
477 | select IMX_HAVE_PLATFORM_IMX2_WDT | 477 | select IMX_HAVE_PLATFORM_IMX2_WDT |
478 | select IMX_HAVE_PLATFORM_IMX_I2C | 478 | select IMX_HAVE_PLATFORM_IMX_I2C |
479 | select IMX_HAVE_PLATFORM_IMX_KEYPAD | 479 | select IMX_HAVE_PLATFORM_IMX_KEYPAD |
480 | select IMX_HAVE_PLATFORM_IMX_SSI | ||
480 | select IMX_HAVE_PLATFORM_IMX_UART | 481 | select IMX_HAVE_PLATFORM_IMX_UART |
481 | select IMX_HAVE_PLATFORM_IPU_CORE | 482 | select IMX_HAVE_PLATFORM_IPU_CORE |
482 | select IMX_HAVE_PLATFORM_MXC_EHCI | 483 | select IMX_HAVE_PLATFORM_MXC_EHCI |
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c index 0f0beb580b73..516ddee1948e 100644 --- a/arch/arm/mach-imx/clk-imx1.c +++ b/arch/arm/mach-imx/clk-imx1.c | |||
@@ -108,8 +108,7 @@ int __init mx1_clocks_init(unsigned long fref) | |||
108 | clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0"); | 108 | clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0"); |
109 | clk_register_clkdev(clk[clko], "clko", NULL); | 109 | clk_register_clkdev(clk[clko], "clko", NULL); |
110 | 110 | ||
111 | mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), | 111 | mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT); |
112 | MX1_TIM1_INT); | ||
113 | 112 | ||
114 | return 0; | 113 | return 0; |
115 | } | 114 | } |
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c index 4e4f384ee8dd..ea13e61bd5f3 100644 --- a/arch/arm/mach-imx/clk-imx21.c +++ b/arch/arm/mach-imx/clk-imx21.c | |||
@@ -180,7 +180,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href) | |||
180 | clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL); | 180 | clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL); |
181 | clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL); | 181 | clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL); |
182 | 182 | ||
183 | mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), | 183 | mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1); |
184 | MX21_INT_GPT1); | 184 | |
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c index d9833bb5fd61..fdd8cc87c9fe 100644 --- a/arch/arm/mach-imx/clk-imx25.c +++ b/arch/arm/mach-imx/clk-imx25.c | |||
@@ -243,6 +243,6 @@ int __init mx25_clocks_init(void) | |||
243 | clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma"); | 243 | clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma"); |
244 | clk_register_clkdev(clk[iim_ipg], "iim", NULL); | 244 | clk_register_clkdev(clk[iim_ipg], "iim", NULL); |
245 | 245 | ||
246 | mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); | 246 | mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); |
247 | return 0; | 247 | return 0; |
248 | } | 248 | } |
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c index 50a7ebd8d1b2..295cbd7c08dc 100644 --- a/arch/arm/mach-imx/clk-imx27.c +++ b/arch/arm/mach-imx/clk-imx27.c | |||
@@ -263,8 +263,7 @@ int __init mx27_clocks_init(unsigned long fref) | |||
263 | clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0"); | 263 | clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0"); |
264 | clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1"); | 264 | clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1"); |
265 | 265 | ||
266 | mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), | 266 | mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1); |
267 | MX27_INT_GPT1); | ||
268 | 267 | ||
269 | clk_prepare_enable(clk[emi_ahb_gate]); | 268 | clk_prepare_enable(clk[emi_ahb_gate]); |
270 | 269 | ||
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c index a854b9cae5ea..c9a06d800f8e 100644 --- a/arch/arm/mach-imx/clk-imx31.c +++ b/arch/arm/mach-imx/clk-imx31.c | |||
@@ -175,8 +175,7 @@ int __init mx31_clocks_init(unsigned long fref) | |||
175 | mx31_revision(); | 175 | mx31_revision(); |
176 | clk_disable_unprepare(clk[iim_gate]); | 176 | clk_disable_unprepare(clk[iim_gate]); |
177 | 177 | ||
178 | mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), | 178 | mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT); |
179 | MX31_INT_GPT); | ||
180 | 179 | ||
181 | return 0; | 180 | return 0; |
182 | } | 181 | } |
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index a9e60bf7dd75..920a8cc42726 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c | |||
@@ -267,11 +267,9 @@ int __init mx35_clocks_init() | |||
267 | imx_print_silicon_rev("i.MX35", mx35_revision()); | 267 | imx_print_silicon_rev("i.MX35", mx35_revision()); |
268 | 268 | ||
269 | #ifdef CONFIG_MXC_USE_EPIT | 269 | #ifdef CONFIG_MXC_USE_EPIT |
270 | epit_timer_init(&epit1_clk, | 270 | epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1); |
271 | MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1); | ||
272 | #else | 271 | #else |
273 | mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), | 272 | mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); |
274 | MX35_INT_GPT); | ||
275 | #endif | 273 | #endif |
276 | 274 | ||
277 | return 0; | 275 | return 0; |
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c index fcd94f3b0f0e..a2200c77bf70 100644 --- a/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/arch/arm/mach-imx/clk-imx51-imx53.c | |||
@@ -104,12 +104,12 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, | |||
104 | periph_apm_sel, ARRAY_SIZE(periph_apm_sel)); | 104 | periph_apm_sel, ARRAY_SIZE(periph_apm_sel)); |
105 | clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1, | 105 | clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1, |
106 | main_bus_sel, ARRAY_SIZE(main_bus_sel)); | 106 | main_bus_sel, ARRAY_SIZE(main_bus_sel)); |
107 | clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1, | 107 | clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1, |
108 | per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel)); | 108 | per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel)); |
109 | clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2); | 109 | clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2); |
110 | clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3); | 110 | clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3); |
111 | clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3); | 111 | clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3); |
112 | clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0, | 112 | clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1, |
113 | per_root_sel, ARRAY_SIZE(per_root_sel)); | 113 | per_root_sel, ARRAY_SIZE(per_root_sel)); |
114 | clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3); | 114 | clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3); |
115 | clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28); | 115 | clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28); |
@@ -172,7 +172,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, | |||
172 | clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12); | 172 | clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12); |
173 | clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14); | 173 | clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14); |
174 | clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16); | 174 | clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16); |
175 | clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18); | 175 | clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18); |
176 | clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24); | 176 | clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24); |
177 | clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26); | 177 | clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26); |
178 | clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28); | 178 | clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28); |
@@ -366,8 +366,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc, | |||
366 | clk_set_rate(clk[esdhc_b_podf], 166250000); | 366 | clk_set_rate(clk[esdhc_b_podf], 166250000); |
367 | 367 | ||
368 | /* System timer */ | 368 | /* System timer */ |
369 | mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), | 369 | mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT); |
370 | MX51_INT_GPT); | ||
371 | 370 | ||
372 | clk_prepare_enable(clk[iim_gate]); | 371 | clk_prepare_enable(clk[iim_gate]); |
373 | imx_print_silicon_rev("i.MX51", mx51_revision()); | 372 | imx_print_silicon_rev("i.MX51", mx51_revision()); |
@@ -452,8 +451,7 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc, | |||
452 | clk_set_rate(clk[esdhc_b_podf], 200000000); | 451 | clk_set_rate(clk[esdhc_b_podf], 200000000); |
453 | 452 | ||
454 | /* System timer */ | 453 | /* System timer */ |
455 | mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), | 454 | mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT); |
456 | MX53_INT_GPT); | ||
457 | 455 | ||
458 | clk_prepare_enable(clk[iim_gate]); | 456 | clk_prepare_enable(clk[iim_gate]); |
459 | imx_print_silicon_rev("i.MX53", mx53_revision()); | 457 | imx_print_silicon_rev("i.MX53", mx53_revision()); |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index cab02d0a15d6..17dc66a085a5 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c | |||
@@ -122,10 +122,6 @@ static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5 | |||
122 | "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0", | 122 | "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0", |
123 | "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", }; | 123 | "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", }; |
124 | 124 | ||
125 | static const char * const clks_init_on[] __initconst = { | ||
126 | "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3", | ||
127 | }; | ||
128 | |||
129 | enum mx6q_clks { | 125 | enum mx6q_clks { |
130 | dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m, | 126 | dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m, |
131 | pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m, | 127 | pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m, |
@@ -161,11 +157,14 @@ enum mx6q_clks { | |||
161 | 157 | ||
162 | static struct clk *clk[clk_max]; | 158 | static struct clk *clk[clk_max]; |
163 | 159 | ||
160 | static enum mx6q_clks const clks_init_on[] __initconst = { | ||
161 | mmdc_ch0_axi, mmdc_ch1_axi, | ||
162 | }; | ||
163 | |||
164 | int __init mx6q_clocks_init(void) | 164 | int __init mx6q_clocks_init(void) |
165 | { | 165 | { |
166 | struct device_node *np; | 166 | struct device_node *np; |
167 | void __iomem *base; | 167 | void __iomem *base; |
168 | struct clk *c; | ||
169 | int i, irq; | 168 | int i, irq; |
170 | 169 | ||
171 | clk[dummy] = imx_clk_fixed("dummy", 0); | 170 | clk[dummy] = imx_clk_fixed("dummy", 0); |
@@ -424,21 +423,14 @@ int __init mx6q_clocks_init(void) | |||
424 | clk_register_clkdev(clk[ahb], "ahb", NULL); | 423 | clk_register_clkdev(clk[ahb], "ahb", NULL); |
425 | clk_register_clkdev(clk[cko1], "cko1", NULL); | 424 | clk_register_clkdev(clk[cko1], "cko1", NULL); |
426 | 425 | ||
427 | for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) { | 426 | for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) |
428 | c = clk_get_sys(clks_init_on[i], NULL); | 427 | clk_prepare_enable(clk[clks_init_on[i]]); |
429 | if (IS_ERR(c)) { | ||
430 | pr_err("%s: failed to get clk %s", __func__, | ||
431 | clks_init_on[i]); | ||
432 | return PTR_ERR(c); | ||
433 | } | ||
434 | clk_prepare_enable(c); | ||
435 | } | ||
436 | 428 | ||
437 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); | 429 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); |
438 | base = of_iomap(np, 0); | 430 | base = of_iomap(np, 0); |
439 | WARN_ON(!base); | 431 | WARN_ON(!base); |
440 | irq = irq_of_parse_and_map(np, 0); | 432 | irq = irq_of_parse_and_map(np, 0); |
441 | mxc_timer_init(NULL, base, irq); | 433 | mxc_timer_init(base, irq); |
442 | 434 | ||
443 | return 0; | 435 | return 0; |
444 | } | 436 | } |
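The clk-imx6q.c change above drops the string-based clk_get_sys() loop for the always-on clocks: clks_init_on[] becomes an array of mx6q_clks enum values and the init code indexes straight into clk[], so there is no name lookup left to fail and the old pr_err/PTR_ERR handling disappears with it. A small self-contained C sketch of that data layout (names invented for the demo, this is not kernel code):

    #include <stdio.h>

    /* hypothetical clock identifiers, mirroring the mx6q_clks enum style */
    enum demo_clks { clk_dummy, clk_mmdc_ch0_axi, clk_mmdc_ch1_axi, clk_usboh3, clk_max };

    struct clk { const char *name; int enabled; };

    static struct clk clk[clk_max] = {
            [clk_dummy]        = { "dummy", 0 },
            [clk_mmdc_ch0_axi] = { "mmdc_ch0_axi", 0 },
            [clk_mmdc_ch1_axi] = { "mmdc_ch1_axi", 0 },
            [clk_usboh3]       = { "usboh3", 0 },
    };

    /* clocks that must stay on from boot, listed by enum instead of by name */
    static const enum demo_clks clks_init_on[] = { clk_mmdc_ch0_axi, clk_mmdc_ch1_axi };

    static void clk_prepare_enable_demo(struct clk *c) { c->enabled = 1; }

    int main(void)
    {
            for (size_t i = 0; i < sizeof(clks_init_on) / sizeof(clks_init_on[0]); i++)
                    clk_prepare_enable_demo(&clk[clks_init_on[i]]);

            for (int i = 0; i < clk_max; i++)
                    printf("%-14s %s\n", clk[i].name, clk[i].enabled ? "on" : "off");
            return 0;
    }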
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c index 4685919deb63..0440379e3628 100644 --- a/arch/arm/mach-imx/clk-pllv2.c +++ b/arch/arm/mach-imx/clk-pllv2.c | |||
@@ -74,30 +74,15 @@ struct clk_pllv2 { | |||
74 | void __iomem *base; | 74 | void __iomem *base; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw, | 77 | static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate, |
78 | unsigned long parent_rate) | 78 | u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn) |
79 | { | 79 | { |
80 | long mfi, mfn, mfd, pdf, ref_clk, mfn_abs; | 80 | long mfi, mfn, mfd, pdf, ref_clk, mfn_abs; |
81 | unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl; | 81 | unsigned long dbl; |
82 | void __iomem *pllbase; | ||
83 | s64 temp; | 82 | s64 temp; |
84 | struct clk_pllv2 *pll = to_clk_pllv2(hw); | ||
85 | |||
86 | pllbase = pll->base; | ||
87 | 83 | ||
88 | dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL); | ||
89 | pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM; | ||
90 | dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN; | 84 | dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN; |
91 | 85 | ||
92 | if (pll_hfsm == 0) { | ||
93 | dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP); | ||
94 | dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD); | ||
95 | dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN); | ||
96 | } else { | ||
97 | dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP); | ||
98 | dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD); | ||
99 | dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN); | ||
100 | } | ||
101 | pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK; | 86 | pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK; |
102 | mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET; | 87 | mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET; |
103 | mfi = (mfi <= 5) ? 5 : mfi; | 88 | mfi = (mfi <= 5) ? 5 : mfi; |
@@ -123,18 +108,30 @@ static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw, | |||
123 | return temp; | 108 | return temp; |
124 | } | 109 | } |
125 | 110 | ||
126 | static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate, | 111 | static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw, |
127 | unsigned long parent_rate) | 112 | unsigned long parent_rate) |
128 | { | 113 | { |
114 | u32 dp_op, dp_mfd, dp_mfn, dp_ctl; | ||
115 | void __iomem *pllbase; | ||
129 | struct clk_pllv2 *pll = to_clk_pllv2(hw); | 116 | struct clk_pllv2 *pll = to_clk_pllv2(hw); |
117 | |||
118 | pllbase = pll->base; | ||
119 | |||
120 | dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL); | ||
121 | dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP); | ||
122 | dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD); | ||
123 | dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN); | ||
124 | |||
125 | return __clk_pllv2_recalc_rate(parent_rate, dp_ctl, dp_op, dp_mfd, dp_mfn); | ||
126 | } | ||
127 | |||
128 | static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate, | ||
129 | u32 *dp_op, u32 *dp_mfd, u32 *dp_mfn) | ||
130 | { | ||
130 | u32 reg; | 131 | u32 reg; |
131 | void __iomem *pllbase; | ||
132 | long mfi, pdf, mfn, mfd = 999999; | 132 | long mfi, pdf, mfn, mfd = 999999; |
133 | s64 temp64; | 133 | s64 temp64; |
134 | unsigned long quad_parent_rate; | 134 | unsigned long quad_parent_rate; |
135 | unsigned long pll_hfsm, dp_ctl; | ||
136 | |||
137 | pllbase = pll->base; | ||
138 | 135 | ||
139 | quad_parent_rate = 4 * parent_rate; | 136 | quad_parent_rate = 4 * parent_rate; |
140 | pdf = mfi = -1; | 137 | pdf = mfi = -1; |
@@ -144,25 +141,41 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate, | |||
144 | return -EINVAL; | 141 | return -EINVAL; |
145 | pdf--; | 142 | pdf--; |
146 | 143 | ||
147 | temp64 = rate * (pdf+1) - quad_parent_rate * mfi; | 144 | temp64 = rate * (pdf + 1) - quad_parent_rate * mfi; |
148 | do_div(temp64, quad_parent_rate/1000000); | 145 | do_div(temp64, quad_parent_rate / 1000000); |
149 | mfn = (long)temp64; | 146 | mfn = (long)temp64; |
150 | 147 | ||
148 | reg = mfi << 4 | pdf; | ||
149 | |||
150 | *dp_op = reg; | ||
151 | *dp_mfd = mfd; | ||
152 | *dp_mfn = mfn; | ||
153 | |||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate, | ||
158 | unsigned long parent_rate) | ||
159 | { | ||
160 | struct clk_pllv2 *pll = to_clk_pllv2(hw); | ||
161 | void __iomem *pllbase; | ||
162 | u32 dp_ctl, dp_op, dp_mfd, dp_mfn; | ||
163 | int ret; | ||
164 | |||
165 | pllbase = pll->base; | ||
166 | |||
167 | |||
168 | ret = __clk_pllv2_set_rate(rate, parent_rate, &dp_op, &dp_mfd, &dp_mfn); | ||
169 | if (ret) | ||
170 | return ret; | ||
171 | |||
151 | dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL); | 172 | dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL); |
152 | /* use dpdck0_2 */ | 173 | /* use dpdck0_2 */ |
153 | __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL); | 174 | __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL); |
154 | pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM; | 175 | |
155 | if (pll_hfsm == 0) { | 176 | __raw_writel(dp_op, pllbase + MXC_PLL_DP_OP); |
156 | reg = mfi << 4 | pdf; | 177 | __raw_writel(dp_mfd, pllbase + MXC_PLL_DP_MFD); |
157 | __raw_writel(reg, pllbase + MXC_PLL_DP_OP); | 178 | __raw_writel(dp_mfn, pllbase + MXC_PLL_DP_MFN); |
158 | __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD); | ||
159 | __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN); | ||
160 | } else { | ||
161 | reg = mfi << 4 | pdf; | ||
162 | __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP); | ||
163 | __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD); | ||
164 | __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN); | ||
165 | } | ||
166 | 179 | ||
167 | return 0; | 180 | return 0; |
168 | } | 181 | } |
@@ -170,7 +183,11 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate, | |||
170 | static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate, | 183 | static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate, |
171 | unsigned long *prate) | 184 | unsigned long *prate) |
172 | { | 185 | { |
173 | return rate; | 186 | u32 dp_op, dp_mfd, dp_mfn; |
187 | |||
188 | __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn); | ||
189 | return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN, | ||
190 | dp_op, dp_mfd, dp_mfn); | ||
174 | } | 191 | } |
175 | 192 | ||
176 | static int clk_pllv2_prepare(struct clk_hw *hw) | 193 | static int clk_pllv2_prepare(struct clk_hw *hw) |
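In clk-pllv2.c the register math moves out of clk_pllv2_recalc_rate()/clk_pllv2_set_rate() into __clk_pllv2_recalc_rate() and __clk_pllv2_set_rate(), which lets clk_pllv2_round_rate() stop returning the request verbatim: it now computes the dp_op/dp_mfd/dp_mfn values it would program and reports the rate those settings actually give. The same shape, reduced to a plain integer divider so it stays self-contained (an illustration of the pattern, not the real PLL math):

    #include <stdio.h>

    /* hypothetical divider clock: rate = parent / div, div limited to 1..64 */
    static int demo_compute_div(unsigned long rate, unsigned long parent, unsigned int *div)
    {
            if (rate == 0 || rate > parent)
                    return -1;
            *div = (unsigned int)((parent + rate / 2) / rate);      /* nearest divider */
            if (*div > 64)
                    *div = 64;
            return 0;
    }

    static unsigned long demo_recalc_rate(unsigned long parent, unsigned int div)
    {
            return parent / div;
    }

    /* round_rate mirrors set_rate: pick the settings, then report what they give */
    static unsigned long demo_round_rate(unsigned long rate, unsigned long parent)
    {
            unsigned int div;

            if (demo_compute_div(rate, parent, &div))
                    return 0;
            return demo_recalc_rate(parent, div);
    }

    int main(void)
    {
            unsigned long parent = 24000000;

            printf("request 7 MHz  -> %lu Hz\n", demo_round_rate(7000000, parent));
            printf("request 10 MHz -> %lu Hz\n", demo_round_rate(10000000, parent));
            return 0;
    }

Because round_rate and set_rate now share one settings helper, whatever round_rate promises is exactly what set_rate will later program.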
diff --git a/arch/arm/mach-imx/crm-regs-imx5.h b/arch/arm/mach-imx/crm-regs-imx5.h index 5e11ba7daee2..5e3f1f0f4cab 100644 --- a/arch/arm/mach-imx/crm-regs-imx5.h +++ b/arch/arm/mach-imx/crm-regs-imx5.h | |||
@@ -23,7 +23,7 @@ | |||
23 | #define MX53_DPLL1_BASE MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR) | 23 | #define MX53_DPLL1_BASE MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR) |
24 | #define MX53_DPLL2_BASE MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR) | 24 | #define MX53_DPLL2_BASE MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR) |
25 | #define MX53_DPLL3_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR) | 25 | #define MX53_DPLL3_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR) |
26 | #define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR) | 26 | #define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR) |
27 | 27 | ||
28 | /* PLL Register Offsets */ | 28 | /* PLL Register Offsets */ |
29 | #define MXC_PLL_DP_CTL 0x00 | 29 | #define MXC_PLL_DP_CTL 0x00 |
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 89493abd497c..20ed2d56c1af 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
15 | #include <asm/cp15.h> | ||
15 | #include <mach/common.h> | 16 | #include <mach/common.h> |
16 | 17 | ||
17 | int platform_cpu_kill(unsigned int cpu) | 18 | int platform_cpu_kill(unsigned int cpu) |
@@ -19,6 +20,44 @@ int platform_cpu_kill(unsigned int cpu) | |||
19 | return 1; | 20 | return 1; |
20 | } | 21 | } |
21 | 22 | ||
23 | static inline void cpu_enter_lowpower(void) | ||
24 | { | ||
25 | unsigned int v; | ||
26 | |||
27 | flush_cache_all(); | ||
28 | asm volatile( | ||
29 | "mcr p15, 0, %1, c7, c5, 0\n" | ||
30 | " mcr p15, 0, %1, c7, c10, 4\n" | ||
31 | /* | ||
32 | * Turn off coherency | ||
33 | */ | ||
34 | " mrc p15, 0, %0, c1, c0, 1\n" | ||
35 | " bic %0, %0, %3\n" | ||
36 | " mcr p15, 0, %0, c1, c0, 1\n" | ||
37 | " mrc p15, 0, %0, c1, c0, 0\n" | ||
38 | " bic %0, %0, %2\n" | ||
39 | " mcr p15, 0, %0, c1, c0, 0\n" | ||
40 | : "=&r" (v) | ||
41 | : "r" (0), "Ir" (CR_C), "Ir" (0x40) | ||
42 | : "cc"); | ||
43 | } | ||
44 | |||
45 | static inline void cpu_leave_lowpower(void) | ||
46 | { | ||
47 | unsigned int v; | ||
48 | |||
49 | asm volatile( | ||
50 | "mrc p15, 0, %0, c1, c0, 0\n" | ||
51 | " orr %0, %0, %1\n" | ||
52 | " mcr p15, 0, %0, c1, c0, 0\n" | ||
53 | " mrc p15, 0, %0, c1, c0, 1\n" | ||
54 | " orr %0, %0, %2\n" | ||
55 | " mcr p15, 0, %0, c1, c0, 1\n" | ||
56 | : "=&r" (v) | ||
57 | : "Ir" (CR_C), "Ir" (0x40) | ||
58 | : "cc"); | ||
59 | } | ||
60 | |||
22 | /* | 61 | /* |
23 | * platform-specific code to shutdown a CPU | 62 | * platform-specific code to shutdown a CPU |
24 | * | 63 | * |
@@ -26,9 +65,10 @@ int platform_cpu_kill(unsigned int cpu) | |||
26 | */ | 65 | */ |
27 | void platform_cpu_die(unsigned int cpu) | 66 | void platform_cpu_die(unsigned int cpu) |
28 | { | 67 | { |
29 | flush_cache_all(); | 68 | cpu_enter_lowpower(); |
30 | imx_enable_cpu(cpu, false); | 69 | imx_enable_cpu(cpu, false); |
31 | cpu_do_idle(); | 70 | cpu_do_idle(); |
71 | cpu_leave_lowpower(); | ||
32 | 72 | ||
33 | /* We should never return from idle */ | 73 | /* We should never return from idle */ |
34 | panic("cpu %d unexpectedly exit from shutdown\n", cpu); | 74 | panic("cpu %d unexpectedly exit from shutdown\n", cpu); |
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c index c515f8ede1a1..6450303f1a7a 100644 --- a/arch/arm/mach-imx/mach-cpuimx35.c +++ b/arch/arm/mach-imx/mach-cpuimx35.c | |||
@@ -70,7 +70,6 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = { | |||
70 | I2C_BOARD_INFO("pcf8563", 0x51), | 70 | I2C_BOARD_INFO("pcf8563", 0x51), |
71 | }, { | 71 | }, { |
72 | I2C_BOARD_INFO("tsc2007", 0x48), | 72 | I2C_BOARD_INFO("tsc2007", 0x48), |
73 | .type = "tsc2007", | ||
74 | .platform_data = &tsc2007_info, | 73 | .platform_data = &tsc2007_info, |
75 | .irq = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO), | 74 | .irq = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO), |
76 | }, | 75 | }, |
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c index ac50f1671e38..1e09de50cbcd 100644 --- a/arch/arm/mach-imx/mach-cpuimx51sd.c +++ b/arch/arm/mach-imx/mach-cpuimx51sd.c | |||
@@ -142,7 +142,6 @@ static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = { | |||
142 | I2C_BOARD_INFO("pcf8563", 0x51), | 142 | I2C_BOARD_INFO("pcf8563", 0x51), |
143 | }, { | 143 | }, { |
144 | I2C_BOARD_INFO("tsc2007", 0x49), | 144 | I2C_BOARD_INFO("tsc2007", 0x49), |
145 | .type = "tsc2007", | ||
146 | .platform_data = &tsc2007_info, | 145 | .platform_data = &tsc2007_info, |
147 | }, | 146 | }, |
148 | }; | 147 | }; |
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index dff82eb57cd9..f76edb96a48a 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c | |||
@@ -116,6 +116,8 @@ static const int visstrim_m10_pins[] __initconst = { | |||
116 | PB23_PF_USB_PWR, | 116 | PB23_PF_USB_PWR, |
117 | PB24_PF_USB_OC, | 117 | PB24_PF_USB_OC, |
118 | /* CSI */ | 118 | /* CSI */ |
119 | TVP5150_RSTN | GPIO_GPIO | GPIO_OUT, | ||
120 | TVP5150_PWDN | GPIO_GPIO | GPIO_OUT, | ||
119 | PB10_PF_CSI_D0, | 121 | PB10_PF_CSI_D0, |
120 | PB11_PF_CSI_D1, | 122 | PB11_PF_CSI_D1, |
121 | PB12_PF_CSI_D2, | 123 | PB12_PF_CSI_D2, |
@@ -147,6 +149,24 @@ static struct gpio visstrim_m10_version_gpios[] = { | |||
147 | { MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" }, | 149 | { MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" }, |
148 | }; | 150 | }; |
149 | 151 | ||
152 | static const struct gpio visstrim_m10_gpios[] __initconst = { | ||
153 | { | ||
154 | .gpio = TVP5150_RSTN, | ||
155 | .flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH, | ||
156 | .label = "tvp5150_rstn", | ||
157 | }, | ||
158 | { | ||
159 | .gpio = TVP5150_PWDN, | ||
160 | .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW, | ||
161 | .label = "tvp5150_pwdn", | ||
162 | }, | ||
163 | { | ||
164 | .gpio = OTG_PHY_CS_GPIO, | ||
165 | .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW, | ||
166 | .label = "usbotg_cs", | ||
167 | }, | ||
168 | }; | ||
169 | |||
150 | /* Camera */ | 170 | /* Camera */ |
151 | static int visstrim_camera_power(struct device *dev, int on) | 171 | static int visstrim_camera_power(struct device *dev, int on) |
152 | { | 172 | { |
@@ -190,13 +210,6 @@ static void __init visstrim_camera_init(void) | |||
190 | struct platform_device *pdev; | 210 | struct platform_device *pdev; |
191 | int dma; | 211 | int dma; |
192 | 212 | ||
193 | /* Initialize tvp5150 gpios */ | ||
194 | mxc_gpio_mode(TVP5150_RSTN | GPIO_GPIO | GPIO_OUT); | ||
195 | mxc_gpio_mode(TVP5150_PWDN | GPIO_GPIO | GPIO_OUT); | ||
196 | gpio_set_value(TVP5150_RSTN, 1); | ||
197 | gpio_set_value(TVP5150_PWDN, 0); | ||
198 | ndelay(1); | ||
199 | |||
200 | gpio_set_value(TVP5150_PWDN, 1); | 213 | gpio_set_value(TVP5150_PWDN, 1); |
201 | ndelay(1); | 214 | ndelay(1); |
202 | gpio_set_value(TVP5150_RSTN, 0); | 215 | gpio_set_value(TVP5150_RSTN, 0); |
@@ -377,10 +390,6 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = { | |||
377 | /* USB OTG */ | 390 | /* USB OTG */ |
378 | static int otg_phy_init(struct platform_device *pdev) | 391 | static int otg_phy_init(struct platform_device *pdev) |
379 | { | 392 | { |
380 | gpio_set_value(OTG_PHY_CS_GPIO, 0); | ||
381 | |||
382 | mdelay(10); | ||
383 | |||
384 | return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED); | 393 | return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED); |
385 | } | 394 | } |
386 | 395 | ||
@@ -435,6 +444,11 @@ static void __init visstrim_m10_board_init(void) | |||
435 | if (ret) | 444 | if (ret) |
436 | pr_err("Failed to setup pins (%d)\n", ret); | 445 | pr_err("Failed to setup pins (%d)\n", ret); |
437 | 446 | ||
447 | ret = gpio_request_array(visstrim_m10_gpios, | ||
448 | ARRAY_SIZE(visstrim_m10_gpios)); | ||
449 | if (ret) | ||
450 | pr_err("Failed to request gpios (%d)\n", ret); | ||
451 | |||
438 | imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata); | 452 | imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata); |
439 | imx27_add_imx_uart0(&uart_pdata); | 453 | imx27_add_imx_uart0(&uart_pdata); |
440 | 454 | ||
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c index d14bbe949a4f..3e7401fca76c 100644 --- a/arch/arm/mach-imx/mach-mx21ads.c +++ b/arch/arm/mach-imx/mach-mx21ads.c | |||
@@ -32,7 +32,7 @@ | |||
32 | * Memory-mapped I/O on MX21ADS base board | 32 | * Memory-mapped I/O on MX21ADS base board |
33 | */ | 33 | */ |
34 | #define MX21ADS_MMIO_BASE_ADDR 0xf5000000 | 34 | #define MX21ADS_MMIO_BASE_ADDR 0xf5000000 |
35 | #define MX21ADS_MMIO_SIZE SZ_16M | 35 | #define MX21ADS_MMIO_SIZE 0xc00000 |
36 | 36 | ||
37 | #define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \ | 37 | #define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \ |
38 | (MX21ADS_MMIO_BASE_ADDR + (offset)) | 38 | (MX21ADS_MMIO_BASE_ADDR + (offset)) |
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c index 967ed5b35a45..a8983b9778d1 100644 --- a/arch/arm/mach-imx/mm-imx3.c +++ b/arch/arm/mach-imx/mm-imx3.c | |||
@@ -86,6 +86,7 @@ static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size, | |||
86 | 86 | ||
87 | void __init imx3_init_l2x0(void) | 87 | void __init imx3_init_l2x0(void) |
88 | { | 88 | { |
89 | #ifdef CONFIG_CACHE_L2X0 | ||
89 | void __iomem *l2x0_base; | 90 | void __iomem *l2x0_base; |
90 | void __iomem *clkctl_base; | 91 | void __iomem *clkctl_base; |
91 | 92 | ||
@@ -115,6 +116,7 @@ void __init imx3_init_l2x0(void) | |||
115 | } | 116 | } |
116 | 117 | ||
117 | l2x0_init(l2x0_base, 0x00030024, 0x00000000); | 118 | l2x0_init(l2x0_base, 0x00030024, 0x00000000); |
119 | #endif | ||
118 | } | 120 | } |
119 | 121 | ||
120 | #ifdef CONFIG_SOC_IMX31 | 122 | #ifdef CONFIG_SOC_IMX31 |
@@ -179,6 +181,8 @@ void __init imx31_soc_init(void) | |||
179 | mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0); | 181 | mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0); |
180 | mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0); | 182 | mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0); |
181 | 183 | ||
184 | pinctrl_provide_dummies(); | ||
185 | |||
182 | if (to_version == 1) { | 186 | if (to_version == 1) { |
183 | strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin", | 187 | strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin", |
184 | strlen(imx31_sdma_pdata.fw_name)); | 188 | strlen(imx31_sdma_pdata.fw_name)); |
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c index feeee17da96b..1d003053d562 100644 --- a/arch/arm/mach-imx/mm-imx5.c +++ b/arch/arm/mach-imx/mm-imx5.c | |||
@@ -202,6 +202,8 @@ void __init imx51_soc_init(void) | |||
202 | mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH); | 202 | mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH); |
203 | mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH); | 203 | mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH); |
204 | 204 | ||
205 | pinctrl_provide_dummies(); | ||
206 | |||
205 | /* i.mx51 has the i.mx35 type sdma */ | 207 | /* i.mx51 has the i.mx35 type sdma */ |
206 | imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata); | 208 | imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata); |
207 | 209 | ||
diff --git a/arch/arm/mach-kirkwood/board-iconnect.c b/arch/arm/mach-kirkwood/board-iconnect.c index 2222c5739519..b0d3cc49269d 100644 --- a/arch/arm/mach-kirkwood/board-iconnect.c +++ b/arch/arm/mach-kirkwood/board-iconnect.c | |||
@@ -20,9 +20,6 @@ | |||
20 | #include <linux/mv643xx_eth.h> | 20 | #include <linux/mv643xx_eth.h> |
21 | #include <linux/gpio.h> | 21 | #include <linux/gpio.h> |
22 | #include <linux/leds.h> | 22 | #include <linux/leds.h> |
23 | #include <linux/spi/flash.h> | ||
24 | #include <linux/spi/spi.h> | ||
25 | #include <linux/spi/orion_spi.h> | ||
26 | #include <linux/i2c.h> | 23 | #include <linux/i2c.h> |
27 | #include <linux/input.h> | 24 | #include <linux/input.h> |
28 | #include <linux/gpio_keys.h> | 25 | #include <linux/gpio_keys.h> |
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 25fb3fd418ef..f261cd242643 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c | |||
@@ -159,6 +159,7 @@ static struct clk __init *clk_register_gate_fn(struct device *dev, | |||
159 | gate_fn->gate.flags = clk_gate_flags; | 159 | gate_fn->gate.flags = clk_gate_flags; |
160 | gate_fn->gate.lock = lock; | 160 | gate_fn->gate.lock = lock; |
161 | gate_fn->gate.hw.init = &init; | 161 | gate_fn->gate.hw.init = &init; |
162 | gate_fn->fn = fn; | ||
162 | 163 | ||
163 | /* ops is the gate ops, but with our disable function */ | 164 | /* ops is the gate ops, but with our disable function */ |
164 | if (clk_gate_fn_ops.disable != clk_gate_fn_disable) { | 165 | if (clk_gate_fn_ops.disable != clk_gate_fn_disable) { |
@@ -193,9 +194,11 @@ static struct clk __init *kirkwood_register_gate_fn(const char *name, | |||
193 | bit_idx, 0, &gating_lock, fn); | 194 | bit_idx, 0, &gating_lock, fn); |
194 | } | 195 | } |
195 | 196 | ||
197 | static struct clk *ge0, *ge1; | ||
198 | |||
196 | void __init kirkwood_clk_init(void) | 199 | void __init kirkwood_clk_init(void) |
197 | { | 200 | { |
198 | struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio; | 201 | struct clk *runit, *sata0, *sata1, *usb0, *sdio; |
199 | struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio; | 202 | struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio; |
200 | 203 | ||
201 | tclk = clk_register_fixed_rate(NULL, "tclk", NULL, | 204 | tclk = clk_register_fixed_rate(NULL, "tclk", NULL, |
@@ -257,6 +260,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data) | |||
257 | orion_ge00_init(eth_data, | 260 | orion_ge00_init(eth_data, |
258 | GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, | 261 | GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, |
259 | IRQ_KIRKWOOD_GE00_ERR); | 262 | IRQ_KIRKWOOD_GE00_ERR); |
263 | /* The interface forgets the MAC address assigned by u-boot if | ||
264 | the clock is turned off, so claim the clk now. */ | ||
265 | clk_prepare_enable(ge0); | ||
260 | } | 266 | } |
261 | 267 | ||
262 | 268 | ||
@@ -268,6 +274,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data) | |||
268 | orion_ge01_init(eth_data, | 274 | orion_ge01_init(eth_data, |
269 | GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, | 275 | GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, |
270 | IRQ_KIRKWOOD_GE01_ERR); | 276 | IRQ_KIRKWOOD_GE01_ERR); |
277 | clk_prepare_enable(ge1); | ||
271 | } | 278 | } |
272 | 279 | ||
273 | 280 | ||
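Two separate fixes land in kirkwood's common.c above: the custom gate now stores the callback it was handed (gate_fn->fn = fn), which the overridden disable op needs in order to invoke it, and ge0/ge1 are claimed with clk_prepare_enable() as soon as the ethernet interfaces are registered, because (per the new comment) gating that clock makes the interface forget the MAC address u-boot assigned. The first fix is an instance of the usual "copy a base ops table, override one hook, stash the extra callback" arrangement; a self-contained C illustration of that shape, with invented names, looks like this:

    #include <stdio.h>

    struct ops { void (*enable)(void); void (*disable)(void); };

    static void base_enable(void)  { puts("base enable"); }
    static void base_disable(void) { puts("base disable"); }
    static const struct ops base_ops = { base_enable, base_disable };

    /* wrapper: same ops layout plus an extra per-instance callback */
    struct gate_fn {
            struct ops ops;         /* starts as a copy of base_ops */
            void (*fn)(void);       /* extra hook the wrapper must remember to store */
    };

    static struct gate_fn gate;

    static void gate_fn_disable(void)
    {
            base_ops.disable();
            if (gate.fn)            /* only works if gate.fn was actually set */
                    gate.fn();
    }

    static void turn_off_phy(void) { puts("custom: power down PHY"); }

    int main(void)
    {
            gate.ops = base_ops;                    /* copy the base ops */
            gate.ops.disable = gate_fn_disable;     /* override one hook */
            gate.fn = turn_off_phy;                 /* analogous to the gate_fn->fn = fn the patch adds */

            gate.ops.enable();
            gate.ops.disable();
            return 0;
    }

In the kernel version the wrapper embeds the standard gate and its ops; the demo keeps only the shape.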
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h index 3eee37a3b501..a115142f8690 100644 --- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h +++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #define IRQ_MASK_HIGH_OFF 0x0014 | 38 | #define IRQ_MASK_HIGH_OFF 0x0014 |
39 | 39 | ||
40 | #define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300) | 40 | #define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300) |
41 | #define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300) | ||
41 | 42 | ||
42 | #define L2_CONFIG_REG (BRIDGE_VIRT_BASE | 0x0128) | 43 | #define L2_CONFIG_REG (BRIDGE_VIRT_BASE | 0x0128) |
43 | #define L2_WRITETHROUGH 0x00000010 | 44 | #define L2_WRITETHROUGH 0x00000010 |
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h index fede3d503efa..c5b68510776b 100644 --- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h +++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h | |||
@@ -80,6 +80,7 @@ | |||
80 | #define UART1_VIRT_BASE (DEV_BUS_VIRT_BASE | 0x2100) | 80 | #define UART1_VIRT_BASE (DEV_BUS_VIRT_BASE | 0x2100) |
81 | 81 | ||
82 | #define BRIDGE_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x20000) | 82 | #define BRIDGE_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x20000) |
83 | #define BRIDGE_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x20000) | ||
83 | 84 | ||
84 | #define CRYPTO_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x30000) | 85 | #define CRYPTO_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x30000) |
85 | 86 | ||
diff --git a/arch/arm/mach-mmp/irq.c b/arch/arm/mach-mmp/irq.c index fcfe0e3bd701..e60c7d98922b 100644 --- a/arch/arm/mach-mmp/irq.c +++ b/arch/arm/mach-mmp/irq.c | |||
@@ -241,6 +241,7 @@ void __init mmp2_init_icu(void) | |||
241 | icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE; | 241 | icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE; |
242 | icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE; | 242 | icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE; |
243 | icu_data[1].nr_irqs = 2; | 243 | icu_data[1].nr_irqs = 2; |
244 | icu_data[1].cascade_irq = 4; | ||
244 | icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE; | 245 | icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE; |
245 | icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs, | 246 | icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs, |
246 | icu_data[1].virq_base, 0, | 247 | icu_data[1].virq_base, 0, |
@@ -249,6 +250,7 @@ void __init mmp2_init_icu(void) | |||
249 | icu_data[2].reg_status = mmp_icu_base + 0x154; | 250 | icu_data[2].reg_status = mmp_icu_base + 0x154; |
250 | icu_data[2].reg_mask = mmp_icu_base + 0x16c; | 251 | icu_data[2].reg_mask = mmp_icu_base + 0x16c; |
251 | icu_data[2].nr_irqs = 2; | 252 | icu_data[2].nr_irqs = 2; |
253 | icu_data[2].cascade_irq = 5; | ||
252 | icu_data[2].virq_base = IRQ_MMP2_RTC_BASE; | 254 | icu_data[2].virq_base = IRQ_MMP2_RTC_BASE; |
253 | icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs, | 255 | icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs, |
254 | icu_data[2].virq_base, 0, | 256 | icu_data[2].virq_base, 0, |
@@ -257,6 +259,7 @@ void __init mmp2_init_icu(void) | |||
257 | icu_data[3].reg_status = mmp_icu_base + 0x180; | 259 | icu_data[3].reg_status = mmp_icu_base + 0x180; |
258 | icu_data[3].reg_mask = mmp_icu_base + 0x17c; | 260 | icu_data[3].reg_mask = mmp_icu_base + 0x17c; |
259 | icu_data[3].nr_irqs = 3; | 261 | icu_data[3].nr_irqs = 3; |
262 | icu_data[3].cascade_irq = 9; | ||
260 | icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE; | 263 | icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE; |
261 | icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs, | 264 | icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs, |
262 | icu_data[3].virq_base, 0, | 265 | icu_data[3].virq_base, 0, |
@@ -265,6 +268,7 @@ void __init mmp2_init_icu(void) | |||
265 | icu_data[4].reg_status = mmp_icu_base + 0x158; | 268 | icu_data[4].reg_status = mmp_icu_base + 0x158; |
266 | icu_data[4].reg_mask = mmp_icu_base + 0x170; | 269 | icu_data[4].reg_mask = mmp_icu_base + 0x170; |
267 | icu_data[4].nr_irqs = 5; | 270 | icu_data[4].nr_irqs = 5; |
271 | icu_data[4].cascade_irq = 17; | ||
268 | icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE; | 272 | icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE; |
269 | icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs, | 273 | icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs, |
270 | icu_data[4].virq_base, 0, | 274 | icu_data[4].virq_base, 0, |
@@ -273,6 +277,7 @@ void __init mmp2_init_icu(void) | |||
273 | icu_data[5].reg_status = mmp_icu_base + 0x15c; | 277 | icu_data[5].reg_status = mmp_icu_base + 0x15c; |
274 | icu_data[5].reg_mask = mmp_icu_base + 0x174; | 278 | icu_data[5].reg_mask = mmp_icu_base + 0x174; |
275 | icu_data[5].nr_irqs = 15; | 279 | icu_data[5].nr_irqs = 15; |
280 | icu_data[5].cascade_irq = 35; | ||
276 | icu_data[5].virq_base = IRQ_MMP2_MISC_BASE; | 281 | icu_data[5].virq_base = IRQ_MMP2_MISC_BASE; |
277 | icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs, | 282 | icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs, |
278 | icu_data[5].virq_base, 0, | 283 | icu_data[5].virq_base, 0, |
@@ -281,6 +286,7 @@ void __init mmp2_init_icu(void) | |||
281 | icu_data[6].reg_status = mmp_icu_base + 0x160; | 286 | icu_data[6].reg_status = mmp_icu_base + 0x160; |
282 | icu_data[6].reg_mask = mmp_icu_base + 0x178; | 287 | icu_data[6].reg_mask = mmp_icu_base + 0x178; |
283 | icu_data[6].nr_irqs = 2; | 288 | icu_data[6].nr_irqs = 2; |
289 | icu_data[6].cascade_irq = 51; | ||
284 | icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE; | 290 | icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE; |
285 | icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs, | 291 | icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs, |
286 | icu_data[6].virq_base, 0, | 292 | icu_data[6].virq_base, 0, |
@@ -289,6 +295,7 @@ void __init mmp2_init_icu(void) | |||
289 | icu_data[7].reg_status = mmp_icu_base + 0x188; | 295 | icu_data[7].reg_status = mmp_icu_base + 0x188; |
290 | icu_data[7].reg_mask = mmp_icu_base + 0x184; | 296 | icu_data[7].reg_mask = mmp_icu_base + 0x184; |
291 | icu_data[7].nr_irqs = 2; | 297 | icu_data[7].nr_irqs = 2; |
298 | icu_data[7].cascade_irq = 55; | ||
292 | icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE; | 299 | icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE; |
293 | icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs, | 300 | icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs, |
294 | icu_data[7].virq_base, 0, | 301 | icu_data[7].virq_base, 0, |
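The mmp2 hunks record a cascade_irq for each secondary ICU instance, presumably the parent line that sub-controller cascades from, alongside the existing legacy irq_domain registration. A simplified sketch of how one such secondary controller might be described, with illustrative field values and register offsets:

	#include <linux/init.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	/* Illustrative mirror of the fields the hunk touches. */
	struct example_icu {
		void __iomem		*reg_status;
		void __iomem		*reg_mask;
		unsigned int		nr_irqs;
		unsigned int		cascade_irq;	/* parent line this sub-ICU hangs off */
		unsigned int		virq_base;
		struct irq_domain	*domain;
	};

	static void __init example_icu_setup(struct example_icu *icu, void __iomem *base)
	{
		icu->reg_status  = base + 0x154;	/* placeholder offsets */
		icu->reg_mask    = base + 0x16c;
		icu->nr_irqs     = 2;
		icu->cascade_irq = 4;			/* as recorded by the hunk above */
		icu->virq_base   = 64;			/* placeholder virq base */
		icu->domain = irq_domain_add_legacy(NULL, icu->nr_irqs, icu->virq_base,
						    0, &irq_domain_simple_ops, NULL);
	}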
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index 8ca14e88a31a..2c5d0ed75285 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c | |||
@@ -83,11 +83,9 @@ static struct musb_hdrc_config musb_config = { | |||
83 | }; | 83 | }; |
84 | 84 | ||
85 | static struct musb_hdrc_platform_data tusb_data = { | 85 | static struct musb_hdrc_platform_data tusb_data = { |
86 | #if defined(CONFIG_USB_MUSB_OTG) | 86 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC |
87 | .mode = MUSB_OTG, | 87 | .mode = MUSB_OTG, |
88 | #elif defined(CONFIG_USB_MUSB_PERIPHERAL) | 88 | #else |
89 | .mode = MUSB_PERIPHERAL, | ||
90 | #else /* defined(CONFIG_USB_MUSB_HOST) */ | ||
91 | .mode = MUSB_HOST, | 89 | .mode = MUSB_HOST, |
92 | #endif | 90 | #endif |
93 | .set_power = tusb_set_power, | 91 | .set_power = tusb_set_power, |
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 79c6909eeb78..580fd17208da 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c | |||
@@ -81,13 +81,13 @@ static u8 omap3_beagle_version; | |||
81 | static struct { | 81 | static struct { |
82 | int mmc1_gpio_wp; | 82 | int mmc1_gpio_wp; |
83 | int usb_pwr_level; | 83 | int usb_pwr_level; |
84 | int reset_gpio; | 84 | int dvi_pd_gpio; |
85 | int usr_button_gpio; | 85 | int usr_button_gpio; |
86 | int mmc_caps; | 86 | int mmc_caps; |
87 | } beagle_config = { | 87 | } beagle_config = { |
88 | .mmc1_gpio_wp = -EINVAL, | 88 | .mmc1_gpio_wp = -EINVAL, |
89 | .usb_pwr_level = GPIOF_OUT_INIT_LOW, | 89 | .usb_pwr_level = GPIOF_OUT_INIT_LOW, |
90 | .reset_gpio = 129, | 90 | .dvi_pd_gpio = -EINVAL, |
91 | .usr_button_gpio = 4, | 91 | .usr_button_gpio = 4, |
92 | .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, | 92 | .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, |
93 | }; | 93 | }; |
@@ -126,21 +126,21 @@ static void __init omap3_beagle_init_rev(void) | |||
126 | printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n"); | 126 | printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n"); |
127 | omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX; | 127 | omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX; |
128 | beagle_config.mmc1_gpio_wp = 29; | 128 | beagle_config.mmc1_gpio_wp = 29; |
129 | beagle_config.reset_gpio = 170; | 129 | beagle_config.dvi_pd_gpio = 170; |
130 | beagle_config.usr_button_gpio = 7; | 130 | beagle_config.usr_button_gpio = 7; |
131 | break; | 131 | break; |
132 | case 6: | 132 | case 6: |
133 | printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n"); | 133 | printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n"); |
134 | omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3; | 134 | omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3; |
135 | beagle_config.mmc1_gpio_wp = 23; | 135 | beagle_config.mmc1_gpio_wp = 23; |
136 | beagle_config.reset_gpio = 170; | 136 | beagle_config.dvi_pd_gpio = 170; |
137 | beagle_config.usr_button_gpio = 7; | 137 | beagle_config.usr_button_gpio = 7; |
138 | break; | 138 | break; |
139 | case 5: | 139 | case 5: |
140 | printk(KERN_INFO "OMAP3 Beagle Rev: C4\n"); | 140 | printk(KERN_INFO "OMAP3 Beagle Rev: C4\n"); |
141 | omap3_beagle_version = OMAP3BEAGLE_BOARD_C4; | 141 | omap3_beagle_version = OMAP3BEAGLE_BOARD_C4; |
142 | beagle_config.mmc1_gpio_wp = 23; | 142 | beagle_config.mmc1_gpio_wp = 23; |
143 | beagle_config.reset_gpio = 170; | 143 | beagle_config.dvi_pd_gpio = 170; |
144 | beagle_config.usr_button_gpio = 7; | 144 | beagle_config.usr_button_gpio = 7; |
145 | break; | 145 | break; |
146 | case 0: | 146 | case 0: |
@@ -274,11 +274,9 @@ static int beagle_twl_gpio_setup(struct device *dev, | |||
274 | if (r) | 274 | if (r) |
275 | pr_err("%s: unable to configure nDVI_PWR_EN\n", | 275 | pr_err("%s: unable to configure nDVI_PWR_EN\n", |
276 | __func__); | 276 | __func__); |
277 | r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH, | 277 | |
278 | "DVI_LDO_EN"); | 278 | beagle_config.dvi_pd_gpio = gpio + 2; |
279 | if (r) | 279 | |
280 | pr_err("%s: unable to configure DVI_LDO_EN\n", | ||
281 | __func__); | ||
282 | } else { | 280 | } else { |
283 | /* | 281 | /* |
284 | * REVISIT: need ehci-omap hooks for external VBUS | 282 | * REVISIT: need ehci-omap hooks for external VBUS |
@@ -287,7 +285,7 @@ static int beagle_twl_gpio_setup(struct device *dev, | |||
287 | if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC")) | 285 | if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC")) |
288 | pr_err("%s: unable to configure EHCI_nOC\n", __func__); | 286 | pr_err("%s: unable to configure EHCI_nOC\n", __func__); |
289 | } | 287 | } |
290 | dvi_panel.power_down_gpio = beagle_config.reset_gpio; | 288 | dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio; |
291 | 289 | ||
292 | gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level, | 290 | gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level, |
293 | "nEN_USB_PWR"); | 291 | "nEN_USB_PWR"); |
@@ -499,7 +497,7 @@ static void __init omap3_beagle_init(void) | |||
499 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 497 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
500 | omap3_beagle_init_rev(); | 498 | omap3_beagle_init_rev(); |
501 | 499 | ||
502 | if (beagle_config.mmc1_gpio_wp != -EINVAL) | 500 | if (gpio_is_valid(beagle_config.mmc1_gpio_wp)) |
503 | omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT); | 501 | omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT); |
504 | mmc[0].caps = beagle_config.mmc_caps; | 502 | mmc[0].caps = beagle_config.mmc_caps; |
505 | omap_hsmmc_init(mmc); | 503 | omap_hsmmc_init(mmc); |
@@ -510,15 +508,13 @@ static void __init omap3_beagle_init(void) | |||
510 | 508 | ||
511 | platform_add_devices(omap3_beagle_devices, | 509 | platform_add_devices(omap3_beagle_devices, |
512 | ARRAY_SIZE(omap3_beagle_devices)); | 510 | ARRAY_SIZE(omap3_beagle_devices)); |
511 | if (gpio_is_valid(beagle_config.dvi_pd_gpio)) | ||
512 | omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT); | ||
513 | omap_display_init(&beagle_dss_data); | 513 | omap_display_init(&beagle_dss_data); |
514 | omap_serial_init(); | 514 | omap_serial_init(); |
515 | omap_sdrc_init(mt46h32m32lf6_sdrc_params, | 515 | omap_sdrc_init(mt46h32m32lf6_sdrc_params, |
516 | mt46h32m32lf6_sdrc_params); | 516 | mt46h32m32lf6_sdrc_params); |
517 | 517 | ||
518 | omap_mux_init_gpio(170, OMAP_PIN_INPUT); | ||
519 | /* REVISIT leave DVI powered down until it's needed ... */ | ||
520 | gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD"); | ||
521 | |||
522 | usb_musb_init(NULL); | 518 | usb_musb_init(NULL); |
523 | usbhs_init(&usbhs_bdata); | 519 | usbhs_init(&usbhs_bdata); |
524 | omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, | 520 | omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, |
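With reset_gpio renamed to dvi_pd_gpio and the hard-coded GPIO 170 request dropped, the Beagle board code now records whichever GPIO actually drives DVI power-down and validates it before touching the mux or requesting it. A minimal sketch of that guard pattern, using a hypothetical board structure:

	#include <linux/gpio.h>
	#include <linux/kernel.h>

	struct example_board_cfg {
		int dvi_pd_gpio;		/* -EINVAL until a board revision sets it */
	};

	static struct example_board_cfg example_cfg = {
		.dvi_pd_gpio = -EINVAL,
	};

	static void example_display_setup(void)
	{
		/* Only request the line when a valid GPIO number was assigned. */
		if (gpio_is_valid(example_cfg.dvi_pd_gpio)) {
			if (gpio_request_one(example_cfg.dvi_pd_gpio,
					     GPIOF_OUT_INIT_HIGH, "DVI_nPD"))
				pr_err("example: unable to configure DVI_nPD\n");
		}
	}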
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index ff53deccecab..df2534de3361 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c | |||
@@ -144,7 +144,6 @@ static struct lis3lv02d_platform_data rx51_lis3lv02d_data = { | |||
144 | .release_resources = lis302_release, | 144 | .release_resources = lis302_release, |
145 | .st_min_limits = {-32, 3, 3}, | 145 | .st_min_limits = {-32, 3, 3}, |
146 | .st_max_limits = {-3, 32, 32}, | 146 | .st_max_limits = {-3, 32, 32}, |
147 | .irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO), | ||
148 | }; | 147 | }; |
149 | #endif | 148 | #endif |
150 | 149 | ||
@@ -1030,7 +1029,6 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = { | |||
1030 | { | 1029 | { |
1031 | I2C_BOARD_INFO("lis3lv02d", 0x1d), | 1030 | I2C_BOARD_INFO("lis3lv02d", 0x1d), |
1032 | .platform_data = &rx51_lis3lv02d_data, | 1031 | .platform_data = &rx51_lis3lv02d_data, |
1033 | .irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO), | ||
1034 | }, | 1032 | }, |
1035 | #endif | 1033 | #endif |
1036 | }; | 1034 | }; |
@@ -1056,6 +1054,10 @@ static int __init rx51_i2c_init(void) | |||
1056 | omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata); | 1054 | omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata); |
1057 | omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, | 1055 | omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, |
1058 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); | 1056 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); |
1057 | #if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE) | ||
1058 | rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO); | ||
1059 | rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO); | ||
1060 | #endif | ||
1059 | omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3, | 1061 | omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3, |
1060 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_3)); | 1062 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_3)); |
1061 | return 0; | 1063 | return 0; |
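The rx51 hunk stops filling the .irq fields with the compile-time OMAP_GPIO_IRQ() macro and instead resolves them at init time with gpio_to_irq(). The same idea in isolation, with a hypothetical GPIO number and bus:

	#include <linux/gpio.h>
	#include <linux/i2c.h>
	#include <linux/init.h>

	#define EXAMPLE_IRQ_GPIO	181	/* placeholder GPIO number */

	static struct i2c_board_info example_info = {
		I2C_BOARD_INFO("lis3lv02d", 0x1d),
	};

	static int __init example_i2c_init(void)
	{
		/* Map the GPIO to its Linux IRQ number at runtime, not compile time. */
		example_info.irq = gpio_to_irq(EXAMPLE_IRQ_GPIO);
		return i2c_register_board_info(3, &example_info, 1);
	}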
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index 4e1a3b0e8cc8..1efdec236ae8 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c | |||
@@ -3514,7 +3514,7 @@ int __init omap3xxx_clk_init(void) | |||
3514 | struct omap_clk *c; | 3514 | struct omap_clk *c; |
3515 | u32 cpu_clkflg = 0; | 3515 | u32 cpu_clkflg = 0; |
3516 | 3516 | ||
3517 | if (cpu_is_omap3517()) { | 3517 | if (soc_is_am35xx()) { |
3518 | cpu_mask = RATE_IN_34XX; | 3518 | cpu_mask = RATE_IN_34XX; |
3519 | cpu_clkflg = CK_AM35XX; | 3519 | cpu_clkflg = CK_AM35XX; |
3520 | } else if (cpu_is_omap3630()) { | 3520 | } else if (cpu_is_omap3630()) { |
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c index 2172f6603848..e2b701e164f6 100644 --- a/arch/arm/mach-omap2/clock44xx_data.c +++ b/arch/arm/mach-omap2/clock44xx_data.c | |||
@@ -84,6 +84,7 @@ static struct clk slimbus_clk = { | |||
84 | 84 | ||
85 | static struct clk sys_32k_ck = { | 85 | static struct clk sys_32k_ck = { |
86 | .name = "sys_32k_ck", | 86 | .name = "sys_32k_ck", |
87 | .clkdm_name = "prm_clkdm", | ||
87 | .rate = 32768, | 88 | .rate = 32768, |
88 | .ops = &clkops_null, | 89 | .ops = &clkops_null, |
89 | }; | 90 | }; |
@@ -512,6 +513,7 @@ static struct clk ddrphy_ck = { | |||
512 | .name = "ddrphy_ck", | 513 | .name = "ddrphy_ck", |
513 | .parent = &dpll_core_m2_ck, | 514 | .parent = &dpll_core_m2_ck, |
514 | .ops = &clkops_null, | 515 | .ops = &clkops_null, |
516 | .clkdm_name = "l3_emif_clkdm", | ||
515 | .fixed_div = 2, | 517 | .fixed_div = 2, |
516 | .recalc = &omap_fixed_divisor_recalc, | 518 | .recalc = &omap_fixed_divisor_recalc, |
517 | }; | 519 | }; |
@@ -769,6 +771,7 @@ static const struct clksel dpll_mpu_m2_div[] = { | |||
769 | static struct clk dpll_mpu_m2_ck = { | 771 | static struct clk dpll_mpu_m2_ck = { |
770 | .name = "dpll_mpu_m2_ck", | 772 | .name = "dpll_mpu_m2_ck", |
771 | .parent = &dpll_mpu_ck, | 773 | .parent = &dpll_mpu_ck, |
774 | .clkdm_name = "cm_clkdm", | ||
772 | .clksel = dpll_mpu_m2_div, | 775 | .clksel = dpll_mpu_m2_div, |
773 | .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_MPU, | 776 | .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_MPU, |
774 | .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK, | 777 | .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK, |
@@ -1149,6 +1152,7 @@ static const struct clksel l3_div_div[] = { | |||
1149 | static struct clk l3_div_ck = { | 1152 | static struct clk l3_div_ck = { |
1150 | .name = "l3_div_ck", | 1153 | .name = "l3_div_ck", |
1151 | .parent = &div_core_ck, | 1154 | .parent = &div_core_ck, |
1155 | .clkdm_name = "cm_clkdm", | ||
1152 | .clksel = l3_div_div, | 1156 | .clksel = l3_div_div, |
1153 | .clksel_reg = OMAP4430_CM_CLKSEL_CORE, | 1157 | .clksel_reg = OMAP4430_CM_CLKSEL_CORE, |
1154 | .clksel_mask = OMAP4430_CLKSEL_L3_MASK, | 1158 | .clksel_mask = OMAP4430_CLKSEL_L3_MASK, |
@@ -2824,6 +2828,7 @@ static const struct clksel trace_clk_div_div[] = { | |||
2824 | static struct clk trace_clk_div_ck = { | 2828 | static struct clk trace_clk_div_ck = { |
2825 | .name = "trace_clk_div_ck", | 2829 | .name = "trace_clk_div_ck", |
2826 | .parent = &pmd_trace_clk_mux_ck, | 2830 | .parent = &pmd_trace_clk_mux_ck, |
2831 | .clkdm_name = "emu_sys_clkdm", | ||
2827 | .clksel = trace_clk_div_div, | 2832 | .clksel = trace_clk_div_div, |
2828 | .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL, | 2833 | .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL, |
2829 | .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK, | 2834 | .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK, |
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h index a7bc096bd407..f24e3f7a2bbc 100644 --- a/arch/arm/mach-omap2/cm.h +++ b/arch/arm/mach-omap2/cm.h | |||
@@ -22,4 +22,15 @@ | |||
22 | */ | 22 | */ |
23 | #define MAX_MODULE_READY_TIME 2000 | 23 | #define MAX_MODULE_READY_TIME 2000 |
24 | 24 | ||
25 | /* | ||
26 | * MAX_MODULE_DISABLE_TIME: max duration in microseconds to wait for | ||
27 | * the PRCM to request that a module enter the inactive state in the | ||
28 | * case of OMAP2 & 3. In the case of OMAP4 this is the max duration | ||
29 | * in microseconds for the module to reach the inactive state from | ||
30 | * a functional state. | ||
31 | * XXX FSUSB on OMAP4430 takes ~4ms to idle after reset during | ||
32 | * kernel init. | ||
33 | */ | ||
34 | #define MAX_MODULE_DISABLE_TIME 5000 | ||
35 | |||
25 | #endif | 36 | #endif |
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c index 8c86d294b1a3..1a39945d9ff8 100644 --- a/arch/arm/mach-omap2/cminst44xx.c +++ b/arch/arm/mach-omap2/cminst44xx.c | |||
@@ -313,9 +313,9 @@ int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_off | |||
313 | 313 | ||
314 | omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) == | 314 | omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) == |
315 | CLKCTRL_IDLEST_DISABLED), | 315 | CLKCTRL_IDLEST_DISABLED), |
316 | MAX_MODULE_READY_TIME, i); | 316 | MAX_MODULE_DISABLE_TIME, i); |
317 | 317 | ||
318 | return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY; | 318 | return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY; |
319 | } | 319 | } |
320 | 320 | ||
321 | /** | 321 | /** |
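MAX_MODULE_DISABLE_TIME only widens the time budget the idle-wait is allowed; the wait itself is a microsecond busy-poll in the spirit of the omap_test_timeout() helper. A rough standalone sketch, where the stubbed predicate stands in for the real _clkctrl_idlest() == CLKCTRL_IDLEST_DISABLED test:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	#define EXAMPLE_MAX_DISABLE_TIME_US	5000	/* mirrors MAX_MODULE_DISABLE_TIME */

	/* Stub: pretend the module reports disabled immediately. */
	static bool example_module_is_disabled(void)
	{
		return true;
	}

	static int example_wait_module_idle(void)
	{
		int i = 0;

		/* Busy-poll once per microsecond until the module reports disabled. */
		while (!example_module_is_disabled() && i < EXAMPLE_MAX_DISABLE_TIME_US) {
			udelay(1);
			i++;
		}

		return (i < EXAMPLE_MAX_DISABLE_TIME_US) ? 0 : -EBUSY;
	}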
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 54d49ddb9b81..5fb47a14f4ba 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
@@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name, | |||
271 | goto err; | 271 | goto err; |
272 | } | 272 | } |
273 | 273 | ||
274 | r = omap_device_register(pdev); | 274 | r = platform_device_add(pdev); |
275 | if (r) { | 275 | if (r) { |
276 | pr_err("Could not register omap_device for %s\n", pdev_name); | 276 | pr_err("Could not register platform_device for %s\n", pdev_name); |
277 | goto err; | 277 | goto err; |
278 | } | 278 | } |
279 | 279 | ||
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c index 845309f146fe..88ffa1e645cd 100644 --- a/arch/arm/mach-omap2/dsp.c +++ b/arch/arm/mach-omap2/dsp.c | |||
@@ -20,6 +20,9 @@ | |||
20 | 20 | ||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | |||
24 | #include <asm/memblock.h> | ||
25 | |||
23 | #include "cm2xxx_3xxx.h" | 26 | #include "cm2xxx_3xxx.h" |
24 | #include "prm2xxx_3xxx.h" | 27 | #include "prm2xxx_3xxx.h" |
25 | #ifdef CONFIG_BRIDGE_DVFS | 28 | #ifdef CONFIG_BRIDGE_DVFS |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 0389b3264abe..00486a8564fd 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -247,6 +247,17 @@ void __init omap3xxx_check_features(void) | |||
247 | omap_features |= OMAP3_HAS_SDRC; | 247 | omap_features |= OMAP3_HAS_SDRC; |
248 | 248 | ||
249 | /* | 249 | /* |
250 | * am35x fixups: | ||
251 | * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as | ||
252 | * reserved and therefore return 0 when read. Unfortunately, | ||
253 | * OMAP3_CHECK_FEATURE() will interpret some of those zeroes to | ||
254 | * mean that a feature is present even though it isn't, so clear | ||
255 | * the incorrectly set feature bits. | ||
256 | */ | ||
257 | if (soc_is_am35xx()) | ||
258 | omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP); | ||
259 | |||
260 | /* | ||
250 | * TODO: Get additional info (where applicable) | 261 | * TODO: Get additional info (where applicable) |
251 | * e.g. Size of L2 cache. | 262 | * e.g. Size of L2 cache. |
252 | */ | 263 | */ |
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c index fdc4303be563..6038a8c84b74 100644 --- a/arch/arm/mach-omap2/irq.c +++ b/arch/arm/mach-omap2/irq.c | |||
@@ -149,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) | |||
149 | ct->chip.irq_ack = omap_mask_ack_irq; | 149 | ct->chip.irq_ack = omap_mask_ack_irq; |
150 | ct->chip.irq_mask = irq_gc_mask_disable_reg; | 150 | ct->chip.irq_mask = irq_gc_mask_disable_reg; |
151 | ct->chip.irq_unmask = irq_gc_unmask_enable_reg; | 151 | ct->chip.irq_unmask = irq_gc_unmask_enable_reg; |
152 | ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; | ||
152 | 153 | ||
153 | ct->regs.enable = INTC_MIR_CLEAR0; | 154 | ct->regs.enable = INTC_MIR_CLEAR0; |
154 | ct->regs.disable = INTC_MIR_SET0; | 155 | ct->regs.disable = INTC_MIR_SET0; |
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index 80e55c5c9998..9fe6829f4c16 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include "control.h" | 41 | #include "control.h" |
42 | #include "mux.h" | 42 | #include "mux.h" |
43 | #include "prm.h" | 43 | #include "prm.h" |
44 | #include "common.h" | ||
44 | 45 | ||
45 | #define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */ | 46 | #define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */ |
46 | #define OMAP_MUX_BASE_SZ 0x5ca | 47 | #define OMAP_MUX_BASE_SZ 0x5ca |
@@ -217,8 +218,7 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, | |||
217 | return -ENODEV; | 218 | return -ENODEV; |
218 | } | 219 | } |
219 | 220 | ||
220 | static int __init | 221 | int __init omap_mux_get_by_name(const char *muxname, |
221 | omap_mux_get_by_name(const char *muxname, | ||
222 | struct omap_mux_partition **found_partition, | 222 | struct omap_mux_partition **found_partition, |
223 | struct omap_mux **found_mux) | 223 | struct omap_mux **found_mux) |
224 | { | 224 | { |
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h index 69fe060a0b75..471e62a74a16 100644 --- a/arch/arm/mach-omap2/mux.h +++ b/arch/arm/mach-omap2/mux.h | |||
@@ -59,6 +59,7 @@ | |||
59 | #define OMAP_PIN_OFF_WAKEUPENABLE OMAP_WAKEUP_EN | 59 | #define OMAP_PIN_OFF_WAKEUPENABLE OMAP_WAKEUP_EN |
60 | 60 | ||
61 | #define OMAP_MODE_GPIO(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4) | 61 | #define OMAP_MODE_GPIO(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4) |
62 | #define OMAP_MODE_UART(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0) | ||
62 | 63 | ||
63 | /* Flags for omapX_mux_init */ | 64 | /* Flags for omapX_mux_init */ |
64 | #define OMAP_PACKAGE_MASK 0xffff | 65 | #define OMAP_PACKAGE_MASK 0xffff |
@@ -225,8 +226,18 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads); | |||
225 | */ | 226 | */ |
226 | void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state); | 227 | void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state); |
227 | 228 | ||
229 | int omap_mux_get_by_name(const char *muxname, | ||
230 | struct omap_mux_partition **found_partition, | ||
231 | struct omap_mux **found_mux); | ||
228 | #else | 232 | #else |
229 | 233 | ||
234 | static inline int omap_mux_get_by_name(const char *muxname, | ||
235 | struct omap_mux_partition **found_partition, | ||
236 | struct omap_mux **found_mux) | ||
237 | { | ||
238 | return 0; | ||
239 | } | ||
240 | |||
230 | static inline int omap_mux_init_gpio(int gpio, int val) | 241 | static inline int omap_mux_init_gpio(int gpio, int val) |
231 | { | 242 | { |
232 | return 0; | 243 | return 0; |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index bf86f7e8f91f..773193670ea2 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -530,7 +530,7 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v) | |||
530 | if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) | 530 | if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) |
531 | _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v); | 531 | _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v); |
532 | if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP) | 532 | if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP) |
533 | _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v); | 533 | _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v); |
534 | 534 | ||
535 | /* XXX test pwrdm_get_wken for this hwmod's subsystem */ | 535 | /* XXX test pwrdm_get_wken for this hwmod's subsystem */ |
536 | 536 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 950454a3fa31..f30e861ce6d9 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -393,8 +393,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = { | |||
393 | .rev_offs = 0x0000, | 393 | .rev_offs = 0x0000, |
394 | .sysc_offs = 0x0004, | 394 | .sysc_offs = 0x0004, |
395 | .sysc_flags = SYSC_HAS_SIDLEMODE, | 395 | .sysc_flags = SYSC_HAS_SIDLEMODE, |
396 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | | 396 | .idlemodes = (SIDLE_FORCE | SIDLE_NO), |
397 | SIDLE_SMART_WKUP), | ||
398 | .sysc_fields = &omap_hwmod_sysc_type1, | 397 | .sysc_fields = &omap_hwmod_sysc_type1, |
399 | }; | 398 | }; |
400 | 399 | ||
@@ -854,6 +853,11 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = { | |||
854 | .name = "dss_hdmi", | 853 | .name = "dss_hdmi", |
855 | .class = &omap44xx_hdmi_hwmod_class, | 854 | .class = &omap44xx_hdmi_hwmod_class, |
856 | .clkdm_name = "l3_dss_clkdm", | 855 | .clkdm_name = "l3_dss_clkdm", |
856 | /* | ||
857 | * HDMI audio requires the use of no-idle mode. Hence, | ||
858 | * the idle mode is set by software. | ||
859 | */ | ||
860 | .flags = HWMOD_SWSUP_SIDLE, | ||
857 | .mpu_irqs = omap44xx_dss_hdmi_irqs, | 861 | .mpu_irqs = omap44xx_dss_hdmi_irqs, |
858 | .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, | 862 | .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, |
859 | .main_clk = "dss_48mhz_clk", | 863 | .main_clk = "dss_48mhz_clk", |
diff --git a/arch/arm/mach-omap2/omap_l3_smx.c b/arch/arm/mach-omap2/omap_l3_smx.c index a05a62f9ee5b..acc216491b8a 100644 --- a/arch/arm/mach-omap2/omap_l3_smx.c +++ b/arch/arm/mach-omap2/omap_l3_smx.c | |||
@@ -155,10 +155,11 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3, | |||
155 | u8 multi = error & L3_ERROR_LOG_MULTI; | 155 | u8 multi = error & L3_ERROR_LOG_MULTI; |
156 | u32 address = omap3_l3_decode_addr(error_addr); | 156 | u32 address = omap3_l3_decode_addr(error_addr); |
157 | 157 | ||
158 | WARN(true, "%s seen by %s %s at address %x\n", | 158 | pr_err("%s seen by %s %s at address %x\n", |
159 | omap3_l3_code_string(code), | 159 | omap3_l3_code_string(code), |
160 | omap3_l3_initiator_string(initid), | 160 | omap3_l3_initiator_string(initid), |
161 | multi ? "Multiple Errors" : "", address); | 161 | multi ? "Multiple Errors" : "", address); |
162 | WARN_ON(1); | ||
162 | 163 | ||
163 | return IRQ_HANDLED; | 164 | return IRQ_HANDLED; |
164 | } | 165 | } |
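The omap_l3_smx hunk swaps a formatted WARN() for a plain pr_err() followed by WARN_ON(1), so the error text appears as an ordinary error line and the stack trace is emitted separately. Both forms side by side, with an illustrative message:

	#include <linux/bug.h>
	#include <linux/kernel.h>

	static void example_report(u32 address)
	{
		/* Single call: the message is folded into the WARN banner. */
		WARN(1, "error seen at address %x\n", address);

		/* Split form, as in the hunk: readable error line, then a backtrace. */
		pr_err("error seen at address %x\n", address);
		WARN_ON(1);
	}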
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c index 4c90477e6f82..d52651a05daa 100644 --- a/arch/arm/mach-omap2/omap_phy_internal.c +++ b/arch/arm/mach-omap2/omap_phy_internal.c | |||
@@ -239,21 +239,15 @@ void am35x_set_mode(u8 musb_mode) | |||
239 | 239 | ||
240 | devconf2 &= ~CONF2_OTGMODE; | 240 | devconf2 &= ~CONF2_OTGMODE; |
241 | switch (musb_mode) { | 241 | switch (musb_mode) { |
242 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
243 | case MUSB_HOST: /* Force VBUS valid, ID = 0 */ | 242 | case MUSB_HOST: /* Force VBUS valid, ID = 0 */ |
244 | devconf2 |= CONF2_FORCE_HOST; | 243 | devconf2 |= CONF2_FORCE_HOST; |
245 | break; | 244 | break; |
246 | #endif | ||
247 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
248 | case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ | 245 | case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ |
249 | devconf2 |= CONF2_FORCE_DEVICE; | 246 | devconf2 |= CONF2_FORCE_DEVICE; |
250 | break; | 247 | break; |
251 | #endif | ||
252 | #ifdef CONFIG_USB_MUSB_OTG | ||
253 | case MUSB_OTG: /* Don't override the VBUS/ID comparators */ | 248 | case MUSB_OTG: /* Don't override the VBUS/ID comparators */ |
254 | devconf2 |= CONF2_NO_OVERRIDE; | 249 | devconf2 |= CONF2_NO_OVERRIDE; |
255 | break; | 250 | break; |
256 | #endif | ||
257 | default: | 251 | default: |
258 | pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode); | 252 | pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode); |
259 | } | 253 | } |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index a34023d0ca7c..3a595e899724 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -724,6 +724,7 @@ int __init omap3_pm_init(void) | |||
724 | ret = request_irq(omap_prcm_event_to_irq("io"), | 724 | ret = request_irq(omap_prcm_event_to_irq("io"), |
725 | _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io", | 725 | _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io", |
726 | omap3_pm_init); | 726 | omap3_pm_init); |
727 | enable_irq(omap_prcm_event_to_irq("io")); | ||
727 | 728 | ||
728 | if (ret) { | 729 | if (ret) { |
729 | pr_err("pm: Failed to request pm_io irq\n"); | 730 | pr_err("pm: Failed to request pm_io irq\n"); |
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c index 9ce765407ad5..21cb74003a56 100644 --- a/arch/arm/mach-omap2/prm2xxx_3xxx.c +++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/irq.h> | ||
18 | 19 | ||
19 | #include "common.h" | 20 | #include "common.h" |
20 | #include <plat/cpu.h> | 21 | #include <plat/cpu.h> |
@@ -303,8 +304,15 @@ void omap3xxx_prm_restore_irqen(u32 *saved_mask) | |||
303 | 304 | ||
304 | static int __init omap3xxx_prcm_init(void) | 305 | static int __init omap3xxx_prcm_init(void) |
305 | { | 306 | { |
306 | if (cpu_is_omap34xx()) | 307 | int ret = 0; |
307 | return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); | 308 | |
308 | return 0; | 309 | if (cpu_is_omap34xx()) { |
310 | ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); | ||
311 | if (!ret) | ||
312 | irq_set_status_flags(omap_prcm_event_to_irq("io"), | ||
313 | IRQ_NOAUTOEN); | ||
314 | } | ||
315 | |||
316 | return ret; | ||
309 | } | 317 | } |
310 | subsys_initcall(omap3xxx_prcm_init); | 318 | subsys_initcall(omap3xxx_prcm_init); |
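Flagging the PRCM "io" interrupt IRQ_NOAUTOEN at registration time keeps request_irq() from unmasking it implicitly, which pairs with the explicit enable_irq() added in pm34xx.c above. The pattern in isolation, with placeholder names and a dummy dev_id cookie:

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static int example_cookie;

	static irqreturn_t example_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_setup(unsigned int irq)
	{
		int ret;

		/* Keep the line masked across request_irq(). */
		irq_set_status_flags(irq, IRQ_NOAUTOEN);

		ret = request_irq(irq, example_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND, "example_io",
				  &example_cookie);
		if (ret)
			return ret;

		/* Unmask only once the rest of the init is ready for it. */
		enable_irq(irq);
		return 0;
	}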
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 292d4aaca068..c1b93c752d70 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c | |||
@@ -57,6 +57,7 @@ struct omap_uart_state { | |||
57 | 57 | ||
58 | struct list_head node; | 58 | struct list_head node; |
59 | struct omap_hwmod *oh; | 59 | struct omap_hwmod *oh; |
60 | struct omap_device_pad default_omap_uart_pads[2]; | ||
60 | }; | 61 | }; |
61 | 62 | ||
62 | static LIST_HEAD(uart_list); | 63 | static LIST_HEAD(uart_list); |
@@ -126,11 +127,70 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {} | |||
126 | #endif /* CONFIG_PM */ | 127 | #endif /* CONFIG_PM */ |
127 | 128 | ||
128 | #ifdef CONFIG_OMAP_MUX | 129 | #ifdef CONFIG_OMAP_MUX |
129 | static void omap_serial_fill_default_pads(struct omap_board_data *bdata) | 130 | |
131 | #define OMAP_UART_DEFAULT_PAD_NAME_LEN 28 | ||
132 | static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN], | ||
133 | tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata; | ||
134 | |||
135 | static void __init | ||
136 | omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata, | ||
137 | struct omap_uart_state *uart) | ||
138 | { | ||
139 | uart->default_omap_uart_pads[0].name = rx_pad_name; | ||
140 | uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX | | ||
141 | OMAP_DEVICE_PAD_WAKEUP; | ||
142 | uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT | | ||
143 | OMAP_MUX_MODE0; | ||
144 | uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0; | ||
145 | uart->default_omap_uart_pads[1].name = tx_pad_name; | ||
146 | uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT | | ||
147 | OMAP_MUX_MODE0; | ||
148 | bdata->pads = uart->default_omap_uart_pads; | ||
149 | bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads); | ||
150 | } | ||
151 | |||
152 | static void __init omap_serial_check_wakeup(struct omap_board_data *bdata, | ||
153 | struct omap_uart_state *uart) | ||
130 | { | 154 | { |
155 | struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL; | ||
156 | struct omap_mux *rx_mux = NULL, *tx_mux = NULL; | ||
157 | char *rx_fmt, *tx_fmt; | ||
158 | int uart_nr = bdata->id + 1; | ||
159 | |||
160 | if (bdata->id != 2) { | ||
161 | rx_fmt = "uart%d_rx.uart%d_rx"; | ||
162 | tx_fmt = "uart%d_tx.uart%d_tx"; | ||
163 | } else { | ||
164 | rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx"; | ||
165 | tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx"; | ||
166 | } | ||
167 | |||
168 | snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt, | ||
169 | uart_nr, uart_nr); | ||
170 | snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt, | ||
171 | uart_nr, uart_nr); | ||
172 | |||
173 | if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 && | ||
174 | omap_mux_get_by_name | ||
175 | (tx_pad_name, &tx_partition, &tx_mux) >= 0) { | ||
176 | u16 tx_mode, rx_mode; | ||
177 | |||
178 | tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset); | ||
179 | rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset); | ||
180 | |||
181 | /* | ||
182 | * Check whether the UART is used in its default TX/RX mode, i.e. mux mode0; | ||
183 | * if so, configure the RX pin for wake-up capability. | ||
184 | */ | ||
185 | if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode)) | ||
186 | omap_serial_fill_uart_tx_rx_pads(bdata, uart); | ||
187 | } | ||
131 | } | 188 | } |
132 | #else | 189 | #else |
133 | static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {} | 190 | static void __init omap_serial_check_wakeup(struct omap_board_data *bdata, |
191 | struct omap_uart_state *uart) | ||
192 | { | ||
193 | } | ||
134 | #endif | 194 | #endif |
135 | 195 | ||
136 | static char *cmdline_find_option(char *str) | 196 | static char *cmdline_find_option(char *str) |
@@ -287,8 +347,7 @@ void __init omap_serial_board_init(struct omap_uart_port_info *info) | |||
287 | bdata.pads = NULL; | 347 | bdata.pads = NULL; |
288 | bdata.pads_cnt = 0; | 348 | bdata.pads_cnt = 0; |
289 | 349 | ||
290 | if (cpu_is_omap44xx() || cpu_is_omap34xx()) | 350 | omap_serial_check_wakeup(&bdata, uart); |
291 | omap_serial_fill_default_pads(&bdata); | ||
292 | 351 | ||
293 | if (!info) | 352 | if (!info) |
294 | omap_serial_init_port(&bdata, NULL); | 353 | omap_serial_init_port(&bdata, NULL); |
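The new OMAP_MODE_UART() test keys off the mux-mode bits read back from the pad registers: only when both pads are still in mode0 does the code install the wake-up capable pad description. A compressed sketch of that decision, with stand-in macro names and a stubbed fill helper:

	#include <linux/types.h>

	#define EXAMPLE_MUX_MODE7	0x7
	#define EXAMPLE_MUX_MODE0	0x0
	/* Same test as the new OMAP_MODE_UART(): low three mode bits equal mode0. */
	#define EXAMPLE_MODE_UART(x)	(((x) & EXAMPLE_MUX_MODE7) == EXAMPLE_MUX_MODE0)

	/* Stub standing in for omap_serial_fill_uart_tx_rx_pads(). */
	static void example_fill_uart_tx_rx_pads(void)
	{
	}

	static void example_check_wakeup(u16 rx_mode, u16 tx_mode)
	{
		/*
		 * Install the wake-up capable pad description only while both
		 * pads are still muxed as plain UART TX/RX (mode0).
		 */
		if (EXAMPLE_MODE_UART(rx_mode) && EXAMPLE_MODE_UART(tx_mode))
			example_fill_uart_tx_rx_pads();
	}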
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index b19d1b43c12e..c4a576856661 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c | |||
@@ -41,12 +41,10 @@ static struct musb_hdrc_config musb_config = { | |||
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct musb_hdrc_platform_data musb_plat = { | 43 | static struct musb_hdrc_platform_data musb_plat = { |
44 | #ifdef CONFIG_USB_MUSB_OTG | 44 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC |
45 | .mode = MUSB_OTG, | 45 | .mode = MUSB_OTG, |
46 | #elif defined(CONFIG_USB_MUSB_HDRC_HCD) | 46 | #else |
47 | .mode = MUSB_HOST, | 47 | .mode = MUSB_HOST, |
48 | #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) | ||
49 | .mode = MUSB_PERIPHERAL, | ||
50 | #endif | 48 | #endif |
51 | /* .clock is set dynamically */ | 49 | /* .clock is set dynamically */ |
52 | .config = &musb_config, | 50 | .config = &musb_config, |
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c index db84a46ce7fd..805bea6edf17 100644 --- a/arch/arm/mach-omap2/usb-tusb6010.c +++ b/arch/arm/mach-omap2/usb-tusb6010.c | |||
@@ -300,7 +300,7 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data, | |||
300 | printk(error, 3, status); | 300 | printk(error, 3, status); |
301 | return status; | 301 | return status; |
302 | } | 302 | } |
303 | tusb_resources[2].start = irq + IH_GPIO_BASE; | 303 | tusb_resources[2].start = gpio_to_irq(irq); |
304 | 304 | ||
305 | /* set up memory timings ... can speed them up later */ | 305 | /* set up memory timings ... can speed them up later */ |
306 | if (!ps_refclk) { | 306 | if (!ps_refclk) { |
diff --git a/arch/arm/mach-orion5x/include/mach/bridge-regs.h b/arch/arm/mach-orion5x/include/mach/bridge-regs.h index 96484bcd34ca..11a3c1e9801f 100644 --- a/arch/arm/mach-orion5x/include/mach/bridge-regs.h +++ b/arch/arm/mach-orion5x/include/mach/bridge-regs.h | |||
@@ -35,5 +35,5 @@ | |||
35 | #define MAIN_IRQ_MASK (ORION5X_BRIDGE_VIRT_BASE | 0x204) | 35 | #define MAIN_IRQ_MASK (ORION5X_BRIDGE_VIRT_BASE | 0x204) |
36 | 36 | ||
37 | #define TIMER_VIRT_BASE (ORION5X_BRIDGE_VIRT_BASE | 0x300) | 37 | #define TIMER_VIRT_BASE (ORION5X_BRIDGE_VIRT_BASE | 0x300) |
38 | 38 | #define TIMER_PHYS_BASE (ORION5X_BRIDGE_PHYS_BASE | 0x300) | |
39 | #endif | 39 | #endif |
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h new file mode 100644 index 000000000000..1aa5d0a50a0b --- /dev/null +++ b/arch/arm/mach-orion5x/include/mach/io.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-orion5x/include/mach/io.h | ||
3 | * | ||
4 | * This file is licensed under the terms of the GNU General Public | ||
5 | * License version 2. This program is licensed "as is" without any | ||
6 | * warranty of any kind, whether express or implied. | ||
7 | */ | ||
8 | |||
9 | #ifndef __ASM_ARCH_IO_H | ||
10 | #define __ASM_ARCH_IO_H | ||
11 | |||
12 | #include <mach/orion5x.h> | ||
13 | #include <asm/sizes.h> | ||
14 | |||
15 | #define IO_SPACE_LIMIT SZ_2M | ||
16 | static inline void __iomem *__io(unsigned long addr) | ||
17 | { | ||
18 | return (void __iomem *)(addr + ORION5X_PCIE_IO_VIRT_BASE); | ||
19 | } | ||
20 | |||
21 | #define __io(a) __io(a) | ||
22 | #endif | ||
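The new orion5x io.h caps port I/O at 2 MiB and treats a port number as a plain offset into the mapped PCIe I/O window. A standalone illustration of that translation, with a made-up base address (the real one is ORION5X_PCIE_IO_VIRT_BASE):

	#include <linux/types.h>

	#define EXAMPLE_PCIE_IO_VIRT_BASE	0xfee00000UL	/* placeholder base */
	#define EXAMPLE_IO_SPACE_LIMIT		0x00200000UL	/* SZ_2M, as in the header */

	/*
	 * Mirror of the __io() translation: the port number handed to inb()/outb()
	 * becomes an offset into the I/O window. IO_SPACE_LIMIT only bounds the
	 * port range the generic I/O code will accept.
	 */
	static inline void __iomem *example_io(unsigned long port)
	{
		return (void __iomem *)(EXAMPLE_PCIE_IO_VIRT_BASE + port);
	}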
diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h index 2745f5d95b3f..683e085ce162 100644 --- a/arch/arm/mach-orion5x/include/mach/orion5x.h +++ b/arch/arm/mach-orion5x/include/mach/orion5x.h | |||
@@ -82,6 +82,7 @@ | |||
82 | #define UART1_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE | 0x2100) | 82 | #define UART1_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE | 0x2100) |
83 | 83 | ||
84 | #define ORION5X_BRIDGE_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x20000) | 84 | #define ORION5X_BRIDGE_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x20000) |
85 | #define ORION5X_BRIDGE_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x20000) | ||
85 | 86 | ||
86 | #define ORION5X_PCI_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x30000) | 87 | #define ORION5X_PCI_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x30000) |
87 | 88 | ||
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index f31383c32f9c..df33909205e2 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig | |||
@@ -186,6 +186,12 @@ config SH_TIMER_TMU | |||
186 | help | 186 | help |
187 | This enables build of the TMU timer driver. | 187 | This enables build of the TMU timer driver. |
188 | 188 | ||
189 | config EM_TIMER_STI | ||
190 | bool "STI timer driver" | ||
191 | default y | ||
192 | help | ||
193 | This enables build of the STI timer driver. | ||
194 | |||
189 | endmenu | 195 | endmenu |
190 | 196 | ||
191 | config SH_CLK_CPG | 197 | config SH_CLK_CPG |
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S index ea1564609bd4..9e3ae6bfe50d 100644 --- a/arch/arm/mach-spear13xx/include/mach/debug-macro.S +++ b/arch/arm/mach-spear13xx/include/mach/debug-macro.S | |||
@@ -4,7 +4,7 @@ | |||
4 | * Debugging macro include header spear13xx machine family | 4 | * Debugging macro include header spear13xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h index 383ab04dc6c9..d50bdb605925 100644 --- a/arch/arm/mach-spear13xx/include/mach/dma.h +++ b/arch/arm/mach-spear13xx/include/mach/dma.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * DMA information for SPEAr13xx machine family | 4 | * DMA information for SPEAr13xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h index 6d8c45b9f298..dac57fd0cdfd 100644 --- a/arch/arm/mach-spear13xx/include/mach/generic.h +++ b/arch/arm/mach-spear13xx/include/mach/generic.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * spear13xx machine family generic header file | 4 | * spear13xx machine family generic header file |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h index cd6f4f86a56b..85f176311f63 100644 --- a/arch/arm/mach-spear13xx/include/mach/gpio.h +++ b/arch/arm/mach-spear13xx/include/mach/gpio.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * GPIO macros for SPEAr13xx machine family | 4 | * GPIO macros for SPEAr13xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h index f542a24aa5f2..271a62b4cd31 100644 --- a/arch/arm/mach-spear13xx/include/mach/irqs.h +++ b/arch/arm/mach-spear13xx/include/mach/irqs.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * IRQ helper macros for spear13xx machine family | 4 | * IRQ helper macros for spear13xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h index 30c57ef72686..65f27def239b 100644 --- a/arch/arm/mach-spear13xx/include/mach/spear.h +++ b/arch/arm/mach-spear13xx/include/mach/spear.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * spear13xx Machine family specific definition | 4 | * spear13xx Machine family specific definition |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h index 31af3e8d976e..3a58b8284a6a 100644 --- a/arch/arm/mach-spear13xx/include/mach/timex.h +++ b/arch/arm/mach-spear13xx/include/mach/timex.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr3XX machine family specific timex definitions | 4 | * SPEAr3XX machine family specific timex definitions |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h index c7840896ae6e..70fe72f05dea 100644 --- a/arch/arm/mach-spear13xx/include/mach/uncompress.h +++ b/arch/arm/mach-spear13xx/include/mach/uncompress.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Serial port stubs for kernel decompress status messages | 4 | * Serial port stubs for kernel decompress status messages |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c index fefd15b2f380..732d29bc7330 100644 --- a/arch/arm/mach-spear13xx/spear1310.c +++ b/arch/arm/mach-spear13xx/spear1310.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr1310 machine source file | 4 | * SPEAr1310 machine source file |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c index ee38cbc56869..81e4ed76ad06 100644 --- a/arch/arm/mach-spear13xx/spear1340.c +++ b/arch/arm/mach-spear13xx/spear1340.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr1340 machine source file | 4 | * SPEAr1340 machine source file |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c index 50b349ae863d..cf936b106e27 100644 --- a/arch/arm/mach-spear13xx/spear13xx.c +++ b/arch/arm/mach-spear13xx/spear13xx.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr13XX machines common source file | 4 | * SPEAr13XX machines common source file |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/debug-macro.S b/arch/arm/mach-spear3xx/include/mach/debug-macro.S index 590519f10d6e..0a6381fad5d9 100644 --- a/arch/arm/mach-spear3xx/include/mach/debug-macro.S +++ b/arch/arm/mach-spear3xx/include/mach/debug-macro.S | |||
@@ -4,7 +4,7 @@ | |||
4 | * Debugging macro include header spear3xx machine family | 4 | * Debugging macro include header spear3xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar<viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h index 4a95b9453c2a..ce19113ca791 100644 --- a/arch/arm/mach-spear3xx/include/mach/generic.h +++ b/arch/arm/mach-spear3xx/include/mach/generic.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr3XX machine family generic header file | 4 | * SPEAr3XX machine family generic header file |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar<viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/gpio.h b/arch/arm/mach-spear3xx/include/mach/gpio.h index 451b2081bfc9..2ac74c6db7f1 100644 --- a/arch/arm/mach-spear3xx/include/mach/gpio.h +++ b/arch/arm/mach-spear3xx/include/mach/gpio.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * GPIO macros for SPEAr3xx machine family | 4 | * GPIO macros for SPEAr3xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar<viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h index 51bd62a0254c..803de76f5f36 100644 --- a/arch/arm/mach-spear3xx/include/mach/irqs.h +++ b/arch/arm/mach-spear3xx/include/mach/irqs.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * IRQ helper macros for SPEAr3xx machine family | 4 | * IRQ helper macros for SPEAr3xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h index 18e2ac576f25..6309bf68d6f8 100644 --- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h +++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Miscellaneous registers definitions for SPEAr3xx machine family | 4 | * Miscellaneous registers definitions for SPEAr3xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h index 51eb953148a9..8cca95193d4d 100644 --- a/arch/arm/mach-spear3xx/include/mach/spear.h +++ b/arch/arm/mach-spear3xx/include/mach/spear.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr3xx Machine family specific definition | 4 | * SPEAr3xx Machine family specific definition |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/timex.h b/arch/arm/mach-spear3xx/include/mach/timex.h index a38cc9de876f..9f5d08bd0c44 100644 --- a/arch/arm/mach-spear3xx/include/mach/timex.h +++ b/arch/arm/mach-spear3xx/include/mach/timex.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr3XX machine family specific timex definitions | 4 | * SPEAr3XX machine family specific timex definitions |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/include/mach/uncompress.h b/arch/arm/mach-spear3xx/include/mach/uncompress.h index 53ba8bbc0dfa..b909b011f7c8 100644 --- a/arch/arm/mach-spear3xx/include/mach/uncompress.h +++ b/arch/arm/mach-spear3xx/include/mach/uncompress.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Serial port stubs for kernel decompress status messages | 4 | * Serial port stubs for kernel decompress status messages |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c index f74a05bdb829..0f882ecb7d81 100644 --- a/arch/arm/mach-spear3xx/spear300.c +++ b/arch/arm/mach-spear3xx/spear300.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr300 machine source file | 4 | * SPEAr300 machine source file |
5 | * | 5 | * |
6 | * Copyright (C) 2009-2012 ST Microelectronics | 6 | * Copyright (C) 2009-2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c index 84dfb0900747..bbcf4571d361 100644 --- a/arch/arm/mach-spear3xx/spear310.c +++ b/arch/arm/mach-spear3xx/spear310.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr310 machine source file | 4 | * SPEAr310 machine source file |
5 | * | 5 | * |
6 | * Copyright (C) 2009-2012 ST Microelectronics | 6 | * Copyright (C) 2009-2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c index a88fa841d29d..88d483bcd66a 100644 --- a/arch/arm/mach-spear3xx/spear320.c +++ b/arch/arm/mach-spear3xx/spear320.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr320 machine source file | 4 | * SPEAr320 machine source file |
5 | * | 5 | * |
6 | * Copyright (C) 2009-2012 ST Microelectronics | 6 | * Copyright (C) 2009-2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c index f22419ed74a8..0f41bd1c47c3 100644 --- a/arch/arm/mach-spear3xx/spear3xx.c +++ b/arch/arm/mach-spear3xx/spear3xx.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr3XX machines common source file | 4 | * SPEAr3XX machines common source file |
5 | * | 5 | * |
6 | * Copyright (C) 2009-2012 ST Microelectronics | 6 | * Copyright (C) 2009-2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear6xx/include/mach/gpio.h b/arch/arm/mach-spear6xx/include/mach/gpio.h index 3a789dbb69f7..d42cefc0356d 100644 --- a/arch/arm/mach-spear6xx/include/mach/gpio.h +++ b/arch/arm/mach-spear6xx/include/mach/gpio.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * GPIO macros for SPEAr6xx machine family | 4 | * GPIO macros for SPEAr6xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h index 179e45774b3a..c34acc201d34 100644 --- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h +++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Miscellaneous registers definitions for SPEAr6xx machine family | 4 | * Miscellaneous registers definitions for SPEAr6xx machine family |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c index 4d6a2ee99c3b..5beb7ebe2948 100644 --- a/arch/arm/mach-tegra/reset.c +++ b/arch/arm/mach-tegra/reset.c | |||
@@ -33,7 +33,7 @@ | |||
33 | 33 | ||
34 | static bool is_enabled; | 34 | static bool is_enabled; |
35 | 35 | ||
36 | static void tegra_cpu_reset_handler_enable(void) | 36 | static void __init tegra_cpu_reset_handler_enable(void) |
37 | { | 37 | { |
38 | void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE); | 38 | void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE); |
39 | void __iomem *evp_cpu_reset = | 39 | void __iomem *evp_cpu_reset = |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index 9c74ac545849..1509a3cb5833 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c | |||
@@ -580,43 +580,12 @@ static void ux500_uart0_reset(void) | |||
580 | udelay(1); | 580 | udelay(1); |
581 | } | 581 | } |
582 | 582 | ||
583 | /* This needs to be referenced by callbacks */ | ||
584 | struct pinctrl *u0_p; | ||
585 | struct pinctrl_state *u0_def; | ||
586 | struct pinctrl_state *u0_sleep; | ||
587 | |||
588 | static void ux500_uart0_init(void) | ||
589 | { | ||
590 | int ret; | ||
591 | |||
592 | if (IS_ERR(u0_p) || IS_ERR(u0_def)) | ||
593 | return; | ||
594 | |||
595 | ret = pinctrl_select_state(u0_p, u0_def); | ||
596 | if (ret) | ||
597 | pr_err("could not set UART0 defstate\n"); | ||
598 | } | ||
599 | |||
600 | static void ux500_uart0_exit(void) | ||
601 | { | ||
602 | int ret; | ||
603 | |||
604 | if (IS_ERR(u0_p) || IS_ERR(u0_sleep)) | ||
605 | return; | ||
606 | |||
607 | ret = pinctrl_select_state(u0_p, u0_sleep); | ||
608 | if (ret) | ||
609 | pr_err("could not set UART0 idlestate\n"); | ||
610 | } | ||
611 | |||
612 | static struct amba_pl011_data uart0_plat = { | 583 | static struct amba_pl011_data uart0_plat = { |
613 | #ifdef CONFIG_STE_DMA40 | 584 | #ifdef CONFIG_STE_DMA40 |
614 | .dma_filter = stedma40_filter, | 585 | .dma_filter = stedma40_filter, |
615 | .dma_rx_param = &uart0_dma_cfg_rx, | 586 | .dma_rx_param = &uart0_dma_cfg_rx, |
616 | .dma_tx_param = &uart0_dma_cfg_tx, | 587 | .dma_tx_param = &uart0_dma_cfg_tx, |
617 | #endif | 588 | #endif |
618 | .init = ux500_uart0_init, | ||
619 | .exit = ux500_uart0_exit, | ||
620 | .reset = ux500_uart0_reset, | 589 | .reset = ux500_uart0_reset, |
621 | }; | 590 | }; |
622 | 591 | ||
@@ -638,28 +607,7 @@ static struct amba_pl011_data uart2_plat = { | |||
638 | 607 | ||
639 | static void __init mop500_uart_init(struct device *parent) | 608 | static void __init mop500_uart_init(struct device *parent) |
640 | { | 609 | { |
641 | struct amba_device *uart0_device; | 610 | db8500_add_uart0(parent, &uart0_plat); |
642 | |||
643 | uart0_device = db8500_add_uart0(parent, &uart0_plat); | ||
644 | if (uart0_device) { | ||
645 | u0_p = pinctrl_get(&uart0_device->dev); | ||
646 | if (IS_ERR(u0_p)) | ||
647 | dev_err(&uart0_device->dev, | ||
648 | "could not get UART0 pinctrl\n"); | ||
649 | else { | ||
650 | u0_def = pinctrl_lookup_state(u0_p, | ||
651 | PINCTRL_STATE_DEFAULT); | ||
652 | if (IS_ERR(u0_def)) { | ||
653 | dev_err(&uart0_device->dev, | ||
654 | "could not get UART0 defstate\n"); | ||
655 | } | ||
656 | u0_sleep = pinctrl_lookup_state(u0_p, | ||
657 | PINCTRL_STATE_SLEEP); | ||
658 | if (IS_ERR(u0_sleep)) | ||
659 | dev_err(&uart0_device->dev, | ||
660 | "could not get UART0 idlestate\n"); | ||
661 | } | ||
662 | } | ||
663 | db8500_add_uart1(parent, &uart1_plat); | 611 | db8500_add_uart1(parent, &uart1_plat); |
664 | db8500_add_uart2(parent, &uart2_plat); | 612 | db8500_add_uart2(parent, &uart2_plat); |
665 | } | 613 | } |
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index cf4687ee2a7b..cd8ea3588f93 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c | |||
@@ -169,26 +169,13 @@ static struct map_desc versatile_io_desc[] __initdata = { | |||
169 | .pfn = __phys_to_pfn(VERSATILE_PCI_CFG_BASE), | 169 | .pfn = __phys_to_pfn(VERSATILE_PCI_CFG_BASE), |
170 | .length = VERSATILE_PCI_CFG_BASE_SIZE, | 170 | .length = VERSATILE_PCI_CFG_BASE_SIZE, |
171 | .type = MT_DEVICE | 171 | .type = MT_DEVICE |
172 | }, | ||
173 | #if 0 | ||
174 | { | ||
175 | .virtual = VERSATILE_PCI_VIRT_MEM_BASE0, | ||
176 | .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0), | ||
177 | .length = SZ_16M, | ||
178 | .type = MT_DEVICE | ||
179 | }, { | 172 | }, { |
180 | .virtual = VERSATILE_PCI_VIRT_MEM_BASE1, | 173 | .virtual = (unsigned long)VERSATILE_PCI_VIRT_MEM_BASE0, |
181 | .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1), | 174 | .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0), |
182 | .length = SZ_16M, | 175 | .length = IO_SPACE_LIMIT, |
183 | .type = MT_DEVICE | ||
184 | }, { | ||
185 | .virtual = VERSATILE_PCI_VIRT_MEM_BASE2, | ||
186 | .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2), | ||
187 | .length = SZ_16M, | ||
188 | .type = MT_DEVICE | 176 | .type = MT_DEVICE |
189 | }, | 177 | }, |
190 | #endif | 178 | #endif |
191 | #endif | ||
192 | }; | 179 | }; |
193 | 180 | ||
194 | void __init versatile_map_io(void) | 181 | void __init versatile_map_io(void) |
diff --git a/arch/arm/mach-versatile/include/mach/hardware.h b/arch/arm/mach-versatile/include/mach/hardware.h index 4d4973dd8fba..408e58da46c6 100644 --- a/arch/arm/mach-versatile/include/mach/hardware.h +++ b/arch/arm/mach-versatile/include/mach/hardware.h | |||
@@ -29,8 +29,9 @@ | |||
29 | */ | 29 | */ |
30 | #define VERSATILE_PCI_VIRT_BASE (void __iomem *)0xe8000000ul | 30 | #define VERSATILE_PCI_VIRT_BASE (void __iomem *)0xe8000000ul |
31 | #define VERSATILE_PCI_CFG_VIRT_BASE (void __iomem *)0xe9000000ul | 31 | #define VERSATILE_PCI_CFG_VIRT_BASE (void __iomem *)0xe9000000ul |
32 | #define VERSATILE_PCI_VIRT_MEM_BASE0 (void __iomem *)PCIO_BASE | ||
32 | 33 | ||
33 | /* macro to get at IO space when running virtually */ | 34 | /* macro to get at MMIO space when running virtually */ |
34 | #define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000) | 35 | #define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000) |
35 | 36 | ||
36 | #define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n)) | 37 | #define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n)) |
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h new file mode 100644 index 000000000000..0406513be7d8 --- /dev/null +++ b/arch/arm/mach-versatile/include/mach/io.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * arch/arm/mach-versatile/include/mach/io.h | ||
3 | * | ||
4 | * Copyright (C) 2003 ARM Limited | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | #ifndef __ASM_ARM_ARCH_IO_H | ||
21 | #define __ASM_ARM_ARCH_IO_H | ||
22 | |||
23 | #define PCIO_BASE 0xeb000000ul | ||
24 | |||
25 | #define __io(a) ((a) + PCIO_BASE) | ||
26 | |||
27 | #endif | ||
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index 15c6a00000ec..bec933b04ef0 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c | |||
@@ -169,11 +169,18 @@ static struct pci_ops pci_versatile_ops = { | |||
169 | .write = versatile_write_config, | 169 | .write = versatile_write_config, |
170 | }; | 170 | }; |
171 | 171 | ||
172 | static struct resource io_port = { | ||
173 | .name = "PCI", | ||
174 | .start = 0, | ||
175 | .end = IO_SPACE_LIMIT, | ||
176 | .flags = IORESOURCE_IO, | ||
177 | }; | ||
178 | |||
172 | static struct resource io_mem = { | 179 | static struct resource io_mem = { |
173 | .name = "PCI I/O space", | 180 | .name = "PCI I/O space", |
174 | .start = VERSATILE_PCI_MEM_BASE0, | 181 | .start = VERSATILE_PCI_MEM_BASE0, |
175 | .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1, | 182 | .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1, |
176 | .flags = IORESOURCE_IO, | 183 | .flags = IORESOURCE_MEM, |
177 | }; | 184 | }; |
178 | 185 | ||
179 | static struct resource non_mem = { | 186 | static struct resource non_mem = { |
@@ -200,6 +207,12 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys) | |||
200 | "memory region (%d)\n", ret); | 207 | "memory region (%d)\n", ret); |
201 | goto out; | 208 | goto out; |
202 | } | 209 | } |
210 | ret = request_resource(&ioport_resource, &io_port); | ||
211 | if (ret) { | ||
212 | printk(KERN_ERR "PCI: unable to allocate I/O " | ||
213 | "port region (%d)\n", ret); | ||
214 | goto out; | ||
215 | } | ||
203 | ret = request_resource(&iomem_resource, &non_mem); | 216 | ret = request_resource(&iomem_resource, &non_mem); |
204 | if (ret) { | 217 | if (ret) { |
205 | printk(KERN_ERR "PCI: unable to allocate non-prefetchable " | 218 | printk(KERN_ERR "PCI: unable to allocate non-prefetchable " |
@@ -218,7 +231,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys) | |||
218 | * the mem resource for this bus | 231 | * the mem resource for this bus |
219 | * the prefetch mem resource for this bus | 232 | * the prefetch mem resource for this bus |
220 | */ | 233 | */ |
221 | pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset); | 234 | pci_add_resource_offset(&sys->resources, &io_port, sys->io_offset); |
222 | pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset); | 235 | pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset); |
223 | pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset); | 236 | pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset); |
224 | 237 | ||
@@ -249,6 +262,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys) | |||
249 | 262 | ||
250 | if (nr == 0) { | 263 | if (nr == 0) { |
251 | sys->mem_offset = 0; | 264 | sys->mem_offset = 0; |
265 | sys->io_offset = 0; | ||
252 | ret = pci_versatile_setup_resources(sys); | 266 | ret = pci_versatile_setup_resources(sys); |
253 | if (ret < 0) { | 267 | if (ret < 0) { |
254 | printk("pci_versatile_setup: resources... oops?\n"); | 268 | printk("pci_versatile_setup: resources... oops?\n"); |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ea6b43154090..d766e4256b74 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -228,7 +228,7 @@ static pte_t **consistent_pte; | |||
228 | 228 | ||
229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M | 229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M |
230 | 230 | ||
231 | unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; | 231 | static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; |
232 | 232 | ||
233 | void __init init_consistent_dma_size(unsigned long size) | 233 | void __init init_consistent_dma_size(unsigned long size) |
234 | { | 234 | { |
@@ -268,10 +268,8 @@ static int __init consistent_init(void) | |||
268 | unsigned long base = consistent_base; | 268 | unsigned long base = consistent_base; |
269 | unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; | 269 | unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; |
270 | 270 | ||
271 | #ifndef CONFIG_ARM_DMA_USE_IOMMU | 271 | if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) |
272 | if (cpu_architecture() >= CPU_ARCH_ARMv6) | ||
273 | return 0; | 272 | return 0; |
274 | #endif | ||
275 | 273 | ||
276 | consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); | 274 | consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); |
277 | if (!consistent_pte) { | 275 | if (!consistent_pte) { |
@@ -323,7 +321,7 @@ static struct arm_vmregion_head coherent_head = { | |||
323 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), | 321 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), |
324 | }; | 322 | }; |
325 | 323 | ||
326 | size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; | 324 | static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; |
327 | 325 | ||
328 | static int __init early_coherent_pool(char *p) | 326 | static int __init early_coherent_pool(char *p) |
329 | { | 327 | { |
@@ -342,7 +340,7 @@ static int __init coherent_init(void) | |||
342 | struct page *page; | 340 | struct page *page; |
343 | void *ptr; | 341 | void *ptr; |
344 | 342 | ||
345 | if (cpu_architecture() < CPU_ARCH_ARMv6) | 343 | if (!IS_ENABLED(CONFIG_CMA)) |
346 | return 0; | 344 | return 0; |
347 | 345 | ||
348 | ptr = __alloc_from_contiguous(NULL, size, prot, &page); | 346 | ptr = __alloc_from_contiguous(NULL, size, prot, &page); |
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
704 | 702 | ||
705 | if (arch_is_coherent() || nommu()) | 703 | if (arch_is_coherent() || nommu()) |
706 | addr = __alloc_simple_buffer(dev, size, gfp, &page); | 704 | addr = __alloc_simple_buffer(dev, size, gfp, &page); |
707 | else if (cpu_architecture() < CPU_ARCH_ARMv6) | 705 | else if (!IS_ENABLED(CONFIG_CMA)) |
708 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); | 706 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); |
709 | else if (gfp & GFP_ATOMIC) | 707 | else if (gfp & GFP_ATOMIC) |
710 | addr = __alloc_from_pool(dev, size, &page, caller); | 708 | addr = __alloc_from_pool(dev, size, &page, caller); |
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | |||
773 | 771 | ||
774 | if (arch_is_coherent() || nommu()) { | 772 | if (arch_is_coherent() || nommu()) { |
775 | __dma_free_buffer(page, size); | 773 | __dma_free_buffer(page, size); |
776 | } else if (cpu_architecture() < CPU_ARCH_ARMv6) { | 774 | } else if (!IS_ENABLED(CONFIG_CMA)) { |
777 | __dma_free_remap(cpu_addr, size); | 775 | __dma_free_remap(cpu_addr, size); |
778 | __dma_free_buffer(page, size); | 776 | __dma_free_buffer(page, size); |
779 | } else { | 777 | } else { |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c21d06c7dd7e..f54d59219764 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size); | |||
212 | * allocations. This must be the smallest DMA mask in the system, | 212 | * allocations. This must be the smallest DMA mask in the system, |
213 | * so a successful GFP_DMA allocation will always satisfy this. | 213 | * so a successful GFP_DMA allocation will always satisfy this. |
214 | */ | 214 | */ |
215 | u32 arm_dma_limit; | 215 | phys_addr_t arm_dma_limit; |
216 | 216 | ||
217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
218 | unsigned long dma_size) | 218 | unsigned long dma_size) |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 93dc0c17cdcb..c471436c7952 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #ifdef CONFIG_ZONE_DMA | 64 | #ifdef CONFIG_ZONE_DMA |
65 | extern u32 arm_dma_limit; | 65 | extern phys_addr_t arm_dma_limit; |
66 | #else | 66 | #else |
67 | #define arm_dma_limit ((u32)~0) | 67 | #define arm_dma_limit ((u32)~0) |
68 | #endif | 68 | #endif |
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 62135849f48b..c641fb685017 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c | |||
@@ -762,6 +762,11 @@ b_epilogue: | |||
762 | update_on_xread(ctx); | 762 | update_on_xread(ctx); |
763 | emit(ARM_MOV_R(r_A, r_X), ctx); | 763 | emit(ARM_MOV_R(r_A, r_X), ctx); |
764 | break; | 764 | break; |
765 | case BPF_S_ANC_ALU_XOR_X: | ||
766 | /* A ^= X */ | ||
767 | update_on_xread(ctx); | ||
768 | emit(ARM_EOR_R(r_A, r_A, r_X), ctx); | ||
769 | break; | ||
765 | case BPF_S_ANC_PROTOCOL: | 770 | case BPF_S_ANC_PROTOCOL: |
766 | /* A = ntohs(skb->protocol) */ | 771 | /* A = ntohs(skb->protocol) */ |
767 | ctx->seen |= SEEN_SKB; | 772 | ctx->seen |= SEEN_SKB; |
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index 99ae5e3f46d2..7fa2f7d3cb90 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h | |||
@@ -68,6 +68,8 @@ | |||
68 | #define ARM_INST_CMP_R 0x01500000 | 68 | #define ARM_INST_CMP_R 0x01500000 |
69 | #define ARM_INST_CMP_I 0x03500000 | 69 | #define ARM_INST_CMP_I 0x03500000 |
70 | 70 | ||
71 | #define ARM_INST_EOR_R 0x00200000 | ||
72 | |||
71 | #define ARM_INST_LDRB_I 0x05d00000 | 73 | #define ARM_INST_LDRB_I 0x05d00000 |
72 | #define ARM_INST_LDRB_R 0x07d00000 | 74 | #define ARM_INST_LDRB_R 0x07d00000 |
73 | #define ARM_INST_LDRH_I 0x01d000b0 | 75 | #define ARM_INST_LDRH_I 0x01d000b0 |
@@ -132,6 +134,8 @@ | |||
132 | #define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm) | 134 | #define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm) |
133 | #define ARM_CMP_I(rn, imm) _AL3_I(ARM_INST_CMP, 0, rn, imm) | 135 | #define ARM_CMP_I(rn, imm) _AL3_I(ARM_INST_CMP, 0, rn, imm) |
134 | 136 | ||
137 | #define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm) | ||
138 | |||
135 | #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ | 139 | #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ |
136 | | (off)) | 140 | | (off)) |
137 | #define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ | 141 | #define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ |
diff --git a/arch/arm/plat-mxc/epit.c b/arch/arm/plat-mxc/epit.c index 9129c9e7d532..88726f4dbbfa 100644 --- a/arch/arm/plat-mxc/epit.c +++ b/arch/arm/plat-mxc/epit.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/irq.h> | 50 | #include <linux/irq.h> |
51 | #include <linux/clockchips.h> | 51 | #include <linux/clockchips.h> |
52 | #include <linux/clk.h> | 52 | #include <linux/clk.h> |
53 | #include <linux/err.h> | ||
53 | 54 | ||
54 | #include <mach/hardware.h> | 55 | #include <mach/hardware.h> |
55 | #include <asm/mach/time.h> | 56 | #include <asm/mach/time.h> |
@@ -201,8 +202,16 @@ static int __init epit_clockevent_init(struct clk *timer_clk) | |||
201 | return 0; | 202 | return 0; |
202 | } | 203 | } |
203 | 204 | ||
204 | void __init epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq) | 205 | void __init epit_timer_init(void __iomem *base, int irq) |
205 | { | 206 | { |
207 | struct clk *timer_clk; | ||
208 | |||
209 | timer_clk = clk_get_sys("imx-epit.0", NULL); | ||
210 | if (IS_ERR(timer_clk)) { | ||
211 | pr_err("i.MX epit: unable to get clk\n"); | ||
212 | return; | ||
213 | } | ||
214 | |||
206 | clk_prepare_enable(timer_clk); | 215 | clk_prepare_enable(timer_clk); |
207 | 216 | ||
208 | timer_base = base; | 217 | timer_base = base; |
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h index cf663d84e7c1..e429ca1b814a 100644 --- a/arch/arm/plat-mxc/include/mach/common.h +++ b/arch/arm/plat-mxc/include/mach/common.h | |||
@@ -54,8 +54,8 @@ extern void imx50_soc_init(void); | |||
54 | extern void imx51_soc_init(void); | 54 | extern void imx51_soc_init(void); |
55 | extern void imx53_soc_init(void); | 55 | extern void imx53_soc_init(void); |
56 | extern void imx51_init_late(void); | 56 | extern void imx51_init_late(void); |
57 | extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq); | 57 | extern void epit_timer_init(void __iomem *base, int irq); |
58 | extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); | 58 | extern void mxc_timer_init(void __iomem *, int); |
59 | extern int mx1_clocks_init(unsigned long fref); | 59 | extern int mx1_clocks_init(unsigned long fref); |
60 | extern int mx21_clocks_init(unsigned long lref, unsigned long fref); | 60 | extern int mx21_clocks_init(unsigned long lref, unsigned long fref); |
61 | extern int mx25_clocks_init(void); | 61 | extern int mx25_clocks_init(void); |
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c index 99f958ca6cb8..00e8e659e667 100644 --- a/arch/arm/plat-mxc/time.c +++ b/arch/arm/plat-mxc/time.c | |||
@@ -58,6 +58,7 @@ | |||
58 | /* MX31, MX35, MX25, MX5 */ | 58 | /* MX31, MX35, MX25, MX5 */ |
59 | #define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */ | 59 | #define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */ |
60 | #define V2_TCTL_CLK_IPG (1 << 6) | 60 | #define V2_TCTL_CLK_IPG (1 << 6) |
61 | #define V2_TCTL_CLK_PER (2 << 6) | ||
61 | #define V2_TCTL_FRR (1 << 9) | 62 | #define V2_TCTL_FRR (1 << 9) |
62 | #define V2_IR 0x0c | 63 | #define V2_IR 0x0c |
63 | #define V2_TSTAT 0x08 | 64 | #define V2_TSTAT 0x08 |
@@ -280,23 +281,22 @@ static int __init mxc_clockevent_init(struct clk *timer_clk) | |||
280 | return 0; | 281 | return 0; |
281 | } | 282 | } |
282 | 283 | ||
283 | void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq) | 284 | void __init mxc_timer_init(void __iomem *base, int irq) |
284 | { | 285 | { |
285 | uint32_t tctl_val; | 286 | uint32_t tctl_val; |
287 | struct clk *timer_clk; | ||
286 | struct clk *timer_ipg_clk; | 288 | struct clk *timer_ipg_clk; |
287 | 289 | ||
288 | if (!timer_clk) { | 290 | timer_clk = clk_get_sys("imx-gpt.0", "per"); |
289 | timer_clk = clk_get_sys("imx-gpt.0", "per"); | 291 | if (IS_ERR(timer_clk)) { |
290 | if (IS_ERR(timer_clk)) { | 292 | pr_err("i.MX timer: unable to get clk\n"); |
291 | pr_err("i.MX timer: unable to get clk\n"); | 293 | return; |
292 | return; | ||
293 | } | ||
294 | |||
295 | timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg"); | ||
296 | if (!IS_ERR(timer_ipg_clk)) | ||
297 | clk_prepare_enable(timer_ipg_clk); | ||
298 | } | 294 | } |
299 | 295 | ||
296 | timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg"); | ||
297 | if (!IS_ERR(timer_ipg_clk)) | ||
298 | clk_prepare_enable(timer_ipg_clk); | ||
299 | |||
300 | clk_prepare_enable(timer_clk); | 300 | clk_prepare_enable(timer_clk); |
301 | 301 | ||
302 | timer_base = base; | 302 | timer_base = base; |
@@ -309,7 +309,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq) | |||
309 | __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */ | 309 | __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */ |
310 | 310 | ||
311 | if (timer_is_v2()) | 311 | if (timer_is_v2()) |
312 | tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN; | 312 | tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN; |
313 | else | 313 | else |
314 | tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN; | 314 | tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN; |
315 | 315 | ||
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c index 62ec5c452792..706b7e29397f 100644 --- a/arch/arm/plat-omap/clock.c +++ b/arch/arm/plat-omap/clock.c | |||
@@ -461,6 +461,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused) | |||
461 | struct clk *c; | 461 | struct clk *c; |
462 | struct clk *pa; | 462 | struct clk *pa; |
463 | 463 | ||
464 | mutex_lock(&clocks_mutex); | ||
464 | seq_printf(s, "%-30s %-30s %-10s %s\n", | 465 | seq_printf(s, "%-30s %-30s %-10s %s\n", |
465 | "clock-name", "parent-name", "rate", "use-count"); | 466 | "clock-name", "parent-name", "rate", "use-count"); |
466 | 467 | ||
@@ -469,6 +470,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused) | |||
469 | seq_printf(s, "%-30s %-30s %-10lu %d\n", | 470 | seq_printf(s, "%-30s %-30s %-10lu %d\n", |
470 | c->name, pa ? pa->name : "none", c->rate, c->usecount); | 471 | c->name, pa ? pa->name : "none", c->rate, c->usecount); |
471 | } | 472 | } |
473 | mutex_unlock(&clocks_mutex); | ||
472 | 474 | ||
473 | return 0; | 475 | return 0; |
474 | } | 476 | } |
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h index 297245dba66e..de6c0a08f461 100644 --- a/arch/arm/plat-omap/include/plat/cpu.h +++ b/arch/arm/plat-omap/include/plat/cpu.h | |||
@@ -252,8 +252,6 @@ IS_AM_SUBCLASS(335x, 0x335) | |||
252 | * cpu_is_omap2423(): True for OMAP2423 | 252 | * cpu_is_omap2423(): True for OMAP2423 |
253 | * cpu_is_omap2430(): True for OMAP2430 | 253 | * cpu_is_omap2430(): True for OMAP2430 |
254 | * cpu_is_omap3430(): True for OMAP3430 | 254 | * cpu_is_omap3430(): True for OMAP3430 |
255 | * cpu_is_omap3505(): True for OMAP3505 | ||
256 | * cpu_is_omap3517(): True for OMAP3517 | ||
257 | */ | 255 | */ |
258 | #define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff) | 256 | #define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff) |
259 | 257 | ||
@@ -277,8 +275,6 @@ IS_OMAP_TYPE(2422, 0x2422) | |||
277 | IS_OMAP_TYPE(2423, 0x2423) | 275 | IS_OMAP_TYPE(2423, 0x2423) |
278 | IS_OMAP_TYPE(2430, 0x2430) | 276 | IS_OMAP_TYPE(2430, 0x2430) |
279 | IS_OMAP_TYPE(3430, 0x3430) | 277 | IS_OMAP_TYPE(3430, 0x3430) |
280 | IS_OMAP_TYPE(3505, 0x3517) | ||
281 | IS_OMAP_TYPE(3517, 0x3517) | ||
282 | 278 | ||
283 | #define cpu_is_omap310() 0 | 279 | #define cpu_is_omap310() 0 |
284 | #define cpu_is_omap730() 0 | 280 | #define cpu_is_omap730() 0 |
@@ -293,12 +289,6 @@ IS_OMAP_TYPE(3517, 0x3517) | |||
293 | #define cpu_is_omap2422() 0 | 289 | #define cpu_is_omap2422() 0 |
294 | #define cpu_is_omap2423() 0 | 290 | #define cpu_is_omap2423() 0 |
295 | #define cpu_is_omap2430() 0 | 291 | #define cpu_is_omap2430() 0 |
296 | #define cpu_is_omap3503() 0 | ||
297 | #define cpu_is_omap3515() 0 | ||
298 | #define cpu_is_omap3525() 0 | ||
299 | #define cpu_is_omap3530() 0 | ||
300 | #define cpu_is_omap3505() 0 | ||
301 | #define cpu_is_omap3517() 0 | ||
302 | #define cpu_is_omap3430() 0 | 292 | #define cpu_is_omap3430() 0 |
303 | #define cpu_is_omap3630() 0 | 293 | #define cpu_is_omap3630() 0 |
304 | 294 | ||
@@ -350,12 +340,6 @@ IS_OMAP_TYPE(3517, 0x3517) | |||
350 | 340 | ||
351 | #if defined(CONFIG_ARCH_OMAP3) | 341 | #if defined(CONFIG_ARCH_OMAP3) |
352 | # undef cpu_is_omap3430 | 342 | # undef cpu_is_omap3430 |
353 | # undef cpu_is_omap3503 | ||
354 | # undef cpu_is_omap3515 | ||
355 | # undef cpu_is_omap3525 | ||
356 | # undef cpu_is_omap3530 | ||
357 | # undef cpu_is_omap3505 | ||
358 | # undef cpu_is_omap3517 | ||
359 | # undef cpu_is_ti81xx | 343 | # undef cpu_is_ti81xx |
360 | # undef cpu_is_ti816x | 344 | # undef cpu_is_ti816x |
361 | # undef cpu_is_ti814x | 345 | # undef cpu_is_ti814x |
@@ -363,19 +347,6 @@ IS_OMAP_TYPE(3517, 0x3517) | |||
363 | # undef cpu_is_am33xx | 347 | # undef cpu_is_am33xx |
364 | # undef cpu_is_am335x | 348 | # undef cpu_is_am335x |
365 | # define cpu_is_omap3430() is_omap3430() | 349 | # define cpu_is_omap3430() is_omap3430() |
366 | # define cpu_is_omap3503() (cpu_is_omap3430() && \ | ||
367 | (!omap3_has_iva()) && \ | ||
368 | (!omap3_has_sgx())) | ||
369 | # define cpu_is_omap3515() (cpu_is_omap3430() && \ | ||
370 | (!omap3_has_iva()) && \ | ||
371 | (omap3_has_sgx())) | ||
372 | # define cpu_is_omap3525() (cpu_is_omap3430() && \ | ||
373 | (!omap3_has_sgx()) && \ | ||
374 | (omap3_has_iva())) | ||
375 | # define cpu_is_omap3530() (cpu_is_omap3430()) | ||
376 | # define cpu_is_omap3517() is_omap3517() | ||
377 | # define cpu_is_omap3505() (cpu_is_omap3517() && \ | ||
378 | !omap3_has_sgx()) | ||
379 | # undef cpu_is_omap3630 | 350 | # undef cpu_is_omap3630 |
380 | # define cpu_is_omap3630() is_omap363x() | 351 | # define cpu_is_omap3630() is_omap363x() |
381 | # define cpu_is_ti81xx() is_ti81xx() | 352 | # define cpu_is_ti81xx() is_ti81xx() |
@@ -424,10 +395,6 @@ IS_OMAP_TYPE(3517, 0x3517) | |||
424 | #define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (0x1 << 8)) | 395 | #define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (0x1 << 8)) |
425 | #define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (0x2 << 8)) | 396 | #define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (0x2 << 8)) |
426 | 397 | ||
427 | #define OMAP3517_CLASS 0x35170034 | ||
428 | #define OMAP3517_REV_ES1_0 OMAP3517_CLASS | ||
429 | #define OMAP3517_REV_ES1_1 (OMAP3517_CLASS | (0x1 << 8)) | ||
430 | |||
431 | #define TI816X_CLASS 0x81600034 | 398 | #define TI816X_CLASS 0x81600034 |
432 | #define TI8168_REV_ES1_0 TI816X_CLASS | 399 | #define TI8168_REV_ES1_0 TI816X_CLASS |
433 | #define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8)) | 400 | #define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8)) |
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h index a7754a886d42..5493bd95da5e 100644 --- a/arch/arm/plat-omap/include/plat/mmc.h +++ b/arch/arm/plat-omap/include/plat/mmc.h | |||
@@ -172,8 +172,7 @@ struct omap_mmc_platform_data { | |||
172 | extern void omap_mmc_notify_cover_event(struct device *dev, int slot, | 172 | extern void omap_mmc_notify_cover_event(struct device *dev, int slot, |
173 | int is_closed); | 173 | int is_closed); |
174 | 174 | ||
175 | #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \ | 175 | #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) |
176 | defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE) | ||
177 | void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, | 176 | void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, |
178 | int nr_controllers); | 177 | int nr_controllers); |
179 | void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data); | 178 | void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data); |
@@ -185,7 +184,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, | |||
185 | static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data) | 184 | static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data) |
186 | { | 185 | { |
187 | } | 186 | } |
188 | |||
189 | #endif | 187 | #endif |
190 | 188 | ||
191 | extern int omap_msdi_reset(struct omap_hwmod *oh); | 189 | extern int omap_msdi_reset(struct omap_hwmod *oh); |
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 61fd837624a8..c1793786aea9 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
@@ -582,7 +582,7 @@ void __init orion_spi_1_init(unsigned long mapbase) | |||
582 | * Watchdog | 582 | * Watchdog |
583 | ****************************************************************************/ | 583 | ****************************************************************************/ |
584 | static struct resource orion_wdt_resource = | 584 | static struct resource orion_wdt_resource = |
585 | DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28); | 585 | DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x28); |
586 | 586 | ||
587 | static struct platform_device orion_wdt_device = { | 587 | static struct platform_device orion_wdt_device = { |
588 | .name = "orion_wdt", | 588 | .name = "orion_wdt", |
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index 58b79809d20c..584c9bf8ed2d 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c | |||
@@ -193,6 +193,7 @@ static const struct platform_device_id ssp_id_table[] = { | |||
193 | { "pxa25x-nssp", PXA25x_NSSP }, | 193 | { "pxa25x-nssp", PXA25x_NSSP }, |
194 | { "pxa27x-ssp", PXA27x_SSP }, | 194 | { "pxa27x-ssp", PXA27x_SSP }, |
195 | { "pxa168-ssp", PXA168_SSP }, | 195 | { "pxa168-ssp", PXA168_SSP }, |
196 | { "pxa910-ssp", PXA910_SSP }, | ||
196 | { }, | 197 | { }, |
197 | }; | 198 | }; |
198 | 199 | ||
diff --git a/arch/arm/plat-spear/include/plat/debug-macro.S b/arch/arm/plat-spear/include/plat/debug-macro.S index ab3de721c5db..75b05ad0fbad 100644 --- a/arch/arm/plat-spear/include/plat/debug-macro.S +++ b/arch/arm/plat-spear/include/plat/debug-macro.S | |||
@@ -4,7 +4,7 @@ | |||
4 | * Debugging macro include header for spear platform | 4 | * Debugging macro include header for spear platform |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h index e14a3e4932f9..2bc6b54460a8 100644 --- a/arch/arm/plat-spear/include/plat/pl080.h +++ b/arch/arm/plat-spear/include/plat/pl080.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * DMAC pl080 definitions for SPEAr platform | 4 | * DMAC pl080 definitions for SPEAr platform |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/plat-spear/include/plat/shirq.h b/arch/arm/plat-spear/include/plat/shirq.h index 03ed8b585dcf..88a7fbd24793 100644 --- a/arch/arm/plat-spear/include/plat/shirq.h +++ b/arch/arm/plat-spear/include/plat/shirq.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr platform shared irq layer header file | 4 | * SPEAr platform shared irq layer header file |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/plat-spear/include/plat/timex.h b/arch/arm/plat-spear/include/plat/timex.h index 914d09dd50fd..ef95e5b780bd 100644 --- a/arch/arm/plat-spear/include/plat/timex.h +++ b/arch/arm/plat-spear/include/plat/timex.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr platform specific timex definitions | 4 | * SPEAr platform specific timex definitions |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h index 6dd455bafdfd..2ce6cb17a98b 100644 --- a/arch/arm/plat-spear/include/plat/uncompress.h +++ b/arch/arm/plat-spear/include/plat/uncompress.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Serial port stubs for kernel decompress status messages | 4 | * Serial port stubs for kernel decompress status messages |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c index a56a067717c1..12cf27f935f9 100644 --- a/arch/arm/plat-spear/pl080.c +++ b/arch/arm/plat-spear/pl080.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * DMAC pl080 definitions for SPEAr platform | 4 | * DMAC pl080 definitions for SPEAr platform |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c index ea0a61302b7e..4f990115b1bd 100644 --- a/arch/arm/plat-spear/restart.c +++ b/arch/arm/plat-spear/restart.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr platform specific restart functions | 4 | * SPEAr platform specific restart functions |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c index 961fb7261243..853e891e1184 100644 --- a/arch/arm/plat-spear/shirq.c +++ b/arch/arm/plat-spear/shirq.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr platform shared irq layer source file | 4 | * SPEAr platform shared irq layer source file |
5 | * | 5 | * |
6 | * Copyright (C) 2009 ST Microelectronics | 6 | * Copyright (C) 2009 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index c140f9b41dce..d552a854dacc 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c | |||
@@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) | |||
300 | if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) | 300 | if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) |
301 | syscall = 1; | 301 | syscall = 1; |
302 | 302 | ||
303 | if (ti->flags & _TIF_SIGPENDING)) | 303 | if (ti->flags & _TIF_SIGPENDING) |
304 | do_signal(regs, syscall); | 304 | do_signal(regs, syscall); |
305 | 305 | ||
306 | if (ti->flags & _TIF_NOTIFY_RESUME) { | 306 | if (ti->flags & _TIF_NOTIFY_RESUME) { |
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 2e3994b20169..62bcea7dcc6d 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs) | |||
173 | unsigned long newsp; | 173 | unsigned long newsp; |
174 | 174 | ||
175 | #ifdef __ARCH_SYNC_CORE_DCACHE | 175 | #ifdef __ARCH_SYNC_CORE_DCACHE |
176 | if (current->rt.nr_cpus_allowed == num_possible_cpus()) | 176 | if (current->nr_cpus_allowed == num_possible_cpus()) |
177 | set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id())); | 177 | set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id())); |
178 | #endif | 178 | #endif |
179 | 179 | ||
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c index 68d651081bd3..d0b1607f2711 100644 --- a/arch/h8300/kernel/setup.c +++ b/arch/h8300/kernel/setup.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
36 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
37 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
38 | #include <asm/sections.h> | ||
38 | 39 | ||
39 | #if defined(__H8300H__) | 40 | #if defined(__H8300H__) |
40 | #define CPU "H8/300H" | 41 | #define CPU "H8/300H" |
@@ -54,7 +55,6 @@ unsigned long memory_end; | |||
54 | 55 | ||
55 | char __initdata command_line[COMMAND_LINE_SIZE]; | 56 | char __initdata command_line[COMMAND_LINE_SIZE]; |
56 | 57 | ||
57 | extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end; | ||
58 | extern int _ramstart, _ramend; | 58 | extern int _ramstart, _ramend; |
59 | extern char _target_name[]; | 59 | extern char _target_name[]; |
60 | extern void h8300_gpio_init(void); | 60 | extern void h8300_gpio_init(void); |
@@ -119,9 +119,9 @@ void __init setup_arch(char **cmdline_p) | |||
119 | memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; | 119 | memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; |
120 | #endif | 120 | #endif |
121 | 121 | ||
122 | init_mm.start_code = (unsigned long) &_stext; | 122 | init_mm.start_code = (unsigned long) _stext; |
123 | init_mm.end_code = (unsigned long) &_etext; | 123 | init_mm.end_code = (unsigned long) _etext; |
124 | init_mm.end_data = (unsigned long) &_edata; | 124 | init_mm.end_data = (unsigned long) _edata; |
125 | init_mm.brk = (unsigned long) 0; | 125 | init_mm.brk = (unsigned long) 0; |
126 | 126 | ||
127 | #if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT) | 127 | #if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT) |
@@ -134,15 +134,12 @@ void __init setup_arch(char **cmdline_p) | |||
134 | printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n"); | 134 | printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n"); |
135 | 135 | ||
136 | #ifdef DEBUG | 136 | #ifdef DEBUG |
137 | printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " | 137 | printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p " |
138 | "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, | 138 | "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start, |
139 | (int) &_sdata, (int) &_edata, | 139 | __bss_stop); |
140 | (int) &_sbss, (int) &_ebss); | 140 | printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx " |
141 | printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x " | 141 | "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start, |
142 | "STACK=0x%06x-0x%06x\n", | 142 | memory_end, memory_end, &_ramend); |
143 | (int) &_ebss, (int) memory_start, | ||
144 | (int) memory_start, (int) memory_end, | ||
145 | (int) memory_end, (int) &_ramend); | ||
146 | #endif | 143 | #endif |
147 | 144 | ||
148 | #ifdef CONFIG_DEFAULT_CMDLINE | 145 | #ifdef CONFIG_DEFAULT_CMDLINE |
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 973369c32a95..981e25094b1a 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/segment.h> | 36 | #include <asm/segment.h> |
37 | #include <asm/page.h> | 37 | #include <asm/page.h> |
38 | #include <asm/pgtable.h> | 38 | #include <asm/pgtable.h> |
39 | #include <asm/sections.h> | ||
39 | 40 | ||
40 | #undef DEBUG | 41 | #undef DEBUG |
41 | 42 | ||
@@ -123,7 +124,6 @@ void __init mem_init(void) | |||
123 | int codek = 0, datak = 0, initk = 0; | 124 | int codek = 0, datak = 0, initk = 0; |
124 | /* DAVIDM look at setup memory map generically with reserved area */ | 125 | /* DAVIDM look at setup memory map generically with reserved area */ |
125 | unsigned long tmp; | 126 | unsigned long tmp; |
126 | extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end; | ||
127 | extern unsigned long _ramend, _ramstart; | 127 | extern unsigned long _ramend, _ramstart; |
128 | unsigned long len = &_ramend - &_ramstart; | 128 | unsigned long len = &_ramend - &_ramstart; |
129 | unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */ | 129 | unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */ |
@@ -142,9 +142,9 @@ void __init mem_init(void) | |||
142 | /* this will put all memory onto the freelists */ | 142 | /* this will put all memory onto the freelists */ |
143 | totalram_pages = free_all_bootmem(); | 143 | totalram_pages = free_all_bootmem(); |
144 | 144 | ||
145 | codek = (&_etext - &_stext) >> 10; | 145 | codek = (_etext - _stext) >> 10; |
146 | datak = (&_ebss - &_sdata) >> 10; | 146 | datak = (__bss_stop - _sdata) >> 10; |
147 | initk = (&__init_begin - &__init_end) >> 10; | 147 | initk = (__init_begin - __init_end) >> 10; |
148 | 148 | ||
149 | tmp = nr_free_pages() << PAGE_SHIFT; | 149 | tmp = nr_free_pages() << PAGE_SHIFT; |
150 | printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n", | 150 | printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n", |
@@ -178,22 +178,21 @@ free_initmem(void) | |||
178 | { | 178 | { |
179 | #ifdef CONFIG_RAMKERNEL | 179 | #ifdef CONFIG_RAMKERNEL |
180 | unsigned long addr; | 180 | unsigned long addr; |
181 | extern char __init_begin, __init_end; | ||
182 | /* | 181 | /* |
183 | * the following code should be cool even if these sections | 182 | * the following code should be cool even if these sections |
184 | * are not page aligned. | 183 | * are not page aligned. |
185 | */ | 184 | */ |
186 | addr = PAGE_ALIGN((unsigned long)(&__init_begin)); | 185 | addr = PAGE_ALIGN((unsigned long)(__init_begin)); |
187 | /* next to check that the page we free is not a partial page */ | 186 | /* next to check that the page we free is not a partial page */ |
188 | for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) { | 187 | for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) { |
189 | ClearPageReserved(virt_to_page(addr)); | 188 | ClearPageReserved(virt_to_page(addr)); |
190 | init_page_count(virt_to_page(addr)); | 189 | init_page_count(virt_to_page(addr)); |
191 | free_page(addr); | 190 | free_page(addr); |
192 | totalram_pages++; | 191 | totalram_pages++; |
193 | } | 192 | } |
194 | printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", | 193 | printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", |
195 | (addr - PAGE_ALIGN((long) &__init_begin)) >> 10, | 194 | (addr - PAGE_ALIGN((long) __init_begin)) >> 10, |
196 | (int)(PAGE_ALIGN((unsigned long)(&__init_begin))), | 195 | (int)(PAGE_ALIGN((unsigned long)__init_begin)), |
197 | (int)(addr - PAGE_SIZE)); | 196 | (int)(addr - PAGE_SIZE)); |
198 | #endif | 197 | #endif |
199 | } | 198 | } |
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index cac5b6be572a..147120128260 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -7,6 +7,8 @@ config M68K | |||
7 | select GENERIC_IRQ_SHOW | 7 | select GENERIC_IRQ_SHOW |
8 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS | 8 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS |
9 | select GENERIC_CPU_DEVICES | 9 | select GENERIC_CPU_DEVICES |
10 | select GENERIC_STRNCPY_FROM_USER if MMU | ||
11 | select GENERIC_STRNLEN_USER if MMU | ||
10 | select FPU if MMU | 12 | select FPU if MMU |
11 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE | 13 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE |
12 | 14 | ||
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 1a922fad76f7..eafa2539a8ee 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild | |||
@@ -1,2 +1,4 @@ | |||
1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
2 | header-y += cachectl.h | 2 | header-y += cachectl.h |
3 | |||
4 | generic-y += word-at-a-time.h | ||
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h index d63b99ff7ff7..497c31c803ff 100644 --- a/arch/m68k/include/asm/m528xsim.h +++ b/arch/m68k/include/asm/m528xsim.h | |||
@@ -86,7 +86,7 @@ | |||
86 | /* | 86 | /* |
87 | * QSPI module. | 87 | * QSPI module. |
88 | */ | 88 | */ |
89 | #define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340) | 89 | #define MCFQSPI_BASE (MCF_IPSBAR + 0x340) |
90 | #define MCFQSPI_SIZE 0x40 | 90 | #define MCFQSPI_SIZE 0x40 |
91 | 91 | ||
92 | #define MCFQSPI_CS0 147 | 92 | #define MCFQSPI_CS0 147 |
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 9c80cd515b20..472c891a4aee 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h | |||
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
379 | #define copy_from_user(to, from, n) __copy_from_user(to, from, n) | 379 | #define copy_from_user(to, from, n) __copy_from_user(to, from, n) |
380 | #define copy_to_user(to, from, n) __copy_to_user(to, from, n) | 380 | #define copy_to_user(to, from, n) __copy_to_user(to, from, n) |
381 | 381 | ||
382 | long strncpy_from_user(char *dst, const char __user *src, long count); | 382 | #define user_addr_max() \ |
383 | long strnlen_user(const char __user *src, long n); | 383 | (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) |
384 | |||
385 | extern long strncpy_from_user(char *dst, const char __user *src, long count); | ||
386 | extern __must_check long strlen_user(const char __user *str); | ||
387 | extern __must_check long strnlen_user(const char __user *str, long n); | ||
388 | |||
384 | unsigned long __clear_user(void __user *to, unsigned long n); | 389 | unsigned long __clear_user(void __user *to, unsigned long n); |
385 | 390 | ||
386 | #define clear_user __clear_user | 391 | #define clear_user __clear_user |
387 | 392 | ||
388 | #define strlen_user(str) strnlen_user(str, 32767) | ||
389 | |||
390 | #endif /* _M68K_UACCESS_H */ | 393 | #endif /* _M68K_UACCESS_H */ |
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 8b4a2222e658..1bc10e62b9af 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c | |||
@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void) | |||
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | #ifdef CONFIG_COLDFIRE | 289 | #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) |
290 | asmlinkage int syscall_trace_enter(void) | 290 | asmlinkage int syscall_trace_enter(void) |
291 | { | 291 | { |
292 | int ret = 0; | 292 | int ret = 0; |
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index d7deb7fc7eb5..707f0573ec6b 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c | |||
@@ -85,7 +85,7 @@ void __init time_init(void) | |||
85 | mach_sched_init(timer_interrupt); | 85 | mach_sched_init(timer_interrupt); |
86 | } | 86 | } |
87 | 87 | ||
88 | #ifdef CONFIG_M68KCLASSIC | 88 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
89 | 89 | ||
90 | u32 arch_gettimeoffset(void) | 90 | u32 arch_gettimeoffset(void) |
91 | { | 91 | { |
@@ -108,4 +108,4 @@ static int __init rtc_init(void) | |||
108 | 108 | ||
109 | module_init(rtc_init); | 109 | module_init(rtc_init); |
110 | 110 | ||
111 | #endif /* CONFIG_M68KCLASSIC */ | 111 | #endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ |
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c index 5664386338da..5e97f2ee7c11 100644 --- a/arch/m68k/lib/uaccess.c +++ b/arch/m68k/lib/uaccess.c | |||
@@ -104,80 +104,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, | |||
104 | EXPORT_SYMBOL(__generic_copy_to_user); | 104 | EXPORT_SYMBOL(__generic_copy_to_user); |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * Copy a null terminated string from userspace. | ||
108 | */ | ||
109 | long strncpy_from_user(char *dst, const char __user *src, long count) | ||
110 | { | ||
111 | long res; | ||
112 | char c; | ||
113 | |||
114 | if (count <= 0) | ||
115 | return count; | ||
116 | |||
117 | asm volatile ("\n" | ||
118 | "1: "MOVES".b (%2)+,%4\n" | ||
119 | " move.b %4,(%1)+\n" | ||
120 | " jeq 2f\n" | ||
121 | " subq.l #1,%3\n" | ||
122 | " jne 1b\n" | ||
123 | "2: sub.l %3,%0\n" | ||
124 | "3:\n" | ||
125 | " .section .fixup,\"ax\"\n" | ||
126 | " .even\n" | ||
127 | "10: move.l %5,%0\n" | ||
128 | " jra 3b\n" | ||
129 | " .previous\n" | ||
130 | "\n" | ||
131 | " .section __ex_table,\"a\"\n" | ||
132 | " .align 4\n" | ||
133 | " .long 1b,10b\n" | ||
134 | " .previous" | ||
135 | : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c) | ||
136 | : "i" (-EFAULT), "0" (count)); | ||
137 | |||
138 | return res; | ||
139 | } | ||
140 | EXPORT_SYMBOL(strncpy_from_user); | ||
141 | |||
142 | /* | ||
143 | * Return the size of a string (including the ending 0) | ||
144 | * | ||
145 | * Return 0 on exception, a value greater than N if too long | ||
146 | */ | ||
147 | long strnlen_user(const char __user *src, long n) | ||
148 | { | ||
149 | char c; | ||
150 | long res; | ||
151 | |||
152 | asm volatile ("\n" | ||
153 | "1: subq.l #1,%1\n" | ||
154 | " jmi 3f\n" | ||
155 | "2: "MOVES".b (%0)+,%2\n" | ||
156 | " tst.b %2\n" | ||
157 | " jne 1b\n" | ||
158 | " jra 4f\n" | ||
159 | "\n" | ||
160 | "3: addq.l #1,%0\n" | ||
161 | "4: sub.l %4,%0\n" | ||
162 | "5:\n" | ||
163 | " .section .fixup,\"ax\"\n" | ||
164 | " .even\n" | ||
165 | "20: sub.l %0,%0\n" | ||
166 | " jra 5b\n" | ||
167 | " .previous\n" | ||
168 | "\n" | ||
169 | " .section __ex_table,\"a\"\n" | ||
170 | " .align 4\n" | ||
171 | " .long 2b,20b\n" | ||
172 | " .previous\n" | ||
173 | : "=&a" (res), "+d" (n), "=&d" (c) | ||
174 | : "0" (src), "r" (src)); | ||
175 | |||
176 | return res; | ||
177 | } | ||
178 | EXPORT_SYMBOL(strnlen_user); | ||
179 | |||
180 | /* | ||
181 | * Zero Userspace | 107 | * Zero Userspace |
182 | */ | 108 | */ |
183 | 109 | ||
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c index c801c172b822..f4dc9b295609 100644 --- a/arch/m68k/platform/68328/timers.c +++ b/arch/m68k/platform/68328/timers.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | static u32 m68328_tick_cnt; | 55 | static u32 m68328_tick_cnt; |
56 | static irq_handler_t timer_interrupt; | ||
56 | 57 | ||
57 | /***************************************************************************/ | 58 | /***************************************************************************/ |
58 | 59 | ||
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) | |||
62 | TSTAT &= 0; | 63 | TSTAT &= 0; |
63 | 64 | ||
64 | m68328_tick_cnt += TICKS_PER_JIFFY; | 65 | m68328_tick_cnt += TICKS_PER_JIFFY; |
65 | return arch_timer_interrupt(irq, dummy); | 66 | return timer_interrupt(irq, dummy); |
66 | } | 67 | } |
67 | 68 | ||
68 | /***************************************************************************/ | 69 | /***************************************************************************/ |
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = { | |||
99 | 100 | ||
100 | /***************************************************************************/ | 101 | /***************************************************************************/ |
101 | 102 | ||
102 | void hw_timer_init(void) | 103 | void hw_timer_init(irq_handler_t handler) |
103 | { | 104 | { |
104 | /* disable timer 1 */ | 105 | /* disable timer 1 */ |
105 | TCTL = 0; | 106 | TCTL = 0; |
@@ -115,6 +116,7 @@ void hw_timer_init(void) | |||
115 | /* Enable timer 1 */ | 116 | /* Enable timer 1 */ |
116 | TCTL |= TCTL_TEN; | 117 | TCTL |= TCTL_TEN; |
117 | clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); | 118 | clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); |
119 | timer_interrupt = handler; | ||
118 | } | 120 | } |
119 | 121 | ||
120 | /***************************************************************************/ | 122 | /***************************************************************************/ |
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c index 255fc03913e9..9877cefad1e7 100644 --- a/arch/m68k/platform/68360/config.c +++ b/arch/m68k/platform/68360/config.c | |||
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void); | |||
35 | #define OSCILLATOR (unsigned long int)33000000 | 35 | #define OSCILLATOR (unsigned long int)33000000 |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | static irq_handler_t timer_interrupt; | ||
38 | unsigned long int system_clock; | 39 | unsigned long int system_clock; |
39 | 40 | ||
40 | extern QUICC *pquicc; | 41 | extern QUICC *pquicc; |
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) | |||
52 | 53 | ||
53 | pquicc->timer_ter1 = 0x0002; /* clear timer event */ | 54 | pquicc->timer_ter1 = 0x0002; /* clear timer event */ |
54 | 55 | ||
55 | return arch_timer_interrupt(irq, dummy); | 56 | return timer_interrupt(irq, dummy); |
56 | } | 57 | } |
57 | 58 | ||
58 | static struct irqaction m68360_timer_irq = { | 59 | static struct irqaction m68360_timer_irq = { |
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = { | |||
61 | .handler = hw_tick, | 62 | .handler = hw_tick, |
62 | }; | 63 | }; |
63 | 64 | ||
64 | void hw_timer_init(void) | 65 | void hw_timer_init(irq_handler_t handler) |
65 | { | 66 | { |
66 | unsigned char prescaler; | 67 | unsigned char prescaler; |
67 | unsigned short tgcr_save; | 68 | unsigned short tgcr_save; |
@@ -94,6 +95,8 @@ void hw_timer_init(void) | |||
94 | 95 | ||
95 | pquicc->timer_ter1 = 0x0003; /* clear timer events */ | 96 | pquicc->timer_ter1 = 0x0003; /* clear timer events */ |
96 | 97 | ||
98 | timer_interrupt = handler; | ||
99 | |||
97 | /* enable timer 1 interrupt in CIMR */ | 100 | /* enable timer 1 interrupt in CIMR */ |
98 | setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); | 101 | setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); |
99 | 102 | ||
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index dbc3850b1d0d..5707f1a62341 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig | |||
21 | 21 | ||
22 | NM = sh $(srctree)/arch/parisc/nm | 22 | NM = sh $(srctree)/arch/parisc/nm |
23 | CHECKFLAGS += -D__hppa__=1 | 23 | CHECKFLAGS += -D__hppa__=1 |
24 | LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | ||
24 | 25 | ||
25 | MACHINE := $(shell uname -m) | 26 | MACHINE := $(shell uname -m) |
26 | ifeq ($(MACHINE),parisc*) | 27 | ifeq ($(MACHINE),parisc*) |
@@ -79,7 +80,7 @@ kernel-y := mm/ kernel/ math-emu/ | |||
79 | kernel-$(CONFIG_HPUX) += hpux/ | 80 | kernel-$(CONFIG_HPUX) += hpux/ |
80 | 81 | ||
81 | core-y += $(addprefix arch/parisc/, $(kernel-y)) | 82 | core-y += $(addprefix arch/parisc/, $(kernel-y)) |
82 | libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` | 83 | libs-y += arch/parisc/lib/ $(LIBGCC) |
83 | 84 | ||
84 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ | 85 | drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ |
85 | 86 | ||
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 19a434f55059..4383707d9801 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild | |||
@@ -1,3 +1,4 @@ | |||
1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | header-y += pdc.h | 3 | header-y += pdc.h |
4 | generic-y += word-at-a-time.h | ||
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h index 72cfdb0cfdd1..62a33338549c 100644 --- a/arch/parisc/include/asm/bug.h +++ b/arch/parisc/include/asm/bug.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _PARISC_BUG_H | 1 | #ifndef _PARISC_BUG_H |
2 | #define _PARISC_BUG_H | 2 | #define _PARISC_BUG_H |
3 | 3 | ||
4 | #include <linux/kernel.h> /* for BUGFLAG_TAINT */ | ||
5 | |||
4 | /* | 6 | /* |
5 | * Tell the user there is some problem. | 7 | * Tell the user there is some problem. |
6 | * The offending file and line are encoded in the __bug_table section. | 8 | * The offending file and line are encoded in the __bug_table section. |
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index c9aac24b02e2..32b394f3b854 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
@@ -100,6 +100,9 @@ static inline void hard_irq_disable(void) | |||
100 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; | 100 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; |
101 | } | 101 | } |
102 | 102 | ||
103 | /* include/linux/interrupt.h needs hard_irq_disable to be a macro */ | ||
104 | #define hard_irq_disable hard_irq_disable | ||
105 | |||
103 | /* | 106 | /* |
104 | * This is called by asynchronous interrupts to conditionally | 107 | * This is called by asynchronous interrupts to conditionally |
105 | * re-enable hard interrupts when soft-disabled after having | 108 | * re-enable hard interrupts when soft-disabled after having |
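The hard_irq_disable hunk above leans on a common header idiom: a macro defined as its own name does not recurse when expanded, so calls still reach the inline function, while generic code (here include/linux/interrupt.h) can test with #ifdef whether the architecture provides its own version. A minimal stand-alone sketch of that idiom, using the hypothetical name demo_irq_disable rather than any real kernel symbol:

    #include <stdio.h>

    /* Stands in for the architecture's real implementation. */
    static inline void demo_irq_disable(void)
    {
    }
    #define demo_irq_disable demo_irq_disable

    /* A generic header can now detect the arch version and only install
     * its fallback when the macro is absent. */
    #ifndef demo_irq_disable
    #define demo_irq_disable() do { } while (0)
    #endif

    int main(void)
    {
            demo_irq_disable();     /* self-referential macro: no recursion,
                                       the call reaches the inline function */
            printf("arch-provided demo_irq_disable() was selected\n");
            return 0;
    }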
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 0b6d79617d7b..2e3200ca485f 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c | |||
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, | |||
176 | 176 | ||
177 | static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) | 177 | static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) |
178 | { | 178 | { |
179 | if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) | 179 | if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16) |
180 | && entry->jump[1] == 0x396b0000 + (val & 0xffff)) | 180 | && entry->jump[1] == 0x398c0000 + (val & 0xffff)) |
181 | return 1; | 181 | return 1; |
182 | return 0; | 182 | return 0; |
183 | } | 183 | } |
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location, | |||
204 | entry++; | 204 | entry++; |
205 | } | 205 | } |
206 | 206 | ||
207 | /* Stolen from Paul Mackerras as well... */ | 207 | entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */ |
208 | entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ | 208 | entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/ |
209 | entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ | 209 | entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ |
210 | entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ | ||
211 | entry->jump[3] = 0x4e800420; /* bctr */ | 210 | entry->jump[3] = 0x4e800420; /* bctr */ |
212 | 211 | ||
213 | DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); | 212 | DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 99a995c2a3f2..be171ee73bf8 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs) | |||
475 | struct pt_regs *old_regs; | 475 | struct pt_regs *old_regs; |
476 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); | 476 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
477 | struct clock_event_device *evt = &__get_cpu_var(decrementers); | 477 | struct clock_event_device *evt = &__get_cpu_var(decrementers); |
478 | u64 now; | ||
478 | 479 | ||
479 | /* Ensure a positive value is written to the decrementer, or else | 480 | /* Ensure a positive value is written to the decrementer, or else |
480 | * some CPUs will continue to take decrementer exceptions. | 481 | * some CPUs will continue to take decrementer exceptions. |
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs) | |||
509 | irq_work_run(); | 510 | irq_work_run(); |
510 | } | 511 | } |
511 | 512 | ||
512 | *next_tb = ~(u64)0; | 513 | now = get_tb_or_rtc(); |
513 | if (evt->event_handler) | 514 | if (now >= *next_tb) { |
514 | evt->event_handler(evt); | 515 | *next_tb = ~(u64)0; |
516 | if (evt->event_handler) | ||
517 | evt->event_handler(evt); | ||
518 | } else { | ||
519 | now = *next_tb - now; | ||
520 | if (now <= DECREMENTER_MAX) | ||
521 | set_dec((int)now); | ||
522 | } | ||
515 | 523 | ||
516 | #ifdef CONFIG_PPC64 | 524 | #ifdef CONFIG_PPC64 |
517 | /* collect purr register values often, for accurate calculations */ | 525 | /* collect purr register values often, for accurate calculations */ |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index c6af1d623839..3abe1b86e583 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, | |||
268 | return err; | 268 | return err; |
269 | } | 269 | } |
270 | 270 | ||
271 | static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap) | 271 | static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) |
272 | { | 272 | { |
273 | struct kvm *kvm = vcpu->kvm; | ||
273 | void *va; | 274 | void *va; |
274 | unsigned long nb; | 275 | unsigned long nb; |
276 | unsigned long gpa; | ||
275 | 277 | ||
276 | vpap->update_pending = 0; | 278 | /* |
277 | va = NULL; | 279 | * We need to pin the page pointed to by vpap->next_gpa, |
278 | if (vpap->next_gpa) { | 280 | * but we can't call kvmppc_pin_guest_page under the lock |
279 | va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb); | 281 | * as it does get_user_pages() and down_read(). So we |
280 | if (nb < vpap->len) { | 282 | * have to drop the lock, pin the page, then get the lock |
281 | /* | 283 | * again and check that a new area didn't get registered |
282 | * If it's now too short, it must be that userspace | 284 | * in the meantime. |
283 | * has changed the mappings underlying guest memory, | 285 | */ |
284 | * so unregister the region. | 286 | for (;;) { |
285 | */ | 287 | gpa = vpap->next_gpa; |
288 | spin_unlock(&vcpu->arch.vpa_update_lock); | ||
289 | va = NULL; | ||
290 | nb = 0; | ||
291 | if (gpa) | ||
292 | va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb); | ||
293 | spin_lock(&vcpu->arch.vpa_update_lock); | ||
294 | if (gpa == vpap->next_gpa) | ||
295 | break; | ||
296 | /* sigh... unpin that one and try again */ | ||
297 | if (va) | ||
286 | kvmppc_unpin_guest_page(kvm, va); | 298 | kvmppc_unpin_guest_page(kvm, va); |
287 | va = NULL; | 299 | } |
288 | } | 300 | |
301 | vpap->update_pending = 0; | ||
302 | if (va && nb < vpap->len) { | ||
303 | /* | ||
304 | * If it's now too short, it must be that userspace | ||
305 | * has changed the mappings underlying guest memory, | ||
306 | * so unregister the region. | ||
307 | */ | ||
308 | kvmppc_unpin_guest_page(kvm, va); | ||
309 | va = NULL; | ||
289 | } | 310 | } |
290 | if (vpap->pinned_addr) | 311 | if (vpap->pinned_addr) |
291 | kvmppc_unpin_guest_page(kvm, vpap->pinned_addr); | 312 | kvmppc_unpin_guest_page(kvm, vpap->pinned_addr); |
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap) | |||
296 | 317 | ||
297 | static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) | 318 | static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) |
298 | { | 319 | { |
299 | struct kvm *kvm = vcpu->kvm; | ||
300 | |||
301 | spin_lock(&vcpu->arch.vpa_update_lock); | 320 | spin_lock(&vcpu->arch.vpa_update_lock); |
302 | if (vcpu->arch.vpa.update_pending) { | 321 | if (vcpu->arch.vpa.update_pending) { |
303 | kvmppc_update_vpa(kvm, &vcpu->arch.vpa); | 322 | kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); |
304 | init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); | 323 | init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); |
305 | } | 324 | } |
306 | if (vcpu->arch.dtl.update_pending) { | 325 | if (vcpu->arch.dtl.update_pending) { |
307 | kvmppc_update_vpa(kvm, &vcpu->arch.dtl); | 326 | kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); |
308 | vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; | 327 | vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; |
309 | vcpu->arch.dtl_index = 0; | 328 | vcpu->arch.dtl_index = 0; |
310 | } | 329 | } |
311 | if (vcpu->arch.slb_shadow.update_pending) | 330 | if (vcpu->arch.slb_shadow.update_pending) |
312 | kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow); | 331 | kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); |
313 | spin_unlock(&vcpu->arch.vpa_update_lock); | 332 | spin_unlock(&vcpu->arch.vpa_update_lock); |
314 | } | 333 | } |
315 | 334 | ||
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
800 | struct kvm_vcpu *vcpu, *vcpu0, *vnext; | 819 | struct kvm_vcpu *vcpu, *vcpu0, *vnext; |
801 | long ret; | 820 | long ret; |
802 | u64 now; | 821 | u64 now; |
803 | int ptid, i; | 822 | int ptid, i, need_vpa_update; |
804 | 823 | ||
805 | /* don't start if any threads have a signal pending */ | 824 | /* don't start if any threads have a signal pending */ |
806 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) | 825 | need_vpa_update = 0; |
826 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { | ||
807 | if (signal_pending(vcpu->arch.run_task)) | 827 | if (signal_pending(vcpu->arch.run_task)) |
808 | return 0; | 828 | return 0; |
829 | need_vpa_update |= vcpu->arch.vpa.update_pending | | ||
830 | vcpu->arch.slb_shadow.update_pending | | ||
831 | vcpu->arch.dtl.update_pending; | ||
832 | } | ||
833 | |||
834 | /* | ||
835 | * Initialize *vc, in particular vc->vcore_state, so we can | ||
836 | * drop the vcore lock if necessary. | ||
837 | */ | ||
838 | vc->n_woken = 0; | ||
839 | vc->nap_count = 0; | ||
840 | vc->entry_exit_count = 0; | ||
841 | vc->vcore_state = VCORE_RUNNING; | ||
842 | vc->in_guest = 0; | ||
843 | vc->napping_threads = 0; | ||
844 | |||
845 | /* | ||
846 | * Updating any of the vpas requires calling kvmppc_pin_guest_page, | ||
847 | * which can't be called with any spinlocks held. | ||
848 | */ | ||
849 | if (need_vpa_update) { | ||
850 | spin_unlock(&vc->lock); | ||
851 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) | ||
852 | kvmppc_update_vpas(vcpu); | ||
853 | spin_lock(&vc->lock); | ||
854 | } | ||
809 | 855 | ||
810 | /* | 856 | /* |
811 | * Make sure we are running on thread 0, and that | 857 | * Make sure we are running on thread 0, and that |
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
838 | if (vcpu->arch.ceded) | 884 | if (vcpu->arch.ceded) |
839 | vcpu->arch.ptid = ptid++; | 885 | vcpu->arch.ptid = ptid++; |
840 | 886 | ||
841 | vc->n_woken = 0; | ||
842 | vc->nap_count = 0; | ||
843 | vc->entry_exit_count = 0; | ||
844 | vc->vcore_state = VCORE_RUNNING; | ||
845 | vc->stolen_tb += mftb() - vc->preempt_tb; | 887 | vc->stolen_tb += mftb() - vc->preempt_tb; |
846 | vc->in_guest = 0; | ||
847 | vc->pcpu = smp_processor_id(); | 888 | vc->pcpu = smp_processor_id(); |
848 | vc->napping_threads = 0; | ||
849 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { | 889 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { |
850 | kvmppc_start_thread(vcpu); | 890 | kvmppc_start_thread(vcpu); |
851 | if (vcpu->arch.vpa.update_pending || | ||
852 | vcpu->arch.slb_shadow.update_pending || | ||
853 | vcpu->arch.dtl.update_pending) | ||
854 | kvmppc_update_vpas(vcpu); | ||
855 | kvmppc_create_dtl_entry(vcpu, vc); | 891 | kvmppc_create_dtl_entry(vcpu, vc); |
856 | } | 892 | } |
857 | /* Grab any remaining hw threads so they can't go into the kernel */ | 893 | /* Grab any remaining hw threads so they can't go into the kernel */ |
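The comment added to kvmppc_update_vpa() above describes a lock/sleep pattern worth spelling out: drop the spinlock before the operation that can sleep, retake it afterwards, and retry if the protected value changed in between. A user-space sketch of that shape, with pthread locks and hypothetical pin_page()/unpin_page() stand-ins (none of these names are KVM symbols):

    #include <pthread.h>
    #include <stdlib.h>

    struct area {
            pthread_mutex_t lock;
            unsigned long next_gpa;    /* may be changed by other threads */
            void *pinned;
    };

    /* Hypothetical stand-ins for the sleeping pin/unpin operations: they
     * must not be called while the lock is held. */
    static void *pin_page(unsigned long gpa)
    {
            return gpa ? malloc(16) : NULL;
    }

    static void unpin_page(void *va)
    {
            free(va);
    }

    static void update_area(struct area *a)
    {
            unsigned long gpa;
            void *va;

            pthread_mutex_lock(&a->lock);
            for (;;) {
                    gpa = a->next_gpa;
                    pthread_mutex_unlock(&a->lock);
                    va = gpa ? pin_page(gpa) : NULL;  /* sleeping work, lock dropped */
                    pthread_mutex_lock(&a->lock);
                    if (gpa == a->next_gpa)
                            break;            /* nothing changed: keep this pin */
                    if (va)
                            unpin_page(va);   /* stale result: undo and try again */
            }
            if (a->pinned)
                    unpin_page(a->pinned);
            a->pinned = va;
            pthread_mutex_unlock(&a->lock);
    }

    int main(void)
    {
            struct area a = { PTHREAD_MUTEX_INITIALIZER, 42, NULL };

            update_area(&a);
            unpin_page(a.pinned);
            return 0;
    }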
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 36f957f31842..8733a86ad52e 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = { | |||
68 | }; | 68 | }; |
69 | 69 | ||
70 | static void oops_to_nvram(struct kmsg_dumper *dumper, | 70 | static void oops_to_nvram(struct kmsg_dumper *dumper, |
71 | enum kmsg_dump_reason reason, | 71 | enum kmsg_dump_reason reason); |
72 | const char *old_msgs, unsigned long old_len, | ||
73 | const char *new_msgs, unsigned long new_len); | ||
74 | 72 | ||
75 | static struct kmsg_dumper nvram_kmsg_dumper = { | 73 | static struct kmsg_dumper nvram_kmsg_dumper = { |
76 | .dump = oops_to_nvram | 74 | .dump = oops_to_nvram |
@@ -504,28 +502,6 @@ int __init pSeries_nvram_init(void) | |||
504 | } | 502 | } |
505 | 503 | ||
506 | /* | 504 | /* |
507 | * Try to capture the last capture_len bytes of the printk buffer. Return | ||
508 | * the amount actually captured. | ||
509 | */ | ||
510 | static size_t capture_last_msgs(const char *old_msgs, size_t old_len, | ||
511 | const char *new_msgs, size_t new_len, | ||
512 | char *captured, size_t capture_len) | ||
513 | { | ||
514 | if (new_len >= capture_len) { | ||
515 | memcpy(captured, new_msgs + (new_len - capture_len), | ||
516 | capture_len); | ||
517 | return capture_len; | ||
518 | } else { | ||
519 | /* Grab the end of old_msgs. */ | ||
520 | size_t old_tail_len = min(old_len, capture_len - new_len); | ||
521 | memcpy(captured, old_msgs + (old_len - old_tail_len), | ||
522 | old_tail_len); | ||
523 | memcpy(captured + old_tail_len, new_msgs, new_len); | ||
524 | return old_tail_len + new_len; | ||
525 | } | ||
526 | } | ||
527 | |||
528 | /* | ||
529 | * Are we using the ibm,rtas-log for oops/panic reports? And if so, | 505 | * Are we using the ibm,rtas-log for oops/panic reports? And if so, |
530 | * would logging this oops/panic overwrite an RTAS event that rtas_errd | 506 | * would logging this oops/panic overwrite an RTAS event that rtas_errd |
531 | * hasn't had a chance to read and process? Return 1 if so, else 0. | 507 | * hasn't had a chance to read and process? Return 1 if so, else 0. |
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void) | |||
541 | NVRAM_RTAS_READ_TIMEOUT); | 517 | NVRAM_RTAS_READ_TIMEOUT); |
542 | } | 518 | } |
543 | 519 | ||
544 | /* Squeeze out each line's <n> severity prefix. */ | ||
545 | static size_t elide_severities(char *buf, size_t len) | ||
546 | { | ||
547 | char *in, *out, *buf_end = buf + len; | ||
548 | /* Assume a <n> at the very beginning marks the start of a line. */ | ||
549 | int newline = 1; | ||
550 | |||
551 | in = out = buf; | ||
552 | while (in < buf_end) { | ||
553 | if (newline && in+3 <= buf_end && | ||
554 | *in == '<' && isdigit(in[1]) && in[2] == '>') { | ||
555 | in += 3; | ||
556 | newline = 0; | ||
557 | } else { | ||
558 | newline = (*in == '\n'); | ||
559 | *out++ = *in++; | ||
560 | } | ||
561 | } | ||
562 | return out - buf; | ||
563 | } | ||
564 | |||
565 | /* Derived from logfs_compress() */ | 520 | /* Derived from logfs_compress() */ |
566 | static int nvram_compress(const void *in, void *out, size_t inlen, | 521 | static int nvram_compress(const void *in, void *out, size_t inlen, |
567 | size_t outlen) | 522 | size_t outlen) |
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len) | |||
619 | * partition. If that's too much, go back and capture uncompressed text. | 574 | * partition. If that's too much, go back and capture uncompressed text. |
620 | */ | 575 | */ |
621 | static void oops_to_nvram(struct kmsg_dumper *dumper, | 576 | static void oops_to_nvram(struct kmsg_dumper *dumper, |
622 | enum kmsg_dump_reason reason, | 577 | enum kmsg_dump_reason reason) |
623 | const char *old_msgs, unsigned long old_len, | ||
624 | const char *new_msgs, unsigned long new_len) | ||
625 | { | 578 | { |
626 | static unsigned int oops_count = 0; | 579 | static unsigned int oops_count = 0; |
627 | static bool panicking = false; | 580 | static bool panicking = false; |
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, | |||
660 | return; | 613 | return; |
661 | 614 | ||
662 | if (big_oops_buf) { | 615 | if (big_oops_buf) { |
663 | text_len = capture_last_msgs(old_msgs, old_len, | 616 | kmsg_dump_get_buffer(dumper, false, |
664 | new_msgs, new_len, big_oops_buf, big_oops_buf_sz); | 617 | big_oops_buf, big_oops_buf_sz, &text_len); |
665 | text_len = elide_severities(big_oops_buf, text_len); | ||
666 | rc = zip_oops(text_len); | 618 | rc = zip_oops(text_len); |
667 | } | 619 | } |
668 | if (rc != 0) { | 620 | if (rc != 0) { |
669 | text_len = capture_last_msgs(old_msgs, old_len, | 621 | kmsg_dump_rewind(dumper); |
670 | new_msgs, new_len, oops_data, oops_data_sz); | 622 | kmsg_dump_get_buffer(dumper, true, |
623 | oops_data, oops_data_sz, &text_len); | ||
671 | err_type = ERR_TYPE_KERNEL_PANIC; | 624 | err_type = ERR_TYPE_KERNEL_PANIC; |
672 | *oops_len = (u16) text_len; | 625 | *oops_len = (u16) text_len; |
673 | } | 626 | } |
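The nvram.c hunks above move oops_to_nvram() to the two-argument kmsg_dumper callback, which now pulls the log text itself through kmsg_dump_get_buffer() (with kmsg_dump_rewind() before a second pass) instead of receiving old/new message buffers. A minimal sketch of a dumper built on that interface, assuming an illustrative module rather than the pseries driver:

    #include <linux/kernel.h>
    #include <linux/kmsg_dump.h>
    #include <linux/module.h>

    static char demo_buf[4096];

    /* New-style callback: no old/new buffers, the dumper pulls the text. */
    static void demo_dump(struct kmsg_dumper *dumper,
                          enum kmsg_dump_reason reason)
    {
            size_t len = 0;

            /* false: record text only, without the syslog prefixes */
            kmsg_dump_get_buffer(dumper, false, demo_buf, sizeof(demo_buf), &len);
            pr_info("demo dumper: captured %zu bytes (reason %d)\n", len, reason);
    }

    static struct kmsg_dumper demo_dumper = {
            .dump = demo_dump,
    };

    static int __init demo_init(void)
    {
            return kmsg_dump_register(&demo_dumper);
    }

    static void __exit demo_exit(void)
    {
            kmsg_dump_unregister(&demo_dumper);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");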
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 99bcd0ee838d..31d9db7913e4 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -32,6 +32,8 @@ config SUPERH | |||
32 | select GENERIC_SMP_IDLE_THREAD | 32 | select GENERIC_SMP_IDLE_THREAD |
33 | select GENERIC_CLOCKEVENTS | 33 | select GENERIC_CLOCKEVENTS |
34 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST | 34 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST |
35 | select GENERIC_STRNCPY_FROM_USER | ||
36 | select GENERIC_STRNLEN_USER | ||
35 | help | 37 | help |
36 | The SuperH is a RISC processor targeted for use in embedded systems | 38 | The SuperH is a RISC processor targeted for use in embedded systems |
37 | and consumer electronics; it was also used in the Sega Dreamcast | 39 | and consumer electronics; it was also used in the Sega Dreamcast |
diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 46edf070da1c..aed701c7b11b 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile | |||
@@ -9,6 +9,12 @@ | |||
9 | # License. See the file "COPYING" in the main directory of this archive | 9 | # License. See the file "COPYING" in the main directory of this archive |
10 | # for more details. | 10 | # for more details. |
11 | # | 11 | # |
12 | ifneq ($(SUBARCH),$(ARCH)) | ||
13 | ifeq ($(CROSS_COMPILE),) | ||
14 | CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) | ||
15 | endif | ||
16 | endif | ||
17 | |||
12 | isa-y := any | 18 | isa-y := any |
13 | isa-$(CONFIG_SH_DSP) := sh | 19 | isa-$(CONFIG_SH_DSP) := sh |
14 | isa-$(CONFIG_CPU_SH2) := sh2 | 20 | isa-$(CONFIG_CPU_SH2) := sh2 |
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \ | |||
106 | KBUILD_DEFCONFIG := cayman_defconfig | 112 | KBUILD_DEFCONFIG := cayman_defconfig |
107 | endif | 113 | endif |
108 | 114 | ||
109 | ifneq ($(SUBARCH),$(ARCH)) | ||
110 | ifeq ($(CROSS_COMPILE),) | ||
111 | CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) | ||
112 | endif | ||
113 | endif | ||
114 | |||
115 | ifdef CONFIG_CPU_LITTLE_ENDIAN | 115 | ifdef CONFIG_CPU_LITTLE_ENDIAN |
116 | ld-bfd := elf32-$(UTS_MACHINE)-linux | 116 | ld-bfd := elf32-$(UTS_MACHINE)-linux |
117 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd) | 117 | LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd) |
118 | LDFLAGS += -EL | 118 | LDFLAGS += -EL |
119 | else | 119 | else |
120 | ld-bfd := elf32-$(UTS_MACHINE)big-linux | 120 | ld-bfd := elf32-$(UTS_MACHINE)big-linux |
121 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd) | 121 | LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd) |
122 | LDFLAGS += -EB | 122 | LDFLAGS += -EB |
123 | endif | 123 | endif |
124 | 124 | ||
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 7beb42322f60..7b673ddcd555 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild | |||
@@ -1,5 +1,39 @@ | |||
1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | generic-y += bitsperlong.h | ||
4 | generic-y += cputime.h | ||
5 | generic-y += current.h | ||
6 | generic-y += delay.h | ||
7 | generic-y += div64.h | ||
8 | generic-y += emergency-restart.h | ||
9 | generic-y += errno.h | ||
10 | generic-y += fcntl.h | ||
11 | generic-y += ioctl.h | ||
12 | generic-y += ipcbuf.h | ||
13 | generic-y += irq_regs.h | ||
14 | generic-y += kvm_para.h | ||
15 | generic-y += local.h | ||
16 | generic-y += local64.h | ||
17 | generic-y += param.h | ||
18 | generic-y += parport.h | ||
19 | generic-y += percpu.h | ||
20 | generic-y += poll.h | ||
21 | generic-y += mman.h | ||
22 | generic-y += msgbuf.h | ||
23 | generic-y += resource.h | ||
24 | generic-y += scatterlist.h | ||
25 | generic-y += sembuf.h | ||
26 | generic-y += serial.h | ||
27 | generic-y += shmbuf.h | ||
28 | generic-y += siginfo.h | ||
29 | generic-y += sizes.h | ||
30 | generic-y += socket.h | ||
31 | generic-y += statfs.h | ||
32 | generic-y += termbits.h | ||
33 | generic-y += termios.h | ||
34 | generic-y += ucontext.h | ||
35 | generic-y += xor.h | ||
36 | |||
3 | header-y += cachectl.h | 37 | header-y += cachectl.h |
4 | header-y += cpu-features.h | 38 | header-y += cpu-features.h |
5 | header-y += hw_breakpoint.h | 39 | header-y += hw_breakpoint.h |
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b2..000000000000 --- a/arch/sh/include/asm/bitsperlong.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/bitsperlong.h> | ||
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h deleted file mode 100644 index 6ca395d1393e..000000000000 --- a/arch/sh/include/asm/cputime.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __SH_CPUTIME_H | ||
2 | #define __SH_CPUTIME_H | ||
3 | |||
4 | #include <asm-generic/cputime.h> | ||
5 | |||
6 | #endif /* __SH_CPUTIME_H */ | ||
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h deleted file mode 100644 index 4c51401b5537..000000000000 --- a/arch/sh/include/asm/current.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/current.h> | ||
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h deleted file mode 100644 index 9670e127b7b2..000000000000 --- a/arch/sh/include/asm/delay.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/delay.h> | ||
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h deleted file mode 100644 index 6cd978cefb28..000000000000 --- a/arch/sh/include/asm/div64.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/div64.h> | ||
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/sh/include/asm/emergency-restart.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef _ASM_EMERGENCY_RESTART_H | ||
2 | #define _ASM_EMERGENCY_RESTART_H | ||
3 | |||
4 | #include <asm-generic/emergency-restart.h> | ||
5 | |||
6 | #endif /* _ASM_EMERGENCY_RESTART_H */ | ||
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h deleted file mode 100644 index 51cf6f9cebb8..000000000000 --- a/arch/sh/include/asm/errno.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_ERRNO_H | ||
2 | #define __ASM_SH_ERRNO_H | ||
3 | |||
4 | #include <asm-generic/errno.h> | ||
5 | |||
6 | #endif /* __ASM_SH_ERRNO_H */ | ||
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h deleted file mode 100644 index 46ab12db5739..000000000000 --- a/arch/sh/include/asm/fcntl.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/fcntl.h> | ||
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/arch/sh/include/asm/ioctl.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/ioctl.h> | ||
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/arch/sh/include/asm/ipcbuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/ipcbuf.h> | ||
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/arch/sh/include/asm/irq_regs.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/irq_regs.h> | ||
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/arch/sh/include/asm/kvm_para.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h deleted file mode 100644 index 9ed9b9cb459a..000000000000 --- a/arch/sh/include/asm/local.h +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | #ifndef __ASM_SH_LOCAL_H | ||
2 | #define __ASM_SH_LOCAL_H | ||
3 | |||
4 | #include <asm-generic/local.h> | ||
5 | |||
6 | #endif /* __ASM_SH_LOCAL_H */ | ||
7 | |||
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/arch/sh/include/asm/local64.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/local64.h> | ||
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab1..000000000000 --- a/arch/sh/include/asm/mman.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/mman.h> | ||
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h deleted file mode 100644 index 809134c644a6..000000000000 --- a/arch/sh/include/asm/msgbuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/msgbuf.h> | ||
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h deleted file mode 100644 index 965d45427975..000000000000 --- a/arch/sh/include/asm/param.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/param.h> | ||
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h deleted file mode 100644 index cf252af64590..000000000000 --- a/arch/sh/include/asm/parport.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/parport.h> | ||
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h deleted file mode 100644 index 4db4b39a4399..000000000000 --- a/arch/sh/include/asm/percpu.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ARCH_SH_PERCPU | ||
2 | #define __ARCH_SH_PERCPU | ||
3 | |||
4 | #include <asm-generic/percpu.h> | ||
5 | |||
6 | #endif /* __ARCH_SH_PERCPU */ | ||
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h deleted file mode 100644 index c98509d3149e..000000000000 --- a/arch/sh/include/asm/poll.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/poll.h> | ||
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h deleted file mode 100644 index 9c2499a86ec0..000000000000 --- a/arch/sh/include/asm/resource.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_RESOURCE_H | ||
2 | #define __ASM_SH_RESOURCE_H | ||
3 | |||
4 | #include <asm-generic/resource.h> | ||
5 | |||
6 | #endif /* __ASM_SH_RESOURCE_H */ | ||
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h deleted file mode 100644 index 98dfc3510f10..000000000000 --- a/arch/sh/include/asm/scatterlist.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_SCATTERLIST_H | ||
2 | #define __ASM_SH_SCATTERLIST_H | ||
3 | |||
4 | #include <asm-generic/scatterlist.h> | ||
5 | |||
6 | #endif /* __ASM_SH_SCATTERLIST_H */ | ||
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h deleted file mode 100644 index 7673b83cfef7..000000000000 --- a/arch/sh/include/asm/sembuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/sembuf.h> | ||
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h deleted file mode 100644 index a0cb0caff152..000000000000 --- a/arch/sh/include/asm/serial.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/serial.h> | ||
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h deleted file mode 100644 index 83c05fc2de38..000000000000 --- a/arch/sh/include/asm/shmbuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/shmbuf.h> | ||
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h deleted file mode 100644 index 813040ed68a9..000000000000 --- a/arch/sh/include/asm/siginfo.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_SIGINFO_H | ||
2 | #define __ASM_SH_SIGINFO_H | ||
3 | |||
4 | #include <asm-generic/siginfo.h> | ||
5 | |||
6 | #endif /* __ASM_SH_SIGINFO_H */ | ||
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h deleted file mode 100644 index dd248c2e1085..000000000000 --- a/arch/sh/include/asm/sizes.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/sizes.h> | ||
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h deleted file mode 100644 index 6b71384b9d8b..000000000000 --- a/arch/sh/include/asm/socket.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/socket.h> | ||
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h deleted file mode 100644 index 9202a023328f..000000000000 --- a/arch/sh/include/asm/statfs.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_STATFS_H | ||
2 | #define __ASM_SH_STATFS_H | ||
3 | |||
4 | #include <asm-generic/statfs.h> | ||
5 | |||
6 | #endif /* __ASM_SH_STATFS_H */ | ||
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h deleted file mode 100644 index 3935b106de79..000000000000 --- a/arch/sh/include/asm/termbits.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/termbits.h> | ||
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h deleted file mode 100644 index 280d78a9d966..000000000000 --- a/arch/sh/include/asm/termios.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/termios.h> | ||
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 050f221fa898..8698a80ed00c 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h | |||
@@ -25,6 +25,8 @@ | |||
25 | (__chk_user_ptr(addr), \ | 25 | (__chk_user_ptr(addr), \ |
26 | __access_ok((unsigned long __force)(addr), (size))) | 26 | __access_ok((unsigned long __force)(addr), (size))) |
27 | 27 | ||
28 | #define user_addr_max() (current_thread_info()->addr_limit.seg) | ||
29 | |||
28 | /* | 30 | /* |
29 | * Uh, these should become the main single-value transfer routines ... | 31 | * Uh, these should become the main single-value transfer routines ... |
30 | * They automatically use the right size if we just have the right | 32 | * They automatically use the right size if we just have the right |
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; }; | |||
100 | # include "uaccess_64.h" | 102 | # include "uaccess_64.h" |
101 | #endif | 103 | #endif |
102 | 104 | ||
105 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | ||
106 | |||
107 | extern __must_check long strlen_user(const char __user *str); | ||
108 | extern __must_check long strnlen_user(const char __user *str, long n); | ||
109 | |||
103 | /* Generic arbitrary sized copy. */ | 110 | /* Generic arbitrary sized copy. */ |
104 | /* Return the number of bytes NOT copied */ | 111 | /* Return the number of bytes NOT copied */ |
105 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); | 112 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); |
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size); | |||
137 | __cl_size; \ | 144 | __cl_size; \ |
138 | }) | 145 | }) |
139 | 146 | ||
140 | /** | ||
141 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
142 | * @dst: Destination address, in kernel space. This buffer must be at | ||
143 | * least @count bytes long. | ||
144 | * @src: Source address, in user space. | ||
145 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
146 | * | ||
147 | * Copies a NUL-terminated string from userspace to kernel space. | ||
148 | * | ||
149 | * On success, returns the length of the string (not including the trailing | ||
150 | * NUL). | ||
151 | * | ||
152 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
153 | * copied). | ||
154 | * | ||
155 | * If @count is smaller than the length of the string, copies @count bytes | ||
156 | * and returns @count. | ||
157 | */ | ||
158 | #define strncpy_from_user(dest,src,count) \ | ||
159 | ({ \ | ||
160 | unsigned long __sfu_src = (unsigned long)(src); \ | ||
161 | int __sfu_count = (int)(count); \ | ||
162 | long __sfu_res = -EFAULT; \ | ||
163 | \ | ||
164 | if (__access_ok(__sfu_src, __sfu_count)) \ | ||
165 | __sfu_res = __strncpy_from_user((unsigned long)(dest), \ | ||
166 | __sfu_src, __sfu_count); \ | ||
167 | \ | ||
168 | __sfu_res; \ | ||
169 | }) | ||
170 | |||
171 | static inline unsigned long | 147 | static inline unsigned long |
172 | copy_from_user(void *to, const void __user *from, unsigned long n) | 148 | copy_from_user(void *to, const void __user *from, unsigned long n) |
173 | { | 149 | { |
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) | |||
192 | return __copy_size; | 168 | return __copy_size; |
193 | } | 169 | } |
194 | 170 | ||
195 | /** | ||
196 | * strnlen_user: - Get the size of a string in user space. | ||
197 | * @s: The string to measure. | ||
198 | * @n: The maximum valid length | ||
199 | * | ||
200 | * Context: User context only. This function may sleep. | ||
201 | * | ||
202 | * Get the size of a NUL-terminated string in user space. | ||
203 | * | ||
204 | * Returns the size of the string INCLUDING the terminating NUL. | ||
205 | * On exception, returns 0. | ||
206 | * If the string is too long, returns a value greater than @n. | ||
207 | */ | ||
208 | static inline long strnlen_user(const char __user *s, long n) | ||
209 | { | ||
210 | if (!__addr_ok(s)) | ||
211 | return 0; | ||
212 | else | ||
213 | return __strnlen_user(s, n); | ||
214 | } | ||
215 | |||
216 | /** | ||
217 | * strlen_user: - Get the size of a string in user space. | ||
218 | * @str: The string to measure. | ||
219 | * | ||
220 | * Context: User context only. This function may sleep. | ||
221 | * | ||
222 | * Get the size of a NUL-terminated string in user space. | ||
223 | * | ||
224 | * Returns the size of the string INCLUDING the terminating NUL. | ||
225 | * On exception, returns 0. | ||
226 | * | ||
227 | * If there is a limit on the length of a valid string, you may wish to | ||
228 | * consider using strnlen_user() instead. | ||
229 | */ | ||
230 | #define strlen_user(str) strnlen_user(str, ~0UL >> 1) | ||
231 | |||
232 | /* | 171 | /* |
233 | * The exception table consists of pairs of addresses: the first is the | 172 | * The exception table consists of pairs of addresses: the first is the |
234 | * address of an instruction that is allowed to fault, and the second is | 173 | * address of an instruction that is allowed to fault, and the second is |
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h index ae0d24f6653f..c0de7ee35ab7 100644 --- a/arch/sh/include/asm/uaccess_32.h +++ b/arch/sh/include/asm/uaccess_32.h | |||
@@ -170,79 +170,4 @@ __asm__ __volatile__( \ | |||
170 | 170 | ||
171 | extern void __put_user_unknown(void); | 171 | extern void __put_user_unknown(void); |
172 | 172 | ||
173 | static inline int | ||
174 | __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count) | ||
175 | { | ||
176 | __kernel_size_t res; | ||
177 | unsigned long __dummy, _d, _s, _c; | ||
178 | |||
179 | __asm__ __volatile__( | ||
180 | "9:\n" | ||
181 | "mov.b @%2+, %1\n\t" | ||
182 | "cmp/eq #0, %1\n\t" | ||
183 | "bt/s 2f\n" | ||
184 | "1:\n" | ||
185 | "mov.b %1, @%3\n\t" | ||
186 | "dt %4\n\t" | ||
187 | "bf/s 9b\n\t" | ||
188 | " add #1, %3\n\t" | ||
189 | "2:\n\t" | ||
190 | "sub %4, %0\n" | ||
191 | "3:\n" | ||
192 | ".section .fixup,\"ax\"\n" | ||
193 | "4:\n\t" | ||
194 | "mov.l 5f, %1\n\t" | ||
195 | "jmp @%1\n\t" | ||
196 | " mov %9, %0\n\t" | ||
197 | ".balign 4\n" | ||
198 | "5: .long 3b\n" | ||
199 | ".previous\n" | ||
200 | ".section __ex_table,\"a\"\n" | ||
201 | " .balign 4\n" | ||
202 | " .long 9b,4b\n" | ||
203 | ".previous" | ||
204 | : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c) | ||
205 | : "0" (__count), "2" (__src), "3" (__dest), "4" (__count), | ||
206 | "i" (-EFAULT) | ||
207 | : "memory", "t"); | ||
208 | |||
209 | return res; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Return the size of a string (including the ending 0 even when we have | ||
214 | * exceeded the maximum string length). | ||
215 | */ | ||
216 | static inline long __strnlen_user(const char __user *__s, long __n) | ||
217 | { | ||
218 | unsigned long res; | ||
219 | unsigned long __dummy; | ||
220 | |||
221 | __asm__ __volatile__( | ||
222 | "1:\t" | ||
223 | "mov.b @(%0,%3), %1\n\t" | ||
224 | "cmp/eq %4, %0\n\t" | ||
225 | "bt/s 2f\n\t" | ||
226 | " add #1, %0\n\t" | ||
227 | "tst %1, %1\n\t" | ||
228 | "bf 1b\n\t" | ||
229 | "2:\n" | ||
230 | ".section .fixup,\"ax\"\n" | ||
231 | "3:\n\t" | ||
232 | "mov.l 4f, %1\n\t" | ||
233 | "jmp @%1\n\t" | ||
234 | " mov #0, %0\n" | ||
235 | ".balign 4\n" | ||
236 | "4: .long 2b\n" | ||
237 | ".previous\n" | ||
238 | ".section __ex_table,\"a\"\n" | ||
239 | " .balign 4\n" | ||
240 | " .long 1b,3b\n" | ||
241 | ".previous" | ||
242 | : "=z" (res), "=&r" (__dummy) | ||
243 | : "0" (0), "r" (__s), "r" (__n) | ||
244 | : "t"); | ||
245 | return res; | ||
246 | } | ||
247 | |||
248 | #endif /* __ASM_SH_UACCESS_32_H */ | 173 | #endif /* __ASM_SH_UACCESS_32_H */ |
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 56fd20b8cdcc..2e07e0f40c6a 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h | |||
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long); | |||
84 | extern long __put_user_asm_q(void *, long); | 84 | extern long __put_user_asm_q(void *, long); |
85 | extern void __put_user_unknown(void); | 85 | extern void __put_user_unknown(void); |
86 | 86 | ||
87 | extern long __strnlen_user(const char *__s, long __n); | ||
88 | extern int __strncpy_from_user(unsigned long __dest, | ||
89 | unsigned long __user __src, int __count); | ||
90 | |||
91 | #endif /* __ASM_SH_UACCESS_64_H */ | 87 | #endif /* __ASM_SH_UACCESS_64_H */ |
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h deleted file mode 100644 index 9bc07b9f30fb..000000000000 --- a/arch/sh/include/asm/ucontext.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/ucontext.h> | ||
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..6e38953ff7fd --- /dev/null +++ b/arch/sh/include/asm/word-at-a-time.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef __ASM_SH_WORD_AT_A_TIME_H | ||
2 | #define __ASM_SH_WORD_AT_A_TIME_H | ||
3 | |||
4 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
5 | # include <asm-generic/word-at-a-time.h> | ||
6 | #else | ||
7 | /* | ||
8 | * Little-endian version cribbed from x86. | ||
9 | */ | ||
10 | struct word_at_a_time { | ||
11 | const unsigned long one_bits, high_bits; | ||
12 | }; | ||
13 | |||
14 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | ||
15 | |||
16 | /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ | ||
17 | static inline long count_masked_bytes(long mask) | ||
18 | { | ||
19 | /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ | ||
20 | long a = (0x0ff0001+mask) >> 23; | ||
21 | /* Fix the 1 for 00 case */ | ||
22 | return a & mask; | ||
23 | } | ||
24 | |||
25 | /* Return nonzero if it has a zero */ | ||
26 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) | ||
27 | { | ||
28 | unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; | ||
29 | *bits = mask; | ||
30 | return mask; | ||
31 | } | ||
32 | |||
33 | static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) | ||
34 | { | ||
35 | return bits; | ||
36 | } | ||
37 | |||
38 | static inline unsigned long create_zero_mask(unsigned long bits) | ||
39 | { | ||
40 | bits = (bits - 1) & ~bits; | ||
41 | return bits >> 7; | ||
42 | } | ||
43 | |||
44 | /* The mask we created is directly usable as a bytemask */ | ||
45 | #define zero_bytemask(mask) (mask) | ||
46 | |||
47 | static inline unsigned long find_zero(unsigned long mask) | ||
48 | { | ||
49 | return count_masked_bytes(mask); | ||
50 | } | ||
51 | #endif | ||
52 | |||
53 | #endif | ||
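The little-endian helpers in the new word-at-a-time.h above find a NUL byte one word at a time: (a - 0x01010101) & ~a & 0x80808080 sets the high bit in the byte position of the first zero (stray bits after it are possible and get discarded), create_zero_mask() turns that into a mask covering the bytes below the NUL, and the (0x0ff0001 + mask) >> 23 trick maps that mask straight to the byte index 0, 1, 2 or 3. A stand-alone demo specialised to 32-bit words, assuming a little-endian host (illustrative only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ONE_BITS  0x01010101u
    #define HIGH_BITS 0x80808080u

    /* High bit set at the first zero byte (stray bits may follow it). */
    static uint32_t has_zero32(uint32_t a)
    {
            return (a - ONE_BITS) & ~a & HIGH_BITS;
    }

    /* Keep only the bytes below the first zero: 0, 0xff, 0xffff or 0xffffff. */
    static uint32_t create_zero_mask32(uint32_t bits)
    {
            bits = (bits - 1) & ~bits;
            return bits >> 7;
    }

    /* (000000 0000ff 00ffff ffffff) -> (1 1 2 3); "& mask" fixes the 0 case. */
    static unsigned int find_zero32(uint32_t mask)
    {
            uint32_t a = (0x0ff0001 + mask) >> 23;
            return a & mask;
    }

    int main(void)
    {
            const char s[4] = { 'a', 'b', '\0', 'x' };
            uint32_t w, bits;

            memcpy(&w, s, sizeof(w));               /* one 32-bit word load */
            bits = has_zero32(w);
            if (bits)
                    printf("NUL at byte %u\n",      /* prints: NUL at byte 2 */
                           find_zero32(create_zero_mask32(bits)));
            return 0;
    }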
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/arch/sh/include/asm/xor.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/xor.h> | ||
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h deleted file mode 100644 index 1192e1c761a7..000000000000 --- a/arch/sh/include/cpu-sh2a/cpu/ubc.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * SH-2A UBC definitions | ||
3 | * | ||
4 | * Copyright (C) 2008 Kieran Bingham | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_CPU_SH2A_UBC_H | ||
12 | #define __ASM_CPU_SH2A_UBC_H | ||
13 | |||
14 | #define UBC_BARA 0xfffc0400 | ||
15 | #define UBC_BAMRA 0xfffc0404 | ||
16 | #define UBC_BBRA 0xfffc04a0 /* 16 bit access */ | ||
17 | #define UBC_BDRA 0xfffc0408 | ||
18 | #define UBC_BDMRA 0xfffc040c | ||
19 | |||
20 | #define UBC_BARB 0xfffc0410 | ||
21 | #define UBC_BAMRB 0xfffc0414 | ||
22 | #define UBC_BBRB 0xfffc04b0 /* 16 bit access */ | ||
23 | #define UBC_BDRB 0xfffc0418 | ||
24 | #define UBC_BDMRB 0xfffc041c | ||
25 | |||
26 | #define UBC_BRCR 0xfffc04c0 | ||
27 | |||
28 | #endif /* __ASM_CPU_SH2A_UBC_H */ | ||
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index ff1f0e6e9bec..b7cf6a547f11 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S | |||
@@ -1569,86 +1569,6 @@ ___clear_user_exit: | |||
1569 | #endif /* CONFIG_MMU */ | 1569 | #endif /* CONFIG_MMU */ |
1570 | 1570 | ||
1571 | /* | 1571 | /* |
1572 | * int __strncpy_from_user(unsigned long __dest, unsigned long __src, | ||
1573 | * int __count) | ||
1574 | * | ||
1575 | * Inputs: | ||
1576 | * (r2) target address | ||
1577 | * (r3) source address | ||
1578 | * (r4) maximum size in bytes | ||
1579 | * | ||
1580 | * Ouputs: | ||
1581 | * (*r2) copied data | ||
1582 | * (r2) -EFAULT (in case of faulting) | ||
1583 | * copied data (otherwise) | ||
1584 | */ | ||
1585 | .global __strncpy_from_user | ||
1586 | __strncpy_from_user: | ||
1587 | pta ___strncpy_from_user1, tr0 | ||
1588 | pta ___strncpy_from_user_done, tr1 | ||
1589 | or r4, ZERO, r5 /* r5 = original count */ | ||
1590 | beq/u r4, r63, tr1 /* early exit if r4==0 */ | ||
1591 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
1592 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
1593 | |||
1594 | ___strncpy_from_user1: | ||
1595 | ld.b r3, 0, r7 /* Fault address: only in reading */ | ||
1596 | st.b r2, 0, r7 | ||
1597 | addi r2, 1, r2 | ||
1598 | addi r3, 1, r3 | ||
1599 | beq/u ZERO, r7, tr1 | ||
1600 | addi r4, -1, r4 /* return real number of copied bytes */ | ||
1601 | bne/l ZERO, r4, tr0 | ||
1602 | |||
1603 | ___strncpy_from_user_done: | ||
1604 | sub r5, r4, r6 /* If done, return copied */ | ||
1605 | |||
1606 | ___strncpy_from_user_exit: | ||
1607 | or r6, ZERO, r2 | ||
1608 | ptabs LINK, tr0 | ||
1609 | blink tr0, ZERO | ||
1610 | |||
1611 | /* | ||
1612 | * extern long __strnlen_user(const char *__s, long __n) | ||
1613 | * | ||
1614 | * Inputs: | ||
1615 | * (r2) source address | ||
1616 | * (r3) source size in bytes | ||
1617 | * | ||
1618 | * Ouputs: | ||
1619 | * (r2) -EFAULT (in case of faulting) | ||
1620 | * string length (otherwise) | ||
1621 | */ | ||
1622 | .global __strnlen_user | ||
1623 | __strnlen_user: | ||
1624 | pta ___strnlen_user_set_reply, tr0 | ||
1625 | pta ___strnlen_user1, tr1 | ||
1626 | or ZERO, ZERO, r5 /* r5 = counter */ | ||
1627 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
1628 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
1629 | beq r3, ZERO, tr0 | ||
1630 | |||
1631 | ___strnlen_user1: | ||
1632 | ldx.b r2, r5, r7 /* Fault address: only in reading */ | ||
1633 | addi r3, -1, r3 /* No real fixup */ | ||
1634 | addi r5, 1, r5 | ||
1635 | beq r3, ZERO, tr0 | ||
1636 | bne r7, ZERO, tr1 | ||
1637 | ! The line below used to be active. This meant led to a junk byte lying between each pair | ||
1638 | ! of entries in the argv & envp structures in memory. Whilst the program saw the right data | ||
1639 | ! via the argv and envp arguments to main, it meant the 'flat' representation visible through | ||
1640 | ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example. | ||
1641 | ! addi r5, 1, r5 /* Include '\0' */ | ||
1642 | |||
1643 | ___strnlen_user_set_reply: | ||
1644 | or r5, ZERO, r6 /* If done, return counter */ | ||
1645 | |||
1646 | ___strnlen_user_exit: | ||
1647 | or r6, ZERO, r2 | ||
1648 | ptabs LINK, tr0 | ||
1649 | blink tr0, ZERO | ||
1650 | |||
1651 | /* | ||
1652 | * extern long __get_user_asm_?(void *val, long addr) | 1572 | * extern long __get_user_asm_?(void *val, long addr) |
1653 | * | 1573 | * |
1654 | * Inputs: | 1574 | * Inputs: |
@@ -1982,8 +1902,6 @@ asm_uaccess_start: | |||
1982 | .long ___copy_user2, ___copy_user_exit | 1902 | .long ___copy_user2, ___copy_user_exit |
1983 | .long ___clear_user1, ___clear_user_exit | 1903 | .long ___clear_user1, ___clear_user_exit |
1984 | #endif | 1904 | #endif |
1985 | .long ___strncpy_from_user1, ___strncpy_from_user_exit | ||
1986 | .long ___strnlen_user1, ___strnlen_user_exit | ||
1987 | .long ___get_user_asm_b1, ___get_user_asm_b_exit | 1905 | .long ___get_user_asm_b1, ___get_user_asm_b_exit |
1988 | .long ___get_user_asm_w1, ___get_user_asm_w_exit | 1906 | .long ___get_user_asm_w1, ___get_user_asm_w_exit |
1989 | .long ___get_user_asm_l1, ___get_user_asm_l_exit | 1907 | .long ___get_user_asm_l1, ___get_user_asm_l_exit |
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index 9b7a459a4613..055d91b70305 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
6 | #include <linux/stackprotector.h> | 6 | #include <linux/stackprotector.h> |
7 | #include <asm/fpu.h> | ||
7 | 8 | ||
8 | struct kmem_cache *task_xstate_cachep = NULL; | 9 | struct kmem_cache *task_xstate_cachep = NULL; |
9 | unsigned int xstate_size; | 10 | unsigned int xstate_size; |
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 4264583eabac..602545b12a86 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/switch_to.h> | 33 | #include <asm/switch_to.h> |
34 | 34 | ||
35 | struct task_struct *last_task_used_math = NULL; | 35 | struct task_struct *last_task_used_math = NULL; |
36 | struct pt_regs fake_swapper_regs = { 0, }; | ||
36 | 37 | ||
37 | void show_regs(struct pt_regs *regs) | 38 | void show_regs(struct pt_regs *regs) |
38 | { | 39 | { |
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index 45afa5c51f67..26a0774f5272 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c | |||
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b); | |||
32 | EXPORT_SYMBOL(__get_user_asm_w); | 32 | EXPORT_SYMBOL(__get_user_asm_w); |
33 | EXPORT_SYMBOL(__get_user_asm_l); | 33 | EXPORT_SYMBOL(__get_user_asm_l); |
34 | EXPORT_SYMBOL(__get_user_asm_q); | 34 | EXPORT_SYMBOL(__get_user_asm_q); |
35 | EXPORT_SYMBOL(__strnlen_user); | ||
36 | EXPORT_SYMBOL(__strncpy_from_user); | ||
37 | EXPORT_SYMBOL(__clear_user); | 35 | EXPORT_SYMBOL(__clear_user); |
38 | EXPORT_SYMBOL(copy_page); | 36 | EXPORT_SYMBOL(copy_page); |
39 | EXPORT_SYMBOL(__copy_user); | 37 | EXPORT_SYMBOL(__copy_user); |
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h deleted file mode 100644 index 870db5928577..000000000000 --- a/arch/sparc/include/asm/cmt.h +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | #ifndef _SPARC64_CMT_H | ||
2 | #define _SPARC64_CMT_H | ||
3 | |||
4 | /* cmt.h: Chip Multi-Threading register definitions | ||
5 | * | ||
6 | * Copyright (C) 2004 David S. Miller (davem@redhat.com) | ||
7 | */ | ||
8 | |||
9 | /* ASI_CORE_ID - private */ | ||
10 | #define LP_ID 0x0000000000000010UL | ||
11 | #define LP_ID_MAX 0x00000000003f0000UL | ||
12 | #define LP_ID_ID 0x000000000000003fUL | ||
13 | |||
14 | /* ASI_INTR_ID - private */ | ||
15 | #define LP_INTR_ID 0x0000000000000000UL | ||
16 | #define LP_INTR_ID_ID 0x00000000000003ffUL | ||
17 | |||
18 | /* ASI_CESR_ID - private */ | ||
19 | #define CESR_ID 0x0000000000000040UL | ||
20 | #define CESR_ID_ID 0x00000000000000ffUL | ||
21 | |||
22 | /* ASI_CORE_AVAILABLE - shared */ | ||
23 | #define LP_AVAIL 0x0000000000000000UL | ||
24 | #define LP_AVAIL_1 0x0000000000000002UL | ||
25 | #define LP_AVAIL_0 0x0000000000000001UL | ||
26 | |||
27 | /* ASI_CORE_ENABLE_STATUS - shared */ | ||
28 | #define LP_ENAB_STAT 0x0000000000000010UL | ||
29 | #define LP_ENAB_STAT_1 0x0000000000000002UL | ||
30 | #define LP_ENAB_STAT_0 0x0000000000000001UL | ||
31 | |||
32 | /* ASI_CORE_ENABLE - shared */ | ||
33 | #define LP_ENAB 0x0000000000000020UL | ||
34 | #define LP_ENAB_1 0x0000000000000002UL | ||
35 | #define LP_ENAB_0 0x0000000000000001UL | ||
36 | |||
37 | /* ASI_CORE_RUNNING - shared */ | ||
38 | #define LP_RUNNING_RW 0x0000000000000050UL | ||
39 | #define LP_RUNNING_W1S 0x0000000000000060UL | ||
40 | #define LP_RUNNING_W1C 0x0000000000000068UL | ||
41 | #define LP_RUNNING_1 0x0000000000000002UL | ||
42 | #define LP_RUNNING_0 0x0000000000000001UL | ||
43 | |||
44 | /* ASI_CORE_RUNNING_STAT - shared */ | ||
45 | #define LP_RUN_STAT 0x0000000000000058UL | ||
46 | #define LP_RUN_STAT_1 0x0000000000000002UL | ||
47 | #define LP_RUN_STAT_0 0x0000000000000001UL | ||
48 | |||
49 | /* ASI_XIR_STEERING - shared */ | ||
50 | #define LP_XIR_STEER 0x0000000000000030UL | ||
51 | #define LP_XIR_STEER_1 0x0000000000000002UL | ||
52 | #define LP_XIR_STEER_0 0x0000000000000001UL | ||
53 | |||
54 | /* ASI_CMT_ERROR_STEERING - shared */ | ||
55 | #define CMT_ER_STEER 0x0000000000000040UL | ||
56 | #define CMT_ER_STEER_1 0x0000000000000002UL | ||
57 | #define CMT_ER_STEER_0 0x0000000000000001UL | ||
58 | |||
59 | #endif /* _SPARC64_CMT_H */ | ||
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h deleted file mode 100644 index f8423039b242..000000000000 --- a/arch/sparc/include/asm/mpmbox.h +++ /dev/null | |||
@@ -1,67 +0,0 @@ | |||
1 | /* | ||
2 | * mpmbox.h: Interface and defines for the OpenProm mailbox | ||
3 | * facilities for MP machines under Linux. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _SPARC_MPMBOX_H | ||
9 | #define _SPARC_MPMBOX_H | ||
10 | |||
11 | /* The prom allocates, for each CPU on the machine, an unsigned | ||
12 | * byte in physical ram. You probe the device tree prom nodes | ||
13 | * for these values. The purpose of this byte is to be able to | ||
14 | * pass messages from one cpu to another. | ||
15 | */ | ||
16 | |||
17 | /* These are the main message types we have to look for in our | ||
18 | * Cpu mailboxes, based upon these values we decide what course | ||
19 | * of action to take. | ||
20 | */ | ||
21 | |||
22 | /* The CPU is executing code in the kernel. */ | ||
23 | #define MAILBOX_ISRUNNING 0xf0 | ||
24 | |||
25 | /* Another CPU called romvec->pv_exit(), you should call | ||
26 | * prom_stopcpu() when you see this in your mailbox. | ||
27 | */ | ||
28 | #define MAILBOX_EXIT 0xfb | ||
29 | |||
30 | /* Another CPU called romvec->pv_enter(), you should call | ||
31 | * prom_cpuidle() when this is seen. | ||
32 | */ | ||
33 | #define MAILBOX_GOSPIN 0xfc | ||
34 | |||
35 | /* Another CPU has hit a breakpoint either into kadb or the prom | ||
36 | * itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle() | ||
37 | * at this point. | ||
38 | */ | ||
39 | #define MAILBOX_BPT_SPIN 0xfd | ||
40 | |||
41 | /* Oh geese, some other nitwit got a damn watchdog reset. The party's | ||
42 | * over so go call prom_stopcpu(). | ||
43 | */ | ||
44 | #define MAILBOX_WDOG_STOP 0xfe | ||
45 | |||
46 | #ifndef __ASSEMBLY__ | ||
47 | |||
48 | /* Handy macros to determine a cpu's state. */ | ||
49 | |||
50 | /* Is the cpu still in Power On Self Test? */ | ||
51 | #define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f) | ||
52 | |||
53 | /* Is the cpu at the 'ok' prompt of the PROM? */ | ||
54 | #define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f) | ||
55 | |||
56 | /* Is the cpu spinning in the PROM? */ | ||
57 | #define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef) | ||
58 | |||
59 | /* Sanity check... This is junk mail, throw it out. */ | ||
60 | #define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa) | ||
61 | |||
62 | /* Is the cpu actively running an application/kernel-code? */ | ||
63 | #define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING) | ||
64 | |||
65 | #endif /* !(__ASSEMBLY__) */ | ||
66 | |||
67 | #endif /* !(_SPARC_MPMBOX_H) */ | ||
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 5cffdc55f075..3e244f31e56b 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c | |||
@@ -443,7 +443,7 @@ static int __init vio_init(void) | |||
443 | root_vdev = vio_create_one(hp, root, NULL); | 443 | root_vdev = vio_create_one(hp, root, NULL); |
444 | err = -ENODEV; | 444 | err = -ENODEV; |
445 | if (!root_vdev) { | 445 | if (!root_vdev) { |
446 | printk(KERN_ERR "VIO: Coult not create root device.\n"); | 446 | printk(KERN_ERR "VIO: Could not create root device.\n"); |
447 | goto out_release; | 447 | goto out_release; |
448 | } | 448 | } |
449 | 449 | ||
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 7e1fef36bde6..e9c670d7a7fe 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h | |||
@@ -91,11 +91,6 @@ extern void smp_nap(void); | |||
91 | /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ | 91 | /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ |
92 | extern void _cpu_idle(void); | 92 | extern void _cpu_idle(void); |
93 | 93 | ||
94 | /* Switch boot idle thread to a freshly-allocated stack and free old stack. */ | ||
95 | extern void cpu_idle_on_new_stack(struct thread_info *old_ti, | ||
96 | unsigned long new_sp, | ||
97 | unsigned long new_ss10); | ||
98 | |||
99 | #else /* __ASSEMBLY__ */ | 94 | #else /* __ASSEMBLY__ */ |
100 | 95 | ||
101 | /* | 96 | /* |
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index c3dd275f25e2..9ab078a4605d 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h | |||
@@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs); | |||
146 | #ifdef __tilegx__ | 146 | #ifdef __tilegx__ |
147 | #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) | 147 | #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) |
148 | #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) | 148 | #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) |
149 | #define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret) | 149 | #define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret) |
150 | #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) | 150 | #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) |
151 | #else | 151 | #else |
152 | #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) | 152 | #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) |
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S index 133c4b56a99e..c31637baff28 100644 --- a/arch/tile/kernel/entry.S +++ b/arch/tile/kernel/entry.S | |||
@@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current) | |||
68 | jrp lr /* keep backtracer happy */ | 68 | jrp lr /* keep backtracer happy */ |
69 | STD_ENDPROC(KBacktraceIterator_init_current) | 69 | STD_ENDPROC(KBacktraceIterator_init_current) |
70 | 70 | ||
71 | /* | ||
72 | * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then | ||
73 | * free the old stack (passed in r0) and re-invoke cpu_idle(). | ||
74 | * We update sp and ksp0 simultaneously to avoid backtracer warnings. | ||
75 | */ | ||
76 | STD_ENTRY(cpu_idle_on_new_stack) | ||
77 | { | ||
78 | move sp, r1 | ||
79 | mtspr SPR_SYSTEM_SAVE_K_0, r2 | ||
80 | } | ||
81 | jal free_thread_info | ||
82 | j cpu_idle | ||
83 | STD_ENDPROC(cpu_idle_on_new_stack) | ||
84 | |||
85 | /* Loop forever on a nap during SMP boot. */ | 71 | /* Loop forever on a nap during SMP boot. */ |
86 | STD_ENTRY(smp_nap) | 72 | STD_ENTRY(smp_nap) |
87 | nap | 73 | nap |
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6098ccc59be2..dd87f3420390 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/smp.h> | 29 | #include <linux/smp.h> |
30 | #include <linux/timex.h> | 30 | #include <linux/timex.h> |
31 | #include <linux/hugetlb.h> | 31 | #include <linux/hugetlb.h> |
32 | #include <linux/start_kernel.h> | ||
32 | #include <asm/setup.h> | 33 | #include <asm/setup.h> |
33 | #include <asm/sections.h> | 34 | #include <asm/sections.h> |
34 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 8bbea6aa40d9..efe5acfc79c3 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
@@ -94,10 +94,10 @@ bs_die: | |||
94 | 94 | ||
95 | .section ".bsdata", "a" | 95 | .section ".bsdata", "a" |
96 | bugger_off_msg: | 96 | bugger_off_msg: |
97 | .ascii "Direct booting from floppy is no longer supported.\r\n" | 97 | .ascii "Direct floppy boot is not supported. " |
98 | .ascii "Please use a boot loader program instead.\r\n" | 98 | .ascii "Use a boot loader program instead.\r\n" |
99 | .ascii "\n" | 99 | .ascii "\n" |
100 | .ascii "Remove disk and press any key to reboot . . .\r\n" | 100 | .ascii "Remove disk and press any key to reboot ...\r\n" |
101 | .byte 0 | 101 | .byte 0 |
102 | 102 | ||
103 | #ifdef CONFIG_EFI_STUB | 103 | #ifdef CONFIG_EFI_STUB |
@@ -111,7 +111,7 @@ coff_header: | |||
111 | #else | 111 | #else |
112 | .word 0x8664 # x86-64 | 112 | .word 0x8664 # x86-64 |
113 | #endif | 113 | #endif |
114 | .word 2 # nr_sections | 114 | .word 3 # nr_sections |
115 | .long 0 # TimeDateStamp | 115 | .long 0 # TimeDateStamp |
116 | .long 0 # PointerToSymbolTable | 116 | .long 0 # PointerToSymbolTable |
117 | .long 1 # NumberOfSymbols | 117 | .long 1 # NumberOfSymbols |
@@ -158,8 +158,8 @@ extra_header_fields: | |||
158 | #else | 158 | #else |
159 | .quad 0 # ImageBase | 159 | .quad 0 # ImageBase |
160 | #endif | 160 | #endif |
161 | .long 0x1000 # SectionAlignment | 161 | .long 0x20 # SectionAlignment |
162 | .long 0x200 # FileAlignment | 162 | .long 0x20 # FileAlignment |
163 | .word 0 # MajorOperatingSystemVersion | 163 | .word 0 # MajorOperatingSystemVersion |
164 | .word 0 # MinorOperatingSystemVersion | 164 | .word 0 # MinorOperatingSystemVersion |
165 | .word 0 # MajorImageVersion | 165 | .word 0 # MajorImageVersion |
@@ -200,8 +200,10 @@ extra_header_fields: | |||
200 | 200 | ||
201 | # Section table | 201 | # Section table |
202 | section_table: | 202 | section_table: |
203 | .ascii ".text" | 203 | # |
204 | .byte 0 | 204 | # The offset & size fields are filled in by build.c. |
205 | # | ||
206 | .ascii ".setup" | ||
205 | .byte 0 | 207 | .byte 0 |
206 | .byte 0 | 208 | .byte 0 |
207 | .long 0 | 209 | .long 0 |
@@ -217,9 +219,8 @@ section_table: | |||
217 | 219 | ||
218 | # | 220 | # |
219 | # The EFI application loader requires a relocation section | 221 | # The EFI application loader requires a relocation section |
220 | # because EFI applications must be relocatable. But since | 222 | # because EFI applications must be relocatable. The .reloc |
221 | # we don't need the loader to fixup any relocs for us, we | 223 | # offset & size fields are filled in by build.c. |
222 | # just create an empty (zero-length) .reloc section header. | ||
223 | # | 224 | # |
224 | .ascii ".reloc" | 225 | .ascii ".reloc" |
225 | .byte 0 | 226 | .byte 0 |
@@ -233,6 +234,25 @@ section_table: | |||
233 | .word 0 # NumberOfRelocations | 234 | .word 0 # NumberOfRelocations |
234 | .word 0 # NumberOfLineNumbers | 235 | .word 0 # NumberOfLineNumbers |
235 | .long 0x42100040 # Characteristics (section flags) | 236 | .long 0x42100040 # Characteristics (section flags) |
237 | |||
238 | # | ||
239 | # The offset & size fields are filled in by build.c. | ||
240 | # | ||
241 | .ascii ".text" | ||
242 | .byte 0 | ||
243 | .byte 0 | ||
244 | .byte 0 | ||
245 | .long 0 | ||
246 | .long 0x0 # startup_{32,64} | ||
247 | .long 0 # Size of initialized data | ||
248 | # on disk | ||
249 | .long 0x0 # startup_{32,64} | ||
250 | .long 0 # PointerToRelocations | ||
251 | .long 0 # PointerToLineNumbers | ||
252 | .word 0 # NumberOfRelocations | ||
253 | .word 0 # NumberOfLineNumbers | ||
254 | .long 0x60500020 # Characteristics (section flags) | ||
255 | |||
236 | #endif /* CONFIG_EFI_STUB */ | 256 | #endif /* CONFIG_EFI_STUB */ |
237 | 257 | ||
238 | # Kernel attributes; used by setup. This is part 1 of the | 258 | # Kernel attributes; used by setup. This is part 1 of the |
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index 3f61f6e2b46f..4b8e165ee572 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c | |||
@@ -50,6 +50,8 @@ typedef unsigned int u32; | |||
50 | u8 buf[SETUP_SECT_MAX*512]; | 50 | u8 buf[SETUP_SECT_MAX*512]; |
51 | int is_big_kernel; | 51 | int is_big_kernel; |
52 | 52 | ||
53 | #define PECOFF_RELOC_RESERVE 0x20 | ||
54 | |||
53 | /*----------------------------------------------------------------------*/ | 55 | /*----------------------------------------------------------------------*/ |
54 | 56 | ||
55 | static const u32 crctab32[] = { | 57 | static const u32 crctab32[] = { |
@@ -133,11 +135,103 @@ static void usage(void) | |||
133 | die("Usage: build setup system [> image]"); | 135 | die("Usage: build setup system [> image]"); |
134 | } | 136 | } |
135 | 137 | ||
136 | int main(int argc, char ** argv) | ||
137 | { | ||
138 | #ifdef CONFIG_EFI_STUB | 138 | #ifdef CONFIG_EFI_STUB |
139 | unsigned int file_sz, pe_header; | 139 | |
140 | static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) | ||
141 | { | ||
142 | unsigned int pe_header; | ||
143 | unsigned short num_sections; | ||
144 | u8 *section; | ||
145 | |||
146 | pe_header = get_unaligned_le32(&buf[0x3c]); | ||
147 | num_sections = get_unaligned_le16(&buf[pe_header + 6]); | ||
148 | |||
149 | #ifdef CONFIG_X86_32 | ||
150 | section = &buf[pe_header + 0xa8]; | ||
151 | #else | ||
152 | section = &buf[pe_header + 0xb8]; | ||
140 | #endif | 153 | #endif |
154 | |||
155 | while (num_sections > 0) { | ||
156 | if (strncmp((char*)section, section_name, 8) == 0) { | ||
157 | /* section header size field */ | ||
158 | put_unaligned_le32(size, section + 0x8); | ||
159 | |||
160 | /* section header vma field */ | ||
161 | put_unaligned_le32(offset, section + 0xc); | ||
162 | |||
163 | /* section header 'size of initialised data' field */ | ||
164 | put_unaligned_le32(size, section + 0x10); | ||
165 | |||
166 | /* section header 'file offset' field */ | ||
167 | put_unaligned_le32(offset, section + 0x14); | ||
168 | |||
169 | break; | ||
170 | } | ||
171 | section += 0x28; | ||
172 | num_sections--; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static void update_pecoff_setup_and_reloc(unsigned int size) | ||
177 | { | ||
178 | u32 setup_offset = 0x200; | ||
179 | u32 reloc_offset = size - PECOFF_RELOC_RESERVE; | ||
180 | u32 setup_size = reloc_offset - setup_offset; | ||
181 | |||
182 | update_pecoff_section_header(".setup", setup_offset, setup_size); | ||
183 | update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE); | ||
184 | |||
185 | /* | ||
186 | * Modify .reloc section contents with a single entry. The | ||
187 | * relocation is applied to offset 10 of the relocation section. | ||
188 | */ | ||
189 | put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]); | ||
190 | put_unaligned_le32(10, &buf[reloc_offset + 4]); | ||
191 | } | ||
192 | |||
193 | static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) | ||
194 | { | ||
195 | unsigned int pe_header; | ||
196 | unsigned int text_sz = file_sz - text_start; | ||
197 | |||
198 | pe_header = get_unaligned_le32(&buf[0x3c]); | ||
199 | |||
200 | /* Size of image */ | ||
201 | put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); | ||
202 | |||
203 | /* | ||
204 | * Size of code: Subtract the size of the first sector (512 bytes) | ||
205 | * which includes the header. | ||
206 | */ | ||
207 | put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); | ||
208 | |||
209 | #ifdef CONFIG_X86_32 | ||
210 | /* | ||
211 | * Address of entry point. | ||
212 | * | ||
213 | * The EFI stub entry point is +16 bytes from the start of | ||
214 | * the .text section. | ||
215 | */ | ||
216 | put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); | ||
217 | #else | ||
218 | /* | ||
219 | * Address of entry point. startup_32 is at the beginning and | ||
220 | * the 64-bit entry point (startup_64) is always 512 bytes | ||
221 | * after. The EFI stub entry point is 16 bytes after that, as | ||
222 | * the first instruction allows legacy loaders to jump over | ||
223 | * the EFI stub initialisation | ||
224 | */ | ||
225 | put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); | ||
226 | #endif /* CONFIG_X86_32 */ | ||
227 | |||
228 | update_pecoff_section_header(".text", text_start, text_sz); | ||
229 | } | ||
230 | |||
231 | #endif /* CONFIG_EFI_STUB */ | ||
232 | |||
233 | int main(int argc, char ** argv) | ||
234 | { | ||
141 | unsigned int i, sz, setup_sectors; | 235 | unsigned int i, sz, setup_sectors; |
142 | int c; | 236 | int c; |
143 | u32 sys_size; | 237 | u32 sys_size; |
@@ -163,6 +257,12 @@ int main(int argc, char ** argv) | |||
163 | die("Boot block hasn't got boot flag (0xAA55)"); | 257 | die("Boot block hasn't got boot flag (0xAA55)"); |
164 | fclose(file); | 258 | fclose(file); |
165 | 259 | ||
260 | #ifdef CONFIG_EFI_STUB | ||
261 | /* Reserve 0x20 bytes for .reloc section */ | ||
262 | memset(buf+c, 0, PECOFF_RELOC_RESERVE); | ||
263 | c += PECOFF_RELOC_RESERVE; | ||
264 | #endif | ||
265 | |||
166 | /* Pad unused space with zeros */ | 266 | /* Pad unused space with zeros */ |
167 | setup_sectors = (c + 511) / 512; | 267 | setup_sectors = (c + 511) / 512; |
168 | if (setup_sectors < SETUP_SECT_MIN) | 268 | if (setup_sectors < SETUP_SECT_MIN) |
@@ -170,6 +270,10 @@ int main(int argc, char ** argv) | |||
170 | i = setup_sectors*512; | 270 | i = setup_sectors*512; |
171 | memset(buf+c, 0, i-c); | 271 | memset(buf+c, 0, i-c); |
172 | 272 | ||
273 | #ifdef CONFIG_EFI_STUB | ||
274 | update_pecoff_setup_and_reloc(i); | ||
275 | #endif | ||
276 | |||
173 | /* Set the default root device */ | 277 | /* Set the default root device */ |
174 | put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); | 278 | put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); |
175 | 279 | ||
@@ -194,66 +298,8 @@ int main(int argc, char ** argv) | |||
194 | put_unaligned_le32(sys_size, &buf[0x1f4]); | 298 | put_unaligned_le32(sys_size, &buf[0x1f4]); |
195 | 299 | ||
196 | #ifdef CONFIG_EFI_STUB | 300 | #ifdef CONFIG_EFI_STUB |
197 | file_sz = sz + i + ((sys_size * 16) - sz); | 301 | update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); |
198 | 302 | #endif | |
199 | pe_header = get_unaligned_le32(&buf[0x3c]); | ||
200 | |||
201 | /* Size of image */ | ||
202 | put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); | ||
203 | |||
204 | /* | ||
205 | * Subtract the size of the first section (512 bytes) which | ||
206 | * includes the header and .reloc section. The remaining size | ||
207 | * is that of the .text section. | ||
208 | */ | ||
209 | file_sz -= 512; | ||
210 | |||
211 | /* Size of code */ | ||
212 | put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]); | ||
213 | |||
214 | #ifdef CONFIG_X86_32 | ||
215 | /* | ||
216 | * Address of entry point. | ||
217 | * | ||
218 | * The EFI stub entry point is +16 bytes from the start of | ||
219 | * the .text section. | ||
220 | */ | ||
221 | put_unaligned_le32(i + 16, &buf[pe_header + 0x28]); | ||
222 | |||
223 | /* .text size */ | ||
224 | put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); | ||
225 | |||
226 | /* .text vma */ | ||
227 | put_unaligned_le32(0x200, &buf[pe_header + 0xb4]); | ||
228 | |||
229 | /* .text size of initialised data */ | ||
230 | put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]); | ||
231 | |||
232 | /* .text file offset */ | ||
233 | put_unaligned_le32(0x200, &buf[pe_header + 0xbc]); | ||
234 | #else | ||
235 | /* | ||
236 | * Address of entry point. startup_32 is at the beginning and | ||
237 | * the 64-bit entry point (startup_64) is always 512 bytes | ||
238 | * after. The EFI stub entry point is 16 bytes after that, as | ||
239 | * the first instruction allows legacy loaders to jump over | ||
240 | * the EFI stub initialisation | ||
241 | */ | ||
242 | put_unaligned_le32(i + 528, &buf[pe_header + 0x28]); | ||
243 | |||
244 | /* .text size */ | ||
245 | put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); | ||
246 | |||
247 | /* .text vma */ | ||
248 | put_unaligned_le32(0x200, &buf[pe_header + 0xc4]); | ||
249 | |||
250 | /* .text size of initialised data */ | ||
251 | put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]); | ||
252 | |||
253 | /* .text file offset */ | ||
254 | put_unaligned_le32(0x200, &buf[pe_header + 0xcc]); | ||
255 | #endif /* CONFIG_X86_32 */ | ||
256 | #endif /* CONFIG_EFI_STUB */ | ||
257 | 303 | ||
258 | crc = partial_crc32(buf, i, crc); | 304 | crc = partial_crc32(buf, i, crc); |
259 | if (fwrite(buf, 1, i, stdout) != i) | 305 | if (fwrite(buf, 1, i, stdout) != i) |
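The raw offsets that build.c pokes above are standard PE/COFF header fields. Purely as a reading aid (assuming the usual spec field names; this is not part of the patch), they line up with a layout like this:

#include <stdint.h>

/* One section-table entry is 0x28 bytes; the table begins right after
 * the optional header (hence the pe_header + 0xa8 / + 0xb8 constants). */
struct pe_section_header {
	char     name[8];		/* ".setup", ".reloc", ".text" */
	uint32_t virtual_size;		/* patched at section + 0x08 */
	uint32_t virtual_address;	/* patched at section + 0x0c */
	uint32_t size_of_raw_data;	/* patched at section + 0x10 */
	uint32_t pointer_to_raw_data;	/* patched at section + 0x14 */
	uint32_t pointer_to_relocations;
	uint32_t pointer_to_line_numbers;
	uint16_t number_of_relocations;
	uint16_t number_of_line_numbers;
	uint32_t characteristics;
};

/* Likewise in the headers: buf[0x3c] is e_lfanew (file offset of the
 * "PE\0\0" signature), pe_header + 6 is NumberOfSections in the COFF
 * header, and in the optional header pe_header + 0x1c is SizeOfCode,
 * pe_header + 0x28 is AddressOfEntryPoint and pe_header + 0x50 is
 * SizeOfImage. */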
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index be6d9e365a80..3470624d7835 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec) | |||
2460 | pxor IN3, STATE4 | 2460 | pxor IN3, STATE4 |
2461 | movaps IN4, IV | 2461 | movaps IN4, IV |
2462 | #else | 2462 | #else |
2463 | pxor (INP), STATE2 | ||
2464 | pxor 0x10(INP), STATE3 | ||
2465 | pxor IN1, STATE4 | 2463 | pxor IN1, STATE4 |
2466 | movaps IN2, IV | 2464 | movaps IN2, IV |
2465 | movups (INP), IN1 | ||
2466 | pxor IN1, STATE2 | ||
2467 | movups 0x10(INP), IN2 | ||
2468 | pxor IN2, STATE3 | ||
2467 | #endif | 2469 | #endif |
2468 | movups STATE1, (OUTP) | 2470 | movups STATE1, (OUTP) |
2469 | movups STATE2, 0x10(OUTP) | 2471 | movups STATE2, 0x10(OUTP) |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 0e3793b821ef..dc580c42851c 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -54,6 +54,20 @@ struct nmiaction { | |||
54 | __register_nmi_handler((t), &fn##_na); \ | 54 | __register_nmi_handler((t), &fn##_na); \ |
55 | }) | 55 | }) |
56 | 56 | ||
57 | /* | ||
58 | * For special handlers that register/unregister in the | ||
59 | * init section only. This should be considered rare. | ||
60 | */ | ||
61 | #define register_nmi_handler_initonly(t, fn, fg, n) \ | ||
62 | ({ \ | ||
63 | static struct nmiaction fn##_na __initdata = { \ | ||
64 | .handler = (fn), \ | ||
65 | .name = (n), \ | ||
66 | .flags = (fg), \ | ||
67 | }; \ | ||
68 | __register_nmi_handler((t), &fn##_na); \ | ||
69 | }) | ||
70 | |||
57 | int __register_nmi_handler(unsigned int, struct nmiaction *); | 71 | int __register_nmi_handler(unsigned int, struct nmiaction *); |
58 | 72 | ||
59 | void unregister_nmi_handler(unsigned int, const char *); | 73 | void unregister_nmi_handler(unsigned int, const char *); |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 43876f16caf1..cb00ccc7d571 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) | |||
47 | * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd | 47 | * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd |
48 | * operations. | 48 | * operations. |
49 | * | 49 | * |
50 | * Without THP if the mmap_sem is held for reading, the | 50 | * Without THP if the mmap_sem is held for reading, the pmd can only |
51 | * pmd can only transition from null to not null while pmd_read_atomic runs. | 51 | * transition from null to not null while pmd_read_atomic runs. So |
52 | * So there's no need of literally reading it atomically. | 52 | * we can always return atomic pmd values with this function. |
53 | * | 53 | * |
54 | * With THP if the mmap_sem is held for reading, the pmd can become | 54 | * With THP if the mmap_sem is held for reading, the pmd can become |
55 | * THP or null or point to a pte (and in turn become "stable") at any | 55 | * trans_huge or none or point to a pte (and in turn become "stable") |
56 | * time under pmd_read_atomic, so it's mandatory to read it atomically | 56 | * at any time under pmd_read_atomic. We could read it really |
57 | * with cmpxchg8b. | 57 | * atomically here with an atomic64_read for the THP enabled case (and |
58 | * it would be a whole lot simpler), but to avoid using cmpxchg8b we | ||
59 | * only return an atomic pmdval if the low part of the pmdval is later | ||
60 | * found stable (i.e. pointing to a pte). And we're returning a none | ||
61 | * pmdval if the low part of the pmd is none. In some cases the high | ||
62 | * and low part of the pmdval returned may not be consistent if THP is | ||
63 | * enabled (the low part may point to previously mapped hugepage, | ||
64 | * while the high part may point to a more recently mapped hugepage), | ||
65 | * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part | ||
66 | * of the pmd to be read atomically to decide if the pmd is unstable | ||
67 | * or not, with the only exception of when the low part of the pmd is | ||
68 | * zero in which case we return a none pmd. | ||
58 | */ | 69 | */ |
59 | #ifndef CONFIG_TRANSPARENT_HUGEPAGE | ||
60 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) | 70 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) |
61 | { | 71 | { |
62 | pmdval_t ret; | 72 | pmdval_t ret; |
@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp) | |||
74 | 84 | ||
75 | return (pmd_t) { ret }; | 85 | return (pmd_t) { ret }; |
76 | } | 86 | } |
77 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
78 | static inline pmd_t pmd_read_atomic(pmd_t *pmdp) | ||
79 | { | ||
80 | return (pmd_t) { atomic64_read((atomic64_t *)pmdp) }; | ||
81 | } | ||
82 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
83 | 87 | ||
84 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | 88 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
85 | { | 89 | { |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 04cd6882308e..e1f3a17034fc 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -33,9 +33,8 @@ | |||
33 | #define segment_eq(a, b) ((a).seg == (b).seg) | 33 | #define segment_eq(a, b) ((a).seg == (b).seg) |
34 | 34 | ||
35 | #define user_addr_max() (current_thread_info()->addr_limit.seg) | 35 | #define user_addr_max() (current_thread_info()->addr_limit.seg) |
36 | #define __addr_ok(addr) \ | 36 | #define __addr_ok(addr) \ |
37 | ((unsigned long __force)(addr) < \ | 37 | ((unsigned long __force)(addr) < user_addr_max()) |
38 | (current_thread_info()->addr_limit.seg)) | ||
39 | 38 | ||
40 | /* | 39 | /* |
41 | * Test whether a block of memory is a valid user space address. | 40 | * Test whether a block of memory is a valid user space address. |
@@ -47,14 +46,14 @@ | |||
47 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... | 46 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... |
48 | */ | 47 | */ |
49 | 48 | ||
50 | #define __range_not_ok(addr, size) \ | 49 | #define __range_not_ok(addr, size, limit) \ |
51 | ({ \ | 50 | ({ \ |
52 | unsigned long flag, roksum; \ | 51 | unsigned long flag, roksum; \ |
53 | __chk_user_ptr(addr); \ | 52 | __chk_user_ptr(addr); \ |
54 | asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ | 53 | asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ |
55 | : "=&r" (flag), "=r" (roksum) \ | 54 | : "=&r" (flag), "=r" (roksum) \ |
56 | : "1" (addr), "g" ((long)(size)), \ | 55 | : "1" (addr), "g" ((long)(size)), \ |
57 | "rm" (current_thread_info()->addr_limit.seg)); \ | 56 | "rm" (limit)); \ |
58 | flag; \ | 57 | flag; \ |
59 | }) | 58 | }) |
60 | 59 | ||
@@ -77,7 +76,8 @@ | |||
77 | * checks that the pointer is in the user space range - after calling | 76 | * checks that the pointer is in the user space range - after calling |
78 | * this function, memory access functions may still return -EFAULT. | 77 | * this function, memory access functions may still return -EFAULT. |
79 | */ | 78 | */ |
80 | #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) | 79 | #define access_ok(type, addr, size) \ |
80 | (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) | ||
81 | 81 | ||
82 | /* | 82 | /* |
83 | * The exception table consists of pairs of addresses relative to the | 83 | * The exception table consists of pairs of addresses relative to the |
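For anyone decoding the add/sbb/cmp/sbb sequence above: it computes addr + size with an explicit carry so that a wrapping sum is treated as out of range, then compares the sum against the limit. A hedged C model of __range_not_ok(addr, size, limit) — the model function name is mine, not a kernel symbol:

#include <stdbool.h>
#include <stdint.h>

/* Returns true when [addr, addr + size) is NOT an acceptable user range. */
static bool range_not_ok_model(uintptr_t addr, uintptr_t size,
			       uintptr_t limit)
{
	uintptr_t end = addr + size;

	if (end < addr)		/* the sum carried: the first sbb */
		return true;
	return end > limit;	/* the cmp/sbb pair against the limit */
}

access_ok() above is then just the negation of this check with limit = user_addr_max(), while perf's valid_user_frame() (later in this merge) passes TASK_SIZE instead.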
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index becf47b81735..6149b476d9df 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -149,7 +149,6 @@ | |||
149 | /* 4 bits of software ack period */ | 149 | /* 4 bits of software ack period */ |
150 | #define UV2_ACK_MASK 0x7UL | 150 | #define UV2_ACK_MASK 0x7UL |
151 | #define UV2_ACK_UNITS_SHFT 3 | 151 | #define UV2_ACK_UNITS_SHFT 3 |
152 | #define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT | ||
153 | #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT | 152 | #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT |
154 | 153 | ||
155 | /* | 154 | /* |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 6e76c191a835..d5fd66f0d4cd 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/suspend.h> | 22 | #include <linux/suspend.h> |
23 | #include <linux/kmemleak.h> | ||
24 | #include <asm/e820.h> | 23 | #include <asm/e820.h> |
25 | #include <asm/io.h> | 24 | #include <asm/io.h> |
26 | #include <asm/iommu.h> | 25 | #include <asm/iommu.h> |
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void) | |||
95 | return 0; | 94 | return 0; |
96 | } | 95 | } |
97 | memblock_reserve(addr, aper_size); | 96 | memblock_reserve(addr, aper_size); |
98 | /* | ||
99 | * Kmemleak should not scan this block as it may not be mapped via the | ||
100 | * kernel direct mapping. | ||
101 | */ | ||
102 | kmemleak_ignore(phys_to_virt(addr)); | ||
103 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", | 97 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", |
104 | aper_size >> 10, addr); | 98 | aper_size >> 10, addr); |
105 | insert_aperture_resource((u32)addr, aper_size); | 99 | insert_aperture_resource((u32)addr, aper_size); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ac96561d1a99..5f0ff597437c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | |||
1195 | BUG_ON(!cfg->vector); | 1195 | BUG_ON(!cfg->vector); |
1196 | 1196 | ||
1197 | vector = cfg->vector; | 1197 | vector = cfg->vector; |
1198 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) | 1198 | for_each_cpu(cpu, cfg->domain) |
1199 | per_cpu(vector_irq, cpu)[vector] = -1; | 1199 | per_cpu(vector_irq, cpu)[vector] = -1; |
1200 | 1200 | ||
1201 | cfg->vector = 0; | 1201 | cfg->vector = 0; |
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | |||
1203 | 1203 | ||
1204 | if (likely(!cfg->move_in_progress)) | 1204 | if (likely(!cfg->move_in_progress)) |
1205 | return; | 1205 | return; |
1206 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { | 1206 | for_each_cpu(cpu, cfg->old_domain) { |
1207 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | 1207 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; |
1208 | vector++) { | 1208 | vector++) { |
1209 | if (per_cpu(vector_irq, cpu)[vector] != irq) | 1209 | if (per_cpu(vector_irq, cpu)[vector] != irq) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 0a687fd185e6..da27c5d2168a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data) | |||
1274 | */ | 1274 | */ |
1275 | iv = __this_cpu_read(mce_next_interval); | 1275 | iv = __this_cpu_read(mce_next_interval); |
1276 | if (mce_notify_irq()) | 1276 | if (mce_notify_irq()) |
1277 | iv = max(iv, (unsigned long) HZ/100); | 1277 | iv = max(iv / 2, (unsigned long) HZ/100); |
1278 | else | 1278 | else |
1279 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); | 1279 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); |
1280 | __this_cpu_write(mce_next_interval, iv); | 1280 | __this_cpu_write(mce_next_interval, iv); |
@@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) | |||
1557 | static void __mcheck_cpu_init_timer(void) | 1557 | static void __mcheck_cpu_init_timer(void) |
1558 | { | 1558 | { |
1559 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1559 | struct timer_list *t = &__get_cpu_var(mce_timer); |
1560 | unsigned long iv = __this_cpu_read(mce_next_interval); | 1560 | unsigned long iv = check_interval * HZ; |
1561 | 1561 | ||
1562 | setup_timer(t, mce_timer_fn, smp_processor_id()); | 1562 | setup_timer(t, mce_timer_fn, smp_processor_id()); |
1563 | 1563 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index e049d6da0183..c4706cf9c011 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void) | |||
1496 | if (!cpuc->shared_regs) | 1496 | if (!cpuc->shared_regs) |
1497 | goto error; | 1497 | goto error; |
1498 | } | 1498 | } |
1499 | cpuc->is_fake = 1; | ||
1499 | return cpuc; | 1500 | return cpuc; |
1500 | error: | 1501 | error: |
1501 | free_fake_cpuc(cpuc); | 1502 | free_fake_cpuc(cpuc); |
@@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
1756 | dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); | 1757 | dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); |
1757 | } | 1758 | } |
1758 | 1759 | ||
1760 | static inline int | ||
1761 | valid_user_frame(const void __user *fp, unsigned long size) | ||
1762 | { | ||
1763 | return (__range_not_ok(fp, size, TASK_SIZE) == 0); | ||
1764 | } | ||
1765 | |||
1759 | #ifdef CONFIG_COMPAT | 1766 | #ifdef CONFIG_COMPAT |
1760 | 1767 | ||
1761 | #include <asm/compat.h> | 1768 | #include <asm/compat.h> |
@@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
1780 | if (bytes != sizeof(frame)) | 1787 | if (bytes != sizeof(frame)) |
1781 | break; | 1788 | break; |
1782 | 1789 | ||
1783 | if (fp < compat_ptr(regs->sp)) | 1790 | if (!valid_user_frame(fp, sizeof(frame))) |
1784 | break; | 1791 | break; |
1785 | 1792 | ||
1786 | perf_callchain_store(entry, frame.return_address); | 1793 | perf_callchain_store(entry, frame.return_address); |
@@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
1826 | if (bytes != sizeof(frame)) | 1833 | if (bytes != sizeof(frame)) |
1827 | break; | 1834 | break; |
1828 | 1835 | ||
1829 | if ((unsigned long)fp < regs->sp) | 1836 | if (!valid_user_frame(fp, sizeof(frame))) |
1830 | break; | 1837 | break; |
1831 | 1838 | ||
1832 | perf_callchain_store(entry, frame.return_address); | 1839 | perf_callchain_store(entry, frame.return_address); |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 6638aaf54493..7241e2fc3c17 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -117,6 +117,7 @@ struct cpu_hw_events { | |||
117 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ | 117 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ |
118 | 118 | ||
119 | unsigned int group_flag; | 119 | unsigned int group_flag; |
120 | int is_fake; | ||
120 | 121 | ||
121 | /* | 122 | /* |
122 | * Intel DebugStore bits | 123 | * Intel DebugStore bits |
@@ -364,6 +365,7 @@ struct x86_pmu { | |||
364 | int pebs_record_size; | 365 | int pebs_record_size; |
365 | void (*drain_pebs)(struct pt_regs *regs); | 366 | void (*drain_pebs)(struct pt_regs *regs); |
366 | struct event_constraint *pebs_constraints; | 367 | struct event_constraint *pebs_constraints; |
368 | void (*pebs_aliases)(struct perf_event *event); | ||
367 | 369 | ||
368 | /* | 370 | /* |
369 | * Intel LBR | 371 | * Intel LBR |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 166546ec6aef..187c294bc658 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event) | |||
1119 | return NULL; | 1119 | return NULL; |
1120 | } | 1120 | } |
1121 | 1121 | ||
1122 | static bool intel_try_alt_er(struct perf_event *event, int orig_idx) | 1122 | static int intel_alt_er(int idx) |
1123 | { | 1123 | { |
1124 | if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) | 1124 | if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) |
1125 | return false; | 1125 | return idx; |
1126 | 1126 | ||
1127 | if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { | 1127 | if (idx == EXTRA_REG_RSP_0) |
1128 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 1128 | return EXTRA_REG_RSP_1; |
1129 | event->hw.config |= 0x01bb; | 1129 | |
1130 | event->hw.extra_reg.idx = EXTRA_REG_RSP_1; | 1130 | if (idx == EXTRA_REG_RSP_1) |
1131 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; | 1131 | return EXTRA_REG_RSP_0; |
1132 | } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { | 1132 | |
1133 | return idx; | ||
1134 | } | ||
1135 | |||
1136 | static void intel_fixup_er(struct perf_event *event, int idx) | ||
1137 | { | ||
1138 | event->hw.extra_reg.idx = idx; | ||
1139 | |||
1140 | if (idx == EXTRA_REG_RSP_0) { | ||
1133 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 1141 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; |
1134 | event->hw.config |= 0x01b7; | 1142 | event->hw.config |= 0x01b7; |
1135 | event->hw.extra_reg.idx = EXTRA_REG_RSP_0; | ||
1136 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; | 1143 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; |
1144 | } else if (idx == EXTRA_REG_RSP_1) { | ||
1145 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | ||
1146 | event->hw.config |= 0x01bb; | ||
1147 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; | ||
1137 | } | 1148 | } |
1138 | |||
1139 | if (event->hw.extra_reg.idx == orig_idx) | ||
1140 | return false; | ||
1141 | |||
1142 | return true; | ||
1143 | } | 1149 | } |
1144 | 1150 | ||
1145 | /* | 1151 | /* |
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, | |||
1157 | struct event_constraint *c = &emptyconstraint; | 1163 | struct event_constraint *c = &emptyconstraint; |
1158 | struct er_account *era; | 1164 | struct er_account *era; |
1159 | unsigned long flags; | 1165 | unsigned long flags; |
1160 | int orig_idx = reg->idx; | 1166 | int idx = reg->idx; |
1161 | 1167 | ||
1162 | /* already allocated shared msr */ | 1168 | /* |
1163 | if (reg->alloc) | 1169 | * reg->alloc can be set due to existing state, so for fake cpuc we |
1170 | * need to ignore this, otherwise we might fail to allocate proper fake | ||
1171 | * state for this extra reg constraint. Also see the comment below. | ||
1172 | */ | ||
1173 | if (reg->alloc && !cpuc->is_fake) | ||
1164 | return NULL; /* call x86_get_event_constraint() */ | 1174 | return NULL; /* call x86_get_event_constraint() */ |
1165 | 1175 | ||
1166 | again: | 1176 | again: |
1167 | era = &cpuc->shared_regs->regs[reg->idx]; | 1177 | era = &cpuc->shared_regs->regs[idx]; |
1168 | /* | 1178 | /* |
1169 | * we use spin_lock_irqsave() to avoid lockdep issues when | 1179 | * we use spin_lock_irqsave() to avoid lockdep issues when |
1170 | * passing a fake cpuc | 1180 | * passing a fake cpuc |
@@ -1173,6 +1183,29 @@ again: | |||
1173 | 1183 | ||
1174 | if (!atomic_read(&era->ref) || era->config == reg->config) { | 1184 | if (!atomic_read(&era->ref) || era->config == reg->config) { |
1175 | 1185 | ||
1186 | /* | ||
1187 | * If its a fake cpuc -- as per validate_{group,event}() we | ||
1188 | * shouldn't touch event state and we can avoid doing so | ||
1189 | * since both will only call get_event_constraints() once | ||
1190 | * on each event, this avoids the need for reg->alloc. | ||
1191 | * | ||
1192 | * Not doing the ER fixup will only result in era->reg being | ||
1193 | * wrong, but since we won't actually try and program hardware | ||
1194 | * this isn't a problem either. | ||
1195 | */ | ||
1196 | if (!cpuc->is_fake) { | ||
1197 | if (idx != reg->idx) | ||
1198 | intel_fixup_er(event, idx); | ||
1199 | |||
1200 | /* | ||
1201 | * x86_schedule_events() can call get_event_constraints() | ||
1202 | * multiple times on events in the case of incremental | ||
1203 | * scheduling(). reg->alloc ensures we only do the ER | ||
1204 | * allocation once. | ||
1205 | */ | ||
1206 | reg->alloc = 1; | ||
1207 | } | ||
1208 | |||
1176 | /* lock in msr value */ | 1209 | /* lock in msr value */ |
1177 | era->config = reg->config; | 1210 | era->config = reg->config; |
1178 | era->reg = reg->reg; | 1211 | era->reg = reg->reg; |
@@ -1180,17 +1213,17 @@ again: | |||
1180 | /* one more user */ | 1213 | /* one more user */ |
1181 | atomic_inc(&era->ref); | 1214 | atomic_inc(&era->ref); |
1182 | 1215 | ||
1183 | /* no need to reallocate during incremental event scheduling */ | ||
1184 | reg->alloc = 1; | ||
1185 | |||
1186 | /* | 1216 | /* |
1187 | * need to call x86_get_event_constraint() | 1217 | * need to call x86_get_event_constraint() |
1188 | * to check if associated event has constraints | 1218 | * to check if associated event has constraints |
1189 | */ | 1219 | */ |
1190 | c = NULL; | 1220 | c = NULL; |
1191 | } else if (intel_try_alt_er(event, orig_idx)) { | 1221 | } else { |
1192 | raw_spin_unlock_irqrestore(&era->lock, flags); | 1222 | idx = intel_alt_er(idx); |
1193 | goto again; | 1223 | if (idx != reg->idx) { |
1224 | raw_spin_unlock_irqrestore(&era->lock, flags); | ||
1225 | goto again; | ||
1226 | } | ||
1194 | } | 1227 | } |
1195 | raw_spin_unlock_irqrestore(&era->lock, flags); | 1228 | raw_spin_unlock_irqrestore(&era->lock, flags); |
1196 | 1229 | ||
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, | |||
1204 | struct er_account *era; | 1237 | struct er_account *era; |
1205 | 1238 | ||
1206 | /* | 1239 | /* |
1207 | * only put constraint if extra reg was actually | 1240 | * Only put constraint if extra reg was actually allocated. Also takes |
1208 | * allocated. Also takes care of event which do | 1241 | * care of event which do not use an extra shared reg. |
1209 | * not use an extra shared reg | 1242 | * |
1243 | * Also, if this is a fake cpuc we shouldn't touch any event state | ||
1244 | * (reg->alloc) and we don't care about leaving inconsistent cpuc state | ||
1245 | * either since it'll be thrown out. | ||
1210 | */ | 1246 | */ |
1211 | if (!reg->alloc) | 1247 | if (!reg->alloc || cpuc->is_fake) |
1212 | return; | 1248 | return; |
1213 | 1249 | ||
1214 | era = &cpuc->shared_regs->regs[reg->idx]; | 1250 | era = &cpuc->shared_regs->regs[reg->idx]; |
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | |||
1300 | intel_put_shared_regs_event_constraints(cpuc, event); | 1336 | intel_put_shared_regs_event_constraints(cpuc, event); |
1301 | } | 1337 | } |
1302 | 1338 | ||
1303 | static int intel_pmu_hw_config(struct perf_event *event) | 1339 | static void intel_pebs_aliases_core2(struct perf_event *event) |
1304 | { | 1340 | { |
1305 | int ret = x86_pmu_hw_config(event); | 1341 | if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { |
1306 | |||
1307 | if (ret) | ||
1308 | return ret; | ||
1309 | |||
1310 | if (event->attr.precise_ip && | ||
1311 | (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { | ||
1312 | /* | 1342 | /* |
1313 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P | 1343 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P |
1314 | * (0x003c) so that we can use it with PEBS. | 1344 | * (0x003c) so that we can use it with PEBS. |
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
1329 | */ | 1359 | */ |
1330 | u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); | 1360 | u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); |
1331 | 1361 | ||
1362 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | ||
1363 | event->hw.config = alt_config; | ||
1364 | } | ||
1365 | } | ||
1366 | |||
1367 | static void intel_pebs_aliases_snb(struct perf_event *event) | ||
1368 | { | ||
1369 | if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { | ||
1370 | /* | ||
1371 | * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P | ||
1372 | * (0x003c) so that we can use it with PEBS. | ||
1373 | * | ||
1374 | * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't | ||
1375 | * PEBS capable. However we can use UOPS_RETIRED.ALL | ||
1376 | * (0x01c2), which is a PEBS capable event, to get the same | ||
1377 | * count. | ||
1378 | * | ||
1379 | * UOPS_RETIRED.ALL counts the number of cycles that retire | ||
1380 | * CNTMASK micro-ops. By setting CNTMASK to a value (16) | ||
1381 | * larger than the maximum number of micro-ops that can be | ||
1382 | * retired per cycle (4) and then inverting the condition, we | ||
1383 | * count all cycles that retire 16 or less micro-ops, which | ||
1384 | * is every cycle. | ||
1385 | * | ||
1386 | * Thereby we gain a PEBS capable cycle counter. | ||
1387 | */ | ||
1388 | u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); | ||
1332 | 1389 | ||
1333 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | 1390 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); |
1334 | event->hw.config = alt_config; | 1391 | event->hw.config = alt_config; |
1335 | } | 1392 | } |
1393 | } | ||
1394 | |||
1395 | static int intel_pmu_hw_config(struct perf_event *event) | ||
1396 | { | ||
1397 | int ret = x86_pmu_hw_config(event); | ||
1398 | |||
1399 | if (ret) | ||
1400 | return ret; | ||
1401 | |||
1402 | if (event->attr.precise_ip && x86_pmu.pebs_aliases) | ||
1403 | x86_pmu.pebs_aliases(event); | ||
1336 | 1404 | ||
1337 | if (intel_pmu_needs_lbr_smpl(event)) { | 1405 | if (intel_pmu_needs_lbr_smpl(event)) { |
1338 | ret = intel_pmu_setup_lbr_filter(event); | 1406 | ret = intel_pmu_setup_lbr_filter(event); |
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
1607 | .max_period = (1ULL << 31) - 1, | 1675 | .max_period = (1ULL << 31) - 1, |
1608 | .get_event_constraints = intel_get_event_constraints, | 1676 | .get_event_constraints = intel_get_event_constraints, |
1609 | .put_event_constraints = intel_put_event_constraints, | 1677 | .put_event_constraints = intel_put_event_constraints, |
1678 | .pebs_aliases = intel_pebs_aliases_core2, | ||
1610 | 1679 | ||
1611 | .format_attrs = intel_arch3_formats_attr, | 1680 | .format_attrs = intel_arch3_formats_attr, |
1612 | 1681 | ||
@@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void) | |||
1840 | break; | 1909 | break; |
1841 | 1910 | ||
1842 | case 42: /* SandyBridge */ | 1911 | case 42: /* SandyBridge */ |
1843 | x86_add_quirk(intel_sandybridge_quirk); | ||
1844 | case 45: /* SandyBridge, "Romely-EP" */ | 1912 | case 45: /* SandyBridge, "Romely-EP" */ |
1913 | x86_add_quirk(intel_sandybridge_quirk); | ||
1914 | case 58: /* IvyBridge */ | ||
1845 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 1915 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
1846 | sizeof(hw_cache_event_ids)); | 1916 | sizeof(hw_cache_event_ids)); |
1847 | 1917 | ||
@@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void) | |||
1849 | 1919 | ||
1850 | x86_pmu.event_constraints = intel_snb_event_constraints; | 1920 | x86_pmu.event_constraints = intel_snb_event_constraints; |
1851 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; | 1921 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; |
1922 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | ||
1852 | x86_pmu.extra_regs = intel_snb_extra_regs; | 1923 | x86_pmu.extra_regs = intel_snb_extra_regs; |
1853 | /* all extra regs are per-cpu when HT is on */ | 1924 | /* all extra regs are per-cpu when HT is on */ |
1854 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 1925 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
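As a concrete illustration of the CNTMASK/INV trick spelled out in intel_pebs_aliases_snb() above — and assuming the architectural PERFEVTSEL bit layout (event select in bits 0-7, unit mask in 8-15, INV at bit 23, CMASK in 24-31) — the alternative encoding works out to the raw value below. A standalone sketch, not kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* UOPS_RETIRED.ALL (0x01c2) with CMASK = 16 and INV = 1: count
	 * every cycle that retires 16 or fewer micro-ops, i.e. every cycle. */
	uint64_t snb_cycles_alias = 0xc2 | (0x01u << 8) | (1u << 23) | (16u << 24);

	printf("0x%llx\n", (unsigned long long)snb_cycles_alias);	/* 0x108001c2 */
	return 0;
}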
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 5a3edc27f6e5..35e2192df9f4 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { | |||
400 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ | 400 | INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ |
401 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ | 401 | INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ |
402 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ | 402 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ |
403 | INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ | 403 | INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ |
404 | INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ | ||
405 | INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ | ||
406 | INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ | ||
407 | INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ | ||
408 | INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ | ||
409 | INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ | ||
410 | INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ | ||
411 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ | 404 | INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ |
412 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ | 405 | INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ |
413 | INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ | 406 | INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 086eb58c6e80..f1b42b3a186c 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void) | |||
120 | bool ret = false; | 120 | bool ret = false; |
121 | struct pvclock_vcpu_time_info *src; | 121 | struct pvclock_vcpu_time_info *src; |
122 | 122 | ||
123 | /* | ||
124 | * per_cpu() is safe here because this function is only called from | ||
125 | * timer functions where preemption is already disabled. | ||
126 | */ | ||
127 | WARN_ON(!in_atomic()); | ||
128 | src = &__get_cpu_var(hv_clock); | 123 | src = &__get_cpu_var(hv_clock); |
129 | if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { | 124 | if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { |
130 | __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); | 125 | __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); |
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c index e31bf8d5c4d2..149b8d9c6ad4 100644 --- a/arch/x86/kernel/nmi_selftest.c +++ b/arch/x86/kernel/nmi_selftest.c | |||
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs) | |||
42 | static void __init init_nmi_testsuite(void) | 42 | static void __init init_nmi_testsuite(void) |
43 | { | 43 | { |
44 | /* trap all the unknown NMIs we may generate */ | 44 | /* trap all the unknown NMIs we may generate */ |
45 | register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); | 45 | register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); |
46 | } | 46 | } |
47 | 47 | ||
48 | static void __init cleanup_nmi_testsuite(void) | 48 | static void __init cleanup_nmi_testsuite(void) |
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask) | |||
64 | { | 64 | { |
65 | unsigned long timeout; | 65 | unsigned long timeout; |
66 | 66 | ||
67 | if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, | 67 | if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback, |
68 | NMI_FLAG_FIRST, "nmi_selftest")) { | 68 | NMI_FLAG_FIRST, "nmi_selftest")) { |
69 | nmi_fail = FAILURE; | 69 | nmi_fail = FAILURE; |
70 | return; | 70 | return; |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 62c9457ccd2f..c0f420f76cd3 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
100 | struct dma_attrs *attrs) | 100 | struct dma_attrs *attrs) |
101 | { | 101 | { |
102 | unsigned long dma_mask; | 102 | unsigned long dma_mask; |
103 | struct page *page = NULL; | 103 | struct page *page; |
104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
105 | dma_addr_t addr; | 105 | dma_addr_t addr; |
106 | 106 | ||
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
108 | 108 | ||
109 | flag |= __GFP_ZERO; | 109 | flag |= __GFP_ZERO; |
110 | again: | 110 | again: |
111 | page = NULL; | ||
111 | if (!(flag & GFP_ATOMIC)) | 112 | if (!(flag & GFP_ATOMIC)) |
112 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); | 113 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); |
113 | if (!page) | 114 | if (!page) |
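
Aside on the pci-dma hunk above: resetting page under the again: label matters because on a retry the contiguous-allocation branch can be skipped (GFP_ATOMIC), and a stale pointer left over from the previous pass would then slip past the !page fallback check. The stand-alone sketch below mirrors that shape; the allocator names and the "address too high" test are invented for illustration and are not kernel APIs.

    #include <stdlib.h>

    static void *alloc_cma(void)   { return NULL; }      /* pretend CMA is unavailable */
    static void *alloc_buddy(void) { return malloc(64); }
    static int addr_too_high(void *p, int first_try) { (void)p; return first_try; }

    static void *alloc_coherent(int atomic)
    {
        void *page;
        int first_try = 1;
    again:
        page = NULL;                 /* the fix: without this reset, a pointer freed
                                        below survives into the next pass whenever
                                        the CMA branch is skipped */
        if (!atomic)
            page = alloc_cma();
        if (!page)
            page = alloc_buddy();
        if (addr_too_high(page, first_try)) {
            free(page);
            first_try = 0;           /* retry, e.g. restricted to a lower zone */
            goto again;
        }
        return page;
    }

    int main(void) { free(alloc_coherent(1)); return 0; }
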
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 79c45af81604..25b48edb847c 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -639,9 +639,11 @@ void native_machine_shutdown(void) | |||
639 | set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); | 639 | set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); |
640 | 640 | ||
641 | /* | 641 | /* |
642 | * O.K Now that I'm on the appropriate processor, | 642 | * O.K Now that I'm on the appropriate processor, stop all of the |
643 | * stop all of the others. | 643 | * others. Also disable the local irq to not receive the per-cpu |
644 | * timer interrupt which may trigger scheduler's load balance. | ||
644 | */ | 645 | */ |
646 | local_irq_disable(); | ||
645 | stop_other_cpus(); | 647 | stop_other_cpus(); |
646 | #endif | 648 | #endif |
647 | 649 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f56f96da77f5..7bd8a0823654 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
349 | 349 | ||
350 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 350 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
351 | { | 351 | { |
352 | if (c->phys_proc_id == o->phys_proc_id) | 352 | if (c->phys_proc_id == o->phys_proc_id) { |
353 | return topology_sane(c, o, "mc"); | 353 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) |
354 | return true; | ||
354 | 355 | ||
356 | return topology_sane(c, o, "mc"); | ||
357 | } | ||
355 | return false; | 358 | return false; |
356 | } | 359 | } |
357 | 360 | ||
@@ -382,6 +385,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
382 | if ((i == cpu) || (has_mc && match_llc(c, o))) | 385 | if ((i == cpu) || (has_mc && match_llc(c, o))) |
383 | link_mask(llc_shared, cpu, i); | 386 | link_mask(llc_shared, cpu, i); |
384 | 387 | ||
388 | } | ||
389 | |||
390 | /* | ||
391 | * This needs a separate iteration over the cpus because we rely on all | ||
392 | * cpu_sibling_mask links to be set-up. | ||
393 | */ | ||
394 | for_each_cpu(i, cpu_sibling_setup_mask) { | ||
395 | o = &cpu_data(i); | ||
396 | |||
385 | if ((i == cpu) || (has_mc && match_mc(c, o))) { | 397 | if ((i == cpu) || (has_mc && match_mc(c, o))) { |
386 | link_mask(core, cpu, i); | 398 | link_mask(core, cpu, i); |
387 | 399 | ||
@@ -410,15 +422,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
410 | /* maps the cpu to the sched domain representing multi-core */ | 422 | /* maps the cpu to the sched domain representing multi-core */ |
411 | const struct cpumask *cpu_coregroup_mask(int cpu) | 423 | const struct cpumask *cpu_coregroup_mask(int cpu) |
412 | { | 424 | { |
413 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 425 | return cpu_llc_shared_mask(cpu); |
414 | /* | ||
415 | * For perf, we return last level cache shared map. | ||
416 | * And for power savings, we return cpu_core_map | ||
417 | */ | ||
418 | if (!(cpu_has(c, X86_FEATURE_AMD_DCM))) | ||
419 | return cpu_core_mask(cpu); | ||
420 | else | ||
421 | return cpu_llc_shared_mask(cpu); | ||
422 | } | 426 | } |
423 | 427 | ||
424 | static void impress_friends(void) | 428 | static void impress_friends(void) |
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index f61ee67ec00f..4f74d94c8d97 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | 9 | ||
10 | #include <asm/word-at-a-time.h> | 10 | #include <asm/word-at-a-time.h> |
11 | #include <linux/sched.h> | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * best effort, GUP based copy_from_user() that is NMI-safe | 14 | * best effort, GUP based copy_from_user() that is NMI-safe |
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
21 | void *map; | 22 | void *map; |
22 | int ret; | 23 | int ret; |
23 | 24 | ||
25 | if (__range_not_ok(from, n, TASK_SIZE)) | ||
26 | return len; | ||
27 | |||
24 | do { | 28 | do { |
25 | ret = __get_user_pages_fast(addr, 1, 0, &page); | 29 | ret = __get_user_pages_fast(addr, 1, 0, &page); |
26 | if (!ret) | 30 | if (!ret) |
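
Context for the usercopy hunk: the added check bails out before any page walking when the requested user range is bogus, so an NMI-time copy never touches kernel addresses. Below is a user-space rendition of the idea behind __range_not_ok(); the real kernel macro is written differently, and TASK_SIZE here is a made-up stand-in value.

    #include <stdint.h>
    #include <stdio.h>

    /* Reject a range that wraps around or reaches past the allowed limit. */
    static int range_not_ok(uintptr_t addr, uintptr_t size, uintptr_t limit)
    {
        uintptr_t end = addr + size;

        if (end < addr)                 /* addition wrapped */
            return 1;
        return end > limit;
    }

    int main(void)
    {
        uintptr_t task_size = 0xC0000000u;   /* made-up stand-in for TASK_SIZE */

        printf("%d %d\n",
               range_not_ok(0x1000, 0x100, task_size),        /* 0: in range */
               range_not_ok((uintptr_t)-16, 64, task_size));  /* 1: wraps    */
        return 0;
    }
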
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 819137904428..5d7e51f3fd28 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt | |||
@@ -28,7 +28,7 @@ | |||
28 | # - (66): the last prefix is 0x66 | 28 | # - (66): the last prefix is 0x66 |
29 | # - (F3): the last prefix is 0xF3 | 29 | # - (F3): the last prefix is 0xF3 |
30 | # - (F2): the last prefix is 0xF2 | 30 | # - (F2): the last prefix is 0xF2 |
31 | # | 31 | # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) |
32 | 32 | ||
33 | Table: one byte opcode | 33 | Table: one byte opcode |
34 | Referrer: | 34 | Referrer: |
@@ -515,12 +515,12 @@ b4: LFS Gv,Mp | |||
515 | b5: LGS Gv,Mp | 515 | b5: LGS Gv,Mp |
516 | b6: MOVZX Gv,Eb | 516 | b6: MOVZX Gv,Eb |
517 | b7: MOVZX Gv,Ew | 517 | b7: MOVZX Gv,Ew |
518 | b8: JMPE | POPCNT Gv,Ev (F3) | 518 | b8: JMPE (!F3) | POPCNT Gv,Ev (F3) |
519 | b9: Grp10 (1A) | 519 | b9: Grp10 (1A) |
520 | ba: Grp8 Ev,Ib (1A) | 520 | ba: Grp8 Ev,Ib (1A) |
521 | bb: BTC Ev,Gv | 521 | bb: BTC Ev,Gv |
522 | bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) | 522 | bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3) |
523 | bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) | 523 | bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3) |
524 | be: MOVSX Gv,Eb | 524 | be: MOVSX Gv,Eb |
525 | bf: MOVSX Gv,Ew | 525 | bf: MOVSX Gv,Ew |
526 | # 0x0f 0xc0-0xcf | 526 | # 0x0f 0xc0-0xcf |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 97141c26a13a..bc4e9d84157f 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en | |||
62 | extra += PMD_SIZE; | 62 | extra += PMD_SIZE; |
63 | #endif | 63 | #endif |
64 | /* The first 2/4M doesn't use large pages. */ | 64 | /* The first 2/4M doesn't use large pages. */ |
65 | extra += mr->end - mr->start; | 65 | if (mr->start < PMD_SIZE) |
66 | extra += mr->end - mr->start; | ||
66 | 67 | ||
67 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; | 68 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; |
68 | } else | 69 | } else |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index be1ef574ce9a..78fe3f1ac49f 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -180,7 +180,7 @@ err_free_memtype: | |||
180 | 180 | ||
181 | /** | 181 | /** |
182 | * ioremap_nocache - map bus memory into CPU space | 182 | * ioremap_nocache - map bus memory into CPU space |
183 | * @offset: bus address of the memory | 183 | * @phys_addr: bus address of the memory |
184 | * @size: size of the resource to map | 184 | * @size: size of the resource to map |
185 | * | 185 | * |
186 | * ioremap_nocache performs a platform specific sequence of operations to | 186 | * ioremap_nocache performs a platform specific sequence of operations to |
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache); | |||
217 | 217 | ||
218 | /** | 218 | /** |
219 | * ioremap_wc - map memory into CPU space write combined | 219 | * ioremap_wc - map memory into CPU space write combined |
220 | * @offset: bus address of the memory | 220 | * @phys_addr: bus address of the memory |
221 | * @size: size of the resource to map | 221 | * @size: size of the resource to map |
222 | * | 222 | * |
223 | * This version of ioremap ensures that the memory is marked write combining. | 223 | * This version of ioremap ensures that the memory is marked write combining. |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e1ebde315210..a718e0d23503 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end) | |||
122 | 122 | ||
123 | /** | 123 | /** |
124 | * clflush_cache_range - flush a cache range with clflush | 124 | * clflush_cache_range - flush a cache range with clflush |
125 | * @addr: virtual start address | 125 | * @vaddr: virtual start address |
126 | * @size: number of bytes to flush | 126 | * @size: number of bytes to flush |
127 | * | 127 | * |
128 | * clflush is an unordered instruction which needs fencing with mfence | 128 | * clflush is an unordered instruction which needs fencing with mfence |
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 732af3a96183..4599c3e8bcb6 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c | |||
@@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
176 | return; | 176 | return; |
177 | } | 177 | } |
178 | 178 | ||
179 | node_set(node, numa_nodes_parsed); | ||
180 | |||
179 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", | 181 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", |
180 | node, pxm, | 182 | node, pxm, |
181 | (unsigned long long) start, (unsigned long long) end - 1); | 183 | (unsigned long long) start, (unsigned long long) end - 1); |
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c index 3c6e328483c7..028454f0c3a5 100644 --- a/arch/x86/platform/mrst/early_printk_mrst.c +++ b/arch/x86/platform/mrst/early_printk_mrst.c | |||
@@ -110,19 +110,16 @@ static struct kmsg_dumper dw_dumper; | |||
110 | static int dumper_registered; | 110 | static int dumper_registered; |
111 | 111 | ||
112 | static void dw_kmsg_dump(struct kmsg_dumper *dumper, | 112 | static void dw_kmsg_dump(struct kmsg_dumper *dumper, |
113 | enum kmsg_dump_reason reason, | 113 | enum kmsg_dump_reason reason) |
114 | const char *s1, unsigned long l1, | ||
115 | const char *s2, unsigned long l2) | ||
116 | { | 114 | { |
117 | int i; | 115 | static char line[1024]; |
116 | size_t len; | ||
118 | 117 | ||
119 | /* When run to this, we'd better re-init the HW */ | 118 | /* When run to this, we'd better re-init the HW */ |
120 | mrst_early_console_init(); | 119 | mrst_early_console_init(); |
121 | 120 | ||
122 | for (i = 0; i < l1; i++) | 121 | while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) |
123 | early_mrst_console.write(&early_mrst_console, s1 + i, 1); | 122 | early_mrst_console.write(&early_mrst_console, line, len); |
124 | for (i = 0; i < l2; i++) | ||
125 | early_mrst_console.write(&early_mrst_console, s2 + i, 1); | ||
126 | } | 123 | } |
127 | 124 | ||
128 | /* Set the ratio rate to 115200, 8n1, IRQ disabled */ | 125 | /* Set the ratio rate to 115200, 8n1, IRQ disabled */ |
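
For readers unfamiliar with the 3.5 kmsg API change driving the early_printk_mrst hunk: a dumper callback no longer receives two raw (buffer, length) pairs; it pulls already-formatted lines with kmsg_dump_get_line() until the log is drained. A minimal sketch of a new-style dumper follows; write_to_device() is a placeholder for whatever output path a real driver has.

    #include <linux/kmsg_dump.h>

    static void write_to_device(const char *buf, size_t len)
    {
        /* placeholder: push buf[0..len) out through the driver's console */
    }

    static void sketch_dump(struct kmsg_dumper *dumper,
                            enum kmsg_dump_reason reason)
    {
        static char line[1024];
        size_t len;

        /* Pull complete log lines until kmsg_dump_get_line() reports no more. */
        while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
            write_to_device(line, len);
    }

    static struct kmsg_dumper sketch_dumper = {
        .dump = sketch_dump,
    };
    /* registered from init code with kmsg_dump_register(&sketch_dumper) */
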
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e31bcd8f2eee..fd41a9262d65 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier); | |||
782 | EXPORT_SYMBOL_GPL(intel_scu_notifier); | 782 | EXPORT_SYMBOL_GPL(intel_scu_notifier); |
783 | 783 | ||
784 | /* Called by IPC driver */ | 784 | /* Called by IPC driver */ |
785 | void intel_scu_devices_create(void) | 785 | void __devinit intel_scu_devices_create(void) |
786 | { | 786 | { |
787 | int i; | 787 | int i; |
788 | 788 | ||
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 3ae0e61abd23..59880afa851f 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void) | |||
1295 | */ | 1295 | */ |
1296 | mmr_image |= (1L << SOFTACK_MSHIFT); | 1296 | mmr_image |= (1L << SOFTACK_MSHIFT); |
1297 | if (is_uv2_hub()) { | 1297 | if (is_uv2_hub()) { |
1298 | mmr_image &= ~(1L << UV2_LEG_SHFT); | ||
1299 | mmr_image |= (1L << UV2_EXT_SHFT); | 1298 | mmr_image |= (1L << UV2_EXT_SHFT); |
1300 | } | 1299 | } |
1301 | write_mmr_misc_control(pnode, mmr_image); | 1300 | write_mmr_misc_control(pnode, mmr_image); |
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 5f6a5b6c3a15..ddcf39b1a18d 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk | |||
@@ -66,9 +66,10 @@ BEGIN { | |||
66 | rex_expr = "^REX(\\.[XRWB]+)*" | 66 | rex_expr = "^REX(\\.[XRWB]+)*" |
67 | fpu_expr = "^ESC" # TODO | 67 | fpu_expr = "^ESC" # TODO |
68 | 68 | ||
69 | lprefix1_expr = "\\(66\\)" | 69 | lprefix1_expr = "\\((66|!F3)\\)" |
70 | lprefix2_expr = "\\(F3\\)" | 70 | lprefix2_expr = "\\(F3\\)" |
71 | lprefix3_expr = "\\(F2\\)" | 71 | lprefix3_expr = "\\((F2|!F3)\\)" |
72 | lprefix_expr = "\\((66|F2|F3)\\)" | ||
72 | max_lprefix = 4 | 73 | max_lprefix = 4 |
73 | 74 | ||
74 | # All opcodes starting with lower-case 'v' or with (v1) superscript | 75 | # All opcodes starting with lower-case 'v' or with (v1) superscript |
@@ -333,13 +334,16 @@ function convert_operands(count,opnd, i,j,imm,mod) | |||
333 | if (match(ext, lprefix1_expr)) { | 334 | if (match(ext, lprefix1_expr)) { |
334 | lptable1[idx] = add_flags(lptable1[idx],flags) | 335 | lptable1[idx] = add_flags(lptable1[idx],flags) |
335 | variant = "INAT_VARIANT" | 336 | variant = "INAT_VARIANT" |
336 | } else if (match(ext, lprefix2_expr)) { | 337 | } |
338 | if (match(ext, lprefix2_expr)) { | ||
337 | lptable2[idx] = add_flags(lptable2[idx],flags) | 339 | lptable2[idx] = add_flags(lptable2[idx],flags) |
338 | variant = "INAT_VARIANT" | 340 | variant = "INAT_VARIANT" |
339 | } else if (match(ext, lprefix3_expr)) { | 341 | } |
342 | if (match(ext, lprefix3_expr)) { | ||
340 | lptable3[idx] = add_flags(lptable3[idx],flags) | 343 | lptable3[idx] = add_flags(lptable3[idx],flags) |
341 | variant = "INAT_VARIANT" | 344 | variant = "INAT_VARIANT" |
342 | } else { | 345 | } |
346 | if (!match(ext, lprefix_expr)){ | ||
343 | table[idx] = add_flags(table[idx],flags) | 347 | table[idx] = add_flags(table[idx],flags) |
344 | } | 348 | } |
345 | } | 349 | } |
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c index 416bd40c0eba..68d1dc91b37b 100644 --- a/arch/x86/um/sys_call_table_32.c +++ b/arch/x86/um/sys_call_table_32.c | |||
@@ -39,9 +39,9 @@ | |||
39 | #undef __SYSCALL_I386 | 39 | #undef __SYSCALL_I386 |
40 | #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, | 40 | #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, |
41 | 41 | ||
42 | typedef void (*sys_call_ptr_t)(void); | 42 | typedef asmlinkage void (*sys_call_ptr_t)(void); |
43 | 43 | ||
44 | extern void sys_ni_syscall(void); | 44 | extern asmlinkage void sys_ni_syscall(void); |
45 | 45 | ||
46 | const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { | 46 | const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { |
47 | /* | 47 | /* |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index e74df9548a02..ff962d4b821e 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -209,6 +209,9 @@ static void __init xen_banner(void) | |||
209 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); | 209 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); |
210 | } | 210 | } |
211 | 211 | ||
212 | #define CPUID_THERM_POWER_LEAF 6 | ||
213 | #define APERFMPERF_PRESENT 0 | ||
214 | |||
212 | static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; | 215 | static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; |
213 | static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; | 216 | static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; |
214 | 217 | ||
@@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
242 | *dx = cpuid_leaf5_edx_val; | 245 | *dx = cpuid_leaf5_edx_val; |
243 | return; | 246 | return; |
244 | 247 | ||
248 | case CPUID_THERM_POWER_LEAF: | ||
249 | /* Disabling APERFMPERF for kernel usage */ | ||
250 | maskecx = ~(1 << APERFMPERF_PRESENT); | ||
251 | break; | ||
252 | |||
245 | case 0xb: | 253 | case 0xb: |
246 | /* Suppress extended topology stuff */ | 254 | /* Suppress extended topology stuff */ |
247 | maskebx = 0; | 255 | maskebx = 0; |
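
Note on the two constants the xen/enlighten hunk introduces: CPUID leaf 6 is the thermal/power leaf, and ECX bit 0 of that leaf advertises the IA32_APERF/IA32_MPERF pair; clearing the bit keeps the paravirtualized kernel from relying on MSRs the hypervisor does not provide meaningfully. The masking arithmetic in isolation (the example ECX value is made up):

    #include <stdio.h>

    #define CPUID_THERM_POWER_LEAF 6
    #define APERFMPERF_PRESENT     0     /* ECX bit 0 of leaf 6 */

    int main(void)
    {
        unsigned int ecx = 0x00000009;   /* pretend hardware sets bits 0 and 3 */
        unsigned int maskecx = ~(1u << APERFMPERF_PRESENT);

        ecx &= maskecx;                  /* guest now sees APERF/MPERF as absent */
        printf("filtered ecx = %#x\n", ecx);   /* prints 0x8 */
        return 0;
    }
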
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index ffd08c414e91..64effdc6da94 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page, | |||
706 | unsigned long uninitialized_var(address); | 706 | unsigned long uninitialized_var(address); |
707 | unsigned level; | 707 | unsigned level; |
708 | pte_t *ptep = NULL; | 708 | pte_t *ptep = NULL; |
709 | int ret = 0; | ||
709 | 710 | ||
710 | pfn = page_to_pfn(page); | 711 | pfn = page_to_pfn(page); |
711 | if (!PageHighMem(page)) { | 712 | if (!PageHighMem(page)) { |
@@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page, | |||
741 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); | 742 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); |
742 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 743 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
743 | 744 | ||
745 | /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in | ||
746 | * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other | ||
747 | * pfn so that the following mfn_to_pfn(mfn) calls will return the | ||
748 | * pfn from the m2p_override (the backend pfn) instead. | ||
749 | * We need to do this because the pages shared by the frontend | ||
750 | * (xen-blkfront) can be already locked (lock_page, called by | ||
751 | * do_read_cache_page); when the userspace backend tries to use them | ||
752 | * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so | ||
753 | * do_blockdev_direct_IO is going to try to lock the same pages | ||
754 | * again resulting in a deadlock. | ||
755 | * As a side effect get_user_pages_fast might not be safe on the | ||
756 | * frontend pages while they are being shared with the backend, | ||
757 | * because mfn_to_pfn (that ends up being called by GUPF) will | ||
758 | * return the backend pfn rather than the frontend pfn. */ | ||
759 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); | ||
760 | if (ret == 0 && get_phys_to_machine(pfn) == mfn) | ||
761 | set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); | ||
762 | |||
744 | return 0; | 763 | return 0; |
745 | } | 764 | } |
746 | EXPORT_SYMBOL_GPL(m2p_add_override); | 765 | EXPORT_SYMBOL_GPL(m2p_add_override); |
@@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte) | |||
752 | unsigned long uninitialized_var(address); | 771 | unsigned long uninitialized_var(address); |
753 | unsigned level; | 772 | unsigned level; |
754 | pte_t *ptep = NULL; | 773 | pte_t *ptep = NULL; |
774 | int ret = 0; | ||
755 | 775 | ||
756 | pfn = page_to_pfn(page); | 776 | pfn = page_to_pfn(page); |
757 | mfn = get_phys_to_machine(pfn); | 777 | mfn = get_phys_to_machine(pfn); |
@@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte) | |||
821 | } else | 841 | } else |
822 | set_phys_to_machine(pfn, page->index); | 842 | set_phys_to_machine(pfn, page->index); |
823 | 843 | ||
844 | /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present | ||
845 | * somewhere in this domain, even before being added to the | ||
846 | * m2p_override (see comment above in m2p_add_override). | ||
847 | * If there are no other entries in the m2p_override corresponding | ||
848 | * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for | ||
849 | * the original pfn (the one shared by the frontend): the backend | ||
850 | * cannot do any IO on this page anymore because it has been | ||
851 | * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of | ||
852 | * the original pfn causes mfn_to_pfn(mfn) to return the frontend | ||
853 | * pfn again. */ | ||
854 | mfn &= ~FOREIGN_FRAME_BIT; | ||
855 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); | ||
856 | if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && | ||
857 | m2p_find_override(mfn) == NULL) | ||
858 | set_phys_to_machine(pfn, mfn); | ||
859 | |||
824 | return 0; | 860 | return 0; |
825 | } | 861 | } |
826 | EXPORT_SYMBOL_GPL(m2p_remove_override); | 862 | EXPORT_SYMBOL_GPL(m2p_remove_override); |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 3ebba0753d38..a4790bf22c59 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -371,7 +371,8 @@ char * __init xen_memory_setup(void) | |||
371 | populated = xen_populate_chunk(map, memmap.nr_entries, | 371 | populated = xen_populate_chunk(map, memmap.nr_entries, |
372 | max_pfn, &last_pfn, xen_released_pages); | 372 | max_pfn, &last_pfn, xen_released_pages); |
373 | 373 | ||
374 | extra_pages += (xen_released_pages - populated); | 374 | xen_released_pages -= populated; |
375 | extra_pages += xen_released_pages; | ||
375 | 376 | ||
376 | if (last_pfn > max_pfn) { | 377 | if (last_pfn > max_pfn) { |
377 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); | 378 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); |
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 7608559de93a..f973754ddf90 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile | |||
@@ -68,8 +68,8 @@ endif | |||
68 | 68 | ||
69 | # Only build variant and/or platform if it includes a Makefile | 69 | # Only build variant and/or platform if it includes a Makefile |
70 | 70 | ||
71 | buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/) | 71 | buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/) |
72 | buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/) | 72 | buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/) |
73 | 73 | ||
74 | # Find libgcc.a | 74 | # Find libgcc.a |
75 | 75 | ||
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 0b9f2e13c781..c1dacca312f3 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h | |||
@@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, | |||
31 | asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, | 31 | asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, |
32 | struct timespec __user *tsp, const sigset_t __user *sigmask, | 32 | struct timespec __user *tsp, const sigset_t __user *sigmask, |
33 | size_t sigsetsize); | 33 | size_t sigsetsize); |
34 | 34 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, | |
35 | 35 | size_t sigsetsize); | |
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index b9f8e5850d3a..efe4e854b3cd 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c | |||
@@ -493,7 +493,7 @@ static void do_signal(struct pt_regs *regs) | |||
493 | if (ret) | 493 | if (ret) |
494 | return; | 494 | return; |
495 | 495 | ||
496 | signal_delivered(signr, info, ka, regs, 0); | 496 | signal_delivered(signr, &info, &ka, regs, 0); |
497 | if (current->ptrace & PT_SINGLESTEP) | 497 | if (current->ptrace & PT_SINGLESTEP) |
498 | task_pt_regs(current)->icountlevel = 1; | 498 | task_pt_regs(current)->icountlevel = 1; |
499 | 499 | ||
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 88ecea3facb4..ee2e2089483d 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S | |||
@@ -83,7 +83,6 @@ SECTIONS | |||
83 | 83 | ||
84 | _text = .; | 84 | _text = .; |
85 | _stext = .; | 85 | _stext = .; |
86 | _ftext = .; | ||
87 | 86 | ||
88 | .text : | 87 | .text : |
89 | { | 88 | { |
@@ -112,7 +111,7 @@ SECTIONS | |||
112 | EXCEPTION_TABLE(16) | 111 | EXCEPTION_TABLE(16) |
113 | /* Data section */ | 112 | /* Data section */ |
114 | 113 | ||
115 | _fdata = .; | 114 | _sdata = .; |
116 | RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) | 115 | RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) |
117 | _edata = .; | 116 | _edata = .; |
118 | 117 | ||
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index ba150e5de2eb..db955179da2d 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c | |||
@@ -26,11 +26,7 @@ | |||
26 | 26 | ||
27 | #include <asm/bootparam.h> | 27 | #include <asm/bootparam.h> |
28 | #include <asm/page.h> | 28 | #include <asm/page.h> |
29 | 29 | #include <asm/sections.h> | |
30 | /* References to section boundaries */ | ||
31 | |||
32 | extern char _ftext, _etext, _fdata, _edata, _rodata_end; | ||
33 | extern char __init_begin, __init_end; | ||
34 | 30 | ||
35 | /* | 31 | /* |
36 | * mem_reserve(start, end, must_exist) | 32 | * mem_reserve(start, end, must_exist) |
@@ -197,9 +193,9 @@ void __init mem_init(void) | |||
197 | reservedpages++; | 193 | reservedpages++; |
198 | } | 194 | } |
199 | 195 | ||
200 | codesize = (unsigned long) &_etext - (unsigned long) &_ftext; | 196 | codesize = (unsigned long) _etext - (unsigned long) _stext; |
201 | datasize = (unsigned long) &_edata - (unsigned long) &_fdata; | 197 | datasize = (unsigned long) _edata - (unsigned long) _sdata; |
202 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 198 | initsize = (unsigned long) __init_end - (unsigned long) __init_begin; |
203 | 199 | ||
204 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " | 200 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " |
205 | "%ldk data, %ldk init %ldk highmem)\n", | 201 | "%ldk data, %ldk init %ldk highmem)\n", |
@@ -237,7 +233,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
237 | 233 | ||
238 | void free_initmem(void) | 234 | void free_initmem(void) |
239 | { | 235 | { |
240 | free_reserved_mem(&__init_begin, &__init_end); | 236 | free_reserved_mem(__init_begin, __init_end); |
241 | printk("Freeing unused kernel memory: %dk freed\n", | 237 | printk("Freeing unused kernel memory: %zuk freed\n", |
242 | (&__init_end - &__init_begin) >> 10); | 238 | (__init_end - __init_begin) >> 10); |
243 | } | 239 | } |
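
Background for the xtensa mm/init.c changes: <asm/sections.h> (via asm-generic) declares the linker-provided boundaries as incomplete char arrays, so the symbol name itself already yields the address and the explicit '&' of the old private declarations goes away. A compile-only sketch of the two styles; the *_old name is invented for contrast and is not a real symbol.

    /* old, file-local style: declared as a single char, address taken with '&' */
    extern char _etext_old;
    #define CODE_END_OLD ((unsigned long)&_etext_old)

    /* <asm/sections.h> style: an incomplete array, so the name decays to the
     * address the linker script assigned and no '&' is needed */
    extern char _stext[], _etext[];
    #define KERNEL_TEXT_SIZE ((unsigned long)(_etext - _stext))
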
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 47768ff87343..80998958cf45 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -208,7 +208,7 @@ config ACPI_IPMI | |||
208 | 208 | ||
209 | config ACPI_HOTPLUG_CPU | 209 | config ACPI_HOTPLUG_CPU |
210 | bool | 210 | bool |
211 | depends on ACPI_PROCESSOR && HOTPLUG_CPU | 211 | depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU |
212 | select ACPI_CONTAINER | 212 | select ACPI_CONTAINER |
213 | default y | 213 | default y |
214 | 214 | ||
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 86933ca8b472..7dd3f9fb9f3f 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery) | |||
643 | 643 | ||
644 | static void acpi_battery_refresh(struct acpi_battery *battery) | 644 | static void acpi_battery_refresh(struct acpi_battery *battery) |
645 | { | 645 | { |
646 | int power_unit; | ||
647 | |||
646 | if (!battery->bat.dev) | 648 | if (!battery->bat.dev) |
647 | return; | 649 | return; |
648 | 650 | ||
651 | power_unit = battery->power_unit; | ||
652 | |||
649 | acpi_battery_get_info(battery); | 653 | acpi_battery_get_info(battery); |
650 | /* The battery may have changed its reporting units. */ | 654 | |
655 | if (power_unit == battery->power_unit) | ||
656 | return; | ||
657 | |||
658 | /* The battery has changed its reporting units. */ | ||
651 | sysfs_remove_battery(battery); | 659 | sysfs_remove_battery(battery); |
652 | sysfs_add_battery(battery); | 660 | sysfs_add_battery(battery); |
653 | } | 661 | } |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 3188da3df8da..adceafda9c17 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); | |||
182 | Power Management | 182 | Power Management |
183 | -------------------------------------------------------------------------- */ | 183 | -------------------------------------------------------------------------- */ |
184 | 184 | ||
185 | static const char *state_string(int state) | ||
186 | { | ||
187 | switch (state) { | ||
188 | case ACPI_STATE_D0: | ||
189 | return "D0"; | ||
190 | case ACPI_STATE_D1: | ||
191 | return "D1"; | ||
192 | case ACPI_STATE_D2: | ||
193 | return "D2"; | ||
194 | case ACPI_STATE_D3_HOT: | ||
195 | return "D3hot"; | ||
196 | case ACPI_STATE_D3_COLD: | ||
197 | return "D3"; | ||
198 | default: | ||
199 | return "(unknown)"; | ||
200 | } | ||
201 | } | ||
202 | |||
185 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) | 203 | static int __acpi_bus_get_power(struct acpi_device *device, int *state) |
186 | { | 204 | { |
187 | int result = 0; | 205 | int result = ACPI_STATE_UNKNOWN; |
188 | acpi_status status = 0; | ||
189 | unsigned long long psc = 0; | ||
190 | 206 | ||
191 | if (!device || !state) | 207 | if (!device || !state) |
192 | return -EINVAL; | 208 | return -EINVAL; |
193 | 209 | ||
194 | *state = ACPI_STATE_UNKNOWN; | 210 | if (!device->flags.power_manageable) { |
195 | |||
196 | if (device->flags.power_manageable) { | ||
197 | /* | ||
198 | * Get the device's power state either directly (via _PSC) or | ||
199 | * indirectly (via power resources). | ||
200 | */ | ||
201 | if (device->power.flags.power_resources) { | ||
202 | result = acpi_power_get_inferred_state(device, state); | ||
203 | if (result) | ||
204 | return result; | ||
205 | } else if (device->power.flags.explicit_get) { | ||
206 | status = acpi_evaluate_integer(device->handle, "_PSC", | ||
207 | NULL, &psc); | ||
208 | if (ACPI_FAILURE(status)) | ||
209 | return -ENODEV; | ||
210 | *state = (int)psc; | ||
211 | } | ||
212 | } else { | ||
213 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ | 211 | /* TBD: Non-recursive algorithm for walking up hierarchy. */ |
214 | *state = device->parent ? | 212 | *state = device->parent ? |
215 | device->parent->power.state : ACPI_STATE_D0; | 213 | device->parent->power.state : ACPI_STATE_D0; |
214 | goto out; | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * Get the device's power state either directly (via _PSC) or | ||
219 | * indirectly (via power resources). | ||
220 | */ | ||
221 | if (device->power.flags.explicit_get) { | ||
222 | unsigned long long psc; | ||
223 | acpi_status status = acpi_evaluate_integer(device->handle, | ||
224 | "_PSC", NULL, &psc); | ||
225 | if (ACPI_FAILURE(status)) | ||
226 | return -ENODEV; | ||
227 | |||
228 | result = psc; | ||
229 | } | ||
230 | /* The test below covers ACPI_STATE_UNKNOWN too. */ | ||
231 | if (result <= ACPI_STATE_D2) { | ||
232 | ; /* Do nothing. */ | ||
233 | } else if (device->power.flags.power_resources) { | ||
234 | int error = acpi_power_get_inferred_state(device, &result); | ||
235 | if (error) | ||
236 | return error; | ||
237 | } else if (result == ACPI_STATE_D3_HOT) { | ||
238 | result = ACPI_STATE_D3; | ||
216 | } | 239 | } |
240 | *state = result; | ||
217 | 241 | ||
218 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", | 242 | out: |
219 | device->pnp.bus_id, *state)); | 243 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", |
244 | device->pnp.bus_id, state_string(*state))); | ||
220 | 245 | ||
221 | return 0; | 246 | return 0; |
222 | } | 247 | } |
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
234 | /* Make sure this is a valid target state */ | 259 | /* Make sure this is a valid target state */ |
235 | 260 | ||
236 | if (state == device->power.state) { | 261 | if (state == device->power.state) { |
237 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", | 262 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n", |
238 | state)); | 263 | state_string(state))); |
239 | return 0; | 264 | return 0; |
240 | } | 265 | } |
241 | 266 | ||
242 | if (!device->power.states[state].flags.valid) { | 267 | if (!device->power.states[state].flags.valid) { |
243 | printk(KERN_WARNING PREFIX "Device does not support D%d\n", state); | 268 | printk(KERN_WARNING PREFIX "Device does not support %s\n", |
269 | state_string(state)); | ||
244 | return -ENODEV; | 270 | return -ENODEV; |
245 | } | 271 | } |
246 | if (device->parent && (state < device->parent->power.state)) { | 272 | if (device->parent && (state < device->parent->power.state)) { |
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) | |||
294 | end: | 320 | end: |
295 | if (result) | 321 | if (result) |
296 | printk(KERN_WARNING PREFIX | 322 | printk(KERN_WARNING PREFIX |
297 | "Device [%s] failed to transition to D%d\n", | 323 | "Device [%s] failed to transition to %s\n", |
298 | device->pnp.bus_id, state); | 324 | device->pnp.bus_id, state_string(state)); |
299 | else { | 325 | else { |
300 | device->power.state = state; | 326 | device->power.state = state; |
301 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 327 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
302 | "Device [%s] transitioned to D%d\n", | 328 | "Device [%s] transitioned to %s\n", |
303 | device->pnp.bus_id, state)); | 329 | device->pnp.bus_id, state_string(state))); |
304 | } | 330 | } |
305 | 331 | ||
306 | return result; | 332 | return result; |
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 0500f719f63e..dd6d6a3c6780 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) | |||
631 | * We know a device's inferred power state when all the resources | 631 | * We know a device's inferred power state when all the resources |
632 | * required for a given D-state are 'on'. | 632 | * required for a given D-state are 'on'. |
633 | */ | 633 | */ |
634 | for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { | 634 | for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { |
635 | list = &device->power.states[i].resources; | 635 | list = &device->power.states[i].resources; |
636 | if (list->count < 1) | 636 | if (list->count < 1) |
637 | continue; | 637 | continue; |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 0af48a8554cd..a093dc163a42 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) | |||
333 | struct acpi_buffer state = { 0, NULL }; | 333 | struct acpi_buffer state = { 0, NULL }; |
334 | union acpi_object *pss = NULL; | 334 | union acpi_object *pss = NULL; |
335 | int i; | 335 | int i; |
336 | int last_invalid = -1; | ||
336 | 337 | ||
337 | 338 | ||
338 | status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); | 339 | status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); |
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) | |||
394 | ((u32)(px->core_frequency * 1000) != | 395 | ((u32)(px->core_frequency * 1000) != |
395 | (px->core_frequency * 1000))) { | 396 | (px->core_frequency * 1000))) { |
396 | printk(KERN_ERR FW_BUG PREFIX | 397 | printk(KERN_ERR FW_BUG PREFIX |
397 | "Invalid BIOS _PSS frequency: 0x%llx MHz\n", | 398 | "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n", |
398 | px->core_frequency); | 399 | pr->id, px->core_frequency); |
399 | result = -EFAULT; | 400 | if (last_invalid == -1) |
400 | kfree(pr->performance->states); | 401 | last_invalid = i; |
401 | goto end; | 402 | } else { |
403 | if (last_invalid != -1) { | ||
404 | /* | ||
405 | * Copy this valid entry over last_invalid entry | ||
406 | */ | ||
407 | memcpy(&(pr->performance->states[last_invalid]), | ||
408 | px, sizeof(struct acpi_processor_px)); | ||
409 | ++last_invalid; | ||
410 | } | ||
402 | } | 411 | } |
403 | } | 412 | } |
404 | 413 | ||
414 | if (last_invalid == 0) { | ||
415 | printk(KERN_ERR FW_BUG PREFIX | ||
416 | "No valid BIOS _PSS frequency found for processor %d\n", pr->id); | ||
417 | result = -EFAULT; | ||
418 | kfree(pr->performance->states); | ||
419 | pr->performance->states = NULL; | ||
420 | } | ||
421 | |||
422 | if (last_invalid > 0) | ||
423 | pr->performance->state_count = last_invalid; | ||
424 | |||
405 | end: | 425 | end: |
406 | kfree(buffer.pointer); | 426 | kfree(buffer.pointer); |
407 | 427 | ||
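
The processor_perflib change above replaces "abort on the first bad _PSS entry" with in-place compaction: each valid entry is copied over the earliest still-unfilled invalid slot and the state count is trimmed, and only an all-invalid table is treated as an error. A stand-alone sketch of the same compaction, using a plain int array in place of struct acpi_processor_px:

    #include <stdio.h>

    static int is_valid(int freq_mhz) { return freq_mhz > 0; }

    /* Compact valid entries toward the front; return the new count (0 == error). */
    static int compact_states(int *freq, int count)
    {
        int i, last_invalid = -1;

        for (i = 0; i < count; i++) {
            if (!is_valid(freq[i])) {
                if (last_invalid == -1)
                    last_invalid = i;            /* remember the first hole */
            } else if (last_invalid != -1) {
                freq[last_invalid++] = freq[i];  /* slide valid entry into the hole */
            }
        }
        return last_invalid == -1 ? count : last_invalid;
    }

    int main(void)
    {
        int f[] = { 2400, 0, 1800, 0, 1200 };
        int n = compact_states(f, 5);
        int i;

        for (i = 0; i < n; i++)
            printf("%d ", f[i]);                 /* prints: 2400 1800 1200 */
        printf("(count=%d)\n", n);
        return 0;
    }
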
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 85cbfdccc97c..c8a1f3b68110 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void) | |||
1567 | ACPI_BUS_TYPE_POWER_BUTTON, | 1567 | ACPI_BUS_TYPE_POWER_BUTTON, |
1568 | ACPI_STA_DEFAULT, | 1568 | ACPI_STA_DEFAULT, |
1569 | &ops); | 1569 | &ops); |
1570 | device_init_wakeup(&device->dev, true); | ||
1570 | } | 1571 | } |
1571 | 1572 | ||
1572 | if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { | 1573 | if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 74ee4ab577b6..88561029cca8 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); | |||
57 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); | 57 | MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); |
58 | 58 | ||
59 | static u8 sleep_states[ACPI_S_STATE_COUNT]; | 59 | static u8 sleep_states[ACPI_S_STATE_COUNT]; |
60 | static bool pwr_btn_event_pending; | ||
60 | 61 | ||
61 | static void acpi_sleep_tts_switch(u32 acpi_state) | 62 | static void acpi_sleep_tts_switch(u32 acpi_state) |
62 | { | 63 | { |
@@ -184,6 +185,14 @@ static int acpi_pm_prepare(void) | |||
184 | return error; | 185 | return error; |
185 | } | 186 | } |
186 | 187 | ||
188 | static int find_powerf_dev(struct device *dev, void *data) | ||
189 | { | ||
190 | struct acpi_device *device = to_acpi_device(dev); | ||
191 | const char *hid = acpi_device_hid(device); | ||
192 | |||
193 | return !strcmp(hid, ACPI_BUTTON_HID_POWERF); | ||
194 | } | ||
195 | |||
187 | /** | 196 | /** |
188 | * acpi_pm_finish - Instruct the platform to leave a sleep state. | 197 | * acpi_pm_finish - Instruct the platform to leave a sleep state. |
189 | * | 198 | * |
@@ -192,6 +201,7 @@ static int acpi_pm_prepare(void) | |||
192 | */ | 201 | */ |
193 | static void acpi_pm_finish(void) | 202 | static void acpi_pm_finish(void) |
194 | { | 203 | { |
204 | struct device *pwr_btn_dev; | ||
195 | u32 acpi_state = acpi_target_sleep_state; | 205 | u32 acpi_state = acpi_target_sleep_state; |
196 | 206 | ||
197 | acpi_ec_unblock_transactions(); | 207 | acpi_ec_unblock_transactions(); |
@@ -209,6 +219,23 @@ static void acpi_pm_finish(void) | |||
209 | acpi_set_firmware_waking_vector((acpi_physical_address) 0); | 219 | acpi_set_firmware_waking_vector((acpi_physical_address) 0); |
210 | 220 | ||
211 | acpi_target_sleep_state = ACPI_STATE_S0; | 221 | acpi_target_sleep_state = ACPI_STATE_S0; |
222 | |||
223 | /* If we were woken with the fixed power button, provide a small | ||
224 | * hint to userspace in the form of a wakeup event on the fixed power | ||
225 | * button device (if it can be found). | ||
226 | * | ||
227 | * We delay the event generation til now, as the PM layer requires | ||
228 | * timekeeping to be running before we generate events. */ | ||
229 | if (!pwr_btn_event_pending) | ||
230 | return; | ||
231 | |||
232 | pwr_btn_event_pending = false; | ||
233 | pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, | ||
234 | find_powerf_dev); | ||
235 | if (pwr_btn_dev) { | ||
236 | pm_wakeup_event(pwr_btn_dev, 0); | ||
237 | put_device(pwr_btn_dev); | ||
238 | } | ||
212 | } | 239 | } |
213 | 240 | ||
214 | /** | 241 | /** |
@@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state) | |||
298 | /* ACPI 3.0 specs (P62) says that it's the responsibility | 325 | /* ACPI 3.0 specs (P62) says that it's the responsibility |
299 | * of the OSPM to clear the status bit [ implying that the | 326 | * of the OSPM to clear the status bit [ implying that the |
300 | * POWER_BUTTON event should not reach userspace ] | 327 | * POWER_BUTTON event should not reach userspace ] |
328 | * | ||
329 | * However, we do generate a small hint for userspace in the form of | ||
330 | * a wakeup event. We flag this condition for now and generate the | ||
331 | * event later, as we're currently too early in resume to be able to | ||
332 | * generate wakeup events. | ||
301 | */ | 333 | */ |
302 | if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) | 334 | if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { |
303 | acpi_clear_event(ACPI_EVENT_POWER_BUTTON); | 335 | acpi_event_status pwr_btn_status; |
336 | |||
337 | acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); | ||
338 | |||
339 | if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { | ||
340 | acpi_clear_event(ACPI_EVENT_POWER_BUTTON); | ||
341 | /* Flag for later */ | ||
342 | pwr_btn_event_pending = true; | ||
343 | } | ||
344 | } | ||
304 | 345 | ||
305 | /* | 346 | /* |
306 | * Disable and clear GPE status before interrupt is enabled. Some GPEs | 347 | * Disable and clear GPE status before interrupt is enabled. Some GPEs |
@@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) | |||
730 | * can wake the system. _S0W may be valid, too. | 771 | * can wake the system. _S0W may be valid, too. |
731 | */ | 772 | */ |
732 | if (acpi_target_sleep_state == ACPI_STATE_S0 || | 773 | if (acpi_target_sleep_state == ACPI_STATE_S0 || |
733 | (device_may_wakeup(dev) && | 774 | (device_may_wakeup(dev) && adev->wakeup.flags.valid && |
734 | adev->wakeup.sleep_state <= acpi_target_sleep_state)) { | 775 | adev->wakeup.sleep_state >= acpi_target_sleep_state)) { |
735 | acpi_status status; | 776 | acpi_status status; |
736 | 777 | ||
737 | acpi_method[3] = 'W'; | 778 | acpi_method[3] = 'W'; |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 9577b6fa2650..a576575617d7 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -1687,10 +1687,6 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
1687 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); | 1687 | set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); |
1688 | set_bit(KEY_DISPLAY_OFF, input->keybit); | 1688 | set_bit(KEY_DISPLAY_OFF, input->keybit); |
1689 | 1689 | ||
1690 | error = input_register_device(input); | ||
1691 | if (error) | ||
1692 | goto err_stop_video; | ||
1693 | |||
1694 | printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", | 1690 | printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", |
1695 | ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), | 1691 | ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), |
1696 | video->flags.multihead ? "yes" : "no", | 1692 | video->flags.multihead ? "yes" : "no", |
@@ -1701,12 +1697,16 @@ static int acpi_video_bus_add(struct acpi_device *device) | |||
1701 | video->pm_nb.priority = 0; | 1697 | video->pm_nb.priority = 0; |
1702 | error = register_pm_notifier(&video->pm_nb); | 1698 | error = register_pm_notifier(&video->pm_nb); |
1703 | if (error) | 1699 | if (error) |
1704 | goto err_unregister_input_dev; | 1700 | goto err_stop_video; |
1701 | |||
1702 | error = input_register_device(input); | ||
1703 | if (error) | ||
1704 | goto err_unregister_pm_notifier; | ||
1705 | 1705 | ||
1706 | return 0; | 1706 | return 0; |
1707 | 1707 | ||
1708 | err_unregister_input_dev: | 1708 | err_unregister_pm_notifier: |
1709 | input_unregister_device(input); | 1709 | unregister_pm_notifier(&video->pm_nb); |
1710 | err_stop_video: | 1710 | err_stop_video: |
1711 | acpi_video_bus_stop_devices(video); | 1711 | acpi_video_bus_stop_devices(video); |
1712 | err_free_input_dev: | 1712 | err_free_input_dev: |
@@ -1743,9 +1743,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) | |||
1743 | return 0; | 1743 | return 0; |
1744 | } | 1744 | } |
1745 | 1745 | ||
1746 | static int __init is_i740(struct pci_dev *dev) | ||
1747 | { | ||
1748 | if (dev->device == 0x00D1) | ||
1749 | return 1; | ||
1750 | if (dev->device == 0x7000) | ||
1751 | return 1; | ||
1752 | return 0; | ||
1753 | } | ||
1754 | |||
1746 | static int __init intel_opregion_present(void) | 1755 | static int __init intel_opregion_present(void) |
1747 | { | 1756 | { |
1748 | #if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) | 1757 | int opregion = 0; |
1749 | struct pci_dev *dev = NULL; | 1758 | struct pci_dev *dev = NULL; |
1750 | u32 address; | 1759 | u32 address; |
1751 | 1760 | ||
@@ -1754,13 +1763,15 @@ static int __init intel_opregion_present(void) | |||
1754 | continue; | 1763 | continue; |
1755 | if (dev->vendor != PCI_VENDOR_ID_INTEL) | 1764 | if (dev->vendor != PCI_VENDOR_ID_INTEL) |
1756 | continue; | 1765 | continue; |
1766 | /* We don't want to poke around undefined i740 registers */ | ||
1767 | if (is_i740(dev)) | ||
1768 | continue; | ||
1757 | pci_read_config_dword(dev, 0xfc, &address); | 1769 | pci_read_config_dword(dev, 0xfc, &address); |
1758 | if (!address) | 1770 | if (!address) |
1759 | continue; | 1771 | continue; |
1760 | return 1; | 1772 | opregion = 1; |
1761 | } | 1773 | } |
1762 | #endif | 1774 | return opregion; |
1763 | return 0; | ||
1764 | } | 1775 | } |
1765 | 1776 | ||
1766 | int acpi_video_register(void) | 1777 | int acpi_video_register(void) |
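
The acpi_video_bus_add reshuffle follows the usual rule that error labels unwind in the reverse order of acquisition: once input_register_device() moves after register_pm_notifier(), its failure path must unregister the notifier rather than the input device. A generic sketch of the pattern; acquire_a/acquire_b/release_a are placeholders, not kernel calls.

    static int acquire_a(void) { return 0; }   /* think: register_pm_notifier()   */
    static int acquire_b(void) { return 0; }   /* think: input_register_device()  */
    static void release_a(void) { }            /* think: unregister_pm_notifier() */

    static int setup(void)
    {
        int err;

        err = acquire_a();
        if (err)
            return err;          /* nothing acquired yet, nothing to undo */

        err = acquire_b();
        if (err)
            goto err_release_a;  /* undo only what already succeeded */

        return 0;

    err_release_a:
        release_a();
        return err;
    }

    int main(void) { return setup(); }
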
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 3239517f4d90..ac6a5beb28f3 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Arasan Compact Flash host controller source file | 4 | * Arasan Compact Flash host controller source file |
5 | * | 5 | * |
6 | * Copyright (C) 2011 ST Microelectronics | 6 | * Copyright (C) 2011 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = { | |||
959 | 959 | ||
960 | module_platform_driver(arasan_cf_driver); | 960 | module_platform_driver(arasan_cf_driver); |
961 | 961 | ||
962 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 962 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
963 | MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); | 963 | MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); |
964 | MODULE_LICENSE("GPL"); | 964 | MODULE_LICENSE("GPL"); |
965 | MODULE_ALIAS("platform:" DRIVER_NAME); | 965 | MODULE_ALIAS("platform:" DRIVER_NAME); |
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 1b1cbb571d38..dcb8a6e48692 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
@@ -100,7 +100,7 @@ static void driver_deferred_probe_add(struct device *dev) | |||
100 | mutex_lock(&deferred_probe_mutex); | 100 | mutex_lock(&deferred_probe_mutex); |
101 | if (list_empty(&dev->p->deferred_probe)) { | 101 | if (list_empty(&dev->p->deferred_probe)) { |
102 | dev_dbg(dev, "Added to deferred list\n"); | 102 | dev_dbg(dev, "Added to deferred list\n"); |
103 | list_add(&dev->p->deferred_probe, &deferred_probe_pending_list); | 103 | list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list); |
104 | } | 104 | } |
105 | mutex_unlock(&deferred_probe_mutex); | 105 | mutex_unlock(&deferred_probe_mutex); |
106 | } | 106 | } |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0bcda488f11c..c89aa01fb1de 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev, | |||
246 | map->lock = regmap_lock_mutex; | 246 | map->lock = regmap_lock_mutex; |
247 | map->unlock = regmap_unlock_mutex; | 247 | map->unlock = regmap_unlock_mutex; |
248 | } | 248 | } |
249 | map->format.buf_size = (config->reg_bits + config->val_bits) / 8; | ||
250 | map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); | 249 | map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); |
251 | map->format.pad_bytes = config->pad_bits / 8; | 250 | map->format.pad_bytes = config->pad_bits / 8; |
252 | map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); | 251 | map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); |
253 | map->format.buf_size += map->format.pad_bytes; | 252 | map->format.buf_size = DIV_ROUND_UP(config->reg_bits + |
253 | config->val_bits + config->pad_bits, 8); | ||
254 | map->reg_shift = config->pad_bits % 8; | 254 | map->reg_shift = config->pad_bits % 8; |
255 | if (config->reg_stride) | 255 | if (config->reg_stride) |
256 | map->reg_stride = config->reg_stride; | 256 | map->reg_stride = config->reg_stride; |
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev, | |||
368 | 368 | ||
369 | ret = regcache_init(map, config); | 369 | ret = regcache_init(map, config); |
370 | if (ret < 0) | 370 | if (ret < 0) |
371 | goto err_free_workbuf; | 371 | goto err_debugfs; |
372 | 372 | ||
373 | /* Add a devres resource for dev_get_regmap() */ | 373 | /* Add a devres resource for dev_get_regmap() */ |
374 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); | 374 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); |
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev, | |||
383 | 383 | ||
384 | err_cache: | 384 | err_cache: |
385 | regcache_exit(map); | 385 | regcache_exit(map); |
386 | err_free_workbuf: | 386 | err_debugfs: |
387 | regmap_debugfs_exit(map); | ||
387 | kfree(map->work_buf); | 388 | kfree(map->work_buf); |
388 | err_map: | 389 | err_map: |
389 | kfree(map); | 390 | kfree(map); |
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) | |||
471 | 472 | ||
472 | return ret; | 473 | return ret; |
473 | } | 474 | } |
475 | EXPORT_SYMBOL_GPL(regmap_reinit_cache); | ||
474 | 476 | ||
475 | /** | 477 | /** |
476 | * regmap_exit(): Free a previously allocated register map | 478 | * regmap_exit(): Free a previously allocated register map |
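
Why the regmap buf_size formula changed: dividing each field's bit width by 8 separately truncates each remainder, so the sum can come out a byte short of what one DIV_ROUND_UP over the combined bit count gives. A quick worked check with made-up field widths:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* made-up widths, chosen so the sub-byte remainders matter */
        int reg_bits = 10, val_bits = 12, pad_bits = 0;

        int old_size = (reg_bits + val_bits) / 8 + pad_bits / 8;          /* 2: one byte short */
        int new_size = DIV_ROUND_UP(reg_bits + val_bits + pad_bits, 8);   /* 3: covers 22 bits */

        printf("old=%d new=%d\n", old_size, new_size);
        return 0;
    }
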
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index a058842f14fd..61ce4054b3c3 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c | |||
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc) | |||
139 | bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); | 139 | bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); |
140 | break; | 140 | break; |
141 | case 0x4331: | 141 | case 0x4331: |
142 | /* BCM4331 workaround is SPROM-related, we put it in sprom.c */ | 142 | case 43431: |
143 | /* Ext PA lines must be enabled for tx on BCM4331 */ | ||
144 | bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true); | ||
143 | break; | 145 | break; |
144 | case 43224: | 146 | case 43224: |
145 | if (bus->chipinfo.rev == 0) { | 147 | if (bus->chipinfo.rev == 0) { |
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index 9a96f14c8f47..c32ebd537abe 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c | |||
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc) | |||
232 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, | 232 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, |
233 | bool enable) | 233 | bool enable) |
234 | { | 234 | { |
235 | struct pci_dev *pdev = pc->core->bus->host_pci; | 235 | struct pci_dev *pdev; |
236 | u32 coremask, tmp; | 236 | u32 coremask, tmp; |
237 | int err = 0; | 237 | int err = 0; |
238 | 238 | ||
239 | if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) { | 239 | if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) { |
240 | /* This bcma device is not on a PCI host-bus. So the IRQs are | 240 | /* This bcma device is not on a PCI host-bus. So the IRQs are |
241 | * not routed through the PCI core. | 241 | * not routed through the PCI core. |
242 | * So we must not enable routing through the PCI core. */ | 242 | * So we must not enable routing through the PCI core. */ |
243 | goto out; | 243 | goto out; |
244 | } | 244 | } |
245 | 245 | ||
246 | pdev = pc->core->bus->host_pci; | ||
247 | |||
246 | err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); | 248 | err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); |
247 | if (err) | 249 | if (err) |
248 | goto out; | 250 | goto out; |
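
The driver_pci.c change also removes an ordering hazard: the old initializer dereferenced pc before the !pc and host-type check could run. A tiny sketch of the safe shape, with invented struct names:

    struct bus  { int host_irq; };
    struct core { struct bus *bus; };

    static int get_host_irq(const struct core *core)
    {
        if (!core || !core->bus)       /* validate before any dereference */
            return -1;

        return core->bus->host_irq;    /* safe only after the check above */
    }
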
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index c7f93359acb0..f16f42d36071 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c | |||
@@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus) | |||
579 | if (!sprom) | 579 | if (!sprom) |
580 | return -ENOMEM; | 580 | return -ENOMEM; |
581 | 581 | ||
582 | if (bus->chipinfo.id == 0x4331) | 582 | if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) |
583 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); | 583 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); |
584 | 584 | ||
585 | pr_debug("SPROM offset 0x%x\n", offset); | 585 | pr_debug("SPROM offset 0x%x\n", offset); |
586 | bcma_sprom_read(bus, offset, sprom); | 586 | bcma_sprom_read(bus, offset, sprom); |
587 | 587 | ||
588 | if (bus->chipinfo.id == 0x4331) | 588 | if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) |
589 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); | 589 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); |
590 | 590 | ||
591 | err = bcma_sprom_valid(sprom); | 591 | err = bcma_sprom_valid(sprom); |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 764f70c5e690..0a4185279417 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = { | |||
898 | ID(PCI_DEVICE_ID_INTEL_B43_HB), | 898 | ID(PCI_DEVICE_ID_INTEL_B43_HB), |
899 | ID(PCI_DEVICE_ID_INTEL_B43_1_HB), | 899 | ID(PCI_DEVICE_ID_INTEL_B43_1_HB), |
900 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), | 900 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), |
901 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), | ||
901 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), | 902 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), |
902 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), | 903 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), |
903 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), | 904 | ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c0091753a0d1..8e2d9140f300 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h | |||
@@ -212,6 +212,7 @@ | |||
212 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 | 212 | #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 |
213 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 | 213 | #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 |
214 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 | 214 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 |
215 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069 | ||
215 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 | 216 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 |
216 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 | 217 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 |
217 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 | 218 | #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 |
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index f518b99f53f5..731c9046cf7b 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c | |||
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, | |||
34 | u32 *data = buf; | 34 | u32 *data = buf; |
35 | 35 | ||
36 | /* data ready? */ | 36 | /* data ready? */ |
37 | if (readl(trng->base + TRNG_ODATA) & 1) { | 37 | if (readl(trng->base + TRNG_ISR) & 1) { |
38 | *data = readl(trng->base + TRNG_ODATA); | 38 | *data = readl(trng->base + TRNG_ODATA); |
39 | /* | ||
40 | ensure data ready is only set again AFTER the next data | ||
41 | word is ready in case it got set between checking ISR | ||
42 | and reading ODATA, so we don't risk re-reading the | ||
43 | same word | ||
44 | */ | ||
45 | readl(trng->base + TRNG_ISR); | ||
39 | return 4; | 46 | return 4; |
40 | } else | 47 | } else |
41 | return 0; | 48 | return 0; |
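
[Editor's note] A minimal user-space sketch of the read ordering the atmel-rng hunk above adopts. All names here (fake_trng, read_isr, read_random_word) are invented for the illustration; only the ordering mirrors the driver: check the status register, read the data register, then read the status register once more so a ready flag raised in between cannot make the next call hand back the same word.

#include <stdint.h>
#include <stdio.h>

struct fake_trng {
	uint32_t isr;    /* bit 0: data ready, cleared when the register is read */
	uint32_t odata;  /* latest random word */
};

/* Simulated clear-on-read status register. */
static uint32_t read_isr(struct fake_trng *t)
{
	uint32_t v = t->isr;
	t->isr = 0;
	return v;
}

static int read_random_word(struct fake_trng *t, uint32_t *out)
{
	if (!(read_isr(t) & 1))
		return 0;            /* nothing ready yet */

	*out = t->odata;             /* consume the current word */

	/*
	 * Discard any ready flag raised between the check above and the
	 * data read, so the next call cannot return the same word again.
	 */
	(void)read_isr(t);
	return 4;
}

int main(void)
{
	struct fake_trng t = { .isr = 1, .odata = 0xdeadbeef };
	uint32_t w;

	if (read_random_word(&t, &w))
		printf("got word 0x%08x\n", (unsigned)w);
	if (!read_random_word(&t, &w))
		printf("second call correctly reports no fresh data\n");
	return 0;
}
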
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c index af34074e702b..6756e7c3bc07 100644 --- a/drivers/clk/spear/clk-aux-synth.c +++ b/drivers/clk/spear/clk-aux-synth.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ST Microelectronics | 2 | * Copyright (C) 2012 ST Microelectronics |
3 | * Viresh Kumar <viresh.kumar@st.com> | 3 | * Viresh Kumar <viresh.linux@gmail.com> |
4 | * | 4 | * |
5 | * This file is licensed under the terms of the GNU General Public | 5 | * This file is licensed under the terms of the GNU General Public |
6 | * License version 2. This program is licensed "as is" without any | 6 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c index 4dbdb3fe18e0..958aa3ad1d60 100644 --- a/drivers/clk/spear/clk-frac-synth.c +++ b/drivers/clk/spear/clk-frac-synth.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ST Microelectronics | 2 | * Copyright (C) 2012 ST Microelectronics |
3 | * Viresh Kumar <viresh.kumar@st.com> | 3 | * Viresh Kumar <viresh.linux@gmail.com> |
4 | * | 4 | * |
5 | * This file is licensed under the terms of the GNU General Public | 5 | * This file is licensed under the terms of the GNU General Public |
6 | * License version 2. This program is licensed "as is" without any | 6 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c index b471c9762a97..1afc18c4effc 100644 --- a/drivers/clk/spear/clk-gpt-synth.c +++ b/drivers/clk/spear/clk-gpt-synth.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ST Microelectronics | 2 | * Copyright (C) 2012 ST Microelectronics |
3 | * Viresh Kumar <viresh.kumar@st.com> | 3 | * Viresh Kumar <viresh.linux@gmail.com> |
4 | * | 4 | * |
5 | * This file is licensed under the terms of the GNU General Public | 5 | * This file is licensed under the terms of the GNU General Public |
6 | * License version 2. This program is licensed "as is" without any | 6 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c index dcd4bdf4b0d9..5f1b6badeb15 100644 --- a/drivers/clk/spear/clk-vco-pll.c +++ b/drivers/clk/spear/clk-vco-pll.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ST Microelectronics | 2 | * Copyright (C) 2012 ST Microelectronics |
3 | * Viresh Kumar <viresh.kumar@st.com> | 3 | * Viresh Kumar <viresh.linux@gmail.com> |
4 | * | 4 | * |
5 | * This file is licensed under the terms of the GNU General Public | 5 | * This file is licensed under the terms of the GNU General Public |
6 | * License version 2. This program is licensed "as is" without any | 6 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c index 376d4e5ff326..7cd63788d546 100644 --- a/drivers/clk/spear/clk.c +++ b/drivers/clk/spear/clk.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ST Microelectronics | 2 | * Copyright (C) 2012 ST Microelectronics |
3 | * Viresh Kumar <viresh.kumar@st.com> | 3 | * Viresh Kumar <viresh.linux@gmail.com> |
4 | * | 4 | * |
5 | * This file is licensed under the terms of the GNU General Public | 5 | * This file is licensed under the terms of the GNU General Public |
6 | * License version 2. This program is licensed "as is" without any | 6 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h index 3321c46a071c..931737677dfa 100644 --- a/drivers/clk/spear/clk.h +++ b/drivers/clk/spear/clk.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * Clock framework definitions for SPEAr platform | 2 | * Clock framework definitions for SPEAr platform |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c index 42b68df9aeef..8f05652d53e6 100644 --- a/drivers/clk/spear/spear1310_clock.c +++ b/drivers/clk/spear/spear1310_clock.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr1310 machine clock framework source file | 4 | * SPEAr1310 machine clock framework source file |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c index f130919d5bf8..e3ea72162236 100644 --- a/drivers/clk/spear/spear1340_clock.c +++ b/drivers/clk/spear/spear1340_clock.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * SPEAr1340 machine clock framework source file | 4 | * SPEAr1340 machine clock framework source file |
5 | * | 5 | * |
6 | * Copyright (C) 2012 ST Microelectronics | 6 | * Copyright (C) 2012 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index 440bb3e4c971..01dd6daff2a1 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * SPEAr3xx machines clock framework source file | 2 | * SPEAr3xx machines clock framework source file |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c index f9a20b382304..554d64b062a1 100644 --- a/drivers/clk/spear/spear6xx_clock.c +++ b/drivers/clk/spear/spear6xx_clock.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * SPEAr6xx machines clock framework source file | 2 | * SPEAr6xx machines clock framework source file |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8d81a1d32653..dd3e661a124d 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o | |||
6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o | 6 | obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o |
7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o | 7 | obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o |
8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o | 8 | obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o |
9 | obj-$(CONFIG_EM_TIMER_STI) += em_sti.o | ||
9 | obj-$(CONFIG_CLKBLD_I8253) += i8253.o | 10 | obj-$(CONFIG_CLKBLD_I8253) += i8253.o |
10 | obj-$(CONFIG_CLKSRC_MMIO) += mmio.o | 11 | obj-$(CONFIG_CLKSRC_MMIO) += mmio.o |
11 | obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o | 12 | obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o |
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c new file mode 100644 index 000000000000..372051d1bba8 --- /dev/null +++ b/drivers/clocksource/em_sti.c | |||
@@ -0,0 +1,406 @@ | |||
1 | /* | ||
2 | * Emma Mobile Timer Support - STI | ||
3 | * | ||
4 | * Copyright (C) 2012 Magnus Damm | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/init.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/clk.h> | ||
27 | #include <linux/irq.h> | ||
28 | #include <linux/err.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/clocksource.h> | ||
31 | #include <linux/clockchips.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/module.h> | ||
34 | |||
35 | enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR }; | ||
36 | |||
37 | struct em_sti_priv { | ||
38 | void __iomem *base; | ||
39 | struct clk *clk; | ||
40 | struct platform_device *pdev; | ||
41 | unsigned int active[USER_NR]; | ||
42 | unsigned long rate; | ||
43 | raw_spinlock_t lock; | ||
44 | struct clock_event_device ced; | ||
45 | struct clocksource cs; | ||
46 | }; | ||
47 | |||
48 | #define STI_CONTROL 0x00 | ||
49 | #define STI_COMPA_H 0x10 | ||
50 | #define STI_COMPA_L 0x14 | ||
51 | #define STI_COMPB_H 0x18 | ||
52 | #define STI_COMPB_L 0x1c | ||
53 | #define STI_COUNT_H 0x20 | ||
54 | #define STI_COUNT_L 0x24 | ||
55 | #define STI_COUNT_RAW_H 0x28 | ||
56 | #define STI_COUNT_RAW_L 0x2c | ||
57 | #define STI_SET_H 0x30 | ||
58 | #define STI_SET_L 0x34 | ||
59 | #define STI_INTSTATUS 0x40 | ||
60 | #define STI_INTRAWSTATUS 0x44 | ||
61 | #define STI_INTENSET 0x48 | ||
62 | #define STI_INTENCLR 0x4c | ||
63 | #define STI_INTFFCLR 0x50 | ||
64 | |||
65 | static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs) | ||
66 | { | ||
67 | return ioread32(p->base + offs); | ||
68 | } | ||
69 | |||
70 | static inline void em_sti_write(struct em_sti_priv *p, int offs, | ||
71 | unsigned long value) | ||
72 | { | ||
73 | iowrite32(value, p->base + offs); | ||
74 | } | ||
75 | |||
76 | static int em_sti_enable(struct em_sti_priv *p) | ||
77 | { | ||
78 | int ret; | ||
79 | |||
80 | /* enable clock */ | ||
81 | ret = clk_enable(p->clk); | ||
82 | if (ret) { | ||
83 | dev_err(&p->pdev->dev, "cannot enable clock\n"); | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | /* configure channel, periodic mode and maximum timeout */ | ||
88 | p->rate = clk_get_rate(p->clk); | ||
89 | |||
90 | /* reset the counter */ | ||
91 | em_sti_write(p, STI_SET_H, 0x40000000); | ||
92 | em_sti_write(p, STI_SET_L, 0x00000000); | ||
93 | |||
94 | /* mask and clear pending interrupts */ | ||
95 | em_sti_write(p, STI_INTENCLR, 3); | ||
96 | em_sti_write(p, STI_INTFFCLR, 3); | ||
97 | |||
98 | /* enable updates of counter registers */ | ||
99 | em_sti_write(p, STI_CONTROL, 1); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static void em_sti_disable(struct em_sti_priv *p) | ||
105 | { | ||
106 | /* mask interrupts */ | ||
107 | em_sti_write(p, STI_INTENCLR, 3); | ||
108 | |||
109 | /* stop clock */ | ||
110 | clk_disable(p->clk); | ||
111 | } | ||
112 | |||
113 | static cycle_t em_sti_count(struct em_sti_priv *p) | ||
114 | { | ||
115 | cycle_t ticks; | ||
116 | unsigned long flags; | ||
117 | |||
118 | /* the STI hardware buffers the 48-bit count, but to | ||
119 | * break it out into two 32-bit access the registers | ||
120 | * must be accessed in a certain order. | ||
121 | * Always read STI_COUNT_H before STI_COUNT_L. | ||
122 | */ | ||
123 | raw_spin_lock_irqsave(&p->lock, flags); | ||
124 | ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; | ||
125 | ticks |= em_sti_read(p, STI_COUNT_L); | ||
126 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
127 | |||
128 | return ticks; | ||
129 | } | ||
130 | |||
131 | static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) | ||
132 | { | ||
133 | unsigned long flags; | ||
134 | |||
135 | raw_spin_lock_irqsave(&p->lock, flags); | ||
136 | |||
137 | /* mask compare A interrupt */ | ||
138 | em_sti_write(p, STI_INTENCLR, 1); | ||
139 | |||
140 | /* update compare A value */ | ||
141 | em_sti_write(p, STI_COMPA_H, next >> 32); | ||
142 | em_sti_write(p, STI_COMPA_L, next & 0xffffffff); | ||
143 | |||
144 | /* clear compare A interrupt source */ | ||
145 | em_sti_write(p, STI_INTFFCLR, 1); | ||
146 | |||
147 | /* unmask compare A interrupt */ | ||
148 | em_sti_write(p, STI_INTENSET, 1); | ||
149 | |||
150 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
151 | |||
152 | return next; | ||
153 | } | ||
154 | |||
155 | static irqreturn_t em_sti_interrupt(int irq, void *dev_id) | ||
156 | { | ||
157 | struct em_sti_priv *p = dev_id; | ||
158 | |||
159 | p->ced.event_handler(&p->ced); | ||
160 | return IRQ_HANDLED; | ||
161 | } | ||
162 | |||
163 | static int em_sti_start(struct em_sti_priv *p, unsigned int user) | ||
164 | { | ||
165 | unsigned long flags; | ||
166 | int used_before; | ||
167 | int ret = 0; | ||
168 | |||
169 | raw_spin_lock_irqsave(&p->lock, flags); | ||
170 | used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
171 | if (!used_before) | ||
172 | ret = em_sti_enable(p); | ||
173 | |||
174 | if (!ret) | ||
175 | p->active[user] = 1; | ||
176 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
177 | |||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | static void em_sti_stop(struct em_sti_priv *p, unsigned int user) | ||
182 | { | ||
183 | unsigned long flags; | ||
184 | int used_before, used_after; | ||
185 | |||
186 | raw_spin_lock_irqsave(&p->lock, flags); | ||
187 | used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
188 | p->active[user] = 0; | ||
189 | used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; | ||
190 | |||
191 | if (used_before && !used_after) | ||
192 | em_sti_disable(p); | ||
193 | raw_spin_unlock_irqrestore(&p->lock, flags); | ||
194 | } | ||
195 | |||
196 | static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) | ||
197 | { | ||
198 | return container_of(cs, struct em_sti_priv, cs); | ||
199 | } | ||
200 | |||
201 | static cycle_t em_sti_clocksource_read(struct clocksource *cs) | ||
202 | { | ||
203 | return em_sti_count(cs_to_em_sti(cs)); | ||
204 | } | ||
205 | |||
206 | static int em_sti_clocksource_enable(struct clocksource *cs) | ||
207 | { | ||
208 | int ret; | ||
209 | struct em_sti_priv *p = cs_to_em_sti(cs); | ||
210 | |||
211 | ret = em_sti_start(p, USER_CLOCKSOURCE); | ||
212 | if (!ret) | ||
213 | __clocksource_updatefreq_hz(cs, p->rate); | ||
214 | return ret; | ||
215 | } | ||
216 | |||
217 | static void em_sti_clocksource_disable(struct clocksource *cs) | ||
218 | { | ||
219 | em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE); | ||
220 | } | ||
221 | |||
222 | static void em_sti_clocksource_resume(struct clocksource *cs) | ||
223 | { | ||
224 | em_sti_clocksource_enable(cs); | ||
225 | } | ||
226 | |||
227 | static int em_sti_register_clocksource(struct em_sti_priv *p) | ||
228 | { | ||
229 | struct clocksource *cs = &p->cs; | ||
230 | |||
231 | memset(cs, 0, sizeof(*cs)); | ||
232 | cs->name = dev_name(&p->pdev->dev); | ||
233 | cs->rating = 200; | ||
234 | cs->read = em_sti_clocksource_read; | ||
235 | cs->enable = em_sti_clocksource_enable; | ||
236 | cs->disable = em_sti_clocksource_disable; | ||
237 | cs->suspend = em_sti_clocksource_disable; | ||
238 | cs->resume = em_sti_clocksource_resume; | ||
239 | cs->mask = CLOCKSOURCE_MASK(48); | ||
240 | cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; | ||
241 | |||
242 | dev_info(&p->pdev->dev, "used as clock source\n"); | ||
243 | |||
244 | /* Register with dummy 1 Hz value, gets updated in ->enable() */ | ||
245 | clocksource_register_hz(cs, 1); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced) | ||
250 | { | ||
251 | return container_of(ced, struct em_sti_priv, ced); | ||
252 | } | ||
253 | |||
254 | static void em_sti_clock_event_mode(enum clock_event_mode mode, | ||
255 | struct clock_event_device *ced) | ||
256 | { | ||
257 | struct em_sti_priv *p = ced_to_em_sti(ced); | ||
258 | |||
259 | /* deal with old setting first */ | ||
260 | switch (ced->mode) { | ||
261 | case CLOCK_EVT_MODE_ONESHOT: | ||
262 | em_sti_stop(p, USER_CLOCKEVENT); | ||
263 | break; | ||
264 | default: | ||
265 | break; | ||
266 | } | ||
267 | |||
268 | switch (mode) { | ||
269 | case CLOCK_EVT_MODE_ONESHOT: | ||
270 | dev_info(&p->pdev->dev, "used for oneshot clock events\n"); | ||
271 | em_sti_start(p, USER_CLOCKEVENT); | ||
272 | clockevents_config(&p->ced, p->rate); | ||
273 | break; | ||
274 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
275 | case CLOCK_EVT_MODE_UNUSED: | ||
276 | em_sti_stop(p, USER_CLOCKEVENT); | ||
277 | break; | ||
278 | default: | ||
279 | break; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | static int em_sti_clock_event_next(unsigned long delta, | ||
284 | struct clock_event_device *ced) | ||
285 | { | ||
286 | struct em_sti_priv *p = ced_to_em_sti(ced); | ||
287 | cycle_t next; | ||
288 | int safe; | ||
289 | |||
290 | next = em_sti_set_next(p, em_sti_count(p) + delta); | ||
291 | safe = em_sti_count(p) < (next - 1); | ||
292 | |||
293 | return !safe; | ||
294 | } | ||
295 | |||
296 | static void em_sti_register_clockevent(struct em_sti_priv *p) | ||
297 | { | ||
298 | struct clock_event_device *ced = &p->ced; | ||
299 | |||
300 | memset(ced, 0, sizeof(*ced)); | ||
301 | ced->name = dev_name(&p->pdev->dev); | ||
302 | ced->features = CLOCK_EVT_FEAT_ONESHOT; | ||
303 | ced->rating = 200; | ||
304 | ced->cpumask = cpumask_of(0); | ||
305 | ced->set_next_event = em_sti_clock_event_next; | ||
306 | ced->set_mode = em_sti_clock_event_mode; | ||
307 | |||
308 | dev_info(&p->pdev->dev, "used for clock events\n"); | ||
309 | |||
310 | /* Register with dummy 1 Hz value, gets updated in ->set_mode() */ | ||
311 | clockevents_config_and_register(ced, 1, 2, 0xffffffff); | ||
312 | } | ||
313 | |||
314 | static int __devinit em_sti_probe(struct platform_device *pdev) | ||
315 | { | ||
316 | struct em_sti_priv *p; | ||
317 | struct resource *res; | ||
318 | int irq, ret; | ||
319 | |||
320 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
321 | if (p == NULL) { | ||
322 | dev_err(&pdev->dev, "failed to allocate driver data\n"); | ||
323 | ret = -ENOMEM; | ||
324 | goto err0; | ||
325 | } | ||
326 | |||
327 | p->pdev = pdev; | ||
328 | platform_set_drvdata(pdev, p); | ||
329 | |||
330 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
331 | if (!res) { | ||
332 | dev_err(&pdev->dev, "failed to get I/O memory\n"); | ||
333 | ret = -EINVAL; | ||
334 | goto err0; | ||
335 | } | ||
336 | |||
337 | irq = platform_get_irq(pdev, 0); | ||
338 | if (irq < 0) { | ||
339 | dev_err(&pdev->dev, "failed to get irq\n"); | ||
340 | ret = -EINVAL; | ||
341 | goto err0; | ||
342 | } | ||
343 | |||
344 | /* map memory, let base point to the STI instance */ | ||
345 | p->base = ioremap_nocache(res->start, resource_size(res)); | ||
346 | if (p->base == NULL) { | ||
347 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); | ||
348 | ret = -ENXIO; | ||
349 | goto err0; | ||
350 | } | ||
351 | |||
352 | /* get hold of clock */ | ||
353 | p->clk = clk_get(&pdev->dev, "sclk"); | ||
354 | if (IS_ERR(p->clk)) { | ||
355 | dev_err(&pdev->dev, "cannot get clock\n"); | ||
356 | ret = PTR_ERR(p->clk); | ||
357 | goto err1; | ||
358 | } | ||
359 | |||
360 | if (request_irq(irq, em_sti_interrupt, | ||
361 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | ||
362 | dev_name(&pdev->dev), p)) { | ||
363 | dev_err(&pdev->dev, "failed to request low IRQ\n"); | ||
364 | ret = -ENOENT; | ||
365 | goto err2; | ||
366 | } | ||
367 | |||
368 | raw_spin_lock_init(&p->lock); | ||
369 | em_sti_register_clockevent(p); | ||
370 | em_sti_register_clocksource(p); | ||
371 | return 0; | ||
372 | |||
373 | err2: | ||
374 | clk_put(p->clk); | ||
375 | err1: | ||
376 | iounmap(p->base); | ||
377 | err0: | ||
378 | kfree(p); | ||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | static int __devexit em_sti_remove(struct platform_device *pdev) | ||
383 | { | ||
384 | return -EBUSY; /* cannot unregister clockevent and clocksource */ | ||
385 | } | ||
386 | |||
387 | static const struct of_device_id em_sti_dt_ids[] __devinitconst = { | ||
388 | { .compatible = "renesas,em-sti", }, | ||
389 | {}, | ||
390 | }; | ||
391 | MODULE_DEVICE_TABLE(of, em_sti_dt_ids); | ||
392 | |||
393 | static struct platform_driver em_sti_device_driver = { | ||
394 | .probe = em_sti_probe, | ||
395 | .remove = __devexit_p(em_sti_remove), | ||
396 | .driver = { | ||
397 | .name = "em_sti", | ||
398 | .of_match_table = em_sti_dt_ids, | ||
399 | } | ||
400 | }; | ||
401 | |||
402 | module_platform_driver(em_sti_device_driver); | ||
403 | |||
404 | MODULE_AUTHOR("Magnus Damm"); | ||
405 | MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver"); | ||
406 | MODULE_LICENSE("GPL v2"); | ||
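
[Editor's note] A sketch of the "enable on first user, disable on last user" pattern that em_sti_start()/em_sti_stop() implement above for the shared clocksource/clockevent hardware. This is a user-space analogue under stated assumptions: a pthread mutex stands in for the driver's raw spinlock, and hw_enable()/hw_disable() are hypothetical stand-ins for em_sti_enable()/em_sti_disable().

#include <pthread.h>
#include <stdio.h>

enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active[USER_NR];

static void hw_enable(void)  { puts("hardware enabled");  }
static void hw_disable(void) { puts("hardware disabled"); }

static void timer_start(int user)
{
	pthread_mutex_lock(&lock);
	if (!(active[USER_CLOCKSOURCE] | active[USER_CLOCKEVENT]))
		hw_enable();             /* first user powers the block on */
	active[user] = 1;
	pthread_mutex_unlock(&lock);
}

static void timer_stop(int user)
{
	int before, after;

	pthread_mutex_lock(&lock);
	before = active[USER_CLOCKSOURCE] | active[USER_CLOCKEVENT];
	active[user] = 0;
	after = active[USER_CLOCKSOURCE] | active[USER_CLOCKEVENT];
	if (before && !after)
		hw_disable();            /* last user powers it back off */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	timer_start(USER_CLOCKEVENT);    /* enables */
	timer_start(USER_CLOCKSOURCE);   /* already on, no extra enable */
	timer_stop(USER_CLOCKEVENT);     /* still one user left */
	timer_stop(USER_CLOCKSOURCE);    /* disables */
	return 0;
}
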
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 32fe9ef5cc5c..98b06baafcc6 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -48,13 +48,13 @@ struct sh_cmt_priv { | |||
48 | unsigned long next_match_value; | 48 | unsigned long next_match_value; |
49 | unsigned long max_match_value; | 49 | unsigned long max_match_value; |
50 | unsigned long rate; | 50 | unsigned long rate; |
51 | spinlock_t lock; | 51 | raw_spinlock_t lock; |
52 | struct clock_event_device ced; | 52 | struct clock_event_device ced; |
53 | struct clocksource cs; | 53 | struct clocksource cs; |
54 | unsigned long total_cycles; | 54 | unsigned long total_cycles; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | static DEFINE_SPINLOCK(sh_cmt_lock); | 57 | static DEFINE_RAW_SPINLOCK(sh_cmt_lock); |
58 | 58 | ||
59 | #define CMSTR -1 /* shared register */ | 59 | #define CMSTR -1 /* shared register */ |
60 | #define CMCSR 0 /* channel register */ | 60 | #define CMCSR 0 /* channel register */ |
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | |||
139 | unsigned long flags, value; | 139 | unsigned long flags, value; |
140 | 140 | ||
141 | /* start stop register shared by multiple timer channels */ | 141 | /* start stop register shared by multiple timer channels */ |
142 | spin_lock_irqsave(&sh_cmt_lock, flags); | 142 | raw_spin_lock_irqsave(&sh_cmt_lock, flags); |
143 | value = sh_cmt_read(p, CMSTR); | 143 | value = sh_cmt_read(p, CMSTR); |
144 | 144 | ||
145 | if (start) | 145 | if (start) |
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | |||
148 | value &= ~(1 << cfg->timer_bit); | 148 | value &= ~(1 << cfg->timer_bit); |
149 | 149 | ||
150 | sh_cmt_write(p, CMSTR, value); | 150 | sh_cmt_write(p, CMSTR, value); |
151 | spin_unlock_irqrestore(&sh_cmt_lock, flags); | 151 | raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); |
152 | } | 152 | } |
153 | 153 | ||
154 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) | 154 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) |
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) | |||
328 | { | 328 | { |
329 | unsigned long flags; | 329 | unsigned long flags; |
330 | 330 | ||
331 | spin_lock_irqsave(&p->lock, flags); | 331 | raw_spin_lock_irqsave(&p->lock, flags); |
332 | __sh_cmt_set_next(p, delta); | 332 | __sh_cmt_set_next(p, delta); |
333 | spin_unlock_irqrestore(&p->lock, flags); | 333 | raw_spin_unlock_irqrestore(&p->lock, flags); |
334 | } | 334 | } |
335 | 335 | ||
336 | static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) | 336 | static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) |
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) | |||
385 | int ret = 0; | 385 | int ret = 0; |
386 | unsigned long flags; | 386 | unsigned long flags; |
387 | 387 | ||
388 | spin_lock_irqsave(&p->lock, flags); | 388 | raw_spin_lock_irqsave(&p->lock, flags); |
389 | 389 | ||
390 | if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) | 390 | if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) |
391 | ret = sh_cmt_enable(p, &p->rate); | 391 | ret = sh_cmt_enable(p, &p->rate); |
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) | |||
398 | if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) | 398 | if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) |
399 | __sh_cmt_set_next(p, p->max_match_value); | 399 | __sh_cmt_set_next(p, p->max_match_value); |
400 | out: | 400 | out: |
401 | spin_unlock_irqrestore(&p->lock, flags); | 401 | raw_spin_unlock_irqrestore(&p->lock, flags); |
402 | 402 | ||
403 | return ret; | 403 | return ret; |
404 | } | 404 | } |
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) | |||
408 | unsigned long flags; | 408 | unsigned long flags; |
409 | unsigned long f; | 409 | unsigned long f; |
410 | 410 | ||
411 | spin_lock_irqsave(&p->lock, flags); | 411 | raw_spin_lock_irqsave(&p->lock, flags); |
412 | 412 | ||
413 | f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); | 413 | f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); |
414 | p->flags &= ~flag; | 414 | p->flags &= ~flag; |
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) | |||
420 | if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) | 420 | if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) |
421 | __sh_cmt_set_next(p, p->max_match_value); | 421 | __sh_cmt_set_next(p, p->max_match_value); |
422 | 422 | ||
423 | spin_unlock_irqrestore(&p->lock, flags); | 423 | raw_spin_unlock_irqrestore(&p->lock, flags); |
424 | } | 424 | } |
425 | 425 | ||
426 | static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) | 426 | static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) |
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) | |||
435 | unsigned long value; | 435 | unsigned long value; |
436 | int has_wrapped; | 436 | int has_wrapped; |
437 | 437 | ||
438 | spin_lock_irqsave(&p->lock, flags); | 438 | raw_spin_lock_irqsave(&p->lock, flags); |
439 | value = p->total_cycles; | 439 | value = p->total_cycles; |
440 | raw = sh_cmt_get_counter(p, &has_wrapped); | 440 | raw = sh_cmt_get_counter(p, &has_wrapped); |
441 | 441 | ||
442 | if (unlikely(has_wrapped)) | 442 | if (unlikely(has_wrapped)) |
443 | raw += p->match_value + 1; | 443 | raw += p->match_value + 1; |
444 | spin_unlock_irqrestore(&p->lock, flags); | 444 | raw_spin_unlock_irqrestore(&p->lock, flags); |
445 | 445 | ||
446 | return value + raw; | 446 | return value + raw; |
447 | } | 447 | } |
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name, | |||
591 | p->max_match_value = (1 << p->width) - 1; | 591 | p->max_match_value = (1 << p->width) - 1; |
592 | 592 | ||
593 | p->match_value = p->max_match_value; | 593 | p->match_value = p->max_match_value; |
594 | spin_lock_init(&p->lock); | 594 | raw_spin_lock_init(&p->lock); |
595 | 595 | ||
596 | if (clockevent_rating) | 596 | if (clockevent_rating) |
597 | sh_cmt_register_clockevent(p, name, clockevent_rating); | 597 | sh_cmt_register_clockevent(p, name, clockevent_rating); |
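
[Editor's note] The sh_cmt hunks above convert the locks guarding a start/stop register shared by several timer channels to raw spinlocks. The sketch below shows only the locked read-modify-write itself, as a stand-alone user-space analogue: a pthread mutex replaces the raw spinlock, and the register and bit names are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmstr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long cmstr;              /* start/stop register shared by all channels */

static void channel_start_stop(int timer_bit, int start)
{
	unsigned long value;

	pthread_mutex_lock(&cmstr_lock);
	value = cmstr;                   /* read the shared register */
	if (start)
		value |= 1UL << timer_bit;
	else
		value &= ~(1UL << timer_bit);
	cmstr = value;                   /* write back; other channels' bits untouched */
	pthread_mutex_unlock(&cmstr_lock);
}

int main(void)
{
	channel_start_stop(0, 1);
	channel_start_stop(5, 1);
	channel_start_stop(0, 0);
	printf("CMSTR = 0x%02lx\n", cmstr);   /* 0x20: only channel 5 running */
	return 0;
}
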
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index a2172f690418..d9b76ca64a61 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c | |||
@@ -43,7 +43,7 @@ struct sh_mtu2_priv { | |||
43 | struct clock_event_device ced; | 43 | struct clock_event_device ced; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static DEFINE_SPINLOCK(sh_mtu2_lock); | 46 | static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); |
47 | 47 | ||
48 | #define TSTR -1 /* shared register */ | 48 | #define TSTR -1 /* shared register */ |
49 | #define TCR 0 /* channel register */ | 49 | #define TCR 0 /* channel register */ |
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) | |||
107 | unsigned long flags, value; | 107 | unsigned long flags, value; |
108 | 108 | ||
109 | /* start stop register shared by multiple timer channels */ | 109 | /* start stop register shared by multiple timer channels */ |
110 | spin_lock_irqsave(&sh_mtu2_lock, flags); | 110 | raw_spin_lock_irqsave(&sh_mtu2_lock, flags); |
111 | value = sh_mtu2_read(p, TSTR); | 111 | value = sh_mtu2_read(p, TSTR); |
112 | 112 | ||
113 | if (start) | 113 | if (start) |
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) | |||
116 | value &= ~(1 << cfg->timer_bit); | 116 | value &= ~(1 << cfg->timer_bit); |
117 | 117 | ||
118 | sh_mtu2_write(p, TSTR, value); | 118 | sh_mtu2_write(p, TSTR, value); |
119 | spin_unlock_irqrestore(&sh_mtu2_lock, flags); | 119 | raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); |
120 | } | 120 | } |
121 | 121 | ||
122 | static int sh_mtu2_enable(struct sh_mtu2_priv *p) | 122 | static int sh_mtu2_enable(struct sh_mtu2_priv *p) |
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 97f54b634be4..c1b51d49d106 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
@@ -45,7 +45,7 @@ struct sh_tmu_priv { | |||
45 | struct clocksource cs; | 45 | struct clocksource cs; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static DEFINE_SPINLOCK(sh_tmu_lock); | 48 | static DEFINE_RAW_SPINLOCK(sh_tmu_lock); |
49 | 49 | ||
50 | #define TSTR -1 /* shared register */ | 50 | #define TSTR -1 /* shared register */ |
51 | #define TCOR 0 /* channel register */ | 51 | #define TCOR 0 /* channel register */ |
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) | |||
95 | unsigned long flags, value; | 95 | unsigned long flags, value; |
96 | 96 | ||
97 | /* start stop register shared by multiple timer channels */ | 97 | /* start stop register shared by multiple timer channels */ |
98 | spin_lock_irqsave(&sh_tmu_lock, flags); | 98 | raw_spin_lock_irqsave(&sh_tmu_lock, flags); |
99 | value = sh_tmu_read(p, TSTR); | 99 | value = sh_tmu_read(p, TSTR); |
100 | 100 | ||
101 | if (start) | 101 | if (start) |
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) | |||
104 | value &= ~(1 << cfg->timer_bit); | 104 | value &= ~(1 << cfg->timer_bit); |
105 | 105 | ||
106 | sh_tmu_write(p, TSTR, value); | 106 | sh_tmu_write(p, TSTR, value); |
107 | spin_unlock_irqrestore(&sh_tmu_lock, flags); | 107 | raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); |
108 | } | 108 | } |
109 | 109 | ||
110 | static int sh_tmu_enable(struct sh_tmu_priv *p) | 110 | static int sh_tmu_enable(struct sh_tmu_priv *p) |
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) | |||
245 | 245 | ||
246 | sh_tmu_enable(p); | 246 | sh_tmu_enable(p); |
247 | 247 | ||
248 | /* TODO: calculate good shift from rate and counter bit width */ | 248 | clockevents_config(ced, p->rate); |
249 | |||
250 | ced->shift = 32; | ||
251 | ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); | ||
252 | ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced); | ||
253 | ced->min_delta_ns = 5000; | ||
254 | 249 | ||
255 | if (periodic) { | 250 | if (periodic) { |
256 | p->periodic = (p->rate + HZ/2) / HZ; | 251 | p->periodic = (p->rate + HZ/2) / HZ; |
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, | |||
323 | ced->set_mode = sh_tmu_clock_event_mode; | 318 | ced->set_mode = sh_tmu_clock_event_mode; |
324 | 319 | ||
325 | dev_info(&p->pdev->dev, "used for clock events\n"); | 320 | dev_info(&p->pdev->dev, "used for clock events\n"); |
326 | clockevents_register_device(ced); | 321 | |
322 | clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); | ||
327 | 323 | ||
328 | ret = setup_irq(p->irqaction.irq, &p->irqaction); | 324 | ret = setup_irq(p->irqaction.irq, &p->irqaction); |
329 | if (ret) { | 325 | if (ret) { |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index e23dc82d43ac..721296157577 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -1626,4 +1626,4 @@ module_exit(dw_exit); | |||
1626 | MODULE_LICENSE("GPL v2"); | 1626 | MODULE_LICENSE("GPL v2"); |
1627 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 1627 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); |
1628 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); | 1628 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1629 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 1629 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index fb4f4990f5eb..1dc2a4ad0026 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac) | |||
815 | 815 | ||
816 | init_completion(&sdmac->done); | 816 | init_completion(&sdmac->done); |
817 | 817 | ||
818 | sdmac->buf_tail = 0; | ||
819 | |||
820 | return 0; | 818 | return 0; |
821 | out: | 819 | out: |
822 | 820 | ||
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
927 | 925 | ||
928 | sdmac->flags = 0; | 926 | sdmac->flags = 0; |
929 | 927 | ||
928 | sdmac->buf_tail = 0; | ||
929 | |||
930 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", | 930 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", |
931 | sg_len, channel); | 931 | sg_len, channel); |
932 | 932 | ||
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
1027 | 1027 | ||
1028 | sdmac->status = DMA_IN_PROGRESS; | 1028 | sdmac->status = DMA_IN_PROGRESS; |
1029 | 1029 | ||
1030 | sdmac->buf_tail = 0; | ||
1031 | |||
1030 | sdmac->flags |= IMX_DMA_SG_LOOP; | 1032 | sdmac->flags |= IMX_DMA_SG_LOOP; |
1031 | sdmac->direction = direction; | 1033 | sdmac->direction = direction; |
1032 | ret = sdma_load_context(sdmac); | 1034 | ret = sdma_load_context(sdmac); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cbcc28e79be6..e4feba6b03c0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -392,6 +392,8 @@ struct pl330_req { | |||
392 | struct pl330_reqcfg *cfg; | 392 | struct pl330_reqcfg *cfg; |
393 | /* Pointer to first xfer in the request. */ | 393 | /* Pointer to first xfer in the request. */ |
394 | struct pl330_xfer *x; | 394 | struct pl330_xfer *x; |
395 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
396 | struct list_head rqd; | ||
395 | }; | 397 | }; |
396 | 398 | ||
397 | /* | 399 | /* |
@@ -461,8 +463,6 @@ struct _pl330_req { | |||
461 | /* Number of bytes taken to setup MC for the req */ | 463 | /* Number of bytes taken to setup MC for the req */ |
462 | u32 mc_len; | 464 | u32 mc_len; |
463 | struct pl330_req *r; | 465 | struct pl330_req *r; |
464 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
465 | struct list_head rqd; | ||
466 | }; | 466 | }; |
467 | 467 | ||
468 | /* ToBeDone for tasklet */ | 468 | /* ToBeDone for tasklet */ |
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data) | |||
1683 | /* Returns 1 if state was updated, 0 otherwise */ | 1683 | /* Returns 1 if state was updated, 0 otherwise */ |
1684 | static int pl330_update(const struct pl330_info *pi) | 1684 | static int pl330_update(const struct pl330_info *pi) |
1685 | { | 1685 | { |
1686 | struct _pl330_req *rqdone; | 1686 | struct pl330_req *rqdone, *tmp; |
1687 | struct pl330_dmac *pl330; | 1687 | struct pl330_dmac *pl330; |
1688 | unsigned long flags; | 1688 | unsigned long flags; |
1689 | void __iomem *regs; | 1689 | void __iomem *regs; |
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi) | |||
1750 | if (active == -1) /* Aborted */ | 1750 | if (active == -1) /* Aborted */ |
1751 | continue; | 1751 | continue; |
1752 | 1752 | ||
1753 | rqdone = &thrd->req[active]; | 1753 | /* Detach the req */ |
1754 | rqdone = thrd->req[active].r; | ||
1755 | thrd->req[active].r = NULL; | ||
1756 | |||
1754 | mark_free(thrd, active); | 1757 | mark_free(thrd, active); |
1755 | 1758 | ||
1756 | /* Get going again ASAP */ | 1759 | /* Get going again ASAP */ |
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi) | |||
1762 | } | 1765 | } |
1763 | 1766 | ||
1764 | /* Now that we are in no hurry, do the callbacks */ | 1767 | /* Now that we are in no hurry, do the callbacks */ |
1765 | while (!list_empty(&pl330->req_done)) { | 1768 | list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) { |
1766 | struct pl330_req *r; | 1769 | list_del(&rqdone->rqd); |
1767 | |||
1768 | rqdone = container_of(pl330->req_done.next, | ||
1769 | struct _pl330_req, rqd); | ||
1770 | |||
1771 | list_del_init(&rqdone->rqd); | ||
1772 | |||
1773 | /* Detach the req */ | ||
1774 | r = rqdone->r; | ||
1775 | rqdone->r = NULL; | ||
1776 | 1770 | ||
1777 | spin_unlock_irqrestore(&pl330->lock, flags); | 1771 | spin_unlock_irqrestore(&pl330->lock, flags); |
1778 | _callback(r, PL330_ERR_NONE); | 1772 | _callback(rqdone, PL330_ERR_NONE); |
1779 | spin_lock_irqsave(&pl330->lock, flags); | 1773 | spin_lock_irqsave(&pl330->lock, flags); |
1780 | } | 1774 | } |
1781 | 1775 | ||
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data) | |||
2321 | /* Pick up ripe tomatoes */ | 2315 | /* Pick up ripe tomatoes */ |
2322 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) | 2316 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) |
2323 | if (desc->status == DONE) { | 2317 | if (desc->status == DONE) { |
2324 | if (pch->cyclic) | 2318 | if (!pch->cyclic) |
2325 | dma_cookie_complete(&desc->txd); | 2319 | dma_cookie_complete(&desc->txd); |
2326 | list_move_tail(&desc->node, &list); | 2320 | list_move_tail(&desc->node, &list); |
2327 | } | 2321 | } |
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc) | |||
2539 | } | 2533 | } |
2540 | 2534 | ||
2541 | /* Returns the number of descriptors added to the DMAC pool */ | 2535 | /* Returns the number of descriptors added to the DMAC pool */ |
2542 | int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) | 2536 | static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) |
2543 | { | 2537 | { |
2544 | struct dma_pl330_desc *desc; | 2538 | struct dma_pl330_desc *desc; |
2545 | unsigned long flags; | 2539 | unsigned long flags; |
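
[Editor's note] One of the pl330 hunks above replaces an open-coded while/container_of loop with list_for_each_entry_safe(), so requests can be detached and their callbacks run while the list is being walked. The stand-alone analogue below uses a plain singly linked list rather than the kernel's intrusive lists; the point it illustrates is the same: grab the successor before unlinking the current entry.

#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

static void callback(struct req *r)
{
	printf("completed request %d\n", r->id);
}

static void complete_all(struct req **head)
{
	struct req *r = *head, *tmp;

	while (r) {
		tmp = r->next;    /* grab the successor before unlinking */
		r->next = NULL;   /* detach the entry from the list */
		callback(r);
		free(r);
		r = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct req *head = NULL;

	for (int i = 3; i > 0; i--) {
		struct req *r = malloc(sizeof(*r));
		r->id = i;
		r->next = head;
		head = r;
	}
	complete_all(&head);
	return 0;
}
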
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 10f375032e96..de5ba86e8b89 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -164,7 +164,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems) | |||
164 | else | 164 | else |
165 | return (char *)ptr; | 165 | return (char *)ptr; |
166 | 166 | ||
167 | r = size % align; | 167 | r = (unsigned long)p % align; |
168 | 168 | ||
169 | if (r == 0) | 169 | if (r == 0) |
170 | return (char *)ptr; | 170 | return (char *)ptr; |
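
[Editor's note] The edac_mc hunk above corrects the alignment arithmetic: the padding needed depends on the address being aligned, not on the size of the object, so the remainder has to be taken from the pointer value. A small sketch of that arithmetic, with a hypothetical align_up() helper:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void *align_up(void *p, size_t align)
{
	uintptr_t r = (uintptr_t)p % align;   /* remainder of the address, not the size */

	if (r == 0)
		return p;
	return (char *)p + (align - r);       /* pad up to the next boundary */
}

int main(void)
{
	char buf[64];
	void *p = buf + 3;

	printf("%p -> %p (8-byte aligned)\n", p, align_up(p, 8));
	return 0;
}
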
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index d27778f65a5d..a499c7ed820a 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1814,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, | |||
1814 | if (mce->bank != 8) | 1814 | if (mce->bank != 8) |
1815 | return NOTIFY_DONE; | 1815 | return NOTIFY_DONE; |
1816 | 1816 | ||
1817 | #ifdef CONFIG_SMP | ||
1818 | /* Only handle if it is the right mc controller */ | ||
1819 | if (mce->socketid != pvt->i7core_dev->socket) | ||
1820 | return NOTIFY_DONE; | ||
1821 | #endif | ||
1822 | |||
1823 | smp_rmb(); | 1817 | smp_rmb(); |
1824 | if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { | 1818 | if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { |
1825 | smp_wmb(); | 1819 | smp_wmb(); |
@@ -2116,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev) | |||
2116 | if (pvt->enable_scrub) | 2110 | if (pvt->enable_scrub) |
2117 | disable_sdram_scrub_setting(mci); | 2111 | disable_sdram_scrub_setting(mci); |
2118 | 2112 | ||
2119 | mce_unregister_decode_chain(&i7_mce_dec); | ||
2120 | |||
2121 | /* Disable EDAC polling */ | 2113 | /* Disable EDAC polling */ |
2122 | i7core_pci_ctl_release(pvt); | 2114 | i7core_pci_ctl_release(pvt); |
2123 | 2115 | ||
@@ -2222,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) | |||
2222 | /* DCLK for scrub rate setting */ | 2214 | /* DCLK for scrub rate setting */ |
2223 | pvt->dclk_freq = get_dclk_freq(); | 2215 | pvt->dclk_freq = get_dclk_freq(); |
2224 | 2216 | ||
2225 | mce_register_decode_chain(&i7_mce_dec); | ||
2226 | |||
2227 | return 0; | 2217 | return 0; |
2228 | 2218 | ||
2229 | fail0: | 2219 | fail0: |
@@ -2367,8 +2357,10 @@ static int __init i7core_init(void) | |||
2367 | 2357 | ||
2368 | pci_rc = pci_register_driver(&i7core_driver); | 2358 | pci_rc = pci_register_driver(&i7core_driver); |
2369 | 2359 | ||
2370 | if (pci_rc >= 0) | 2360 | if (pci_rc >= 0) { |
2361 | mce_register_decode_chain(&i7_mce_dec); | ||
2371 | return 0; | 2362 | return 0; |
2363 | } | ||
2372 | 2364 | ||
2373 | i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", | 2365 | i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", |
2374 | pci_rc); | 2366 | pci_rc); |
@@ -2384,6 +2376,7 @@ static void __exit i7core_exit(void) | |||
2384 | { | 2376 | { |
2385 | debugf2("MC: " __FILE__ ": %s()\n", __func__); | 2377 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
2386 | pci_unregister_driver(&i7core_driver); | 2378 | pci_unregister_driver(&i7core_driver); |
2379 | mce_unregister_decode_chain(&i7_mce_dec); | ||
2387 | } | 2380 | } |
2388 | 2381 | ||
2389 | module_init(i7core_init); | 2382 | module_init(i7core_init); |
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 4c402353ba98..0e374625f6f8 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -980,7 +980,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) | |||
980 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | 980 | layers[1].type = EDAC_MC_LAYER_CHANNEL; |
981 | layers[1].size = 1; | 981 | layers[1].size = 1; |
982 | layers[1].is_virt_csrow = false; | 982 | layers[1].is_virt_csrow = false; |
983 | mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata)); | 983 | mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, |
984 | sizeof(*pdata)); | ||
984 | if (!mci) { | 985 | if (!mci) { |
985 | devres_release_group(&op->dev, mpc85xx_mc_err_probe); | 986 | devres_release_group(&op->dev, mpc85xx_mc_err_probe); |
986 | return -ENOMEM; | 987 | return -ENOMEM; |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 4adaf4b7da99..36ad17e79d61 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -555,7 +555,7 @@ static int get_dimm_config(struct mem_ctl_info *mci) | |||
555 | pvt->is_close_pg = false; | 555 | pvt->is_close_pg = false; |
556 | } | 556 | } |
557 | 557 | ||
558 | pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, ®); | 558 | pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, ®); |
559 | if (IS_RDIMM_ENABLED(reg)) { | 559 | if (IS_RDIMM_ENABLED(reg)) { |
560 | /* FIXME: Can also be LRDIMM */ | 560 | /* FIXME: Can also be LRDIMM */ |
561 | debugf0("Memory is registered\n"); | 561 | debugf0("Memory is registered\n"); |
@@ -1604,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) | |||
1604 | debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", | 1604 | debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", |
1605 | __func__, mci, &sbridge_dev->pdev[0]->dev); | 1605 | __func__, mci, &sbridge_dev->pdev[0]->dev); |
1606 | 1606 | ||
1607 | mce_unregister_decode_chain(&sbridge_mce_dec); | ||
1608 | |||
1609 | /* Remove MC sysfs nodes */ | 1607 | /* Remove MC sysfs nodes */ |
1610 | edac_mc_del_mc(mci->dev); | 1608 | edac_mc_del_mc(mci->dev); |
1611 | 1609 | ||
@@ -1682,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) | |||
1682 | goto fail0; | 1680 | goto fail0; |
1683 | } | 1681 | } |
1684 | 1682 | ||
1685 | mce_register_decode_chain(&sbridge_mce_dec); | ||
1686 | return 0; | 1683 | return 0; |
1687 | 1684 | ||
1688 | fail0: | 1685 | fail0: |
@@ -1811,8 +1808,10 @@ static int __init sbridge_init(void) | |||
1811 | 1808 | ||
1812 | pci_rc = pci_register_driver(&sbridge_driver); | 1809 | pci_rc = pci_register_driver(&sbridge_driver); |
1813 | 1810 | ||
1814 | if (pci_rc >= 0) | 1811 | if (pci_rc >= 0) { |
1812 | mce_register_decode_chain(&sbridge_mce_dec); | ||
1815 | return 0; | 1813 | return 0; |
1814 | } | ||
1816 | 1815 | ||
1817 | sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", | 1816 | sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", |
1818 | pci_rc); | 1817 | pci_rc); |
@@ -1828,6 +1827,7 @@ static void __exit sbridge_exit(void) | |||
1828 | { | 1827 | { |
1829 | debugf2("MC: " __FILE__ ": %s()\n", __func__); | 1828 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
1830 | pci_unregister_driver(&sbridge_driver); | 1829 | pci_unregister_driver(&sbridge_driver); |
1830 | mce_unregister_decode_chain(&sbridge_mce_dec); | ||
1831 | } | 1831 | } |
1832 | 1832 | ||
1833 | module_init(sbridge_init); | 1833 | module_init(sbridge_init); |
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c index 23416e443765..a4ed30bd9a41 100644 --- a/drivers/extcon/extcon-max8997.c +++ b/drivers/extcon/extcon-max8997.c | |||
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = { | |||
116 | [5] = "Charge-downstream", | 116 | [5] = "Charge-downstream", |
117 | [6] = "MHL", | 117 | [6] = "MHL", |
118 | [7] = "Dock-desk", | 118 | [7] = "Dock-desk", |
119 | [7] = "Dock-card", | 119 | [8] = "Dock-card", |
120 | [8] = "JIG", | 120 | [9] = "JIG", |
121 | 121 | ||
122 | NULL, | 122 | NULL, |
123 | }; | 123 | }; |
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev) | |||
514 | 514 | ||
515 | extcon_dev_unregister(info->edev); | 515 | extcon_dev_unregister(info->edev); |
516 | 516 | ||
517 | kfree(info->edev); | ||
517 | kfree(info); | 518 | kfree(info); |
518 | 519 | ||
519 | return 0; | 520 | return 0; |
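
[Editor's note] The first max8997 hunk above fixes a duplicated designated-initializer index in the cable-name array. In C, repeating an index silently overrides the earlier entry instead of being an error, so "Dock-card" was clobbering "Dock-desk" and the table ended up one entry short. A self-contained illustration with placeholder strings:

#include <stdio.h>

static const char *cables[4] = {
	[0] = "USB",
	[1] = "Dock-desk",
	[1] = "Dock-card",   /* duplicate index: "Dock-desk" is silently dropped */
	[2] = "JIG",
};                           /* cables[3] stays NULL -- the table is one entry short */

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("cables[%d] = %s\n", i, cables[i] ? cables[i] : "(null)");
	return 0;
}
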
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c index f598a700ec15..159aeb07b3ba 100644 --- a/drivers/extcon/extcon_class.c +++ b/drivers/extcon/extcon_class.c | |||
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev) | |||
762 | #if defined(CONFIG_ANDROID) | 762 | #if defined(CONFIG_ANDROID) |
763 | if (switch_class) | 763 | if (switch_class) |
764 | ret = class_compat_create_link(switch_class, edev->dev, | 764 | ret = class_compat_create_link(switch_class, edev->dev, |
765 | dev); | 765 | NULL); |
766 | #endif /* CONFIG_ANDROID */ | 766 | #endif /* CONFIG_ANDROID */ |
767 | 767 | ||
768 | spin_lock_init(&edev->lock); | 768 | spin_lock_init(&edev->lock); |
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c index fe7a07b47336..8a0dcc11c7c7 100644 --- a/drivers/extcon/extcon_gpio.c +++ b/drivers/extcon/extcon_gpio.c | |||
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev) | |||
125 | if (ret < 0) | 125 | if (ret < 0) |
126 | goto err_request_irq; | 126 | goto err_request_irq; |
127 | 127 | ||
128 | platform_set_drvdata(pdev, extcon_data); | ||
128 | /* Perform initial detection */ | 129 | /* Perform initial detection */ |
129 | gpio_extcon_work(&extcon_data->work.work); | 130 | gpio_extcon_work(&extcon_data->work.work); |
130 | 131 | ||
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev) | |||
146 | struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev); | 147 | struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev); |
147 | 148 | ||
148 | cancel_delayed_work_sync(&extcon_data->work); | 149 | cancel_delayed_work_sync(&extcon_data->work); |
150 | free_irq(extcon_data->irq, extcon_data); | ||
149 | gpio_free(extcon_data->gpio); | 151 | gpio_free(extcon_data->gpio); |
150 | extcon_dev_unregister(&extcon_data->edev); | 152 | extcon_dev_unregister(&extcon_data->edev); |
151 | devm_kfree(&pdev->dev, extcon_data); | 153 | devm_kfree(&pdev->dev, extcon_data); |
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 7bb00448e13d..b6453d0e44ad 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c | |||
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void) | |||
2833 | } | 2833 | } |
2834 | 2834 | ||
2835 | /* need to set base address for gpc4 */ | 2835 | /* need to set base address for gpc4 */ |
2836 | exonys5_gpios_1[11].base = gpio_base1 + 0x2E0; | 2836 | exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; |
2837 | 2837 | ||
2838 | /* need to set base address for gpx */ | 2838 | /* need to set base address for gpx */ |
2839 | chip = &exynos5_gpios_1[21]; | 2839 | chip = &exynos5_gpios_1[21]; |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index eb92fe257a39..5873e481e5d2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -610,7 +610,7 @@ static bool | |||
610 | drm_monitor_supports_rb(struct edid *edid) | 610 | drm_monitor_supports_rb(struct edid *edid) |
611 | { | 611 | { |
612 | if (edid->revision >= 4) { | 612 | if (edid->revision >= 4) { |
613 | bool ret; | 613 | bool ret = false; |
614 | drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); | 614 | drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); |
615 | return ret; | 615 | return ret; |
616 | } | 616 | } |
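
[Editor's note] The drm_edid hunk above initializes a bool that is handed to a block-walker callback as an out-parameter; if no block matches, the callback never stores to it and the caller would otherwise return stack garbage. A generic sketch of that bug class, where walk_blocks() and is_special() are made-up stand-ins for drm_for_each_detailed_block() and its callback:

#include <stdbool.h>
#include <stdio.h>

static void walk_blocks(int nblocks, void (*cb)(int block, void *data), void *data)
{
	for (int i = 0; i < nblocks; i++)
		cb(i, data);
}

static void is_special(int block, void *data)
{
	if (block == 42)              /* only ever sets the flag, never clears it */
		*(bool *)data = true;
}

static bool monitor_supports_rb(int nblocks)
{
	bool ret = false;             /* without this, 'ret' could be read uninitialized */

	walk_blocks(nblocks, is_special, &ret);
	return ret;
}

int main(void)
{
	printf("%d\n", monitor_supports_rb(4));   /* prints 0: callback never set the flag */
	return 0;
}
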
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 420953197d0a..d6de2e07fa03 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = { | |||
244 | }; | 244 | }; |
245 | 245 | ||
246 | static struct drm_driver exynos_drm_driver = { | 246 | static struct drm_driver exynos_drm_driver = { |
247 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | | 247 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | |
248 | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, | 248 | DRIVER_GEM | DRIVER_PRIME, |
249 | .load = exynos_drm_load, | 249 | .load = exynos_drm_load, |
250 | .unload = exynos_drm_unload, | 250 | .unload = exynos_drm_unload, |
251 | .open = exynos_drm_open, | 251 | .open = exynos_drm_open, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 6e9ac7bd1dcf..23d5ad379f86 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c | |||
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) | |||
172 | manager_ops->commit(manager->dev); | 172 | manager_ops->commit(manager->dev); |
173 | } | 173 | } |
174 | 174 | ||
175 | static struct drm_crtc * | ||
176 | exynos_drm_encoder_get_crtc(struct drm_encoder *encoder) | ||
177 | { | ||
178 | return encoder->crtc; | ||
179 | } | ||
180 | |||
181 | static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { | 175 | static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { |
182 | .dpms = exynos_drm_encoder_dpms, | 176 | .dpms = exynos_drm_encoder_dpms, |
183 | .mode_fixup = exynos_drm_encoder_mode_fixup, | 177 | .mode_fixup = exynos_drm_encoder_mode_fixup, |
184 | .mode_set = exynos_drm_encoder_mode_set, | 178 | .mode_set = exynos_drm_encoder_mode_set, |
185 | .prepare = exynos_drm_encoder_prepare, | 179 | .prepare = exynos_drm_encoder_prepare, |
186 | .commit = exynos_drm_encoder_commit, | 180 | .commit = exynos_drm_encoder_commit, |
187 | .get_crtc = exynos_drm_encoder_get_crtc, | ||
188 | }; | 181 | }; |
189 | 182 | ||
190 | static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) | 183 | static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index f82a299553fb..4ccfe4328fab 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
@@ -51,11 +51,22 @@ struct exynos_drm_fb { | |||
51 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) | 51 | static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) |
52 | { | 52 | { |
53 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); | 53 | struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); |
54 | unsigned int i; | ||
54 | 55 | ||
55 | DRM_DEBUG_KMS("%s\n", __FILE__); | 56 | DRM_DEBUG_KMS("%s\n", __FILE__); |
56 | 57 | ||
57 | drm_framebuffer_cleanup(fb); | 58 | drm_framebuffer_cleanup(fb); |
58 | 59 | ||
60 | for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { | ||
61 | struct drm_gem_object *obj; | ||
62 | |||
63 | if (exynos_fb->exynos_gem_obj[i] == NULL) | ||
64 | continue; | ||
65 | |||
66 | obj = &exynos_fb->exynos_gem_obj[i]->base; | ||
67 | drm_gem_object_unreference_unlocked(obj); | ||
68 | } | ||
69 | |||
59 | kfree(exynos_fb); | 70 | kfree(exynos_fb); |
60 | exynos_fb = NULL; | 71 | exynos_fb = NULL; |
61 | } | 72 | } |
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
134 | return ERR_PTR(-ENOENT); | 145 | return ERR_PTR(-ENOENT); |
135 | } | 146 | } |
136 | 147 | ||
137 | drm_gem_object_unreference_unlocked(obj); | ||
138 | |||
139 | fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); | 148 | fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); |
140 | if (IS_ERR(fb)) | 149 | if (IS_ERR(fb)) { |
150 | drm_gem_object_unreference_unlocked(obj); | ||
141 | return fb; | 151 | return fb; |
152 | } | ||
142 | 153 | ||
143 | exynos_fb = to_exynos_fb(fb); | 154 | exynos_fb = to_exynos_fb(fb); |
144 | nr = exynos_drm_format_num_buffers(fb->pixel_format); | 155 | nr = exynos_drm_format_num_buffers(fb->pixel_format); |
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
152 | return ERR_PTR(-ENOENT); | 163 | return ERR_PTR(-ENOENT); |
153 | } | 164 | } |
154 | 165 | ||
155 | drm_gem_object_unreference_unlocked(obj); | ||
156 | |||
157 | exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); | 166 | exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); |
158 | } | 167 | } |
159 | 168 | ||
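
[Editor's note] The exynos_drm_fb hunks above change when GEM references are dropped: the framebuffer keeps its reference for as long as it holds the object pointer, releasing it either on the create function's error path or in the destroy callback, never right after the lookup. A simplified stand-alone sketch of that lifetime rule, with an invented refcounted object type in place of the GEM object:

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
};

static struct object *object_get(struct object *o) { o->refcount++; return o; }

static void object_put(struct object *o)
{
	if (--o->refcount == 0) {
		puts("object freed");
		free(o);
	}
}

struct fb {
	struct object *obj;      /* holds one reference while the fb exists */
};

static struct fb *fb_create(struct object *o, int fail)
{
	struct fb *fb = malloc(sizeof(*fb));

	if (!fb)
		return NULL;
	fb->obj = object_get(o);
	if (fail) {              /* error after taking the reference: give it back */
		object_put(fb->obj);
		free(fb);
		return NULL;
	}
	return fb;
}

static void fb_destroy(struct fb *fb)
{
	object_put(fb->obj);     /* reference dropped only when the fb goes away */
	free(fb);
}

int main(void)
{
	struct object *o = calloc(1, sizeof(*o));

	o->refcount = 1;         /* the caller's own reference */
	struct fb *fb = fb_create(o, 0);
	fb_destroy(fb);
	object_put(o);           /* drop the caller's reference; object is freed */
	return 0;
}
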
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 3ecb30d93552..50823756cdea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h | |||
@@ -31,10 +31,10 @@ | |||
31 | static inline int exynos_drm_format_num_buffers(uint32_t format) | 31 | static inline int exynos_drm_format_num_buffers(uint32_t format) |
32 | { | 32 | { |
33 | switch (format) { | 33 | switch (format) { |
34 | case DRM_FORMAT_NV12M: | 34 | case DRM_FORMAT_NV12: |
35 | case DRM_FORMAT_NV12MT: | 35 | case DRM_FORMAT_NV12MT: |
36 | return 2; | 36 | return 2; |
37 | case DRM_FORMAT_YUV420M: | 37 | case DRM_FORMAT_YUV420: |
38 | return 3; | 38 | return 3; |
39 | default: | 39 | default: |
40 | return 1; | 40 | return 1; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index fc91293c4560..5c8b683029ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
689 | struct drm_device *dev, uint32_t handle, | 689 | struct drm_device *dev, uint32_t handle, |
690 | uint64_t *offset) | 690 | uint64_t *offset) |
691 | { | 691 | { |
692 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
693 | struct drm_gem_object *obj; | 692 | struct drm_gem_object *obj; |
694 | int ret = 0; | 693 | int ret = 0; |
695 | 694 | ||
@@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
710 | goto unlock; | 709 | goto unlock; |
711 | } | 710 | } |
712 | 711 | ||
713 | exynos_gem_obj = to_exynos_gem_obj(obj); | 712 | if (!obj->map_list.map) { |
714 | 713 | ret = drm_gem_create_mmap_offset(obj); | |
715 | if (!exynos_gem_obj->base.map_list.map) { | ||
716 | ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base); | ||
717 | if (ret) | 714 | if (ret) |
718 | goto out; | 715 | goto out; |
719 | } | 716 | } |
720 | 717 | ||
721 | *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT; | 718 | *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; |
722 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); | 719 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); |
723 | 720 | ||
724 | out: | 721 | out: |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 68ef01028375..e2147a2ddcec 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
@@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
365 | switch (win_data->pixel_format) { | 365 | switch (win_data->pixel_format) { |
366 | case DRM_FORMAT_NV12MT: | 366 | case DRM_FORMAT_NV12MT: |
367 | tiled_mode = true; | 367 | tiled_mode = true; |
368 | case DRM_FORMAT_NV12M: | 368 | case DRM_FORMAT_NV12: |
369 | crcb_mode = false; | 369 | crcb_mode = false; |
370 | buf_num = 2; | 370 | buf_num = 2; |
371 | break; | 371 | break; |
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx) | |||
601 | mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); | 601 | mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); |
602 | 602 | ||
603 | /* setting graphical layers */ | 603 | /* setting graphical layers */ |
604 | |||
605 | val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ | 604 | val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ |
606 | val |= MXR_GRP_CFG_WIN_BLEND_EN; | 605 | val |= MXR_GRP_CFG_WIN_BLEND_EN; |
606 | val |= MXR_GRP_CFG_BLEND_PRE_MUL; | ||
607 | val |= MXR_GRP_CFG_PIXEL_BLEND_EN; | ||
607 | val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ | 608 | val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ |
608 | 609 | ||
609 | /* the same configuration for both layers */ | 610 | /* the same configuration for both layers */ |
610 | mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); | 611 | mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); |
611 | |||
612 | val |= MXR_GRP_CFG_BLEND_PRE_MUL; | ||
613 | val |= MXR_GRP_CFG_PIXEL_BLEND_EN; | ||
614 | mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); | 612 | mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); |
615 | 613 | ||
614 | /* setting video layers */ | ||
615 | val = MXR_GRP_CFG_ALPHA_VAL(0); | ||
616 | mixer_reg_write(res, MXR_VIDEO_CFG, val); | ||
617 | |||
616 | /* configuration of Video Processor Registers */ | 618 | /* configuration of Video Processor Registers */ |
617 | vp_win_reset(ctx); | 619 | vp_win_reset(ctx); |
618 | vp_default_filter(res); | 620 | vp_default_filter(res); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 238a52165833..9fe9ebe52a7a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { | |||
233 | .has_blt_ring = 1, | 233 | .has_blt_ring = 1, |
234 | .has_llc = 1, | 234 | .has_llc = 1, |
235 | .has_pch_split = 1, | 235 | .has_pch_split = 1, |
236 | .has_force_wake = 1, | ||
236 | }; | 237 | }; |
237 | 238 | ||
238 | static const struct intel_device_info intel_sandybridge_m_info = { | 239 | static const struct intel_device_info intel_sandybridge_m_info = { |
@@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { | |||
243 | .has_blt_ring = 1, | 244 | .has_blt_ring = 1, |
244 | .has_llc = 1, | 245 | .has_llc = 1, |
245 | .has_pch_split = 1, | 246 | .has_pch_split = 1, |
247 | .has_force_wake = 1, | ||
246 | }; | 248 | }; |
247 | 249 | ||
248 | static const struct intel_device_info intel_ivybridge_d_info = { | 250 | static const struct intel_device_info intel_ivybridge_d_info = { |
@@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = { | |||
252 | .has_blt_ring = 1, | 254 | .has_blt_ring = 1, |
253 | .has_llc = 1, | 255 | .has_llc = 1, |
254 | .has_pch_split = 1, | 256 | .has_pch_split = 1, |
257 | .has_force_wake = 1, | ||
255 | }; | 258 | }; |
256 | 259 | ||
257 | static const struct intel_device_info intel_ivybridge_m_info = { | 260 | static const struct intel_device_info intel_ivybridge_m_info = { |
@@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = { | |||
262 | .has_blt_ring = 1, | 265 | .has_blt_ring = 1, |
263 | .has_llc = 1, | 266 | .has_llc = 1, |
264 | .has_pch_split = 1, | 267 | .has_pch_split = 1, |
268 | .has_force_wake = 1, | ||
265 | }; | 269 | }; |
266 | 270 | ||
267 | static const struct intel_device_info intel_valleyview_m_info = { | 271 | static const struct intel_device_info intel_valleyview_m_info = { |
@@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = { | |||
289 | .has_blt_ring = 1, | 293 | .has_blt_ring = 1, |
290 | .has_llc = 1, | 294 | .has_llc = 1, |
291 | .has_pch_split = 1, | 295 | .has_pch_split = 1, |
296 | .has_force_wake = 1, | ||
292 | }; | 297 | }; |
293 | 298 | ||
294 | static const struct intel_device_info intel_haswell_m_info = { | 299 | static const struct intel_device_info intel_haswell_m_info = { |
@@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = { | |||
298 | .has_blt_ring = 1, | 303 | .has_blt_ring = 1, |
299 | .has_llc = 1, | 304 | .has_llc = 1, |
300 | .has_pch_split = 1, | 305 | .has_pch_split = 1, |
306 | .has_force_wake = 1, | ||
301 | }; | 307 | }; |
302 | 308 | ||
303 | static const struct pci_device_id pciidlist[] = { /* aka */ | 309 | static const struct pci_device_id pciidlist[] = { /* aka */ |
@@ -1139,10 +1145,9 @@ MODULE_LICENSE("GPL and additional rights"); | |||
1139 | 1145 | ||
1140 | /* We give fast paths for the really cool registers */ | 1146 | /* We give fast paths for the really cool registers */ |
1141 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | 1147 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
1142 | (((dev_priv)->info->gen >= 6) && \ | 1148 | ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ |
1143 | ((reg) < 0x40000) && \ | 1149 | ((reg) < 0x40000) && \ |
1144 | ((reg) != FORCEWAKE)) && \ | 1150 | ((reg) != FORCEWAKE)) |
1145 | (!IS_VALLEYVIEW((dev_priv)->dev)) | ||
1146 | 1151 | ||
1147 | #define __i915_read(x, y) \ | 1152 | #define __i915_read(x, y) \ |
1148 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1153 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c9cfc67c2cf5..b0b676abde0d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -285,6 +285,7 @@ struct intel_device_info { | |||
285 | u8 is_ivybridge:1; | 285 | u8 is_ivybridge:1; |
286 | u8 is_valleyview:1; | 286 | u8 is_valleyview:1; |
287 | u8 has_pch_split:1; | 287 | u8 has_pch_split:1; |
288 | u8 has_force_wake:1; | ||
288 | u8 is_haswell:1; | 289 | u8 is_haswell:1; |
289 | u8 has_fbc:1; | 290 | u8 has_fbc:1; |
290 | u8 has_pipe_cxsr:1; | 291 | u8 has_pipe_cxsr:1; |
@@ -1101,6 +1102,8 @@ struct drm_i915_file_private { | |||
1101 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | 1102 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
1102 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | 1103 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
1103 | 1104 | ||
1105 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) | ||
1106 | |||
1104 | #include "i915_trace.h" | 1107 | #include "i915_trace.h" |
1105 | 1108 | ||
1106 | /** | 1109 | /** |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1417660a93ec..b1fe0edda955 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -510,7 +510,7 @@ out: | |||
510 | return ret; | 510 | return ret; |
511 | } | 511 | } |
512 | 512 | ||
513 | static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) | 513 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
514 | { | 514 | { |
515 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 515 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
516 | int pipe; | 516 | int pipe; |
@@ -550,6 +550,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
550 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | 550 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); |
551 | } | 551 | } |
552 | 552 | ||
553 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | ||
554 | { | ||
555 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
556 | int pipe; | ||
557 | |||
558 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) | ||
559 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | ||
560 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | ||
561 | SDE_AUDIO_POWER_SHIFT_CPT); | ||
562 | |||
563 | if (pch_iir & SDE_AUX_MASK_CPT) | ||
564 | DRM_DEBUG_DRIVER("AUX channel interrupt\n"); | ||
565 | |||
566 | if (pch_iir & SDE_GMBUS_CPT) | ||
567 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | ||
568 | |||
569 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | ||
570 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | ||
571 | |||
572 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | ||
573 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | ||
574 | |||
575 | if (pch_iir & SDE_FDI_MASK_CPT) | ||
576 | for_each_pipe(pipe) | ||
577 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | ||
578 | pipe_name(pipe), | ||
579 | I915_READ(FDI_RX_IIR(pipe))); | ||
580 | } | ||
581 | |||
553 | static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | 582 | static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) |
554 | { | 583 | { |
555 | struct drm_device *dev = (struct drm_device *) arg; | 584 | struct drm_device *dev = (struct drm_device *) arg; |
@@ -591,7 +620,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | |||
591 | 620 | ||
592 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | 621 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) |
593 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 622 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
594 | pch_irq_handler(dev, pch_iir); | 623 | cpt_irq_handler(dev, pch_iir); |
595 | 624 | ||
596 | /* clear PCH hotplug event before clearing CPU irq */ | 625 | /* clear PCH hotplug event before clearing CPU irq */ |
597 | I915_WRITE(SDEIIR, pch_iir); | 626 | I915_WRITE(SDEIIR, pch_iir); |
@@ -684,7 +713,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
684 | if (de_iir & DE_PCH_EVENT) { | 713 | if (de_iir & DE_PCH_EVENT) { |
685 | if (pch_iir & hotplug_mask) | 714 | if (pch_iir & hotplug_mask) |
686 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 715 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
687 | pch_irq_handler(dev, pch_iir); | 716 | if (HAS_PCH_CPT(dev)) |
717 | cpt_irq_handler(dev, pch_iir); | ||
718 | else | ||
719 | ibx_irq_handler(dev, pch_iir); | ||
688 | } | 720 | } |
689 | 721 | ||
690 | if (de_iir & DE_PCU_EVENT) { | 722 | if (de_iir & DE_PCU_EVENT) { |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2d49b9507ed0..48d5e8e051cf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -210,6 +210,14 @@ | |||
210 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | 210 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) |
211 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) | 211 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) |
212 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | 212 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) |
213 | /* IVB has funny definitions for which plane to flip. */ | ||
214 | #define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) | ||
215 | #define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) | ||
216 | #define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) | ||
217 | #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) | ||
218 | #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) | ||
219 | #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) | ||
220 | |||
213 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) | 221 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) |
214 | #define MI_MM_SPACE_GTT (1<<8) | 222 | #define MI_MM_SPACE_GTT (1<<8) |
215 | #define MI_MM_SPACE_PHYSICAL (0<<8) | 223 | #define MI_MM_SPACE_PHYSICAL (0<<8) |
@@ -3313,7 +3321,7 @@ | |||
3313 | 3321 | ||
3314 | /* PCH */ | 3322 | /* PCH */ |
3315 | 3323 | ||
3316 | /* south display engine interrupt */ | 3324 | /* south display engine interrupt: IBX */ |
3317 | #define SDE_AUDIO_POWER_D (1 << 27) | 3325 | #define SDE_AUDIO_POWER_D (1 << 27) |
3318 | #define SDE_AUDIO_POWER_C (1 << 26) | 3326 | #define SDE_AUDIO_POWER_C (1 << 26) |
3319 | #define SDE_AUDIO_POWER_B (1 << 25) | 3327 | #define SDE_AUDIO_POWER_B (1 << 25) |
@@ -3349,15 +3357,44 @@ | |||
3349 | #define SDE_TRANSA_CRC_ERR (1 << 1) | 3357 | #define SDE_TRANSA_CRC_ERR (1 << 1) |
3350 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) | 3358 | #define SDE_TRANSA_FIFO_UNDER (1 << 0) |
3351 | #define SDE_TRANS_MASK (0x3f) | 3359 | #define SDE_TRANS_MASK (0x3f) |
3352 | /* CPT */ | 3360 | |
3353 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | 3361 | /* south display engine interrupt: CPT/PPT */ |
3362 | #define SDE_AUDIO_POWER_D_CPT (1 << 31) | ||
3363 | #define SDE_AUDIO_POWER_C_CPT (1 << 30) | ||
3364 | #define SDE_AUDIO_POWER_B_CPT (1 << 29) | ||
3365 | #define SDE_AUDIO_POWER_SHIFT_CPT 29 | ||
3366 | #define SDE_AUDIO_POWER_MASK_CPT (7 << 29) | ||
3367 | #define SDE_AUXD_CPT (1 << 27) | ||
3368 | #define SDE_AUXC_CPT (1 << 26) | ||
3369 | #define SDE_AUXB_CPT (1 << 25) | ||
3370 | #define SDE_AUX_MASK_CPT (7 << 25) | ||
3354 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 3371 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
3355 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | 3372 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) |
3356 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | 3373 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) |
3374 | #define SDE_CRT_HOTPLUG_CPT (1 << 19) | ||
3357 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ | 3375 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ |
3358 | SDE_PORTD_HOTPLUG_CPT | \ | 3376 | SDE_PORTD_HOTPLUG_CPT | \ |
3359 | SDE_PORTC_HOTPLUG_CPT | \ | 3377 | SDE_PORTC_HOTPLUG_CPT | \ |
3360 | SDE_PORTB_HOTPLUG_CPT) | 3378 | SDE_PORTB_HOTPLUG_CPT) |
3379 | #define SDE_GMBUS_CPT (1 << 17) | ||
3380 | #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) | ||
3381 | #define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) | ||
3382 | #define SDE_FDI_RXC_CPT (1 << 8) | ||
3383 | #define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) | ||
3384 | #define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) | ||
3385 | #define SDE_FDI_RXB_CPT (1 << 4) | ||
3386 | #define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) | ||
3387 | #define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) | ||
3388 | #define SDE_FDI_RXA_CPT (1 << 0) | ||
3389 | #define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ | ||
3390 | SDE_AUDIO_CP_REQ_B_CPT | \ | ||
3391 | SDE_AUDIO_CP_REQ_A_CPT) | ||
3392 | #define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ | ||
3393 | SDE_AUDIO_CP_CHG_B_CPT | \ | ||
3394 | SDE_AUDIO_CP_CHG_A_CPT) | ||
3395 | #define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ | ||
3396 | SDE_FDI_RXB_CPT | \ | ||
3397 | SDE_FDI_RXA_CPT) | ||
3361 | 3398 | ||
3362 | #define SDEISR 0xc4000 | 3399 | #define SDEISR 0xc4000 |
3363 | #define SDEIMR 0xc4004 | 3400 | #define SDEIMR 0xc4004 |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 914789420906..a7c727d0c105 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -6158,17 +6158,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
6158 | struct drm_i915_private *dev_priv = dev->dev_private; | 6158 | struct drm_i915_private *dev_priv = dev->dev_private; |
6159 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6159 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6160 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | 6160 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; |
6161 | uint32_t plane_bit = 0; | ||
6161 | int ret; | 6162 | int ret; |
6162 | 6163 | ||
6163 | ret = intel_pin_and_fence_fb_obj(dev, obj, ring); | 6164 | ret = intel_pin_and_fence_fb_obj(dev, obj, ring); |
6164 | if (ret) | 6165 | if (ret) |
6165 | goto err; | 6166 | goto err; |
6166 | 6167 | ||
6168 | switch (intel_crtc->plane) { | ||
6169 | case PLANE_A: | ||
6170 | plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; | ||
6171 | break; | ||
6172 | case PLANE_B: | ||
6173 | plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; | ||
6174 | break; | ||
6175 | case PLANE_C: | ||
6176 | plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; | ||
6177 | break; | ||
6178 | default: | ||
6179 | WARN_ONCE(1, "unknown plane in flip command\n"); | ||
6180 | ret = -ENODEV; | ||
6181 | goto err; | ||
6182 | } | ||
6183 | |||
6167 | ret = intel_ring_begin(ring, 4); | 6184 | ret = intel_ring_begin(ring, 4); |
6168 | if (ret) | 6185 | if (ret) |
6169 | goto err_unpin; | 6186 | goto err_unpin; |
6170 | 6187 | ||
6171 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); | 6188 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); |
6172 | intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); | 6189 | intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); |
6173 | intel_ring_emit(ring, (obj->gtt_offset)); | 6190 | intel_ring_emit(ring, (obj->gtt_offset)); |
6174 | intel_ring_emit(ring, (MI_NOOP)); | 6191 | intel_ring_emit(ring, (MI_NOOP)); |
@@ -6541,7 +6558,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
6541 | if (I915_READ(HDMIC) & PORT_DETECTED) | 6558 | if (I915_READ(HDMIC) & PORT_DETECTED) |
6542 | intel_hdmi_init(dev, HDMIC); | 6559 | intel_hdmi_init(dev, HDMIC); |
6543 | 6560 | ||
6544 | if (I915_READ(HDMID) & PORT_DETECTED) | 6561 | if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) |
6545 | intel_hdmi_init(dev, HDMID); | 6562 | intel_hdmi_init(dev, HDMID); |
6546 | 6563 | ||
6547 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | 6564 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 296cfc201a81..c0449324143c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "drm.h" | 32 | #include "drm.h" |
33 | #include "drm_crtc.h" | 33 | #include "drm_crtc.h" |
34 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
35 | #include "drm_edid.h" | ||
35 | #include "intel_drv.h" | 36 | #include "intel_drv.h" |
36 | #include "i915_drm.h" | 37 | #include "i915_drm.h" |
37 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
@@ -67,6 +68,8 @@ struct intel_dp { | |||
67 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ | 68 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ |
68 | struct delayed_work panel_vdd_work; | 69 | struct delayed_work panel_vdd_work; |
69 | bool want_panel_vdd; | 70 | bool want_panel_vdd; |
71 | struct edid *edid; /* cached EDID for eDP */ | ||
72 | int edid_mode_count; | ||
70 | }; | 73 | }; |
71 | 74 | ||
72 | /** | 75 | /** |
@@ -371,7 +374,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
371 | int recv_bytes; | 374 | int recv_bytes; |
372 | uint32_t status; | 375 | uint32_t status; |
373 | uint32_t aux_clock_divider; | 376 | uint32_t aux_clock_divider; |
374 | int try, precharge = 5; | 377 | int try, precharge; |
375 | 378 | ||
376 | intel_dp_check_edp(intel_dp); | 379 | intel_dp_check_edp(intel_dp); |
377 | /* The clock divider is based off the hrawclk, | 380 | /* The clock divider is based off the hrawclk, |
@@ -391,6 +394,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
391 | else | 394 | else |
392 | aux_clock_divider = intel_hrawclk(dev) / 2; | 395 | aux_clock_divider = intel_hrawclk(dev) / 2; |
393 | 396 | ||
397 | if (IS_GEN6(dev)) | ||
398 | precharge = 3; | ||
399 | else | ||
400 | precharge = 5; | ||
401 | |||
394 | /* Try to wait for any previous AUX channel activity */ | 402 | /* Try to wait for any previous AUX channel activity */ |
395 | for (try = 0; try < 3; try++) { | 403 | for (try = 0; try < 3; try++) { |
396 | status = I915_READ(ch_ctl); | 404 | status = I915_READ(ch_ctl); |
@@ -1973,6 +1981,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) | |||
1973 | if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) | 1981 | if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) |
1974 | return; | 1982 | return; |
1975 | 1983 | ||
1984 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1985 | |||
1976 | if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) | 1986 | if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) |
1977 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", | 1987 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", |
1978 | buf[0], buf[1], buf[2]); | 1988 | buf[0], buf[1], buf[2]); |
@@ -1980,6 +1990,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) | |||
1980 | if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) | 1990 | if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) |
1981 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", | 1991 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", |
1982 | buf[0], buf[1], buf[2]); | 1992 | buf[0], buf[1], buf[2]); |
1993 | |||
1994 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
1983 | } | 1995 | } |
1984 | 1996 | ||
1985 | static bool | 1997 | static bool |
@@ -2116,10 +2128,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | |||
2116 | { | 2128 | { |
2117 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2129 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2118 | struct edid *edid; | 2130 | struct edid *edid; |
2131 | int size; | ||
2132 | |||
2133 | if (is_edp(intel_dp)) { | ||
2134 | if (!intel_dp->edid) | ||
2135 | return NULL; | ||
2136 | |||
2137 | size = (intel_dp->edid->extensions + 1) * EDID_LENGTH; | ||
2138 | edid = kmalloc(size, GFP_KERNEL); | ||
2139 | if (!edid) | ||
2140 | return NULL; | ||
2141 | |||
2142 | memcpy(edid, intel_dp->edid, size); | ||
2143 | return edid; | ||
2144 | } | ||
2119 | 2145 | ||
2120 | ironlake_edp_panel_vdd_on(intel_dp); | ||
2121 | edid = drm_get_edid(connector, adapter); | 2146 | edid = drm_get_edid(connector, adapter); |
2122 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
2123 | return edid; | 2147 | return edid; |
2124 | } | 2148 | } |
2125 | 2149 | ||
@@ -2129,9 +2153,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada | |||
2129 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2153 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2130 | int ret; | 2154 | int ret; |
2131 | 2155 | ||
2132 | ironlake_edp_panel_vdd_on(intel_dp); | 2156 | if (is_edp(intel_dp)) { |
2157 | drm_mode_connector_update_edid_property(connector, | ||
2158 | intel_dp->edid); | ||
2159 | ret = drm_add_edid_modes(connector, intel_dp->edid); | ||
2160 | drm_edid_to_eld(connector, | ||
2161 | intel_dp->edid); | ||
2162 | connector->display_info.raw_edid = NULL; | ||
2163 | return intel_dp->edid_mode_count; | ||
2164 | } | ||
2165 | |||
2133 | ret = intel_ddc_get_modes(connector, adapter); | 2166 | ret = intel_ddc_get_modes(connector, adapter); |
2134 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
2135 | return ret; | 2167 | return ret; |
2136 | } | 2168 | } |
2137 | 2169 | ||
@@ -2321,6 +2353,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
2321 | i2c_del_adapter(&intel_dp->adapter); | 2353 | i2c_del_adapter(&intel_dp->adapter); |
2322 | drm_encoder_cleanup(encoder); | 2354 | drm_encoder_cleanup(encoder); |
2323 | if (is_edp(intel_dp)) { | 2355 | if (is_edp(intel_dp)) { |
2356 | kfree(intel_dp->edid); | ||
2324 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 2357 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
2325 | ironlake_panel_vdd_off_sync(intel_dp); | 2358 | ironlake_panel_vdd_off_sync(intel_dp); |
2326 | } | 2359 | } |
@@ -2504,11 +2537,14 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2504 | break; | 2537 | break; |
2505 | } | 2538 | } |
2506 | 2539 | ||
2540 | intel_dp_i2c_init(intel_dp, intel_connector, name); | ||
2541 | |||
2507 | /* Cache some DPCD data in the eDP case */ | 2542 | /* Cache some DPCD data in the eDP case */ |
2508 | if (is_edp(intel_dp)) { | 2543 | if (is_edp(intel_dp)) { |
2509 | bool ret; | 2544 | bool ret; |
2510 | struct edp_power_seq cur, vbt; | 2545 | struct edp_power_seq cur, vbt; |
2511 | u32 pp_on, pp_off, pp_div; | 2546 | u32 pp_on, pp_off, pp_div; |
2547 | struct edid *edid; | ||
2512 | 2548 | ||
2513 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | 2549 | pp_on = I915_READ(PCH_PP_ON_DELAYS); |
2514 | pp_off = I915_READ(PCH_PP_OFF_DELAYS); | 2550 | pp_off = I915_READ(PCH_PP_OFF_DELAYS); |
@@ -2576,9 +2612,19 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2576 | intel_dp_destroy(&intel_connector->base); | 2612 | intel_dp_destroy(&intel_connector->base); |
2577 | return; | 2613 | return; |
2578 | } | 2614 | } |
2579 | } | ||
2580 | 2615 | ||
2581 | intel_dp_i2c_init(intel_dp, intel_connector, name); | 2616 | ironlake_edp_panel_vdd_on(intel_dp); |
2617 | edid = drm_get_edid(connector, &intel_dp->adapter); | ||
2618 | if (edid) { | ||
2619 | drm_mode_connector_update_edid_property(connector, | ||
2620 | edid); | ||
2621 | intel_dp->edid_mode_count = | ||
2622 | drm_add_edid_modes(connector, edid); | ||
2623 | drm_edid_to_eld(connector, edid); | ||
2624 | intel_dp->edid = edid; | ||
2625 | } | ||
2626 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
2627 | } | ||
2582 | 2628 | ||
2583 | intel_encoder->hot_plug = intel_dp_hot_plug; | 2629 | intel_encoder->hot_plug = intel_dp_hot_plug; |
2584 | 2630 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b59b6d5b7583..e5b84ff89ca5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -266,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) | |||
266 | 266 | ||
267 | static int init_ring_common(struct intel_ring_buffer *ring) | 267 | static int init_ring_common(struct intel_ring_buffer *ring) |
268 | { | 268 | { |
269 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 269 | struct drm_device *dev = ring->dev; |
270 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
270 | struct drm_i915_gem_object *obj = ring->obj; | 271 | struct drm_i915_gem_object *obj = ring->obj; |
272 | int ret = 0; | ||
271 | u32 head; | 273 | u32 head; |
272 | 274 | ||
275 | if (HAS_FORCE_WAKE(dev)) | ||
276 | gen6_gt_force_wake_get(dev_priv); | ||
277 | |||
273 | /* Stop the ring if it's running. */ | 278 | /* Stop the ring if it's running. */ |
274 | I915_WRITE_CTL(ring, 0); | 279 | I915_WRITE_CTL(ring, 0); |
275 | I915_WRITE_HEAD(ring, 0); | 280 | I915_WRITE_HEAD(ring, 0); |
@@ -317,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
317 | I915_READ_HEAD(ring), | 322 | I915_READ_HEAD(ring), |
318 | I915_READ_TAIL(ring), | 323 | I915_READ_TAIL(ring), |
319 | I915_READ_START(ring)); | 324 | I915_READ_START(ring)); |
320 | return -EIO; | 325 | ret = -EIO; |
326 | goto out; | ||
321 | } | 327 | } |
322 | 328 | ||
323 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) | 329 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) |
@@ -326,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
326 | ring->head = I915_READ_HEAD(ring); | 332 | ring->head = I915_READ_HEAD(ring); |
327 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 333 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
328 | ring->space = ring_space(ring); | 334 | ring->space = ring_space(ring); |
335 | ring->last_retired_head = -1; | ||
329 | } | 336 | } |
330 | 337 | ||
331 | return 0; | 338 | out: |
339 | if (HAS_FORCE_WAKE(dev)) | ||
340 | gen6_gt_force_wake_put(dev_priv); | ||
341 | |||
342 | return ret; | ||
332 | } | 343 | } |
333 | 344 | ||
334 | static int | 345 | static int |
@@ -987,6 +998,10 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
987 | if (ret) | 998 | if (ret) |
988 | goto err_unref; | 999 | goto err_unref; |
989 | 1000 | ||
1001 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | ||
1002 | if (ret) | ||
1003 | goto err_unpin; | ||
1004 | |||
990 | ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, | 1005 | ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, |
991 | ring->size); | 1006 | ring->size); |
992 | if (ring->virtual_start == NULL) { | 1007 | if (ring->virtual_start == NULL) { |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 01d77d1554f4..3904d7964a4b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | if (tiling_flags & RADEON_TILING_MACRO) { | 1151 | if (tiling_flags & RADEON_TILING_MACRO) { |
1152 | if (rdev->family >= CHIP_CAYMAN) | 1152 | if (rdev->family >= CHIP_TAHITI) |
1153 | tmp = rdev->config.si.tile_config; | ||
1154 | else if (rdev->family >= CHIP_CAYMAN) | ||
1153 | tmp = rdev->config.cayman.tile_config; | 1155 | tmp = rdev->config.cayman.tile_config; |
1154 | else | 1156 | else |
1155 | tmp = rdev->config.evergreen.tile_config; | 1157 | tmp = rdev->config.evergreen.tile_config; |
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1177 | } else if (tiling_flags & RADEON_TILING_MICRO) | 1179 | } else if (tiling_flags & RADEON_TILING_MICRO) |
1178 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); | 1180 | fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); |
1179 | 1181 | ||
1182 | if ((rdev->family == CHIP_TAHITI) || | ||
1183 | (rdev->family == CHIP_PITCAIRN)) | ||
1184 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); | ||
1185 | else if (rdev->family == CHIP_VERDE) | ||
1186 | fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); | ||
1187 | |||
1180 | switch (radeon_crtc->crtc_id) { | 1188 | switch (radeon_crtc->crtc_id) { |
1181 | case 0: | 1189 | case 0: |
1182 | WREG32(AVIVO_D1VGA_CONTROL, 0); | 1190 | WREG32(AVIVO_D1VGA_CONTROL, 0); |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index e7b1ec5ae8c6..486ccdf4aacd 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1926 | 1926 | ||
1927 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { | 1927 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
1928 | r600_hdmi_enable(encoder); | 1928 | r600_hdmi_enable(encoder); |
1929 | if (ASIC_IS_DCE4(rdev)) | 1929 | if (ASIC_IS_DCE6(rdev)) |
1930 | ; /* TODO (use pointers instead of if-s?) */ | ||
1931 | else if (ASIC_IS_DCE4(rdev)) | ||
1930 | evergreen_hdmi_setmode(encoder, adjusted_mode); | 1932 | evergreen_hdmi_setmode(encoder, adjusted_mode); |
1931 | else | 1933 | else |
1932 | r600_hdmi_setmode(encoder, adjusted_mode); | 1934 | r600_hdmi_setmode(encoder, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 01550d05e273..7fb3d2e0434c 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1932,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1932 | smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); | 1932 | smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); |
1933 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); | 1933 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); |
1934 | 1934 | ||
1935 | if (rdev->family <= CHIP_SUMO2) | ||
1936 | WREG32(SMX_SAR_CTL0, 0x00010000); | ||
1937 | |||
1935 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | | 1938 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | |
1936 | POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | | 1939 | POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | |
1937 | SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); | 1940 | SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 4e7dd2b4843d..c16554122ccd 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -52,6 +52,7 @@ struct evergreen_cs_track { | |||
52 | u32 cb_color_view[12]; | 52 | u32 cb_color_view[12]; |
53 | u32 cb_color_pitch[12]; | 53 | u32 cb_color_pitch[12]; |
54 | u32 cb_color_slice[12]; | 54 | u32 cb_color_slice[12]; |
55 | u32 cb_color_slice_idx[12]; | ||
55 | u32 cb_color_attrib[12]; | 56 | u32 cb_color_attrib[12]; |
56 | u32 cb_color_cmask_slice[8];/* unused */ | 57 | u32 cb_color_cmask_slice[8];/* unused */ |
57 | u32 cb_color_fmask_slice[8];/* unused */ | 58 | u32 cb_color_fmask_slice[8];/* unused */ |
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track) | |||
127 | track->cb_color_info[i] = 0; | 128 | track->cb_color_info[i] = 0; |
128 | track->cb_color_view[i] = 0xFFFFFFFF; | 129 | track->cb_color_view[i] = 0xFFFFFFFF; |
129 | track->cb_color_pitch[i] = 0; | 130 | track->cb_color_pitch[i] = 0; |
130 | track->cb_color_slice[i] = 0; | 131 | track->cb_color_slice[i] = 0xfffffff; |
132 | track->cb_color_slice_idx[i] = 0; | ||
131 | } | 133 | } |
132 | track->cb_target_mask = 0xFFFFFFFF; | 134 | track->cb_target_mask = 0xFFFFFFFF; |
133 | track->cb_shader_mask = 0xFFFFFFFF; | 135 | track->cb_shader_mask = 0xFFFFFFFF; |
134 | track->cb_dirty = true; | 136 | track->cb_dirty = true; |
135 | 137 | ||
138 | track->db_depth_slice = 0xffffffff; | ||
136 | track->db_depth_view = 0xFFFFC000; | 139 | track->db_depth_view = 0xFFFFC000; |
137 | track->db_depth_size = 0xFFFFFFFF; | 140 | track->db_depth_size = 0xFFFFFFFF; |
138 | track->db_depth_control = 0xFFFFFFFF; | 141 | track->db_depth_control = 0xFFFFFFFF; |
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, | |||
250 | { | 253 | { |
251 | struct evergreen_cs_track *track = p->track; | 254 | struct evergreen_cs_track *track = p->track; |
252 | unsigned palign, halign, tileb, slice_pt; | 255 | unsigned palign, halign, tileb, slice_pt; |
256 | unsigned mtile_pr, mtile_ps, mtileb; | ||
253 | 257 | ||
254 | tileb = 64 * surf->bpe * surf->nsamples; | 258 | tileb = 64 * surf->bpe * surf->nsamples; |
255 | palign = track->group_size / (8 * surf->bpe * surf->nsamples); | ||
256 | palign = MAX(8, palign); | ||
257 | slice_pt = 1; | 259 | slice_pt = 1; |
258 | if (tileb > surf->tsplit) { | 260 | if (tileb > surf->tsplit) { |
259 | slice_pt = tileb / surf->tsplit; | 261 | slice_pt = tileb / surf->tsplit; |
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, | |||
262 | /* macro tile width & height */ | 264 | /* macro tile width & height */ |
263 | palign = (8 * surf->bankw * track->npipes) * surf->mtilea; | 265 | palign = (8 * surf->bankw * track->npipes) * surf->mtilea; |
264 | halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; | 266 | halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; |
265 | surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; | 267 | mtileb = (palign / 8) * (halign / 8) * tileb; |
268 | mtile_pr = surf->nbx / palign; | ||
269 | mtile_ps = (mtile_pr * surf->nby) / halign; | ||
270 | surf->layer_size = mtile_ps * mtileb * slice_pt; | ||
266 | surf->base_align = (palign / 8) * (halign / 8) * tileb; | 271 | surf->base_align = (palign / 8) * (halign / 8) * tileb; |
267 | surf->palign = palign; | 272 | surf->palign = palign; |
268 | surf->halign = halign; | 273 | surf->halign = halign; |
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i | |||
434 | 439 | ||
435 | offset += surf.layer_size * mslice; | 440 | offset += surf.layer_size * mslice; |
436 | if (offset > radeon_bo_size(track->cb_color_bo[id])) { | 441 | if (offset > radeon_bo_size(track->cb_color_bo[id])) { |
442 | /* old ddx are broken: they allocate the bo with w*h*bpp but | ||
443 | * program the slice with ALIGN(h, 8); catch this and patch the | ||
444 | * command stream. | ||
445 | */ | ||
446 | if (!surf.mode) { | ||
447 | volatile u32 *ib = p->ib.ptr; | ||
448 | unsigned long tmp, nby, bsize, size, min = 0; | ||
449 | |||
450 | /* find the height the ddx wants */ | ||
451 | if (surf.nby > 8) { | ||
452 | min = surf.nby - 8; | ||
453 | } | ||
454 | bsize = radeon_bo_size(track->cb_color_bo[id]); | ||
455 | tmp = track->cb_color_bo_offset[id] << 8; | ||
456 | for (nby = surf.nby; nby > min; nby--) { | ||
457 | size = nby * surf.nbx * surf.bpe * surf.nsamples; | ||
458 | if ((tmp + size * mslice) <= bsize) { | ||
459 | break; | ||
460 | } | ||
461 | } | ||
462 | if (nby > min) { | ||
463 | surf.nby = nby; | ||
464 | slice = ((nby * surf.nbx) / 64) - 1; | ||
465 | if (!evergreen_surface_check(p, &surf, "cb")) { | ||
466 | /* check if this one works */ | ||
467 | tmp += surf.layer_size * mslice; | ||
468 | if (tmp <= bsize) { | ||
469 | ib[track->cb_color_slice_idx[id]] = slice; | ||
470 | goto old_ddx_ok; | ||
471 | } | ||
472 | } | ||
473 | } | ||
474 | } | ||
437 | dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " | 475 | dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " |
438 | "offset %d, max layer %d, bo size %ld, slice %d)\n", | 476 | "offset %d, max layer %d, bo size %ld, slice %d)\n", |
439 | __func__, __LINE__, id, surf.layer_size, | 477 | __func__, __LINE__, id, surf.layer_size, |
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i | |||
446 | surf.tsplit, surf.mtilea); | 484 | surf.tsplit, surf.mtilea); |
447 | return -EINVAL; | 485 | return -EINVAL; |
448 | } | 486 | } |
487 | old_ddx_ok: | ||
449 | 488 | ||
450 | return 0; | 489 | return 0; |
451 | } | 490 | } |
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1532 | case CB_COLOR7_SLICE: | 1571 | case CB_COLOR7_SLICE: |
1533 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; | 1572 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; |
1534 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1573 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
1574 | track->cb_color_slice_idx[tmp] = idx; | ||
1535 | track->cb_dirty = true; | 1575 | track->cb_dirty = true; |
1536 | break; | 1576 | break; |
1537 | case CB_COLOR8_SLICE: | 1577 | case CB_COLOR8_SLICE: |
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1540 | case CB_COLOR11_SLICE: | 1580 | case CB_COLOR11_SLICE: |
1541 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; | 1581 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; |
1542 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1582 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
1583 | track->cb_color_slice_idx[tmp] = idx; | ||
1543 | track->cb_dirty = true; | 1584 | track->cb_dirty = true; |
1544 | break; | 1585 | break; |
1545 | case CB_COLOR0_ATTRIB: | 1586 | case CB_COLOR0_ATTRIB: |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index a51f880985f8..65c54160028b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode | |||
156 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 156 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
157 | uint32_t offset; | 157 | uint32_t offset; |
158 | 158 | ||
159 | if (ASIC_IS_DCE5(rdev)) | ||
160 | return; | ||
161 | |||
162 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 159 | /* Silent, r600_hdmi_enable will raise WARN for us */ |
163 | if (!dig->afmt->enabled) | 160 | if (!dig->afmt->enabled) |
164 | return; | 161 | return; |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 2773039b4902..b50b15c70498 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -503,6 +503,7 @@ | |||
503 | #define SCRATCH_UMSK 0x8540 | 503 | #define SCRATCH_UMSK 0x8540 |
504 | #define SCRATCH_ADDR 0x8544 | 504 | #define SCRATCH_ADDR 0x8544 |
505 | 505 | ||
506 | #define SMX_SAR_CTL0 0xA008 | ||
506 | #define SMX_DC_CTL0 0xA020 | 507 | #define SMX_DC_CTL0 0xA020 |
507 | #define USE_HASH_FUNCTION (1 << 0) | 508 | #define USE_HASH_FUNCTION (1 << 0) |
508 | #define NUMBER_OF_SETS(x) ((x) << 1) | 509 | #define NUMBER_OF_SETS(x) ((x) << 1) |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 3df4efa11942..b7bf18e40215 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -460,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
460 | rdev->config.cayman.max_pipes_per_simd = 4; | 460 | rdev->config.cayman.max_pipes_per_simd = 4; |
461 | rdev->config.cayman.max_tile_pipes = 2; | 461 | rdev->config.cayman.max_tile_pipes = 2; |
462 | if ((rdev->pdev->device == 0x9900) || | 462 | if ((rdev->pdev->device == 0x9900) || |
463 | (rdev->pdev->device == 0x9901)) { | 463 | (rdev->pdev->device == 0x9901) || |
464 | (rdev->pdev->device == 0x9905) || | ||
465 | (rdev->pdev->device == 0x9906) || | ||
466 | (rdev->pdev->device == 0x9907) || | ||
467 | (rdev->pdev->device == 0x9908) || | ||
468 | (rdev->pdev->device == 0x9909) || | ||
469 | (rdev->pdev->device == 0x9910) || | ||
470 | (rdev->pdev->device == 0x9917)) { | ||
464 | rdev->config.cayman.max_simds_per_se = 6; | 471 | rdev->config.cayman.max_simds_per_se = 6; |
465 | rdev->config.cayman.max_backends_per_se = 2; | 472 | rdev->config.cayman.max_backends_per_se = 2; |
466 | } else if ((rdev->pdev->device == 0x9903) || | 473 | } else if ((rdev->pdev->device == 0x9903) || |
467 | (rdev->pdev->device == 0x9904)) { | 474 | (rdev->pdev->device == 0x9904) || |
475 | (rdev->pdev->device == 0x990A) || | ||
476 | (rdev->pdev->device == 0x9913) || | ||
477 | (rdev->pdev->device == 0x9918)) { | ||
468 | rdev->config.cayman.max_simds_per_se = 4; | 478 | rdev->config.cayman.max_simds_per_se = 4; |
469 | rdev->config.cayman.max_backends_per_se = 2; | 479 | rdev->config.cayman.max_backends_per_se = 2; |
470 | } else if ((rdev->pdev->device == 0x9990) || | 480 | } else if ((rdev->pdev->device == 0x9919) || |
471 | (rdev->pdev->device == 0x9991)) { | 481 | (rdev->pdev->device == 0x9990) || |
482 | (rdev->pdev->device == 0x9991) || | ||
483 | (rdev->pdev->device == 0x9994) || | ||
484 | (rdev->pdev->device == 0x99A0)) { | ||
472 | rdev->config.cayman.max_simds_per_se = 3; | 485 | rdev->config.cayman.max_simds_per_se = 3; |
473 | rdev->config.cayman.max_backends_per_se = 1; | 486 | rdev->config.cayman.max_backends_per_se = 1; |
474 | } else { | 487 | } else { |
@@ -1290,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1290 | if (r) | 1303 | if (r) |
1291 | return r; | 1304 | return r; |
1292 | 1305 | ||
1306 | r = r600_audio_init(rdev); | ||
1307 | if (r) | ||
1308 | return r; | ||
1309 | |||
1293 | return 0; | 1310 | return 0; |
1294 | } | 1311 | } |
1295 | 1312 | ||
@@ -1316,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev) | |||
1316 | 1333 | ||
1317 | int cayman_suspend(struct radeon_device *rdev) | 1334 | int cayman_suspend(struct radeon_device *rdev) |
1318 | { | 1335 | { |
1336 | r600_audio_fini(rdev); | ||
1319 | /* FIXME: we should wait for ring to be empty */ | 1337 | /* FIXME: we should wait for ring to be empty */ |
1320 | radeon_ib_pool_suspend(rdev); | 1338 | radeon_ib_pool_suspend(rdev); |
1321 | radeon_vm_manager_suspend(rdev); | 1339 | radeon_vm_manager_suspend(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 45cfcea63507..bff627293812 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1839,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1839 | WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | | 1839 | WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | |
1840 | NUM_CLIP_SEQ(3))); | 1840 | NUM_CLIP_SEQ(3))); |
1841 | WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); | 1841 | WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); |
1842 | WREG32(VC_ENHANCE, 0); | ||
1842 | } | 1843 | } |
1843 | 1844 | ||
1844 | 1845 | ||
@@ -2426,6 +2427,12 @@ int r600_startup(struct radeon_device *rdev) | |||
2426 | if (r) | 2427 | if (r) |
2427 | return r; | 2428 | return r; |
2428 | 2429 | ||
2430 | r = r600_audio_init(rdev); | ||
2431 | if (r) { | ||
2432 | DRM_ERROR("radeon: audio init failed\n"); | ||
2433 | return r; | ||
2434 | } | ||
2435 | |||
2429 | return 0; | 2436 | return 0; |
2430 | } | 2437 | } |
2431 | 2438 | ||
@@ -2462,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev) | |||
2462 | return r; | 2469 | return r; |
2463 | } | 2470 | } |
2464 | 2471 | ||
2465 | r = r600_audio_init(rdev); | ||
2466 | if (r) { | ||
2467 | DRM_ERROR("radeon: audio resume failed\n"); | ||
2468 | return r; | ||
2469 | } | ||
2470 | |||
2471 | return r; | 2472 | return r; |
2472 | } | 2473 | } |
2473 | 2474 | ||
@@ -2577,9 +2578,6 @@ int r600_init(struct radeon_device *rdev) | |||
2577 | rdev->accel_working = false; | 2578 | rdev->accel_working = false; |
2578 | } | 2579 | } |
2579 | 2580 | ||
2580 | r = r600_audio_init(rdev); | ||
2581 | if (r) | ||
2582 | return r; /* TODO error handling */ | ||
2583 | return 0; | 2581 | return 0; |
2584 | } | 2582 | } |
2585 | 2583 | ||
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 7c4fa77f018f..79b55916cf90 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c | |||
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder) | |||
57 | */ | 57 | */ |
58 | static int r600_audio_chipset_supported(struct radeon_device *rdev) | 58 | static int r600_audio_chipset_supported(struct radeon_device *rdev) |
59 | { | 59 | { |
60 | return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev)) | 60 | return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev)) |
61 | || rdev->family == CHIP_RS600 | 61 | || rdev->family == CHIP_RS600 |
62 | || rdev->family == CHIP_RS690 | 62 | || rdev->family == CHIP_RS690 |
63 | || rdev->family == CHIP_RS740; | 63 | || rdev->family == CHIP_RS740; |
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
192 | struct radeon_device *rdev = dev->dev_private; | 192 | struct radeon_device *rdev = dev->dev_private; |
193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 193 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
194 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 194 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
195 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
195 | int base_rate = 48000; | 196 | int base_rate = 48000; |
196 | 197 | ||
197 | switch (radeon_encoder->encoder_id) { | 198 | switch (radeon_encoder->encoder_id) { |
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) | |||
217 | WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); | 218 | WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); |
218 | WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); | 219 | WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); |
219 | 220 | ||
220 | /* Some magic trigger or src sel? */ | 221 | /* Select DTO source */ |
221 | WREG32_P(0x5ac, 0x01, ~0x77); | 222 | WREG32(0x5ac, radeon_crtc->crtc_id); |
222 | } else { | 223 | } else { |
223 | switch (dig->dig_encoder) { | 224 | switch (dig->dig_encoder) { |
224 | case 0: | 225 | case 0: |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 0133f5f09bd6..ca87f7afaf23 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
2079 | return -EINVAL; | 2079 | return -EINVAL; |
2080 | } | 2080 | } |
2081 | break; | 2081 | break; |
2082 | case PACKET3_STRMOUT_BASE_UPDATE: | ||
2083 | if (p->family < CHIP_RV770) { | ||
2084 | DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); | ||
2085 | return -EINVAL; | ||
2086 | } | ||
2087 | if (pkt->count != 1) { | ||
2088 | DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n"); | ||
2089 | return -EINVAL; | ||
2090 | } | ||
2091 | if (idx_value > 3) { | ||
2092 | DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n"); | ||
2093 | return -EINVAL; | ||
2094 | } | ||
2095 | { | ||
2096 | u64 offset; | ||
2097 | |||
2098 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
2099 | if (r) { | ||
2100 | DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); | ||
2101 | return -EINVAL; | ||
2102 | } | ||
2103 | |||
2104 | if (reloc->robj != track->vgt_strmout_bo[idx_value]) { | ||
2105 | DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); | ||
2106 | return -EINVAL; | ||
2107 | } | ||
2108 | |||
2109 | offset = radeon_get_ib_value(p, idx+1) << 8; | ||
2110 | if (offset != track->vgt_strmout_bo_offset[idx_value]) { | ||
2111 | DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", | ||
2112 | offset, track->vgt_strmout_bo_offset[idx_value]); | ||
2113 | return -EINVAL; | ||
2114 | } | ||
2115 | |||
2116 | if ((offset + 4) > radeon_bo_size(reloc->robj)) { | ||
2117 | DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n", | ||
2118 | offset + 4, radeon_bo_size(reloc->robj)); | ||
2119 | return -EINVAL; | ||
2120 | } | ||
2121 | ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
2122 | } | ||
2123 | break; | ||
2082 | case PACKET3_SURFACE_BASE_UPDATE: | 2124 | case PACKET3_SURFACE_BASE_UPDATE: |
2083 | if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { | 2125 | if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { |
2084 | DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); | 2126 | DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 226379e00ac1..82a0a4c919c0 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
322 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 322 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
323 | uint32_t offset; | 323 | uint32_t offset; |
324 | 324 | ||
325 | if (ASIC_IS_DCE5(rdev)) | ||
326 | return; | ||
327 | |||
328 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 325 | /* Silent, r600_hdmi_enable will raise WARN for us */ |
329 | if (!dig->afmt->enabled) | 326 | if (!dig->afmt->enabled) |
330 | return; | 327 | return; |
@@ -348,7 +345,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
348 | WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, | 345 | WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, |
349 | HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ | 346 | HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ |
350 | HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ | 347 | HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ |
351 | HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */ | ||
352 | HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */ | 348 | HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */ |
353 | HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ | 349 | HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ |
354 | } | 350 | } |
@@ -484,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder) | |||
484 | uint32_t offset; | 480 | uint32_t offset; |
485 | u32 hdmi; | 481 | u32 hdmi; |
486 | 482 | ||
487 | if (ASIC_IS_DCE5(rdev)) | 483 | if (ASIC_IS_DCE6(rdev)) |
488 | return; | 484 | return; |
489 | 485 | ||
490 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 486 | /* Silent, r600_hdmi_enable will raise WARN for us */ |
@@ -544,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder) | |||
544 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 540 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
545 | uint32_t offset; | 541 | uint32_t offset; |
546 | 542 | ||
547 | if (ASIC_IS_DCE5(rdev)) | 543 | if (ASIC_IS_DCE6(rdev)) |
548 | return; | 544 | return; |
549 | 545 | ||
550 | /* Called for ATOM_ENCODER_MODE_HDMI only */ | 546 | /* Called for ATOM_ENCODER_MODE_HDMI only */ |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index a0dbf1fe6a40..025fd5b6c08c 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -485,6 +485,7 @@ | |||
485 | #define TC_L2_SIZE(x) ((x)<<5) | 485 | #define TC_L2_SIZE(x) ((x)<<5) |
486 | #define L2_DISABLE_LATE_HIT (1<<9) | 486 | #define L2_DISABLE_LATE_HIT (1<<9) |
487 | 487 | ||
488 | #define VC_ENHANCE 0x9714 | ||
488 | 489 | ||
489 | #define VGT_CACHE_INVALIDATION 0x88C4 | 490 | #define VGT_CACHE_INVALIDATION 0x88C4 |
490 | #define CACHE_INVALIDATION(x) ((x)<<0) | 491 | #define CACHE_INVALIDATION(x) ((x)<<0) |
@@ -1163,6 +1164,7 @@ | |||
1163 | #define PACKET3_SET_CTL_CONST 0x6F | 1164 | #define PACKET3_SET_CTL_CONST 0x6F |
1164 | #define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0 | 1165 | #define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0 |
1165 | #define PACKET3_SET_CTL_CONST_END 0x0003e200 | 1166 | #define PACKET3_SET_CTL_CONST_END 0x0003e200 |
1167 | #define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */ | ||
1166 | #define PACKET3_SURFACE_BASE_UPDATE 0x73 | 1168 | #define PACKET3_SURFACE_BASE_UPDATE 0x73 |
1167 | 1169 | ||
1168 | 1170 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 85dac33e3cce..fefcca55c1eb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1374,9 +1374,9 @@ struct cayman_asic { | |||
1374 | 1374 | ||
1375 | struct si_asic { | 1375 | struct si_asic { |
1376 | unsigned max_shader_engines; | 1376 | unsigned max_shader_engines; |
1377 | unsigned max_pipes_per_simd; | ||
1378 | unsigned max_tile_pipes; | 1377 | unsigned max_tile_pipes; |
1379 | unsigned max_simds_per_se; | 1378 | unsigned max_cu_per_sh; |
1379 | unsigned max_sh_per_se; | ||
1380 | unsigned max_backends_per_se; | 1380 | unsigned max_backends_per_se; |
1381 | unsigned max_texture_channel_caches; | 1381 | unsigned max_texture_channel_caches; |
1382 | unsigned max_gprs; | 1382 | unsigned max_gprs; |
@@ -1387,7 +1387,6 @@ struct si_asic { | |||
1387 | unsigned sc_hiz_tile_fifo_size; | 1387 | unsigned sc_hiz_tile_fifo_size; |
1388 | unsigned sc_earlyz_tile_fifo_size; | 1388 | unsigned sc_earlyz_tile_fifo_size; |
1389 | 1389 | ||
1390 | unsigned num_shader_engines; | ||
1391 | unsigned num_tile_pipes; | 1390 | unsigned num_tile_pipes; |
1392 | unsigned num_backends_per_se; | 1391 | unsigned num_backends_per_se; |
1393 | unsigned backend_disable_mask_per_asic; | 1392 | unsigned backend_disable_mask_per_asic; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f0bb2b543b13..2c4d53fd20c5 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -57,9 +57,11 @@ | |||
57 | * 2.13.0 - virtual memory support, streamout | 57 | * 2.13.0 - virtual memory support, streamout |
58 | * 2.14.0 - add evergreen tiling information | 58 | * 2.14.0 - add evergreen tiling information |
59 | * 2.15.0 - add max_pipes query | 59 | * 2.15.0 - add max_pipes query |
60 | * 2.16.0 - fix evergreen 2D tiled surface calculation | ||
61 | * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx | ||
60 | */ | 62 | */ |
61 | #define KMS_DRIVER_MAJOR 2 | 63 | #define KMS_DRIVER_MAJOR 2 |
62 | #define KMS_DRIVER_MINOR 15 | 64 | #define KMS_DRIVER_MINOR 17 |
63 | #define KMS_DRIVER_PATCHLEVEL 0 | 65 | #define KMS_DRIVER_PATCHLEVEL 0 |
64 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 66 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
65 | int radeon_driver_unload_kms(struct drm_device *dev); | 67 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 79db56e6c2ac..59d44937dd9f 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev, | |||
476 | 476 | ||
477 | mutex_lock(&vm->mutex); | 477 | mutex_lock(&vm->mutex); |
478 | if (last_pfn > vm->last_pfn) { | 478 | if (last_pfn > vm->last_pfn) { |
479 | /* grow va space 32M by 32M */ | 479 | /* release mutex and lock in right order */ |
480 | unsigned align = ((32 << 20) >> 12) - 1; | 480 | mutex_unlock(&vm->mutex); |
481 | radeon_mutex_lock(&rdev->cs_mutex); | 481 | radeon_mutex_lock(&rdev->cs_mutex); |
482 | radeon_vm_unbind_locked(rdev, vm); | 482 | mutex_lock(&vm->mutex); |
483 | /* and check again */ | ||
484 | if (last_pfn > vm->last_pfn) { | ||
485 | /* grow va space 32M by 32M */ | ||
486 | unsigned align = ((32 << 20) >> 12) - 1; | ||
487 | radeon_vm_unbind_locked(rdev, vm); | ||
488 | vm->last_pfn = (last_pfn + align) & ~align; | ||
489 | } | ||
483 | radeon_mutex_unlock(&rdev->cs_mutex); | 490 | radeon_mutex_unlock(&rdev->cs_mutex); |
484 | vm->last_pfn = (last_pfn + align) & ~align; | ||
485 | } | 491 | } |
486 | head = &vm->va; | 492 | head = &vm->va; |
487 | last_offset = 0; | 493 | last_offset = 0; |
@@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, | |||
595 | if (bo_va == NULL) | 601 | if (bo_va == NULL) |
596 | return 0; | 602 | return 0; |
597 | 603 | ||
598 | mutex_lock(&vm->mutex); | ||
599 | radeon_mutex_lock(&rdev->cs_mutex); | 604 | radeon_mutex_lock(&rdev->cs_mutex); |
605 | mutex_lock(&vm->mutex); | ||
600 | radeon_vm_bo_update_pte(rdev, vm, bo, NULL); | 606 | radeon_vm_bo_update_pte(rdev, vm, bo, NULL); |
601 | radeon_mutex_unlock(&rdev->cs_mutex); | 607 | radeon_mutex_unlock(&rdev->cs_mutex); |
602 | list_del(&bo_va->vm_list); | 608 | list_del(&bo_va->vm_list); |
@@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) | |||
641 | struct radeon_bo_va *bo_va, *tmp; | 647 | struct radeon_bo_va *bo_va, *tmp; |
642 | int r; | 648 | int r; |
643 | 649 | ||
644 | mutex_lock(&vm->mutex); | ||
645 | |||
646 | radeon_mutex_lock(&rdev->cs_mutex); | 650 | radeon_mutex_lock(&rdev->cs_mutex); |
651 | mutex_lock(&vm->mutex); | ||
647 | radeon_vm_unbind_locked(rdev, vm); | 652 | radeon_vm_unbind_locked(rdev, vm); |
648 | radeon_mutex_unlock(&rdev->cs_mutex); | 653 | radeon_mutex_unlock(&rdev->cs_mutex); |
649 | 654 | ||
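The radeon_gart.c change above keeps a single lock order across the driver (cs_mutex before vm->mutex, matching the two hunks that follow it) by dropping the VM mutex, reacquiring both locks in that order, and then re-testing the grow condition, since the VA space may already have been grown while no lock was held. A minimal user-space sketch of the same drop, relock and recheck pattern, with pthreads standing in for the kernel mutexes and all names invented:

#include <pthread.h>

static pthread_mutex_t cs_mutex = PTHREAD_MUTEX_INITIALIZER;  /* outer lock */
static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;  /* inner lock */
static unsigned long vm_last_pfn;

/* Called with vm_mutex held; returns with vm_mutex still held. */
static void grow_va_space(unsigned long last_pfn)
{
        if (last_pfn > vm_last_pfn) {
                /* release the inner lock and take both in the right order */
                pthread_mutex_unlock(&vm_mutex);
                pthread_mutex_lock(&cs_mutex);
                pthread_mutex_lock(&vm_mutex);
                /* the space may have grown while no lock was held: check again */
                if (last_pfn > vm_last_pfn) {
                        unsigned long align = ((32UL << 20) >> 12) - 1;
                        vm_last_pfn = (last_pfn + align) & ~align;
                }
                pthread_mutex_unlock(&cs_mutex);
        }
}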
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index f1016a5820d1..5c58d7d90cb2 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
273 | break; | 273 | break; |
274 | case RADEON_INFO_MAX_PIPES: | 274 | case RADEON_INFO_MAX_PIPES: |
275 | if (rdev->family >= CHIP_TAHITI) | 275 | if (rdev->family >= CHIP_TAHITI) |
276 | value = rdev->config.si.max_pipes_per_simd; | 276 | value = rdev->config.si.max_cu_per_sh; |
277 | else if (rdev->family >= CHIP_CAYMAN) | 277 | else if (rdev->family >= CHIP_CAYMAN) |
278 | value = rdev->config.cayman.max_pipes_per_simd; | 278 | value = rdev->config.cayman.max_pipes_per_simd; |
279 | else if (rdev->family >= CHIP_CEDAR) | 279 | else if (rdev->family >= CHIP_CEDAR) |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 08825548ee69..5b37e283ec38 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) | |||
801 | int i; | 801 | int i; |
802 | 802 | ||
803 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 803 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
804 | not_processed += radeon_fence_count_emitted(rdev, i); | 804 | struct radeon_ring *ring = &rdev->ring[i]; |
805 | if (not_processed >= 3) | 805 | |
806 | break; | 806 | if (ring->ready) { |
807 | not_processed += radeon_fence_count_emitted(rdev, i); | ||
808 | if (not_processed >= 3) | ||
809 | break; | ||
810 | } | ||
807 | } | 811 | } |
808 | 812 | ||
809 | if (not_processed >= 3) { /* should upclock */ | 813 | if (not_processed >= 3) { /* should upclock */ |
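The radeon_pm.c hunk makes the dynpm idle handler count emitted fences only on rings that are marked ready, so rings that were never brought up no longer influence the reclocking decision. Restated as a self-contained helper with stand-in state (ring_ready, fences_emitted and should_upclock are all invented):

#include <stdbool.h>

#define NUM_RINGS 5

/* stand-ins for the per-ring state the driver keeps */
static bool ring_ready[NUM_RINGS];
static int fences_emitted[NUM_RINGS];

static bool should_upclock(void)
{
        int not_processed = 0, i;

        for (i = 0; i < NUM_RINGS; ++i) {
                if (!ring_ready[i])
                        continue;               /* rings never started are ignored */
                not_processed += fences_emitted[i];
                if (not_processed >= 3)         /* enough backlog to justify upclocking */
                        return true;
        }
        return false;
}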
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 8ddab4c76710..6bef46ace831 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c | |||
@@ -169,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, | |||
169 | struct radeon_bo *bo = gem_to_radeon_bo(obj); | 169 | struct radeon_bo *bo = gem_to_radeon_bo(obj); |
170 | int ret = 0; | 170 | int ret = 0; |
171 | 171 | ||
172 | ret = radeon_bo_reserve(bo, false); | ||
173 | if (unlikely(ret != 0)) | ||
174 | return ERR_PTR(ret); | ||
175 | |||
172 | /* pin buffer into GTT */ | 176 | /* pin buffer into GTT */ |
173 | ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); | 177 | ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); |
174 | if (ret) | 178 | if (ret) { |
179 | radeon_bo_unreserve(bo); | ||
175 | return ERR_PTR(ret); | 180 | return ERR_PTR(ret); |
176 | 181 | } | |
182 | radeon_bo_unreserve(bo); | ||
177 | return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags); | 183 | return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags); |
178 | } | 184 | } |
179 | 185 | ||
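The radeon_prime.c fix brackets the pin with a reserve/unreserve pair, since the pin path goes through TTM validation, which expects the object to be reserved, and it makes sure the reservation is dropped on the error path too. A condensed sketch of the resulting flow (the radeon_* calls are the ones in the hunk; the ordering is compressed only to show that the unreserve happens on both paths):

        ret = radeon_bo_reserve(bo, false);     /* take the BO reservation */
        if (unlikely(ret != 0))
                return ERR_PTR(ret);

        ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
        radeon_bo_unreserve(bo);                /* dropped whether or not the pin succeeded */
        if (ret)
                return ERR_PTR(ret);

        return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);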
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 25f9eef12c42..e95c5e61d4e2 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev) | |||
908 | return r; | 908 | return r; |
909 | } | 909 | } |
910 | 910 | ||
911 | r = r600_audio_init(rdev); | ||
912 | if (r) { | ||
913 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
914 | return r; | ||
915 | } | ||
916 | |||
917 | r = radeon_ib_pool_start(rdev); | 911 | r = radeon_ib_pool_start(rdev); |
918 | if (r) | 912 | if (r) |
919 | return r; | 913 | return r; |
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev) | |||
922 | if (r) | 916 | if (r) |
923 | return r; | 917 | return r; |
924 | 918 | ||
919 | r = r600_audio_init(rdev); | ||
920 | if (r) { | ||
921 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
922 | return r; | ||
923 | } | ||
924 | |||
925 | return 0; | 925 | return 0; |
926 | } | 926 | } |
927 | 927 | ||
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3277ddecfe9f..159b6a43fda0 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev) | |||
637 | return r; | 637 | return r; |
638 | } | 638 | } |
639 | 639 | ||
640 | r = r600_audio_init(rdev); | ||
641 | if (r) { | ||
642 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
643 | return r; | ||
644 | } | ||
645 | |||
646 | r = radeon_ib_pool_start(rdev); | 640 | r = radeon_ib_pool_start(rdev); |
647 | if (r) | 641 | if (r) |
648 | return r; | 642 | return r; |
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev) | |||
651 | if (r) | 645 | if (r) |
652 | return r; | 646 | return r; |
653 | 647 | ||
648 | r = r600_audio_init(rdev); | ||
649 | if (r) { | ||
650 | dev_err(rdev->dev, "failed initializing audio\n"); | ||
651 | return r; | ||
652 | } | ||
653 | |||
654 | return 0; | 654 | return 0; |
655 | } | 655 | } |
656 | 656 | ||
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 04ddc365a908..b4f51c569c36 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -616,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
616 | ACK_FLUSH_CTL(3) | | 616 | ACK_FLUSH_CTL(3) | |
617 | SYNC_FLUSH_CTL)); | 617 | SYNC_FLUSH_CTL)); |
618 | 618 | ||
619 | if (rdev->family != CHIP_RV770) | ||
620 | WREG32(SMX_SAR_CTL0, 0x00003f3f); | ||
621 | |||
619 | db_debug3 = RREG32(DB_DEBUG3); | 622 | db_debug3 = RREG32(DB_DEBUG3); |
620 | db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f); | 623 | db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f); |
621 | switch (rdev->family) { | 624 | switch (rdev->family) { |
@@ -792,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
792 | 795 | ||
793 | WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | | 796 | WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | |
794 | NUM_CLIP_SEQ(3))); | 797 | NUM_CLIP_SEQ(3))); |
795 | 798 | WREG32(VC_ENHANCE, 0); | |
796 | } | 799 | } |
797 | 800 | ||
798 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | 801 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
@@ -956,6 +959,12 @@ static int rv770_startup(struct radeon_device *rdev) | |||
956 | if (r) | 959 | if (r) |
957 | return r; | 960 | return r; |
958 | 961 | ||
962 | r = r600_audio_init(rdev); | ||
963 | if (r) { | ||
964 | DRM_ERROR("radeon: audio init failed\n"); | ||
965 | return r; | ||
966 | } | ||
967 | |||
959 | return 0; | 968 | return 0; |
960 | } | 969 | } |
961 | 970 | ||
@@ -978,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev) | |||
978 | return r; | 987 | return r; |
979 | } | 988 | } |
980 | 989 | ||
981 | r = r600_audio_init(rdev); | ||
982 | if (r) { | ||
983 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
984 | return r; | ||
985 | } | ||
986 | |||
987 | return r; | 990 | return r; |
988 | 991 | ||
989 | } | 992 | } |
@@ -1092,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev) | |||
1092 | rdev->accel_working = false; | 1095 | rdev->accel_working = false; |
1093 | } | 1096 | } |
1094 | 1097 | ||
1095 | r = r600_audio_init(rdev); | ||
1096 | if (r) { | ||
1097 | dev_err(rdev->dev, "radeon: audio init failed\n"); | ||
1098 | return r; | ||
1099 | } | ||
1100 | |||
1101 | return 0; | 1098 | return 0; |
1102 | } | 1099 | } |
1103 | 1100 | ||
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index fdc089896011..b0adfc595d75 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -211,6 +211,7 @@ | |||
211 | #define SCRATCH_UMSK 0x8540 | 211 | #define SCRATCH_UMSK 0x8540 |
212 | #define SCRATCH_ADDR 0x8544 | 212 | #define SCRATCH_ADDR 0x8544 |
213 | 213 | ||
214 | #define SMX_SAR_CTL0 0xA008 | ||
214 | #define SMX_DC_CTL0 0xA020 | 215 | #define SMX_DC_CTL0 0xA020 |
215 | #define USE_HASH_FUNCTION (1 << 0) | 216 | #define USE_HASH_FUNCTION (1 << 0) |
216 | #define CACHE_DEPTH(x) ((x) << 1) | 217 | #define CACHE_DEPTH(x) ((x) << 1) |
@@ -310,6 +311,8 @@ | |||
310 | #define TCP_CNTL 0x9610 | 311 | #define TCP_CNTL 0x9610 |
311 | #define TCP_CHAN_STEER 0x9614 | 312 | #define TCP_CHAN_STEER 0x9614 |
312 | 313 | ||
314 | #define VC_ENHANCE 0x9714 | ||
315 | |||
313 | #define VGT_CACHE_INVALIDATION 0x88C4 | 316 | #define VGT_CACHE_INVALIDATION 0x88C4 |
314 | #define CACHE_INVALIDATION(x) ((x)<<0) | 317 | #define CACHE_INVALIDATION(x) ((x)<<0) |
315 | #define VC_ONLY 0 | 318 | #define VC_ONLY 0 |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 549732e56ca9..c7b61f16ecfd 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev) | |||
867 | /* | 867 | /* |
868 | * Core functions | 868 | * Core functions |
869 | */ | 869 | */ |
870 | static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | ||
871 | u32 num_tile_pipes, | ||
872 | u32 num_backends_per_asic, | ||
873 | u32 *backend_disable_mask_per_asic, | ||
874 | u32 num_shader_engines) | ||
875 | { | ||
876 | u32 backend_map = 0; | ||
877 | u32 enabled_backends_mask = 0; | ||
878 | u32 enabled_backends_count = 0; | ||
879 | u32 num_backends_per_se; | ||
880 | u32 cur_pipe; | ||
881 | u32 swizzle_pipe[SI_MAX_PIPES]; | ||
882 | u32 cur_backend = 0; | ||
883 | u32 i; | ||
884 | bool force_no_swizzle; | ||
885 | |||
886 | /* force legal values */ | ||
887 | if (num_tile_pipes < 1) | ||
888 | num_tile_pipes = 1; | ||
889 | if (num_tile_pipes > rdev->config.si.max_tile_pipes) | ||
890 | num_tile_pipes = rdev->config.si.max_tile_pipes; | ||
891 | if (num_shader_engines < 1) | ||
892 | num_shader_engines = 1; | ||
893 | if (num_shader_engines > rdev->config.si.max_shader_engines) | ||
894 | num_shader_engines = rdev->config.si.max_shader_engines; | ||
895 | if (num_backends_per_asic < num_shader_engines) | ||
896 | num_backends_per_asic = num_shader_engines; | ||
897 | if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines)) | ||
898 | num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines; | ||
899 | |||
900 | /* make sure we have the same number of backends per se */ | ||
901 | num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines); | ||
902 | /* set up the number of backends per se */ | ||
903 | num_backends_per_se = num_backends_per_asic / num_shader_engines; | ||
904 | if (num_backends_per_se > rdev->config.si.max_backends_per_se) { | ||
905 | num_backends_per_se = rdev->config.si.max_backends_per_se; | ||
906 | num_backends_per_asic = num_backends_per_se * num_shader_engines; | ||
907 | } | ||
908 | |||
909 | /* create enable mask and count for enabled backends */ | ||
910 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
911 | if (((*backend_disable_mask_per_asic >> i) & 1) == 0) { | ||
912 | enabled_backends_mask |= (1 << i); | ||
913 | ++enabled_backends_count; | ||
914 | } | ||
915 | if (enabled_backends_count == num_backends_per_asic) | ||
916 | break; | ||
917 | } | ||
918 | |||
919 | /* force the backends mask to match the current number of backends */ | ||
920 | if (enabled_backends_count != num_backends_per_asic) { | ||
921 | u32 this_backend_enabled; | ||
922 | u32 shader_engine; | ||
923 | u32 backend_per_se; | ||
924 | |||
925 | enabled_backends_mask = 0; | ||
926 | enabled_backends_count = 0; | ||
927 | *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK; | ||
928 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
929 | /* calc the current se */ | ||
930 | shader_engine = i / rdev->config.si.max_backends_per_se; | ||
931 | /* calc the backend per se */ | ||
932 | backend_per_se = i % rdev->config.si.max_backends_per_se; | ||
933 | /* default to not enabled */ | ||
934 | this_backend_enabled = 0; | ||
935 | if ((shader_engine < num_shader_engines) && | ||
936 | (backend_per_se < num_backends_per_se)) | ||
937 | this_backend_enabled = 1; | ||
938 | if (this_backend_enabled) { | ||
939 | enabled_backends_mask |= (1 << i); | ||
940 | *backend_disable_mask_per_asic &= ~(1 << i); | ||
941 | ++enabled_backends_count; | ||
942 | } | ||
943 | } | ||
944 | } | ||
945 | |||
946 | |||
947 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES); | ||
948 | switch (rdev->family) { | ||
949 | case CHIP_TAHITI: | ||
950 | case CHIP_PITCAIRN: | ||
951 | case CHIP_VERDE: | ||
952 | force_no_swizzle = true; | ||
953 | break; | ||
954 | default: | ||
955 | force_no_swizzle = false; | ||
956 | break; | ||
957 | } | ||
958 | if (force_no_swizzle) { | ||
959 | bool last_backend_enabled = false; | ||
960 | |||
961 | force_no_swizzle = false; | ||
962 | for (i = 0; i < SI_MAX_BACKENDS; ++i) { | ||
963 | if (((enabled_backends_mask >> i) & 1) == 1) { | ||
964 | if (last_backend_enabled) | ||
965 | force_no_swizzle = true; | ||
966 | last_backend_enabled = true; | ||
967 | } else | ||
968 | last_backend_enabled = false; | ||
969 | } | ||
970 | } | ||
971 | |||
972 | switch (num_tile_pipes) { | ||
973 | case 1: | ||
974 | case 3: | ||
975 | case 5: | ||
976 | case 7: | ||
977 | DRM_ERROR("odd number of pipes!\n"); | ||
978 | break; | ||
979 | case 2: | ||
980 | swizzle_pipe[0] = 0; | ||
981 | swizzle_pipe[1] = 1; | ||
982 | break; | ||
983 | case 4: | ||
984 | if (force_no_swizzle) { | ||
985 | swizzle_pipe[0] = 0; | ||
986 | swizzle_pipe[1] = 1; | ||
987 | swizzle_pipe[2] = 2; | ||
988 | swizzle_pipe[3] = 3; | ||
989 | } else { | ||
990 | swizzle_pipe[0] = 0; | ||
991 | swizzle_pipe[1] = 2; | ||
992 | swizzle_pipe[2] = 1; | ||
993 | swizzle_pipe[3] = 3; | ||
994 | } | ||
995 | break; | ||
996 | case 6: | ||
997 | if (force_no_swizzle) { | ||
998 | swizzle_pipe[0] = 0; | ||
999 | swizzle_pipe[1] = 1; | ||
1000 | swizzle_pipe[2] = 2; | ||
1001 | swizzle_pipe[3] = 3; | ||
1002 | swizzle_pipe[4] = 4; | ||
1003 | swizzle_pipe[5] = 5; | ||
1004 | } else { | ||
1005 | swizzle_pipe[0] = 0; | ||
1006 | swizzle_pipe[1] = 2; | ||
1007 | swizzle_pipe[2] = 4; | ||
1008 | swizzle_pipe[3] = 1; | ||
1009 | swizzle_pipe[4] = 3; | ||
1010 | swizzle_pipe[5] = 5; | ||
1011 | } | ||
1012 | break; | ||
1013 | case 8: | ||
1014 | if (force_no_swizzle) { | ||
1015 | swizzle_pipe[0] = 0; | ||
1016 | swizzle_pipe[1] = 1; | ||
1017 | swizzle_pipe[2] = 2; | ||
1018 | swizzle_pipe[3] = 3; | ||
1019 | swizzle_pipe[4] = 4; | ||
1020 | swizzle_pipe[5] = 5; | ||
1021 | swizzle_pipe[6] = 6; | ||
1022 | swizzle_pipe[7] = 7; | ||
1023 | } else { | ||
1024 | swizzle_pipe[0] = 0; | ||
1025 | swizzle_pipe[1] = 2; | ||
1026 | swizzle_pipe[2] = 4; | ||
1027 | swizzle_pipe[3] = 6; | ||
1028 | swizzle_pipe[4] = 1; | ||
1029 | swizzle_pipe[5] = 3; | ||
1030 | swizzle_pipe[6] = 5; | ||
1031 | swizzle_pipe[7] = 7; | ||
1032 | } | ||
1033 | break; | ||
1034 | } | ||
1035 | |||
1036 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | ||
1037 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | ||
1038 | cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; | ||
1039 | |||
1040 | backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); | ||
1041 | |||
1042 | cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; | ||
1043 | } | ||
1044 | |||
1045 | return backend_map; | ||
1046 | } | ||
1047 | |||
1048 | static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev, | ||
1049 | u32 disable_mask_per_se, | ||
1050 | u32 max_disable_mask_per_se, | ||
1051 | u32 num_shader_engines) | ||
1052 | { | ||
1053 | u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); | ||
1054 | u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; | ||
1055 | |||
1056 | if (num_shader_engines == 1) | ||
1057 | return disable_mask_per_asic; | ||
1058 | else if (num_shader_engines == 2) | ||
1059 | return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); | ||
1060 | else | ||
1061 | return 0xffffffff; | ||
1062 | } | ||
1063 | |||
1064 | static void si_tiling_mode_table_init(struct radeon_device *rdev) | 870 | static void si_tiling_mode_table_init(struct radeon_device *rdev) |
1065 | { | 871 | { |
1066 | const u32 num_tile_mode_states = 32; | 872 | const u32 num_tile_mode_states = 32; |
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev) | |||
1562 | DRM_ERROR("unknown asic: 0x%x\n", rdev->family); | 1368 | DRM_ERROR("unknown asic: 0x%x\n", rdev->family); |
1563 | } | 1369 | } |
1564 | 1370 | ||
1371 | static void si_select_se_sh(struct radeon_device *rdev, | ||
1372 | u32 se_num, u32 sh_num) | ||
1373 | { | ||
1374 | u32 data = INSTANCE_BROADCAST_WRITES; | ||
1375 | |||
1376 | if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) | ||
1377 | data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; | ||
1378 | else if (se_num == 0xffffffff) | ||
1379 | data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); | ||
1380 | else if (sh_num == 0xffffffff) | ||
1381 | data |= SH_BROADCAST_WRITES | SE_INDEX(se_num); | ||
1382 | else | ||
1383 | data |= SH_INDEX(sh_num) | SE_INDEX(se_num); | ||
1384 | WREG32(GRBM_GFX_INDEX, data); | ||
1385 | } | ||
1386 | |||
1387 | static u32 si_create_bitmask(u32 bit_width) | ||
1388 | { | ||
1389 | u32 i, mask = 0; | ||
1390 | |||
1391 | for (i = 0; i < bit_width; i++) { | ||
1392 | mask <<= 1; | ||
1393 | mask |= 1; | ||
1394 | } | ||
1395 | return mask; | ||
1396 | } | ||
1397 | |||
1398 | static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh) | ||
1399 | { | ||
1400 | u32 data, mask; | ||
1401 | |||
1402 | data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); | ||
1403 | if (data & 1) | ||
1404 | data &= INACTIVE_CUS_MASK; | ||
1405 | else | ||
1406 | data = 0; | ||
1407 | data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); | ||
1408 | |||
1409 | data >>= INACTIVE_CUS_SHIFT; | ||
1410 | |||
1411 | mask = si_create_bitmask(cu_per_sh); | ||
1412 | |||
1413 | return ~data & mask; | ||
1414 | } | ||
1415 | |||
1416 | static void si_setup_spi(struct radeon_device *rdev, | ||
1417 | u32 se_num, u32 sh_per_se, | ||
1418 | u32 cu_per_sh) | ||
1419 | { | ||
1420 | int i, j, k; | ||
1421 | u32 data, mask, active_cu; | ||
1422 | |||
1423 | for (i = 0; i < se_num; i++) { | ||
1424 | for (j = 0; j < sh_per_se; j++) { | ||
1425 | si_select_se_sh(rdev, i, j); | ||
1426 | data = RREG32(SPI_STATIC_THREAD_MGMT_3); | ||
1427 | active_cu = si_get_cu_enabled(rdev, cu_per_sh); | ||
1428 | |||
1429 | mask = 1; | ||
1430 | for (k = 0; k < 16; k++) { | ||
1431 | mask <<= k; | ||
1432 | if (active_cu & mask) { | ||
1433 | data &= ~mask; | ||
1434 | WREG32(SPI_STATIC_THREAD_MGMT_3, data); | ||
1435 | break; | ||
1436 | } | ||
1437 | } | ||
1438 | } | ||
1439 | } | ||
1440 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
1441 | } | ||
1442 | |||
1443 | static u32 si_get_rb_disabled(struct radeon_device *rdev, | ||
1444 | u32 max_rb_num, u32 se_num, | ||
1445 | u32 sh_per_se) | ||
1446 | { | ||
1447 | u32 data, mask; | ||
1448 | |||
1449 | data = RREG32(CC_RB_BACKEND_DISABLE); | ||
1450 | if (data & 1) | ||
1451 | data &= BACKEND_DISABLE_MASK; | ||
1452 | else | ||
1453 | data = 0; | ||
1454 | data |= RREG32(GC_USER_RB_BACKEND_DISABLE); | ||
1455 | |||
1456 | data >>= BACKEND_DISABLE_SHIFT; | ||
1457 | |||
1458 | mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); | ||
1459 | |||
1460 | return data & mask; | ||
1461 | } | ||
1462 | |||
1463 | static void si_setup_rb(struct radeon_device *rdev, | ||
1464 | u32 se_num, u32 sh_per_se, | ||
1465 | u32 max_rb_num) | ||
1466 | { | ||
1467 | int i, j; | ||
1468 | u32 data, mask; | ||
1469 | u32 disabled_rbs = 0; | ||
1470 | u32 enabled_rbs = 0; | ||
1471 | |||
1472 | for (i = 0; i < se_num; i++) { | ||
1473 | for (j = 0; j < sh_per_se; j++) { | ||
1474 | si_select_se_sh(rdev, i, j); | ||
1475 | data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); | ||
1476 | disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); | ||
1477 | } | ||
1478 | } | ||
1479 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
1480 | |||
1481 | mask = 1; | ||
1482 | for (i = 0; i < max_rb_num; i++) { | ||
1483 | if (!(disabled_rbs & mask)) | ||
1484 | enabled_rbs |= mask; | ||
1485 | mask <<= 1; | ||
1486 | } | ||
1487 | |||
1488 | for (i = 0; i < se_num; i++) { | ||
1489 | si_select_se_sh(rdev, i, 0xffffffff); | ||
1490 | data = 0; | ||
1491 | for (j = 0; j < sh_per_se; j++) { | ||
1492 | switch (enabled_rbs & 3) { | ||
1493 | case 1: | ||
1494 | data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); | ||
1495 | break; | ||
1496 | case 2: | ||
1497 | data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); | ||
1498 | break; | ||
1499 | case 3: | ||
1500 | default: | ||
1501 | data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); | ||
1502 | break; | ||
1503 | } | ||
1504 | enabled_rbs >>= 2; | ||
1505 | } | ||
1506 | WREG32(PA_SC_RASTER_CONFIG, data); | ||
1507 | } | ||
1508 | si_select_se_sh(rdev, 0xffffffff, 0xffffffff); | ||
1509 | } | ||
1510 | |||
1565 | static void si_gpu_init(struct radeon_device *rdev) | 1511 | static void si_gpu_init(struct radeon_device *rdev) |
1566 | { | 1512 | { |
1567 | u32 cc_rb_backend_disable = 0; | ||
1568 | u32 cc_gc_shader_array_config; | ||
1569 | u32 gb_addr_config = 0; | 1513 | u32 gb_addr_config = 0; |
1570 | u32 mc_shared_chmap, mc_arb_ramcfg; | 1514 | u32 mc_shared_chmap, mc_arb_ramcfg; |
1571 | u32 gb_backend_map; | ||
1572 | u32 cgts_tcc_disable; | ||
1573 | u32 sx_debug_1; | 1515 | u32 sx_debug_1; |
1574 | u32 gc_user_shader_array_config; | ||
1575 | u32 gc_user_rb_backend_disable; | ||
1576 | u32 cgts_user_tcc_disable; | ||
1577 | u32 hdp_host_path_cntl; | 1516 | u32 hdp_host_path_cntl; |
1578 | u32 tmp; | 1517 | u32 tmp; |
1579 | int i, j; | 1518 | int i, j; |
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1581 | switch (rdev->family) { | 1520 | switch (rdev->family) { |
1582 | case CHIP_TAHITI: | 1521 | case CHIP_TAHITI: |
1583 | rdev->config.si.max_shader_engines = 2; | 1522 | rdev->config.si.max_shader_engines = 2; |
1584 | rdev->config.si.max_pipes_per_simd = 4; | ||
1585 | rdev->config.si.max_tile_pipes = 12; | 1523 | rdev->config.si.max_tile_pipes = 12; |
1586 | rdev->config.si.max_simds_per_se = 8; | 1524 | rdev->config.si.max_cu_per_sh = 8; |
1525 | rdev->config.si.max_sh_per_se = 2; | ||
1587 | rdev->config.si.max_backends_per_se = 4; | 1526 | rdev->config.si.max_backends_per_se = 4; |
1588 | rdev->config.si.max_texture_channel_caches = 12; | 1527 | rdev->config.si.max_texture_channel_caches = 12; |
1589 | rdev->config.si.max_gprs = 256; | 1528 | rdev->config.si.max_gprs = 256; |
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1594 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | 1533 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; |
1595 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1534 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
1596 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1535 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
1536 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; | ||
1597 | break; | 1537 | break; |
1598 | case CHIP_PITCAIRN: | 1538 | case CHIP_PITCAIRN: |
1599 | rdev->config.si.max_shader_engines = 2; | 1539 | rdev->config.si.max_shader_engines = 2; |
1600 | rdev->config.si.max_pipes_per_simd = 4; | ||
1601 | rdev->config.si.max_tile_pipes = 8; | 1540 | rdev->config.si.max_tile_pipes = 8; |
1602 | rdev->config.si.max_simds_per_se = 5; | 1541 | rdev->config.si.max_cu_per_sh = 5; |
1542 | rdev->config.si.max_sh_per_se = 2; | ||
1603 | rdev->config.si.max_backends_per_se = 4; | 1543 | rdev->config.si.max_backends_per_se = 4; |
1604 | rdev->config.si.max_texture_channel_caches = 8; | 1544 | rdev->config.si.max_texture_channel_caches = 8; |
1605 | rdev->config.si.max_gprs = 256; | 1545 | rdev->config.si.max_gprs = 256; |
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1610 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; | 1550 | rdev->config.si.sc_prim_fifo_size_backend = 0x100; |
1611 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1551 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
1612 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1552 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
1553 | gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; | ||
1613 | break; | 1554 | break; |
1614 | case CHIP_VERDE: | 1555 | case CHIP_VERDE: |
1615 | default: | 1556 | default: |
1616 | rdev->config.si.max_shader_engines = 1; | 1557 | rdev->config.si.max_shader_engines = 1; |
1617 | rdev->config.si.max_pipes_per_simd = 4; | ||
1618 | rdev->config.si.max_tile_pipes = 4; | 1558 | rdev->config.si.max_tile_pipes = 4; |
1619 | rdev->config.si.max_simds_per_se = 2; | 1559 | rdev->config.si.max_cu_per_sh = 2; |
1560 | rdev->config.si.max_sh_per_se = 2; | ||
1620 | rdev->config.si.max_backends_per_se = 4; | 1561 | rdev->config.si.max_backends_per_se = 4; |
1621 | rdev->config.si.max_texture_channel_caches = 4; | 1562 | rdev->config.si.max_texture_channel_caches = 4; |
1622 | rdev->config.si.max_gprs = 256; | 1563 | rdev->config.si.max_gprs = 256; |
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1627 | rdev->config.si.sc_prim_fifo_size_backend = 0x40; | 1568 | rdev->config.si.sc_prim_fifo_size_backend = 0x40; |
1628 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; | 1569 | rdev->config.si.sc_hiz_tile_fifo_size = 0x30; |
1629 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; | 1570 | rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; |
1571 | gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; | ||
1630 | break; | 1572 | break; |
1631 | } | 1573 | } |
1632 | 1574 | ||
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1648 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 1590 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
1649 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 1591 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
1650 | 1592 | ||
1651 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); | ||
1652 | cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG); | ||
1653 | cgts_tcc_disable = 0xffff0000; | ||
1654 | for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++) | ||
1655 | cgts_tcc_disable &= ~(1 << (16 + i)); | ||
1656 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); | ||
1657 | gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG); | ||
1658 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); | ||
1659 | |||
1660 | rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines; | ||
1661 | rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; | 1593 | rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; |
1662 | tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
1663 | rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp); | ||
1664 | tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
1665 | rdev->config.si.backend_disable_mask_per_asic = | ||
1666 | si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK, | ||
1667 | rdev->config.si.num_shader_engines); | ||
1668 | rdev->config.si.backend_map = | ||
1669 | si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, | ||
1670 | rdev->config.si.num_backends_per_se * | ||
1671 | rdev->config.si.num_shader_engines, | ||
1672 | &rdev->config.si.backend_disable_mask_per_asic, | ||
1673 | rdev->config.si.num_shader_engines); | ||
1674 | tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT; | ||
1675 | rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp); | ||
1676 | rdev->config.si.mem_max_burst_length_bytes = 256; | 1594 | rdev->config.si.mem_max_burst_length_bytes = 256; |
1677 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; | 1595 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; |
1678 | rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; | 1596 | rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; |
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1683 | rdev->config.si.num_gpus = 1; | 1601 | rdev->config.si.num_gpus = 1; |
1684 | rdev->config.si.multi_gpu_tile_size = 64; | 1602 | rdev->config.si.multi_gpu_tile_size = 64; |
1685 | 1603 | ||
1686 | gb_addr_config = 0; | 1604 | /* fix up row size */ |
1687 | switch (rdev->config.si.num_tile_pipes) { | 1605 | gb_addr_config &= ~ROW_SIZE_MASK; |
1688 | case 1: | ||
1689 | gb_addr_config |= NUM_PIPES(0); | ||
1690 | break; | ||
1691 | case 2: | ||
1692 | gb_addr_config |= NUM_PIPES(1); | ||
1693 | break; | ||
1694 | case 4: | ||
1695 | gb_addr_config |= NUM_PIPES(2); | ||
1696 | break; | ||
1697 | case 8: | ||
1698 | default: | ||
1699 | gb_addr_config |= NUM_PIPES(3); | ||
1700 | break; | ||
1701 | } | ||
1702 | |||
1703 | tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1; | ||
1704 | gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp); | ||
1705 | gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1); | ||
1706 | tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1; | ||
1707 | gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp); | ||
1708 | switch (rdev->config.si.num_gpus) { | ||
1709 | case 1: | ||
1710 | default: | ||
1711 | gb_addr_config |= NUM_GPUS(0); | ||
1712 | break; | ||
1713 | case 2: | ||
1714 | gb_addr_config |= NUM_GPUS(1); | ||
1715 | break; | ||
1716 | case 4: | ||
1717 | gb_addr_config |= NUM_GPUS(2); | ||
1718 | break; | ||
1719 | } | ||
1720 | switch (rdev->config.si.multi_gpu_tile_size) { | ||
1721 | case 16: | ||
1722 | gb_addr_config |= MULTI_GPU_TILE_SIZE(0); | ||
1723 | break; | ||
1724 | case 32: | ||
1725 | default: | ||
1726 | gb_addr_config |= MULTI_GPU_TILE_SIZE(1); | ||
1727 | break; | ||
1728 | case 64: | ||
1729 | gb_addr_config |= MULTI_GPU_TILE_SIZE(2); | ||
1730 | break; | ||
1731 | case 128: | ||
1732 | gb_addr_config |= MULTI_GPU_TILE_SIZE(3); | ||
1733 | break; | ||
1734 | } | ||
1735 | switch (rdev->config.si.mem_row_size_in_kb) { | 1606 | switch (rdev->config.si.mem_row_size_in_kb) { |
1736 | case 1: | 1607 | case 1: |
1737 | default: | 1608 | default: |
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1745 | break; | 1616 | break; |
1746 | } | 1617 | } |
1747 | 1618 | ||
1748 | tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; | ||
1749 | rdev->config.si.num_tile_pipes = (1 << tmp); | ||
1750 | tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; | ||
1751 | rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256; | ||
1752 | tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT; | ||
1753 | rdev->config.si.num_shader_engines = tmp + 1; | ||
1754 | tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT; | ||
1755 | rdev->config.si.num_gpus = tmp + 1; | ||
1756 | tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT; | ||
1757 | rdev->config.si.multi_gpu_tile_size = 1 << tmp; | ||
1758 | tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; | ||
1759 | rdev->config.si.mem_row_size_in_kb = 1 << tmp; | ||
1760 | |||
1761 | gb_backend_map = | ||
1762 | si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, | ||
1763 | rdev->config.si.num_backends_per_se * | ||
1764 | rdev->config.si.num_shader_engines, | ||
1765 | &rdev->config.si.backend_disable_mask_per_asic, | ||
1766 | rdev->config.si.num_shader_engines); | ||
1767 | |||
1768 | /* setup tiling info dword. gb_addr_config is not adequate since it does | 1619 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
1769 | * not have bank info, so create a custom tiling dword. | 1620 | * not have bank info, so create a custom tiling dword. |
1770 | * bits 3:0 num_pipes | 1621 | * bits 3:0 num_pipes |
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
1789 | rdev->config.si.tile_config |= (3 << 0); | 1640 | rdev->config.si.tile_config |= (3 << 0); |
1790 | break; | 1641 | break; |
1791 | } | 1642 | } |
1792 | rdev->config.si.tile_config |= | 1643 | if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) |
1793 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | 1644 | rdev->config.si.tile_config |= 1 << 4; |
1645 | else | ||
1646 | rdev->config.si.tile_config |= 0 << 4; | ||
1794 | rdev->config.si.tile_config |= | 1647 | rdev->config.si.tile_config |= |
1795 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; | 1648 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; |
1796 | rdev->config.si.tile_config |= | 1649 | rdev->config.si.tile_config |= |
1797 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; | 1650 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; |
1798 | 1651 | ||
1799 | rdev->config.si.backend_map = gb_backend_map; | ||
1800 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 1652 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
1801 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 1653 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
1802 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 1654 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
1803 | 1655 | ||
1804 | /* primary versions */ | 1656 | si_tiling_mode_table_init(rdev); |
1805 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
1806 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
1807 | WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); | ||
1808 | |||
1809 | WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); | ||
1810 | 1657 | ||
1811 | /* user versions */ | 1658 | si_setup_rb(rdev, rdev->config.si.max_shader_engines, |
1812 | WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1659 | rdev->config.si.max_sh_per_se, |
1813 | WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 1660 | rdev->config.si.max_backends_per_se); |
1814 | WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); | ||
1815 | 1661 | ||
1816 | WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); | 1662 | si_setup_spi(rdev, rdev->config.si.max_shader_engines, |
1663 | rdev->config.si.max_sh_per_se, | ||
1664 | rdev->config.si.max_cu_per_sh); | ||
1817 | 1665 | ||
1818 | si_tiling_mode_table_init(rdev); | ||
1819 | 1666 | ||
1820 | /* set HW defaults for 3D engine */ | 1667 | /* set HW defaults for 3D engine */ |
1821 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | | 1668 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | |
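The SI rework above replaces the old backend-map computation with helpers that read the harvest registers, build a contiguous bitmask of the configured width, and mask off the disabled units. A small user-space illustration of that mask arithmetic (the names are made up and the register value is an arbitrary example, not real hardware state):

#include <stdio.h>

/* Same construction as the si_create_bitmask() helper in the hunk above. */
static unsigned int make_bitmask(unsigned int bit_width)
{
        unsigned int i, mask = 0;

        for (i = 0; i < bit_width; i++) {
                mask <<= 1;
                mask |= 1;
        }
        return mask;    /* equivalent to (1u << bit_width) - 1 for bit_width < 32 */
}

int main(void)
{
        unsigned int cu_per_sh = 8;
        unsigned int disabled  = 0x0000000c;    /* pretend CUs 2 and 3 are harvested */
        unsigned int active    = ~disabled & make_bitmask(cu_per_sh);

        printf("active CU mask: 0x%02x\n", active);     /* prints 0xf3 */
        return 0;
}

The loop form used by the kernel helper also produces a full mask for a width of 32, where a plain (1u << 32) - 1 would be undefined.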
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h index eda938a7cb6e..501f9d431d57 100644 --- a/drivers/gpu/drm/radeon/si_reg.h +++ b/drivers/gpu/drm/radeon/si_reg.h | |||
@@ -30,4 +30,76 @@ | |||
30 | #define SI_DC_GPIO_HPD_EN 0x65b8 | 30 | #define SI_DC_GPIO_HPD_EN 0x65b8 |
31 | #define SI_DC_GPIO_HPD_Y 0x65bc | 31 | #define SI_DC_GPIO_HPD_Y 0x65bc |
32 | 32 | ||
33 | #define SI_GRPH_CONTROL 0x6804 | ||
34 | # define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0) | ||
35 | # define SI_GRPH_DEPTH_8BPP 0 | ||
36 | # define SI_GRPH_DEPTH_16BPP 1 | ||
37 | # define SI_GRPH_DEPTH_32BPP 2 | ||
38 | # define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2) | ||
39 | # define SI_ADDR_SURF_2_BANK 0 | ||
40 | # define SI_ADDR_SURF_4_BANK 1 | ||
41 | # define SI_ADDR_SURF_8_BANK 2 | ||
42 | # define SI_ADDR_SURF_16_BANK 3 | ||
43 | # define SI_GRPH_Z(x) (((x) & 0x3) << 4) | ||
44 | # define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6) | ||
45 | # define SI_ADDR_SURF_BANK_WIDTH_1 0 | ||
46 | # define SI_ADDR_SURF_BANK_WIDTH_2 1 | ||
47 | # define SI_ADDR_SURF_BANK_WIDTH_4 2 | ||
48 | # define SI_ADDR_SURF_BANK_WIDTH_8 3 | ||
49 | # define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8) | ||
50 | /* 8 BPP */ | ||
51 | # define SI_GRPH_FORMAT_INDEXED 0 | ||
52 | /* 16 BPP */ | ||
53 | # define SI_GRPH_FORMAT_ARGB1555 0 | ||
54 | # define SI_GRPH_FORMAT_ARGB565 1 | ||
55 | # define SI_GRPH_FORMAT_ARGB4444 2 | ||
56 | # define SI_GRPH_FORMAT_AI88 3 | ||
57 | # define SI_GRPH_FORMAT_MONO16 4 | ||
58 | # define SI_GRPH_FORMAT_BGRA5551 5 | ||
59 | /* 32 BPP */ | ||
60 | # define SI_GRPH_FORMAT_ARGB8888 0 | ||
61 | # define SI_GRPH_FORMAT_ARGB2101010 1 | ||
62 | # define SI_GRPH_FORMAT_32BPP_DIG 2 | ||
63 | # define SI_GRPH_FORMAT_8B_ARGB2101010 3 | ||
64 | # define SI_GRPH_FORMAT_BGRA1010102 4 | ||
65 | # define SI_GRPH_FORMAT_8B_BGRA1010102 5 | ||
66 | # define SI_GRPH_FORMAT_RGB111110 6 | ||
67 | # define SI_GRPH_FORMAT_BGR101111 7 | ||
68 | # define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11) | ||
69 | # define SI_ADDR_SURF_BANK_HEIGHT_1 0 | ||
70 | # define SI_ADDR_SURF_BANK_HEIGHT_2 1 | ||
71 | # define SI_ADDR_SURF_BANK_HEIGHT_4 2 | ||
72 | # define SI_ADDR_SURF_BANK_HEIGHT_8 3 | ||
73 | # define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13) | ||
74 | # define SI_ADDR_SURF_TILE_SPLIT_64B 0 | ||
75 | # define SI_ADDR_SURF_TILE_SPLIT_128B 1 | ||
76 | # define SI_ADDR_SURF_TILE_SPLIT_256B 2 | ||
77 | # define SI_ADDR_SURF_TILE_SPLIT_512B 3 | ||
78 | # define SI_ADDR_SURF_TILE_SPLIT_1KB 4 | ||
79 | # define SI_ADDR_SURF_TILE_SPLIT_2KB 5 | ||
80 | # define SI_ADDR_SURF_TILE_SPLIT_4KB 6 | ||
81 | # define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18) | ||
82 | # define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0 | ||
83 | # define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1 | ||
84 | # define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2 | ||
85 | # define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3 | ||
86 | # define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) | ||
87 | # define SI_GRPH_ARRAY_LINEAR_GENERAL 0 | ||
88 | # define SI_GRPH_ARRAY_LINEAR_ALIGNED 1 | ||
89 | # define SI_GRPH_ARRAY_1D_TILED_THIN1 2 | ||
90 | # define SI_GRPH_ARRAY_2D_TILED_THIN1 4 | ||
91 | # define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24) | ||
92 | # define SI_ADDR_SURF_P2 0 | ||
93 | # define SI_ADDR_SURF_P4_8x16 4 | ||
94 | # define SI_ADDR_SURF_P4_16x16 5 | ||
95 | # define SI_ADDR_SURF_P4_16x32 6 | ||
96 | # define SI_ADDR_SURF_P4_32x32 7 | ||
97 | # define SI_ADDR_SURF_P8_16x16_8x16 8 | ||
98 | # define SI_ADDR_SURF_P8_16x32_8x16 9 | ||
99 | # define SI_ADDR_SURF_P8_32x32_8x16 10 | ||
100 | # define SI_ADDR_SURF_P8_16x32_16x16 11 | ||
101 | # define SI_ADDR_SURF_P8_32x32_16x16 12 | ||
102 | # define SI_ADDR_SURF_P8_32x32_16x32 13 | ||
103 | # define SI_ADDR_SURF_P8_32x64_32x32 14 | ||
104 | |||
33 | #endif | 105 | #endif |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 53ea2c42dbd6..db4067962868 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -24,6 +24,11 @@ | |||
24 | #ifndef SI_H | 24 | #ifndef SI_H |
25 | #define SI_H | 25 | #define SI_H |
26 | 26 | ||
27 | #define TAHITI_RB_BITMAP_WIDTH_PER_SH 2 | ||
28 | |||
29 | #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 | ||
30 | #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 | ||
31 | |||
27 | #define CG_MULT_THERMAL_STATUS 0x714 | 32 | #define CG_MULT_THERMAL_STATUS 0x714 |
28 | #define ASIC_MAX_TEMP(x) ((x) << 0) | 33 | #define ASIC_MAX_TEMP(x) ((x) << 0) |
29 | #define ASIC_MAX_TEMP_MASK 0x000001ff | 34 | #define ASIC_MAX_TEMP_MASK 0x000001ff |
@@ -408,6 +413,12 @@ | |||
408 | #define SOFT_RESET_IA (1 << 15) | 413 | #define SOFT_RESET_IA (1 << 15) |
409 | 414 | ||
410 | #define GRBM_GFX_INDEX 0x802C | 415 | #define GRBM_GFX_INDEX 0x802C |
416 | #define INSTANCE_INDEX(x) ((x) << 0) | ||
417 | #define SH_INDEX(x) ((x) << 8) | ||
418 | #define SE_INDEX(x) ((x) << 16) | ||
419 | #define SH_BROADCAST_WRITES (1 << 29) | ||
420 | #define INSTANCE_BROADCAST_WRITES (1 << 30) | ||
421 | #define SE_BROADCAST_WRITES (1 << 31) | ||
411 | 422 | ||
412 | #define GRBM_INT_CNTL 0x8060 | 423 | #define GRBM_INT_CNTL 0x8060 |
413 | # define RDERR_INT_ENABLE (1 << 0) | 424 | # define RDERR_INT_ENABLE (1 << 0) |
@@ -480,6 +491,8 @@ | |||
480 | #define VGT_TF_MEMORY_BASE 0x89B8 | 491 | #define VGT_TF_MEMORY_BASE 0x89B8 |
481 | 492 | ||
482 | #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc | 493 | #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc |
494 | #define INACTIVE_CUS_MASK 0xFFFF0000 | ||
495 | #define INACTIVE_CUS_SHIFT 16 | ||
483 | #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 | 496 | #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 |
484 | 497 | ||
485 | #define PA_CL_ENHANCE 0x8A14 | 498 | #define PA_CL_ENHANCE 0x8A14 |
@@ -688,6 +701,12 @@ | |||
688 | #define RLC_MC_CNTL 0xC344 | 701 | #define RLC_MC_CNTL 0xC344 |
689 | #define RLC_UCODE_CNTL 0xC348 | 702 | #define RLC_UCODE_CNTL 0xC348 |
690 | 703 | ||
704 | #define PA_SC_RASTER_CONFIG 0x28350 | ||
705 | # define RASTER_CONFIG_RB_MAP_0 0 | ||
706 | # define RASTER_CONFIG_RB_MAP_1 1 | ||
707 | # define RASTER_CONFIG_RB_MAP_2 2 | ||
708 | # define RASTER_CONFIG_RB_MAP_3 3 | ||
709 | |||
691 | #define VGT_EVENT_INITIATOR 0x28a90 | 710 | #define VGT_EVENT_INITIATOR 0x28a90 |
692 | # define SAMPLE_STREAMOUTSTATS1 (1 << 0) | 711 | # define SAMPLE_STREAMOUTSTATS1 (1 << 0) |
693 | # define SAMPLE_STREAMOUTSTATS2 (2 << 0) | 712 | # define SAMPLE_STREAMOUTSTATS2 (2 << 0) |
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 30d98d14b5c5..dd14cd1a0033 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c | |||
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) | |||
47 | if (dev_priv == NULL) | 47 | if (dev_priv == NULL) |
48 | return -ENOMEM; | 48 | return -ENOMEM; |
49 | 49 | ||
50 | idr_init(&dev_priv->object_idr); | ||
50 | dev->dev_private = (void *)dev_priv; | 51 | dev->dev_private = (void *)dev_priv; |
51 | dev_priv->chipset = chipset; | 52 | dev_priv->chipset = chipset; |
52 | idr_init(&dev->object_name_idr); | ||
53 | 53 | ||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b67cfcaa661f..36f4b28c1b90 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1204 | (*destroy)(bo); | 1204 | (*destroy)(bo); |
1205 | else | 1205 | else |
1206 | kfree(bo); | 1206 | kfree(bo); |
1207 | ttm_mem_global_free(mem_glob, acc_size); | ||
1207 | return -EINVAL; | 1208 | return -EINVAL; |
1208 | } | 1209 | } |
1209 | bo->destroy = destroy; | 1210 | bo->destroy = destroy; |
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev, | |||
1307 | struct ttm_buffer_object **p_bo) | 1308 | struct ttm_buffer_object **p_bo) |
1308 | { | 1309 | { |
1309 | struct ttm_buffer_object *bo; | 1310 | struct ttm_buffer_object *bo; |
1310 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; | ||
1311 | size_t acc_size; | 1311 | size_t acc_size; |
1312 | int ret; | 1312 | int ret; |
1313 | 1313 | ||
1314 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); | ||
1315 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); | ||
1316 | if (unlikely(ret != 0)) | ||
1317 | return ret; | ||
1318 | |||
1319 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); | 1314 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); |
1320 | 1315 | if (unlikely(bo == NULL)) | |
1321 | if (unlikely(bo == NULL)) { | ||
1322 | ttm_mem_global_free(mem_glob, acc_size); | ||
1323 | return -ENOMEM; | 1316 | return -ENOMEM; |
1324 | } | ||
1325 | 1317 | ||
1318 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); | ||
1326 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, | 1319 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, |
1327 | buffer_start, interruptible, | 1320 | buffer_start, interruptible, |
1328 | persistent_swap_storage, acc_size, NULL, NULL); | 1321 | persistent_swap_storage, acc_size, NULL, NULL); |
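The ttm_bo.c change makes ttm_bo_init() release the acc_size accounting on its own failure path (the added ttm_mem_global_free() above), so ttm_bo_create() no longer pre-charges and refunds it around the allocation. Assuming, as the hunk implies, that ttm_bo_init() performs the matching ttm_mem_global_alloc() itself, the caller side reduces to roughly this:

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(bo == NULL))
                return -ENOMEM;

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        /* ttm_bo_init() charges acc_size and gives it back if it fails,
         * so there is nothing left for the caller to undo here */
        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                          buffer_start, interruptible,
                          persistent_swap_storage, acc_size, NULL, NULL);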
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 4d02c46a9420..6e52069894b3 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c | |||
@@ -13,8 +13,21 @@ | |||
13 | 13 | ||
14 | static struct drm_driver driver; | 14 | static struct drm_driver driver; |
15 | 15 | ||
16 | /* | ||
17 | * There are many DisplayLink-based graphics products, all with unique PIDs. | ||
18 | * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff) | ||
19 | * We also require a match on SubClass (0x00) and Protocol (0x00), | ||
20 | * which is compatible with all known USB 2.0 era graphics chips and firmware, | ||
21 | * but allows DisplayLink to increment those for any future incompatible chips | ||
22 | */ | ||
16 | static struct usb_device_id id_table[] = { | 23 | static struct usb_device_id id_table[] = { |
17 | {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,}, | 24 | {.idVendor = 0x17e9, .bInterfaceClass = 0xff, |
25 | .bInterfaceSubClass = 0x00, | ||
26 | .bInterfaceProtocol = 0x00, | ||
27 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | | ||
28 | USB_DEVICE_ID_MATCH_INT_CLASS | | ||
29 | USB_DEVICE_ID_MATCH_INT_SUBCLASS | | ||
30 | USB_DEVICE_ID_MATCH_INT_PROTOCOL,}, | ||
18 | {}, | 31 | {}, |
19 | }; | 32 | }; |
20 | MODULE_DEVICE_TABLE(usb, id_table); | 33 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c index 1f182254e81e..c126182ac07e 100644 --- a/drivers/gpu/drm/via/via_map.c +++ b/drivers/gpu/drm/via/via_map.c | |||
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) | |||
100 | if (dev_priv == NULL) | 100 | if (dev_priv == NULL) |
101 | return -ENOMEM; | 101 | return -ENOMEM; |
102 | 102 | ||
103 | idr_init(&dev_priv->object_idr); | ||
103 | dev->dev_private = (void *)dev_priv; | 104 | dev->dev_private = (void *)dev_priv; |
104 | 105 | ||
105 | dev_priv->chipset = chipset; | 106 | dev_priv->chipset = chipset; |
106 | 107 | ||
107 | idr_init(&dev->object_name_idr); | ||
108 | |||
109 | pci_set_master(dev->pdev); | 108 | pci_set_master(dev->pdev); |
110 | 109 | ||
111 | ret = drm_vblank_init(dev, 1); | 110 | ret = drm_vblank_init(dev, 1); |
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 38f9534ac513..5b3c7d135dc9 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head) | |||
190 | return NULL; | 190 | return NULL; |
191 | } | 191 | } |
192 | 192 | ||
193 | int vga_switcheroo_get_client_state(struct pci_dev *pdev) | ||
194 | { | ||
195 | struct vga_switcheroo_client *client; | ||
196 | |||
197 | client = find_client_from_pci(&vgasr_priv.clients, pdev); | ||
198 | if (!client) | ||
199 | return VGA_SWITCHEROO_NOT_FOUND; | ||
200 | if (!vgasr_priv.active) | ||
201 | return VGA_SWITCHEROO_INIT; | ||
202 | return client->pwr_state; | ||
203 | } | ||
204 | EXPORT_SYMBOL(vga_switcheroo_get_client_state); | ||
205 | |||
193 | void vga_switcheroo_unregister_client(struct pci_dev *pdev) | 206 | void vga_switcheroo_unregister_client(struct pci_dev *pdev) |
194 | { | 207 | { |
195 | struct vga_switcheroo_client *client; | 208 | struct vga_switcheroo_client *client; |
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) | |||
291 | vga_switchon(new_client); | 304 | vga_switchon(new_client); |
292 | 305 | ||
293 | vga_set_default_device(new_client->pdev); | 306 | vga_set_default_device(new_client->pdev); |
294 | set_audio_state(new_client->id, VGA_SWITCHEROO_ON); | ||
295 | |||
296 | return 0; | 307 | return 0; |
297 | } | 308 | } |
298 | 309 | ||
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) | |||
308 | 319 | ||
309 | active->active = false; | 320 | active->active = false; |
310 | 321 | ||
322 | set_audio_state(active->id, VGA_SWITCHEROO_OFF); | ||
323 | |||
311 | if (new_client->fb_info) { | 324 | if (new_client->fb_info) { |
312 | struct fb_event event; | 325 | struct fb_event event; |
313 | event.info = new_client->fb_info; | 326 | event.info = new_client->fb_info; |
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) | |||
321 | if (new_client->ops->reprobe) | 334 | if (new_client->ops->reprobe) |
322 | new_client->ops->reprobe(new_client->pdev); | 335 | new_client->ops->reprobe(new_client->pdev); |
323 | 336 | ||
324 | set_audio_state(active->id, VGA_SWITCHEROO_OFF); | ||
325 | |||
326 | if (active->pwr_state == VGA_SWITCHEROO_ON) | 337 | if (active->pwr_state == VGA_SWITCHEROO_ON) |
327 | vga_switchoff(active); | 338 | vga_switchoff(active); |
328 | 339 | ||
340 | set_audio_state(new_client->id, VGA_SWITCHEROO_ON); | ||
341 | |||
329 | new_client->active = true; | 342 | new_client->active = true; |
330 | return 0; | 343 | return 0; |
331 | } | 344 | } |
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
371 | /* pwr off the device not in use */ | 384 | /* pwr off the device not in use */ |
372 | if (strncmp(usercmd, "OFF", 3) == 0) { | 385 | if (strncmp(usercmd, "OFF", 3) == 0) { |
373 | list_for_each_entry(client, &vgasr_priv.clients, list) { | 386 | list_for_each_entry(client, &vgasr_priv.clients, list) { |
374 | if (client->active) | 387 | if (client->active || client_is_audio(client)) |
375 | continue; | 388 | continue; |
389 | set_audio_state(client->id, VGA_SWITCHEROO_OFF); | ||
376 | if (client->pwr_state == VGA_SWITCHEROO_ON) | 390 | if (client->pwr_state == VGA_SWITCHEROO_ON) |
377 | vga_switchoff(client); | 391 | vga_switchoff(client); |
378 | } | 392 | } |
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
381 | /* pwr on the device not in use */ | 395 | /* pwr on the device not in use */ |
382 | if (strncmp(usercmd, "ON", 2) == 0) { | 396 | if (strncmp(usercmd, "ON", 2) == 0) { |
383 | list_for_each_entry(client, &vgasr_priv.clients, list) { | 397 | list_for_each_entry(client, &vgasr_priv.clients, list) { |
384 | if (client->active) | 398 | if (client->active || client_is_audio(client)) |
385 | continue; | 399 | continue; |
386 | if (client->pwr_state == VGA_SWITCHEROO_OFF) | 400 | if (client->pwr_state == VGA_SWITCHEROO_OFF) |
387 | vga_switchon(client); | 401 | vga_switchon(client); |
402 | set_audio_state(client->id, VGA_SWITCHEROO_ON); | ||
388 | } | 403 | } |
389 | goto out; | 404 | goto out; |
390 | } | 405 | } |
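The new vga_switcheroo_get_client_state() export lets a GPU driver ask, before touching the hardware, whether the mux currently has it registered and powered; the possible return values are the ones visible in the hunk (VGA_SWITCHEROO_NOT_FOUND, VGA_SWITCHEROO_INIT, or the client's own pwr_state). A purely hypothetical caller, for illustration only:

        /* in a hybrid-graphics aware driver's probe path (invented surroundings) */
        switch (vga_switcheroo_get_client_state(pdev)) {
        case VGA_SWITCHEROO_OFF:
                /* the mux already powered this GPU down: skip full hw init */
                break;
        case VGA_SWITCHEROO_NOT_FOUND:  /* no switcheroo handler registered */
        case VGA_SWITCHEROO_INIT:
        case VGA_SWITCHEROO_ON:
        default:
                /* safe to bring the device up normally */
                break;
        }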
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index f082e48ab113..70d62f5bc909 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | |||
215 | int i; | 215 | int i; |
216 | 216 | ||
217 | if (send_command(cmd) || send_argument(key)) { | 217 | if (send_command(cmd) || send_argument(key)) { |
218 | pr_warn("%s: read arg fail\n", key); | 218 | pr_warn("%.4s: read arg fail\n", key); |
219 | return -EIO; | 219 | return -EIO; |
220 | } | 220 | } |
221 | 221 | ||
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | |||
223 | 223 | ||
224 | for (i = 0; i < len; i++) { | 224 | for (i = 0; i < len; i++) { |
225 | if (__wait_status(0x05)) { | 225 | if (__wait_status(0x05)) { |
226 | pr_warn("%s: read data fail\n", key); | 226 | pr_warn("%.4s: read data fail\n", key); |
227 | return -EIO; | 227 | return -EIO; |
228 | } | 228 | } |
229 | buffer[i] = inb(APPLESMC_DATA_PORT); | 229 | buffer[i] = inb(APPLESMC_DATA_PORT); |
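The applesmc change leans on the printf precision field: SMC key names are four characters and, when they come from the key enumeration path, are not necessarily NUL-terminated, so "%.4s" prints exactly four bytes instead of running past the end of the buffer. A trivial user-space demonstration of the specifier (the key value is just an example):

#include <stdio.h>

int main(void)
{
        /* four-character key, deliberately without a terminating NUL */
        char key[4] = { 'T', 'C', '0', 'P' };

        printf("%.4s: read arg fail\n", key);   /* prints exactly "TC0P: read arg fail" */
        return 0;
}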
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index b9d512331ed4..7f1feb2f467a 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev, | |||
191 | return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; | 191 | return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; |
192 | } | 192 | } |
193 | 193 | ||
194 | struct tjmax { | ||
195 | char const *id; | ||
196 | int tjmax; | ||
197 | }; | ||
198 | |||
199 | static struct tjmax __cpuinitconst tjmax_table[] = { | ||
200 | { "CPU D410", 100000 }, | ||
201 | { "CPU D425", 100000 }, | ||
202 | { "CPU D510", 100000 }, | ||
203 | { "CPU D525", 100000 }, | ||
204 | { "CPU N450", 100000 }, | ||
205 | { "CPU N455", 100000 }, | ||
206 | { "CPU N470", 100000 }, | ||
207 | { "CPU N475", 100000 }, | ||
208 | { "CPU 230", 100000 }, | ||
209 | { "CPU 330", 125000 }, | ||
210 | }; | ||
211 | |||
194 | static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, | 212 | static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, |
195 | struct device *dev) | 213 | struct device *dev) |
196 | { | 214 | { |
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, | |||
202 | int err; | 220 | int err; |
203 | u32 eax, edx; | 221 | u32 eax, edx; |
204 | struct pci_dev *host_bridge; | 222 | struct pci_dev *host_bridge; |
223 | int i; | ||
224 | |||
225 | /* explicit tjmax table entries override heuristics */ | ||
226 | for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) { | ||
227 | if (strstr(c->x86_model_id, tjmax_table[i].id)) | ||
228 | return tjmax_table[i].tjmax; | ||
229 | } | ||
205 | 230 | ||
206 | /* Early chips have no MSR for TjMax */ | 231 | /* Early chips have no MSR for TjMax */ |
207 | 232 | ||
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, | |||
210 | 235 | ||
211 | /* Atom CPUs */ | 236 | /* Atom CPUs */ |
212 | 237 | ||
213 | if (c->x86_model == 0x1c) { | 238 | if (c->x86_model == 0x1c || c->x86_model == 0x26 |
239 | || c->x86_model == 0x27) { | ||
214 | usemsr_ee = 0; | 240 | usemsr_ee = 0; |
215 | 241 | ||
216 | host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | 242 | host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); |
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, | |||
223 | tjmax = 90000; | 249 | tjmax = 90000; |
224 | 250 | ||
225 | pci_dev_put(host_bridge); | 251 | pci_dev_put(host_bridge); |
252 | } else if (c->x86_model == 0x36) { | ||
253 | usemsr_ee = 0; | ||
254 | tjmax = 100000; | ||
226 | } | 255 | } |
227 | 256 | ||
228 | if (c->x86_model > 0xe && usemsr_ee) { | 257 | if (c->x86_model > 0xe && usemsr_ee) { |
@@ -772,7 +801,7 @@ MODULE_DEVICE_TABLE(x86cpu, coretemp_ids); | |||
772 | 801 | ||
773 | static int __init coretemp_init(void) | 802 | static int __init coretemp_init(void) |
774 | { | 803 | { |
775 | int i, err = -ENODEV; | 804 | int i, err; |
776 | 805 | ||
777 | /* | 806 | /* |
778 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal | 807 | * CPUID.06H.EAX[0] indicates whether the CPU has thermal |
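The new table above short-circuits the TjMax heuristics: the driver scans the CPU brand string for one of the listed fragments and, on a hit, uses the fixed value (in millidegrees Celsius). A hedged sketch of the same substring-lookup pattern, with an invented brand string:

	#include <stdio.h>
	#include <string.h>

	struct tjmax {
		const char *id;
		int tjmax;			/* millidegrees Celsius */
	};

	static const struct tjmax tjmax_table[] = {
		{ "CPU N455", 100000 },
		{ "CPU 330",  125000 },
	};

	static int lookup_tjmax(const char *model_id)
	{
		size_t i;

		for (i = 0; i < sizeof(tjmax_table) / sizeof(tjmax_table[0]); i++)
			if (strstr(model_id, tjmax_table[i].id))
				return tjmax_table[i].tjmax;
		return -1;			/* no entry: fall back to the heuristics */
	}

	int main(void)
	{
		/* The brand string below is only illustrative. */
		printf("%d\n", lookup_tjmax("Intel(R) Atom(TM) CPU N455 @ 1.66GHz"));
		return 0;
	}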
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 9691f664c76e..e7d234b59312 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c | |||
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da, | |||
451 | data->fan_rpm_control = true; | 451 | data->fan_rpm_control = true; |
452 | break; | 452 | break; |
453 | default: | 453 | default: |
454 | mutex_unlock(&data->update_lock); | 454 | count = -EINVAL; |
455 | return -EINVAL; | 455 | goto err; |
456 | } | 456 | } |
457 | 457 | ||
458 | read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); | 458 | result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); |
459 | if (result) { | ||
460 | count = result; | ||
461 | goto err; | ||
462 | } | ||
459 | 463 | ||
460 | if (data->fan_rpm_control) | 464 | if (data->fan_rpm_control) |
461 | conf_reg |= 0x80; | 465 | conf_reg |= 0x80; |
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da, | |||
463 | conf_reg &= ~0x80; | 467 | conf_reg &= ~0x80; |
464 | 468 | ||
465 | i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg); | 469 | i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg); |
466 | 470 | err: | |
467 | mutex_unlock(&data->update_lock); | 471 | mutex_unlock(&data->update_lock); |
468 | return count; | 472 | return count; |
469 | } | 473 | } |
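The emc2103 rework routes every failure through one err: label so update_lock is released exactly once, and the register read result is checked instead of being ignored. A generic, runnable sketch of that unlock-on-error shape (the register helper below is a stub, not the driver's I2C path):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for a bus read that can fail; returns 0 or a negative error. */
	static int read_reg(unsigned char *val)
	{
		*val = 0x42;
		return 0;
	}

	static int store_value(void)
	{
		int count = 0;
		int result;
		unsigned char reg;

		pthread_mutex_lock(&update_lock);

		result = read_reg(&reg);
		if (result) {
			count = result;		/* propagate the error code */
			goto err;
		}

		/* ... modify and write 'reg' back on the success path ... */

	err:
		pthread_mutex_unlock(&update_lock);	/* single exit: unlock exactly once */
		return count;
	}

	int main(void)
	{
		printf("%d\n", store_value());
		return 0;
	}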
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index beb2491db274..a0edd9854218 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig | |||
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x | |||
37 | This driver can also be built as a module. If so, the module | 37 | This driver can also be built as a module. If so, the module |
38 | will be called i2c-mux-pca954x. | 38 | will be called i2c-mux-pca954x. |
39 | 39 | ||
40 | config I2C_MUX_PINCTRL | ||
41 | tristate "pinctrl-based I2C multiplexer" | ||
42 | depends on PINCTRL | ||
43 | help | ||
44 | If you say yes to this option, support will be included for an I2C | ||
45 | multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing. | ||
46 | This is useful for SoCs whose I2C module's signals can be routed to | ||
47 | different sets of pins at run-time. | ||
48 | |||
49 | This driver can also be built as a module. If so, the module will be | ||
50 | called i2c-mux-pinctrl. | ||
51 | |||
40 | endmenu | 52 | endmenu |
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile index 5826249b29ca..76da8692afff 100644 --- a/drivers/i2c/muxes/Makefile +++ b/drivers/i2c/muxes/Makefile | |||
@@ -4,5 +4,6 @@ | |||
4 | obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o | 4 | obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o |
5 | obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o | 5 | obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o |
6 | obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o | 6 | obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o |
7 | obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o | ||
7 | 8 | ||
8 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG | 9 | ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG |
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c new file mode 100644 index 000000000000..46a669763476 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * I2C multiplexer using pinctrl API | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/i2c.h> | ||
20 | #include <linux/i2c-mux.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/of_i2c.h> | ||
24 | #include <linux/pinctrl/consumer.h> | ||
25 | #include <linux/i2c-mux-pinctrl.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | struct i2c_mux_pinctrl { | ||
30 | struct device *dev; | ||
31 | struct i2c_mux_pinctrl_platform_data *pdata; | ||
32 | struct pinctrl *pinctrl; | ||
33 | struct pinctrl_state **states; | ||
34 | struct pinctrl_state *state_idle; | ||
35 | struct i2c_adapter *parent; | ||
36 | struct i2c_adapter **busses; | ||
37 | }; | ||
38 | |||
39 | static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data, | ||
40 | u32 chan) | ||
41 | { | ||
42 | struct i2c_mux_pinctrl *mux = data; | ||
43 | |||
44 | return pinctrl_select_state(mux->pinctrl, mux->states[chan]); | ||
45 | } | ||
46 | |||
47 | static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data, | ||
48 | u32 chan) | ||
49 | { | ||
50 | struct i2c_mux_pinctrl *mux = data; | ||
51 | |||
52 | return pinctrl_select_state(mux->pinctrl, mux->state_idle); | ||
53 | } | ||
54 | |||
55 | #ifdef CONFIG_OF | ||
56 | static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, | ||
57 | struct platform_device *pdev) | ||
58 | { | ||
59 | struct device_node *np = pdev->dev.of_node; | ||
60 | int num_names, i, ret; | ||
61 | struct device_node *adapter_np; | ||
62 | struct i2c_adapter *adapter; | ||
63 | |||
64 | if (!np) | ||
65 | return 0; | ||
66 | |||
67 | mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL); | ||
68 | if (!mux->pdata) { | ||
69 | dev_err(mux->dev, | ||
70 | "Cannot allocate i2c_mux_pinctrl_platform_data\n"); | ||
71 | return -ENOMEM; | ||
72 | } | ||
73 | |||
74 | num_names = of_property_count_strings(np, "pinctrl-names"); | ||
75 | if (num_names < 0) { | ||
76 | dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", | ||
77 | num_names); | ||
78 | return num_names; | ||
79 | } | ||
80 | |||
81 | mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev, | ||
82 | sizeof(*mux->pdata->pinctrl_states) * num_names, | ||
83 | GFP_KERNEL); | ||
84 | if (!mux->pdata->pinctrl_states) { | ||
85 | dev_err(mux->dev, "Cannot allocate pinctrl_states\n"); | ||
86 | return -ENOMEM; | ||
87 | } | ||
88 | |||
89 | for (i = 0; i < num_names; i++) { | ||
90 | ret = of_property_read_string_index(np, "pinctrl-names", i, | ||
91 | &mux->pdata->pinctrl_states[mux->pdata->bus_count]); | ||
92 | if (ret < 0) { | ||
93 | dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", | ||
94 | ret); | ||
95 | return ret; | ||
96 | } | ||
97 | if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count], | ||
98 | "idle")) { | ||
99 | if (i != num_names - 1) { | ||
100 | dev_err(mux->dev, "idle state must be last\n"); | ||
101 | return -EINVAL; | ||
102 | } | ||
103 | mux->pdata->pinctrl_state_idle = "idle"; | ||
104 | } else { | ||
105 | mux->pdata->bus_count++; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | adapter_np = of_parse_phandle(np, "i2c-parent", 0); | ||
110 | if (!adapter_np) { | ||
111 | dev_err(mux->dev, "Cannot parse i2c-parent\n"); | ||
112 | return -ENODEV; | ||
113 | } | ||
114 | adapter = of_find_i2c_adapter_by_node(adapter_np); | ||
115 | if (!adapter) { | ||
116 | dev_err(mux->dev, "Cannot find parent bus\n"); | ||
117 | return -ENODEV; | ||
118 | } | ||
119 | mux->pdata->parent_bus_num = i2c_adapter_id(adapter); | ||
120 | put_device(&adapter->dev); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | #else | ||
125 | static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, | ||
126 | struct platform_device *pdev) | ||
127 | { | ||
128 | return 0; | ||
129 | } | ||
130 | #endif | ||
131 | |||
132 | static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) | ||
133 | { | ||
134 | struct i2c_mux_pinctrl *mux; | ||
135 | int (*deselect)(struct i2c_adapter *, void *, u32); | ||
136 | int i, ret; | ||
137 | |||
138 | mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); | ||
139 | if (!mux) { | ||
140 | dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n"); | ||
141 | ret = -ENOMEM; | ||
142 | goto err; | ||
143 | } | ||
144 | platform_set_drvdata(pdev, mux); | ||
145 | |||
146 | mux->dev = &pdev->dev; | ||
147 | |||
148 | mux->pdata = pdev->dev.platform_data; | ||
149 | if (!mux->pdata) { | ||
150 | ret = i2c_mux_pinctrl_parse_dt(mux, pdev); | ||
151 | if (ret < 0) | ||
152 | goto err; | ||
153 | } | ||
154 | if (!mux->pdata) { | ||
155 | dev_err(&pdev->dev, "Missing platform data\n"); | ||
156 | ret = -ENODEV; | ||
157 | goto err; | ||
158 | } | ||
159 | |||
160 | mux->states = devm_kzalloc(&pdev->dev, | ||
161 | sizeof(*mux->states) * mux->pdata->bus_count, | ||
162 | GFP_KERNEL); | ||
163 | if (!mux->states) { | ||
164 | dev_err(&pdev->dev, "Cannot allocate states\n"); | ||
165 | ret = -ENOMEM; | ||
166 | goto err; | ||
167 | } | ||
168 | |||
169 | mux->busses = devm_kzalloc(&pdev->dev, | ||
170 | sizeof(mux->busses) * mux->pdata->bus_count, | ||
171 | GFP_KERNEL); | ||
172 | if (!mux->busses) { | ||
173 | dev_err(&pdev->dev, "Cannot allocate busses\n"); | ||
174 | ret = -ENOMEM; | ||
175 | goto err; | ||
176 | } | ||
177 | |||
178 | mux->pinctrl = devm_pinctrl_get(&pdev->dev); | ||
179 | if (IS_ERR(mux->pinctrl)) { | ||
180 | ret = PTR_ERR(mux->pinctrl); | ||
181 | dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret); | ||
182 | goto err; | ||
183 | } | ||
184 | for (i = 0; i < mux->pdata->bus_count; i++) { | ||
185 | mux->states[i] = pinctrl_lookup_state(mux->pinctrl, | ||
186 | mux->pdata->pinctrl_states[i]); | ||
187 | if (IS_ERR(mux->states[i])) { | ||
188 | ret = PTR_ERR(mux->states[i]); | ||
189 | dev_err(&pdev->dev, | ||
190 | "Cannot look up pinctrl state %s: %d\n", | ||
191 | mux->pdata->pinctrl_states[i], ret); | ||
192 | goto err; | ||
193 | } | ||
194 | } | ||
195 | if (mux->pdata->pinctrl_state_idle) { | ||
196 | mux->state_idle = pinctrl_lookup_state(mux->pinctrl, | ||
197 | mux->pdata->pinctrl_state_idle); | ||
198 | if (IS_ERR(mux->state_idle)) { | ||
199 | ret = PTR_ERR(mux->state_idle); | ||
200 | dev_err(&pdev->dev, | ||
201 | "Cannot look up pinctrl state %s: %d\n", | ||
202 | mux->pdata->pinctrl_state_idle, ret); | ||
203 | goto err; | ||
204 | } | ||
205 | |||
206 | deselect = i2c_mux_pinctrl_deselect; | ||
207 | } else { | ||
208 | deselect = NULL; | ||
209 | } | ||
210 | |||
211 | mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num); | ||
212 | if (!mux->parent) { | ||
213 | dev_err(&pdev->dev, "Parent adapter (%d) not found\n", | ||
214 | mux->pdata->parent_bus_num); | ||
215 | ret = -ENODEV; | ||
216 | goto err; | ||
217 | } | ||
218 | |||
219 | for (i = 0; i < mux->pdata->bus_count; i++) { | ||
220 | u32 bus = mux->pdata->base_bus_num ? | ||
221 | (mux->pdata->base_bus_num + i) : 0; | ||
222 | |||
223 | mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, | ||
224 | mux, bus, i, | ||
225 | i2c_mux_pinctrl_select, | ||
226 | deselect); | ||
227 | if (!mux->busses[i]) { | ||
228 | ret = -ENODEV; | ||
229 | dev_err(&pdev->dev, "Failed to add adapter %d\n", i); | ||
230 | goto err_del_adapter; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | return 0; | ||
235 | |||
236 | err_del_adapter: | ||
237 | for (; i > 0; i--) | ||
238 | i2c_del_mux_adapter(mux->busses[i - 1]); | ||
239 | i2c_put_adapter(mux->parent); | ||
240 | err: | ||
241 | return ret; | ||
242 | } | ||
243 | |||
244 | static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev) | ||
245 | { | ||
246 | struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev); | ||
247 | int i; | ||
248 | |||
249 | for (i = 0; i < mux->pdata->bus_count; i++) | ||
250 | i2c_del_mux_adapter(mux->busses[i]); | ||
251 | |||
252 | i2c_put_adapter(mux->parent); | ||
253 | |||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | #ifdef CONFIG_OF | ||
258 | static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = { | ||
259 | { .compatible = "i2c-mux-pinctrl", }, | ||
260 | {}, | ||
261 | }; | ||
262 | MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match); | ||
263 | #endif | ||
264 | |||
265 | static struct platform_driver i2c_mux_pinctrl_driver = { | ||
266 | .driver = { | ||
267 | .name = "i2c-mux-pinctrl", | ||
268 | .owner = THIS_MODULE, | ||
269 | .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match), | ||
270 | }, | ||
271 | .probe = i2c_mux_pinctrl_probe, | ||
272 | .remove = __devexit_p(i2c_mux_pinctrl_remove), | ||
273 | }; | ||
274 | module_platform_driver(i2c_mux_pinctrl_driver); | ||
275 | |||
276 | MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver"); | ||
277 | MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); | ||
278 | MODULE_LICENSE("GPL v2"); | ||
279 | MODULE_ALIAS("platform:i2c-mux-pinctrl"); | ||
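The driver above takes its configuration either from device tree or from platform data. The sketch below shows how a board file might fill in the fields the probe path reads (parent_bus_num, base_bus_num, bus_count, pinctrl_states, pinctrl_state_idle); the state names and bus numbers are invented, and the header layout is assumed to match what the driver dereferences:

	#include <linux/i2c-mux-pinctrl.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>

	/* Hypothetical board code: two child busses plus an optional idle state. */
	static const char *board_i2c_mux_states[] = { "i2c-bus-a", "i2c-bus-b" };

	static struct i2c_mux_pinctrl_platform_data board_i2c_mux_pdata = {
		.parent_bus_num		= 0,	/* adapter id of the parent controller */
		.base_bus_num		= 10,	/* children appear as i2c-10, i2c-11 */
		.bus_count		= ARRAY_SIZE(board_i2c_mux_states),
		.pinctrl_states		= board_i2c_mux_states,
		.pinctrl_state_idle	= "idle",
	};

	static struct platform_device board_i2c_mux_device = {
		.name	= "i2c-mux-pinctrl",
		.id	= -1,
		.dev	= {
			.platform_data = &board_i2c_mux_pdata,
		},
	};

	/* Registered from board init with platform_device_register(&board_i2c_mux_device);
	 * the named pinctrl states themselves come from the usual pinctrl maps. */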
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 8716066a2f2b..bcb507b0cfd4 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c | |||
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = { | |||
236 | */ | 236 | */ |
237 | static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) | 237 | static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) |
238 | { | 238 | { |
239 | unsigned long cycle_time; | 239 | unsigned long cycle_time = 0; |
240 | int use_dma_info = 0; | 240 | int use_dma_info = 0; |
241 | const u8 xfer_mode = drive->dma_mode; | 241 | const u8 xfer_mode = drive->dma_mode; |
242 | 242 | ||
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) | |||
271 | 271 | ||
272 | ide_set_drivedata(drive, (void *)cycle_time); | 272 | ide_set_drivedata(drive, (void *)cycle_time); |
273 | 273 | ||
274 | printk("%s: %s selected (peak %dMB/s)\n", drive->name, | 274 | printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n", |
275 | ide_xfer_verbose(xfer_mode), | 275 | drive->name, ide_xfer_verbose(xfer_mode), |
276 | 2000 / (unsigned long)ide_get_drivedata(drive)); | 276 | 2000 / (cycle_time ? cycle_time : (unsigned long) -1)); |
277 | } | 277 | } |
278 | 278 | ||
279 | static const struct ide_port_ops icside_v6_port_ops = { | 279 | static const struct ide_port_ops icside_v6_port_ops = { |
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = { | |||
375 | .dma_test_irq = icside_dma_test_irq, | 375 | .dma_test_irq = icside_dma_test_irq, |
376 | .dma_lost_irq = ide_dma_lost_irq, | 376 | .dma_lost_irq = ide_dma_lost_irq, |
377 | }; | 377 | }; |
378 | #else | ||
379 | #define icside_v6_dma_ops NULL | ||
380 | #endif | 378 | #endif |
381 | 379 | ||
382 | static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) | 380 | static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) |
@@ -456,7 +454,6 @@ err_free: | |||
456 | static const struct ide_port_info icside_v6_port_info __initdata = { | 454 | static const struct ide_port_info icside_v6_port_info __initdata = { |
457 | .init_dma = icside_dma_off_init, | 455 | .init_dma = icside_dma_off_init, |
458 | .port_ops = &icside_v6_no_dma_port_ops, | 456 | .port_ops = &icside_v6_no_dma_port_ops, |
459 | .dma_ops = &icside_v6_dma_ops, | ||
460 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, | 457 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, |
461 | .mwdma_mask = ATA_MWDMA2, | 458 | .mwdma_mask = ATA_MWDMA2, |
462 | .swdma_mask = ATA_SWDMA2, | 459 | .swdma_mask = ATA_SWDMA2, |
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) | |||
518 | 515 | ||
519 | ecard_set_drvdata(ec, state); | 516 | ecard_set_drvdata(ec, state); |
520 | 517 | ||
518 | #ifdef CONFIG_BLK_DEV_IDEDMA_ICS | ||
521 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { | 519 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { |
522 | d.init_dma = icside_dma_init; | 520 | d.init_dma = icside_dma_init; |
523 | d.port_ops = &icside_v6_port_ops; | 521 | d.port_ops = &icside_v6_port_ops; |
524 | } else | 522 | d.dma_ops = &icside_v6_dma_ops; |
525 | d.dma_ops = NULL; | 523 | } |
524 | #endif | ||
526 | 525 | ||
527 | ret = ide_host_register(host, &d, hws); | 526 | ret = ide_host_register(host, &d, hws); |
528 | if (ret) | 527 | if (ret) |
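Three small things change in the printk above: a log level is added, the format specifier now matches the unsigned long argument, and the division is guarded so an unknown (zero) cycle time cannot divide by zero. A tiny stand-alone sketch of the guarded form:

	#include <stdio.h>

	int main(void)
	{
		unsigned long cycle_time = 0;	/* e.g. no DMA timing information yet */

		/* Dividing by (cycle_time ? cycle_time : -1UL) avoids the fault and
		 * simply reports 0 MB/s when the cycle time is unknown. */
		printf("peak %luMB/s\n",
		       2000 / (cycle_time ? cycle_time : (unsigned long)-1));
		return 0;
	}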
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 28e344ea514c..f1e922e2479a 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) | |||
167 | { | 167 | { |
168 | int *is_kme = priv_data; | 168 | int *is_kme = priv_data; |
169 | 169 | ||
170 | if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { | 170 | if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH) |
171 | != IO_DATA_PATH_WIDTH_8) { | ||
171 | pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; | 172 | pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; |
172 | pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; | 173 | pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; |
173 | } | 174 | } |
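The old test above checked a single bit, so any configuration whose multi-bit width field happened to include that bit (auto width, typically) was treated as 8-bit. The fix masks the whole field before comparing. A small sketch of the difference, using invented constants:

	#include <stdio.h>

	#define WIDTH_MASK	0x3	/* two-bit field; values below are invented */
	#define WIDTH_8		0x1
	#define WIDTH_16	0x2
	#define WIDTH_AUTO	0x3	/* shares a bit with WIDTH_8 */

	int main(void)
	{
		unsigned int flags = WIDTH_AUTO;

		/* Bit test: wrongly reports 8-bit for auto width (prints 1). */
		printf("%d\n", (flags & WIDTH_8) != 0);

		/* Masked-field compare: correctly reports non-8-bit (prints 0). */
		printf("%d\n", (flags & WIDTH_MASK) == WIDTH_8);
		return 0;
	}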
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index 612073f6c540..d4984c8be973 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig | |||
@@ -8,8 +8,7 @@ menuconfig IIO | |||
8 | help | 8 | help |
9 | The industrial I/O subsystem provides a unified framework for | 9 | The industrial I/O subsystem provides a unified framework for |
10 | drivers for many different types of embedded sensors using a | 10 | drivers for many different types of embedded sensors using a |
11 | number of different physical interfaces (i2c, spi, etc). See | 11 | number of different physical interfaces (i2c, spi, etc). |
12 | Documentation/iio for more information. | ||
13 | 12 | ||
14 | if IIO | 13 | if IIO |
15 | 14 | ||
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 55d5642eb10a..2e826f9702c6 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event, | |||
1184 | 1184 | ||
1185 | static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) | 1185 | static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) |
1186 | { | 1186 | { |
1187 | return (((ib_event->event == IB_CM_REQ_RECEIVED) || | 1187 | return (((ib_event->event == IB_CM_REQ_RECEIVED) && |
1188 | (ib_event->param.req_rcvd.qp_type == id->qp_type)) || | 1188 | (ib_event->param.req_rcvd.qp_type == id->qp_type)) || |
1189 | ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && | 1189 | ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && |
1190 | (id->qp_type == IB_QPT_UD)) || | 1190 | (id->qp_type == IB_QPT_UD)) || |
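The operator change above tightens the connection-request check: an IB_CM_REQ_RECEIVED event is now accepted only when the QP type carried in the request matches the listener's, where previously any REQ event passed the first clause. A stripped-down sketch of the corrected predicate (the enums are invented and the real function has additional clauses):

	#include <stdio.h>

	enum event_type { REQ_RECEIVED = 1, SIDR_REQ_RECEIVED = 2 };
	enum qp_type    { QPT_RC = 1, QPT_UD = 2 };

	static int check_req_qp_type(enum event_type ev, enum qp_type req_qpt,
				     enum qp_type id_qpt)
	{
		/* REQ events must match the listener's QP type; SIDR requests
		 * are only meaningful for UD listeners. */
		return ((ev == REQ_RECEIVED) && (req_qpt == id_qpt)) ||
		       ((ev == SIDR_REQ_RECEIVED) && (id_qpt == QPT_UD));
	}

	int main(void)
	{
		printf("%d\n", check_req_qp_type(REQ_RECEIVED, QPT_RC, QPT_UD)); /* 0 */
		printf("%d\n", check_req_qp_type(REQ_RECEIVED, QPT_RC, QPT_RC)); /* 1 */
		return 0;
	}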
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 55ab284e22f2..b18870c455ad 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, | |||
1593 | struct net_device *pdev; | 1593 | struct net_device *pdev; |
1594 | 1594 | ||
1595 | pdev = ip_dev_find(&init_net, peer_ip); | 1595 | pdev = ip_dev_find(&init_net, peer_ip); |
1596 | if (!pdev) { | ||
1597 | err = -ENODEV; | ||
1598 | goto out; | ||
1599 | } | ||
1596 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, | 1600 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, |
1597 | n, pdev, 0); | 1601 | n, pdev, 0); |
1598 | if (!ep->l2t) | 1602 | if (!ep->l2t) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ee1c577238f7..3530c41fcd1f 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
140 | props->max_mr_size = ~0ull; | 140 | props->max_mr_size = ~0ull; |
141 | props->page_size_cap = dev->dev->caps.page_size_cap; | 141 | props->page_size_cap = dev->dev->caps.page_size_cap; |
142 | props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; | 142 | props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; |
143 | props->max_qp_wr = dev->dev->caps.max_wqes; | 143 | props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; |
144 | props->max_sge = min(dev->dev->caps.max_sq_sg, | 144 | props->max_sge = min(dev->dev->caps.max_sq_sg, |
145 | dev->dev->caps.max_rq_sg); | 145 | dev->dev->caps.max_rq_sg); |
146 | props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; | 146 | props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; |
@@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
1084 | int total_eqs = 0; | 1084 | int total_eqs = 0; |
1085 | int i, j, eq; | 1085 | int i, j, eq; |
1086 | 1086 | ||
1087 | /* Init eq table */ | 1087 | /* Legacy mode or comp_pool is not large enough */ |
1088 | ibdev->eq_table = NULL; | 1088 | if (dev->caps.comp_pool == 0 || |
1089 | ibdev->eq_added = 0; | 1089 | dev->caps.num_ports > dev->caps.comp_pool) |
1090 | |||
1091 | /* Legacy mode? */ | ||
1092 | if (dev->caps.comp_pool == 0) | ||
1093 | return; | 1090 | return; |
1094 | 1091 | ||
1095 | eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ | 1092 | eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ |
@@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
1135 | static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | 1132 | static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) |
1136 | { | 1133 | { |
1137 | int i; | 1134 | int i; |
1138 | int total_eqs; | 1135 | |
1136 | /* no additional eqs were added */ | ||
1137 | if (!ibdev->eq_table) | ||
1138 | return; | ||
1139 | 1139 | ||
1140 | /* Reset the advertised EQ number */ | 1140 | /* Reset the advertised EQ number */ |
1141 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; | 1141 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; |
@@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) | |||
1148 | mlx4_release_eq(dev, ibdev->eq_table[i]); | 1148 | mlx4_release_eq(dev, ibdev->eq_table[i]); |
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added; | ||
1152 | memset(ibdev->eq_table, 0, total_eqs * sizeof(int)); | ||
1153 | kfree(ibdev->eq_table); | 1151 | kfree(ibdev->eq_table); |
1154 | |||
1155 | ibdev->eq_table = NULL; | ||
1156 | ibdev->eq_added = 0; | ||
1157 | } | 1152 | } |
1158 | 1153 | ||
1159 | static void *mlx4_ib_add(struct mlx4_dev *dev) | 1154 | static void *mlx4_ib_add(struct mlx4_dev *dev) |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index e62297cc77cc..ff36655d23d3 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -44,6 +44,14 @@ | |||
44 | #include <linux/mlx4/device.h> | 44 | #include <linux/mlx4/device.h> |
45 | #include <linux/mlx4/doorbell.h> | 45 | #include <linux/mlx4/doorbell.h> |
46 | 46 | ||
47 | enum { | ||
48 | MLX4_IB_SQ_MIN_WQE_SHIFT = 6, | ||
49 | MLX4_IB_MAX_HEADROOM = 2048 | ||
50 | }; | ||
51 | |||
52 | #define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1) | ||
53 | #define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT)) | ||
54 | |||
47 | struct mlx4_ib_ucontext { | 55 | struct mlx4_ib_ucontext { |
48 | struct ib_ucontext ibucontext; | 56 | struct ib_ucontext ibucontext; |
49 | struct mlx4_uar uar; | 57 | struct mlx4_uar uar; |
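Working the new constants through: MLX4_IB_SQ_HEADROOM(6) is (2048 >> 6) + 1 = 33, so MLX4_IB_SQ_MAX_SPARE reserves 33 work-queue entries, which is what max_qp_wr and the later queue-size checks subtract from the device maximum. A one-line check of the arithmetic:

	#include <stdio.h>

	enum {
		MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
		MLX4_IB_MAX_HEADROOM	 = 2048
	};

	#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
	#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))

	int main(void)
	{
		/* (2048 >> 6) + 1 = 32 + 1 = 33 spare work-queue entries */
		printf("MLX4_IB_SQ_MAX_SPARE = %d\n", MLX4_IB_SQ_MAX_SPARE);
		return 0;
	}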
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ceb33327091a..8d4ed24aef93 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
310 | int is_user, int has_rq, struct mlx4_ib_qp *qp) | 310 | int is_user, int has_rq, struct mlx4_ib_qp *qp) |
311 | { | 311 | { |
312 | /* Sanity check RQ size before proceeding */ | 312 | /* Sanity check RQ size before proceeding */ |
313 | if (cap->max_recv_wr > dev->dev->caps.max_wqes || | 313 | if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || |
314 | cap->max_recv_sge > dev->dev->caps.max_rq_sg) | 314 | cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) |
315 | return -EINVAL; | 315 | return -EINVAL; |
316 | 316 | ||
317 | if (!has_rq) { | 317 | if (!has_rq) { |
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
329 | qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); | 329 | qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); |
330 | } | 330 | } |
331 | 331 | ||
332 | cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; | 332 | /* leave userspace return values as they were, so as not to break ABI */ |
333 | cap->max_recv_sge = qp->rq.max_gs; | 333 | if (is_user) { |
334 | cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; | ||
335 | cap->max_recv_sge = qp->rq.max_gs; | ||
336 | } else { | ||
337 | cap->max_recv_wr = qp->rq.max_post = | ||
338 | min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); | ||
339 | cap->max_recv_sge = min(qp->rq.max_gs, | ||
340 | min(dev->dev->caps.max_sq_sg, | ||
341 | dev->dev->caps.max_rq_sg)); | ||
342 | } | ||
334 | 343 | ||
335 | return 0; | 344 | return 0; |
336 | } | 345 | } |
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
341 | int s; | 350 | int s; |
342 | 351 | ||
343 | /* Sanity check SQ size before proceeding */ | 352 | /* Sanity check SQ size before proceeding */ |
344 | if (cap->max_send_wr > dev->dev->caps.max_wqes || | 353 | if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || |
345 | cap->max_send_sge > dev->dev->caps.max_sq_sg || | 354 | cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || |
346 | cap->max_inline_data + send_wqe_overhead(type, qp->flags) + | 355 | cap->max_inline_data + send_wqe_overhead(type, qp->flags) + |
347 | sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) | 356 | sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) |
348 | return -EINVAL; | 357 | return -EINVAL; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 85a69c958559..48970af23679 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h | |||
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr { | |||
61 | u32 max_inline_data; | 61 | u32 max_inline_data; |
62 | int max_send_sge; | 62 | int max_send_sge; |
63 | int max_recv_sge; | 63 | int max_recv_sge; |
64 | int max_srq_sge; | ||
64 | int max_mr; | 65 | int max_mr; |
65 | u64 max_mr_size; | 66 | u64 max_mr_size; |
66 | u32 max_num_mr_pbl; | 67 | u32 max_num_mr_pbl; |
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info { | |||
231 | u32 entry_size; | 232 | u32 entry_size; |
232 | u32 max_cnt; | 233 | u32 max_cnt; |
233 | u32 max_wqe_idx; | 234 | u32 max_wqe_idx; |
234 | u32 free_delta; | ||
235 | u16 dbid; /* qid, where to ring the doorbell. */ | 235 | u16 dbid; /* qid, where to ring the doorbell. */ |
236 | u32 len; | 236 | u32 len; |
237 | dma_addr_t pa; | 237 | dma_addr_t pa; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h index a411a4e3193d..517ab20b727c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h | |||
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp { | |||
101 | u32 rsvd1; | 101 | u32 rsvd1; |
102 | u32 num_wqe_allocated; | 102 | u32 num_wqe_allocated; |
103 | u32 num_rqe_allocated; | 103 | u32 num_rqe_allocated; |
104 | u32 free_wqe_delta; | ||
105 | u32 free_rqe_delta; | ||
106 | u32 db_sq_offset; | 104 | u32 db_sq_offset; |
107 | u32 db_rq_offset; | 105 | u32 db_rq_offset; |
108 | u32 db_shift; | 106 | u32 db_shift; |
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp { | |||
126 | u32 db_rq_offset; | 124 | u32 db_rq_offset; |
127 | u32 db_shift; | 125 | u32 db_shift; |
128 | 126 | ||
129 | u32 free_rqe_delta; | 127 | u64 rsvd2; |
130 | u32 rsvd2; | ||
131 | u64 rsvd3; | 128 | u64 rsvd3; |
132 | } __packed; | 129 | } __packed; |
133 | 130 | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 9b204b1ba336..71942af4fce9 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
732 | break; | 732 | break; |
733 | case OCRDMA_SRQ_LIMIT_EVENT: | 733 | case OCRDMA_SRQ_LIMIT_EVENT: |
734 | ib_evt.element.srq = &qp->srq->ibsrq; | 734 | ib_evt.element.srq = &qp->srq->ibsrq; |
735 | ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; | 735 | ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED; |
736 | srq_event = 1; | 736 | srq_event = 1; |
737 | qp_event = 0; | 737 | qp_event = 0; |
738 | break; | 738 | break; |
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
990 | struct ocrdma_dev_attr *attr, | 990 | struct ocrdma_dev_attr *attr, |
991 | struct ocrdma_mbx_query_config *rsp) | 991 | struct ocrdma_mbx_query_config *rsp) |
992 | { | 992 | { |
993 | int max_q_mem; | ||
994 | |||
995 | attr->max_pd = | 993 | attr->max_pd = |
996 | (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> | 994 | (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> |
997 | OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; | 995 | OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; |
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
1004 | attr->max_recv_sge = (rsp->max_write_send_sge & | 1002 | attr->max_recv_sge = (rsp->max_write_send_sge & |
1005 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | 1003 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> |
1006 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; | 1004 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; |
1005 | attr->max_srq_sge = (rsp->max_srq_rqe_sge & | ||
1006 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> | ||
1007 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; | ||
1007 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & | 1008 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & |
1008 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> | 1009 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> |
1009 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; | 1010 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; |
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
1037 | attr->max_inline_data = | 1038 | attr->max_inline_data = |
1038 | attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + | 1039 | attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + |
1039 | sizeof(struct ocrdma_sge)); | 1040 | sizeof(struct ocrdma_sge)); |
1040 | max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1); | ||
1041 | /* hw can queue one less then the configured size, | ||
1042 | * so publish less by one to stack. | ||
1043 | */ | ||
1044 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1041 | if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { |
1045 | dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size; | ||
1046 | attr->ird = 1; | 1042 | attr->ird = 1; |
1047 | attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; | 1043 | attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; |
1048 | attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; | 1044 | attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; |
1049 | } else | 1045 | } |
1050 | dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1; | 1046 | dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >> |
1051 | dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1; | 1047 | OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET; |
1048 | dev->attr.max_rqe = rsp->max_wqes_rqes_per_q & | ||
1049 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK; | ||
1052 | } | 1050 | } |
1053 | 1051 | ||
1054 | static int ocrdma_check_fw_config(struct ocrdma_dev *dev, | 1052 | static int ocrdma_check_fw_config(struct ocrdma_dev *dev, |
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, | |||
1990 | max_wqe_allocated = 1 << max_wqe_allocated; | 1988 | max_wqe_allocated = 1 << max_wqe_allocated; |
1991 | max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); | 1989 | max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); |
1992 | 1990 | ||
1993 | if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||
1994 | qp->sq.free_delta = 0; | ||
1995 | qp->rq.free_delta = 1; | ||
1996 | } else | ||
1997 | qp->sq.free_delta = 1; | ||
1998 | |||
1999 | qp->sq.max_cnt = max_wqe_allocated; | 1991 | qp->sq.max_cnt = max_wqe_allocated; |
2000 | qp->sq.max_wqe_idx = max_wqe_allocated - 1; | 1992 | qp->sq.max_wqe_idx = max_wqe_allocated - 1; |
2001 | 1993 | ||
2002 | if (!attrs->srq) { | 1994 | if (!attrs->srq) { |
2003 | qp->rq.max_cnt = max_rqe_allocated; | 1995 | qp->rq.max_cnt = max_rqe_allocated; |
2004 | qp->rq.max_wqe_idx = max_rqe_allocated - 1; | 1996 | qp->rq.max_wqe_idx = max_rqe_allocated - 1; |
2005 | qp->rq.free_delta = 1; | ||
2006 | } | 1997 | } |
2007 | } | 1998 | } |
2008 | 1999 | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index a20d16eaae71..b050e629e9c3 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
@@ -26,7 +26,6 @@ | |||
26 | *******************************************************************/ | 26 | *******************************************************************/ |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/version.h> | ||
30 | #include <linux/idr.h> | 29 | #include <linux/idr.h> |
31 | #include <rdma/ib_verbs.h> | 30 | #include <rdma/ib_verbs.h> |
32 | #include <rdma/ib_user_verbs.h> | 31 | #include <rdma/ib_user_verbs.h> |
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr, | |||
98 | sgid->raw[15] = mac_addr[5]; | 97 | sgid->raw[15] = mac_addr[5]; |
99 | } | 98 | } |
100 | 99 | ||
101 | static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, | 100 | static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, |
102 | bool is_vlan, u16 vlan_id) | 101 | bool is_vlan, u16 vlan_id) |
103 | { | 102 | { |
104 | int i; | 103 | int i; |
105 | bool found = false; | ||
106 | union ib_gid new_sgid; | 104 | union ib_gid new_sgid; |
107 | int free_idx = OCRDMA_MAX_SGID; | ||
108 | unsigned long flags; | 105 | unsigned long flags; |
109 | 106 | ||
110 | memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); | 107 | memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); |
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, | |||
116 | if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, | 113 | if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, |
117 | sizeof(union ib_gid))) { | 114 | sizeof(union ib_gid))) { |
118 | /* found free entry */ | 115 | /* found free entry */ |
119 | if (!found) { | 116 | memcpy(&dev->sgid_tbl[i], &new_sgid, |
120 | free_idx = i; | 117 | sizeof(union ib_gid)); |
121 | found = true; | 118 | spin_unlock_irqrestore(&dev->sgid_lock, flags); |
122 | break; | 119 | return true; |
123 | } | ||
124 | } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, | 120 | } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, |
125 | sizeof(union ib_gid))) { | 121 | sizeof(union ib_gid))) { |
126 | /* entry already present, no addition is required. */ | 122 | /* entry already present, no addition is required. */ |
127 | spin_unlock_irqrestore(&dev->sgid_lock, flags); | 123 | spin_unlock_irqrestore(&dev->sgid_lock, flags); |
128 | return; | 124 | return false; |
129 | } | 125 | } |
130 | } | 126 | } |
131 | /* if entry doesn't exist and if table has some space, add entry */ | ||
132 | if (found) | ||
133 | memcpy(&dev->sgid_tbl[free_idx], &new_sgid, | ||
134 | sizeof(union ib_gid)); | ||
135 | spin_unlock_irqrestore(&dev->sgid_lock, flags); | 127 | spin_unlock_irqrestore(&dev->sgid_lock, flags); |
128 | return false; | ||
136 | } | 129 | } |
137 | 130 | ||
138 | static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, | 131 | static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, |
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) | |||
168 | ocrdma_get_guid(dev, &sgid->raw[8]); | 161 | ocrdma_get_guid(dev, &sgid->raw[8]); |
169 | } | 162 | } |
170 | 163 | ||
171 | static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) | 164 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
165 | static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) | ||
172 | { | 166 | { |
173 | struct net_device *netdev, *tmp; | 167 | struct net_device *netdev, *tmp; |
174 | u16 vlan_id; | 168 | u16 vlan_id; |
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) | |||
176 | 170 | ||
177 | netdev = dev->nic_info.netdev; | 171 | netdev = dev->nic_info.netdev; |
178 | 172 | ||
179 | ocrdma_add_default_sgid(dev); | ||
180 | |||
181 | rcu_read_lock(); | 173 | rcu_read_lock(); |
182 | for_each_netdev_rcu(&init_net, tmp) { | 174 | for_each_netdev_rcu(&init_net, tmp) { |
183 | if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) { | 175 | if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) { |
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) | |||
195 | } | 187 | } |
196 | } | 188 | } |
197 | rcu_read_unlock(); | 189 | rcu_read_unlock(); |
190 | } | ||
191 | #else | ||
192 | static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) | ||
193 | { | ||
194 | |||
195 | } | ||
196 | #endif /* VLAN */ | ||
197 | |||
198 | static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) | ||
199 | { | ||
200 | ocrdma_add_default_sgid(dev); | ||
201 | ocrdma_add_vlan_sgids(dev); | ||
198 | return 0; | 202 | return 0; |
199 | } | 203 | } |
200 | 204 | ||
201 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 205 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \ |
206 | defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
202 | 207 | ||
203 | static int ocrdma_inet6addr_event(struct notifier_block *notifier, | 208 | static int ocrdma_inet6addr_event(struct notifier_block *notifier, |
204 | unsigned long event, void *ptr) | 209 | unsigned long event, void *ptr) |
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier, | |||
209 | struct ib_event gid_event; | 214 | struct ib_event gid_event; |
210 | struct ocrdma_dev *dev; | 215 | struct ocrdma_dev *dev; |
211 | bool found = false; | 216 | bool found = false; |
217 | bool updated = false; | ||
212 | bool is_vlan = false; | 218 | bool is_vlan = false; |
213 | u16 vid = 0; | 219 | u16 vid = 0; |
214 | 220 | ||
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier, | |||
234 | mutex_lock(&dev->dev_lock); | 240 | mutex_lock(&dev->dev_lock); |
235 | switch (event) { | 241 | switch (event) { |
236 | case NETDEV_UP: | 242 | case NETDEV_UP: |
237 | ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); | 243 | updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); |
238 | break; | 244 | break; |
239 | case NETDEV_DOWN: | 245 | case NETDEV_DOWN: |
240 | found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); | 246 | updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); |
241 | if (found) { | ||
242 | /* found the matching entry, notify | ||
243 | * the consumers about it | ||
244 | */ | ||
245 | gid_event.device = &dev->ibdev; | ||
246 | gid_event.element.port_num = 1; | ||
247 | gid_event.event = IB_EVENT_GID_CHANGE; | ||
248 | ib_dispatch_event(&gid_event); | ||
249 | } | ||
250 | break; | 247 | break; |
251 | default: | 248 | default: |
252 | break; | 249 | break; |
253 | } | 250 | } |
251 | if (updated) { | ||
252 | /* GID table updated, notify the consumers about it */ | ||
253 | gid_event.device = &dev->ibdev; | ||
254 | gid_event.element.port_num = 1; | ||
255 | gid_event.event = IB_EVENT_GID_CHANGE; | ||
256 | ib_dispatch_event(&gid_event); | ||
257 | } | ||
254 | mutex_unlock(&dev->dev_lock); | 258 | mutex_unlock(&dev->dev_lock); |
255 | return NOTIFY_OK; | 259 | return NOTIFY_OK; |
256 | } | 260 | } |
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = { | |||
259 | .notifier_call = ocrdma_inet6addr_event | 263 | .notifier_call = ocrdma_inet6addr_event |
260 | }; | 264 | }; |
261 | 265 | ||
262 | #endif /* IPV6 */ | 266 | #endif /* IPV6 and VLAN */ |
263 | 267 | ||
264 | static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, | 268 | static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, |
265 | u8 port_num) | 269 | u8 port_num) |
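The refactor above has ocrdma_add_sgid() and ocrdma_del_sgid() report whether the GID table actually changed, and the notifier then dispatches a single IB_EVENT_GID_CHANGE for either direction only when it did. A small, runnable sketch of that update-then-notify pattern (all names are placeholders):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins: return true only when the table actually changed. */
	static bool table_add(int entry) { return entry != 0; }
	static bool table_del(int entry) { return entry != 0; }

	static void notify_consumers(void) { printf("GID change event\n"); }

	static void on_netdev_event(bool up, int entry)
	{
		bool updated = up ? table_add(entry) : table_del(entry);

		/* One notification path for both directions, fired only on real changes. */
		if (updated)
			notify_consumers();
	}

	int main(void)
	{
		on_netdev_event(true, 5);	/* entry added: notifies */
		on_netdev_event(false, 0);	/* nothing removed: stays quiet */
		return 0;
	}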
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 7fd80cc0f037..c75cbdfa87e7 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h | |||
@@ -418,6 +418,9 @@ enum { | |||
418 | 418 | ||
419 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, | 419 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, |
420 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, | 420 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, |
421 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16, | ||
422 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF << | ||
423 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT, | ||
421 | 424 | ||
422 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, | 425 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, |
423 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, | 426 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, |
@@ -458,7 +461,7 @@ enum { | |||
458 | OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, | 461 | OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, |
459 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, | 462 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, |
460 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << | 463 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << |
461 | OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, | 464 | OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET, |
462 | 465 | ||
463 | OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, | 466 | OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, |
464 | OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF << | 467 | OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF << |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index e9f74d1b48f6..2e2e7aecc990 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port, | |||
53 | 53 | ||
54 | dev = get_ocrdma_dev(ibdev); | 54 | dev = get_ocrdma_dev(ibdev); |
55 | memset(sgid, 0, sizeof(*sgid)); | 55 | memset(sgid, 0, sizeof(*sgid)); |
56 | if (index > OCRDMA_MAX_SGID) | 56 | if (index >= OCRDMA_MAX_SGID) |
57 | return -EINVAL; | 57 | return -EINVAL; |
58 | 58 | ||
59 | memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); | 59 | memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); |
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |||
83 | IB_DEVICE_SHUTDOWN_PORT | | 83 | IB_DEVICE_SHUTDOWN_PORT | |
84 | IB_DEVICE_SYS_IMAGE_GUID | | 84 | IB_DEVICE_SYS_IMAGE_GUID | |
85 | IB_DEVICE_LOCAL_DMA_LKEY; | 85 | IB_DEVICE_LOCAL_DMA_LKEY; |
86 | attr->max_sge = dev->attr.max_send_sge; | 86 | attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); |
87 | attr->max_sge_rd = dev->attr.max_send_sge; | 87 | attr->max_sge_rd = 0; |
88 | attr->max_cq = dev->attr.max_cq; | 88 | attr->max_cq = dev->attr.max_cq; |
89 | attr->max_cqe = dev->attr.max_cqe; | 89 | attr->max_cqe = dev->attr.max_cqe; |
90 | attr->max_mr = dev->attr.max_mr; | 90 | attr->max_mr = dev->attr.max_mr; |
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) | |||
97 | min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); | 97 | min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); |
98 | attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; | 98 | attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; |
99 | attr->max_srq = (dev->attr.max_qp - 1); | 99 | attr->max_srq = (dev->attr.max_qp - 1); |
100 | attr->max_srq_sge = attr->max_sge; | 100 | attr->max_srq_sge = dev->attr.max_srq_sge; |
101 | attr->max_srq_wr = dev->attr.max_rqe; | 101 | attr->max_srq_wr = dev->attr.max_rqe; |
102 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; | 102 | attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; |
103 | attr->max_fast_reg_page_list_len = 0; | 103 | attr->max_fast_reg_page_list_len = 0; |
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, | |||
940 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; | 940 | uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; |
941 | uresp.db_shift = 16; | 941 | uresp.db_shift = 16; |
942 | } | 942 | } |
943 | uresp.free_wqe_delta = qp->sq.free_delta; | ||
944 | uresp.free_rqe_delta = qp->rq.free_delta; | ||
945 | 943 | ||
946 | if (qp->dpp_enabled) { | 944 | if (qp->dpp_enabled) { |
947 | uresp.dpp_credit = dpp_credit_lmt; | 945 | uresp.dpp_credit = dpp_credit_lmt; |
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) | |||
1307 | free_cnt = (q->max_cnt - q->head) + q->tail; | 1305 | free_cnt = (q->max_cnt - q->head) + q->tail; |
1308 | else | 1306 | else |
1309 | free_cnt = q->tail - q->head; | 1307 | free_cnt = q->tail - q->head; |
1310 | if (q->free_delta) | ||
1311 | free_cnt -= q->free_delta; | ||
1312 | return free_cnt; | 1308 | return free_cnt; |
1313 | } | 1309 | } |
1314 | 1310 | ||
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata) | |||
1501 | (srq->pd->id * srq->dev->nic_info.db_page_size); | 1497 | (srq->pd->id * srq->dev->nic_info.db_page_size); |
1502 | uresp.db_page_size = srq->dev->nic_info.db_page_size; | 1498 | uresp.db_page_size = srq->dev->nic_info.db_page_size; |
1503 | uresp.num_rqe_allocated = srq->rq.max_cnt; | 1499 | uresp.num_rqe_allocated = srq->rq.max_cnt; |
1504 | uresp.free_rqe_delta = 1; | ||
1505 | if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | 1500 | if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { |
1506 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; | 1501 | uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; |
1507 | uresp.db_shift = 24; | 1502 | uresp.db_shift = 24; |
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, | |||
2306 | *stop = true; | 2301 | *stop = true; |
2307 | expand = false; | 2302 | expand = false; |
2308 | } | 2303 | } |
2309 | } else | 2304 | } else { |
2305 | *polled = true; | ||
2310 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); | 2306 | expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); |
2307 | } | ||
2311 | return expand; | 2308 | return expand; |
2312 | } | 2309 | } |
2313 | 2310 | ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index e6483439f25f..633f03d80274 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | |||
@@ -28,7 +28,6 @@ | |||
28 | #ifndef __OCRDMA_VERBS_H__ | 28 | #ifndef __OCRDMA_VERBS_H__ |
29 | #define __OCRDMA_VERBS_H__ | 29 | #define __OCRDMA_VERBS_H__ |
30 | 30 | ||
31 | #include <linux/version.h> | ||
32 | int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, | 31 | int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, |
33 | struct ib_send_wr **bad_wr); | 32 | struct ib_send_wr **bad_wr); |
34 | int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, | 33 | int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d90a421e9cac..a2e418cba0ff 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
547 | spin_unlock_irqrestore(&iommu->lock, flags); | 547 | spin_unlock_irqrestore(&iommu->lock, flags); |
548 | } | 548 | } |
549 | 549 | ||
550 | static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) | 550 | static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) |
551 | { | 551 | { |
552 | struct amd_iommu_fault fault; | 552 | struct amd_iommu_fault fault; |
553 | volatile u64 *raw; | ||
554 | int i; | ||
555 | 553 | ||
556 | INC_STATS_COUNTER(pri_requests); | 554 | INC_STATS_COUNTER(pri_requests); |
557 | 555 | ||
558 | raw = (u64 *)(iommu->ppr_log + head); | ||
559 | |||
560 | /* | ||
561 | * Hardware bug: Interrupt may arrive before the entry is written to | ||
562 | * memory. If this happens we need to wait for the entry to arrive. | ||
563 | */ | ||
564 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | ||
565 | if (PPR_REQ_TYPE(raw[0]) != 0) | ||
566 | break; | ||
567 | udelay(1); | ||
568 | } | ||
569 | |||
570 | if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { | 556 | if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { |
571 | pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); | 557 | pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); |
572 | return; | 558 | return; |
@@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) | |||
578 | fault.tag = PPR_TAG(raw[0]); | 564 | fault.tag = PPR_TAG(raw[0]); |
579 | fault.flags = PPR_FLAGS(raw[0]); | 565 | fault.flags = PPR_FLAGS(raw[0]); |
580 | 566 | ||
581 | /* | ||
582 | * To detect the hardware bug we need to clear the entry | ||
583 | * to back to zero. | ||
584 | */ | ||
585 | raw[0] = raw[1] = 0; | ||
586 | |||
587 | atomic_notifier_call_chain(&ppr_notifier, 0, &fault); | 567 | atomic_notifier_call_chain(&ppr_notifier, 0, &fault); |
588 | } | 568 | } |
589 | 569 | ||
@@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) | |||
595 | if (iommu->ppr_log == NULL) | 575 | if (iommu->ppr_log == NULL) |
596 | return; | 576 | return; |
597 | 577 | ||
578 | /* enable ppr interrupts again */ | ||
579 | writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
580 | |||
598 | spin_lock_irqsave(&iommu->lock, flags); | 581 | spin_lock_irqsave(&iommu->lock, flags); |
599 | 582 | ||
600 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | 583 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
601 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | 584 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
602 | 585 | ||
603 | while (head != tail) { | 586 | while (head != tail) { |
587 | volatile u64 *raw; | ||
588 | u64 entry[2]; | ||
589 | int i; | ||
604 | 590 | ||
605 | /* Handle PPR entry */ | 591 | raw = (u64 *)(iommu->ppr_log + head); |
606 | iommu_handle_ppr_entry(iommu, head); | 592 | |
593 | /* | ||
594 | * Hardware bug: Interrupt may arrive before the entry is | ||
595 | * written to memory. If this happens we need to wait for the | ||
596 | * entry to arrive. | ||
597 | */ | ||
598 | for (i = 0; i < LOOP_TIMEOUT; ++i) { | ||
599 | if (PPR_REQ_TYPE(raw[0]) != 0) | ||
600 | break; | ||
601 | udelay(1); | ||
602 | } | ||
603 | |||
604 | /* Avoid memcpy function-call overhead */ | ||
605 | entry[0] = raw[0]; | ||
606 | entry[1] = raw[1]; | ||
607 | 607 | ||
608 | /* Update and refresh ring-buffer state*/ | 608 | /* |
609 | * To detect the hardware bug we need to clear the entry | ||
610 | * back to zero. | ||
611 | */ | ||
612 | raw[0] = raw[1] = 0UL; | ||
613 | |||
614 | /* Update head pointer of hardware ring-buffer */ | ||
609 | head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; | 615 | head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; |
610 | writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | 616 | writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); |
617 | |||
618 | /* | ||
619 | * Release iommu->lock because ppr-handling might need to | ||
620 | * re-acquire it | ||
621 | */ | ||
622 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
623 | |||
624 | /* Handle PPR entry */ | ||
625 | iommu_handle_ppr_entry(iommu, entry); | ||
626 | |||
627 | spin_lock_irqsave(&iommu->lock, flags); | ||
628 | |||
629 | /* Refresh ring-buffer information */ | ||
630 | head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); | ||
611 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); | 631 | tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); |
612 | } | 632 | } |
613 | 633 | ||
614 | /* enable ppr interrupts again */ | ||
615 | writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
616 | |||
617 | spin_unlock_irqrestore(&iommu->lock, flags); | 634 | spin_unlock_irqrestore(&iommu->lock, flags); |
618 | } | 635 | } |
619 | 636 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index c56790375e0f..542024ba6dba 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | |||
1029 | if (!iommu->dev) | 1029 | if (!iommu->dev) |
1030 | return 1; | 1030 | return 1; |
1031 | 1031 | ||
1032 | iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, | ||
1033 | PCI_DEVFN(0, 0)); | ||
1034 | |||
1032 | iommu->cap_ptr = h->cap_ptr; | 1035 | iommu->cap_ptr = h->cap_ptr; |
1033 | iommu->pci_seg = h->pci_seg; | 1036 | iommu->pci_seg = h->pci_seg; |
1034 | iommu->mmio_phys = h->mmio_phys; | 1037 | iommu->mmio_phys = h->mmio_phys; |
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) | |||
1323 | { | 1326 | { |
1324 | int i, j; | 1327 | int i, j; |
1325 | u32 ioc_feature_control; | 1328 | u32 ioc_feature_control; |
1326 | struct pci_dev *pdev = NULL; | 1329 | struct pci_dev *pdev = iommu->root_pdev; |
1327 | 1330 | ||
1328 | /* RD890 BIOSes may not have completely reconfigured the iommu */ | 1331 | /* RD890 BIOSes may not have completely reconfigured the iommu */ |
1329 | if (!is_rd890_iommu(iommu->dev)) | 1332 | if (!is_rd890_iommu(iommu->dev) || !pdev) |
1330 | return; | 1333 | return; |
1331 | 1334 | ||
1332 | /* | 1335 | /* |
1333 | * First, we need to ensure that the iommu is enabled. This is | 1336 | * First, we need to ensure that the iommu is enabled. This is |
1334 | * controlled by a register in the northbridge | 1337 | * controlled by a register in the northbridge |
1335 | */ | 1338 | */ |
1336 | pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); | ||
1337 | |||
1338 | if (!pdev) | ||
1339 | return; | ||
1340 | 1339 | ||
1341 | /* Select Northbridge indirect register 0x75 and enable writing */ | 1340 | /* Select Northbridge indirect register 0x75 and enable writing */ |
1342 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); | 1341 | pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); |
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) | |||
1346 | if (!(ioc_feature_control & 0x1)) | 1345 | if (!(ioc_feature_control & 0x1)) |
1347 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); | 1346 | pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); |
1348 | 1347 | ||
1349 | pci_dev_put(pdev); | ||
1350 | |||
1351 | /* Restore the iommu BAR */ | 1348 | /* Restore the iommu BAR */ |
1352 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, | 1349 | pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, |
1353 | iommu->stored_addr_lo); | 1350 | iommu->stored_addr_lo); |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 2452f3b71736..24355559a2ad 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
@@ -481,6 +481,9 @@ struct amd_iommu { | |||
481 | /* Pointer to PCI device of this IOMMU */ | 481 | /* Pointer to PCI device of this IOMMU */ |
482 | struct pci_dev *dev; | 482 | struct pci_dev *dev; |
483 | 483 | ||
484 | /* Cache pdev to root device for resume quirks */ | ||
485 | struct pci_dev *root_pdev; | ||
486 | |||
484 | /* physical address of MMIO space */ | 487 | /* physical address of MMIO space */ |
485 | u64 mmio_phys; | 488 | u64 mmio_phys; |
486 | /* virtual address of MMIO space */ | 489 | /* virtual address of MMIO space */ |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 04cb8c88d74b..12b2b55c519e 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -379,7 +379,7 @@ config LEDS_NETXBIG | |||
379 | 379 | ||
380 | config LEDS_ASIC3 | 380 | config LEDS_ASIC3 |
381 | bool "LED support for the HTC ASIC3" | 381 | bool "LED support for the HTC ASIC3" |
382 | depends on LEDS_CLASS | 382 | depends on LEDS_CLASS=y |
383 | depends on MFD_ASIC3 | 383 | depends on MFD_ASIC3 |
384 | default y | 384 | default y |
385 | help | 385 | help |
@@ -390,7 +390,7 @@ config LEDS_ASIC3 | |||
390 | 390 | ||
391 | config LEDS_RENESAS_TPU | 391 | config LEDS_RENESAS_TPU |
392 | bool "LED support for Renesas TPU" | 392 | bool "LED support for Renesas TPU" |
393 | depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO | 393 | depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO |
394 | help | 394 | help |
395 | This option enables build of the LED TPU platform driver, | 395 | This option enables build of the LED TPU platform driver, |
396 | suitable to drive any TPU channel on newer Renesas SoCs. | 396 | suitable to drive any TPU channel on newer Renesas SoCs. |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 8ee92c81aec2..e663e6f413e9 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev) | |||
29 | led_cdev->brightness = led_cdev->brightness_get(led_cdev); | 29 | led_cdev->brightness = led_cdev->brightness_get(led_cdev); |
30 | } | 30 | } |
31 | 31 | ||
32 | static ssize_t led_brightness_show(struct device *dev, | 32 | static ssize_t led_brightness_show(struct device *dev, |
33 | struct device_attribute *attr, char *buf) | 33 | struct device_attribute *attr, char *buf) |
34 | { | 34 | { |
35 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | 35 | struct led_classdev *led_cdev = dev_get_drvdata(dev); |
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index d6860043f6f9..d65353d8d3fc 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c | |||
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev, | |||
44 | if (!led_cdev->blink_brightness) | 44 | if (!led_cdev->blink_brightness) |
45 | led_cdev->blink_brightness = led_cdev->max_brightness; | 45 | led_cdev->blink_brightness = led_cdev->max_brightness; |
46 | 46 | ||
47 | if (led_get_trigger_data(led_cdev) && | ||
48 | delay_on == led_cdev->blink_delay_on && | ||
49 | delay_off == led_cdev->blink_delay_off) | ||
50 | return; | ||
51 | |||
52 | led_stop_software_blink(led_cdev); | ||
53 | |||
54 | led_cdev->blink_delay_on = delay_on; | 47 | led_cdev->blink_delay_on = delay_on; |
55 | led_cdev->blink_delay_off = delay_off; | 48 | led_cdev->blink_delay_off = delay_off; |
56 | 49 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 835de7168cd3..a9c7981ddd24 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -2550,6 +2550,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
2550 | err = -EINVAL; | 2550 | err = -EINVAL; |
2551 | spin_lock_init(&conf->device_lock); | 2551 | spin_lock_init(&conf->device_lock); |
2552 | rdev_for_each(rdev, mddev) { | 2552 | rdev_for_each(rdev, mddev) { |
2553 | struct request_queue *q; | ||
2553 | int disk_idx = rdev->raid_disk; | 2554 | int disk_idx = rdev->raid_disk; |
2554 | if (disk_idx >= mddev->raid_disks | 2555 | if (disk_idx >= mddev->raid_disks |
2555 | || disk_idx < 0) | 2556 | || disk_idx < 0) |
@@ -2562,6 +2563,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
2562 | if (disk->rdev) | 2563 | if (disk->rdev) |
2563 | goto abort; | 2564 | goto abort; |
2564 | disk->rdev = rdev; | 2565 | disk->rdev = rdev; |
2566 | q = bdev_get_queue(rdev->bdev); | ||
2567 | if (q->merge_bvec_fn) | ||
2568 | mddev->merge_check_needed = 1; | ||
2565 | 2569 | ||
2566 | disk->head_position = 0; | 2570 | disk->head_position = 0; |
2567 | } | 2571 | } |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 987db37cb875..99ae6068e456 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -3475,6 +3475,7 @@ static int run(struct mddev *mddev) | |||
3475 | 3475 | ||
3476 | rdev_for_each(rdev, mddev) { | 3476 | rdev_for_each(rdev, mddev) { |
3477 | long long diff; | 3477 | long long diff; |
3478 | struct request_queue *q; | ||
3478 | 3479 | ||
3479 | disk_idx = rdev->raid_disk; | 3480 | disk_idx = rdev->raid_disk; |
3480 | if (disk_idx < 0) | 3481 | if (disk_idx < 0) |
@@ -3493,6 +3494,9 @@ static int run(struct mddev *mddev) | |||
3493 | goto out_free_conf; | 3494 | goto out_free_conf; |
3494 | disk->rdev = rdev; | 3495 | disk->rdev = rdev; |
3495 | } | 3496 | } |
3497 | q = bdev_get_queue(rdev->bdev); | ||
3498 | if (q->merge_bvec_fn) | ||
3499 | mddev->merge_check_needed = 1; | ||
3496 | diff = (rdev->new_data_offset - rdev->data_offset); | 3500 | diff = (rdev->new_data_offset - rdev->data_offset); |
3497 | if (!mddev->reshape_backwards) | 3501 | if (!mddev->reshape_backwards) |
3498 | diff = -diff; | 3502 | diff = -diff; |
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c index af2d9086d7e8..c370c2d87c17 100644 --- a/drivers/media/video/pms.c +++ b/drivers/media/video/pms.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/ioport.h> | 29 | #include <linux/ioport.h> |
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
32 | #include <linux/slab.h> | ||
32 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
33 | #include <linux/isa.h> | 34 | #include <linux/isa.h> |
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c index 373f423b1181..947a06a1845f 100644 --- a/drivers/mfd/stmpe-i2c.c +++ b/drivers/mfd/stmpe-i2c.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * | 6 | * |
7 | * License Terms: GNU General Public License, version 2 | 7 | * License Terms: GNU General Public License, version 2 |
8 | * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson | 8 | * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson |
9 | * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics | 9 | * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/i2c.h> | 12 | #include <linux/i2c.h> |
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c index afd459013ecb..9edfe864cc05 100644 --- a/drivers/mfd/stmpe-spi.c +++ b/drivers/mfd/stmpe-spi.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Copyright (C) ST Microelectronics SA 2011 | 4 | * Copyright (C) ST Microelectronics SA 2011 |
5 | * | 5 | * |
6 | * License Terms: GNU General Public License, version 2 | 6 | * License Terms: GNU General Public License, version 2 |
7 | * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics | 7 | * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/spi/spi.h> | 10 | #include <linux/spi/spi.h> |
@@ -146,4 +146,4 @@ module_exit(stmpe_exit); | |||
146 | 146 | ||
147 | MODULE_LICENSE("GPL v2"); | 147 | MODULE_LICENSE("GPL v2"); |
148 | MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); | 148 | MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); |
149 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 149 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 93936f1b75eb..23f5463d4cae 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots, | |||
835 | struct mei_cl *cl, | 835 | struct mei_cl *cl, |
836 | struct mei_io_list *cmpl_list) | 836 | struct mei_io_list *cmpl_list) |
837 | { | 837 | { |
838 | if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) + | 838 | if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) + |
839 | sizeof(struct hbm_flow_control))) { | 839 | sizeof(struct hbm_flow_control))) { |
840 | /* return the cancel routine */ | 840 | /* return the cancel routine */ |
841 | list_del(&cb_pos->cb_list); | 841 | list_del(&cb_pos->cb_list); |
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index c70333228337..7de13891e49e 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev, | |||
982 | err = request_threaded_irq(pdev->irq, | 982 | err = request_threaded_irq(pdev->irq, |
983 | NULL, | 983 | NULL, |
984 | mei_interrupt_thread_handler, | 984 | mei_interrupt_thread_handler, |
985 | 0, mei_driver_name, dev); | 985 | IRQF_ONESHOT, mei_driver_name, dev); |
986 | else | 986 | else |
987 | err = request_threaded_irq(pdev->irq, | 987 | err = request_threaded_irq(pdev->irq, |
988 | mei_interrupt_quick_handler, | 988 | mei_interrupt_quick_handler, |
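
A note on the first mei hunk: when request_threaded_irq() is given a NULL primary handler, the IRQ core insists on IRQF_ONESHOT so the line stays masked until the threaded handler has finished, which is exactly what the MSI branch now passes. A hedged sketch of the two request shapes follows; the demo_* names are placeholders, not MEI symbols.

    #include <linux/interrupt.h>

    static irqreturn_t demo_quick_handler(int irq, void *dev_id)
    {
        /* Hard-irq context: ack the device, then wake the thread. */
        return IRQ_WAKE_THREAD;
    }

    static irqreturn_t demo_thread_handler(int irq, void *dev_id)
    {
        /* Process context: may sleep and talk to the device at leisure. */
        return IRQ_HANDLED;
    }

    static int demo_request_irq(unsigned int irq, void *dev, bool msi)
    {
        if (msi)
            /* No primary handler, so IRQF_ONESHOT is mandatory. */
            return request_threaded_irq(irq, NULL, demo_thread_handler,
                                        IRQF_ONESHOT, "demo", dev);

        /* With a primary handler the oneshot behaviour is optional. */
        return request_threaded_irq(irq, demo_quick_handler,
                                    demo_thread_handler, 0, "demo", dev);
    }
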
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev, | |||
992 | if (err) { | 992 | if (err) { |
993 | dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", | 993 | dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", |
994 | pdev->irq); | 994 | pdev->irq); |
995 | goto unmap_memory; | 995 | goto disable_msi; |
996 | } | 996 | } |
997 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); | 997 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); |
998 | if (mei_hw_init(dev)) { | 998 | if (mei_hw_init(dev)) { |
@@ -1023,8 +1023,8 @@ release_irq: | |||
1023 | mei_disable_interrupts(dev); | 1023 | mei_disable_interrupts(dev); |
1024 | flush_scheduled_work(); | 1024 | flush_scheduled_work(); |
1025 | free_irq(pdev->irq, dev); | 1025 | free_irq(pdev->irq, dev); |
1026 | disable_msi: | ||
1026 | pci_disable_msi(pdev); | 1027 | pci_disable_msi(pdev); |
1027 | unmap_memory: | ||
1028 | pci_iounmap(pdev, dev->mem_addr); | 1028 | pci_iounmap(pdev, dev->mem_addr); |
1029 | free_device: | 1029 | free_device: |
1030 | kfree(dev); | 1030 | kfree(dev); |
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev) | |||
1101 | 1101 | ||
1102 | pci_release_regions(pdev); | 1102 | pci_release_regions(pdev); |
1103 | pci_disable_device(pdev); | 1103 | pci_disable_device(pdev); |
1104 | |||
1105 | misc_deregister(&mei_misc_device); | ||
1104 | } | 1106 | } |
1105 | #ifdef CONFIG_PM | 1107 | #ifdef CONFIG_PM |
1106 | static int mei_pci_suspend(struct device *device) | 1108 | static int mei_pci_suspend(struct device *device) |
@@ -1216,7 +1218,6 @@ module_init(mei_init_module); | |||
1216 | */ | 1218 | */ |
1217 | static void __exit mei_exit_module(void) | 1219 | static void __exit mei_exit_module(void) |
1218 | { | 1220 | { |
1219 | misc_deregister(&mei_misc_device); | ||
1220 | pci_unregister_driver(&mei_driver); | 1221 | pci_unregister_driver(&mei_driver); |
1221 | 1222 | ||
1222 | pr_debug("unloaded successfully.\n"); | 1223 | pr_debug("unloaded successfully.\n"); |
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index 6be5605707b4..e2ec0505eb5c 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c | |||
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = { | |||
341 | }; | 341 | }; |
342 | static const struct watchdog_info wd_info = { | 342 | static const struct watchdog_info wd_info = { |
343 | .identity = INTEL_AMT_WATCHDOG_ID, | 343 | .identity = INTEL_AMT_WATCHDOG_ID, |
344 | .options = WDIOF_KEEPALIVEPING, | 344 | .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY, |
345 | }; | 345 | }; |
346 | 346 | ||
347 | static struct watchdog_device amt_wd_dev = { | 347 | static struct watchdog_device amt_wd_dev = { |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 2d4a4b746750..258b203397aa 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -1326,7 +1326,7 @@ static int mmc_suspend(struct mmc_host *host) | |||
1326 | if (!err) | 1326 | if (!err) |
1327 | mmc_card_set_sleep(host->card); | 1327 | mmc_card_set_sleep(host->card); |
1328 | } else if (!mmc_host_is_spi(host)) | 1328 | } else if (!mmc_host_is_spi(host)) |
1329 | mmc_deselect_cards(host); | 1329 | err = mmc_deselect_cards(host); |
1330 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); | 1330 | host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); |
1331 | mmc_release_host(host); | 1331 | mmc_release_host(host); |
1332 | 1332 | ||
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index c272c6868ecf..b2b43f624b9e 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c | |||
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host) | |||
1075 | */ | 1075 | */ |
1076 | static int mmc_sd_suspend(struct mmc_host *host) | 1076 | static int mmc_sd_suspend(struct mmc_host *host) |
1077 | { | 1077 | { |
1078 | int err = 0; | ||
1079 | |||
1078 | BUG_ON(!host); | 1080 | BUG_ON(!host); |
1079 | BUG_ON(!host->card); | 1081 | BUG_ON(!host->card); |
1080 | 1082 | ||
1081 | mmc_claim_host(host); | 1083 | mmc_claim_host(host); |
1082 | if (!mmc_host_is_spi(host)) | 1084 | if (!mmc_host_is_spi(host)) |
1083 | mmc_deselect_cards(host); | 1085 | err = mmc_deselect_cards(host); |
1084 | host->card->state &= ~MMC_STATE_HIGHSPEED; | 1086 | host->card->state &= ~MMC_STATE_HIGHSPEED; |
1085 | mmc_release_host(host); | 1087 | mmc_release_host(host); |
1086 | 1088 | ||
1087 | return 0; | 1089 | return err; |
1088 | } | 1090 | } |
1089 | 1091 | ||
1090 | /* | 1092 | /* |
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 13d0e95380ab..41c5fd8848f4 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card) | |||
218 | if (ret) | 218 | if (ret) |
219 | return ret; | 219 | return ret; |
220 | 220 | ||
221 | if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED) | ||
222 | pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n", | ||
223 | mmc_hostname(card->host), ctrl); | ||
224 | |||
225 | /* set as 4-bit bus width */ | ||
226 | ctrl &= ~SDIO_BUS_WIDTH_MASK; | ||
221 | ctrl |= SDIO_BUS_WIDTH_4BIT; | 227 | ctrl |= SDIO_BUS_WIDTH_4BIT; |
222 | 228 | ||
223 | ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); | 229 | ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); |
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h index 787aba1682bb..ab56f7db5315 100644 --- a/drivers/mmc/host/atmel-mci-regs.h +++ b/drivers/mmc/host/atmel-mci-regs.h | |||
@@ -140,4 +140,18 @@ | |||
140 | #define atmci_writel(port,reg,value) \ | 140 | #define atmci_writel(port,reg,value) \ |
141 | __raw_writel((value), (port)->regs + reg) | 141 | __raw_writel((value), (port)->regs + reg) |
142 | 142 | ||
143 | /* | ||
144 | * Fix sconfig's burst size according to atmel MCI. We need to convert them as: | ||
145 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
146 | * | ||
147 | * This can be done by finding the most significant bit set. | ||
148 | */ | ||
149 | static inline unsigned int atmci_convert_chksize(unsigned int maxburst) | ||
150 | { | ||
151 | if (maxburst > 1) | ||
152 | return fls(maxburst) - 2; | ||
153 | else | ||
154 | return 0; | ||
155 | } | ||
156 | |||
143 | #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ | 157 | #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ |
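
The helper added above maps a DMA maxburst of 1, 4, 8 or 16 to the CHKSIZE codes 0, 1, 2 and 3 by locating the most significant set bit. Here is a quick stand-alone check of that mapping, using a local fls() built on __builtin_clz since the kernel's version is not available outside the tree:

    #include <assert.h>
    #include <stdio.h>

    /* Same semantics as the kernel's fls(): 1-based index of the top set bit. */
    static int fls_local(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    static unsigned int convert_chksize(unsigned int maxburst)
    {
        return maxburst > 1 ? fls_local(maxburst) - 2 : 0;
    }

    int main(void)
    {
        assert(convert_chksize(1)  == 0);
        assert(convert_chksize(4)  == 1);
        assert(convert_chksize(8)  == 2);
        assert(convert_chksize(16) == 3);
        printf("burst-to-CHKSIZE mapping checks out\n");
        return 0;
    }
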
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 420aca642b14..f2c115e06438 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -910,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
910 | enum dma_data_direction direction; | 910 | enum dma_data_direction direction; |
911 | enum dma_transfer_direction slave_dirn; | 911 | enum dma_transfer_direction slave_dirn; |
912 | unsigned int sglen; | 912 | unsigned int sglen; |
913 | u32 maxburst; | ||
913 | u32 iflags; | 914 | u32 iflags; |
914 | 915 | ||
915 | data->error = -EINPROGRESS; | 916 | data->error = -EINPROGRESS; |
@@ -943,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
943 | if (!chan) | 944 | if (!chan) |
944 | return -ENODEV; | 945 | return -ENODEV; |
945 | 946 | ||
946 | if (host->caps.has_dma) | ||
947 | atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN); | ||
948 | |||
949 | if (data->flags & MMC_DATA_READ) { | 947 | if (data->flags & MMC_DATA_READ) { |
950 | direction = DMA_FROM_DEVICE; | 948 | direction = DMA_FROM_DEVICE; |
951 | host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; | 949 | host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; |
950 | maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst); | ||
952 | } else { | 951 | } else { |
953 | direction = DMA_TO_DEVICE; | 952 | direction = DMA_TO_DEVICE; |
954 | host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; | 953 | host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; |
954 | maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); | ||
955 | } | 955 | } |
956 | 956 | ||
957 | atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN); | ||
958 | |||
957 | sglen = dma_map_sg(chan->device->dev, data->sg, | 959 | sglen = dma_map_sg(chan->device->dev, data->sg, |
958 | data->sg_len, direction); | 960 | data->sg_len, direction); |
959 | 961 | ||
@@ -2314,6 +2316,8 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
2314 | 2316 | ||
2315 | platform_set_drvdata(pdev, host); | 2317 | platform_set_drvdata(pdev, host); |
2316 | 2318 | ||
2319 | setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); | ||
2320 | |||
2317 | /* We need at least one slot to succeed */ | 2321 | /* We need at least one slot to succeed */ |
2318 | nr_slots = 0; | 2322 | nr_slots = 0; |
2319 | ret = -ENODEV; | 2323 | ret = -ENODEV; |
@@ -2352,8 +2356,6 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
2352 | } | 2356 | } |
2353 | } | 2357 | } |
2354 | 2358 | ||
2355 | setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); | ||
2356 | |||
2357 | dev_info(&pdev->dev, | 2359 | dev_info(&pdev->dev, |
2358 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", | 2360 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", |
2359 | host->mapbase, irq, nr_slots); | 2361 | host->mapbase, irq, nr_slots); |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 9bbf45f8c538..1ca5e72ceb65 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -418,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host) | |||
418 | p->des3 = host->sg_dma; | 418 | p->des3 = host->sg_dma; |
419 | p->des0 = IDMAC_DES0_ER; | 419 | p->des0 = IDMAC_DES0_ER; |
420 | 420 | ||
421 | mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); | ||
422 | |||
421 | /* Mask out interrupts - get Tx & Rx complete only */ | 423 | /* Mask out interrupts - get Tx & Rx complete only */ |
422 | mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | | 424 | mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | |
423 | SDMMC_IDMAC_INT_TI); | 425 | SDMMC_IDMAC_INT_TI); |
@@ -615,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) | |||
615 | u32 div; | 617 | u32 div; |
616 | 618 | ||
617 | if (slot->clock != host->current_speed) { | 619 | if (slot->clock != host->current_speed) { |
618 | if (host->bus_hz % slot->clock) | 620 | div = host->bus_hz / slot->clock; |
621 | if (host->bus_hz % slot->clock && host->bus_hz > slot->clock) | ||
619 | /* | 622 | /* |
620 | * move the + 1 after the divide to prevent | 623 | * move the + 1 after the divide to prevent |
621 | * over-clocking the card. | 624 | * over-clocking the card. |
622 | */ | 625 | */ |
623 | div = ((host->bus_hz / slot->clock) >> 1) + 1; | 626 | div += 1; |
624 | else | 627 | |
625 | div = (host->bus_hz / slot->clock) >> 1; | 628 | div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0; |
626 | 629 | ||
627 | dev_info(&slot->mmc->class_dev, | 630 | dev_info(&slot->mmc->class_dev, |
628 | "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" | 631 | "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" |
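
The divider hunk above relies on the CLKDIV register dividing bus_hz by twice the programmed value, with 0 passing the clock straight through, which is why the raw ratio is rounded up when it does not divide evenly and then halved with DIV_ROUND_UP so the card is never over-clocked. A small stand-alone walk through the arithmetic for a 50 MHz controller clock (the requested rates are just examples):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Mirrors the new divider logic from the hunk above. */
    static unsigned int clkdiv(unsigned int bus_hz, unsigned int clock)
    {
        unsigned int div = bus_hz / clock;

        if (bus_hz % clock && bus_hz > clock)
            div += 1;   /* round up so the card is never over-clocked */

        return (bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
    }

    int main(void)
    {
        unsigned int bus_hz = 50000000;
        unsigned int reqs[] = { 50000000, 25000000, 400000 };

        for (int i = 0; i < 3; i++) {
            unsigned int d = clkdiv(bus_hz, reqs[i]);
            /* CLKDIV divides by 2 * d; 0 means the clock is not divided. */
            unsigned int actual = d ? bus_hz / (2 * d) : bus_hz;

            printf("req %9u Hz -> CLKDIV %3u -> actual %9u Hz\n",
                   reqs[i], d, actual);
        }
        return 0;
    }
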
@@ -939,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd | |||
939 | mdelay(20); | 942 | mdelay(20); |
940 | 943 | ||
941 | if (cmd->data) { | 944 | if (cmd->data) { |
942 | host->data = NULL; | ||
943 | dw_mci_stop_dma(host); | 945 | dw_mci_stop_dma(host); |
946 | host->data = NULL; | ||
944 | } | 947 | } |
945 | } | 948 | } |
946 | } | 949 | } |
@@ -1623,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1623 | if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { | 1626 | if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { |
1624 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); | 1627 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); |
1625 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); | 1628 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); |
1626 | set_bit(EVENT_DATA_COMPLETE, &host->pending_events); | ||
1627 | host->dma_ops->complete(host); | 1629 | host->dma_ops->complete(host); |
1628 | } | 1630 | } |
1629 | #endif | 1631 | #endif |
@@ -1725,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work) | |||
1725 | 1727 | ||
1726 | #ifdef CONFIG_MMC_DW_IDMAC | 1728 | #ifdef CONFIG_MMC_DW_IDMAC |
1727 | ctrl = mci_readl(host, BMOD); | 1729 | ctrl = mci_readl(host, BMOD); |
1728 | ctrl |= 0x01; /* Software reset of DMA */ | 1730 | /* Software reset of DMA */ |
1731 | ctrl |= SDMMC_IDMAC_SWRESET; | ||
1729 | mci_writel(host, BMOD, ctrl); | 1732 | mci_writel(host, BMOD, ctrl); |
1730 | #endif | 1733 | #endif |
1731 | 1734 | ||
@@ -1950,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host) | |||
1950 | spin_lock_init(&host->lock); | 1953 | spin_lock_init(&host->lock); |
1951 | INIT_LIST_HEAD(&host->queue); | 1954 | INIT_LIST_HEAD(&host->queue); |
1952 | 1955 | ||
1953 | |||
1954 | host->dma_ops = host->pdata->dma_ops; | ||
1955 | dw_mci_init_dma(host); | ||
1956 | |||
1957 | /* | 1956 | /* |
1958 | * Get the host data width - this assumes that HCON has been set with | 1957 | * Get the host data width - this assumes that HCON has been set with |
1959 | * the correct values. | 1958 | * the correct values. |
@@ -1981,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host) | |||
1981 | } | 1980 | } |
1982 | 1981 | ||
1983 | /* Reset all blocks */ | 1982 | /* Reset all blocks */ |
1984 | if (!mci_wait_reset(&host->dev, host)) { | 1983 | if (!mci_wait_reset(&host->dev, host)) |
1985 | ret = -ENODEV; | 1984 | return -ENODEV; |
1986 | goto err_dmaunmap; | 1985 | |
1987 | } | 1986 | host->dma_ops = host->pdata->dma_ops; |
1987 | dw_mci_init_dma(host); | ||
1988 | 1988 | ||
1989 | /* Clear the interrupts for the host controller */ | 1989 | /* Clear the interrupts for the host controller */ |
1990 | mci_writel(host, RINTSTS, 0xFFFFFFFF); | 1990 | mci_writel(host, RINTSTS, 0xFFFFFFFF); |
@@ -2170,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host) | |||
2170 | if (host->vmmc) | 2170 | if (host->vmmc) |
2171 | regulator_enable(host->vmmc); | 2171 | regulator_enable(host->vmmc); |
2172 | 2172 | ||
2173 | if (host->dma_ops->init) | ||
2174 | host->dma_ops->init(host); | ||
2175 | |||
2176 | if (!mci_wait_reset(&host->dev, host)) { | 2173 | if (!mci_wait_reset(&host->dev, host)) { |
2177 | ret = -ENODEV; | 2174 | ret = -ENODEV; |
2178 | return ret; | 2175 | return ret; |
2179 | } | 2176 | } |
2180 | 2177 | ||
2178 | if (host->dma_ops->init) | ||
2179 | host->dma_ops->init(host); | ||
2180 | |||
2181 | /* Restore the old value at FIFOTH register */ | 2181 | /* Restore the old value at FIFOTH register */ |
2182 | mci_writel(host, FIFOTH, host->fifoth_val); | 2182 | mci_writel(host, FIFOTH, host->fifoth_val); |
2183 | 2183 | ||
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index f0fcce40cd8d..50ff19a62368 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np, | |||
1216 | int bus_width = 0; | 1216 | int bus_width = 0; |
1217 | 1217 | ||
1218 | pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); | 1218 | pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); |
1219 | if (!pdata->gpio_wp) | ||
1220 | pdata->gpio_wp = -1; | ||
1221 | |||
1222 | pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); | 1219 | pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); |
1223 | if (!pdata->gpio_cd) | ||
1224 | pdata->gpio_cd = -1; | ||
1225 | 1220 | ||
1226 | if (of_get_property(np, "cd-inverted", NULL)) | 1221 | if (of_get_property(np, "cd-inverted", NULL)) |
1227 | pdata->cd_invert = true; | 1222 | pdata->cd_invert = true; |
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev, | |||
1276 | return -EINVAL; | 1271 | return -EINVAL; |
1277 | } | 1272 | } |
1278 | 1273 | ||
1274 | if (!plat) { | ||
1275 | plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); | ||
1276 | if (!plat) | ||
1277 | return -ENOMEM; | ||
1278 | } | ||
1279 | |||
1279 | if (np) | 1280 | if (np) |
1280 | mmci_dt_populate_generic_pdata(np, plat); | 1281 | mmci_dt_populate_generic_pdata(np, plat); |
1281 | 1282 | ||
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev, | |||
1424 | writel(0, host->base + MMCIMASK1); | 1425 | writel(0, host->base + MMCIMASK1); |
1425 | writel(0xfff, host->base + MMCICLEAR); | 1426 | writel(0xfff, host->base + MMCICLEAR); |
1426 | 1427 | ||
1428 | if (plat->gpio_cd == -EPROBE_DEFER) { | ||
1429 | ret = -EPROBE_DEFER; | ||
1430 | goto err_gpio_cd; | ||
1431 | } | ||
1427 | if (gpio_is_valid(plat->gpio_cd)) { | 1432 | if (gpio_is_valid(plat->gpio_cd)) { |
1428 | ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); | 1433 | ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); |
1429 | if (ret == 0) | 1434 | if (ret == 0) |
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev, | |||
1447 | if (ret >= 0) | 1452 | if (ret >= 0) |
1448 | host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); | 1453 | host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); |
1449 | } | 1454 | } |
1455 | if (plat->gpio_wp == -EPROBE_DEFER) { | ||
1456 | ret = -EPROBE_DEFER; | ||
1457 | goto err_gpio_wp; | ||
1458 | } | ||
1450 | if (gpio_is_valid(plat->gpio_wp)) { | 1459 | if (gpio_is_valid(plat->gpio_wp)) { |
1451 | ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); | 1460 | ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); |
1452 | if (ret == 0) | 1461 | if (ret == 0) |
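
The mmci hunks above stop collapsing of_get_named_gpio() failures into -1, so a -EPROBE_DEFER from a not-yet-probed GPIO controller is propagated instead of silently treated as "no GPIO", and they allocate an empty platform-data block for DT-only boards that provide none. A hedged sketch of that probe pattern follows; demo_probe and demo_pdata are invented for illustration and are not mmci symbols.

    #include <linux/gpio.h>
    #include <linux/of_gpio.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct demo_pdata {
        int gpio_cd;
    };

    static int demo_probe(struct platform_device *pdev)
    {
        struct demo_pdata *plat = pdev->dev.platform_data;
        struct device_node *np = pdev->dev.of_node;

        /* DT-only boards pass no platform data: allocate an empty one. */
        if (!plat) {
            plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
            if (!plat)
                return -ENOMEM;
        }

        if (np)
            plat->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

        /* The GPIO provider may simply not be bound yet: defer the whole
         * probe rather than treating the error as "no card-detect line". */
        if (plat->gpio_cd == -EPROBE_DEFER)
            return -EPROBE_DEFER;

        if (gpio_is_valid(plat->gpio_cd)) {
            /* request and wire up the card-detect GPIO here */
        }

        return 0;
    }
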
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 34a90266ab11..277161d279b8 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -894,8 +894,8 @@ static struct platform_driver mxs_mmc_driver = { | |||
894 | .owner = THIS_MODULE, | 894 | .owner = THIS_MODULE, |
895 | #ifdef CONFIG_PM | 895 | #ifdef CONFIG_PM |
896 | .pm = &mxs_mmc_pm_ops, | 896 | .pm = &mxs_mmc_pm_ops, |
897 | .of_match_table = mxs_mmc_dt_ids, | ||
898 | #endif | 897 | #endif |
898 | .of_match_table = mxs_mmc_dt_ids, | ||
899 | }, | 899 | }, |
900 | }; | 900 | }; |
901 | 901 | ||
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 552196c764d4..3e8dcf8d2e05 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c | |||
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = { | |||
1300 | .set_ios = mmc_omap_set_ios, | 1300 | .set_ios = mmc_omap_set_ios, |
1301 | }; | 1301 | }; |
1302 | 1302 | ||
1303 | static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) | 1303 | static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id) |
1304 | { | 1304 | { |
1305 | struct mmc_omap_slot *slot = NULL; | 1305 | struct mmc_omap_slot *slot = NULL; |
1306 | struct mmc_host *mmc; | 1306 | struct mmc_host *mmc; |
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) | |||
1485 | } | 1485 | } |
1486 | 1486 | ||
1487 | host->nr_slots = pdata->nr_slots; | 1487 | host->nr_slots = pdata->nr_slots; |
1488 | host->reg_shift = (cpu_is_omap7xx() ? 1 : 2); | ||
1489 | |||
1490 | host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); | ||
1491 | if (!host->mmc_omap_wq) | ||
1492 | goto err_plat_cleanup; | ||
1493 | |||
1488 | for (i = 0; i < pdata->nr_slots; i++) { | 1494 | for (i = 0; i < pdata->nr_slots; i++) { |
1489 | ret = mmc_omap_new_slot(host, i); | 1495 | ret = mmc_omap_new_slot(host, i); |
1490 | if (ret < 0) { | 1496 | if (ret < 0) { |
1491 | while (--i >= 0) | 1497 | while (--i >= 0) |
1492 | mmc_omap_remove_slot(host->slots[i]); | 1498 | mmc_omap_remove_slot(host->slots[i]); |
1493 | 1499 | ||
1494 | goto err_plat_cleanup; | 1500 | goto err_destroy_wq; |
1495 | } | 1501 | } |
1496 | } | 1502 | } |
1497 | 1503 | ||
1498 | host->reg_shift = (cpu_is_omap7xx() ? 1 : 2); | ||
1499 | |||
1500 | host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); | ||
1501 | if (!host->mmc_omap_wq) | ||
1502 | goto err_plat_cleanup; | ||
1503 | |||
1504 | return 0; | 1504 | return 0; |
1505 | 1505 | ||
1506 | err_destroy_wq: | ||
1507 | destroy_workqueue(host->mmc_omap_wq); | ||
1506 | err_plat_cleanup: | 1508 | err_plat_cleanup: |
1507 | if (pdata->cleanup) | 1509 | if (pdata->cleanup) |
1508 | pdata->cleanup(&pdev->dev); | 1510 | pdata->cleanup(&pdev->dev); |
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 55a164fcaa15..a50c205ea208 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) | |||
404 | if (sc->ext_cd_irq && | 404 | if (sc->ext_cd_irq && |
405 | request_threaded_irq(sc->ext_cd_irq, NULL, | 405 | request_threaded_irq(sc->ext_cd_irq, NULL, |
406 | sdhci_s3c_gpio_card_detect_thread, | 406 | sdhci_s3c_gpio_card_detect_thread, |
407 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | 407 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, |
408 | dev_name(dev), sc) == 0) { | 408 | dev_name(dev), sc) == 0) { |
409 | int status = gpio_get_value(sc->ext_cd_gpio); | 409 | int status = gpio_get_value(sc->ext_cd_gpio); |
410 | if (pdata->ext_cd_gpio_invert) | 410 | if (pdata->ext_cd_gpio_invert) |
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 1fe32dfa7cd4..423da8194cd8 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Support of SDHCI platform devices for spear soc family | 4 | * Support of SDHCI platform devices for spear soc family |
5 | * | 5 | * |
6 | * Copyright (C) 2010 ST Microelectronics | 6 | * Copyright (C) 2010 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * Inspired by sdhci-pltfm.c | 9 | * Inspired by sdhci-pltfm.c |
10 | * | 10 | * |
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = { | |||
289 | module_platform_driver(sdhci_driver); | 289 | module_platform_driver(sdhci_driver); |
290 | 290 | ||
291 | MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); | 291 | MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); |
292 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 292 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
293 | MODULE_LICENSE("GPL v2"); | 293 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index e626732aff77..f4b8b4db3a9a 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) | |||
680 | } | 680 | } |
681 | 681 | ||
682 | if (count >= 0xF) { | 682 | if (count >= 0xF) { |
683 | pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n", | 683 | DBG("%s: Too large timeout 0x%x requested for CMD%d!\n", |
684 | mmc_hostname(host->mmc), count, cmd->opcode); | 684 | mmc_hostname(host->mmc), count, cmd->opcode); |
685 | count = 0xE; | 685 | count = 0xE; |
686 | } | 686 | } |
687 | 687 | ||
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index ae36d7e1e913..551e316e4454 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt) | |||
304 | } | 304 | } |
305 | 305 | ||
306 | static void mtdoops_do_dump(struct kmsg_dumper *dumper, | 306 | static void mtdoops_do_dump(struct kmsg_dumper *dumper, |
307 | enum kmsg_dump_reason reason, const char *s1, unsigned long l1, | 307 | enum kmsg_dump_reason reason) |
308 | const char *s2, unsigned long l2) | ||
309 | { | 308 | { |
310 | struct mtdoops_context *cxt = container_of(dumper, | 309 | struct mtdoops_context *cxt = container_of(dumper, |
311 | struct mtdoops_context, dump); | 310 | struct mtdoops_context, dump); |
312 | unsigned long s1_start, s2_start; | ||
313 | unsigned long l1_cpy, l2_cpy; | ||
314 | char *dst; | ||
315 | |||
316 | if (reason != KMSG_DUMP_OOPS && | ||
317 | reason != KMSG_DUMP_PANIC) | ||
318 | return; | ||
319 | 311 | ||
320 | /* Only dump oopses if dump_oops is set */ | 312 | /* Only dump oopses if dump_oops is set */ |
321 | if (reason == KMSG_DUMP_OOPS && !dump_oops) | 313 | if (reason == KMSG_DUMP_OOPS && !dump_oops) |
322 | return; | 314 | return; |
323 | 315 | ||
324 | dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */ | 316 | kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, |
325 | l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE); | 317 | record_size - MTDOOPS_HEADER_SIZE, NULL); |
326 | l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy); | ||
327 | |||
328 | s2_start = l2 - l2_cpy; | ||
329 | s1_start = l1 - l1_cpy; | ||
330 | |||
331 | memcpy(dst, s1 + s1_start, l1_cpy); | ||
332 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); | ||
333 | 318 | ||
334 | /* Panics must be written immediately */ | 319 | /* Panics must be written immediately */ |
335 | if (reason != KMSG_DUMP_OOPS) | 320 | if (reason != KMSG_DUMP_OOPS) |
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd) | |||
375 | return; | 360 | return; |
376 | } | 361 | } |
377 | 362 | ||
363 | cxt->dump.max_reason = KMSG_DUMP_OOPS; | ||
378 | cxt->dump.dump = mtdoops_do_dump; | 364 | cxt->dump.dump = mtdoops_do_dump; |
379 | err = kmsg_dump_register(&cxt->dump); | 365 | err = kmsg_dump_register(&cxt->dump); |
380 | if (err) { | 366 | if (err) { |
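
The mtdoops change above drops the hand-rolled copy of the two log segments in favour of kmsg_dump_get_buffer(), and moves the reason filtering into dump.max_reason. A minimal sketch of a dumper built on the same interface is shown below; the demo_* names and the missing storage step are placeholders, not mtdoops code.

    #include <linux/kmsg_dump.h>
    #include <linux/module.h>

    #define DEMO_BUF_SIZE 4096

    static char demo_buf[DEMO_BUF_SIZE];

    /* Called by the core with the log already snapshotted for us. */
    static void demo_do_dump(struct kmsg_dumper *dumper,
                             enum kmsg_dump_reason reason)
    {
        size_t len;

        if (!kmsg_dump_get_buffer(dumper, true, demo_buf,
                                  DEMO_BUF_SIZE, &len))
            return;

        /* demo_buf now holds the newest len bytes of the kernel log;
         * hand them to whatever storage backend this dumper serves. */
    }

    static struct kmsg_dumper demo_dumper = {
        .dump       = demo_do_dump,
        .max_reason = KMSG_DUMP_OOPS,   /* only oopses and panics */
    };

    static int __init demo_init(void)
    {
        return kmsg_dump_register(&demo_dumper);
    }

    static void __exit demo_exit(void)
    {
        kmsg_dump_unregister(&demo_dumper);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
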
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 9f957c2d48e9..09d4f8d9d592 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c | |||
@@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir; | |||
264 | */ | 264 | */ |
265 | int ubi_debugfs_init(void) | 265 | int ubi_debugfs_init(void) |
266 | { | 266 | { |
267 | if (!IS_ENABLED(CONFIG_DEBUG_FS)) | ||
268 | return 0; | ||
269 | |||
267 | dfs_rootdir = debugfs_create_dir("ubi", NULL); | 270 | dfs_rootdir = debugfs_create_dir("ubi", NULL); |
268 | if (IS_ERR_OR_NULL(dfs_rootdir)) { | 271 | if (IS_ERR_OR_NULL(dfs_rootdir)) { |
269 | int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); | 272 | int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); |
@@ -281,7 +284,8 @@ int ubi_debugfs_init(void) | |||
281 | */ | 284 | */ |
282 | void ubi_debugfs_exit(void) | 285 | void ubi_debugfs_exit(void) |
283 | { | 286 | { |
284 | debugfs_remove(dfs_rootdir); | 287 | if (IS_ENABLED(CONFIG_DEBUG_FS))
288 | debugfs_remove(dfs_rootdir); | ||
285 | } | 289 | } |
286 | 290 | ||
287 | /* Read an UBI debugfs file */ | 291 | /* Read an UBI debugfs file */ |
@@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) | |||
403 | struct dentry *dent; | 407 | struct dentry *dent; |
404 | struct ubi_debug_info *d = ubi->dbg; | 408 | struct ubi_debug_info *d = ubi->dbg; |
405 | 409 | ||
410 | if (!IS_ENABLED(CONFIG_DEBUG_FS)) | ||
411 | return 0; | ||
412 | |||
406 | n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, | 413 | n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, |
407 | ubi->ubi_num); | 414 | ubi->ubi_num); |
408 | if (n == UBI_DFS_DIR_LEN) { | 415 | if (n == UBI_DFS_DIR_LEN) { |
@@ -470,5 +477,6 @@ out: | |||
470 | */ | 477 | */ |
471 | void ubi_debugfs_exit_dev(struct ubi_device *ubi) | 478 | void ubi_debugfs_exit_dev(struct ubi_device *ubi) |
472 | { | 479 | { |
473 | debugfs_remove_recursive(ubi->dbg->dfs_dir); | 480 | if (IS_ENABLED(CONFIG_DEBUG_FS))
481 | debugfs_remove_recursive(ubi->dbg->dfs_dir); | ||
474 | } | 482 | } |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 9df100a4ec38..b6be644e7b85 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) | |||
1262 | dbg_wl("flush pending work for LEB %d:%d (%d pending works)", | 1262 | dbg_wl("flush pending work for LEB %d:%d (%d pending works)", |
1263 | vol_id, lnum, ubi->works_count); | 1263 | vol_id, lnum, ubi->works_count); |
1264 | 1264 | ||
1265 | down_write(&ubi->work_sem); | ||
1266 | while (found) { | 1265 | while (found) { |
1267 | struct ubi_work *wrk; | 1266 | struct ubi_work *wrk; |
1268 | found = 0; | 1267 | found = 0; |
1269 | 1268 | ||
1269 | down_read(&ubi->work_sem); | ||
1270 | spin_lock(&ubi->wl_lock); | 1270 | spin_lock(&ubi->wl_lock); |
1271 | list_for_each_entry(wrk, &ubi->works, list) { | 1271 | list_for_each_entry(wrk, &ubi->works, list) { |
1272 | if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && | 1272 | if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && |
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) | |||
1277 | spin_unlock(&ubi->wl_lock); | 1277 | spin_unlock(&ubi->wl_lock); |
1278 | 1278 | ||
1279 | err = wrk->func(ubi, wrk, 0); | 1279 | err = wrk->func(ubi, wrk, 0); |
1280 | if (err) | 1280 | if (err) { |
1281 | goto out; | 1281 | up_read(&ubi->work_sem); |
1282 | return err; | ||
1283 | } | ||
1284 | |||
1282 | spin_lock(&ubi->wl_lock); | 1285 | spin_lock(&ubi->wl_lock); |
1283 | found = 1; | 1286 | found = 1; |
1284 | break; | 1287 | break; |
1285 | } | 1288 | } |
1286 | } | 1289 | } |
1287 | spin_unlock(&ubi->wl_lock); | 1290 | spin_unlock(&ubi->wl_lock); |
1291 | up_read(&ubi->work_sem); | ||
1288 | } | 1292 | } |
1289 | 1293 | ||
1290 | out: | 1294 | /* |
1295 | * Make sure all the works which have been done in parallel are | ||
1296 | * finished. | ||
1297 | */ | ||
1298 | down_write(&ubi->work_sem); | ||
1291 | up_write(&ubi->work_sem); | 1299 | up_write(&ubi->work_sem); |
1300 | |||
1292 | return err; | 1301 | return err; |
1293 | } | 1302 | } |
1294 | 1303 | ||
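
In ubi_wl_flush() above, work_sem is now taken for reading around each executed work item so other workers can run concurrently, and taken once for writing at the very end as a barrier that waits for any work still in flight. A small user-space sketch of that read-while-working, write-as-barrier idiom with a pthread rwlock; pick_work() and run_work() are stand-ins, not UBI functions.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* Stand-ins for "find a queued work item" and "run it". */
    static bool pick_work(void **wrk) { *wrk = NULL; return false; }
    static int  run_work(void *wrk)   { (void)wrk; return 0; }

    static int flush_works(void)
    {
        void *wrk;
        int err = 0;

        /* Run queued items one by one, holding the lock only for reading
         * so other workers can make progress at the same time. */
        while (pick_work(&wrk)) {
            pthread_rwlock_rdlock(&work_sem);
            err = run_work(wrk);
            pthread_rwlock_unlock(&work_sem);
            if (err)
                return err;
        }

        /* Barrier: taking the lock for writing only succeeds once every
         * reader, i.e. every work item still running, has finished. */
        pthread_rwlock_wrlock(&work_sem);
        pthread_rwlock_unlock(&work_sem);

        return err;
    }

    int main(void)
    {
        return flush_works();   /* no work queued in this toy run */
    }
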
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2ee8cf9e8a3b..b9c2ae62166d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -76,6 +76,7 @@ | |||
76 | #include <net/route.h> | 76 | #include <net/route.h> |
77 | #include <net/net_namespace.h> | 77 | #include <net/net_namespace.h> |
78 | #include <net/netns/generic.h> | 78 | #include <net/netns/generic.h> |
79 | #include <net/pkt_sched.h> | ||
79 | #include "bonding.h" | 80 | #include "bonding.h" |
80 | #include "bond_3ad.h" | 81 | #include "bond_3ad.h" |
81 | #include "bond_alb.h" | 82 | #include "bond_alb.h" |
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) | |||
381 | return next; | 382 | return next; |
382 | } | 383 | } |
383 | 384 | ||
384 | #define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) | ||
385 | |||
386 | /** | 385 | /** |
387 | * bond_dev_queue_xmit - Prepare skb for xmit. | 386 | * bond_dev_queue_xmit - Prepare skb for xmit. |
388 | * | 387 | * |
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, | |||
395 | { | 394 | { |
396 | skb->dev = slave_dev; | 395 | skb->dev = slave_dev; |
397 | 396 | ||
398 | skb->queue_mapping = bond_queue_mapping(skb); | 397 | BUILD_BUG_ON(sizeof(skb->queue_mapping) != |
398 | sizeof(qdisc_skb_cb(skb)->bond_queue_mapping)); | ||
399 | skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping; | ||
399 | 400 | ||
400 | if (unlikely(netpoll_tx_running(slave_dev))) | 401 | if (unlikely(netpoll_tx_running(slave_dev))) |
401 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); | 402 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); |
@@ -4171,7 +4172,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
4171 | /* | 4172 | /* |
4172 | * Save the original txq to restore before passing to the driver | 4173 | * Save the original txq to restore before passing to the driver |
4173 | */ | 4174 | */ |
4174 | bond_queue_mapping(skb) = skb->queue_mapping; | 4175 | qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping; |
4175 | 4176 | ||
4176 | if (unlikely(txq >= dev->real_num_tx_queues)) { | 4177 | if (unlikely(txq >= dev->real_num_tx_queues)) { |
4177 | do { | 4178 | do { |
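
The bonding hunks above stop stashing the saved queue mapping in raw skb->cb and put it in qdisc_skb_cb instead, with a BUILD_BUG_ON proving at compile time that the saved field matches the size of skb->queue_mapping. A generic user-space sketch of that "save a copy in a shared control block and size-check it at build time" idea, using C11 _Static_assert; struct pkt and struct sched_cb are invented for the example.

    #include <stdint.h>

    /* A per-packet scratch area shared between layers, in the spirit of
     * skb->cb: anything one layer wants back later is kept in a field
     * whose size is checked against the original at build time. */
    struct pkt {
        uint16_t queue_mapping;
        uint8_t  cb[48];
    };

    struct sched_cb {
        uint16_t saved_queue_mapping;
    };

    _Static_assert(sizeof(((struct sched_cb *)0)->saved_queue_mapping) ==
                   sizeof(((struct pkt *)0)->queue_mapping),
                   "saved copy must be able to hold queue_mapping");

    static void save_mapping(struct pkt *p)
    {
        ((struct sched_cb *)p->cb)->saved_queue_mapping = p->queue_mapping;
    }

    static void restore_mapping(struct pkt *p)
    {
        p->queue_mapping = ((struct sched_cb *)p->cb)->saved_queue_mapping;
    }
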
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index aef42f045320..485bedb8278c 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d, | |||
1082 | } | 1082 | } |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | pr_info("%s: Unable to set %.*s as primary slave.\n", | 1085 | strncpy(bond->params.primary, ifname, IFNAMSIZ); |
1086 | bond->dev->name, (int)strlen(buf) - 1, buf); | 1086 | bond->params.primary[IFNAMSIZ - 1] = 0; |
1087 | |||
1088 | pr_info("%s: Recording %s as primary, " | ||
1089 | "but it has not been enslaved to %s yet.\n", | ||
1090 | bond->dev->name, ifname, bond->dev->name); | ||
1087 | out: | 1091 | out: |
1088 | write_unlock_bh(&bond->curr_slave_lock); | 1092 | write_unlock_bh(&bond->curr_slave_lock); |
1089 | read_unlock(&bond->lock); | 1093 | read_unlock(&bond->lock); |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 536bda072a16..8dc84d66eea1 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev, | |||
686 | * | 686 | * |
687 | * We iterate from priv->tx_echo to priv->tx_next and check if the | 687 | * We iterate from priv->tx_echo to priv->tx_next and check if the |
688 | * packet has been transmitted, echo it back to the CAN framework. | 688 | * packet has been transmitted, echo it back to the CAN framework. |
689 | * If we discover a not yet transmitted package, stop looking for more. | 689 | * If we discover a not yet transmitted packet, stop looking for more. |
690 | */ | 690 | */ |
691 | static void c_can_do_tx(struct net_device *dev) | 691 | static void c_can_do_tx(struct net_device *dev) |
692 | { | 692 | { |
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev) | |||
698 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { | 698 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { |
699 | msg_obj_no = get_tx_echo_msg_obj(priv); | 699 | msg_obj_no = get_tx_echo_msg_obj(priv); |
700 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); | 700 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); |
701 | if (!(val & (1 << msg_obj_no))) { | 701 | if (!(val & (1 << (msg_obj_no - 1)))) { |
702 | can_get_echo_skb(dev, | 702 | can_get_echo_skb(dev, |
703 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); | 703 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); |
704 | stats->tx_bytes += priv->read_reg(priv, | 704 | stats->tx_bytes += priv->read_reg(priv, |
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev) | |||
706 | & IF_MCONT_DLC_MASK; | 706 | & IF_MCONT_DLC_MASK; |
707 | stats->tx_packets++; | 707 | stats->tx_packets++; |
708 | c_can_inval_msg_object(dev, 0, msg_obj_no); | 708 | c_can_inval_msg_object(dev, 0, msg_obj_no); |
709 | } else { | ||
710 | break; | ||
709 | } | 711 | } |
710 | } | 712 | } |
711 | 713 | ||
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota) | |||
950 | struct net_device *dev = napi->dev; | 952 | struct net_device *dev = napi->dev; |
951 | struct c_can_priv *priv = netdev_priv(dev); | 953 | struct c_can_priv *priv = netdev_priv(dev); |
952 | 954 | ||
953 | irqstatus = priv->read_reg(priv, &priv->regs->interrupt); | 955 | irqstatus = priv->irqstatus; |
954 | if (!irqstatus) | 956 | if (!irqstatus) |
955 | goto end; | 957 | goto end; |
956 | 958 | ||
@@ -1028,12 +1030,11 @@ end: | |||
1028 | 1030 | ||
1029 | static irqreturn_t c_can_isr(int irq, void *dev_id) | 1031 | static irqreturn_t c_can_isr(int irq, void *dev_id) |
1030 | { | 1032 | { |
1031 | u16 irqstatus; | ||
1032 | struct net_device *dev = (struct net_device *)dev_id; | 1033 | struct net_device *dev = (struct net_device *)dev_id; |
1033 | struct c_can_priv *priv = netdev_priv(dev); | 1034 | struct c_can_priv *priv = netdev_priv(dev); |
1034 | 1035 | ||
1035 | irqstatus = priv->read_reg(priv, &priv->regs->interrupt); | 1036 | priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt); |
1036 | if (!irqstatus) | 1037 | if (!priv->irqstatus) |
1037 | return IRQ_NONE; | 1038 | return IRQ_NONE; |
1038 | 1039 | ||
1039 | /* disable all interrupts and schedule the NAPI */ | 1040 | /* disable all interrupts and schedule the NAPI */ |
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev) | |||
1063 | goto exit_irq_fail; | 1064 | goto exit_irq_fail; |
1064 | } | 1065 | } |
1065 | 1066 | ||
1067 | napi_enable(&priv->napi); | ||
1068 | |||
1066 | /* start the c_can controller */ | 1069 | /* start the c_can controller */ |
1067 | c_can_start(dev); | 1070 | c_can_start(dev); |
1068 | 1071 | ||
1069 | napi_enable(&priv->napi); | ||
1070 | netif_start_queue(dev); | 1072 | netif_start_queue(dev); |
1071 | 1073 | ||
1072 | return 0; | 1074 | return 0; |
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 9b7fbef3d09a..5f32d34af507 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h | |||
@@ -76,6 +76,7 @@ struct c_can_priv { | |||
76 | unsigned int tx_next; | 76 | unsigned int tx_next; |
77 | unsigned int tx_echo; | 77 | unsigned int tx_echo; |
78 | void *priv; /* for board-specific data */ | 78 | void *priv; /* for board-specific data */ |
79 | u16 irqstatus; | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | struct net_device *alloc_c_can_dev(void); | 82 | struct net_device *alloc_c_can_dev(void); |
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 53115eee8075..688371cda37a 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c | |||
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev, | |||
154 | struct cc770_platform_data *pdata = pdev->dev.platform_data; | 154 | struct cc770_platform_data *pdata = pdev->dev.platform_data; |
155 | 155 | ||
156 | priv->can.clock.freq = pdata->osc_freq; | 156 | priv->can.clock.freq = pdata->osc_freq; |
157 | if (priv->cpu_interface | CPUIF_DSC) | 157 | if (priv->cpu_interface & CPUIF_DSC) |
158 | priv->can.clock.freq /= 2; | 158 | priv->can.clock.freq /= 2; |
159 | priv->clkout = pdata->cor; | 159 | priv->clkout = pdata->cor; |
160 | priv->bus_config = pdata->bcr; | 160 | priv->bus_config = pdata->bcr; |
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 442d91a2747b..bab0158f1cc3 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c | |||
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void) | |||
187 | rtnl_lock(); | 187 | rtnl_lock(); |
188 | err = __rtnl_link_register(&dummy_link_ops); | 188 | err = __rtnl_link_register(&dummy_link_ops); |
189 | 189 | ||
190 | for (i = 0; i < numdummies && !err; i++) | 190 | for (i = 0; i < numdummies && !err; i++) { |
191 | err = dummy_init_one(); | 191 | err = dummy_init_one(); |
192 | cond_resched(); | ||
193 | } | ||
192 | if (err < 0) | 194 | if (err < 0) |
193 | __rtnl_link_unregister(&dummy_link_ops); | 195 | __rtnl_link_unregister(&dummy_link_ops); |
194 | rtnl_unlock(); | 196 | rtnl_unlock(); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e30e2a2f354c..7de824184979 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -747,21 +747,6 @@ struct bnx2x_fastpath { | |||
747 | 747 | ||
748 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | 748 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
749 | 749 | ||
750 | #define BNX2X_IP_CSUM_ERR(cqe) \ | ||
751 | (!((cqe)->fast_path_cqe.status_flags & \ | ||
752 | ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ | ||
753 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
754 | ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) | ||
755 | |||
756 | #define BNX2X_L4_CSUM_ERR(cqe) \ | ||
757 | (!((cqe)->fast_path_cqe.status_flags & \ | ||
758 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ | ||
759 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
760 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) | ||
761 | |||
762 | #define BNX2X_RX_CSUM_OK(cqe) \ | ||
763 | (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) | ||
764 | |||
765 | #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ | 750 | #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ |
766 | (((le16_to_cpu(flags) & \ | 751 | (((le16_to_cpu(flags) & \ |
767 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ | 752 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ad0743bf4bde..cbc56f274e0c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp, | |||
617 | return 0; | 617 | return 0; |
618 | } | 618 | } |
619 | 619 | ||
620 | static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, | ||
621 | struct bnx2x_fastpath *fp) | ||
622 | { | ||
623 | /* Do nothing if no IP/L4 csum validation was done */ | ||
624 | |||
625 | if (cqe->fast_path_cqe.status_flags & | ||
626 | (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | | ||
627 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) | ||
628 | return; | ||
629 | |||
630 | /* If both IP/L4 validation were done, check if an error was found. */ | ||
631 | |||
632 | if (cqe->fast_path_cqe.type_error_flags & | ||
633 | (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | | ||
634 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) | ||
635 | fp->eth_q_stats.hw_csum_err++; | ||
636 | else | ||
637 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
638 | } | ||
620 | 639 | ||
621 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | 640 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) |
622 | { | 641 | { |
@@ -806,13 +825,9 @@ reuse_rx: | |||
806 | 825 | ||
807 | skb_checksum_none_assert(skb); | 826 | skb_checksum_none_assert(skb); |
808 | 827 | ||
809 | if (bp->dev->features & NETIF_F_RXCSUM) { | 828 | if (bp->dev->features & NETIF_F_RXCSUM) |
829 | bnx2x_csum_validate(skb, cqe, fp); | ||
810 | 830 | ||
811 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | ||
812 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
813 | else | ||
814 | fp->eth_q_stats.hw_csum_err++; | ||
815 | } | ||
816 | 831 | ||
817 | skb_record_rx_queue(skb, fp->rx_queue); | 832 | skb_record_rx_queue(skb, fp->rx_queue); |
818 | 833 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index edeeb516807a..e47ff8be1d7b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
14275 | } | 14275 | } |
14276 | } | 14276 | } |
14277 | 14277 | ||
14278 | if (tg3_flag(tp, 5755_PLUS)) | 14278 | if (tg3_flag(tp, 5755_PLUS) || |
14279 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | ||
14279 | tg3_flag_set(tp, SHORT_DMA_BUG); | 14280 | tg3_flag_set(tp, SHORT_DMA_BUG); |
14280 | 14281 | ||
14281 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | 14282 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08efd308d78a..fdb50cec6b51 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, | |||
736 | 736 | ||
737 | copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); | 737 | copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); |
738 | if (copied) { | 738 | if (copied) { |
739 | int gso_segs = skb_shinfo(skb)->gso_segs; | ||
740 | |||
739 | /* record the sent skb in the sent_skb table */ | 741 | /* record the sent skb in the sent_skb table */ |
740 | BUG_ON(txo->sent_skb_list[start]); | 742 | BUG_ON(txo->sent_skb_list[start]); |
741 | txo->sent_skb_list[start] = skb; | 743 | txo->sent_skb_list[start] = skb; |
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, | |||
753 | 755 | ||
754 | be_txq_notify(adapter, txq->id, wrb_cnt); | 756 | be_txq_notify(adapter, txq->id, wrb_cnt); |
755 | 757 | ||
756 | be_tx_stats_update(txo, wrb_cnt, copied, | 758 | be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped); |
757 | skb_shinfo(skb)->gso_segs, stopped); | ||
758 | } else { | 759 | } else { |
759 | txq->head = start; | 760 | txq->head = start; |
760 | dev_kfree_skb_any(skb); | 761 | dev_kfree_skb_any(skb); |
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index d863075df7a4..905e2147d918 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c | |||
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev, | |||
258 | * When SoL/IDER sessions are active, autoneg/speed/duplex | 258 | * When SoL/IDER sessions are active, autoneg/speed/duplex |
259 | * cannot be changed | 259 | * cannot be changed |
260 | */ | 260 | */ |
261 | if (hw->phy.ops.check_reset_block(hw)) { | 261 | if (hw->phy.ops.check_reset_block && |
262 | hw->phy.ops.check_reset_block(hw)) { | ||
262 | e_err("Cannot change link characteristics when SoL/IDER is active.\n"); | 263 | e_err("Cannot change link characteristics when SoL/IDER is active.\n"); |
263 | return -EINVAL; | 264 | return -EINVAL; |
264 | } | 265 | } |
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | |||
1615 | * PHY loopback cannot be performed if SoL/IDER | 1616 | * PHY loopback cannot be performed if SoL/IDER |
1616 | * sessions are active | 1617 | * sessions are active |
1617 | */ | 1618 | */ |
1618 | if (hw->phy.ops.check_reset_block(hw)) { | 1619 | if (hw->phy.ops.check_reset_block && |
1620 | hw->phy.ops.check_reset_block(hw)) { | ||
1619 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); | 1621 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); |
1620 | *data = 0; | 1622 | *data = 0; |
1621 | goto out; | 1623 | goto out; |
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 026e8b3ab52e..a13439928488 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c | |||
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw) | |||
709 | * In the case of the phy reset being blocked, we already have a link. | 709 | * In the case of the phy reset being blocked, we already have a link. |
710 | * We do not need to set it up again. | 710 | * We do not need to set it up again. |
711 | */ | 711 | */ |
712 | if (hw->phy.ops.check_reset_block(hw)) | 712 | if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) |
713 | return 0; | 713 | return 0; |
714 | 714 | ||
715 | /* | 715 | /* |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a4b0435b00dc..31d37a2b5ba8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -6237,7 +6237,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
6237 | adapter->hw.phy.ms_type = e1000_ms_hw_default; | 6237 | adapter->hw.phy.ms_type = e1000_ms_hw_default; |
6238 | } | 6238 | } |
6239 | 6239 | ||
6240 | if (hw->phy.ops.check_reset_block(hw)) | 6240 | if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) |
6241 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); | 6241 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); |
6242 | 6242 | ||
6243 | /* Set initial default active device features */ | 6243 | /* Set initial default active device features */ |
@@ -6404,7 +6404,7 @@ err_register: | |||
6404 | if (!(adapter->flags & FLAG_HAS_AMT)) | 6404 | if (!(adapter->flags & FLAG_HAS_AMT)) |
6405 | e1000e_release_hw_control(adapter); | 6405 | e1000e_release_hw_control(adapter); |
6406 | err_eeprom: | 6406 | err_eeprom: |
6407 | if (!hw->phy.ops.check_reset_block(hw)) | 6407 | if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) |
6408 | e1000_phy_hw_reset(&adapter->hw); | 6408 | e1000_phy_hw_reset(&adapter->hw); |
6409 | err_hw_init: | 6409 | err_hw_init: |
6410 | kfree(adapter->tx_ring); | 6410 | kfree(adapter->tx_ring); |
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 0334d013bc3c..b860d4f7ea2a 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c | |||
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
2155 | s32 ret_val; | 2155 | s32 ret_val; |
2156 | u32 ctrl; | 2156 | u32 ctrl; |
2157 | 2157 | ||
2158 | ret_val = phy->ops.check_reset_block(hw); | 2158 | if (phy->ops.check_reset_block) { |
2159 | if (ret_val) | 2159 | ret_val = phy->ops.check_reset_block(hw); |
2160 | return 0; | 2160 | if (ret_val) |
2161 | return 0; | ||
2162 | } | ||
2161 | 2163 | ||
2162 | ret_val = phy->ops.acquire(hw); | 2164 | ret_val = phy->ops.acquire(hw); |
2163 | if (ret_val) | 2165 | if (ret_val) |
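All of the e1000e hunks above apply the same defensive pattern: check_reset_block became an optional ops callback, so it is only invoked when the function pointer is populated, and a missing callback is treated as "reset not blocked". Below is a minimal user-space sketch of that pattern; struct phy_ops, reset_is_blocked() and always_blocked() are illustrative names, not the driver's real symbols.

	/* Sketch of the "optional ops callback" pattern from the e1000e hunks:
	 * only call check_reset_block() when the pointer is set, and treat a
	 * missing callback as "not blocked". Types here are illustrative.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct phy_ops {
		int (*check_reset_block)(void *hw);	/* may legitimately be NULL */
	};

	static bool reset_is_blocked(struct phy_ops *ops, void *hw)
	{
		return ops->check_reset_block && ops->check_reset_block(hw);
	}

	static int always_blocked(void *hw) { (void)hw; return 1; }

	int main(void)
	{
		struct phy_ops no_cb   = { .check_reset_block = NULL };
		struct phy_ops with_cb = { .check_reset_block = always_blocked };

		printf("no callback   -> blocked=%d\n", reset_is_blocked(&no_cb, NULL));
		printf("with callback -> blocked=%d\n", reset_is_blocked(&with_cb, NULL));
		return 0;
	}

The design choice is that a NULL callback short-circuits the && expression, so callers need no separate capability flag to know whether the PHY supports reset blocking.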
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bf20457ea23a..17ad6a3c1be1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, | |||
1390 | union ixgbe_adv_rx_desc *rx_desc, | 1390 | union ixgbe_adv_rx_desc *rx_desc, |
1391 | struct sk_buff *skb) | 1391 | struct sk_buff *skb) |
1392 | { | 1392 | { |
1393 | struct net_device *dev = rx_ring->netdev; | ||
1394 | |||
1393 | ixgbe_update_rsc_stats(rx_ring, skb); | 1395 | ixgbe_update_rsc_stats(rx_ring, skb); |
1394 | 1396 | ||
1395 | ixgbe_rx_hash(rx_ring, rx_desc, skb); | 1397 | ixgbe_rx_hash(rx_ring, rx_desc, skb); |
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, | |||
1401 | ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); | 1403 | ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); |
1402 | #endif | 1404 | #endif |
1403 | 1405 | ||
1404 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { | 1406 | if ((dev->features & NETIF_F_HW_VLAN_RX) && |
1407 | ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { | ||
1405 | u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); | 1408 | u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); |
1406 | __vlan_hwaccel_put_tag(skb, vid); | 1409 | __vlan_hwaccel_put_tag(skb, vid); |
1407 | } | 1410 | } |
1408 | 1411 | ||
1409 | skb_record_rx_queue(skb, rx_ring->queue_index); | 1412 | skb_record_rx_queue(skb, rx_ring->queue_index); |
1410 | 1413 | ||
1411 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); | 1414 | skb->protocol = eth_type_trans(skb, dev); |
1412 | } | 1415 | } |
1413 | 1416 | ||
1414 | static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, | 1417 | static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, |
@@ -3607,10 +3610,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
3607 | if (hw->mac.type == ixgbe_mac_82598EB) | 3610 | if (hw->mac.type == ixgbe_mac_82598EB) |
3608 | netif_set_gso_max_size(adapter->netdev, 32768); | 3611 | netif_set_gso_max_size(adapter->netdev, 32768); |
3609 | 3612 | ||
3610 | |||
3611 | /* Enable VLAN tag insert/strip */ | ||
3612 | adapter->netdev->features |= NETIF_F_HW_VLAN_RX; | ||
3613 | |||
3614 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); | 3613 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); |
3615 | 3614 | ||
3616 | #ifdef IXGBE_FCOE | 3615 | #ifdef IXGBE_FCOE |
@@ -6701,11 +6700,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, | |||
6701 | { | 6700 | { |
6702 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6701 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6703 | 6702 | ||
6704 | #ifdef CONFIG_DCB | ||
6705 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | ||
6706 | features &= ~NETIF_F_HW_VLAN_RX; | ||
6707 | #endif | ||
6708 | |||
6709 | /* return error if RXHASH is being enabled when RSS is not supported */ | 6703 | /* return error if RXHASH is being enabled when RSS is not supported */ |
6710 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | 6704 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) |
6711 | features &= ~NETIF_F_RXHASH; | 6705 | features &= ~NETIF_F_RXHASH; |
@@ -6718,7 +6712,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, | |||
6718 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) | 6712 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) |
6719 | features &= ~NETIF_F_LRO; | 6713 | features &= ~NETIF_F_LRO; |
6720 | 6714 | ||
6721 | |||
6722 | return features; | 6715 | return features; |
6723 | } | 6716 | } |
6724 | 6717 | ||
@@ -6766,6 +6759,11 @@ static int ixgbe_set_features(struct net_device *netdev, | |||
6766 | need_reset = true; | 6759 | need_reset = true; |
6767 | } | 6760 | } |
6768 | 6761 | ||
6762 | if (features & NETIF_F_HW_VLAN_RX) | ||
6763 | ixgbe_vlan_strip_enable(adapter); | ||
6764 | else | ||
6765 | ixgbe_vlan_strip_disable(adapter); | ||
6766 | |||
6769 | if (changed & NETIF_F_RXALL) | 6767 | if (changed & NETIF_F_RXALL) |
6770 | need_reset = true; | 6768 | need_reset = true; |
6771 | 6769 | ||
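The ixgbe hunks above stop forcing NETIF_F_HW_VLAN_RX on in the DCB path; instead the receive path checks dev->features and the set_features handler toggles VLAN stripping to match the requested bit. The user-space sketch below models that feature-toggle logic; the F_* bits and the enable/disable helpers are made-up stand-ins, not the real netdev API.

	/* Sketch of a set_features-style handler: VLAN stripping follows the
	 * requested bit directly, while other bits are only acted on when
	 * they actually change. Bit values and helpers are illustrative.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define F_HW_VLAN_RX  (1u << 0)
	#define F_RXALL       (1u << 1)

	static void vlan_strip_enable(void)  { puts("VLAN stripping on"); }
	static void vlan_strip_disable(void) { puts("VLAN stripping off"); }

	static void set_features(uint32_t *dev_features, uint32_t requested)
	{
		uint32_t changed = *dev_features ^ requested;

		/* Follow the requested state unconditionally, as the hunk now does. */
		if (requested & F_HW_VLAN_RX)
			vlan_strip_enable();
		else
			vlan_strip_disable();

		if (changed & F_RXALL)
			puts("RXALL changed: device reset needed");

		*dev_features = requested;
	}

	int main(void)
	{
		uint32_t features = F_HW_VLAN_RX;

		set_features(&features, 0);                      /* stripping off */
		set_features(&features, F_HW_VLAN_RX | F_RXALL); /* back on + RXALL */
		return 0;
	}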
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 04d901d0ff63..f0f06b2bc28b 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -436,7 +436,9 @@ struct mv643xx_eth_private { | |||
436 | /* | 436 | /* |
437 | * Hardware-specific parameters. | 437 | * Hardware-specific parameters. |
438 | */ | 438 | */ |
439 | #if defined(CONFIG_HAVE_CLK) | ||
439 | struct clk *clk; | 440 | struct clk *clk; |
441 | #endif | ||
440 | unsigned int t_clk; | 442 | unsigned int t_clk; |
441 | }; | 443 | }; |
442 | 444 | ||
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
2895 | mp->dev = dev; | 2897 | mp->dev = dev; |
2896 | 2898 | ||
2897 | /* | 2899 | /* |
2898 | * Get the clk rate, if there is one, otherwise use the default. | 2900 | * Start with a default rate, and if there is a clock, allow |
2901 | * it to override the default. | ||
2899 | */ | 2902 | */ |
2903 | mp->t_clk = 133000000; | ||
2904 | #if defined(CONFIG_HAVE_CLK) | ||
2900 | mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); | 2905 | mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); |
2901 | if (!IS_ERR(mp->clk)) { | 2906 | if (!IS_ERR(mp->clk)) { |
2902 | clk_prepare_enable(mp->clk); | 2907 | clk_prepare_enable(mp->clk); |
2903 | mp->t_clk = clk_get_rate(mp->clk); | 2908 | mp->t_clk = clk_get_rate(mp->clk); |
2904 | } else { | ||
2905 | mp->t_clk = 133000000; | ||
2906 | printk(KERN_WARNING "Unable to get clock"); | ||
2907 | } | 2909 | } |
2908 | 2910 | #endif | |
2909 | set_params(mp, pd); | 2911 | set_params(mp, pd); |
2910 | netif_set_real_num_tx_queues(dev, mp->txq_count); | 2912 | netif_set_real_num_tx_queues(dev, mp->txq_count); |
2911 | netif_set_real_num_rx_queues(dev, mp->rxq_count); | 2913 | netif_set_real_num_rx_queues(dev, mp->rxq_count); |
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev) | |||
2995 | phy_detach(mp->phy); | 2997 | phy_detach(mp->phy); |
2996 | cancel_work_sync(&mp->tx_timeout_task); | 2998 | cancel_work_sync(&mp->tx_timeout_task); |
2997 | 2999 | ||
3000 | #if defined(CONFIG_HAVE_CLK) | ||
2998 | if (!IS_ERR(mp->clk)) { | 3001 | if (!IS_ERR(mp->clk)) { |
2999 | clk_disable_unprepare(mp->clk); | 3002 | clk_disable_unprepare(mp->clk); |
3000 | clk_put(mp->clk); | 3003 | clk_put(mp->clk); |
3001 | } | 3004 | } |
3005 | #endif | ||
3006 | |||
3002 | free_netdev(mp->dev); | 3007 | free_netdev(mp->dev); |
3003 | 3008 | ||
3004 | platform_set_drvdata(pdev, NULL); | 3009 | platform_set_drvdata(pdev, NULL); |
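The mv643xx_eth hunks compile the clk handling out when CONFIG_HAVE_CLK is not set and start from a 133 MHz default that an available clock may override. The sketch below models that "default first, optional override" flow; get_optional_clk_rate() is a stand-in for clk_get()/clk_get_rate(), not a real kernel API.

	/* Sketch of "start from a default rate, let an optional clock override
	 * it", as done in the mv643xx_eth probe hunk above.
	 */
	#include <stdio.h>

	#define DEFAULT_T_CLK 133000000UL

	/* Returns 0 when no clock is available (mimicking IS_ERR(clk)). */
	static unsigned long get_optional_clk_rate(int have_clk)
	{
		return have_clk ? 200000000UL : 0;
	}

	static unsigned long probe_t_clk(int have_clk)
	{
		unsigned long t_clk = DEFAULT_T_CLK;	/* always a sane value */
		unsigned long rate = get_optional_clk_rate(have_clk);

		if (rate)				/* clock present: override */
			t_clk = rate;
		return t_clk;
	}

	int main(void)
	{
		printf("no clk:   %lu Hz\n", probe_t_clk(0));
		printf("with clk: %lu Hz\n", probe_t_clk(1));
		return 0;
	}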
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index cace36f2ab92..28a54451a3e5 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) | |||
4381 | struct sky2_port *sky2 = netdev_priv(dev); | 4381 | struct sky2_port *sky2 = netdev_priv(dev); |
4382 | netdev_features_t changed = dev->features ^ features; | 4382 | netdev_features_t changed = dev->features ^ features; |
4383 | 4383 | ||
4384 | if (changed & NETIF_F_RXCSUM) { | 4384 | if ((changed & NETIF_F_RXCSUM) && |
4385 | bool on = features & NETIF_F_RXCSUM; | 4385 | !(sky2->hw->flags & SKY2_HW_NEW_LE)) { |
4386 | sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), | 4386 | sky2_write32(sky2->hw, |
4387 | on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | 4387 | Q_ADDR(rxqaddr[sky2->port], Q_CSR), |
4388 | (features & NETIF_F_RXCSUM) | ||
4389 | ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | ||
4388 | } | 4390 | } |
4389 | 4391 | ||
4390 | if (changed & NETIF_F_RXHASH) | 4392 | if (changed & NETIF_F_RXHASH) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 1fe2c7a8b40c..a8fb52992c64 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, | |||
697 | if (slave != dev->caps.function) | 697 | if (slave != dev->caps.function) |
698 | memset(inbox->buf, 0, 256); | 698 | memset(inbox->buf, 0, 256); |
699 | if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { | 699 | if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { |
700 | *(u8 *) inbox->buf = !!reset_qkey_viols << 6; | 700 | *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; |
701 | ((__be32 *) inbox->buf)[2] = agg_cap_mask; | 701 | ((__be32 *) inbox->buf)[2] = agg_cap_mask; |
702 | } else { | 702 | } else { |
703 | ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; | 703 | ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; |
704 | ((__be32 *) inbox->buf)[1] = agg_cap_mask; | 704 | ((__be32 *) inbox->buf)[1] = agg_cap_mask; |
705 | } | 705 | } |
706 | 706 | ||
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8d2666fcffd7..083d6715335c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev) | |||
946 | /* Update stats */ | 946 | /* Update stats */ |
947 | ndev->stats.tx_packets++; | 947 | ndev->stats.tx_packets++; |
948 | ndev->stats.tx_bytes += skb->len; | 948 | ndev->stats.tx_bytes += skb->len; |
949 | |||
950 | /* Free buffer */ | ||
951 | dev_kfree_skb_irq(skb); | ||
952 | } | 949 | } |
950 | dev_kfree_skb_irq(skb); | ||
953 | 951 | ||
954 | txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); | 952 | txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); |
955 | } | 953 | } |
956 | 954 | ||
957 | if (netif_queue_stopped(ndev)) | 955 | if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) { |
958 | netif_wake_queue(ndev); | 956 | if (netif_queue_stopped(ndev)) |
957 | netif_wake_queue(ndev); | ||
958 | } | ||
959 | } | 959 | } |
960 | 960 | ||
961 | static int __lpc_handle_recv(struct net_device *ndev, int budget) | 961 | static int __lpc_handle_recv(struct net_device *ndev, int budget) |
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = { | |||
1320 | .ndo_set_rx_mode = lpc_eth_set_multicast_list, | 1320 | .ndo_set_rx_mode = lpc_eth_set_multicast_list, |
1321 | .ndo_do_ioctl = lpc_eth_ioctl, | 1321 | .ndo_do_ioctl = lpc_eth_ioctl, |
1322 | .ndo_set_mac_address = lpc_set_mac_address, | 1322 | .ndo_set_mac_address = lpc_set_mac_address, |
1323 | .ndo_change_mtu = eth_change_mtu, | ||
1323 | }; | 1324 | }; |
1324 | 1325 | ||
1325 | static int lpc_eth_drv_probe(struct platform_device *pdev) | 1326 | static int lpc_eth_drv_probe(struct platform_device *pdev) |
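The lpc_eth hunk above frees the transmitted skb once per reclaimed descriptor and only wakes a stopped queue when at least half of the TX descriptors are free again, avoiding rapid stop/wake cycles. The following user-space sketch shows that wake-threshold idea; TX_DESC, queue_stopped and tx_complete() are illustrative, not the driver's real names.

	/* Sketch of the wake-threshold policy: wake a stopped queue only once
	 * the number of in-use descriptors drops to half the ring size.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define TX_DESC 64

	static bool queue_stopped = true;

	static void tx_complete(int *used)
	{
		if (*used > 0)
			(*used)--;			/* one descriptor reclaimed */

		if (*used <= TX_DESC / 2 && queue_stopped) {
			queue_stopped = false;		/* netif_wake_queue() */
			puts("queue woken");
		}
	}

	int main(void)
	{
		int used = TX_DESC;

		for (int i = 0; i < TX_DESC; i++)
			tx_complete(&used);
		printf("used descriptors left: %d, stopped=%d\n", used, queue_stopped);
		return 0;
	}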
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9757ce3543a0..7260aa79466a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -5889,11 +5889,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) | |||
5889 | if (status & LinkChg) | 5889 | if (status & LinkChg) |
5890 | __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); | 5890 | __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); |
5891 | 5891 | ||
5892 | napi_disable(&tp->napi); | 5892 | rtl_irq_enable_all(tp); |
5893 | rtl_irq_disable(tp); | ||
5894 | |||
5895 | napi_enable(&tp->napi); | ||
5896 | napi_schedule(&tp->napi); | ||
5897 | } | 5893 | } |
5898 | 5894 | ||
5899 | static void rtl_task(struct work_struct *work) | 5895 | static void rtl_task(struct work_struct *work) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 036428348faa..9f448279e12a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
@@ -13,7 +13,7 @@ config STMMAC_ETH | |||
13 | if STMMAC_ETH | 13 | if STMMAC_ETH |
14 | 14 | ||
15 | config STMMAC_PLATFORM | 15 | config STMMAC_PLATFORM |
16 | tristate "STMMAC platform bus support" | 16 | bool "STMMAC Platform bus support" |
17 | depends on STMMAC_ETH | 17 | depends on STMMAC_ETH |
18 | default y | 18 | default y |
19 | ---help--- | 19 | ---help--- |
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM | |||
26 | If unsure, say N. | 26 | If unsure, say N. |
27 | 27 | ||
28 | config STMMAC_PCI | 28 | config STMMAC_PCI |
29 | tristate "STMMAC support on PCI bus (EXPERIMENTAL)" | 29 | bool "STMMAC PCI bus support (EXPERIMENTAL)" |
30 | depends on STMMAC_ETH && PCI && EXPERIMENTAL | 30 | depends on STMMAC_ETH && PCI && EXPERIMENTAL |
31 | ---help--- | 31 | ---help--- |
32 | This is to select the Synopsys DWMAC available on PCI devices, | 32 | This is to select the Synopsys DWMAC available on PCI devices, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 6b5d060ee9de..dc20c56efc9d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
27 | #include <linux/stmmac.h> | 27 | #include <linux/stmmac.h> |
28 | #include <linux/phy.h> | 28 | #include <linux/phy.h> |
29 | #include <linux/pci.h> | ||
29 | #include "common.h" | 30 | #include "common.h" |
30 | #ifdef CONFIG_STMMAC_TIMER | 31 | #ifdef CONFIG_STMMAC_TIMER |
31 | #include "stmmac_timer.h" | 32 | #include "stmmac_timer.h" |
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev); | |||
95 | extern void stmmac_set_ethtool_ops(struct net_device *netdev); | 96 | extern void stmmac_set_ethtool_ops(struct net_device *netdev); |
96 | extern const struct stmmac_desc_ops enh_desc_ops; | 97 | extern const struct stmmac_desc_ops enh_desc_ops; |
97 | extern const struct stmmac_desc_ops ndesc_ops; | 98 | extern const struct stmmac_desc_ops ndesc_ops; |
98 | |||
99 | int stmmac_freeze(struct net_device *ndev); | 99 | int stmmac_freeze(struct net_device *ndev); |
100 | int stmmac_restore(struct net_device *ndev); | 100 | int stmmac_restore(struct net_device *ndev); |
101 | int stmmac_resume(struct net_device *ndev); | 101 | int stmmac_resume(struct net_device *ndev); |
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
109 | static inline int stmmac_clk_enable(struct stmmac_priv *priv) | 109 | static inline int stmmac_clk_enable(struct stmmac_priv *priv) |
110 | { | 110 | { |
111 | if (!IS_ERR(priv->stmmac_clk)) | 111 | if (!IS_ERR(priv->stmmac_clk)) |
112 | return clk_enable(priv->stmmac_clk); | 112 | return clk_prepare_enable(priv->stmmac_clk); |
113 | 113 | ||
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv) | |||
119 | if (IS_ERR(priv->stmmac_clk)) | 119 | if (IS_ERR(priv->stmmac_clk)) |
120 | return; | 120 | return; |
121 | 121 | ||
122 | clk_disable(priv->stmmac_clk); | 122 | clk_disable_unprepare(priv->stmmac_clk); |
123 | } | 123 | } |
124 | static inline int stmmac_clk_get(struct stmmac_priv *priv) | 124 | static inline int stmmac_clk_get(struct stmmac_priv *priv) |
125 | { | 125 | { |
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv) | |||
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | #endif /* CONFIG_HAVE_CLK */ | 145 | #endif /* CONFIG_HAVE_CLK */ |
146 | |||
147 | |||
148 | #ifdef CONFIG_STMMAC_PLATFORM | ||
149 | extern struct platform_driver stmmac_pltfr_driver; | ||
150 | static inline int stmmac_register_platform(void) | ||
151 | { | ||
152 | int err; | ||
153 | |||
154 | err = platform_driver_register(&stmmac_pltfr_driver); | ||
155 | if (err) | ||
156 | pr_err("stmmac: failed to register the platform driver\n"); | ||
157 | |||
158 | return err; | ||
159 | } | ||
160 | static inline void stmmac_unregister_platform(void) | ||
161 | { | ||
162 | platform_driver_unregister(&stmmac_pltfr_driver); | ||
163 | } | ||
164 | #else | ||
165 | static inline int stmmac_register_platform(void) | ||
166 | { | ||
167 | pr_debug("stmmac: do not register the platform driver\n"); | ||
168 | |||
169 | return -EINVAL; | ||
170 | } | ||
171 | static inline void stmmac_unregister_platform(void) | ||
172 | { | ||
173 | } | ||
174 | #endif /* CONFIG_STMMAC_PLATFORM */ | ||
175 | |||
176 | #ifdef CONFIG_STMMAC_PCI | ||
177 | extern struct pci_driver stmmac_pci_driver; | ||
178 | static inline int stmmac_register_pci(void) | ||
179 | { | ||
180 | int err; | ||
181 | |||
182 | err = pci_register_driver(&stmmac_pci_driver); | ||
183 | if (err) | ||
184 | pr_err("stmmac: failed to register the PCI driver\n"); | ||
185 | |||
186 | return err; | ||
187 | } | ||
188 | static inline void stmmac_unregister_pci(void) | ||
189 | { | ||
190 | pci_unregister_driver(&stmmac_pci_driver); | ||
191 | } | ||
192 | #else | ||
193 | static inline int stmmac_register_pci(void) | ||
194 | { | ||
195 | pr_debug("stmmac: do not register the PCI driver\n"); | ||
196 | |||
197 | return -EINVAL; | ||
198 | } | ||
199 | static inline void stmmac_unregister_pci(void) | ||
200 | { | ||
201 | } | ||
202 | #endif /* CONFIG_STMMAC_PCI */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 70966330f44e..51b3b68528ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) | |||
833 | 833 | ||
834 | /** | 834 | /** |
835 | * stmmac_selec_desc_mode | 835 | * stmmac_selec_desc_mode |
836 | * @dev : device pointer | 836 | * @priv : private structure |
837 | * Description: select the Enhanced/Alternate or Normal descriptors */ | 837 | * Description: select the Enhanced/Alternate or Normal descriptors |
838 | */ | ||
838 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) | 839 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) |
839 | { | 840 | { |
840 | if (priv->plat->enh_desc) { | 841 | if (priv->plat->enh_desc) { |
@@ -1861,6 +1862,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
1861 | /** | 1862 | /** |
1862 | * stmmac_dvr_probe | 1863 | * stmmac_dvr_probe |
1863 | * @device: device pointer | 1864 | * @device: device pointer |
1865 | * @plat_dat: platform data pointer | ||
1866 | * @addr: iobase memory address | ||
1864 | * Description: this is the main probe function used to | 1867 | * Description: this is the main probe function used to |
1865 | * call the alloc_etherdev, allocate the priv structure. | 1868 | * call the alloc_etherdev, allocate the priv structure. |
1866 | */ | 1869 | */ |
@@ -2090,6 +2093,34 @@ int stmmac_restore(struct net_device *ndev) | |||
2090 | } | 2093 | } |
2091 | #endif /* CONFIG_PM */ | 2094 | #endif /* CONFIG_PM */ |
2092 | 2095 | ||
2096 | /* Driver can be configured w/ and w/o both PCI and Platform drivers | ||
2097 | * depending on the configuration selected. | ||
2098 | */ | ||
2099 | static int __init stmmac_init(void) | ||
2100 | { | ||
2101 | int err_plt = 0; | ||
2102 | int err_pci = 0; | ||
2103 | |||
2104 | err_plt = stmmac_register_platform(); | ||
2105 | err_pci = stmmac_register_pci(); | ||
2106 | |||
2107 | if ((err_pci) && (err_plt)) { | ||
2108 | pr_err("stmmac: driver registration failed\n"); | ||
2109 | return -EINVAL; | ||
2110 | } | ||
2111 | |||
2112 | return 0; | ||
2113 | } | ||
2114 | |||
2115 | static void __exit stmmac_exit(void) | ||
2116 | { | ||
2117 | stmmac_unregister_platform(); | ||
2118 | stmmac_unregister_pci(); | ||
2119 | } | ||
2120 | |||
2121 | module_init(stmmac_init); | ||
2122 | module_exit(stmmac_exit); | ||
2123 | |||
2093 | #ifndef MODULE | 2124 | #ifndef MODULE |
2094 | static int __init stmmac_cmdline_opt(char *str) | 2125 | static int __init stmmac_cmdline_opt(char *str) |
2095 | { | 2126 | { |
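With the new stmmac_init(), both front ends are registered and initialization fails only when neither registration succeeds (each helper returns -EINVAL when its bus support is compiled out). The sketch below reproduces that policy in user space; register_platform()/register_pci() are stubs standing in for the real stmmac helpers.

	/* Sketch of the "fail only if both back ends fail" registration policy
	 * introduced by stmmac_init() above.
	 */
	#include <errno.h>
	#include <stdio.h>

	static int register_platform(int built_in) { return built_in ? 0 : -EINVAL; }
	static int register_pci(int built_in)      { return built_in ? 0 : -EINVAL; }

	static int driver_init(int have_platform, int have_pci)
	{
		int err_plt = register_platform(have_platform);
		int err_pci = register_pci(have_pci);

		if (err_plt && err_pci) {	/* both failed: nothing to drive */
			fprintf(stderr, "driver registration failed\n");
			return -EINVAL;
		}
		return 0;			/* at least one bus registered */
	}

	int main(void)
	{
		printf("platform only: %d\n", driver_init(1, 0));
		printf("neither:       %d\n", driver_init(0, 0));
		return 0;
	}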
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 58fab5303e9c..cf826e6b6aa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { | |||
179 | 179 | ||
180 | MODULE_DEVICE_TABLE(pci, stmmac_id_table); | 180 | MODULE_DEVICE_TABLE(pci, stmmac_id_table); |
181 | 181 | ||
182 | static struct pci_driver stmmac_driver = { | 182 | struct pci_driver stmmac_pci_driver = { |
183 | .name = STMMAC_RESOURCE_NAME, | 183 | .name = STMMAC_RESOURCE_NAME, |
184 | .id_table = stmmac_id_table, | 184 | .id_table = stmmac_id_table, |
185 | .probe = stmmac_pci_probe, | 185 | .probe = stmmac_pci_probe, |
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = { | |||
190 | #endif | 190 | #endif |
191 | }; | 191 | }; |
192 | 192 | ||
193 | /** | ||
194 | * stmmac_init_module - Entry point for the driver | ||
195 | * Description: This function is the entry point for the driver. | ||
196 | */ | ||
197 | static int __init stmmac_init_module(void) | ||
198 | { | ||
199 | int ret; | ||
200 | |||
201 | ret = pci_register_driver(&stmmac_driver); | ||
202 | if (ret < 0) | ||
203 | pr_err("%s: ERROR: driver registration failed\n", __func__); | ||
204 | |||
205 | return ret; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * stmmac_cleanup_module - Cleanup routine for the driver | ||
210 | * Description: This function is the cleanup routine for the driver. | ||
211 | */ | ||
212 | static void __exit stmmac_cleanup_module(void) | ||
213 | { | ||
214 | pci_unregister_driver(&stmmac_driver); | ||
215 | } | ||
216 | |||
217 | module_init(stmmac_init_module); | ||
218 | module_exit(stmmac_cleanup_module); | ||
219 | |||
220 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); | 193 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); |
221 | MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); | 194 | MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); |
222 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 195 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3dd8f0803808..680d2b8dfe27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
255 | }; | 255 | }; |
256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); | 256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); |
257 | 257 | ||
258 | static struct platform_driver stmmac_driver = { | 258 | struct platform_driver stmmac_pltfr_driver = { |
259 | .probe = stmmac_pltfr_probe, | 259 | .probe = stmmac_pltfr_probe, |
260 | .remove = stmmac_pltfr_remove, | 260 | .remove = stmmac_pltfr_remove, |
261 | .driver = { | 261 | .driver = { |
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = { | |||
266 | }, | 266 | }, |
267 | }; | 267 | }; |
268 | 268 | ||
269 | module_platform_driver(stmmac_driver); | ||
270 | |||
271 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); | 269 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); |
272 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 270 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
273 | MODULE_LICENSE("GPL"); | 271 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 703c8cce2a2c..8c726b7004d3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) | |||
3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | 3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) |
3599 | { | 3599 | { |
3600 | struct netdev_queue *txq; | 3600 | struct netdev_queue *txq; |
3601 | unsigned int tx_bytes; | ||
3602 | u16 pkt_cnt, tmp; | 3601 | u16 pkt_cnt, tmp; |
3603 | int cons, index; | 3602 | int cons, index; |
3604 | u64 cs; | 3603 | u64 cs; |
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | |||
3621 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, | 3620 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, |
3622 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); | 3621 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); |
3623 | 3622 | ||
3624 | tx_bytes = 0; | 3623 | while (pkt_cnt--) |
3625 | tmp = pkt_cnt; | ||
3626 | while (tmp--) { | ||
3627 | tx_bytes += rp->tx_buffs[cons].skb->len; | ||
3628 | cons = release_tx_packet(np, rp, cons); | 3624 | cons = release_tx_packet(np, rp, cons); |
3629 | } | ||
3630 | 3625 | ||
3631 | rp->cons = cons; | 3626 | rp->cons = cons; |
3632 | smp_mb(); | 3627 | smp_mb(); |
3633 | 3628 | ||
3634 | netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes); | ||
3635 | |||
3636 | out: | 3629 | out: |
3637 | if (unlikely(netif_tx_queue_stopped(txq) && | 3630 | if (unlikely(netif_tx_queue_stopped(txq) && |
3638 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { | 3631 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { |
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np) | |||
4333 | struct tx_ring_info *rp = &np->tx_rings[i]; | 4326 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4334 | 4327 | ||
4335 | niu_free_tx_ring_info(np, rp); | 4328 | niu_free_tx_ring_info(np, rp); |
4336 | netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i)); | ||
4337 | } | 4329 | } |
4338 | kfree(np->tx_rings); | 4330 | kfree(np->tx_rings); |
4339 | np->tx_rings = NULL; | 4331 | np->tx_rings = NULL; |
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, | |||
6739 | prod = NEXT_TX(rp, prod); | 6731 | prod = NEXT_TX(rp, prod); |
6740 | } | 6732 | } |
6741 | 6733 | ||
6742 | netdev_tx_sent_queue(txq, skb->len); | ||
6743 | |||
6744 | if (prod < rp->prod) | 6734 | if (prod < rp->prod) |
6745 | rp->wrap_bit ^= TX_RING_KICK_WRAP; | 6735 | rp->wrap_bit ^= TX_RING_KICK_WRAP; |
6746 | rp->prod = prod; | 6736 | rp->prod = prod; |
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig index 2d9218f86bca..098b1c42b393 100644 --- a/drivers/net/ethernet/tile/Kconfig +++ b/drivers/net/ethernet/tile/Kconfig | |||
@@ -7,6 +7,8 @@ config TILE_NET | |||
7 | depends on TILE | 7 | depends on TILE |
8 | default y | 8 | default y |
9 | select CRC32 | 9 | select CRC32 |
10 | select TILE_GXIO_MPIPE if TILEGX | ||
11 | select HIGH_RES_TIMERS if TILEGX | ||
10 | ---help--- | 12 | ---help--- |
11 | This is a standard Linux network device driver for the | 13 | This is a standard Linux network device driver for the |
12 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. | 14 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. |
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile index f634f142cab4..0ef9eefd3211 100644 --- a/drivers/net/ethernet/tile/Makefile +++ b/drivers/net/ethernet/tile/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_TILE_NET) += tile_net.o | 5 | obj-$(CONFIG_TILE_NET) += tile_net.o |
6 | ifdef CONFIG_TILEGX | 6 | ifdef CONFIG_TILEGX |
7 | tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o | 7 | tile_net-y := tilegx.o |
8 | else | 8 | else |
9 | tile_net-objs := tilepro.o | 9 | tile_net-y := tilepro.o |
10 | endif | 10 | endif |
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c new file mode 100644 index 000000000000..83b4b388ad49 --- /dev/null +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -0,0 +1,1898 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> /* printk() */ | ||
20 | #include <linux/slab.h> /* kmalloc() */ | ||
21 | #include <linux/errno.h> /* error codes */ | ||
22 | #include <linux/types.h> /* size_t */ | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/in.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/netdevice.h> /* struct device, and other headers */ | ||
27 | #include <linux/etherdevice.h> /* eth_type_trans */ | ||
28 | #include <linux/skbuff.h> | ||
29 | #include <linux/ioctl.h> | ||
30 | #include <linux/cdev.h> | ||
31 | #include <linux/hugetlb.h> | ||
32 | #include <linux/in6.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/hrtimer.h> | ||
35 | #include <linux/ktime.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/ctype.h> | ||
38 | #include <linux/ip.h> | ||
39 | #include <linux/tcp.h> | ||
40 | |||
41 | #include <asm/checksum.h> | ||
42 | #include <asm/homecache.h> | ||
43 | #include <gxio/mpipe.h> | ||
44 | #include <arch/sim.h> | ||
45 | |||
46 | /* Default transmit lockup timeout period, in jiffies. */ | ||
47 | #define TILE_NET_TIMEOUT (5 * HZ) | ||
48 | |||
49 | /* The maximum number of distinct channels (idesc.channel is 5 bits). */ | ||
50 | #define TILE_NET_CHANNELS 32 | ||
51 | |||
52 | /* Maximum number of idescs to handle per "poll". */ | ||
53 | #define TILE_NET_BATCH 128 | ||
54 | |||
55 | /* Maximum number of packets to handle per "poll". */ | ||
56 | #define TILE_NET_WEIGHT 64 | ||
57 | |||
58 | /* Number of entries in each iqueue. */ | ||
59 | #define IQUEUE_ENTRIES 512 | ||
60 | |||
61 | /* Number of entries in each equeue. */ | ||
62 | #define EQUEUE_ENTRIES 2048 | ||
63 | |||
64 | /* Total header bytes per equeue slot. Must be big enough for 2 bytes | ||
65 | * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to | ||
66 | * 60 bytes of actual TCP header. We round up to align to cache lines. | ||
67 | */ | ||
68 | #define HEADER_BYTES 128 | ||
69 | |||
70 | /* Maximum completions per cpu per device (must be a power of two). | ||
71 | * ISSUE: What is the right number here? If this is too small, then | ||
72 | * egress might block waiting for free space in a completions array. | ||
73 | * ISSUE: At the least, allocate these only for initialized echannels. | ||
74 | */ | ||
75 | #define TILE_NET_MAX_COMPS 64 | ||
76 | |||
77 | #define MAX_FRAGS (MAX_SKB_FRAGS + 1) | ||
78 | |||
79 | /* Size of completions data to allocate. | ||
80 | * ISSUE: Probably more than needed since we don't use all the channels. | ||
81 | */ | ||
82 | #define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) | ||
83 | |||
84 | /* Size of NotifRing data to allocate. */ | ||
85 | #define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) | ||
86 | |||
87 | /* Timeout to wake the per-device TX timer after we stop the queue. | ||
88 | * We don't want the timeout too short (adds overhead, and might end | ||
89 | * up causing stop/wake/stop/wake cycles) or too long (affects performance). | ||
90 | * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets. | ||
91 | */ | ||
92 | #define TX_TIMER_DELAY_USEC 30 | ||
93 | |||
94 | /* Timeout to wake the per-cpu egress timer to free completions. */ | ||
95 | #define EGRESS_TIMER_DELAY_USEC 1000 | ||
96 | |||
97 | MODULE_AUTHOR("Tilera Corporation"); | ||
98 | MODULE_LICENSE("GPL"); | ||
99 | |||
100 | /* A "packet fragment" (a chunk of memory). */ | ||
101 | struct frag { | ||
102 | void *buf; | ||
103 | size_t length; | ||
104 | }; | ||
105 | |||
106 | /* A single completion. */ | ||
107 | struct tile_net_comp { | ||
108 | /* The "complete_count" when the completion will be complete. */ | ||
109 | s64 when; | ||
110 | /* The buffer to be freed when the completion is complete. */ | ||
111 | struct sk_buff *skb; | ||
112 | }; | ||
113 | |||
114 | /* The completions for a given cpu and echannel. */ | ||
115 | struct tile_net_comps { | ||
116 | /* The completions. */ | ||
117 | struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; | ||
118 | /* The number of completions used. */ | ||
119 | unsigned long comp_next; | ||
120 | /* The number of completions freed. */ | ||
121 | unsigned long comp_last; | ||
122 | }; | ||
123 | |||
124 | /* The transmit wake timer for a given cpu and echannel. */ | ||
125 | struct tile_net_tx_wake { | ||
126 | struct hrtimer timer; | ||
127 | struct net_device *dev; | ||
128 | }; | ||
129 | |||
130 | /* Info for a specific cpu. */ | ||
131 | struct tile_net_info { | ||
132 | /* The NAPI struct. */ | ||
133 | struct napi_struct napi; | ||
134 | /* Packet queue. */ | ||
135 | gxio_mpipe_iqueue_t iqueue; | ||
136 | /* Our cpu. */ | ||
137 | int my_cpu; | ||
138 | /* True if iqueue is valid. */ | ||
139 | bool has_iqueue; | ||
140 | /* NAPI flags. */ | ||
141 | bool napi_added; | ||
142 | bool napi_enabled; | ||
143 | /* Number of small sk_buffs which must still be provided. */ | ||
144 | unsigned int num_needed_small_buffers; | ||
145 | /* Number of large sk_buffs which must still be provided. */ | ||
146 | unsigned int num_needed_large_buffers; | ||
147 | /* A timer for handling egress completions. */ | ||
148 | struct hrtimer egress_timer; | ||
149 | /* True if "egress_timer" is scheduled. */ | ||
150 | bool egress_timer_scheduled; | ||
151 | /* Comps for each egress channel. */ | ||
152 | struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; | ||
153 | /* Transmit wake timer for each egress channel. */ | ||
154 | struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; | ||
155 | }; | ||
156 | |||
157 | /* Info for egress on a particular egress channel. */ | ||
158 | struct tile_net_egress { | ||
159 | /* The "equeue". */ | ||
160 | gxio_mpipe_equeue_t *equeue; | ||
161 | /* The headers for TSO. */ | ||
162 | unsigned char *headers; | ||
163 | }; | ||
164 | |||
165 | /* Info for a specific device. */ | ||
166 | struct tile_net_priv { | ||
167 | /* Our network device. */ | ||
168 | struct net_device *dev; | ||
169 | /* The primary link. */ | ||
170 | gxio_mpipe_link_t link; | ||
171 | /* The primary channel, if open, else -1. */ | ||
172 | int channel; | ||
173 | /* The "loopify" egress link, if needed. */ | ||
174 | gxio_mpipe_link_t loopify_link; | ||
175 | /* The "loopify" egress channel, if open, else -1. */ | ||
176 | int loopify_channel; | ||
177 | /* The egress channel (channel or loopify_channel). */ | ||
178 | int echannel; | ||
179 | /* Total stats. */ | ||
180 | struct net_device_stats stats; | ||
181 | }; | ||
182 | |||
183 | /* Egress info, indexed by "priv->echannel" (lazily created as needed). */ | ||
184 | static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; | ||
185 | |||
186 | /* Devices currently associated with each channel. | ||
187 | * NOTE: The array entry can become NULL after ifconfig down, but | ||
188 | * we do not free the underlying net_device structures, so it is | ||
189 | * safe to use a pointer after reading it from this array. | ||
190 | */ | ||
191 | static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; | ||
192 | |||
193 | /* A mutex for "tile_net_devs_for_channel". */ | ||
194 | static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); | ||
195 | |||
196 | /* The per-cpu info. */ | ||
197 | static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); | ||
198 | |||
199 | /* The "context" for all devices. */ | ||
200 | static gxio_mpipe_context_t context; | ||
201 | |||
202 | /* Buffer sizes and mpipe enum codes for buffer stacks. | ||
203 | * See arch/tile/include/gxio/mpipe.h for the set of possible values. | ||
204 | */ | ||
205 | #define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 | ||
206 | #define BUFFER_SIZE_SMALL 128 | ||
207 | #define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 | ||
208 | #define BUFFER_SIZE_LARGE 1664 | ||
209 | |||
210 | /* The small/large "buffer stacks". */ | ||
211 | static int small_buffer_stack = -1; | ||
212 | static int large_buffer_stack = -1; | ||
213 | |||
214 | /* Amount of memory allocated for each buffer stack. */ | ||
215 | static size_t buffer_stack_size; | ||
216 | |||
217 | /* The actual memory allocated for the buffer stacks. */ | ||
218 | static void *small_buffer_stack_va; | ||
219 | static void *large_buffer_stack_va; | ||
220 | |||
221 | /* The buckets. */ | ||
222 | static int first_bucket = -1; | ||
223 | static int num_buckets = 1; | ||
224 | |||
225 | /* The ingress irq. */ | ||
226 | static int ingress_irq = -1; | ||
227 | |||
228 | /* Text value of tile_net.cpus if passed as a module parameter. */ | ||
229 | static char *network_cpus_string; | ||
230 | |||
231 | /* The actual cpus in "network_cpus". */ | ||
232 | static struct cpumask network_cpus_map; | ||
233 | |||
234 | /* If "loopify=LINK" was specified, this is "LINK". */ | ||
235 | static char *loopify_link_name; | ||
236 | |||
237 | /* If "tile_net.custom" was specified, this is non-NULL. */ | ||
238 | static char *custom_str; | ||
239 | |||
240 | /* The "tile_net.cpus" argument specifies the cpus that are dedicated | ||
241 | * to handle ingress packets. | ||
242 | * | ||
243 | * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where | ||
244 | * m, n, x, y are integer numbers that represent the cpus that can be | ||
245 | * neither a dedicated cpu nor a dataplane cpu. | ||
246 | */ | ||
247 | static bool network_cpus_init(void) | ||
248 | { | ||
249 | char buf[1024]; | ||
250 | int rc; | ||
251 | |||
252 | if (network_cpus_string == NULL) | ||
253 | return false; | ||
254 | |||
255 | rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); | ||
256 | if (rc != 0) { | ||
257 | pr_warn("tile_net.cpus=%s: malformed cpu list\n", | ||
258 | network_cpus_string); | ||
259 | return false; | ||
260 | } | ||
261 | |||
262 | /* Remove dedicated cpus. */ | ||
263 | cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); | ||
264 | |||
265 | if (cpumask_empty(&network_cpus_map)) { | ||
266 | pr_warn("Ignoring empty tile_net.cpus='%s'.\n", | ||
267 | network_cpus_string); | ||
268 | return false; | ||
269 | } | ||
270 | |||
271 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | ||
272 | pr_info("Linux network CPUs: %s\n", buf); | ||
273 | return true; | ||
274 | } | ||
275 | |||
276 | module_param_named(cpus, network_cpus_string, charp, 0444); | ||
277 | MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); | ||
278 | |||
279 | /* The "tile_net.loopify=LINK" argument causes the named device to | ||
280 | * actually use "loop0" for ingress, and "loop1" for egress. This | ||
281 | * allows an app to sit between the actual link and linux, passing | ||
282 | * (some) packets along to linux, and forwarding (some) packets sent | ||
283 | * out by linux. | ||
284 | */ | ||
285 | module_param_named(loopify, loopify_link_name, charp, 0444); | ||
286 | MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); | ||
287 | |||
288 | /* The "tile_net.custom" argument causes us to ignore the "conventional" | ||
289 | * classifier metadata, in particular, the "l2_offset". | ||
290 | */ | ||
291 | module_param_named(custom, custom_str, charp, 0444); | ||
292 | MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); | ||
293 | |||
294 | /* Atomically update a statistics field. | ||
295 | * Note that on TILE-Gx, this operation is fire-and-forget on the | ||
296 | * issuing core (single-cycle dispatch) and takes only a few cycles | ||
297 | * longer than a regular store when the request reaches the home cache. | ||
298 | * No expensive bus management overhead is required. | ||
299 | */ | ||
300 | static void tile_net_stats_add(unsigned long value, unsigned long *field) | ||
301 | { | ||
302 | BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); | ||
303 | atomic_long_add(value, (atomic_long_t *)field); | ||
304 | } | ||
305 | |||
306 | /* Allocate and push a buffer. */ | ||
307 | static bool tile_net_provide_buffer(bool small) | ||
308 | { | ||
309 | int stack = small ? small_buffer_stack : large_buffer_stack; | ||
310 | const unsigned long buffer_alignment = 128; | ||
311 | struct sk_buff *skb; | ||
312 | int len; | ||
313 | |||
314 | len = sizeof(struct sk_buff **) + buffer_alignment; | ||
315 | len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); | ||
316 | skb = dev_alloc_skb(len); | ||
317 | if (skb == NULL) | ||
318 | return false; | ||
319 | |||
320 | /* Make room for a back-pointer to 'skb' and guarantee alignment. */ | ||
321 | skb_reserve(skb, sizeof(struct sk_buff **)); | ||
322 | skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); | ||
323 | |||
324 | /* Save a back-pointer to 'skb'. */ | ||
325 | *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; | ||
326 | |||
327 | /* Make sure "skb" and the back-pointer have been flushed. */ | ||
328 | wmb(); | ||
329 | |||
330 | gxio_mpipe_push_buffer(&context, stack, | ||
331 | (void *)va_to_tile_io_addr(skb->data)); | ||
332 | |||
333 | return true; | ||
334 | } | ||
335 | |||
336 | /* Convert a raw mpipe buffer to its matching skb pointer. */ | ||
337 | static struct sk_buff *mpipe_buf_to_skb(void *va) | ||
338 | { | ||
339 | /* Acquire the associated "skb". */ | ||
340 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
341 | struct sk_buff *skb = *skb_ptr; | ||
342 | |||
343 | /* Paranoia. */ | ||
344 | if (skb->data != va) { | ||
345 | /* Panic here since there's a reasonable chance | ||
346 | * that corrupt buffers means generic memory | ||
347 | * corruption, with unpredictable system effects. | ||
348 | */ | ||
349 | panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", | ||
350 | va, skb, skb->data); | ||
351 | } | ||
352 | |||
353 | return skb; | ||
354 | } | ||
355 | |||
356 | static void tile_net_pop_all_buffers(int stack) | ||
357 | { | ||
358 | for (;;) { | ||
359 | tile_io_addr_t addr = | ||
360 | (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); | ||
361 | if (addr == 0) | ||
362 | break; | ||
363 | dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | /* Provide linux buffers to mPIPE. */ | ||
368 | static void tile_net_provide_needed_buffers(void) | ||
369 | { | ||
370 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
371 | |||
372 | while (info->num_needed_small_buffers != 0) { | ||
373 | if (!tile_net_provide_buffer(true)) | ||
374 | goto oops; | ||
375 | info->num_needed_small_buffers--; | ||
376 | } | ||
377 | |||
378 | while (info->num_needed_large_buffers != 0) { | ||
379 | if (!tile_net_provide_buffer(false)) | ||
380 | goto oops; | ||
381 | info->num_needed_large_buffers--; | ||
382 | } | ||
383 | |||
384 | return; | ||
385 | |||
386 | oops: | ||
387 | /* Add a description to the page allocation failure dump. */ | ||
388 | pr_notice("Tile %d still needs some buffers\n", info->my_cpu); | ||
389 | } | ||
390 | |||
391 | static inline bool filter_packet(struct net_device *dev, void *buf) | ||
392 | { | ||
393 | /* Filter packets received before we're up. */ | ||
394 | if (dev == NULL || !(dev->flags & IFF_UP)) | ||
395 | return true; | ||
396 | |||
397 | /* Filter out packets that aren't for us. */ | ||
398 | if (!(dev->flags & IFF_PROMISC) && | ||
399 | !is_multicast_ether_addr(buf) && | ||
400 | compare_ether_addr(dev->dev_addr, buf) != 0) | ||
401 | return true; | ||
402 | |||
403 | return false; | ||
404 | } | ||
405 | |||
406 | static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, | ||
407 | gxio_mpipe_idesc_t *idesc, unsigned long len) | ||
408 | { | ||
409 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
410 | struct tile_net_priv *priv = netdev_priv(dev); | ||
411 | |||
412 | /* Encode the actual packet length. */ | ||
413 | skb_put(skb, len); | ||
414 | |||
415 | skb->protocol = eth_type_trans(skb, dev); | ||
416 | |||
417 | /* Acknowledge "good" hardware checksums. */ | ||
418 | if (idesc->cs && idesc->csum_seed_val == 0xFFFF) | ||
419 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
420 | |||
421 | netif_receive_skb(skb); | ||
422 | |||
423 | /* Update stats. */ | ||
424 | tile_net_stats_add(1, &priv->stats.rx_packets); | ||
425 | tile_net_stats_add(len, &priv->stats.rx_bytes); | ||
426 | |||
427 | /* Need a new buffer. */ | ||
428 | if (idesc->size == BUFFER_SIZE_SMALL_ENUM) | ||
429 | info->num_needed_small_buffers++; | ||
430 | else | ||
431 | info->num_needed_large_buffers++; | ||
432 | } | ||
433 | |||
434 | /* Handle a packet. Return true if "processed", false if "filtered". */ | ||
435 | static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) | ||
436 | { | ||
437 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
438 | struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; | ||
439 | uint8_t l2_offset; | ||
440 | void *va; | ||
441 | void *buf; | ||
442 | unsigned long len; | ||
443 | bool filter; | ||
444 | |||
445 | /* Drop packets for which no buffer was available. | ||
446 | * NOTE: This happens under heavy load. | ||
447 | */ | ||
448 | if (idesc->be) { | ||
449 | struct tile_net_priv *priv = netdev_priv(dev); | ||
450 | tile_net_stats_add(1, &priv->stats.rx_dropped); | ||
451 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
452 | if (net_ratelimit()) | ||
453 | pr_info("Dropping packet (insufficient buffers).\n"); | ||
454 | return false; | ||
455 | } | ||
456 | |||
457 | /* Get the "l2_offset", if allowed. */ | ||
458 | l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); | ||
459 | |||
460 | /* Get the raw buffer VA (includes "headroom"). */ | ||
461 | va = tile_io_addr_to_va((unsigned long)(long)idesc->va); | ||
462 | |||
463 | /* Get the actual packet start/length. */ | ||
464 | buf = va + l2_offset; | ||
465 | len = idesc->l2_size - l2_offset; | ||
466 | |||
467 | /* Point "va" at the raw buffer. */ | ||
468 | va -= NET_IP_ALIGN; | ||
469 | |||
470 | filter = filter_packet(dev, buf); | ||
471 | if (filter) { | ||
472 | gxio_mpipe_iqueue_drop(&info->iqueue, idesc); | ||
473 | } else { | ||
474 | struct sk_buff *skb = mpipe_buf_to_skb(va); | ||
475 | |||
476 | /* Skip headroom, and any custom header. */ | ||
477 | skb_reserve(skb, NET_IP_ALIGN + l2_offset); | ||
478 | |||
479 | tile_net_receive_skb(dev, skb, idesc, len); | ||
480 | } | ||
481 | |||
482 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
483 | return !filter; | ||
484 | } | ||
485 | |||
486 | /* Handle some packets for the current CPU. | ||
487 | * | ||
488 | * This function handles up to TILE_NET_BATCH idescs per call. | ||
489 | * | ||
490 | * ISSUE: Since we do not provide new buffers until this function is | ||
491 | * complete, we must initially provide enough buffers for each network | ||
492 | * cpu to fill its iqueue and also its batched idescs. | ||
493 | * | ||
494 | * ISSUE: The "rotting packet" race condition occurs if a packet | ||
495 | * arrives after the queue appears to be empty, and before the | ||
496 | * hypervisor interrupt is re-enabled. | ||
497 | */ | ||
498 | static int tile_net_poll(struct napi_struct *napi, int budget) | ||
499 | { | ||
500 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
501 | unsigned int work = 0; | ||
502 | gxio_mpipe_idesc_t *idesc; | ||
503 | int i, n; | ||
504 | |||
505 | /* Process packets. */ | ||
506 | while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { | ||
507 | for (i = 0; i < n; i++) { | ||
508 | if (i == TILE_NET_BATCH) | ||
509 | goto done; | ||
510 | if (tile_net_handle_packet(idesc + i)) { | ||
511 | if (++work >= budget) | ||
512 | goto done; | ||
513 | } | ||
514 | } | ||
515 | } | ||
516 | |||
517 | /* There are no packets left. */ | ||
518 | napi_complete(&info->napi); | ||
519 | |||
520 | /* Re-enable hypervisor interrupts. */ | ||
521 | gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); | ||
522 | |||
523 | /* HACK: Avoid the "rotting packet" problem. */ | ||
524 | if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) | ||
525 | napi_schedule(&info->napi); | ||
526 | |||
527 | /* ISSUE: Handle completions? */ | ||
528 | |||
529 | done: | ||
530 | tile_net_provide_needed_buffers(); | ||
531 | |||
532 | return work; | ||
533 | } | ||
534 | |||
535 | /* Handle an ingress interrupt on the current cpu. */ | ||
536 | static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) | ||
537 | { | ||
538 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
539 | napi_schedule(&info->napi); | ||
540 | return IRQ_HANDLED; | ||
541 | } | ||
542 | |||
543 | /* Free some completions. This must be called with interrupts blocked. */ | ||
544 | static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, | ||
545 | struct tile_net_comps *comps, | ||
546 | int limit, bool force_update) | ||
547 | { | ||
548 | int n = 0; | ||
549 | while (comps->comp_last < comps->comp_next) { | ||
550 | unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; | ||
551 | struct tile_net_comp *comp = &comps->comp_queue[cid]; | ||
552 | if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, | ||
553 | force_update || n == 0)) | ||
554 | break; | ||
555 | dev_kfree_skb_irq(comp->skb); | ||
556 | comps->comp_last++; | ||
557 | if (++n == limit) | ||
558 | break; | ||
559 | } | ||
560 | return n; | ||
561 | } | ||
562 | |||
563 | /* Add a completion. This must be called with interrupts blocked. | ||
564 | * tile_net_equeue_try_reserve() will have ensured a free completion entry. | ||
565 | */ | ||
566 | static void add_comp(gxio_mpipe_equeue_t *equeue, | ||
567 | struct tile_net_comps *comps, | ||
568 | uint64_t when, struct sk_buff *skb) | ||
569 | { | ||
570 | int cid = comps->comp_next % TILE_NET_MAX_COMPS; | ||
571 | comps->comp_queue[cid].when = when; | ||
572 | comps->comp_queue[cid].skb = skb; | ||
573 | comps->comp_next++; | ||
574 | } | ||
575 | |||
576 | static void tile_net_schedule_tx_wake_timer(struct net_device *dev) | ||
577 | { | ||
578 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
579 | struct tile_net_priv *priv = netdev_priv(dev); | ||
580 | |||
581 | hrtimer_start(&info->tx_wake[priv->echannel].timer, | ||
582 | ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), | ||
583 | HRTIMER_MODE_REL_PINNED); | ||
584 | } | ||
585 | |||
586 | static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) | ||
587 | { | ||
588 | struct tile_net_tx_wake *tx_wake = | ||
589 | container_of(t, struct tile_net_tx_wake, timer); | ||
590 | netif_wake_subqueue(tx_wake->dev, smp_processor_id()); | ||
591 | return HRTIMER_NORESTART; | ||
592 | } | ||
593 | |||
594 | /* Make sure the egress timer is scheduled. */ | ||
595 | static void tile_net_schedule_egress_timer(void) | ||
596 | { | ||
597 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
598 | |||
599 | if (!info->egress_timer_scheduled) { | ||
600 | hrtimer_start(&info->egress_timer, | ||
601 | ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), | ||
602 | HRTIMER_MODE_REL_PINNED); | ||
603 | info->egress_timer_scheduled = true; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | /* The "function" for "info->egress_timer". | ||
608 | * | ||
609 | * This timer will reschedule itself as long as there are any pending | ||
610 | * completions expected for this tile. | ||
611 | */ | ||
612 | static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) | ||
613 | { | ||
614 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
615 | unsigned long irqflags; | ||
616 | bool pending = false; | ||
617 | int i; | ||
618 | |||
619 | local_irq_save(irqflags); | ||
620 | |||
621 | /* The timer is no longer scheduled. */ | ||
622 | info->egress_timer_scheduled = false; | ||
623 | |||
624 | /* Free all possible comps for this tile. */ | ||
625 | for (i = 0; i < TILE_NET_CHANNELS; i++) { | ||
626 | struct tile_net_egress *egress = &egress_for_echannel[i]; | ||
627 | struct tile_net_comps *comps = info->comps_for_echannel[i]; | ||
628 | if (comps->comp_last >= comps->comp_next) | ||
629 | continue; | ||
630 | tile_net_free_comps(egress->equeue, comps, -1, true); | ||
631 | pending = pending || (comps->comp_last < comps->comp_next); | ||
632 | } | ||
633 | |||
634 | /* Reschedule timer if needed. */ | ||
635 | if (pending) | ||
636 | tile_net_schedule_egress_timer(); | ||
637 | |||
638 | local_irq_restore(irqflags); | ||
639 | |||
640 | return HRTIMER_NORESTART; | ||
641 | } | ||
642 | |||
643 | /* Helper function for "tile_net_update()". | ||
644 | * "dev" (i.e. arg) is the device being brought up or down, | ||
645 | * or NULL if all devices are now down. | ||
646 | */ | ||
647 | static void tile_net_update_cpu(void *arg) | ||
648 | { | ||
649 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
650 | struct net_device *dev = arg; | ||
651 | |||
652 | if (!info->has_iqueue) | ||
653 | return; | ||
654 | |||
655 | if (dev != NULL) { | ||
656 | if (!info->napi_added) { | ||
657 | netif_napi_add(dev, &info->napi, | ||
658 | tile_net_poll, TILE_NET_WEIGHT); | ||
659 | info->napi_added = true; | ||
660 | } | ||
661 | if (!info->napi_enabled) { | ||
662 | napi_enable(&info->napi); | ||
663 | info->napi_enabled = true; | ||
664 | } | ||
665 | enable_percpu_irq(ingress_irq, 0); | ||
666 | } else { | ||
667 | disable_percpu_irq(ingress_irq); | ||
668 | if (info->napi_enabled) { | ||
669 | napi_disable(&info->napi); | ||
670 | info->napi_enabled = false; | ||
671 | } | ||
672 | /* FIXME: Drain the iqueue. */ | ||
673 | } | ||
674 | } | ||
675 | |||
676 | /* Helper function for tile_net_open() and tile_net_stop(). | ||
677 | * Always called under tile_net_devs_for_channel_mutex. | ||
678 | */ | ||
679 | static int tile_net_update(struct net_device *dev) | ||
680 | { | ||
681 | static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ | ||
682 | bool saw_channel = false; | ||
683 | int channel; | ||
684 | int rc; | ||
685 | int cpu; | ||
686 | |||
687 | gxio_mpipe_rules_init(&rules, &context); | ||
688 | |||
689 | for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { | ||
690 | if (tile_net_devs_for_channel[channel] == NULL) | ||
691 | continue; | ||
692 | if (!saw_channel) { | ||
693 | saw_channel = true; | ||
694 | gxio_mpipe_rules_begin(&rules, first_bucket, | ||
695 | num_buckets, NULL); | ||
696 | gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); | ||
697 | } | ||
698 | gxio_mpipe_rules_add_channel(&rules, channel); | ||
699 | } | ||
700 | |||
701 | /* NOTE: This can fail if there is no classifier. | ||
702 | * ISSUE: Can anything else cause it to fail? | ||
703 | */ | ||
704 | rc = gxio_mpipe_rules_commit(&rules); | ||
705 | if (rc != 0) { | ||
706 | netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); | ||
707 | return -EIO; | ||
708 | } | ||
709 | |||
710 | /* Update all cpus, sequentially (to protect "netif_napi_add()"). */ | ||
711 | for_each_online_cpu(cpu) | ||
712 | smp_call_function_single(cpu, tile_net_update_cpu, | ||
713 | (saw_channel ? dev : NULL), 1); | ||
714 | |||
715 | /* HACK: Allow packets to flow in the simulator. */ | ||
716 | if (saw_channel) | ||
717 | sim_enable_mpipe_links(0, -1); | ||
718 | |||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | /* Allocate and initialize mpipe buffer stacks, and register them in | ||
723 | * the mPIPE TLBs, for both small and large packet sizes. | ||
724 | * This routine supports tile_net_init_mpipe(), below. | ||
725 | */ | ||
726 | static int init_buffer_stacks(struct net_device *dev, int num_buffers) | ||
727 | { | ||
728 | pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); | ||
729 | int rc; | ||
730 | |||
731 | /* Compute stack bytes; we round up to 64KB and then use | ||
732 | * alloc_pages() so we get the required 64KB alignment as well. | ||
733 | */ | ||
734 | buffer_stack_size = | ||
735 | ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), | ||
736 | 64 * 1024); | ||
737 | |||
738 | /* Allocate two buffer stack indices. */ | ||
739 | rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); | ||
740 | if (rc < 0) { | ||
741 | netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", | ||
742 | rc); | ||
743 | return rc; | ||
744 | } | ||
745 | small_buffer_stack = rc; | ||
746 | large_buffer_stack = rc + 1; | ||
747 | |||
748 | /* Allocate the small buffer stack. */ | ||
749 | small_buffer_stack_va = | ||
750 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
751 | if (small_buffer_stack_va == NULL) { | ||
752 | netdev_err(dev, | ||
753 | "Could not alloc %zd bytes for buffer stacks\n", | ||
754 | buffer_stack_size); | ||
755 | return -ENOMEM; | ||
756 | } | ||
757 | rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, | ||
758 | BUFFER_SIZE_SMALL_ENUM, | ||
759 | small_buffer_stack_va, | ||
760 | buffer_stack_size, 0); | ||
761 | if (rc != 0) { | ||
762 | netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", rc); | ||
763 | return rc; | ||
764 | } | ||
765 | rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, | ||
766 | hash_pte, 0); | ||
767 | if (rc != 0) { | ||
768 | netdev_err(dev, | ||
769 | "gxio_mpipe_register_client_memory failed: %d\n", | ||
770 | rc); | ||
771 | return rc; | ||
772 | } | ||
773 | |||
774 | /* Allocate the large buffer stack. */ | ||
775 | large_buffer_stack_va = | ||
776 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
777 | if (large_buffer_stack_va == NULL) { | ||
778 | netdev_err(dev, | ||
779 | "Could not alloc %zd bytes for buffer stacks\n", | ||
780 | buffer_stack_size); | ||
781 | return -ENOMEM; | ||
782 | } | ||
783 | rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, | ||
784 | BUFFER_SIZE_LARGE_ENUM, | ||
785 | large_buffer_stack_va, | ||
786 | buffer_stack_size, 0); | ||
787 | if (rc != 0) { | ||
788 | netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", | ||
789 | rc); | ||
790 | return rc; | ||
791 | } | ||
792 | rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, | ||
793 | hash_pte, 0); | ||
794 | if (rc != 0) { | ||
795 | netdev_err(dev, | ||
796 | "gxio_mpipe_register_client_memory failed: %d\n", | ||
797 | rc); | ||
798 | return rc; | ||
799 | } | ||
800 | |||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | /* Allocate per-cpu resources (memory for completions and idescs). | ||
805 | * This routine supports tile_net_init_mpipe(), below. | ||
806 | */ | ||
807 | static int alloc_percpu_mpipe_resources(struct net_device *dev, | ||
808 | int cpu, int ring) | ||
809 | { | ||
810 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
811 | int order, i, rc; | ||
812 | struct page *page; | ||
813 | void *addr; | ||
814 | |||
815 | /* Allocate the "comps". */ | ||
816 | order = get_order(COMPS_SIZE); | ||
817 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | ||
818 | if (page == NULL) { | ||
819 | netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", | ||
820 | COMPS_SIZE); | ||
821 | return -ENOMEM; | ||
822 | } | ||
823 | addr = pfn_to_kaddr(page_to_pfn(page)); | ||
824 | memset(addr, 0, COMPS_SIZE); | ||
825 | for (i = 0; i < TILE_NET_CHANNELS; i++) | ||
826 | info->comps_for_echannel[i] = | ||
827 | addr + i * sizeof(struct tile_net_comps); | ||
828 | |||
829 | /* If this is a network cpu, create an iqueue. */ | ||
830 | if (cpu_isset(cpu, network_cpus_map)) { | ||
831 | order = get_order(NOTIF_RING_SIZE); | ||
832 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | ||
833 | if (page == NULL) { | ||
834 | netdev_err(dev, | ||
835 | "Failed to alloc %zd bytes iqueue memory\n", | ||
836 | NOTIF_RING_SIZE); | ||
837 | return -ENOMEM; | ||
838 | } | ||
839 | addr = pfn_to_kaddr(page_to_pfn(page)); | ||
840 | rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, | ||
841 | addr, NOTIF_RING_SIZE, 0); | ||
842 | if (rc < 0) { | ||
843 | netdev_err(dev, | ||
844 | "gxio_mpipe_iqueue_init failed: %d\n", rc); | ||
845 | return rc; | ||
846 | } | ||
847 | info->has_iqueue = true; | ||
848 | } | ||
849 | |||
850 | return ring; | ||
851 | } | ||
852 | |||
853 | /* Initialize NotifGroup and buckets. | ||
854 | * This routine supports tile_net_init_mpipe(), below. | ||
855 | */ | ||
856 | static int init_notif_group_and_buckets(struct net_device *dev, | ||
857 | int ring, int network_cpus_count) | ||
858 | { | ||
859 | int group, rc; | ||
860 | |||
861 | /* Allocate one NotifGroup. */ | ||
862 | rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); | ||
863 | if (rc < 0) { | ||
864 | netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", | ||
865 | rc); | ||
866 | return rc; | ||
867 | } | ||
868 | group = rc; | ||
869 | |||
870 | /* Initialize global num_buckets value. */ | ||
871 | if (network_cpus_count > 4) | ||
872 | num_buckets = 256; | ||
873 | else if (network_cpus_count > 1) | ||
874 | num_buckets = 16; | ||
875 | |||
876 | /* Allocate some buckets, and set global first_bucket value. */ | ||
877 | rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); | ||
878 | if (rc < 0) { | ||
879 | netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); | ||
880 | return rc; | ||
881 | } | ||
882 | first_bucket = rc; | ||
883 | |||
884 | /* Init group and buckets. */ | ||
885 | rc = gxio_mpipe_init_notif_group_and_buckets( | ||
886 | &context, group, ring, network_cpus_count, | ||
887 | first_bucket, num_buckets, | ||
888 | GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); | ||
889 | if (rc != 0) { | ||
890 | netdev_err( | ||
891 | dev, | ||
892 | "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", | ||
893 | rc); | ||
894 | return rc; | ||
895 | } | ||
896 | |||
897 | return 0; | ||
898 | } | ||
899 | |||
900 | /* Create an irq and register it, then activate the irq and request | ||
901 | * interrupts on all cores. Note that "ingress_irq" being initialized | ||
902 | * is how we know not to call tile_net_init_mpipe() again. | ||
903 | * This routine supports tile_net_init_mpipe(), below. | ||
904 | */ | ||
905 | static int tile_net_setup_interrupts(struct net_device *dev) | ||
906 | { | ||
907 | int cpu, rc; | ||
908 | |||
909 | rc = create_irq(); | ||
910 | if (rc < 0) { | ||
911 | netdev_err(dev, "create_irq failed: %d\n", rc); | ||
912 | return rc; | ||
913 | } | ||
914 | ingress_irq = rc; | ||
915 | tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); | ||
916 | rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, | ||
917 | 0, NULL, NULL); | ||
918 | if (rc != 0) { | ||
919 | netdev_err(dev, "request_irq failed: %d\n", rc); | ||
920 | destroy_irq(ingress_irq); | ||
921 | ingress_irq = -1; | ||
922 | return rc; | ||
923 | } | ||
924 | |||
925 | for_each_online_cpu(cpu) { | ||
926 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
927 | if (info->has_iqueue) { | ||
928 | gxio_mpipe_request_notif_ring_interrupt( | ||
929 | &context, cpu_x(cpu), cpu_y(cpu), | ||
930 | 1, ingress_irq, info->iqueue.ring); | ||
931 | } | ||
932 | } | ||
933 | |||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ | ||
938 | static void tile_net_init_mpipe_fail(void) | ||
939 | { | ||
940 | int cpu; | ||
941 | |||
942 | /* Do cleanups that require the mpipe context first. */ | ||
943 | if (small_buffer_stack >= 0) | ||
944 | tile_net_pop_all_buffers(small_buffer_stack); | ||
945 | if (large_buffer_stack >= 0) | ||
946 | tile_net_pop_all_buffers(large_buffer_stack); | ||
947 | |||
948 | /* Destroy mpipe context so the hardware no longer owns any memory. */ | ||
949 | gxio_mpipe_destroy(&context); | ||
950 | |||
951 | for_each_online_cpu(cpu) { | ||
952 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
953 | free_pages((unsigned long)(info->comps_for_echannel[0]), | ||
954 | get_order(COMPS_SIZE)); | ||
955 | info->comps_for_echannel[0] = NULL; | ||
956 | free_pages((unsigned long)(info->iqueue.idescs), | ||
957 | get_order(NOTIF_RING_SIZE)); | ||
958 | info->iqueue.idescs = NULL; | ||
959 | } | ||
960 | |||
961 | if (small_buffer_stack_va) | ||
962 | free_pages_exact(small_buffer_stack_va, buffer_stack_size); | ||
963 | if (large_buffer_stack_va) | ||
964 | free_pages_exact(large_buffer_stack_va, buffer_stack_size); | ||
965 | |||
966 | small_buffer_stack_va = NULL; | ||
967 | large_buffer_stack_va = NULL; | ||
968 | large_buffer_stack = -1; | ||
969 | small_buffer_stack = -1; | ||
970 | first_bucket = -1; | ||
971 | } | ||
972 | |||
973 | /* The first time any tilegx network device is opened, we initialize | ||
974 | * the global mpipe state. If this step fails, we fail to open the | ||
975 | * device, but if it succeeds, we never need to do it again, and since | ||
976 | * tile_net can't be unloaded, we never undo it. | ||
977 | * | ||
978 | * Note that some resources in this path (buffer stack indices, | ||
979 | * bindings from init_buffer_stack, etc.) are hypervisor resources | ||
980 | * that are freed implicitly by gxio_mpipe_destroy(). | ||
981 | */ | ||
982 | static int tile_net_init_mpipe(struct net_device *dev) | ||
983 | { | ||
984 | int i, num_buffers, rc; | ||
985 | int cpu; | ||
986 | int first_ring, ring; | ||
987 | int network_cpus_count = cpus_weight(network_cpus_map); | ||
988 | |||
989 | if (!hash_default) { | ||
990 | netdev_err(dev, "Networking requires hash_default!\n"); | ||
991 | return -EIO; | ||
992 | } | ||
993 | |||
994 | rc = gxio_mpipe_init(&context, 0); | ||
995 | if (rc != 0) { | ||
996 | netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); | ||
997 | return -EIO; | ||
998 | } | ||
999 | |||
1000 | /* Set up the buffer stacks. */ | ||
1001 | num_buffers = | ||
1002 | network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); | ||
1003 | rc = init_buffer_stacks(dev, num_buffers); | ||
1004 | if (rc != 0) | ||
1005 | goto fail; | ||
1006 | |||
1007 | /* Provide initial buffers. */ | ||
1008 | rc = -ENOMEM; | ||
1009 | for (i = 0; i < num_buffers; i++) { | ||
1010 | if (!tile_net_provide_buffer(true)) { | ||
1011 | netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); | ||
1012 | goto fail; | ||
1013 | } | ||
1014 | } | ||
1015 | for (i = 0; i < num_buffers; i++) { | ||
1016 | if (!tile_net_provide_buffer(false)) { | ||
1017 | netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); | ||
1018 | goto fail; | ||
1019 | } | ||
1020 | } | ||
1021 | |||
1022 | /* Allocate one NotifRing for each network cpu. */ | ||
1023 | rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); | ||
1024 | if (rc < 0) { | ||
1025 | netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", | ||
1026 | rc); | ||
1027 | goto fail; | ||
1028 | } | ||
1029 | |||
1030 | /* Init NotifRings per-cpu. */ | ||
1031 | first_ring = rc; | ||
1032 | ring = first_ring; | ||
1033 | for_each_online_cpu(cpu) { | ||
1034 | rc = alloc_percpu_mpipe_resources(dev, cpu, ring); | ||
1035 | if (rc < 0) | ||
1036 | goto fail; | ||
1037 | ring = rc; | ||
1038 | } | ||
1039 | |||
1040 | /* Initialize NotifGroup and buckets. */ | ||
1041 | rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); | ||
1042 | if (rc != 0) | ||
1043 | goto fail; | ||
1044 | |||
1045 | /* Create and enable interrupts. */ | ||
1046 | rc = tile_net_setup_interrupts(dev); | ||
1047 | if (rc != 0) | ||
1048 | goto fail; | ||
1049 | |||
1050 | return 0; | ||
1051 | |||
1052 | fail: | ||
1053 | tile_net_init_mpipe_fail(); | ||
1054 | return rc; | ||
1055 | } | ||
1056 | |||
1057 | /* Create persistent egress info for a given egress channel. | ||
1058 | * Note that this may be shared between, say, "gbe0" and "xgbe0". | ||
1059 | * ISSUE: Defer header allocation until TSO is actually needed? | ||
1060 | */ | ||
1061 | static int tile_net_init_egress(struct net_device *dev, int echannel) | ||
1062 | { | ||
1063 | struct page *headers_page, *edescs_page, *equeue_page; | ||
1064 | gxio_mpipe_edesc_t *edescs; | ||
1065 | gxio_mpipe_equeue_t *equeue; | ||
1066 | unsigned char *headers; | ||
1067 | int headers_order, edescs_order, equeue_order; | ||
1068 | size_t edescs_size; | ||
1069 | int edma; | ||
1070 | int rc = -ENOMEM; | ||
1071 | |||
1072 | /* Only initialize once. */ | ||
1073 | if (egress_for_echannel[echannel].equeue != NULL) | ||
1074 | return 0; | ||
1075 | |||
1076 | /* Allocate memory for the "headers". */ | ||
1077 | headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); | ||
1078 | headers_page = alloc_pages(GFP_KERNEL, headers_order); | ||
1079 | if (headers_page == NULL) { | ||
1080 | netdev_warn(dev, | ||
1081 | "Could not alloc %zd bytes for TSO headers.\n", | ||
1082 | PAGE_SIZE << headers_order); | ||
1083 | goto fail; | ||
1084 | } | ||
1085 | headers = pfn_to_kaddr(page_to_pfn(headers_page)); | ||
1086 | |||
1087 | /* Allocate memory for the "edescs". */ | ||
1088 | edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); | ||
1089 | edescs_order = get_order(edescs_size); | ||
1090 | edescs_page = alloc_pages(GFP_KERNEL, edescs_order); | ||
1091 | if (edescs_page == NULL) { | ||
1092 | netdev_warn(dev, | ||
1093 | "Could not alloc %zd bytes for eDMA ring.\n", | ||
1094 | edescs_size); | ||
1095 | goto fail_headers; | ||
1096 | } | ||
1097 | edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); | ||
1098 | |||
1099 | /* Allocate memory for the "equeue". */ | ||
1100 | equeue_order = get_order(sizeof(*equeue)); | ||
1101 | equeue_page = alloc_pages(GFP_KERNEL, equeue_order); | ||
1102 | if (equeue_page == NULL) { | ||
1103 | netdev_warn(dev, | ||
1104 | "Could not alloc %zd bytes for equeue info.\n", | ||
1105 | PAGE_SIZE << equeue_order); | ||
1106 | goto fail_edescs; | ||
1107 | } | ||
1108 | equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); | ||
1109 | |||
1110 | /* Allocate an edma ring. Note that in practice this can't | ||
1111 | * fail, which is good, because we will leak an edma ring if so. | ||
1112 | */ | ||
1113 | rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); | ||
1114 | if (rc < 0) { | ||
1115 | netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", | ||
1116 | rc); | ||
1117 | goto fail_equeue; | ||
1118 | } | ||
1119 | edma = rc; | ||
1120 | |||
1121 | /* Initialize the equeue. */ | ||
1122 | rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, | ||
1123 | edescs, edescs_size, 0); | ||
1124 | if (rc != 0) { | ||
1125 | netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); | ||
1126 | goto fail_equeue; | ||
1127 | } | ||
1128 | |||
1129 | /* Done. */ | ||
1130 | egress_for_echannel[echannel].equeue = equeue; | ||
1131 | egress_for_echannel[echannel].headers = headers; | ||
1132 | return 0; | ||
1133 | |||
1134 | fail_equeue: | ||
1135 | __free_pages(equeue_page, equeue_order); | ||
1136 | |||
1137 | fail_edescs: | ||
1138 | __free_pages(edescs_page, edescs_order); | ||
1139 | |||
1140 | fail_headers: | ||
1141 | __free_pages(headers_page, headers_order); | ||
1142 | |||
1143 | fail: | ||
1144 | return rc; | ||
1145 | } | ||
1146 | |||
1147 | /* Return channel number for a newly-opened link. */ | ||
1148 | static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, | ||
1149 | const char *link_name) | ||
1150 | { | ||
1151 | int rc = gxio_mpipe_link_open(link, &context, link_name, 0); | ||
1152 | if (rc < 0) { | ||
1153 | netdev_err(dev, "Failed to open '%s'\n", link_name); | ||
1154 | return rc; | ||
1155 | } | ||
1156 | rc = gxio_mpipe_link_channel(link); | ||
1157 | if (rc < 0 || rc >= TILE_NET_CHANNELS) { | ||
1158 | netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); | ||
1159 | gxio_mpipe_link_close(link); | ||
1160 | return -EINVAL; | ||
1161 | } | ||
1162 | return rc; | ||
1163 | } | ||
1164 | |||
1165 | /* Help the kernel activate the given network interface. */ | ||
1166 | static int tile_net_open(struct net_device *dev) | ||
1167 | { | ||
1168 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1169 | int cpu, rc; | ||
1170 | |||
1171 | mutex_lock(&tile_net_devs_for_channel_mutex); | ||
1172 | |||
1173 | /* Do one-time initialization the first time any device is opened. */ | ||
1174 | if (ingress_irq < 0) { | ||
1175 | rc = tile_net_init_mpipe(dev); | ||
1176 | if (rc != 0) | ||
1177 | goto fail; | ||
1178 | } | ||
1179 | |||
1180 | /* Determine if this is the "loopify" device. */ | ||
1181 | if (unlikely((loopify_link_name != NULL) && | ||
1182 | !strcmp(dev->name, loopify_link_name))) { | ||
1183 | rc = tile_net_link_open(dev, &priv->link, "loop0"); | ||
1184 | if (rc < 0) | ||
1185 | goto fail; | ||
1186 | priv->channel = rc; | ||
1187 | rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); | ||
1188 | if (rc < 0) | ||
1189 | goto fail; | ||
1190 | priv->loopify_channel = rc; | ||
1191 | priv->echannel = rc; | ||
1192 | } else { | ||
1193 | rc = tile_net_link_open(dev, &priv->link, dev->name); | ||
1194 | if (rc < 0) | ||
1195 | goto fail; | ||
1196 | priv->channel = rc; | ||
1197 | priv->echannel = rc; | ||
1198 | } | ||
1199 | |||
1200 | /* Initialize egress info (if needed). Once ever, per echannel. */ | ||
1201 | rc = tile_net_init_egress(dev, priv->echannel); | ||
1202 | if (rc != 0) | ||
1203 | goto fail; | ||
1204 | |||
1205 | tile_net_devs_for_channel[priv->channel] = dev; | ||
1206 | |||
1207 | rc = tile_net_update(dev); | ||
1208 | if (rc != 0) | ||
1209 | goto fail; | ||
1210 | |||
1211 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1212 | |||
1213 | /* Initialize the transmit wake timer for this device for each cpu. */ | ||
1214 | for_each_online_cpu(cpu) { | ||
1215 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
1216 | struct tile_net_tx_wake *tx_wake = | ||
1217 | &info->tx_wake[priv->echannel]; | ||
1218 | |||
1219 | hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, | ||
1220 | HRTIMER_MODE_REL); | ||
1221 | tx_wake->timer.function = tile_net_handle_tx_wake_timer; | ||
1222 | tx_wake->dev = dev; | ||
1223 | } | ||
1224 | |||
1225 | for_each_online_cpu(cpu) | ||
1226 | netif_start_subqueue(dev, cpu); | ||
1227 | netif_carrier_on(dev); | ||
1228 | return 0; | ||
1229 | |||
1230 | fail: | ||
1231 | if (priv->loopify_channel >= 0) { | ||
1232 | if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | ||
1233 | netdev_warn(dev, "Failed to close loopify link!\n"); | ||
1234 | priv->loopify_channel = -1; | ||
1235 | } | ||
1236 | if (priv->channel >= 0) { | ||
1237 | if (gxio_mpipe_link_close(&priv->link) != 0) | ||
1238 | netdev_warn(dev, "Failed to close link!\n"); | ||
1239 | tile_net_devs_for_channel[priv->channel] = NULL; | ||
1240 | priv->channel = -1; | ||
1241 | } | ||
1242 | priv->echannel = -1; | ||
1243 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1244 | |||
1245 | /* Don't return raw gxio error codes to generic Linux. */ | ||
1246 | return (rc > -512) ? rc : -EIO; | ||
1247 | } | ||
1248 | |||
1249 | /* Help the kernel deactivate the given network interface. */ | ||
1250 | static int tile_net_stop(struct net_device *dev) | ||
1251 | { | ||
1252 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1253 | int cpu; | ||
1254 | |||
1255 | for_each_online_cpu(cpu) { | ||
1256 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
1257 | struct tile_net_tx_wake *tx_wake = | ||
1258 | &info->tx_wake[priv->echannel]; | ||
1259 | |||
1260 | hrtimer_cancel(&tx_wake->timer); | ||
1261 | netif_stop_subqueue(dev, cpu); | ||
1262 | } | ||
1263 | |||
1264 | mutex_lock(&tile_net_devs_for_channel_mutex); | ||
1265 | tile_net_devs_for_channel[priv->channel] = NULL; | ||
1266 | (void)tile_net_update(dev); | ||
1267 | if (priv->loopify_channel >= 0) { | ||
1268 | if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | ||
1269 | netdev_warn(dev, "Failed to close loopify link!\n"); | ||
1270 | priv->loopify_channel = -1; | ||
1271 | } | ||
1272 | if (priv->channel >= 0) { | ||
1273 | if (gxio_mpipe_link_close(&priv->link) != 0) | ||
1274 | netdev_warn(dev, "Failed to close link!\n"); | ||
1275 | priv->channel = -1; | ||
1276 | } | ||
1277 | priv->echannel = -1; | ||
1278 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1279 | |||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | /* Determine the VA for a fragment. */ | ||
1284 | static inline void *tile_net_frag_buf(skb_frag_t *f) | ||
1285 | { | ||
1286 | unsigned long pfn = page_to_pfn(skb_frag_page(f)); | ||
1287 | return pfn_to_kaddr(pfn) + f->page_offset; | ||
1288 | } | ||
1289 | |||
1290 | /* Acquire a completion entry and an egress slot, or if we can't, | ||
1291 | * stop the queue and schedule the tx_wake timer. | ||
1292 | */ | ||
1293 | static s64 tile_net_equeue_try_reserve(struct net_device *dev, | ||
1294 | struct tile_net_comps *comps, | ||
1295 | gxio_mpipe_equeue_t *equeue, | ||
1296 | int num_edescs) | ||
1297 | { | ||
1298 | /* Try to acquire a completion entry. */ | ||
1299 | if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || | ||
1300 | tile_net_free_comps(equeue, comps, 32, false) != 0) { | ||
1301 | |||
1302 | /* Try to acquire an egress slot. */ | ||
1303 | s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | ||
1304 | if (slot >= 0) | ||
1305 | return slot; | ||
1306 | |||
1307 | /* Freeing some completions gives the equeue time to drain. */ | ||
1308 | tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); | ||
1309 | |||
1310 | slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | ||
1311 | if (slot >= 0) | ||
1312 | return slot; | ||
1313 | } | ||
1314 | |||
1315 | /* Still nothing; give up and stop the queue for a short while. */ | ||
1316 | netif_stop_subqueue(dev, smp_processor_id()); | ||
1317 | tile_net_schedule_tx_wake_timer(dev); | ||
1318 | return -1; | ||
1319 | } | ||
1320 | |||
1321 | /* Determine how many edesc's are needed for TSO. | ||
1322 | * | ||
1323 | * Sometimes, if "sendfile()" requires copying, we will be called with | ||
1324 | * "data" containing the header and payload, with "frags" being empty. | ||
1325 | * Sometimes, for example when using NFS over TCP, a single segment can | ||
1326 | * span 3 fragments. This requires special care. | ||
1327 | */ | ||
1328 | static int tso_count_edescs(struct sk_buff *skb) | ||
1329 | { | ||
1330 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1331 | unsigned int data_len = skb->data_len; | ||
1332 | unsigned int p_len = sh->gso_size; | ||
1333 | long f_id = -1; /* id of the current fragment */ | ||
1334 | long f_size = -1; /* size of the current fragment */ | ||
1335 | long f_used = -1; /* bytes used from the current fragment */ | ||
1336 | long n; /* size of the current piece of payload */ | ||
1337 | int num_edescs = 0; | ||
1338 | int segment; | ||
1339 | |||
1340 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1341 | |||
1342 | unsigned int p_used = 0; | ||
1343 | |||
1344 | /* One edesc for header and for each piece of the payload. */ | ||
1345 | for (num_edescs++; p_used < p_len; num_edescs++) { | ||
1346 | |||
1347 | /* Advance as needed. */ | ||
1348 | while (f_used >= f_size) { | ||
1349 | f_id++; | ||
1350 | f_size = sh->frags[f_id].size; | ||
1351 | f_used = 0; | ||
1352 | } | ||
1353 | |||
1354 | /* Use bytes from the current fragment. */ | ||
1355 | n = p_len - p_used; | ||
1356 | if (n > f_size - f_used) | ||
1357 | n = f_size - f_used; | ||
1358 | f_used += n; | ||
1359 | p_used += n; | ||
1360 | } | ||
1361 | |||
1362 | /* The last segment may be less than gso_size. */ | ||
1363 | data_len -= p_len; | ||
1364 | if (data_len < p_len) | ||
1365 | p_len = data_len; | ||
1366 | } | ||
1367 | |||
1368 | return num_edescs; | ||
1369 | } | ||
1370 | |||
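A concrete example makes the counting loop easier to follow. Assume (hypothetically) gso_size = 1400 and 4200 bytes of payload spread over three fragments of 1000, 2000 and 1200 bytes, with the linear area holding only headers: segment 0 straddles frags 0 and 1 (1 header + 2 payload edescs), segment 1 fits inside frag 1 (1 + 1), and segment 2 straddles frags 1 and 2 (1 + 2), for 8 edescs in total. The standalone C sketch below mirrors the loop structure above and prints 8:

    #include <stdio.h>

    static int count_edescs(long gso_size, long total, const long *frag)
    {
    	long p_len = gso_size, data_len = total;
    	long f_id = -1, f_size = -1, f_used = -1, n;
    	long gso_segs = (total + gso_size - 1) / gso_size;
    	long segment;
    	int num_edescs = 0;

    	for (segment = 0; segment < gso_segs; segment++) {
    		long p_used = 0;

    		/* One edesc for the header, one per piece of payload. */
    		for (num_edescs++; p_used < p_len; num_edescs++) {
    			while (f_used >= f_size) {
    				f_id++;
    				f_size = frag[f_id];
    				f_used = 0;
    			}
    			n = p_len - p_used;
    			if (n > f_size - f_used)
    				n = f_size - f_used;
    			f_used += n;
    			p_used += n;
    		}

    		/* The last segment may be shorter than gso_size. */
    		data_len -= p_len;
    		if (data_len < p_len)
    			p_len = data_len;
    	}
    	return num_edescs;
    }

    int main(void)
    {
    	const long frags[] = { 1000, 2000, 1200 };

    	printf("%d\n", count_edescs(1400, 4200, frags));
    	return 0;
    }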
1371 | /* Prepare modified copies of the skbuff headers. | ||
1372 | * FIXME: add support for IPv6. | ||
1373 | */ | ||
1374 | static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, | ||
1375 | s64 slot) | ||
1376 | { | ||
1377 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1378 | struct iphdr *ih; | ||
1379 | struct tcphdr *th; | ||
1380 | unsigned int data_len = skb->data_len; | ||
1381 | unsigned char *data = skb->data; | ||
1382 | unsigned int ih_off, th_off, sh_len, p_len; | ||
1383 | unsigned int isum_seed, tsum_seed, id, seq; | ||
1384 | long f_id = -1; /* id of the current fragment */ | ||
1385 | long f_size = -1; /* size of the current fragment */ | ||
1386 | long f_used = -1; /* bytes used from the current fragment */ | ||
1387 | long n; /* size of the current piece of payload */ | ||
1388 | int segment; | ||
1389 | |||
1390 | /* Locate original headers and compute various lengths. */ | ||
1391 | ih = ip_hdr(skb); | ||
1392 | th = tcp_hdr(skb); | ||
1393 | ih_off = skb_network_offset(skb); | ||
1394 | th_off = skb_transport_offset(skb); | ||
1395 | sh_len = th_off + tcp_hdrlen(skb); | ||
1396 | p_len = sh->gso_size; | ||
1397 | |||
1398 | /* Set up seed values for IP and TCP csum and initialize id and seq. */ | ||
1399 | isum_seed = ((0xFFFF - ih->check) + | ||
1400 | (0xFFFF - ih->tot_len) + | ||
1401 | (0xFFFF - ih->id)); | ||
1402 | tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); | ||
1403 | id = ntohs(ih->id); | ||
1404 | seq = ntohl(th->seq); | ||
1405 | |||
1406 | /* Prepare all the headers. */ | ||
1407 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1408 | unsigned char *buf; | ||
1409 | unsigned int p_used = 0; | ||
1410 | |||
1411 | /* Copy to the header memory for this segment. */ | ||
1412 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
1413 | NET_IP_ALIGN; | ||
1414 | memcpy(buf, data, sh_len); | ||
1415 | |||
1416 | /* Update copied ip header. */ | ||
1417 | ih = (struct iphdr *)(buf + ih_off); | ||
1418 | ih->tot_len = htons(sh_len + p_len - ih_off); | ||
1419 | ih->id = htons(id); | ||
1420 | ih->check = csum_long(isum_seed + ih->tot_len + | ||
1421 | ih->id) ^ 0xffff; | ||
1422 | |||
1423 | /* Update copied tcp header. */ | ||
1424 | th = (struct tcphdr *)(buf + th_off); | ||
1425 | th->seq = htonl(seq); | ||
1426 | th->check = csum_long(tsum_seed + htons(sh_len + p_len)); | ||
1427 | if (segment != sh->gso_segs - 1) { | ||
1428 | th->fin = 0; | ||
1429 | th->psh = 0; | ||
1430 | } | ||
1431 | |||
1432 | /* Skip past the header. */ | ||
1433 | slot++; | ||
1434 | |||
1435 | /* Skip past the payload. */ | ||
1436 | while (p_used < p_len) { | ||
1437 | |||
1438 | /* Advance as needed. */ | ||
1439 | while (f_used >= f_size) { | ||
1440 | f_id++; | ||
1441 | f_size = sh->frags[f_id].size; | ||
1442 | f_used = 0; | ||
1443 | } | ||
1444 | |||
1445 | /* Use bytes from the current fragment. */ | ||
1446 | n = p_len - p_used; | ||
1447 | if (n > f_size - f_used) | ||
1448 | n = f_size - f_used; | ||
1449 | f_used += n; | ||
1450 | p_used += n; | ||
1451 | |||
1452 | slot++; | ||
1453 | } | ||
1454 | |||
1455 | id++; | ||
1456 | seq += p_len; | ||
1457 | |||
1458 | /* The last segment may be less than gso_size. */ | ||
1459 | data_len -= p_len; | ||
1460 | if (data_len < p_len) | ||
1461 | p_len = data_len; | ||
1462 | } | ||
1463 | |||
1464 | /* Flush the headers so they are ready for hardware DMA. */ | ||
1465 | wmb(); | ||
1466 | } | ||
1467 | |||
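To see what those per-segment fixups produce, take a hypothetical 14-byte Ethernet + 20-byte IP + 20-byte TCP header (sh_len = 54, ih_off = 14), an initial IP id of 0x1234, an initial TCP sequence number of 1000, gso_size = 1400 and 4200 bytes of payload: every segment gets tot_len = 54 + 1400 - 14 = 1440, the IP id increments by one per segment, the sequence number advances by 1400, and FIN/PSH are cleared on all but the last segment. A small user-space C sketch of just that bookkeeping (checksum seeding omitted):

    #include <stdio.h>

    int main(void)
    {
    	const unsigned int ih_off = 14, sh_len = 54, gso_size = 1400;
    	unsigned int id = 0x1234, seq = 1000;
    	unsigned int data_len = 4200, p_len = gso_size;
    	int segment, gso_segs = 3;

    	for (segment = 0; segment < gso_segs; segment++) {
    		printf("seg %d: ip.id=0x%04x ip.tot_len=%u tcp.seq=%u%s\n",
    		       segment, id, sh_len + p_len - ih_off, seq,
    		       segment != gso_segs - 1 ? "  (FIN/PSH cleared)" : "");
    		id++;
    		seq += p_len;

    		/* The last segment may be shorter than gso_size. */
    		data_len -= p_len;
    		if (data_len < p_len)
    			p_len = data_len;
    	}
    	return 0;
    }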
1468 | /* Pass all the data to mpipe for egress. */ | ||
1469 | static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | ||
1470 | struct sk_buff *skb, unsigned char *headers, s64 slot) | ||
1471 | { | ||
1472 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1473 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1474 | unsigned int data_len = skb->data_len; | ||
1475 | unsigned int p_len = sh->gso_size; | ||
1476 | gxio_mpipe_edesc_t edesc_head = { { 0 } }; | ||
1477 | gxio_mpipe_edesc_t edesc_body = { { 0 } }; | ||
1478 | long f_id = -1; /* id of the current fragment */ | ||
1479 | long f_size = -1; /* size of the current fragment */ | ||
1480 | long f_used = -1; /* bytes used from the current fragment */ | ||
1481 | long n; /* size of the current piece of payload */ | ||
1482 | unsigned long tx_packets = 0, tx_bytes = 0; | ||
1483 | unsigned int csum_start, sh_len; | ||
1484 | int segment; | ||
1485 | |||
1486 | /* Prepare to egress the headers: set up header edesc. */ | ||
1487 | csum_start = skb_checksum_start_offset(skb); | ||
1488 | sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
1489 | edesc_head.csum = 1; | ||
1490 | edesc_head.csum_start = csum_start; | ||
1491 | edesc_head.csum_dest = csum_start + skb->csum_offset; | ||
1492 | edesc_head.xfer_size = sh_len; | ||
1493 | |||
1494 | /* This is only used to specify the TLB. */ | ||
1495 | edesc_head.stack_idx = large_buffer_stack; | ||
1496 | edesc_body.stack_idx = large_buffer_stack; | ||
1497 | |||
1498 | /* Egress all the edescs. */ | ||
1499 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1500 | void *va; | ||
1501 | unsigned char *buf; | ||
1502 | unsigned int p_used = 0; | ||
1503 | |||
1504 | /* Egress the header. */ | ||
1505 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
1506 | NET_IP_ALIGN; | ||
1507 | edesc_head.va = va_to_tile_io_addr(buf); | ||
1508 | gxio_mpipe_equeue_put_at(equeue, edesc_head, slot); | ||
1509 | slot++; | ||
1510 | |||
1511 | /* Egress the payload. */ | ||
1512 | while (p_used < p_len) { | ||
1513 | |||
1514 | /* Advance as needed. */ | ||
1515 | while (f_used >= f_size) { | ||
1516 | f_id++; | ||
1517 | f_size = sh->frags[f_id].size; | ||
1518 | f_used = 0; | ||
1519 | } | ||
1520 | |||
1521 | va = tile_net_frag_buf(&sh->frags[f_id]) + f_used; | ||
1522 | |||
1523 | /* Use bytes from the current fragment. */ | ||
1524 | n = p_len - p_used; | ||
1525 | if (n > f_size - f_used) | ||
1526 | n = f_size - f_used; | ||
1527 | f_used += n; | ||
1528 | p_used += n; | ||
1529 | |||
1530 | /* Egress a piece of the payload. */ | ||
1531 | edesc_body.va = va_to_tile_io_addr(va); | ||
1532 | edesc_body.xfer_size = n; | ||
1533 | edesc_body.bound = !(p_used < p_len); | ||
1534 | gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); | ||
1535 | slot++; | ||
1536 | } | ||
1537 | |||
1538 | tx_packets++; | ||
1539 | tx_bytes += sh_len + p_len; | ||
1540 | |||
1541 | /* The last segment may be less than gso_size. */ | ||
1542 | data_len -= p_len; | ||
1543 | if (data_len < p_len) | ||
1544 | p_len = data_len; | ||
1545 | } | ||
1546 | |||
1547 | /* Update stats. */ | ||
1548 | tile_net_stats_add(tx_packets, &priv->stats.tx_packets); | ||
1549 | tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); | ||
1550 | } | ||
1551 | |||
1552 | /* Do "TSO" handling for egress. | ||
1553 | * | ||
1554 | * Normally drivers set NETIF_F_TSO only to support hardware TSO; | ||
1555 | * otherwise the stack uses scatter-gather to implement GSO in software. | ||
1556 | * In our testing, enabling GSO support (via NETIF_F_SG) drops network | ||
1557 | * performance down to around 7.5 Gbps on the 10G interfaces, although | ||
1558 | * also dropping cpu utilization way down, to under 8%. But | ||
1559 | * implementing "TSO" in the driver brings performance back up to line | ||
1560 | * rate, while dropping cpu usage even further, to less than 4%. In | ||
1561 | * practice, profiling of GSO shows that skb_segment() is what causes | ||
1562 | * the performance overheads; we benefit in the driver from using | ||
1563 | * preallocated memory to duplicate the TCP/IP headers. | ||
1564 | */ | ||
1565 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | ||
1566 | { | ||
1567 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1568 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1569 | int channel = priv->echannel; | ||
1570 | struct tile_net_egress *egress = &egress_for_echannel[channel]; | ||
1571 | struct tile_net_comps *comps = info->comps_for_echannel[channel]; | ||
1572 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
1573 | unsigned long irqflags; | ||
1574 | int num_edescs; | ||
1575 | s64 slot; | ||
1576 | |||
1577 | /* Determine how many mpipe edesc's are needed. */ | ||
1578 | num_edescs = tso_count_edescs(skb); | ||
1579 | |||
1580 | local_irq_save(irqflags); | ||
1581 | |||
1582 | /* Try to acquire a completion entry and an egress slot. */ | ||
1583 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
1584 | if (slot < 0) { | ||
1585 | local_irq_restore(irqflags); | ||
1586 | return NETDEV_TX_BUSY; | ||
1587 | } | ||
1588 | |||
1589 | /* Set up copies of header data properly. */ | ||
1590 | tso_headers_prepare(skb, egress->headers, slot); | ||
1591 | |||
1592 | /* Actually pass the data to the network hardware. */ | ||
1593 | tso_egress(dev, equeue, skb, egress->headers, slot); | ||
1594 | |||
1595 | /* Add a completion record. */ | ||
1596 | add_comp(equeue, comps, slot + num_edescs - 1, skb); | ||
1597 | |||
1598 | local_irq_restore(irqflags); | ||
1599 | |||
1600 | /* Make sure the egress timer is scheduled. */ | ||
1601 | tile_net_schedule_egress_timer(); | ||
1602 | |||
1603 | return NETDEV_TX_OK; | ||
1604 | } | ||
1605 | |||
1606 | /* Analyze the body and frags for a transmit request. */ | ||
1607 | static unsigned int tile_net_tx_frags(struct frag *frags, | ||
1608 | struct sk_buff *skb, | ||
1609 | void *b_data, unsigned int b_len) | ||
1610 | { | ||
1611 | unsigned int i, n = 0; | ||
1612 | |||
1613 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1614 | |||
1615 | if (b_len != 0) { | ||
1616 | frags[n].buf = b_data; | ||
1617 | frags[n++].length = b_len; | ||
1618 | } | ||
1619 | |||
1620 | for (i = 0; i < sh->nr_frags; i++) { | ||
1621 | skb_frag_t *f = &sh->frags[i]; | ||
1622 | frags[n].buf = tile_net_frag_buf(f); | ||
1623 | frags[n++].length = skb_frag_size(f); | ||
1624 | } | ||
1625 | |||
1626 | return n; | ||
1627 | } | ||
1628 | |||
1629 | /* Help the kernel transmit a packet. */ | ||
1630 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | ||
1631 | { | ||
1632 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1633 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1634 | struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; | ||
1635 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
1636 | struct tile_net_comps *comps = | ||
1637 | info->comps_for_echannel[priv->echannel]; | ||
1638 | unsigned int len = skb->len; | ||
1639 | unsigned char *data = skb->data; | ||
1640 | unsigned int num_edescs; | ||
1641 | struct frag frags[MAX_FRAGS]; | ||
1642 | gxio_mpipe_edesc_t edescs[MAX_FRAGS]; | ||
1643 | unsigned long irqflags; | ||
1644 | gxio_mpipe_edesc_t edesc = { { 0 } }; | ||
1645 | unsigned int i; | ||
1646 | s64 slot; | ||
1647 | |||
1648 | if (skb_is_gso(skb)) | ||
1649 | return tile_net_tx_tso(skb, dev); | ||
1650 | |||
1651 | num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | ||
1652 | |||
1653 | /* This is only used to specify the TLB. */ | ||
1654 | edesc.stack_idx = large_buffer_stack; | ||
1655 | |||
1656 | /* Prepare the edescs. */ | ||
1657 | for (i = 0; i < num_edescs; i++) { | ||
1658 | edesc.xfer_size = frags[i].length; | ||
1659 | edesc.va = va_to_tile_io_addr(frags[i].buf); | ||
1660 | edescs[i] = edesc; | ||
1661 | } | ||
1662 | |||
1663 | /* Mark the final edesc. */ | ||
1664 | edescs[num_edescs - 1].bound = 1; | ||
1665 | |||
1666 | /* Add checksum info to the initial edesc, if needed. */ | ||
1667 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1668 | unsigned int csum_start = skb_checksum_start_offset(skb); | ||
1669 | edescs[0].csum = 1; | ||
1670 | edescs[0].csum_start = csum_start; | ||
1671 | edescs[0].csum_dest = csum_start + skb->csum_offset; | ||
1672 | } | ||
1673 | |||
1674 | local_irq_save(irqflags); | ||
1675 | |||
1676 | /* Try to acquire a completion entry and an egress slot. */ | ||
1677 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
1678 | if (slot < 0) { | ||
1679 | local_irq_restore(irqflags); | ||
1680 | return NETDEV_TX_BUSY; | ||
1681 | } | ||
1682 | |||
1683 | for (i = 0; i < num_edescs; i++) | ||
1684 | gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); | ||
1685 | |||
1686 | /* Add a completion record. */ | ||
1687 | add_comp(equeue, comps, slot - 1, skb); | ||
1688 | |||
1689 | /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ | ||
1690 | tile_net_stats_add(1, &priv->stats.tx_packets); | ||
1691 | tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), | ||
1692 | &priv->stats.tx_bytes); | ||
1693 | |||
1694 | local_irq_restore(irqflags); | ||
1695 | |||
1696 | /* Make sure the egress timer is scheduled. */ | ||
1697 | tile_net_schedule_egress_timer(); | ||
1698 | |||
1699 | return NETDEV_TX_OK; | ||
1700 | } | ||
1701 | |||
1702 | /* Return subqueue id on this core (one per core). */ | ||
1703 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
1704 | { | ||
1705 | return smp_processor_id(); | ||
1706 | } | ||
1707 | |||
1708 | /* Deal with a transmit timeout. */ | ||
1709 | static void tile_net_tx_timeout(struct net_device *dev) | ||
1710 | { | ||
1711 | int cpu; | ||
1712 | |||
1713 | for_each_online_cpu(cpu) | ||
1714 | netif_wake_subqueue(dev, cpu); | ||
1715 | } | ||
1716 | |||
1717 | /* Ioctl commands. */ | ||
1718 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1719 | { | ||
1720 | return -EOPNOTSUPP; | ||
1721 | } | ||
1722 | |||
1723 | /* Get system network statistics for device. */ | ||
1724 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | ||
1725 | { | ||
1726 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1727 | return &priv->stats; | ||
1728 | } | ||
1729 | |||
1730 | /* Change the MTU. */ | ||
1731 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | ||
1732 | { | ||
1733 | if ((new_mtu < 68) || (new_mtu > 1500)) | ||
1734 | return -EINVAL; | ||
1735 | dev->mtu = new_mtu; | ||
1736 | return 0; | ||
1737 | } | ||
1738 | |||
1739 | /* Change the Ethernet address of the NIC. | ||
1740 | * | ||
1741 | * The hypervisor driver does not support changing MAC address. However, | ||
1742 | * the hardware does not do anything with the MAC address, so the address | ||
1743 | * which gets used on outgoing packets, and which is accepted on incoming | ||
1744 | * packets, is completely up to us. | ||
1745 | * | ||
1746 | * Returns 0 on success, negative on failure. | ||
1747 | */ | ||
1748 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | ||
1749 | { | ||
1750 | struct sockaddr *addr = p; | ||
1751 | |||
1752 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1753 | return -EINVAL; | ||
1754 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
1755 | return 0; | ||
1756 | } | ||
1757 | |||
1758 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1759 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
1760 | * without having to re-enable interrupts. It's not called while | ||
1761 | * the interrupt routine is executing. | ||
1762 | */ | ||
1763 | static void tile_net_netpoll(struct net_device *dev) | ||
1764 | { | ||
1765 | disable_percpu_irq(ingress_irq); | ||
1766 | tile_net_handle_ingress_irq(ingress_irq, NULL); | ||
1767 | enable_percpu_irq(ingress_irq, 0); | ||
1768 | } | ||
1769 | #endif | ||
1770 | |||
1771 | static const struct net_device_ops tile_net_ops = { | ||
1772 | .ndo_open = tile_net_open, | ||
1773 | .ndo_stop = tile_net_stop, | ||
1774 | .ndo_start_xmit = tile_net_tx, | ||
1775 | .ndo_select_queue = tile_net_select_queue, | ||
1776 | .ndo_do_ioctl = tile_net_ioctl, | ||
1777 | .ndo_get_stats = tile_net_get_stats, | ||
1778 | .ndo_change_mtu = tile_net_change_mtu, | ||
1779 | .ndo_tx_timeout = tile_net_tx_timeout, | ||
1780 | .ndo_set_mac_address = tile_net_set_mac_address, | ||
1781 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1782 | .ndo_poll_controller = tile_net_netpoll, | ||
1783 | #endif | ||
1784 | }; | ||
1785 | |||
1786 | /* The setup function. | ||
1787 | * | ||
1788 | * This uses ether_setup() to assign various fields in dev, including | ||
1789 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | ||
1790 | */ | ||
1791 | static void tile_net_setup(struct net_device *dev) | ||
1792 | { | ||
1793 | ether_setup(dev); | ||
1794 | dev->netdev_ops = &tile_net_ops; | ||
1795 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | ||
1796 | dev->features |= NETIF_F_LLTX; | ||
1797 | dev->features |= NETIF_F_HW_CSUM; | ||
1798 | dev->features |= NETIF_F_SG; | ||
1799 | dev->features |= NETIF_F_TSO; | ||
1800 | dev->mtu = 1500; | ||
1801 | } | ||
1802 | |||
1803 | /* Allocate the device structure, register the device, and obtain the | ||
1804 | * MAC address from the hypervisor. | ||
1805 | */ | ||
1806 | static void tile_net_dev_init(const char *name, const uint8_t *mac) | ||
1807 | { | ||
1808 | int ret; | ||
1809 | int i; | ||
1810 | int nz_addr = 0; | ||
1811 | struct net_device *dev; | ||
1812 | struct tile_net_priv *priv; | ||
1813 | |||
1814 | /* HACK: Ignore "loop" links. */ | ||
1815 | if (strncmp(name, "loop", 4) == 0) | ||
1816 | return; | ||
1817 | |||
1818 | /* Allocate the device structure. Normally, "name" is a | ||
1819 | * template, instantiated by register_netdev(), but not for us. | ||
1820 | */ | ||
1821 | dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup, | ||
1822 | NR_CPUS, 1); | ||
1823 | if (!dev) { | ||
1824 | pr_err("alloc_netdev_mqs(%s) failed\n", name); | ||
1825 | return; | ||
1826 | } | ||
1827 | |||
1828 | /* Initialize "priv". */ | ||
1829 | priv = netdev_priv(dev); | ||
1830 | memset(priv, 0, sizeof(*priv)); | ||
1831 | priv->dev = dev; | ||
1832 | priv->channel = -1; | ||
1833 | priv->loopify_channel = -1; | ||
1834 | priv->echannel = -1; | ||
1835 | |||
1836 | /* Get the MAC address and set it in the device struct; this must | ||
1837 | * be done before the device is opened. If the MAC is all zeroes, | ||
1838 | * we use a random address, since we're probably on the simulator. | ||
1839 | */ | ||
1840 | for (i = 0; i < 6; i++) | ||
1841 | nz_addr |= mac[i]; | ||
1842 | |||
1843 | if (nz_addr) { | ||
1844 | memcpy(dev->dev_addr, mac, 6); | ||
1845 | dev->addr_len = 6; | ||
1846 | } else { | ||
1847 | random_ether_addr(dev->dev_addr); | ||
1848 | } | ||
1849 | |||
1850 | /* Register the network device. */ | ||
1851 | ret = register_netdev(dev); | ||
1852 | if (ret) { | ||
1853 | netdev_err(dev, "register_netdev failed %d\n", ret); | ||
1854 | free_netdev(dev); | ||
1855 | return; | ||
1856 | } | ||
1857 | } | ||
1858 | |||
1859 | /* Per-cpu module initialization. */ | ||
1860 | static void tile_net_init_module_percpu(void *unused) | ||
1861 | { | ||
1862 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1863 | int my_cpu = smp_processor_id(); | ||
1864 | |||
1865 | info->has_iqueue = false; | ||
1866 | |||
1867 | info->my_cpu = my_cpu; | ||
1868 | |||
1869 | /* Initialize the egress timer. */ | ||
1870 | hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
1871 | info->egress_timer.function = tile_net_handle_egress_timer; | ||
1872 | } | ||
1873 | |||
1874 | /* Module initialization. */ | ||
1875 | static int __init tile_net_init_module(void) | ||
1876 | { | ||
1877 | int i; | ||
1878 | char name[GXIO_MPIPE_LINK_NAME_LEN]; | ||
1879 | uint8_t mac[6]; | ||
1880 | |||
1881 | pr_info("Tilera Network Driver\n"); | ||
1882 | |||
1883 | mutex_init(&tile_net_devs_for_channel_mutex); | ||
1884 | |||
1885 | /* Initialize each CPU. */ | ||
1886 | on_each_cpu(tile_net_init_module_percpu, NULL, 1); | ||
1887 | |||
1888 | /* Find out what devices we have, and initialize them. */ | ||
1889 | for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) | ||
1890 | tile_net_dev_init(name, mac); | ||
1891 | |||
1892 | if (!network_cpus_init()) | ||
1893 | network_cpus_map = *cpu_online_mask; | ||
1894 | |||
1895 | return 0; | ||
1896 | } | ||
1897 | |||
1898 | module_init(tile_net_init_module); | ||
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4ffcd57b011b..2857ab078aac 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -478,6 +478,7 @@ struct netvsc_device { | |||
478 | u32 nvsp_version; | 478 | u32 nvsp_version; |
479 | 479 | ||
480 | atomic_t num_outstanding_sends; | 480 | atomic_t num_outstanding_sends; |
481 | wait_queue_head_t wait_drain; | ||
481 | bool start_remove; | 482 | bool start_remove; |
482 | bool destroy; | 483 | bool destroy; |
483 | /* | 484 | /* |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 8b919471472f..0c569831db5a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) | |||
42 | if (!net_device) | 42 | if (!net_device) |
43 | return NULL; | 43 | return NULL; |
44 | 44 | ||
45 | init_waitqueue_head(&net_device->wait_drain); | ||
45 | net_device->start_remove = false; | 46 | net_device->start_remove = false; |
46 | net_device->destroy = false; | 47 | net_device->destroy = false; |
47 | net_device->dev = device; | 48 | net_device->dev = device; |
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device) | |||
387 | spin_unlock_irqrestore(&device->channel->inbound_lock, flags); | 388 | spin_unlock_irqrestore(&device->channel->inbound_lock, flags); |
388 | 389 | ||
389 | /* Wait for all send completions */ | 390 | /* Wait for all send completions */ |
390 | while (atomic_read(&net_device->num_outstanding_sends)) { | 391 | wait_event(net_device->wait_drain, |
391 | dev_info(&device->device, | 392 | atomic_read(&net_device->num_outstanding_sends) == 0); |
392 | "waiting for %d requests to complete...\n", | ||
393 | atomic_read(&net_device->num_outstanding_sends)); | ||
394 | udelay(100); | ||
395 | } | ||
396 | 393 | ||
397 | netvsc_disconnect_vsp(net_device); | 394 | netvsc_disconnect_vsp(net_device); |
398 | 395 | ||
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device, | |||
486 | num_outstanding_sends = | 483 | num_outstanding_sends = |
487 | atomic_dec_return(&net_device->num_outstanding_sends); | 484 | atomic_dec_return(&net_device->num_outstanding_sends); |
488 | 485 | ||
486 | if (net_device->destroy && num_outstanding_sends == 0) | ||
487 | wake_up(&net_device->wait_drain); | ||
488 | |||
489 | if (netif_queue_stopped(ndev) && !net_device->start_remove && | 489 | if (netif_queue_stopped(ndev) && !net_device->start_remove && |
490 | (hv_ringbuf_avail_percent(&device->channel->outbound) | 490 | (hv_ringbuf_avail_percent(&device->channel->outbound) |
491 | > RING_AVAIL_PERCENT_HIWATER || | 491 | > RING_AVAIL_PERCENT_HIWATER || |
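The netvsc change above replaces a udelay() polling loop with a waitqueue: the remover sleeps until the count of outstanding sends reaches zero, and the completion path issues a wake_up() only when a teardown is in progress. The same pattern in isolation, under hypothetical drain_* names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct drain_ctx {
    	atomic_t outstanding;           /* in-flight requests   */
    	bool destroy;                   /* teardown has started */
    	wait_queue_head_t wait_drain;   /* remover sleeps here  */
    };

    static void drain_ctx_init(struct drain_ctx *d)
    {
    	atomic_set(&d->outstanding, 0);
    	d->destroy = false;
    	init_waitqueue_head(&d->wait_drain);
    }

    /* Called from the completion path for each finished request. */
    static void drain_complete_one(struct drain_ctx *d)
    {
    	if (atomic_dec_return(&d->outstanding) == 0 && d->destroy)
    		wake_up(&d->wait_drain);
    }

    /* Called once by the remover, after new submissions are blocked. */
    static void drain_wait(struct drain_ctx *d)
    {
    	d->destroy = true;
    	wait_event(d->wait_drain, atomic_read(&d->outstanding) == 0);
    }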
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 5ac46f5226f3..47f8e8939266 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c | |||
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL"); | |||
41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ | 41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ |
42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ | 42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ |
43 | #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ | 43 | #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ |
44 | #define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */ | ||
45 | #define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED | ||
44 | 46 | ||
45 | static int ip175c_config_init(struct phy_device *phydev) | 47 | static int ip175c_config_init(struct phy_device *phydev) |
46 | { | 48 | { |
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev) | |||
136 | if (c < 0) | 138 | if (c < 0) |
137 | return c; | 139 | return c; |
138 | 140 | ||
141 | /* INTR pin used: speed/link/duplex will cause an interrupt */ | ||
142 | c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); | ||
143 | if (c < 0) | ||
144 | return c; | ||
145 | |||
139 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { | 146 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { |
140 | /* Additional delay (2ns) used to adjust RX clock phase | 147 | /* Additional delay (2ns) used to adjust RX clock phase |
141 | * at RGMII interface */ | 148 | * at RGMII interface */ |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 683ef1ce5519..5061608f408c 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np) | |||
96 | } | 96 | } |
97 | /** | 97 | /** |
98 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. | 98 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. |
99 | * @mdio_np: Pointer to the mii_bus. | 99 | * @mdio_bus_np: Pointer to the mii_bus. |
100 | * | 100 | * |
101 | * Returns a pointer to the mii_bus, or NULL if none found. | 101 | * Returns a pointer to the mii_bus, or NULL if none found. |
102 | * | 102 | * |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 3faef5670d1f..d75d1f56becf 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c | |||
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
946 | } | 946 | } |
947 | 947 | ||
948 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; | 948 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; |
949 | static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | 949 | static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { |
950 | .rx_urb_size = 8 * 1024, | 950 | .rx_urb_size = 8 * 1024, |
951 | .whitelist = { | 951 | .whitelist = { |
952 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), | 952 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), |
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | |||
954 | } | 954 | } |
955 | }; | 955 | }; |
956 | 956 | ||
957 | static const struct driver_info sierra_net_info_68A3 = { | 957 | static const struct driver_info sierra_net_info_direct_ip = { |
958 | .description = "Sierra Wireless USB-to-WWAN Modem", | 958 | .description = "Sierra Wireless USB-to-WWAN Modem", |
959 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, | 959 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, |
960 | .bind = sierra_net_bind, | 960 | .bind = sierra_net_bind, |
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = { | |||
962 | .status = sierra_net_status, | 962 | .status = sierra_net_status, |
963 | .rx_fixup = sierra_net_rx_fixup, | 963 | .rx_fixup = sierra_net_rx_fixup, |
964 | .tx_fixup = sierra_net_tx_fixup, | 964 | .tx_fixup = sierra_net_tx_fixup, |
965 | .data = (unsigned long)&sierra_net_info_data_68A3, | 965 | .data = (unsigned long)&sierra_net_info_data_direct_ip, |
966 | }; | 966 | }; |
967 | 967 | ||
968 | static const struct usb_device_id products[] = { | 968 | static const struct usb_device_id products[] = { |
969 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ | 969 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ |
970 | .driver_info = (unsigned long) &sierra_net_info_68A3}, | 970 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, |
971 | {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ | ||
972 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
973 | {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ | ||
974 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
975 | {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ | ||
976 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
971 | 977 | ||
972 | {}, /* last item */ | 978 | {}, /* last item */ |
973 | }; | 979 | }; |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5214b1eceb95..f18149ae2588 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444); | |||
42 | #define VIRTNET_DRIVER_VERSION "1.0.0" | 42 | #define VIRTNET_DRIVER_VERSION "1.0.0" |
43 | 43 | ||
44 | struct virtnet_stats { | 44 | struct virtnet_stats { |
45 | struct u64_stats_sync syncp; | 45 | struct u64_stats_sync tx_syncp; |
46 | struct u64_stats_sync rx_syncp; | ||
46 | u64 tx_bytes; | 47 | u64 tx_bytes; |
47 | u64 tx_packets; | 48 | u64 tx_packets; |
48 | 49 | ||
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len) | |||
300 | 301 | ||
301 | hdr = skb_vnet_hdr(skb); | 302 | hdr = skb_vnet_hdr(skb); |
302 | 303 | ||
303 | u64_stats_update_begin(&stats->syncp); | 304 | u64_stats_update_begin(&stats->rx_syncp); |
304 | stats->rx_bytes += skb->len; | 305 | stats->rx_bytes += skb->len; |
305 | stats->rx_packets++; | 306 | stats->rx_packets++; |
306 | u64_stats_update_end(&stats->syncp); | 307 | u64_stats_update_end(&stats->rx_syncp); |
307 | 308 | ||
308 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 309 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
309 | pr_debug("Needs csum!\n"); | 310 | pr_debug("Needs csum!\n"); |
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) | |||
565 | while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { | 566 | while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { |
566 | pr_debug("Sent skb %p\n", skb); | 567 | pr_debug("Sent skb %p\n", skb); |
567 | 568 | ||
568 | u64_stats_update_begin(&stats->syncp); | 569 | u64_stats_update_begin(&stats->tx_syncp); |
569 | stats->tx_bytes += skb->len; | 570 | stats->tx_bytes += skb->len; |
570 | stats->tx_packets++; | 571 | stats->tx_packets++; |
571 | u64_stats_update_end(&stats->syncp); | 572 | u64_stats_update_end(&stats->tx_syncp); |
572 | 573 | ||
573 | tot_sgs += skb_vnet_hdr(skb)->num_sg; | 574 | tot_sgs += skb_vnet_hdr(skb)->num_sg; |
574 | dev_kfree_skb_any(skb); | 575 | dev_kfree_skb_any(skb); |
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, | |||
703 | u64 tpackets, tbytes, rpackets, rbytes; | 704 | u64 tpackets, tbytes, rpackets, rbytes; |
704 | 705 | ||
705 | do { | 706 | do { |
706 | start = u64_stats_fetch_begin(&stats->syncp); | 707 | start = u64_stats_fetch_begin(&stats->tx_syncp); |
707 | tpackets = stats->tx_packets; | 708 | tpackets = stats->tx_packets; |
708 | tbytes = stats->tx_bytes; | 709 | tbytes = stats->tx_bytes; |
710 | } while (u64_stats_fetch_retry(&stats->tx_syncp, start)); | ||
711 | |||
712 | do { | ||
713 | start = u64_stats_fetch_begin(&stats->rx_syncp); | ||
709 | rpackets = stats->rx_packets; | 714 | rpackets = stats->rx_packets; |
710 | rbytes = stats->rx_bytes; | 715 | rbytes = stats->rx_bytes; |
711 | } while (u64_stats_fetch_retry(&stats->syncp, start)); | 716 | } while (u64_stats_fetch_retry(&stats->rx_syncp, start)); |
712 | 717 | ||
713 | tot->rx_packets += rpackets; | 718 | tot->rx_packets += rpackets; |
714 | tot->tx_packets += tpackets; | 719 | tot->tx_packets += tpackets; |
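The virtio_net hunks above split the single u64_stats_sync into tx_syncp and rx_syncp. u64_stats_sync assumes one writer per seqcount, and the tx and rx counters are bumped from different paths, so giving each direction its own syncp keeps the single-writer rule and lets virtnet_stats() retry each direction independently. Below is a minimal sketch of the writer/reader pairing for one direction; it assumes kernel context and uses a generic struct, not virtio_net's own.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Minimal sketch: one u64_stats_sync per group of counters that shares a
 * single writer. */
struct dir_stats {
        struct u64_stats_sync syncp;    /* guards packets/bytes below */
        u64 packets;
        u64 bytes;
};

/* Writer: called from exactly one path (e.g. only the tx completion path). */
static void dir_stats_add(struct dir_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* Reader: retries if a writer was active, so the 64-bit values are read
 * consistently even on 32-bit machines. */
static void dir_stats_read(struct dir_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}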
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 67c13af6f206..c06b6cb5c91e 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -877,6 +877,10 @@ struct b43_wl { | |||
877 | * from the mac80211 subsystem. */ | 877 | * from the mac80211 subsystem. */ |
878 | u16 mac80211_initially_registered_queues; | 878 | u16 mac80211_initially_registered_queues; |
879 | 879 | ||
880 | /* Set this if we call ieee80211_register_hw() and check if we call | ||
881 | * ieee80211_unregister_hw(). */ | ||
882 | bool hw_registred; | ||
883 | |||
880 | /* We can only have one operating interface (802.11 core) | 884 | /* We can only have one operating interface (802.11 core) |
881 | * at a time. General information about this interface follows. | 885 | * at a time. General information about this interface follows. |
882 | */ | 886 | */ |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 5a39b226b2e3..1b988f26bdf1 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -2437,6 +2437,7 @@ start_ieee80211: | |||
2437 | err = ieee80211_register_hw(wl->hw); | 2437 | err = ieee80211_register_hw(wl->hw); |
2438 | if (err) | 2438 | if (err) |
2439 | goto err_one_core_detach; | 2439 | goto err_one_core_detach; |
2440 | wl->hw_registred = true; | ||
2440 | b43_leds_register(wl->current_dev); | 2441 | b43_leds_register(wl->current_dev); |
2441 | goto out; | 2442 | goto out; |
2442 | 2443 | ||
@@ -3766,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) | |||
3766 | if (prev_status >= B43_STAT_STARTED) { | 3767 | if (prev_status >= B43_STAT_STARTED) { |
3767 | err = b43_wireless_core_start(up_dev); | 3768 | err = b43_wireless_core_start(up_dev); |
3768 | if (err) { | 3769 | if (err) { |
3769 | b43err(wl, "Fatal: Coult not start device for " | 3770 | b43err(wl, "Fatal: Could not start device for " |
3770 | "selected %s-GHz band\n", | 3771 | "selected %s-GHz band\n", |
3771 | band_to_string(chan->band)); | 3772 | band_to_string(chan->band)); |
3772 | b43_wireless_core_exit(up_dev); | 3773 | b43_wireless_core_exit(up_dev); |
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) | |||
5299 | 5300 | ||
5300 | hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; | 5301 | hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; |
5301 | wl->mac80211_initially_registered_queues = hw->queues; | 5302 | wl->mac80211_initially_registered_queues = hw->queues; |
5303 | wl->hw_registred = false; | ||
5302 | hw->max_rates = 2; | 5304 | hw->max_rates = 2; |
5303 | SET_IEEE80211_DEV(hw, dev->dev); | 5305 | SET_IEEE80211_DEV(hw, dev->dev); |
5304 | if (is_valid_ether_addr(sprom->et1mac)) | 5306 | if (is_valid_ether_addr(sprom->et1mac)) |
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core) | |||
5370 | * as the ieee80211 unreg will destroy the workqueue. */ | 5372 | * as the ieee80211 unreg will destroy the workqueue. */ |
5371 | cancel_work_sync(&wldev->restart_work); | 5373 | cancel_work_sync(&wldev->restart_work); |
5372 | 5374 | ||
5373 | /* Restore the queues count before unregistering, because firmware detect | 5375 | B43_WARN_ON(!wl); |
5374 | * might have modified it. Restoring is important, so the networking | 5376 | if (wl->current_dev == wldev && wl->hw_registred) { |
5375 | * stack can properly free resources. */ | 5377 | /* Restore the queues count before unregistering, because firmware detect |
5376 | wl->hw->queues = wl->mac80211_initially_registered_queues; | 5378 | * might have modified it. Restoring is important, so the networking |
5377 | b43_leds_stop(wldev); | 5379 | * stack can properly free resources. */ |
5378 | ieee80211_unregister_hw(wl->hw); | 5380 | wl->hw->queues = wl->mac80211_initially_registered_queues; |
5381 | b43_leds_stop(wldev); | ||
5382 | ieee80211_unregister_hw(wl->hw); | ||
5383 | } | ||
5379 | 5384 | ||
5380 | b43_one_core_detach(wldev->dev); | 5385 | b43_one_core_detach(wldev->dev); |
5381 | 5386 | ||
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev) | |||
5446 | cancel_work_sync(&wldev->restart_work); | 5451 | cancel_work_sync(&wldev->restart_work); |
5447 | 5452 | ||
5448 | B43_WARN_ON(!wl); | 5453 | B43_WARN_ON(!wl); |
5449 | if (wl->current_dev == wldev) { | 5454 | if (wl->current_dev == wldev && wl->hw_registred) { |
5450 | /* Restore the queues count before unregistering, because firmware detect | 5455 | /* Restore the queues count before unregistering, because firmware detect |
5451 | * might have modified it. Restoring is important, so the networking | 5456 | * might have modified it. Restoring is important, so the networking |
5452 | * stack can properly free resources. */ | 5457 | * stack can properly free resources. */ |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index cd9c9bc186d9..eae691e2f7dd 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl, | |||
2633 | if (prev_status >= B43legacy_STAT_STARTED) { | 2633 | if (prev_status >= B43legacy_STAT_STARTED) { |
2634 | err = b43legacy_wireless_core_start(up_dev); | 2634 | err = b43legacy_wireless_core_start(up_dev); |
2635 | if (err) { | 2635 | if (err) { |
2636 | b43legacyerr(wl, "Fatal: Coult not start device for " | 2636 | b43legacyerr(wl, "Fatal: Could not start device for " |
2637 | "newly selected %s-PHY mode\n", | 2637 | "newly selected %s-PHY mode\n", |
2638 | phymode_to_string(new_mode)); | 2638 | phymode_to_string(new_mode)); |
2639 | b43legacy_wireless_core_exit(up_dev); | 2639 | b43legacy_wireless_core_exit(up_dev); |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index e2480d196276..8e7e6928c936 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | |||
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) | |||
89 | data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; | 89 | data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; |
90 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); | 90 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); |
91 | 91 | ||
92 | /* redirect, configure ane enable io for interrupt signal */ | 92 | /* redirect, configure and enable io for interrupt signal */ |
93 | data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; | 93 | data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; |
94 | if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) | 94 | if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH) |
95 | data |= SDIO_SEPINT_ACT_HI; | 95 | data |= SDIO_SEPINT_ACT_HI; |
96 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); | 96 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); |
97 | 97 | ||
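The one-character brcmfmac fix above is a classic flag-test bug: "irq_flags | IRQF_TRIGGER_HIGH" ORs in a non-zero constant and is therefore always true, while "irq_flags & IRQF_TRIGGER_HIGH" actually tests whether the bit is set. A standalone illustration (plain C, with made-up flag values rather than the real IRQF_* constants):

#include <stdio.h>

#define TRIGGER_LOW   0x1
#define TRIGGER_HIGH  0x2

int main(void)
{
        unsigned int flags = TRIGGER_LOW;       /* HIGH bit not set */

        /* Buggy test: OR-ing a non-zero constant is always true. */
        if (flags | TRIGGER_HIGH)
                printf("OR test fires even though HIGH is not set\n");

        /* Correct test: AND masks out everything except the bit of interest. */
        if (flags & TRIGGER_HIGH)
                printf("never reached for flags = TRIGGER_LOW\n");
        else
                printf("AND test correctly reports HIGH not set\n");

        return 0;
}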
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 9cfae0c08707..95aa8e1683ec 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1903 | netif_stop_queue(priv->net_dev); | 1903 | netif_stop_queue(priv->net_dev); |
1904 | } | 1904 | } |
1905 | 1905 | ||
1906 | /* Called by register_netdev() */ | ||
1907 | static int ipw2100_net_init(struct net_device *dev) | ||
1908 | { | ||
1909 | struct ipw2100_priv *priv = libipw_priv(dev); | ||
1910 | |||
1911 | return ipw2100_up(priv, 1); | ||
1912 | } | ||
1913 | |||
1914 | static int ipw2100_wdev_init(struct net_device *dev) | 1906 | static int ipw2100_wdev_init(struct net_device *dev) |
1915 | { | 1907 | { |
1916 | struct ipw2100_priv *priv = libipw_priv(dev); | 1908 | struct ipw2100_priv *priv = libipw_priv(dev); |
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = { | |||
6087 | .ndo_stop = ipw2100_close, | 6079 | .ndo_stop = ipw2100_close, |
6088 | .ndo_start_xmit = libipw_xmit, | 6080 | .ndo_start_xmit = libipw_xmit, |
6089 | .ndo_change_mtu = libipw_change_mtu, | 6081 | .ndo_change_mtu = libipw_change_mtu, |
6090 | .ndo_init = ipw2100_net_init, | ||
6091 | .ndo_tx_timeout = ipw2100_tx_timeout, | 6082 | .ndo_tx_timeout = ipw2100_tx_timeout, |
6092 | .ndo_set_mac_address = ipw2100_set_address, | 6083 | .ndo_set_mac_address = ipw2100_set_address, |
6093 | .ndo_validate_addr = eth_validate_addr, | 6084 | .ndo_validate_addr = eth_validate_addr, |
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6329 | printk(KERN_INFO DRV_NAME | 6320 | printk(KERN_INFO DRV_NAME |
6330 | ": Detected Intel PRO/Wireless 2100 Network Connection\n"); | 6321 | ": Detected Intel PRO/Wireless 2100 Network Connection\n"); |
6331 | 6322 | ||
6323 | err = ipw2100_up(priv, 1); | ||
6324 | if (err) | ||
6325 | goto fail; | ||
6326 | |||
6332 | err = ipw2100_wdev_init(dev); | 6327 | err = ipw2100_wdev_init(dev); |
6333 | if (err) | 6328 | if (err) |
6334 | goto fail; | 6329 | goto fail; |
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6338 | * network device we would call ipw2100_up. This introduced a race | 6333 | * network device we would call ipw2100_up. This introduced a race |
6339 | * condition with newer hotplug configurations (network was coming | 6334 | * condition with newer hotplug configurations (network was coming |
6340 | * up and making calls before the device was initialized). | 6335 | * up and making calls before the device was initialized). |
6341 | * | 6336 | */ |
6342 | * If we called ipw2100_up before we registered the device, then the | ||
6343 | * device name wasn't registered. So, we instead use the net_dev->init | ||
6344 | * member to call a function that then just turns and calls ipw2100_up. | ||
6345 | * net_dev->init is called after name allocation but before the | ||
6346 | * notifier chain is called */ | ||
6347 | err = register_netdev(dev); | 6337 | err = register_netdev(dev); |
6348 | if (err) { | 6338 | if (err) { |
6349 | printk(KERN_WARNING DRV_NAME | 6339 | printk(KERN_WARNING DRV_NAME |
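The ipw2100 hunks drop the .ndo_init trampoline and instead bring the hardware up in the PCI probe path, before register_netdev(), so the stack can never call into a half-initialised device. A rough sketch of that probe ordering follows; it assumes kernel context, and example_hw_up/example_hw_down are hypothetical stand-ins, not ipw2100 functions.

#include <linux/etherdevice.h>
#include <linux/pci.h>

/* Placeholders for the driver's real bring-up/teardown steps. */
int example_hw_up(struct net_device *dev);
void example_hw_down(struct net_device *dev);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(0);        /* private data omitted for brevity */
        if (!dev)
                return -ENOMEM;

        err = example_hw_up(dev);       /* finish hardware init first */
        if (err)
                goto err_free;

        err = register_netdev(dev);     /* last step: device becomes visible */
        if (err)
                goto err_down;

        return 0;

err_down:
        example_hw_down(dev);
err_free:
        free_netdev(dev);
        return err;
}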
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 19f7ee84ae89..e5e8ada4aaf6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -35,17 +35,20 @@ | |||
35 | #define IWL6000_UCODE_API_MAX 6 | 35 | #define IWL6000_UCODE_API_MAX 6 |
36 | #define IWL6050_UCODE_API_MAX 5 | 36 | #define IWL6050_UCODE_API_MAX 5 |
37 | #define IWL6000G2_UCODE_API_MAX 6 | 37 | #define IWL6000G2_UCODE_API_MAX 6 |
38 | #define IWL6035_UCODE_API_MAX 6 | ||
38 | 39 | ||
39 | /* Oldest version we won't warn about */ | 40 | /* Oldest version we won't warn about */ |
40 | #define IWL6000_UCODE_API_OK 4 | 41 | #define IWL6000_UCODE_API_OK 4 |
41 | #define IWL6000G2_UCODE_API_OK 5 | 42 | #define IWL6000G2_UCODE_API_OK 5 |
42 | #define IWL6050_UCODE_API_OK 5 | 43 | #define IWL6050_UCODE_API_OK 5 |
43 | #define IWL6000G2B_UCODE_API_OK 6 | 44 | #define IWL6000G2B_UCODE_API_OK 6 |
45 | #define IWL6035_UCODE_API_OK 6 | ||
44 | 46 | ||
45 | /* Lowest firmware API version supported */ | 47 | /* Lowest firmware API version supported */ |
46 | #define IWL6000_UCODE_API_MIN 4 | 48 | #define IWL6000_UCODE_API_MIN 4 |
47 | #define IWL6050_UCODE_API_MIN 4 | 49 | #define IWL6050_UCODE_API_MIN 4 |
48 | #define IWL6000G2_UCODE_API_MIN 4 | 50 | #define IWL6000G2_UCODE_API_MIN 5 |
51 | #define IWL6035_UCODE_API_MIN 6 | ||
49 | 52 | ||
50 | /* EEPROM versions */ | 53 | /* EEPROM versions */ |
51 | #define EEPROM_6000_TX_POWER_VERSION (4) | 54 | #define EEPROM_6000_TX_POWER_VERSION (4) |
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = { | |||
227 | IWL_DEVICE_6030, | 230 | IWL_DEVICE_6030, |
228 | }; | 231 | }; |
229 | 232 | ||
233 | #define IWL_DEVICE_6035 \ | ||
234 | .fw_name_pre = IWL6030_FW_PRE, \ | ||
235 | .ucode_api_max = IWL6035_UCODE_API_MAX, \ | ||
236 | .ucode_api_ok = IWL6035_UCODE_API_OK, \ | ||
237 | .ucode_api_min = IWL6035_UCODE_API_MIN, \ | ||
238 | .device_family = IWL_DEVICE_FAMILY_6030, \ | ||
239 | .max_inst_size = IWL60_RTC_INST_SIZE, \ | ||
240 | .max_data_size = IWL60_RTC_DATA_SIZE, \ | ||
241 | .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ | ||
242 | .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ | ||
243 | .base_params = &iwl6000_g2_base_params, \ | ||
244 | .bt_params = &iwl6000_bt_params, \ | ||
245 | .need_temp_offset_calib = true, \ | ||
246 | .led_mode = IWL_LED_RF_STATE, \ | ||
247 | .adv_pm = true | ||
248 | |||
230 | const struct iwl_cfg iwl6035_2agn_cfg = { | 249 | const struct iwl_cfg iwl6035_2agn_cfg = { |
231 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", | 250 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", |
232 | IWL_DEVICE_6030, | 251 | IWL_DEVICE_6035, |
233 | .ht_params = &iwl6000_ht_params, | 252 | .ht_params = &iwl6000_ht_params, |
234 | }; | 253 | }; |
235 | 254 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index aea07aab3c9e..eb6a8eaf42fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c | |||
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, | |||
1267 | key_flags |= STA_KEY_MULTICAST_MSK; | 1267 | key_flags |= STA_KEY_MULTICAST_MSK; |
1268 | 1268 | ||
1269 | sta_cmd.key.key_flags = key_flags; | 1269 | sta_cmd.key.key_flags = key_flags; |
1270 | sta_cmd.key.key_offset = WEP_INVALID_OFFSET; | 1270 | sta_cmd.key.key_offset = keyconf->hw_key_idx; |
1271 | sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; | 1271 | sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; |
1272 | sta_cmd.mode = STA_CONTROL_MODIFY_MSK; | 1272 | sta_cmd.mode = STA_CONTROL_MODIFY_MSK; |
1273 | 1273 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index d742900969ea..fac67a526a30 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) | |||
861 | 861 | ||
862 | /* We have our copies now, allow OS release its copies */ | 862 | /* We have our copies now, allow OS release its copies */ |
863 | release_firmware(ucode_raw); | 863 | release_firmware(ucode_raw); |
864 | complete(&drv->request_firmware_complete); | ||
865 | 864 | ||
866 | drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); | 865 | drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); |
867 | 866 | ||
868 | if (!drv->op_mode) | 867 | if (!drv->op_mode) |
869 | goto out_free_fw; | 868 | goto out_unbind; |
870 | 869 | ||
870 | /* | ||
871 | * Complete the firmware request last so that | ||
872 | * a driver unbind (stop) doesn't run while we | ||
873 | * are doing the start() above. | ||
874 | */ | ||
875 | complete(&drv->request_firmware_complete); | ||
871 | return; | 876 | return; |
872 | 877 | ||
873 | try_again: | 878 | try_again: |
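In the iwl-drv hunk, complete(&drv->request_firmware_complete) moves to after the op_mode start so that an unbind, which waits on that completion before tearing the driver down, cannot run concurrently with the start() above. The pattern in isolation (kernel context assumed; the my_* names are placeholders, not iwlwifi functions):

#include <linux/completion.h>

struct my_drv {
        struct completion load_done;
        void *op_mode;
};

/* Placeholders for the driver's real start/stop steps. */
void *my_op_mode_start(struct my_drv *drv);
void my_op_mode_stop(void *op_mode);

/* Async-load side: do the work first, signal the completion last. */
static void my_load_done(struct my_drv *drv)
{
        drv->op_mode = my_op_mode_start(drv);

        /* A waiter now sees either a fully started op_mode or none at all,
         * never a half-started one. */
        complete(&drv->load_done);
}

/* Unbind side: wait for the async load to settle before tearing down. */
static void my_unbind(struct my_drv *drv)
{
        wait_for_completion(&drv->load_done);
        if (drv->op_mode)
                my_op_mode_stop(drv->op_mode);
}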
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 50c58911e718..b8e2b223ac36 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | |||
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans, | |||
568 | * iwl_get_max_txpower_avg - get the highest tx power from all chains. | 568 | * iwl_get_max_txpower_avg - get the highest tx power from all chains. |
569 | * find the highest tx power from all chains for the channel | 569 | * find the highest tx power from all chains for the channel |
570 | */ | 570 | */ |
571 | static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg, | 571 | static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, |
572 | struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, | 572 | struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, |
573 | int element, s8 *max_txpower_in_half_dbm) | 573 | int element, s8 *max_txpower_in_half_dbm) |
574 | { | 574 | { |
575 | s8 max_txpower_avg = 0; /* (dBm) */ | 575 | s8 max_txpower_avg = 0; /* (dBm) */ |
576 | 576 | ||
577 | /* Take the highest tx power from any valid chains */ | 577 | /* Take the highest tx power from any valid chains */ |
578 | if ((cfg->valid_tx_ant & ANT_A) && | 578 | if ((priv->hw_params.valid_tx_ant & ANT_A) && |
579 | (enhanced_txpower[element].chain_a_max > max_txpower_avg)) | 579 | (enhanced_txpower[element].chain_a_max > max_txpower_avg)) |
580 | max_txpower_avg = enhanced_txpower[element].chain_a_max; | 580 | max_txpower_avg = enhanced_txpower[element].chain_a_max; |
581 | if ((cfg->valid_tx_ant & ANT_B) && | 581 | if ((priv->hw_params.valid_tx_ant & ANT_B) && |
582 | (enhanced_txpower[element].chain_b_max > max_txpower_avg)) | 582 | (enhanced_txpower[element].chain_b_max > max_txpower_avg)) |
583 | max_txpower_avg = enhanced_txpower[element].chain_b_max; | 583 | max_txpower_avg = enhanced_txpower[element].chain_b_max; |
584 | if ((cfg->valid_tx_ant & ANT_C) && | 584 | if ((priv->hw_params.valid_tx_ant & ANT_C) && |
585 | (enhanced_txpower[element].chain_c_max > max_txpower_avg)) | 585 | (enhanced_txpower[element].chain_c_max > max_txpower_avg)) |
586 | max_txpower_avg = enhanced_txpower[element].chain_c_max; | 586 | max_txpower_avg = enhanced_txpower[element].chain_c_max; |
587 | if (((cfg->valid_tx_ant == ANT_AB) | | 587 | if (((priv->hw_params.valid_tx_ant == ANT_AB) | |
588 | (cfg->valid_tx_ant == ANT_BC) | | 588 | (priv->hw_params.valid_tx_ant == ANT_BC) | |
589 | (cfg->valid_tx_ant == ANT_AC)) && | 589 | (priv->hw_params.valid_tx_ant == ANT_AC)) && |
590 | (enhanced_txpower[element].mimo2_max > max_txpower_avg)) | 590 | (enhanced_txpower[element].mimo2_max > max_txpower_avg)) |
591 | max_txpower_avg = enhanced_txpower[element].mimo2_max; | 591 | max_txpower_avg = enhanced_txpower[element].mimo2_max; |
592 | if ((cfg->valid_tx_ant == ANT_ABC) && | 592 | if ((priv->hw_params.valid_tx_ant == ANT_ABC) && |
593 | (enhanced_txpower[element].mimo3_max > max_txpower_avg)) | 593 | (enhanced_txpower[element].mimo3_max > max_txpower_avg)) |
594 | max_txpower_avg = enhanced_txpower[element].mimo3_max; | 594 | max_txpower_avg = enhanced_txpower[element].mimo3_max; |
595 | 595 | ||
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) | |||
691 | ((txp->delta_20_in_40 & 0xf0) >> 4), | 691 | ((txp->delta_20_in_40 & 0xf0) >> 4), |
692 | (txp->delta_20_in_40 & 0x0f)); | 692 | (txp->delta_20_in_40 & 0x0f)); |
693 | 693 | ||
694 | max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx, | 694 | max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, |
695 | &max_txp_avg_halfdbm); | 695 | &max_txp_avg_halfdbm); |
696 | 696 | ||
697 | /* | 697 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c index ab2f4d7500a4..3ee23134c02b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c +++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c | |||
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
199 | WIPHY_FLAG_DISABLE_BEACON_HINTS | | 199 | WIPHY_FLAG_DISABLE_BEACON_HINTS | |
200 | WIPHY_FLAG_IBSS_RSN; | 200 | WIPHY_FLAG_IBSS_RSN; |
201 | 201 | ||
202 | #ifdef CONFIG_PM_SLEEP | ||
202 | if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && | 203 | if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && |
203 | priv->trans->ops->wowlan_suspend && | 204 | priv->trans->ops->wowlan_suspend && |
204 | device_can_wakeup(priv->trans->dev)) { | 205 | device_can_wakeup(priv->trans->dev)) { |
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
217 | hw->wiphy->wowlan.pattern_max_len = | 218 | hw->wiphy->wowlan.pattern_max_len = |
218 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; | 219 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; |
219 | } | 220 | } |
221 | #endif | ||
220 | 222 | ||
221 | if (iwlwifi_mod_params.power_save) | 223 | if (iwlwifi_mod_params.power_save) |
222 | hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; | 224 | hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; |
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
249 | ret = ieee80211_register_hw(priv->hw); | 251 | ret = ieee80211_register_hw(priv->hw); |
250 | if (ret) { | 252 | if (ret) { |
251 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); | 253 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); |
254 | iwl_leds_exit(priv); | ||
252 | return ret; | 255 | return ret; |
253 | } | 256 | } |
254 | priv->mac80211_registered = 1; | 257 | priv->mac80211_registered = 1; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 3b1069290fa9..dfd54662e3e6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h | |||
@@ -224,6 +224,7 @@ | |||
224 | #define SCD_TXFACT (SCD_BASE + 0x10) | 224 | #define SCD_TXFACT (SCD_BASE + 0x10) |
225 | #define SCD_ACTIVE (SCD_BASE + 0x14) | 225 | #define SCD_ACTIVE (SCD_BASE + 0x14) |
226 | #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) | 226 | #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) |
227 | #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) | ||
227 | #define SCD_AGGR_SEL (SCD_BASE + 0x248) | 228 | #define SCD_AGGR_SEL (SCD_BASE + 0x248) |
228 | #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) | 229 | #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) |
229 | 230 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index ec6fb395b84d..79c6b91417f9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | |||
@@ -1058,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans) | |||
1058 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, | 1058 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, |
1059 | trans_pcie->scd_bc_tbls.dma >> 10); | 1059 | trans_pcie->scd_bc_tbls.dma >> 10); |
1060 | 1060 | ||
1061 | /* The chain extension of the SCD doesn't work well. This feature is | ||
1062 | * enabled by default by the HW, so we need to disable it manually. | ||
1063 | */ | ||
1064 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | ||
1065 | |||
1061 | /* Enable DMA channel */ | 1066 | /* Enable DMA channel */ |
1062 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) | 1067 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) |
1063 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | 1068 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index fb787df01666..a0b7cfd34685 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
1555 | hdr = (struct ieee80211_hdr *) skb->data; | 1555 | hdr = (struct ieee80211_hdr *) skb->data; |
1556 | mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); | 1556 | mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); |
1557 | } | 1557 | } |
1558 | txi->flags |= IEEE80211_TX_STAT_ACK; | ||
1558 | } | 1559 | } |
1559 | ieee80211_tx_status_irqsafe(data2->hw, skb); | 1560 | ieee80211_tx_status_irqsafe(data2->hw, skb); |
1560 | return 0; | 1561 | return 0; |
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void) | |||
1721 | "unregister family %i\n", ret); | 1722 | "unregister family %i\n", ret); |
1722 | } | 1723 | } |
1723 | 1724 | ||
1725 | static const struct ieee80211_iface_limit hwsim_if_limits[] = { | ||
1726 | { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, | ||
1727 | { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | | ||
1728 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | ||
1729 | #ifdef CONFIG_MAC80211_MESH | ||
1730 | BIT(NL80211_IFTYPE_MESH_POINT) | | ||
1731 | #endif | ||
1732 | BIT(NL80211_IFTYPE_AP) | | ||
1733 | BIT(NL80211_IFTYPE_P2P_GO) }, | ||
1734 | }; | ||
1735 | |||
1736 | static const struct ieee80211_iface_combination hwsim_if_comb = { | ||
1737 | .limits = hwsim_if_limits, | ||
1738 | .n_limits = ARRAY_SIZE(hwsim_if_limits), | ||
1739 | .max_interfaces = 2048, | ||
1740 | .num_different_channels = 1, | ||
1741 | }; | ||
1742 | |||
1724 | static int __init init_mac80211_hwsim(void) | 1743 | static int __init init_mac80211_hwsim(void) |
1725 | { | 1744 | { |
1726 | int i, err = 0; | 1745 | int i, err = 0; |
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void) | |||
1782 | hw->wiphy->n_addresses = 2; | 1801 | hw->wiphy->n_addresses = 2; |
1783 | hw->wiphy->addresses = data->addresses; | 1802 | hw->wiphy->addresses = data->addresses; |
1784 | 1803 | ||
1804 | hw->wiphy->iface_combinations = &hwsim_if_comb; | ||
1805 | hw->wiphy->n_iface_combinations = 1; | ||
1806 | |||
1785 | if (fake_hw_scan) { | 1807 | if (fake_hw_scan) { |
1786 | hw->wiphy->max_scan_ssids = 255; | 1808 | hw->wiphy->max_scan_ssids = 255; |
1787 | hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; | 1809 | hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 87671446e24b..015fec3371a0 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
@@ -948,6 +948,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, | |||
948 | bss_cfg->ssid.ssid_len = params->ssid_len; | 948 | bss_cfg->ssid.ssid_len = params->ssid_len; |
949 | } | 949 | } |
950 | 950 | ||
951 | switch (params->hidden_ssid) { | ||
952 | case NL80211_HIDDEN_SSID_NOT_IN_USE: | ||
953 | bss_cfg->bcast_ssid_ctl = 1; | ||
954 | break; | ||
955 | case NL80211_HIDDEN_SSID_ZERO_LEN: | ||
956 | bss_cfg->bcast_ssid_ctl = 0; | ||
957 | break; | ||
958 | case NL80211_HIDDEN_SSID_ZERO_CONTENTS: | ||
959 | /* firmware doesn't support this type of hidden SSID */ | ||
960 | default: | ||
961 | return -EINVAL; | ||
962 | } | ||
963 | |||
951 | if (mwifiex_set_secure_params(priv, bss_cfg, params)) { | 964 | if (mwifiex_set_secure_params(priv, bss_cfg, params)) { |
952 | kfree(bss_cfg); | 965 | kfree(bss_cfg); |
953 | wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); | 966 | wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); |
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 9f674bbebe65..561452a5c818 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h | |||
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { | |||
122 | #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) | 122 | #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) |
123 | #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) | 123 | #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) |
124 | #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) | 124 | #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) |
125 | #define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) | ||
125 | #define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) | 126 | #define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) |
126 | #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) | 127 | #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) |
127 | #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) | 128 | #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) |
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid { | |||
1209 | u8 ssid[0]; | 1210 | u8 ssid[0]; |
1210 | } __packed; | 1211 | } __packed; |
1211 | 1212 | ||
1213 | struct host_cmd_tlv_bcast_ssid { | ||
1214 | struct host_cmd_tlv tlv; | ||
1215 | u8 bcast_ctl; | ||
1216 | } __packed; | ||
1217 | |||
1212 | struct host_cmd_tlv_beacon_period { | 1218 | struct host_cmd_tlv_beacon_period { |
1213 | struct host_cmd_tlv tlv; | 1219 | struct host_cmd_tlv tlv; |
1214 | __le16 period; | 1220 | __le16 period; |
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 76dfbc42a732..8173ab66066d 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c | |||
@@ -132,6 +132,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) | |||
132 | struct host_cmd_tlv_dtim_period *dtim_period; | 132 | struct host_cmd_tlv_dtim_period *dtim_period; |
133 | struct host_cmd_tlv_beacon_period *beacon_period; | 133 | struct host_cmd_tlv_beacon_period *beacon_period; |
134 | struct host_cmd_tlv_ssid *ssid; | 134 | struct host_cmd_tlv_ssid *ssid; |
135 | struct host_cmd_tlv_bcast_ssid *bcast_ssid; | ||
135 | struct host_cmd_tlv_channel_band *chan_band; | 136 | struct host_cmd_tlv_channel_band *chan_band; |
136 | struct host_cmd_tlv_frag_threshold *frag_threshold; | 137 | struct host_cmd_tlv_frag_threshold *frag_threshold; |
137 | struct host_cmd_tlv_rts_threshold *rts_threshold; | 138 | struct host_cmd_tlv_rts_threshold *rts_threshold; |
@@ -153,6 +154,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) | |||
153 | cmd_size += sizeof(struct host_cmd_tlv) + | 154 | cmd_size += sizeof(struct host_cmd_tlv) + |
154 | bss_cfg->ssid.ssid_len; | 155 | bss_cfg->ssid.ssid_len; |
155 | tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; | 156 | tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; |
157 | |||
158 | bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv; | ||
159 | bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID); | ||
160 | bcast_ssid->tlv.len = | ||
161 | cpu_to_le16(sizeof(bcast_ssid->bcast_ctl)); | ||
162 | bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl; | ||
163 | cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid); | ||
164 | tlv += sizeof(struct host_cmd_tlv_bcast_ssid); | ||
156 | } | 165 | } |
157 | if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { | 166 | if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { |
158 | chan_band = (struct host_cmd_tlv_channel_band *)tlv; | 167 | chan_band = (struct host_cmd_tlv_channel_band *)tlv; |
@@ -416,6 +425,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel) | |||
416 | if (!bss_cfg) | 425 | if (!bss_cfg) |
417 | return -ENOMEM; | 426 | return -ENOMEM; |
418 | 427 | ||
428 | mwifiex_set_sys_config_invalid_data(bss_cfg); | ||
419 | bss_cfg->band_cfg = BAND_CONFIG_MANUAL; | 429 | bss_cfg->band_cfg = BAND_CONFIG_MANUAL; |
420 | bss_cfg->channel = channel; | 430 | bss_cfg->channel = channel; |
421 | 431 | ||
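The uap_cmd hunk appends one more TLV to the command buffer using the driver's usual pattern: cast the cursor, fill in type and length as little-endian, copy the payload, then advance both the size accumulator and the cursor. A generic version of that step, with a simplified header struct rather than mwifiex's host_cmd_tlv, looks roughly like this (kernel context assumed):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct tlv_hdr {
        __le16 type;
        __le16 len;
} __packed;

/* Append one TLV at 'cursor', bump *cmd_size, return the new cursor. */
static u8 *tlv_append(u8 *cursor, u16 *cmd_size, u16 type,
                      const void *payload, u16 payload_len)
{
        struct tlv_hdr *hdr = (struct tlv_hdr *)cursor;

        hdr->type = cpu_to_le16(type);
        hdr->len = cpu_to_le16(payload_len);
        memcpy(hdr + 1, payload, payload_len);  /* payload follows the header */

        *cmd_size += sizeof(*hdr) + payload_len;
        return cursor + sizeof(*hdr) + payload_len;
}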
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index ca36cccaba31..8f754025b06e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h | |||
@@ -396,8 +396,7 @@ struct rt2x00_intf { | |||
396 | * for hardware which doesn't support hardware | 396 | * for hardware which doesn't support hardware |
397 | * sequence counting. | 397 | * sequence counting. |
398 | */ | 398 | */ |
399 | spinlock_t seqlock; | 399 | atomic_t seqno; |
400 | u16 seqno; | ||
401 | }; | 400 | }; |
402 | 401 | ||
403 | static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) | 402 | static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index b49773ef72f2..dd24b2663b5e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c | |||
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, | |||
277 | else | 277 | else |
278 | rt2x00dev->intf_sta_count++; | 278 | rt2x00dev->intf_sta_count++; |
279 | 279 | ||
280 | spin_lock_init(&intf->seqlock); | ||
281 | mutex_init(&intf->beacon_skb_mutex); | 280 | mutex_init(&intf->beacon_skb_mutex); |
282 | intf->beacon = entry; | 281 | intf->beacon = entry; |
283 | 282 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 4c662eccf53c..2fd830103415 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c | |||
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
207 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 207 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
208 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 208 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
209 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); | 209 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); |
210 | u16 seqno; | ||
210 | 211 | ||
211 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) | 212 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) |
212 | return; | 213 | return; |
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
238 | * sequence counting per-frame, since those will override the | 239 | * sequence counting per-frame, since those will override the |
239 | * sequence counter given by mac80211. | 240 | * sequence counter given by mac80211. |
240 | */ | 241 | */ |
241 | spin_lock(&intf->seqlock); | ||
242 | |||
243 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) | 242 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) |
244 | intf->seqno += 0x10; | 243 | seqno = atomic_add_return(0x10, &intf->seqno); |
245 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | 244 | else |
246 | hdr->seq_ctrl |= cpu_to_le16(intf->seqno); | 245 | seqno = atomic_read(&intf->seqno); |
247 | |||
248 | spin_unlock(&intf->seqlock); | ||
249 | 246 | ||
247 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
248 | hdr->seq_ctrl |= cpu_to_le16(seqno); | ||
250 | } | 249 | } |
251 | 250 | ||
252 | static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, | 251 | static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, |
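The rt2x00 hunks replace a spinlock-protected u16 with an atomic_t. The useful property is that atomic_add_return() both bumps the counter and hands back the new value in one step, so the first fragment of an MSDU gets a fresh sequence number and later fragments simply read the current one, with no lock held across the read-modify-write. In isolation (kernel context assumed):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t seqno = ATOMIC_INIT(0);

/* atomic_add_return() returns the value *after* the addition, so
 * "allocate the next sequence number" is a single atomic step.
 * Bits 4..15 of the IEEE 802.11 seq_ctrl field carry the number,
 * hence the 0x10 increment. */
static u16 next_seqno(bool first_fragment)
{
        if (first_fragment)
                return (u16)atomic_add_return(0x10, &seqno);
        return (u16)atomic_read(&seqno);        /* later fragments reuse it */
}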
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c index 2e0de2f5f0f9..c2d5b495c179 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c | |||
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev, | |||
117 | radio_on = true; | 117 | radio_on = true; |
118 | } else if (radio_on) { | 118 | } else if (radio_on) { |
119 | radio_on = false; | 119 | radio_on = false; |
120 | cancel_delayed_work_sync(&priv->led_on); | 120 | cancel_delayed_work(&priv->led_on); |
121 | ieee80211_queue_delayed_work(hw, &priv->led_off, 0); | 121 | ieee80211_queue_delayed_work(hw, &priv->led_off, 0); |
122 | } | 122 | } |
123 | } else if (radio_on) { | 123 | } else if (radio_on) { |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 447e83472c01..77cb54a65cde 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1744,6 +1744,11 @@ int pci_prepare_to_sleep(struct pci_dev *dev) | |||
1744 | if (target_state == PCI_POWER_ERROR) | 1744 | if (target_state == PCI_POWER_ERROR) |
1745 | return -EIO; | 1745 | return -EIO; |
1746 | 1746 | ||
1747 | /* Some devices mustn't be in D3 during system sleep */ | ||
1748 | if (target_state == PCI_D3hot && | ||
1749 | (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)) | ||
1750 | return 0; | ||
1751 | |||
1747 | pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); | 1752 | pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); |
1748 | 1753 | ||
1749 | error = pci_set_power_state(dev, target_state); | 1754 | error = pci_set_power_state(dev, target_state); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 2a7521677541..194b243a2817 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2929,6 +2929,32 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev) | |||
2929 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); | 2929 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); |
2930 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); | 2930 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); |
2931 | 2931 | ||
2932 | /* | ||
2933 | * The Intel 6 Series/C200 Series chipset's EHCI controllers on many | ||
2934 | * ASUS motherboards will cause memory corruption or a system crash | ||
2935 | * if they are in D3 while the system is put into S3 sleep. | ||
2936 | */ | ||
2937 | static void __devinit asus_ehci_no_d3(struct pci_dev *dev) | ||
2938 | { | ||
2939 | const char *sys_info; | ||
2940 | static const char good_Asus_board[] = "P8Z68-V"; | ||
2941 | |||
2942 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP) | ||
2943 | return; | ||
2944 | if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK) | ||
2945 | return; | ||
2946 | sys_info = dmi_get_system_info(DMI_BOARD_NAME); | ||
2947 | if (sys_info && memcmp(sys_info, good_Asus_board, | ||
2948 | sizeof(good_Asus_board) - 1) == 0) | ||
2949 | return; | ||
2950 | |||
2951 | dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n"); | ||
2952 | dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP; | ||
2953 | device_set_wakeup_capable(&dev->dev, false); | ||
2954 | } | ||
2955 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3); | ||
2956 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3); | ||
2957 | |||
2932 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, | 2958 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, |
2933 | struct pci_fixup *end) | 2959 | struct pci_fixup *end) |
2934 | { | 2960 | { |
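The two PCI hunks work as a pair: the fixup in quirks.c tags the affected ASUS EHCI controllers with PCI_DEV_FLAGS_NO_D3_DURING_SLEEP, and pci_prepare_to_sleep() then leaves such a device out of D3hot on suspend. The general shape of a final-stage fixup that tags a device for the core to check later is sketched below; the vendor/device IDs are placeholders, not real ones.

#include <linux/pci.h>

/* Sketch only: a final-stage PCI fixup setting a flag that the PM core
 * consults later; 0x1234/0x5678 are placeholder IDs. */
static void __devinit example_no_d3_quirk(struct pci_dev *dev)
{
        dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
        dev_info(&dev->dev, "keeping device out of D3 during system sleep\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, example_no_d3_quirk);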
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index c3b331b74fa0..0cc053af70bd 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps); | |||
61 | list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ | 61 | list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ |
62 | for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ | 62 | for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ |
63 | _i_ < _maps_node_->num_maps; \ | 63 | _i_ < _maps_node_->num_maps; \ |
64 | i++, _map_ = &_maps_node_->maps[_i_]) | 64 | _i_++, _map_ = &_maps_node_->maps[_i_]) |
65 | 65 | ||
66 | /** | 66 | /** |
67 | * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support | 67 | * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support |
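The pinctrl core fix is a macro-hygiene bug: the iterator macro advanced a caller variable named i instead of its own _i_ parameter, so it compiled only when an i happened to be in scope and then looped on the wrong counter. A standalone demonstration of the same class of bug:

#include <stdio.h>

/* Buggy macro: increments the caller's 'i' instead of its own '_i_', so it
 * only builds when a variable named i is in scope and then never advances
 * the intended counter. */
#define FOR_EACH_BUGGY(_i_, _n_)  for (_i_ = 0; _i_ < (_n_); i++)

/* Fixed macro: touches only the names passed in. */
#define FOR_EACH_FIXED(_i_, _n_)  for (_i_ = 0; _i_ < (_n_); _i_++)

int main(void)
{
        int i = 0;      /* unrelated variable the buggy macro would capture */
        int j;
        int count = 0;

        FOR_EACH_FIXED(j, 3)
                count++;
        printf("fixed macro iterated %d times, i untouched (%d)\n", count, i);

        /* FOR_EACH_BUGGY(j, 3) would increment i, never j, and spin
         * forever -- the same class of bug as the pinctrl macro. */
        return 0;
}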
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c index f6e7c670906c..dd6d93aa5334 100644 --- a/drivers/pinctrl/pinctrl-imx.c +++ b/drivers/pinctrl/pinctrl-imx.c | |||
@@ -27,16 +27,16 @@ | |||
27 | #include "core.h" | 27 | #include "core.h" |
28 | #include "pinctrl-imx.h" | 28 | #include "pinctrl-imx.h" |
29 | 29 | ||
30 | #define IMX_PMX_DUMP(info, p, m, c, n) \ | 30 | #define IMX_PMX_DUMP(info, p, m, c, n) \ |
31 | { \ | 31 | { \ |
32 | int i, j; \ | 32 | int i, j; \ |
33 | printk("Format: Pin Mux Config\n"); \ | 33 | printk(KERN_DEBUG "Format: Pin Mux Config\n"); \ |
34 | for (i = 0; i < n; i++) { \ | 34 | for (i = 0; i < n; i++) { \ |
35 | j = p[i]; \ | 35 | j = p[i]; \ |
36 | printk("%s %d 0x%lx\n", \ | 36 | printk(KERN_DEBUG "%s %d 0x%lx\n", \ |
37 | info->pins[j].name, \ | 37 | info->pins[j].name, \ |
38 | m[i], c[i]); \ | 38 | m[i], c[i]); \ |
39 | } \ | 39 | } \ |
40 | } | 40 | } |
41 | 41 | ||
42 | /* The bits in CONFIG cell defined in binding doc*/ | 42 | /* The bits in CONFIG cell defined in binding doc*/ |
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
173 | 173 | ||
174 | /* create mux map */ | 174 | /* create mux map */ |
175 | parent = of_get_parent(np); | 175 | parent = of_get_parent(np); |
176 | if (!parent) | 176 | if (!parent) { |
177 | kfree(new_map); | ||
177 | return -EINVAL; | 178 | return -EINVAL; |
179 | } | ||
178 | new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; | 180 | new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; |
179 | new_map[0].data.mux.function = parent->name; | 181 | new_map[0].data.mux.function = parent->name; |
180 | new_map[0].data.mux.group = np->name; | 182 | new_map[0].data.mux.group = np->name; |
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
193 | } | 195 | } |
194 | 196 | ||
195 | dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", | 197 | dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", |
196 | new_map->data.mux.function, new_map->data.mux.group, map_num); | 198 | (*map)->data.mux.function, (*map)->data.mux.group, map_num); |
197 | 199 | ||
198 | return 0; | 200 | return 0; |
199 | } | 201 | } |
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
201 | static void imx_dt_free_map(struct pinctrl_dev *pctldev, | 203 | static void imx_dt_free_map(struct pinctrl_dev *pctldev, |
202 | struct pinctrl_map *map, unsigned num_maps) | 204 | struct pinctrl_map *map, unsigned num_maps) |
203 | { | 205 | { |
204 | int i; | 206 | kfree(map); |
205 | |||
206 | for (i = 0; i < num_maps; i++) | ||
207 | kfree(map); | ||
208 | } | 207 | } |
209 | 208 | ||
210 | static struct pinctrl_ops imx_pctrl_ops = { | 209 | static struct pinctrl_ops imx_pctrl_ops = { |
@@ -475,9 +474,8 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np, | |||
475 | grp->configs[j] = config & ~IMX_PAD_SION; | 474 | grp->configs[j] = config & ~IMX_PAD_SION; |
476 | } | 475 | } |
477 | 476 | ||
478 | #ifdef DEBUG | ||
479 | IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); | 477 | IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); |
480 | #endif | 478 | |
481 | return 0; | 479 | return 0; |
482 | } | 480 | } |
483 | 481 | ||
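Two of the imx hunks are memory-management fixes: the error path now frees new_map when of_get_parent() fails, and imx_dt_free_map() now calls kfree() exactly once, because the whole map array came from a single allocation; the old loop freed the same pointer once per entry. The single-allocation/single-free rule in standalone form:

#include <stdlib.h>

int main(void)
{
        int num_maps = 4;

        /* One allocation covering all entries... */
        int *map = calloc(num_maps, sizeof(*map));
        if (!map)
                return 1;

        /* ...must be released by exactly one free().  Calling free(map)
         * once per entry, as the old imx_dt_free_map() loop did, frees the
         * same pointer repeatedly, which is undefined behaviour. */
        free(map);
        return 0;
}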
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c index 556e45a213eb..4ba4636b6a4a 100644 --- a/drivers/pinctrl/pinctrl-mxs.c +++ b/drivers/pinctrl/pinctrl-mxs.c | |||
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
107 | 107 | ||
108 | /* Compose group name */ | 108 | /* Compose group name */ |
109 | group = kzalloc(length, GFP_KERNEL); | 109 | group = kzalloc(length, GFP_KERNEL); |
110 | if (!group) | 110 | if (!group) { |
111 | return -ENOMEM; | 111 | ret = -ENOMEM; |
112 | goto free; | ||
113 | } | ||
112 | snprintf(group, length, "%s.%d", np->name, reg); | 114 | snprintf(group, length, "%s.%d", np->name, reg); |
113 | new_map[i].data.mux.group = group; | 115 | new_map[i].data.mux.group = group; |
114 | i++; | 116 | i++; |
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
118 | pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); | 120 | pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); |
119 | if (!pconfig) { | 121 | if (!pconfig) { |
120 | ret = -ENOMEM; | 122 | ret = -ENOMEM; |
121 | goto free; | 123 | goto free_group; |
122 | } | 124 | } |
123 | 125 | ||
124 | new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; | 126 | new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; |
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
133 | 135 | ||
134 | return 0; | 136 | return 0; |
135 | 137 | ||
138 | free_group: | ||
139 | if (!purecfg) | ||
140 | kfree(group); | ||
136 | free: | 141 | free: |
137 | kfree(new_map); | 142 | kfree(new_map); |
138 | return ret; | 143 | return ret; |
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev, | |||
511 | return 0; | 516 | return 0; |
512 | 517 | ||
513 | err: | 518 | err: |
519 | platform_set_drvdata(pdev, NULL); | ||
514 | iounmap(d->base); | 520 | iounmap(d->base); |
515 | return ret; | 521 | return ret; |
516 | } | 522 | } |
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev) | |||
520 | { | 526 | { |
521 | struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); | 527 | struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); |
522 | 528 | ||
529 | platform_set_drvdata(pdev, NULL); | ||
523 | pinctrl_unregister(d->pctl); | 530 | pinctrl_unregister(d->pctl); |
524 | iounmap(d->base); | 531 | iounmap(d->base); |
525 | 532 | ||
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index b26395d16347..3e7e47d6b385 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c | |||
@@ -673,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, | |||
673 | * wakeup is anyhow controlled by the RIMSC and FIMSC registers. | 673 | * wakeup is anyhow controlled by the RIMSC and FIMSC registers. |
674 | */ | 674 | */ |
675 | if (nmk_chip->sleepmode && on) { | 675 | if (nmk_chip->sleepmode && on) { |
676 | __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base, | 676 | __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, |
677 | NMK_GPIO_SLPM_WAKEUP_ENABLE); | 677 | NMK_GPIO_SLPM_WAKEUP_ENABLE); |
678 | } | 678 | } |
679 | 679 | ||
@@ -1246,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) | |||
1246 | ret = PTR_ERR(clk); | 1246 | ret = PTR_ERR(clk); |
1247 | goto out_unmap; | 1247 | goto out_unmap; |
1248 | } | 1248 | } |
1249 | clk_prepare(clk); | ||
1249 | 1250 | ||
1250 | nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); | 1251 | nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); |
1251 | if (!nmk_chip) { | 1252 | if (!nmk_chip) { |
@@ -1437,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function, | |||
1437 | 1438 | ||
1438 | dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins); | 1439 | dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins); |
1439 | 1440 | ||
1440 | /* Handle this special glitch on altfunction C */ | 1441 | /* |
1442 | * If we're setting altfunc C by setting both AFSLA and AFSLB to 1, | ||
1443 | * we may pass through an undesired state. In this case we take | ||
1444 | * some extra care. | ||
1445 | * | ||
1446 | * Safe sequence used to switch IOs between GPIO and Alternate-C mode: | ||
1447 | * - Save SLPM registers (since we have a shadow register in the | ||
1448 | * nmk_chip we're using that as backup) | ||
1449 | * - Set SLPM=0 for the IOs you want to switch and others to 1 | ||
1450 | * - Configure the GPIO registers for the IOs that are being switched | ||
1451 | * - Set IOFORCE=1 | ||
1452 | * - Modify the AFLSA/B registers for the IOs that are being switched | ||
1453 | * - Set IOFORCE=0 | ||
1454 | * - Restore SLPM registers | ||
1455 | * - Any spurious wake up event during switch sequence to be ignored | ||
1456 | * and cleared | ||
1457 | * | ||
1458 | * We REALLY need to save ALL slpm registers, because the external | ||
1459 | * IOFORCE will switch *all* ports to their sleepmode setting to as | ||
1460 | * to avoid glitches. (Not just one port!) | ||
1461 | */ | ||
1441 | glitch = (g->altsetting == NMK_GPIO_ALT_C); | 1462 | glitch = (g->altsetting == NMK_GPIO_ALT_C); |
1442 | 1463 | ||
1443 | if (glitch) { | 1464 | if (glitch) { |
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index ba15b1a29e52..e9f8e7d11001 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c | |||
@@ -1184,7 +1184,7 @@ out_no_gpio_remap: | |||
1184 | return ret; | 1184 | return ret; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | static const struct of_device_id pinmux_ids[] = { | 1187 | static const struct of_device_id pinmux_ids[] __devinitconst = { |
1188 | { .compatible = "sirf,prima2-gpio-pinmux" }, | 1188 | { .compatible = "sirf,prima2-gpio-pinmux" }, |
1189 | {} | 1189 | {} |
1190 | }; | 1190 | }; |
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c index 5ae50aadf885..b3f6b2873fdd 100644 --- a/drivers/pinctrl/spear/pinctrl-spear.c +++ b/drivers/pinctrl/spear/pinctrl-spear.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver for the ST Microelectronics SPEAr pinmux | 2 | * Driver for the ST Microelectronics SPEAr pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * Inspired from: | 7 | * Inspired from: |
8 | * - U300 Pinctl drivers | 8 | * - U300 Pinctl drivers |
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h index 9155783bb47f..d950eb78d939 100644 --- a/drivers/pinctrl/spear/pinctrl-spear.h +++ b/drivers/pinctrl/spear/pinctrl-spear.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver header file for the ST Microelectronics SPEAr pinmux | 2 | * Driver header file for the ST Microelectronics SPEAr pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c index fff168be7f00..d6cca8c81b92 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1310.c +++ b/drivers/pinctrl/spear/pinctrl-spear1310.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver for the ST Microelectronics SPEAr1310 pinmux | 2 | * Driver for the ST Microelectronics SPEAr1310 pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void) | |||
2192 | } | 2192 | } |
2193 | module_exit(spear1310_pinctrl_exit); | 2193 | module_exit(spear1310_pinctrl_exit); |
2194 | 2194 | ||
2195 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 2195 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
2196 | MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); | 2196 | MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); |
2197 | MODULE_LICENSE("GPL v2"); | 2197 | MODULE_LICENSE("GPL v2"); |
2198 | MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); | 2198 | MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); |
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c index a8ab2a6f51bf..a0eb057e55bd 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1340.c +++ b/drivers/pinctrl/spear/pinctrl-spear1340.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver for the ST Microelectronics SPEAr1340 pinmux | 2 | * Driver for the ST Microelectronics SPEAr1340 pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void) | |||
1983 | } | 1983 | } |
1984 | module_exit(spear1340_pinctrl_exit); | 1984 | module_exit(spear1340_pinctrl_exit); |
1985 | 1985 | ||
1986 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 1986 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
1987 | MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); | 1987 | MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); |
1988 | MODULE_LICENSE("GPL v2"); | 1988 | MODULE_LICENSE("GPL v2"); |
1989 | MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); | 1989 | MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); |
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c index 9c82a35e4e78..4dfc2849b172 100644 --- a/drivers/pinctrl/spear/pinctrl-spear300.c +++ b/drivers/pinctrl/spear/pinctrl-spear300.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver for the ST Microelectronics SPEAr300 pinmux | 2 | * Driver for the ST Microelectronics SPEAr300 pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void) | |||
702 | } | 702 | } |
703 | module_exit(spear300_pinctrl_exit); | 703 | module_exit(spear300_pinctrl_exit); |
704 | 704 | ||
705 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 705 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
706 | MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); | 706 | MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); |
707 | MODULE_LICENSE("GPL v2"); | 707 | MODULE_LICENSE("GPL v2"); |
708 | MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); | 708 | MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); |
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c index 1a9707605125..96883693fb7e 100644 --- a/drivers/pinctrl/spear/pinctrl-spear310.c +++ b/drivers/pinctrl/spear/pinctrl-spear310.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver for the ST Microelectronics SPEAr310 pinmux | 2 | * Driver for the ST Microelectronics SPEAr310 pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void) | |||
425 | } | 425 | } |
426 | module_exit(spear310_pinctrl_exit); | 426 | module_exit(spear310_pinctrl_exit); |
427 | 427 | ||
428 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 428 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
429 | MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); | 429 | MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); |
430 | MODULE_LICENSE("GPL v2"); | 430 | MODULE_LICENSE("GPL v2"); |
431 | MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match); | 431 | MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match); |
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c index de726e6c283a..020b1e0bdb3e 100644 --- a/drivers/pinctrl/spear/pinctrl-spear320.c +++ b/drivers/pinctrl/spear/pinctrl-spear320.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver for the ST Microelectronics SPEAr320 pinmux | 2 | * Driver for the ST Microelectronics SPEAr320 pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void) | |||
3462 | } | 3462 | } |
3463 | module_exit(spear320_pinctrl_exit); | 3463 | module_exit(spear320_pinctrl_exit); |
3464 | 3464 | ||
3465 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 3465 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
3466 | MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); | 3466 | MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); |
3467 | MODULE_LICENSE("GPL v2"); | 3467 | MODULE_LICENSE("GPL v2"); |
3468 | MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); | 3468 | MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); |
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c index 91c883bc46a6..0242378f7cb8 100644 --- a/drivers/pinctrl/spear/pinctrl-spear3xx.c +++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Driver for the ST Microelectronics SPEAr3xx pinmux | 2 | * Driver for the ST Microelectronics SPEAr3xx pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h index 5d5fdd8df7b8..31f44347f17c 100644 --- a/drivers/pinctrl/spear/pinctrl-spear3xx.h +++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * Header file for the ST Microelectronics SPEAr3xx pinmux | 2 | * Header file for the ST Microelectronics SPEAr3xx pinmux |
3 | * | 3 | * |
4 | * Copyright (C) 2012 ST Microelectronics | 4 | * Copyright (C) 2012 ST Microelectronics |
5 | * Viresh Kumar <viresh.kumar@st.com> | 5 | * Viresh Kumar <viresh.linux@gmail.com> |
6 | * | 6 | * |
7 | * This file is licensed under the terms of the GNU General Public | 7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any | 8 | * License version 2. This program is licensed "as is" without any |
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 639db4d0aa76..2fd9d36acd15 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * (C) 2009 - Peter Feuerer peter (a) piie.net | 6 | * (C) 2009 - Peter Feuerer peter (a) piie.net |
7 | * http://piie.net | 7 | * http://piie.net |
8 | * 2009 Borislav Petkov <petkovbb@gmail.com> | 8 | * 2009 Borislav Petkov bp (a) alien8.de |
9 | * | 9 | * |
10 | * Inspired by and many thanks to: | 10 | * Inspired by and many thanks to: |
11 | * o acerfand - Rachel Greenham | 11 | * o acerfand - Rachel Greenham |
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index e1b8c54ace5a..a739f5ca936a 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c | |||
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev, | |||
794 | } | 794 | } |
795 | 795 | ||
796 | static struct of_regulator_match ab8500_regulator_matches[] = { | 796 | static struct of_regulator_match ab8500_regulator_matches[] = { |
797 | { .name = "LDO-AUX1", .driver_data = (void *) AB8500_LDO_AUX1, }, | 797 | { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, }, |
798 | { .name = "LDO-AUX2", .driver_data = (void *) AB8500_LDO_AUX2, }, | 798 | { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, }, |
799 | { .name = "LDO-AUX3", .driver_data = (void *) AB8500_LDO_AUX3, }, | 799 | { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, }, |
800 | { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, }, | 800 | { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, }, |
801 | { .name = "LDO-TVOUT", .driver_data = (void *) AB8500_LDO_TVOUT, }, | 801 | { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, }, |
802 | { .name = "LDO-USB", .driver_data = (void *) AB8500_LDO_USB, }, | 802 | { .name = "ab8500_ldo_usb", .driver_data = (void *) AB8500_LDO_USB, }, |
803 | { .name = "LDO-AUDIO", .driver_data = (void *) AB8500_LDO_AUDIO, }, | 803 | { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, }, |
804 | { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, }, | 804 | { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, }, |
805 | { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, }, | 805 | { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, }, |
806 | { .name = "LDO-DMIC", .driver_data = (void *) AB8500_LDO_DMIC, }, | 806 | { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, }, |
807 | { .name = "LDO-ANA", .driver_data = (void *) AB8500_LDO_ANA, }, | 807 | { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, }, |
808 | }; | 808 | }; |
809 | 809 | ||
810 | static __devinit int | 810 | static __devinit int |
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 3660bace123c..e82e7eaac0f1 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c | |||
@@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = { | |||
224 | .of_match_table = of_anatop_regulator_match_tbl, | 224 | .of_match_table = of_anatop_regulator_match_tbl, |
225 | }, | 225 | }, |
226 | .probe = anatop_regulator_probe, | 226 | .probe = anatop_regulator_probe, |
227 | .remove = anatop_regulator_remove, | 227 | .remove = __devexit_p(anatop_regulator_remove), |
228 | }; | 228 | }; |
229 | 229 | ||
230 | static int __init anatop_regulator_init(void) | 230 | static int __init anatop_regulator_init(void) |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7584a74eec8a..09a737c868b5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev, | |||
2050 | return -EINVAL; | 2050 | return -EINVAL; |
2051 | } | 2051 | } |
2052 | 2052 | ||
2053 | if (min_uV < rdev->desc->min_uV) | ||
2054 | min_uV = rdev->desc->min_uV; | ||
2055 | |||
2053 | ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); | 2056 | ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); |
2054 | if (ret < 0) | 2057 | if (ret < 0) |
2055 | return ret; | 2058 | return ret; |
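The core.c hunk above clamps the requested minimum to the regulator's floor before deriving a selector. Below is a minimal standalone sketch of that linear-map math; the names (map_voltage_linear_sketch, struct linear_desc) and the example voltages are illustrative, not the kernel's actual regulator types.

/*
 * Sketch only: mirrors the shape of the clamping fix, compiled as plain C.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

struct linear_desc {
        int min_uV;             /* lowest supported voltage, microvolts */
        unsigned int uV_step;   /* microvolts per selector step */
        unsigned int n_voltages;
};

static int map_voltage_linear_sketch(const struct linear_desc *d,
                                     int min_uV, int max_uV)
{
        int sel;

        /*
         * Clamp to the regulator floor first: without this, a request below
         * the floor makes (min_uV - d->min_uV) negative, and the unsigned
         * division inside DIV_ROUND_UP turns that into a nonsense selector.
         */
        if (min_uV < d->min_uV)
                min_uV = d->min_uV;

        sel = DIV_ROUND_UP(min_uV - d->min_uV, d->uV_step);

        /* Reject selectors whose voltage falls outside the requested window. */
        if ((unsigned int)sel >= d->n_voltages ||
            d->min_uV + (int)(d->uV_step * sel) > max_uV)
                return -1;

        return sel;
}

int main(void)
{
        struct linear_desc d = { .min_uV = 700000, .uV_step = 12500,
                                 .n_voltages = 64 };

        /* A request below the floor now maps cleanly to selector 0. */
        printf("%d\n", map_voltage_linear_sketch(&d, 600000, 900000)); /* 0 */
        printf("%d\n", map_voltage_linear_sketch(&d, 712500, 900000)); /* 1 */
        return 0;
}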
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c index 968f97f3cb3d..9dbb491b6efa 100644 --- a/drivers/regulator/db8500-prcmu.c +++ b/drivers/regulator/db8500-prcmu.c | |||
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev, | |||
452 | } | 452 | } |
453 | 453 | ||
454 | static struct of_regulator_match db8500_regulator_matches[] = { | 454 | static struct of_regulator_match db8500_regulator_matches[] = { |
455 | { .name = "db8500-vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, }, | 455 | { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, }, |
456 | { .name = "db8500-varm", .driver_data = (void *) DB8500_REGULATOR_VARM, }, | 456 | { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, }, |
457 | { .name = "db8500-vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, }, | 457 | { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, }, |
458 | { .name = "db8500-vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, }, | 458 | { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, }, |
459 | { .name = "db8500-vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, }, | 459 | { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, }, |
460 | { .name = "db8500-vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, }, | 460 | { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, }, |
461 | { .name = "db8500-vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, }, | 461 | { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, }, |
462 | { .name = "db8500-vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, }, | 462 | { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, }, |
463 | { .name = "db8500-sva-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, }, | 463 | { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, }, |
464 | { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, }, | 464 | { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, }, |
465 | { .name = "db8500-sva-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, }, | 465 | { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, }, |
466 | { .name = "db8500-sia-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, }, | 466 | { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, }, |
467 | { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, }, | 467 | { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, }, |
468 | { .name = "db8500-sia-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, }, | 468 | { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, }, |
469 | { .name = "db8500-sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, }, | 469 | { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, }, |
470 | { .name = "db8500-b2r2-mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, }, | 470 | { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, }, |
471 | { .name = "db8500-esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, }, | 471 | { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, }, |
472 | { .name = "db8500-esram12-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, }, | 472 | { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, }, |
473 | { .name = "db8500-esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, }, | 473 | { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, }, |
474 | { .name = "db8500-esram34-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, }, | 474 | { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, }, |
475 | }; | 475 | }; |
476 | 476 | ||
477 | static __devinit int | 477 | static __devinit int |
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 9997d7aaca84..242851a4c1a6 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c | |||
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev) | |||
101 | } | 101 | } |
102 | 102 | ||
103 | static int gpio_regulator_set_value(struct regulator_dev *dev, | 103 | static int gpio_regulator_set_value(struct regulator_dev *dev, |
104 | int min, int max) | 104 | int min, int max, unsigned *selector) |
105 | { | 105 | { |
106 | struct gpio_regulator_data *data = rdev_get_drvdata(dev); | 106 | struct gpio_regulator_data *data = rdev_get_drvdata(dev); |
107 | int ptr, target, state, best_val = INT_MAX; | 107 | int ptr, target = 0, state, best_val = INT_MAX; |
108 | 108 | ||
109 | for (ptr = 0; ptr < data->nr_states; ptr++) | 109 | for (ptr = 0; ptr < data->nr_states; ptr++) |
110 | if (data->states[ptr].value < best_val && | 110 | if (data->states[ptr].value < best_val && |
111 | data->states[ptr].value >= min && | 111 | data->states[ptr].value >= min && |
112 | data->states[ptr].value <= max) | 112 | data->states[ptr].value <= max) { |
113 | target = data->states[ptr].gpios; | 113 | target = data->states[ptr].gpios; |
114 | best_val = data->states[ptr].value; | ||
115 | if (selector) | ||
116 | *selector = ptr; | ||
117 | } | ||
114 | 118 | ||
115 | if (best_val == INT_MAX) | 119 | if (best_val == INT_MAX) |
116 | return -EINVAL; | 120 | return -EINVAL; |
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev, | |||
128 | int min_uV, int max_uV, | 132 | int min_uV, int max_uV, |
129 | unsigned *selector) | 133 | unsigned *selector) |
130 | { | 134 | { |
131 | return gpio_regulator_set_value(dev, min_uV, max_uV); | 135 | return gpio_regulator_set_value(dev, min_uV, max_uV, selector); |
132 | } | 136 | } |
133 | 137 | ||
134 | static int gpio_regulator_list_voltage(struct regulator_dev *dev, | 138 | static int gpio_regulator_list_voltage(struct regulator_dev *dev, |
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev, | |||
145 | static int gpio_regulator_set_current_limit(struct regulator_dev *dev, | 149 | static int gpio_regulator_set_current_limit(struct regulator_dev *dev, |
146 | int min_uA, int max_uA) | 150 | int min_uA, int max_uA) |
147 | { | 151 | { |
148 | return gpio_regulator_set_value(dev, min_uA, max_uA); | 152 | return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); |
149 | } | 153 | } |
150 | 154 | ||
151 | static struct regulator_ops gpio_regulator_voltage_ops = { | 155 | static struct regulator_ops gpio_regulator_voltage_ops = { |
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) | |||
286 | 290 | ||
287 | cfg.dev = &pdev->dev; | 291 | cfg.dev = &pdev->dev; |
288 | cfg.init_data = config->init_data; | 292 | cfg.init_data = config->init_data; |
289 | cfg.driver_data = &drvdata; | 293 | cfg.driver_data = drvdata; |
290 | 294 | ||
291 | drvdata->dev = regulator_register(&drvdata->desc, &cfg); | 295 | drvdata->dev = regulator_register(&drvdata->desc, &cfg); |
292 | if (IS_ERR(drvdata->dev)) { | 296 | if (IS_ERR(drvdata->dev)) { |
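The gpio-regulator hunk above makes the selection loop remember the best (lowest in-range) value and report its index through *selector. A self-contained sketch of that loop shape, using stand-in names rather than the driver's real structures:

#include <limits.h>
#include <stdio.h>

struct state {
        int value;      /* voltage (uV) or current (uA) this combination gives */
        int gpios;      /* GPIO level bitmap that selects it */
};

static int pick_state(const struct state *states, int nr_states,
                      int min, int max, unsigned *selector)
{
        int ptr, target = 0, best_val = INT_MAX;

        for (ptr = 0; ptr < nr_states; ptr++) {
                if (states[ptr].value < best_val &&
                    states[ptr].value >= min &&
                    states[ptr].value <= max) {
                        target = states[ptr].gpios;
                        /* Remember the winner so later, higher in-range values
                         * do not overwrite it, and report its index. */
                        best_val = states[ptr].value;
                        if (selector)
                                *selector = ptr;
                }
        }

        if (best_val == INT_MAX)
                return -1;      /* nothing inside [min, max] */

        return target;
}

int main(void)
{
        const struct state states[] = {
                { .value = 1800000, .gpios = 0x0 },
                { .value = 2500000, .gpios = 0x1 },
                { .value = 3300000, .gpios = 0x3 },
        };
        unsigned sel = 0;
        int gpios = pick_state(states, 3, 2000000, 3400000, &sel);

        /* Picks 2.5 V (index 1): the lowest in-range value wins. */
        printf("gpios=0x%x selector=%u\n", (unsigned)gpios, sel);
        return 0;
}

Note that in the pre-fix loop best_val was never updated, so the INT_MAX check rejected every request; the hunk also initializes target and adds the selector reporting.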
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 1f4bb80457b3..9d540cd02dab 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, | |||
259 | config.dev = &client->dev; | 259 | config.dev = &client->dev; |
260 | config.init_data = pdata->regulator; | 260 | config.init_data = pdata->regulator; |
261 | config.driver_data = info; | 261 | config.driver_data = info; |
262 | config.regmap = info->regmap; | ||
262 | 263 | ||
263 | info->regulator = regulator_register(&dcdc_desc, &config); | 264 | info->regulator = regulator_register(&dcdc_desc, &config); |
264 | if (IS_ERR(info->regulator)) { | 265 | if (IS_ERR(info->regulator)) { |
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index c4435f608df7..9b7ca90057d5 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c | |||
@@ -775,9 +775,6 @@ static __devinit int palmas_probe(struct platform_device *pdev) | |||
775 | err_unregister_regulator: | 775 | err_unregister_regulator: |
776 | while (--id >= 0) | 776 | while (--id >= 0) |
777 | regulator_unregister(pmic->rdev[id]); | 777 | regulator_unregister(pmic->rdev[id]); |
778 | kfree(pmic->rdev); | ||
779 | kfree(pmic->desc); | ||
780 | kfree(pmic); | ||
781 | return ret; | 778 | return ret; |
782 | } | 779 | } |
783 | 780 | ||
@@ -788,10 +785,6 @@ static int __devexit palmas_remove(struct platform_device *pdev) | |||
788 | 785 | ||
789 | for (id = 0; id < PALMAS_NUM_REGS; id++) | 786 | for (id = 0; id < PALMAS_NUM_REGS; id++) |
790 | regulator_unregister(pmic->rdev[id]); | 787 | regulator_unregister(pmic->rdev[id]); |
791 | |||
792 | kfree(pmic->rdev); | ||
793 | kfree(pmic->desc); | ||
794 | kfree(pmic); | ||
795 | return 0; | 788 | return 0; |
796 | } | 789 | } |
797 | 790 | ||
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 290d6fc01029..9caadb482178 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c | |||
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev, | |||
451 | 451 | ||
452 | desc = reg_voltage_map[reg_id]; | 452 | desc = reg_voltage_map[reg_id]; |
453 | 453 | ||
454 | if (old_sel < new_sel) | 454 | if ((old_sel < new_sel) && s5m8767->ramp_delay) |
455 | return DIV_ROUND_UP(desc->step * (new_sel - old_sel), | 455 | return DIV_ROUND_UP(desc->step * (new_sel - old_sel), |
456 | s5m8767->ramp_delay * 1000); | 456 | s5m8767->ramp_delay * 1000); |
457 | return 0; | 457 | return 0; |
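The s5m8767 hunk above guards the ramp-time computation against a zero ramp_delay, which would otherwise divide by zero. A small worked sketch of that DIV_ROUND_UP math, with made-up step and ramp values rather than datasheet numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static int voltage_time_us(unsigned step_uV, unsigned old_sel,
                           unsigned new_sel, unsigned ramp_mV_per_us)
{
        /* A zero ramp rate would make the divide below fault, hence the
         * added "&& s5m8767->ramp_delay" check in the driver. */
        if (new_sel <= old_sel || !ramp_mV_per_us)
                return 0;

        return DIV_ROUND_UP(step_uV * (new_sel - old_sel),
                            ramp_mV_per_us * 1000);
}

int main(void)
{
        /* 6.25 mV steps, 8 steps up, 5 mV/us ramp:
         * 50000 uV / 5000 uV-per-us = 10 us. */
        printf("%d us\n", voltage_time_us(6250, 10, 18, 5));
        return 0;
}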
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index 69425c4e86f3..de138e30d3e6 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c | |||
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev) | |||
182 | 182 | ||
183 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | 183 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
184 | if (ret) { | 184 | if (ret) { |
185 | dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret); | 185 | dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret); |
186 | return ret; | 186 | return ret; |
187 | } | 187 | } |
188 | 188 | ||
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8ea7bccc7100..66324ee4678f 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c | |||
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len) | |||
247 | } | 247 | } |
248 | 248 | ||
249 | if (offset + filesz > len) { | 249 | if (offset + filesz > len) { |
250 | dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n", | 250 | dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n", |
251 | offset + filesz, len); | 251 | offset + filesz, len); |
252 | ret = -EINVAL; | 252 | ret = -EINVAL; |
253 | break; | 253 | break; |
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc) | |||
934 | unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); | 934 | unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); |
935 | if (unmapped != entry->len) { | 935 | if (unmapped != entry->len) { |
936 | /* nothing much to do besides complaining */ | 936 | /* nothing much to do besides complaining */ |
937 | dev_err(dev, "failed to unmap %u/%u\n", entry->len, | 937 | dev_err(dev, "failed to unmap %u/%zu\n", entry->len, |
938 | unmapped); | 938 | unmapped); |
939 | } | 939 | } |
940 | 940 | ||
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) | |||
1020 | 1020 | ||
1021 | ehdr = (struct elf32_hdr *)fw->data; | 1021 | ehdr = (struct elf32_hdr *)fw->data; |
1022 | 1022 | ||
1023 | dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size); | 1023 | dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size); |
1024 | 1024 | ||
1025 | /* | 1025 | /* |
1026 | * if enabling an IOMMU isn't relevant for this rproc, this is | 1026 | * if enabling an IOMMU isn't relevant for this rproc, this is |
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) | |||
1041 | 1041 | ||
1042 | /* look for the resource table */ | 1042 | /* look for the resource table */ |
1043 | table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); | 1043 | table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); |
1044 | if (!table) | 1044 | if (!table) { |
1045 | ret = -EINVAL; | ||
1045 | goto clean_up; | 1046 | goto clean_up; |
1047 | } | ||
1046 | 1048 | ||
1047 | /* handle fw resources which are required to boot rproc */ | 1049 | /* handle fw resources which are required to boot rproc */ |
1048 | ret = rproc_handle_boot_rsc(rproc, table, tablesz); | 1050 | ret = rproc_handle_boot_rsc(rproc, table, tablesz); |
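The remoteproc_core hunk above sets ret = -EINVAL before jumping to clean_up when no resource table is found. A compact sketch of why that matters on a goto-style error path; the helper names are illustrative, and -22 simply stands in for -EINVAL:

#include <stdio.h>

static const void *find_rsc_table(int present)
{
        return present ? (const void *)"table" : NULL;
}

static int boot_fw(int table_present)
{
        int ret = 0;
        const void *table;

        table = find_rsc_table(table_present);
        if (!table) {
                ret = -22;      /* without this, the caller would see "success" */
                goto clean_up;
        }

        /* ... handle resources, load segments, start the remote processor ... */
        return 0;

clean_up:
        /* ... undo IOMMU setup, free carveouts ... */
        return ret;
}

int main(void)
{
        printf("%d\n", boot_fw(1));     /* 0 */
        printf("%d\n", boot_fw(0));     /* -22 */
        return 0;
}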
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7d5f56edb8ef..4267789ca995 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev) | |||
910 | 910 | ||
911 | static u32 rtc_handler(void *context) | 911 | static u32 rtc_handler(void *context) |
912 | { | 912 | { |
913 | struct device *dev = context; | ||
914 | |||
915 | pm_wakeup_event(dev, 0); | ||
913 | acpi_clear_event(ACPI_EVENT_RTC); | 916 | acpi_clear_event(ACPI_EVENT_RTC); |
914 | acpi_disable_event(ACPI_EVENT_RTC, 0); | 917 | acpi_disable_event(ACPI_EVENT_RTC, 0); |
915 | return ACPI_INTERRUPT_HANDLED; | 918 | return ACPI_INTERRUPT_HANDLED; |
916 | } | 919 | } |
917 | 920 | ||
918 | static inline void rtc_wake_setup(void) | 921 | static inline void rtc_wake_setup(struct device *dev) |
919 | { | 922 | { |
920 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); | 923 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); |
921 | /* | 924 | /* |
922 | * After the RTC handler is installed, the Fixed_RTC event should | 925 | * After the RTC handler is installed, the Fixed_RTC event should |
923 | * be disabled. Only when the RTC alarm is set will it be enabled. | 926 | * be disabled. Only when the RTC alarm is set will it be enabled. |
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev) | |||
950 | if (acpi_disabled) | 953 | if (acpi_disabled) |
951 | return; | 954 | return; |
952 | 955 | ||
953 | rtc_wake_setup(); | 956 | rtc_wake_setup(dev); |
954 | acpi_rtc_info.wake_on = rtc_wake_on; | 957 | acpi_rtc_info.wake_on = rtc_wake_on; |
955 | acpi_rtc_info.wake_off = rtc_wake_off; | 958 | acpi_rtc_info.wake_off = rtc_wake_off; |
956 | 959 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 6102ef2cb2d8..9d46fcbe7755 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr, | |||
1792 | static inline u8 | 1792 | static inline u8 |
1793 | _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) | 1793 | _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) |
1794 | { | 1794 | { |
1795 | return ioc->cpu_msix_table[smp_processor_id()]; | 1795 | return ioc->cpu_msix_table[raw_smp_processor_id()]; |
1796 | } | 1796 | } |
1797 | 1797 | ||
1798 | /** | 1798 | /** |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 04f80ebf09eb..6986552b47e6 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/version.h> | ||
30 | #include <linux/blkdev.h> | 29 | #include <linux/blkdev.h> |
31 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
32 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
@@ -2477,11 +2476,9 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
2477 | } | 2476 | } |
2478 | 2477 | ||
2479 | cmd = qlt_ctio_to_cmd(vha, handle, ctio); | 2478 | cmd = qlt_ctio_to_cmd(vha, handle, ctio); |
2480 | if (cmd == NULL) { | 2479 | if (cmd == NULL) |
2481 | if (status != CTIO_SUCCESS) | ||
2482 | qlt_term_ctio_exchange(vha, ctio, NULL, status); | ||
2483 | return; | 2480 | return; |
2484 | } | 2481 | |
2485 | se_cmd = &cmd->se_cmd; | 2482 | se_cmd = &cmd->se_cmd; |
2486 | tfo = se_cmd->se_tfo; | 2483 | tfo = se_cmd->se_tfo; |
2487 | 2484 | ||
@@ -2727,10 +2724,12 @@ static void qlt_do_work(struct work_struct *work) | |||
2727 | out_term: | 2724 | out_term: |
2728 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); | 2725 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); |
2729 | /* | 2726 | /* |
2730 | * cmd has not sent to target yet, so pass NULL as the second argument | 2727 | * cmd has not sent to target yet, so pass NULL as the second |
2728 | * argument to qlt_send_term_exchange() and free the memory here. | ||
2731 | */ | 2729 | */ |
2732 | spin_lock_irqsave(&ha->hardware_lock, flags); | 2730 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2733 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); | 2731 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); |
2732 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); | ||
2734 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2733 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2735 | if (sess) | 2734 | if (sess) |
2736 | ha->tgt.tgt_ops->put_sess(sess); | 2735 | ha->tgt.tgt_ops->put_sess(sess); |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 9ec19bc2f0fe..9f9ef1644fd9 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
@@ -919,7 +919,6 @@ struct qla_tgt_srr_ctio { | |||
919 | #define QLA_TGT_XMIT_STATUS 2 | 919 | #define QLA_TGT_XMIT_STATUS 2 |
920 | #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) | 920 | #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) |
921 | 921 | ||
922 | #include <linux/version.h> | ||
923 | 922 | ||
924 | extern struct qla_tgt_data qla_target; | 923 | extern struct qla_tgt_data qla_target; |
925 | /* | 924 | /* |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 436598f57404..6e64314dbbb3 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -137,13 +137,15 @@ static char *tcm_qla2xxx_get_fabric_name(void) | |||
137 | */ | 137 | */ |
138 | static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) | 138 | static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) |
139 | { | 139 | { |
140 | unsigned int i, j, value; | 140 | unsigned int i, j; |
141 | u8 wwn[8]; | 141 | u8 wwn[8]; |
142 | 142 | ||
143 | memset(wwn, 0, sizeof(wwn)); | 143 | memset(wwn, 0, sizeof(wwn)); |
144 | 144 | ||
145 | /* Validate and store the new name */ | 145 | /* Validate and store the new name */ |
146 | for (i = 0, j = 0; i < 16; i++) { | 146 | for (i = 0, j = 0; i < 16; i++) { |
147 | int value; | ||
148 | |||
147 | value = hex_to_bin(*ns++); | 149 | value = hex_to_bin(*ns++); |
148 | if (value >= 0) | 150 | if (value >= 0) |
149 | j = (j << 4) | value; | 151 | j = (j << 4) | value; |
@@ -652,8 +654,8 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) | |||
652 | /* | 654 | /* |
653 | * Called from qla_target.c:qlt_issue_task_mgmt() | 655 | * Called from qla_target.c:qlt_issue_task_mgmt() |
654 | */ | 656 | */ |
655 | int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, | 657 | static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, |
656 | uint8_t tmr_func, uint32_t tag) | 658 | uint8_t tmr_func, uint32_t tag) |
657 | { | 659 | { |
658 | struct qla_tgt_sess *sess = mcmd->sess; | 660 | struct qla_tgt_sess *sess = mcmd->sess; |
659 | struct se_cmd *se_cmd = &mcmd->se_cmd; | 661 | struct se_cmd *se_cmd = &mcmd->se_cmd; |
@@ -762,65 +764,8 @@ static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd, | |||
762 | struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; | 764 | struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; |
763 | struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; | 765 | struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; |
764 | 766 | ||
765 | static int tcm_qla2xxx_setup_nacl_from_rport( | 767 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, |
766 | struct se_portal_group *se_tpg, | 768 | struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); |
767 | struct se_node_acl *se_nacl, | ||
768 | struct tcm_qla2xxx_lport *lport, | ||
769 | struct tcm_qla2xxx_nacl *nacl, | ||
770 | u64 rport_wwnn) | ||
771 | { | ||
772 | struct scsi_qla_host *vha = lport->qla_vha; | ||
773 | struct Scsi_Host *sh = vha->host; | ||
774 | struct fc_host_attrs *fc_host = shost_to_fc_host(sh); | ||
775 | struct fc_rport *rport; | ||
776 | unsigned long flags; | ||
777 | void *node; | ||
778 | int rc; | ||
779 | |||
780 | /* | ||
781 | * Scan the existing rports, and create a session for the | ||
782 | * explict NodeACL is an matching rport->node_name already | ||
783 | * exists. | ||
784 | */ | ||
785 | spin_lock_irqsave(sh->host_lock, flags); | ||
786 | list_for_each_entry(rport, &fc_host->rports, peers) { | ||
787 | if (rport_wwnn != rport->node_name) | ||
788 | continue; | ||
789 | |||
790 | pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n", | ||
791 | rport->node_name, rport->port_id); | ||
792 | nacl->nport_id = rport->port_id; | ||
793 | |||
794 | spin_unlock_irqrestore(sh->host_lock, flags); | ||
795 | |||
796 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); | ||
797 | node = btree_lookup32(&lport->lport_fcport_map, rport->port_id); | ||
798 | if (node) { | ||
799 | rc = btree_update32(&lport->lport_fcport_map, | ||
800 | rport->port_id, se_nacl); | ||
801 | } else { | ||
802 | rc = btree_insert32(&lport->lport_fcport_map, | ||
803 | rport->port_id, se_nacl, | ||
804 | GFP_ATOMIC); | ||
805 | } | ||
806 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | ||
807 | |||
808 | if (rc) { | ||
809 | pr_err("Unable to insert se_nacl into fcport_map"); | ||
810 | WARN_ON(rc > 0); | ||
811 | return rc; | ||
812 | } | ||
813 | |||
814 | pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n", | ||
815 | se_nacl, rport_wwnn, nacl->nport_id); | ||
816 | |||
817 | return 1; | ||
818 | } | ||
819 | spin_unlock_irqrestore(sh->host_lock, flags); | ||
820 | |||
821 | return 0; | ||
822 | } | ||
823 | |||
824 | /* | 769 | /* |
825 | * Expected to be called with struct qla_hw_data->hardware_lock held | 770 | * Expected to be called with struct qla_hw_data->hardware_lock held |
826 | */ | 771 | */ |
@@ -842,11 +787,40 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) | |||
842 | 787 | ||
843 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", | 788 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", |
844 | se_nacl, nacl->nport_wwnn, nacl->nport_id); | 789 | se_nacl, nacl->nport_wwnn, nacl->nport_id); |
790 | /* | ||
791 | * Now clear the se_nacl and session pointers from our HW lport lookup | ||
792 | * table mapping for this initiator's fabric S_ID and LOOP_ID entries. | ||
793 | * | ||
794 | * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> | ||
795 | * target_wait_for_sess_cmds() before the session waits for outstanding | ||
796 | * I/O to complete, to avoid a race between session shutdown execution | ||
797 | * and incoming ATIOs or TMRs picking up a stale se_node_act reference. | ||
798 | */ | ||
799 | tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); | ||
800 | } | ||
801 | |||
802 | static void tcm_qla2xxx_release_session(struct kref *kref) | ||
803 | { | ||
804 | struct se_session *se_sess = container_of(kref, | ||
805 | struct se_session, sess_kref); | ||
806 | |||
807 | qlt_unreg_sess(se_sess->fabric_sess_ptr); | ||
808 | } | ||
809 | |||
810 | static void tcm_qla2xxx_put_session(struct se_session *se_sess) | ||
811 | { | ||
812 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
813 | struct qla_hw_data *ha = sess->vha->hw; | ||
814 | unsigned long flags; | ||
815 | |||
816 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
817 | kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); | ||
818 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
845 | } | 819 | } |
846 | 820 | ||
847 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) | 821 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) |
848 | { | 822 | { |
849 | target_put_session(sess->se_sess); | 823 | tcm_qla2xxx_put_session(sess->se_sess); |
850 | } | 824 | } |
851 | 825 | ||
852 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) | 826 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) |
@@ -859,14 +833,10 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | |||
859 | struct config_group *group, | 833 | struct config_group *group, |
860 | const char *name) | 834 | const char *name) |
861 | { | 835 | { |
862 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; | ||
863 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, | ||
864 | struct tcm_qla2xxx_lport, lport_wwn); | ||
865 | struct se_node_acl *se_nacl, *se_nacl_new; | 836 | struct se_node_acl *se_nacl, *se_nacl_new; |
866 | struct tcm_qla2xxx_nacl *nacl; | 837 | struct tcm_qla2xxx_nacl *nacl; |
867 | u64 wwnn; | 838 | u64 wwnn; |
868 | u32 qla2xxx_nexus_depth; | 839 | u32 qla2xxx_nexus_depth; |
869 | int rc; | ||
870 | 840 | ||
871 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) | 841 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) |
872 | return ERR_PTR(-EINVAL); | 842 | return ERR_PTR(-EINVAL); |
@@ -893,16 +863,6 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | |||
893 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | 863 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); |
894 | nacl->nport_wwnn = wwnn; | 864 | nacl->nport_wwnn = wwnn; |
895 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); | 865 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); |
896 | /* | ||
897 | * Setup a se_nacl handle based on an a matching struct fc_rport setup | ||
898 | * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() | ||
899 | */ | ||
900 | rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport, | ||
901 | nacl, wwnn); | ||
902 | if (rc < 0) { | ||
903 | tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); | ||
904 | return ERR_PTR(rc); | ||
905 | } | ||
906 | 866 | ||
907 | return se_nacl; | 867 | return se_nacl; |
908 | } | 868 | } |
@@ -1390,6 +1350,25 @@ static void tcm_qla2xxx_set_sess_by_loop_id( | |||
1390 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); | 1350 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); |
1391 | } | 1351 | } |
1392 | 1352 | ||
1353 | /* | ||
1354 | * Should always be called with qla_hw_data->hardware_lock held. | ||
1355 | */ | ||
1356 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, | ||
1357 | struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) | ||
1358 | { | ||
1359 | struct se_session *se_sess = sess->se_sess; | ||
1360 | unsigned char be_sid[3]; | ||
1361 | |||
1362 | be_sid[0] = sess->s_id.b.domain; | ||
1363 | be_sid[1] = sess->s_id.b.area; | ||
1364 | be_sid[2] = sess->s_id.b.al_pa; | ||
1365 | |||
1366 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, | ||
1367 | sess, be_sid); | ||
1368 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, | ||
1369 | sess, sess->loop_id); | ||
1370 | } | ||
1371 | |||
1393 | static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | 1372 | static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) |
1394 | { | 1373 | { |
1395 | struct qla_tgt *tgt = sess->tgt; | 1374 | struct qla_tgt *tgt = sess->tgt; |
@@ -1398,8 +1377,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | |||
1398 | struct se_node_acl *se_nacl; | 1377 | struct se_node_acl *se_nacl; |
1399 | struct tcm_qla2xxx_lport *lport; | 1378 | struct tcm_qla2xxx_lport *lport; |
1400 | struct tcm_qla2xxx_nacl *nacl; | 1379 | struct tcm_qla2xxx_nacl *nacl; |
1401 | unsigned char be_sid[3]; | ||
1402 | unsigned long flags; | ||
1403 | 1380 | ||
1404 | BUG_ON(in_interrupt()); | 1381 | BUG_ON(in_interrupt()); |
1405 | 1382 | ||
@@ -1419,21 +1396,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | |||
1419 | return; | 1396 | return; |
1420 | } | 1397 | } |
1421 | target_wait_for_sess_cmds(se_sess, 0); | 1398 | target_wait_for_sess_cmds(se_sess, 0); |
1422 | /* | ||
1423 | * And now clear the se_nacl and session pointers from our HW lport | ||
1424 | * mappings for fabric S_ID and LOOP_ID. | ||
1425 | */ | ||
1426 | memset(&be_sid, 0, 3); | ||
1427 | be_sid[0] = sess->s_id.b.domain; | ||
1428 | be_sid[1] = sess->s_id.b.area; | ||
1429 | be_sid[2] = sess->s_id.b.al_pa; | ||
1430 | |||
1431 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1432 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, | ||
1433 | sess, be_sid); | ||
1434 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, | ||
1435 | sess, sess->loop_id); | ||
1436 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1437 | 1399 | ||
1438 | transport_deregister_session_configfs(sess->se_sess); | 1400 | transport_deregister_session_configfs(sess->se_sess); |
1439 | transport_deregister_session(sess->se_sess); | 1401 | transport_deregister_session(sess->se_sess); |
@@ -1731,6 +1693,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
1731 | .new_cmd_map = NULL, | 1693 | .new_cmd_map = NULL, |
1732 | .check_stop_free = tcm_qla2xxx_check_stop_free, | 1694 | .check_stop_free = tcm_qla2xxx_check_stop_free, |
1733 | .release_cmd = tcm_qla2xxx_release_cmd, | 1695 | .release_cmd = tcm_qla2xxx_release_cmd, |
1696 | .put_session = tcm_qla2xxx_put_session, | ||
1734 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1697 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
1735 | .close_session = tcm_qla2xxx_close_session, | 1698 | .close_session = tcm_qla2xxx_close_session, |
1736 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1699 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
@@ -1779,6 +1742,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | |||
1779 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | 1742 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, |
1780 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | 1743 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, |
1781 | .release_cmd = tcm_qla2xxx_release_cmd, | 1744 | .release_cmd = tcm_qla2xxx_release_cmd, |
1745 | .put_session = tcm_qla2xxx_put_session, | ||
1782 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1746 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
1783 | .close_session = tcm_qla2xxx_close_session, | 1747 | .close_session = tcm_qla2xxx_close_session, |
1784 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1748 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 61c82a345f82..bbbc9c918d4c 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level; | |||
90 | EXPORT_SYMBOL(scsi_logging_level); | 90 | EXPORT_SYMBOL(scsi_logging_level); |
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD) | 93 | /* sd, scsi core and power management need to coordinate flushing async actions */ |
94 | /* sd and scsi_pm need to coordinate flushing async actions */ | ||
95 | LIST_HEAD(scsi_sd_probe_domain); | 94 | LIST_HEAD(scsi_sd_probe_domain); |
96 | EXPORT_SYMBOL(scsi_sd_probe_domain); | 95 | EXPORT_SYMBOL(scsi_sd_probe_domain); |
97 | #endif | ||
98 | 96 | ||
99 | /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. | 97 | /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. |
100 | * You may not alter any existing entry (although adding new ones is | 98 | * You may not alter any existing entry (although adding new ones is |
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 46ef5fe51db5..0c73dd4f43a0 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c | |||
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) | |||
801 | mcspi_dma = &mcspi->dma_channels[spi->chip_select]; | 801 | mcspi_dma = &mcspi->dma_channels[spi->chip_select]; |
802 | 802 | ||
803 | if (!cs) { | 803 | if (!cs) { |
804 | cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL); | 804 | cs = kzalloc(sizeof *cs, GFP_KERNEL); |
805 | if (!cs) | 805 | if (!cs) |
806 | return -ENOMEM; | 806 | return -ENOMEM; |
807 | cs->base = mcspi->base + spi->chip_select * 0x14; | 807 | cs->base = mcspi->base + spi->chip_select * 0x14; |
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) | |||
842 | cs = spi->controller_state; | 842 | cs = spi->controller_state; |
843 | list_del(&cs->node); | 843 | list_del(&cs->node); |
844 | 844 | ||
845 | kfree(cs); | ||
845 | } | 846 | } |
846 | 847 | ||
847 | if (spi->chip_select < spi->master->num_chipselect) { | 848 | if (spi->chip_select < spi->master->num_chipselect) { |
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c index 1c74b0875515..9fa432d74364 100644 --- a/drivers/staging/gdm72xx/netlink_k.c +++ b/drivers/staging/gdm72xx/netlink_k.c | |||
@@ -103,7 +103,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type, | |||
103 | 103 | ||
104 | void netlink_exit(struct sock *sock) | 104 | void netlink_exit(struct sock *sock) |
105 | { | 105 | { |
106 | sock_release(sock->sk_socket); | 106 | netlink_kernel_release(sock); |
107 | } | 107 | } |
108 | 108 | ||
109 | int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) | 109 | int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) |
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c index 4e7ef0e6b79c..d46764b5aaba 100644 --- a/drivers/staging/ramster/zcache-main.c +++ b/drivers/staging/ramster/zcache-main.c | |||
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
3002 | return oid; | 3002 | return oid; |
3003 | } | 3003 | } |
3004 | 3004 | ||
3005 | static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | 3005 | static int zcache_frontswap_store(unsigned type, pgoff_t offset, |
3006 | struct page *page) | 3006 | struct page *page) |
3007 | { | 3007 | { |
3008 | u64 ind64 = (u64)offset; | 3008 | u64 ind64 = (u64)offset; |
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | |||
3025 | 3025 | ||
3026 | /* returns 0 if the page was successfully gotten from frontswap, -1 if | 3026 | /* returns 0 if the page was successfully gotten from frontswap, -1 if |
3027 | * was not present (should never happen!) */ | 3027 | * was not present (should never happen!) */ |
3028 | static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, | 3028 | static int zcache_frontswap_load(unsigned type, pgoff_t offset, |
3029 | struct page *page) | 3029 | struct page *page) |
3030 | { | 3030 | { |
3031 | u64 ind64 = (u64)offset; | 3031 | u64 ind64 = (u64)offset; |
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored) | |||
3080 | } | 3080 | } |
3081 | 3081 | ||
3082 | static struct frontswap_ops zcache_frontswap_ops = { | 3082 | static struct frontswap_ops zcache_frontswap_ops = { |
3083 | .put_page = zcache_frontswap_put_page, | 3083 | .store = zcache_frontswap_store, |
3084 | .get_page = zcache_frontswap_get_page, | 3084 | .load = zcache_frontswap_load, |
3085 | .invalidate_page = zcache_frontswap_flush_page, | 3085 | .invalidate_page = zcache_frontswap_flush_page, |
3086 | .invalidate_area = zcache_frontswap_flush_area, | 3086 | .invalidate_area = zcache_frontswap_flush_area, |
3087 | .init = zcache_frontswap_init | 3087 | .init = zcache_frontswap_init |
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index f9c32cae64e7..c758c40e0c85 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c | |||
@@ -100,6 +100,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = { | |||
100 | /* - */ | 100 | /* - */ |
101 | {USB_DEVICE(0x20F4, 0x646B)}, | 101 | {USB_DEVICE(0x20F4, 0x646B)}, |
102 | {USB_DEVICE(0x083A, 0xC512)}, | 102 | {USB_DEVICE(0x083A, 0xC512)}, |
103 | {USB_DEVICE(0x25D4, 0x4CA1)}, | ||
104 | {USB_DEVICE(0x25D4, 0x4CAB)}, | ||
103 | 105 | ||
104 | /* RTL8191SU */ | 106 | /* RTL8191SU */ |
105 | /* Realtek */ | 107 | /* Realtek */ |
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 174861f93b42..c9e08bbeb519 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c | |||
@@ -1839,7 +1839,7 @@ static int zcache_frontswap_poolid = -1; | |||
1839 | * Swizzling increases objects per swaptype, increasing tmem concurrency | 1839 | * Swizzling increases objects per swaptype, increasing tmem concurrency |
1840 | * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS | 1840 | * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS |
1841 | * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from | 1841 | * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from |
1842 | * frontswap_get_page(), but has side-effects. Hence using 8. | 1842 | * frontswap_load(), but has side-effects. Hence using 8. |
1843 | */ | 1843 | */ |
1844 | #define SWIZ_BITS 8 | 1844 | #define SWIZ_BITS 8 |
1845 | #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) | 1845 | #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) |
@@ -1853,7 +1853,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
1853 | return oid; | 1853 | return oid; |
1854 | } | 1854 | } |
1855 | 1855 | ||
1856 | static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | 1856 | static int zcache_frontswap_store(unsigned type, pgoff_t offset, |
1857 | struct page *page) | 1857 | struct page *page) |
1858 | { | 1858 | { |
1859 | u64 ind64 = (u64)offset; | 1859 | u64 ind64 = (u64)offset; |
@@ -1874,7 +1874,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, | |||
1874 | 1874 | ||
1875 | /* returns 0 if the page was successfully gotten from frontswap, -1 if | 1875 | /* returns 0 if the page was successfully gotten from frontswap, -1 if |
1876 | * was not present (should never happen!) */ | 1876 | * was not present (should never happen!) */ |
1877 | static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, | 1877 | static int zcache_frontswap_load(unsigned type, pgoff_t offset, |
1878 | struct page *page) | 1878 | struct page *page) |
1879 | { | 1879 | { |
1880 | u64 ind64 = (u64)offset; | 1880 | u64 ind64 = (u64)offset; |
@@ -1923,8 +1923,8 @@ static void zcache_frontswap_init(unsigned ignored) | |||
1923 | } | 1923 | } |
1924 | 1924 | ||
1925 | static struct frontswap_ops zcache_frontswap_ops = { | 1925 | static struct frontswap_ops zcache_frontswap_ops = { |
1926 | .put_page = zcache_frontswap_put_page, | 1926 | .store = zcache_frontswap_store, |
1927 | .get_page = zcache_frontswap_get_page, | 1927 | .load = zcache_frontswap_load, |
1928 | .invalidate_page = zcache_frontswap_flush_page, | 1928 | .invalidate_page = zcache_frontswap_flush_page, |
1929 | .invalidate_area = zcache_frontswap_flush_area, | 1929 | .invalidate_area = zcache_frontswap_flush_area, |
1930 | .init = zcache_frontswap_init | 1930 | .init = zcache_frontswap_init |
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 37c609898f84..7e6136e2ce81 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
@@ -587,14 +587,14 @@ static void sbp_management_request_logout( | |||
587 | { | 587 | { |
588 | struct sbp_tport *tport = agent->tport; | 588 | struct sbp_tport *tport = agent->tport; |
589 | struct sbp_tpg *tpg = tport->tpg; | 589 | struct sbp_tpg *tpg = tport->tpg; |
590 | int login_id; | 590 | int id; |
591 | struct sbp_login_descriptor *login; | 591 | struct sbp_login_descriptor *login; |
592 | 592 | ||
593 | login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); | 593 | id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); |
594 | 594 | ||
595 | login = sbp_login_find_by_id(tpg, login_id); | 595 | login = sbp_login_find_by_id(tpg, id); |
596 | if (!login) { | 596 | if (!login) { |
597 | pr_warn("cannot find login: %d\n", login_id); | 597 | pr_warn("cannot find login: %d\n", id); |
598 | 598 | ||
599 | req->status.status = cpu_to_be32( | 599 | req->status.status = cpu_to_be32( |
600 | STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | | 600 | STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index e624b836469c..91799973081a 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
374 | 374 | ||
375 | out: | 375 | out: |
376 | transport_kunmap_data_sg(cmd); | 376 | transport_kunmap_data_sg(cmd); |
377 | target_complete_cmd(cmd, GOOD); | 377 | if (!rc) |
378 | return 0; | 378 | target_complete_cmd(cmd, GOOD); |
379 | return rc; | ||
379 | } | 380 | } |
380 | 381 | ||
381 | static inline int core_alua_state_nonoptimized( | 382 | static inline int core_alua_state_nonoptimized( |
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 686dba189f8e..9f99d0404908 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice( | |||
133 | ret = PTR_ERR(dev_p); | 133 | ret = PTR_ERR(dev_p); |
134 | goto fail; | 134 | goto fail; |
135 | } | 135 | } |
136 | |||
137 | /* O_DIRECT too? */ | ||
138 | flags = O_RDWR | O_CREAT | O_LARGEFILE; | ||
139 | |||
140 | /* | 136 | /* |
141 | * If fd_buffered_io=1 has not been set explicitly (the default), | 137 | * Use O_DSYNC by default instead of O_SYNC to forgo syncing |
142 | * use O_SYNC to force FILEIO writes to disk. | 138 | * of pure timestamp updates. |
143 | */ | 139 | */ |
144 | if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) | 140 | flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; |
145 | flags |= O_SYNC; | ||
146 | 141 | ||
147 | file = filp_open(dev_p, flags, 0600); | 142 | file = filp_open(dev_p, flags, 0600); |
148 | if (IS_ERR(file)) { | 143 | if (IS_ERR(file)) { |
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) | |||
380 | } | 375 | } |
381 | } | 376 | } |
382 | 377 | ||
383 | static void fd_emulate_write_fua(struct se_cmd *cmd) | ||
384 | { | ||
385 | struct se_device *dev = cmd->se_dev; | ||
386 | struct fd_dev *fd_dev = dev->dev_ptr; | ||
387 | loff_t start = cmd->t_task_lba * | ||
388 | dev->se_sub_dev->se_dev_attrib.block_size; | ||
389 | loff_t end = start + cmd->data_length; | ||
390 | int ret; | ||
391 | |||
392 | pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", | ||
393 | cmd->t_task_lba, cmd->data_length); | ||
394 | |||
395 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
396 | if (ret != 0) | ||
397 | pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); | ||
398 | } | ||
399 | |||
400 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | 378 | static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, |
401 | u32 sgl_nents, enum dma_data_direction data_direction) | 379 | u32 sgl_nents, enum dma_data_direction data_direction) |
402 | { | 380 | { |
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, | |||
411 | ret = fd_do_readv(cmd, sgl, sgl_nents); | 389 | ret = fd_do_readv(cmd, sgl, sgl_nents); |
412 | } else { | 390 | } else { |
413 | ret = fd_do_writev(cmd, sgl, sgl_nents); | 391 | ret = fd_do_writev(cmd, sgl, sgl_nents); |
414 | 392 | /* | |
393 | * Perform implict vfs_fsync_range() for fd_do_writev() ops | ||
393 | * Perform implicit vfs_fsync_range() for fd_do_writev() ops | ||
394 | * for SCSI WRITEs with Forced Unit Access (FUA) set. | ||
395 | * Allow this to happen independent of WCE=0 setting. | ||
396 | */ | ||
415 | if (ret > 0 && | 397 | if (ret > 0 && |
416 | dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && | ||
417 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && | 398 | dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && |
418 | (cmd->se_cmd_flags & SCF_FUA)) { | 399 | (cmd->se_cmd_flags & SCF_FUA)) { |
419 | /* | 400 | struct fd_dev *fd_dev = dev->dev_ptr; |
420 | * We might need to be a bit smarter here | 401 | loff_t start = cmd->t_task_lba * |
421 | * and return some sense data to let the initiator | 402 | dev->se_sub_dev->se_dev_attrib.block_size; |
422 | * know the FUA WRITE cache sync failed..? | 403 | loff_t end = start + cmd->data_length; |
423 | */ | ||
424 | fd_emulate_write_fua(cmd); | ||
425 | } | ||
426 | 404 | ||
405 | vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
406 | } | ||
427 | } | 407 | } |
428 | 408 | ||
429 | if (ret < 0) { | 409 | if (ret < 0) { |
@@ -442,7 +422,6 @@ enum { | |||
442 | static match_table_t tokens = { | 422 | static match_table_t tokens = { |
443 | {Opt_fd_dev_name, "fd_dev_name=%s"}, | 423 | {Opt_fd_dev_name, "fd_dev_name=%s"}, |
444 | {Opt_fd_dev_size, "fd_dev_size=%s"}, | 424 | {Opt_fd_dev_size, "fd_dev_size=%s"}, |
445 | {Opt_fd_buffered_io, "fd_buffered_io=%d"}, | ||
446 | {Opt_err, NULL} | 425 | {Opt_err, NULL} |
447 | }; | 426 | }; |
448 | 427 | ||
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params( | |||
454 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; | 433 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; |
455 | char *orig, *ptr, *arg_p, *opts; | 434 | char *orig, *ptr, *arg_p, *opts; |
456 | substring_t args[MAX_OPT_ARGS]; | 435 | substring_t args[MAX_OPT_ARGS]; |
457 | int ret = 0, arg, token; | 436 | int ret = 0, token; |
458 | 437 | ||
459 | opts = kstrdup(page, GFP_KERNEL); | 438 | opts = kstrdup(page, GFP_KERNEL); |
460 | if (!opts) | 439 | if (!opts) |
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params( | |||
498 | " bytes\n", fd_dev->fd_dev_size); | 477 | " bytes\n", fd_dev->fd_dev_size); |
499 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; | 478 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; |
500 | break; | 479 | break; |
501 | case Opt_fd_buffered_io: | ||
502 | match_int(args, &arg); | ||
503 | if (arg != 1) { | ||
504 | pr_err("bogus fd_buffered_io=%d value\n", arg); | ||
505 | ret = -EINVAL; | ||
506 | goto out; | ||
507 | } | ||
508 | |||
509 | pr_debug("FILEIO: Using buffered I/O" | ||
510 | " operations for struct fd_dev\n"); | ||
511 | |||
512 | fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; | ||
513 | break; | ||
514 | default: | 480 | default: |
515 | break; | 481 | break; |
516 | } | 482 | } |
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params( | |||
542 | ssize_t bl = 0; | 508 | ssize_t bl = 0; |
543 | 509 | ||
544 | bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); | 510 | bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); |
545 | bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", | 511 | bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n", |
546 | fd_dev->fd_dev_name, fd_dev->fd_dev_size, | 512 | fd_dev->fd_dev_name, fd_dev->fd_dev_size); |
547 | (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? | ||
548 | "Buffered" : "Synchronous"); | ||
549 | return bl; | 513 | return bl; |
550 | } | 514 | } |
551 | 515 | ||
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index fbd59ef7d8be..70ce7fd7111d 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | #define FBDF_HAS_PATH 0x01 | 15 | #define FBDF_HAS_PATH 0x01 |
16 | #define FBDF_HAS_SIZE 0x02 | 16 | #define FBDF_HAS_SIZE 0x02 |
17 | #define FDBD_USE_BUFFERED_IO 0x04 | ||
18 | 17 | ||
19 | struct fd_dev { | 18 | struct fd_dev { |
20 | u32 fbd_flags; | 19 | u32 fbd_flags; |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b05fdc0c05d3..634d0f31a28c 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -315,7 +315,7 @@ void transport_register_session( | |||
315 | } | 315 | } |
316 | EXPORT_SYMBOL(transport_register_session); | 316 | EXPORT_SYMBOL(transport_register_session); |
317 | 317 | ||
318 | static void target_release_session(struct kref *kref) | 318 | void target_release_session(struct kref *kref) |
319 | { | 319 | { |
320 | struct se_session *se_sess = container_of(kref, | 320 | struct se_session *se_sess = container_of(kref, |
321 | struct se_session, sess_kref); | 321 | struct se_session, sess_kref); |
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session); | |||
332 | 332 | ||
333 | void target_put_session(struct se_session *se_sess) | 333 | void target_put_session(struct se_session *se_sess) |
334 | { | 334 | { |
335 | struct se_portal_group *tpg = se_sess->se_tpg; | ||
336 | |||
337 | if (tpg->se_tpg_tfo->put_session != NULL) { | ||
338 | tpg->se_tpg_tfo->put_session(se_sess); | ||
339 | return; | ||
340 | } | ||
335 | kref_put(&se_sess->sess_kref, target_release_session); | 341 | kref_put(&se_sess->sess_kref, target_release_session); |
336 | } | 342 | } |
337 | EXPORT_SYMBOL(target_put_session); | 343 | EXPORT_SYMBOL(target_put_session); |
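Note: target_put_session() now gives the fabric a chance to intercept the final put through an optional put_session() method before falling back to the generic kref_put(). A fabric that must drop the reference from process context might wire it up roughly as below; the demo_* names and workqueue are hypothetical, only the .put_session member and se_sess->fabric_sess_ptr come from the tree:

	#include <linux/workqueue.h>
	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	static struct workqueue_struct *demo_wq;	/* hypothetical */

	struct demo_session {				/* hypothetical */
		struct work_struct release_work;
	};

	/* Hypothetical fabric hook: defer the real release to a worker,
	 * which performs the final put outside the caller's context. */
	static void demo_put_session(struct se_session *se_sess)
	{
		struct demo_session *sess = se_sess->fabric_sess_ptr;

		queue_work(demo_wq, &sess->release_work);
	}

	static struct target_core_fabric_ops demo_fabric_ops = {
		/* ... other mandatory callbacks ... */
		.put_session = demo_put_session,
	};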
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index d3d91dae065c..944eaeb8e0cf 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void) | |||
214 | /* already configured */ | 214 | /* already configured */ |
215 | if (info->intf != NULL) | 215 | if (info->intf != NULL) |
216 | return 0; | 216 | return 0; |
217 | 217 | /* | |
218 | * If the toolstack (or the hypervisor) hasn't set these values, the | ||
219 | * default value is 0. Even though mfn = 0 and evtchn = 0 are | ||
220 | * theoretically correct values, in practice they never are and they | ||
221 | * mean that a legacy toolstack hasn't initialized the pv console correctly. | ||
222 | */ | ||
218 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); | 223 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); |
219 | if (r < 0) { | 224 | if (r < 0 || v == 0) |
220 | kfree(info); | 225 | goto err; |
221 | return -ENODEV; | ||
222 | } | ||
223 | info->evtchn = v; | 226 | info->evtchn = v; |
224 | hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); | 227 | v = 0; |
225 | if (r < 0) { | 228 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); |
226 | kfree(info); | 229 | if (r < 0 || v == 0) |
227 | return -ENODEV; | 230 | goto err; |
228 | } | ||
229 | mfn = v; | 231 | mfn = v; |
230 | info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); | 232 | info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); |
231 | if (info->intf == NULL) { | 233 | if (info->intf == NULL) |
232 | kfree(info); | 234 | goto err; |
233 | return -ENODEV; | ||
234 | } | ||
235 | info->vtermno = HVC_COOKIE; | 235 | info->vtermno = HVC_COOKIE; |
236 | 236 | ||
237 | spin_lock(&xencons_lock); | 237 | spin_lock(&xencons_lock); |
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void) | |||
239 | spin_unlock(&xencons_lock); | 239 | spin_unlock(&xencons_lock); |
240 | 240 | ||
241 | return 0; | 241 | return 0; |
242 | err: | ||
243 | kfree(info); | ||
244 | return -ENODEV; | ||
242 | } | 245 | } |
243 | 246 | ||
244 | static int xen_pv_console_init(void) | 247 | static int xen_pv_console_init(void) |
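Note: two things happen in the hvc_xen hunk: a zero event channel or PFN is now treated as "legacy toolstack never set it up" (HVM parameters default to 0), and the three copies of kfree(info); return -ENODEV; collapse into a single err: label. The control-flow shape, with probe_param(), the PARAM_* ids and struct demo_info standing in as illustrative placeholders for the real Xen calls:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_info {			/* illustrative */
		unsigned int evtchn;
	};

	enum { PARAM_EVTCHN, PARAM_PFN };	/* illustrative */
	static int probe_param(int id, u64 *v);	/* stands in for hvm_get_parameter() */

	static int demo_console_init(void)
	{
		struct demo_info *info;
		u64 v = 0;
		int r;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		r = probe_param(PARAM_EVTCHN, &v);
		if (r < 0 || v == 0)		/* 0 == never configured */
			goto err;
		info->evtchn = v;

		v = 0;
		r = probe_param(PARAM_PFN, &v);
		if (r < 0 || v == 0)
			goto err;
		/* ... map the ring, register the console ... */

		return 0;
	err:
		kfree(info);			/* single cleanup point */
		return -ENODEV;
	}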
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c index 47d061b9ad4d..6e1958a325bd 100644 --- a/drivers/tty/serial/8250/8250.c +++ b/drivers/tty/serial/8250/8250.c | |||
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port * | |||
3113 | 3113 | ||
3114 | /** | 3114 | /** |
3115 | * serial8250_register_8250_port - register a serial port | 3115 | * serial8250_register_8250_port - register a serial port |
3116 | * @port: serial port template | 3116 | * @up: serial port template |
3117 | * | 3117 | * |
3118 | * Configure the serial port specified by the request. If the | 3118 | * Configure the serial port specified by the request. If the |
3119 | * port exists and is in use, it is hung up and unregistered | 3119 | * port exists and is in use, it is hung up and unregistered |
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 4ad721fb8405..c17923ec6e95 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
@@ -133,6 +133,10 @@ struct pl011_dmatx_data { | |||
133 | struct uart_amba_port { | 133 | struct uart_amba_port { |
134 | struct uart_port port; | 134 | struct uart_port port; |
135 | struct clk *clk; | 135 | struct clk *clk; |
136 | /* Two optional pin states - default & sleep */ | ||
137 | struct pinctrl *pinctrl; | ||
138 | struct pinctrl_state *pins_default; | ||
139 | struct pinctrl_state *pins_sleep; | ||
136 | const struct vendor_data *vendor; | 140 | const struct vendor_data *vendor; |
137 | unsigned int dmacr; /* dma control reg */ | 141 | unsigned int dmacr; /* dma control reg */ |
138 | unsigned int im; /* interrupt mask */ | 142 | unsigned int im; /* interrupt mask */ |
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port) | |||
1312 | unsigned int cr; | 1316 | unsigned int cr; |
1313 | int retval; | 1317 | int retval; |
1314 | 1318 | ||
1319 | /* Optionally enable pins to be muxed in and configured */ | ||
1320 | if (!IS_ERR(uap->pins_default)) { | ||
1321 | retval = pinctrl_select_state(uap->pinctrl, uap->pins_default); | ||
1322 | if (retval) | ||
1323 | dev_err(port->dev, | ||
1324 | "could not set default pins\n"); | ||
1325 | } | ||
1326 | |||
1315 | retval = clk_prepare(uap->clk); | 1327 | retval = clk_prepare(uap->clk); |
1316 | if (retval) | 1328 | if (retval) |
1317 | goto out; | 1329 | goto out; |
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port) | |||
1420 | { | 1432 | { |
1421 | struct uart_amba_port *uap = (struct uart_amba_port *)port; | 1433 | struct uart_amba_port *uap = (struct uart_amba_port *)port; |
1422 | unsigned int cr; | 1434 | unsigned int cr; |
1435 | int retval; | ||
1423 | 1436 | ||
1424 | /* | 1437 | /* |
1425 | * disable all interrupts | 1438 | * disable all interrupts |
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port) | |||
1462 | */ | 1475 | */ |
1463 | clk_disable(uap->clk); | 1476 | clk_disable(uap->clk); |
1464 | clk_unprepare(uap->clk); | 1477 | clk_unprepare(uap->clk); |
1478 | /* Optionally let pins go into sleep states */ | ||
1479 | if (!IS_ERR(uap->pins_sleep)) { | ||
1480 | retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep); | ||
1481 | if (retval) | ||
1482 | dev_err(port->dev, | ||
1483 | "could not set pins to sleep state\n"); | ||
1484 | } | ||
1485 | |||
1465 | 1486 | ||
1466 | if (uap->port.dev->platform_data) { | 1487 | if (uap->port.dev->platform_data) { |
1467 | struct amba_pl011_data *plat; | 1488 | struct amba_pl011_data *plat; |
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options) | |||
1792 | if (!uap) | 1813 | if (!uap) |
1793 | return -ENODEV; | 1814 | return -ENODEV; |
1794 | 1815 | ||
1816 | /* Allow pins to be muxed in and configured */ | ||
1817 | if (!IS_ERR(uap->pins_default)) { | ||
1818 | ret = pinctrl_select_state(uap->pinctrl, uap->pins_default); | ||
1819 | if (ret) | ||
1820 | dev_err(uap->port.dev, | ||
1821 | "could not set default pins\n"); | ||
1822 | } | ||
1823 | |||
1795 | ret = clk_prepare(uap->clk); | 1824 | ret = clk_prepare(uap->clk); |
1796 | if (ret) | 1825 | if (ret) |
1797 | return ret; | 1826 | return ret; |
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) | |||
1844 | { | 1873 | { |
1845 | struct uart_amba_port *uap; | 1874 | struct uart_amba_port *uap; |
1846 | struct vendor_data *vendor = id->data; | 1875 | struct vendor_data *vendor = id->data; |
1847 | struct pinctrl *pinctrl; | ||
1848 | void __iomem *base; | 1876 | void __iomem *base; |
1849 | int i, ret; | 1877 | int i, ret; |
1850 | 1878 | ||
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) | |||
1869 | goto free; | 1897 | goto free; |
1870 | } | 1898 | } |
1871 | 1899 | ||
1872 | pinctrl = devm_pinctrl_get_select_default(&dev->dev); | 1900 | uap->pinctrl = devm_pinctrl_get(&dev->dev); |
1873 | if (IS_ERR(pinctrl)) { | 1901 | if (IS_ERR(uap->pinctrl)) { |
1874 | ret = PTR_ERR(pinctrl); | 1902 | ret = PTR_ERR(uap->pinctrl); |
1875 | goto unmap; | 1903 | goto unmap; |
1876 | } | 1904 | } |
1905 | uap->pins_default = pinctrl_lookup_state(uap->pinctrl, | ||
1906 | PINCTRL_STATE_DEFAULT); | ||
1907 | if (IS_ERR(uap->pins_default)) | ||
1908 | dev_err(&dev->dev, "could not get default pinstate\n"); | ||
1909 | |||
1910 | uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl, | ||
1911 | PINCTRL_STATE_SLEEP); | ||
1912 | if (IS_ERR(uap->pins_sleep)) | ||
1913 | dev_dbg(&dev->dev, "could not get sleep pinstate\n"); | ||
1877 | 1914 | ||
1878 | uap->clk = clk_get(&dev->dev, NULL); | 1915 | uap->clk = clk_get(&dev->dev, NULL); |
1879 | if (IS_ERR(uap->clk)) { | 1916 | if (IS_ERR(uap->clk)) { |
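Note: the PL011 driver now fetches its pinctrl handle once at probe and looks up the optional "default" and "sleep" states, selecting them in startup/console setup and in shutdown respectively; keeping the handle in the port structure is what lets shutdown reuse it without another devm_pinctrl_get(). The consumer-API sequence in condensed form (the demo_uart container is illustrative):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	struct demo_uart {			/* illustrative container */
		struct pinctrl *pinctrl;
		struct pinctrl_state *pins_default;
		struct pinctrl_state *pins_sleep;
	};

	static int demo_uart_probe_pins(struct demo_uart *u, struct device *dev)
	{
		u->pinctrl = devm_pinctrl_get(dev);
		if (IS_ERR(u->pinctrl))
			return PTR_ERR(u->pinctrl);

		/* Both states are optional: a missing one simply stays an
		 * ERR_PTR and the select calls are skipped later. */
		u->pins_default = pinctrl_lookup_state(u->pinctrl,
						       PINCTRL_STATE_DEFAULT);
		u->pins_sleep = pinctrl_lookup_state(u->pinctrl,
						     PINCTRL_STATE_SLEEP);
		return 0;
	}

	static void demo_uart_pins_idle(struct demo_uart *u)
	{
		/* Mirror of pl011_shutdown(): drop into the sleep config. */
		if (!IS_ERR(u->pins_sleep))
			pinctrl_select_state(u->pinctrl, u->pins_sleep);
	}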
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index 34bd345da775..6ae2a58d62f2 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c | |||
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state) | |||
466 | spin_unlock_irqrestore(&up->port.lock, flags); | 466 | spin_unlock_irqrestore(&up->port.lock, flags); |
467 | } | 467 | } |
468 | 468 | ||
469 | #if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL) | 469 | #if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL) |
470 | /* | 470 | /* |
471 | * Wait for transmitter & holding register to empty | 471 | * Wait for transmitter & holding register to empty |
472 | */ | 472 | */ |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 4604153b7954..1bd9163bc118 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
2179 | return 0; | 2179 | return 0; |
2180 | } | 2180 | } |
2181 | 2181 | ||
2182 | static void sci_cleanup_single(struct sci_port *port) | ||
2183 | { | ||
2184 | sci_free_gpios(port); | ||
2185 | |||
2186 | clk_put(port->iclk); | ||
2187 | clk_put(port->fclk); | ||
2188 | |||
2189 | pm_runtime_disable(port->port.dev); | ||
2190 | } | ||
2191 | |||
2182 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 2192 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
2183 | static void serial_console_putchar(struct uart_port *port, int ch) | 2193 | static void serial_console_putchar(struct uart_port *port, int ch) |
2184 | { | 2194 | { |
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev) | |||
2360 | cpufreq_unregister_notifier(&port->freq_transition, | 2370 | cpufreq_unregister_notifier(&port->freq_transition, |
2361 | CPUFREQ_TRANSITION_NOTIFIER); | 2371 | CPUFREQ_TRANSITION_NOTIFIER); |
2362 | 2372 | ||
2363 | sci_free_gpios(port); | ||
2364 | |||
2365 | uart_remove_one_port(&sci_uart_driver, &port->port); | 2373 | uart_remove_one_port(&sci_uart_driver, &port->port); |
2366 | 2374 | ||
2367 | clk_put(port->iclk); | 2375 | sci_cleanup_single(port); |
2368 | clk_put(port->fclk); | ||
2369 | 2376 | ||
2370 | pm_runtime_disable(&dev->dev); | ||
2371 | return 0; | 2377 | return 0; |
2372 | } | 2378 | } |
2373 | 2379 | ||
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev, | |||
2385 | index+1, SCI_NPORTS); | 2391 | index+1, SCI_NPORTS); |
2386 | dev_notice(&dev->dev, "Consider bumping " | 2392 | dev_notice(&dev->dev, "Consider bumping " |
2387 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); | 2393 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); |
2388 | return 0; | 2394 | return -EINVAL; |
2389 | } | 2395 | } |
2390 | 2396 | ||
2391 | ret = sci_init_single(dev, sciport, index, p); | 2397 | ret = sci_init_single(dev, sciport, index, p); |
2392 | if (ret) | 2398 | if (ret) |
2393 | return ret; | 2399 | return ret; |
2394 | 2400 | ||
2395 | return uart_add_one_port(&sci_uart_driver, &sciport->port); | 2401 | ret = uart_add_one_port(&sci_uart_driver, &sciport->port); |
2402 | if (ret) { | ||
2403 | sci_cleanup_single(sciport); | ||
2404 | return ret; | ||
2405 | } | ||
2406 | |||
2407 | return 0; | ||
2396 | } | 2408 | } |
2397 | 2409 | ||
2398 | static int __devinit sci_probe(struct platform_device *dev) | 2410 | static int __devinit sci_probe(struct platform_device *dev) |
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev) | |||
2413 | 2425 | ||
2414 | ret = sci_probe_single(dev, dev->id, p, sp); | 2426 | ret = sci_probe_single(dev, dev->id, p, sp); |
2415 | if (ret) | 2427 | if (ret) |
2416 | goto err_unreg; | 2428 | return ret; |
2417 | 2429 | ||
2418 | sp->freq_transition.notifier_call = sci_notifier; | 2430 | sp->freq_transition.notifier_call = sci_notifier; |
2419 | 2431 | ||
2420 | ret = cpufreq_register_notifier(&sp->freq_transition, | 2432 | ret = cpufreq_register_notifier(&sp->freq_transition, |
2421 | CPUFREQ_TRANSITION_NOTIFIER); | 2433 | CPUFREQ_TRANSITION_NOTIFIER); |
2422 | if (unlikely(ret < 0)) | 2434 | if (unlikely(ret < 0)) { |
2423 | goto err_unreg; | 2435 | sci_cleanup_single(sp); |
2436 | return ret; | ||
2437 | } | ||
2424 | 2438 | ||
2425 | #ifdef CONFIG_SH_STANDARD_BIOS | 2439 | #ifdef CONFIG_SH_STANDARD_BIOS |
2426 | sh_bios_gdb_detach(); | 2440 | sh_bios_gdb_detach(); |
2427 | #endif | 2441 | #endif |
2428 | 2442 | ||
2429 | return 0; | 2443 | return 0; |
2430 | |||
2431 | err_unreg: | ||
2432 | sci_remove(dev); | ||
2433 | return ret; | ||
2434 | } | 2444 | } |
2435 | 2445 | ||
2436 | static int sci_suspend(struct device *dev) | 2446 | static int sci_suspend(struct device *dev) |
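Note: the sh-sci changes pull the teardown steps into sci_cleanup_single() so the probe error paths no longer route through sci_remove(), which would also try to detach a uart port that was never added. The general shape of pairing an init helper with a cleanup twin, with hypothetical demo_* names (demo_init_single() and demo_uart_driver are assumed, not real APIs):

	#include <linux/clk.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>
	#include <linux/serial_core.h>

	struct demo_port {			/* illustrative */
		struct uart_port port;
		struct clk *iclk, *fclk;
	};

	static struct uart_driver demo_uart_driver;		/* illustrative */
	static int demo_init_single(struct platform_device *pdev,
				    struct demo_port *port);	/* illustrative */

	static void demo_cleanup_single(struct demo_port *port)
	{
		/* Exact mirror of the init helper; safe to call from a
		 * failed probe or from remove(). */
		clk_put(port->iclk);
		clk_put(port->fclk);
		pm_runtime_disable(port->port.dev);
	}

	static int demo_probe_single(struct platform_device *pdev,
				     struct demo_port *port)
	{
		int ret;

		ret = demo_init_single(pdev, port);
		if (ret)
			return ret;

		ret = uart_add_one_port(&demo_uart_driver, &port->port);
		if (ret) {
			demo_cleanup_single(port);	/* undo init, nothing more */
			return ret;
		}
		return 0;
	}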
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f2a120eea9d4..36a2a0b7b82c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) | |||
567 | 567 | ||
568 | usb_autopm_put_interface(acm->control); | 568 | usb_autopm_put_interface(acm->control); |
569 | 569 | ||
570 | /* | ||
571 | * Unthrottle device in case the TTY was closed while throttled. | ||
572 | */ | ||
573 | spin_lock_irq(&acm->read_lock); | ||
574 | acm->throttled = 0; | ||
575 | acm->throttle_req = 0; | ||
576 | spin_unlock_irq(&acm->read_lock); | ||
577 | |||
570 | if (acm_submit_read_urbs(acm, GFP_KERNEL)) | 578 | if (acm_submit_read_urbs(acm, GFP_KERNEL)) |
571 | goto error_submit_read_urbs; | 579 | goto error_submit_read_urbs; |
572 | 580 | ||
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index ea8b304f0e85..8fd398dffced 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = { | |||
55 | .bInterfaceSubClass = 1, | 55 | .bInterfaceSubClass = 1, |
56 | .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */ | 56 | .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */ |
57 | }, | 57 | }, |
58 | { | ||
59 | /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ | ||
60 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | | ||
61 | USB_DEVICE_ID_MATCH_INT_INFO, | ||
62 | .idVendor = HUAWEI_VENDOR_ID, | ||
63 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC, | ||
64 | .bInterfaceSubClass = 1, | ||
65 | .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */ | ||
66 | }, | ||
58 | { } | 67 | { } |
59 | }; | 68 | }; |
60 | 69 | ||
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 57ed9e400c06..622b4a48e732 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c | |||
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev) | |||
493 | 493 | ||
494 | pci_save_state(pci_dev); | 494 | pci_save_state(pci_dev); |
495 | 495 | ||
496 | /* | ||
497 | * Some systems crash if an EHCI controller is in D3 during | ||
498 | * a sleep transition. We have to leave such controllers in D0. | ||
499 | */ | ||
500 | if (hcd->broken_pci_sleep) { | ||
501 | dev_dbg(dev, "Staying in PCI D0\n"); | ||
502 | return retval; | ||
503 | } | ||
504 | |||
505 | /* If the root hub is dead rather than suspended, disallow remote | 496 | /* If the root hub is dead rather than suspended, disallow remote |
506 | * wakeup. usb_hc_died() should ensure that both hosts are marked as | 497 | * wakeup. usb_hc_died() should ensure that both hosts are marked as |
507 | * dying, so we only need to check the primary roothub. | 498 | * dying, so we only need to check the primary roothub. |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 04fb834c3fa1..25a7422ee657 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -3379,7 +3379,7 @@ int usb_disable_lpm(struct usb_device *udev) | |||
3379 | return 0; | 3379 | return 0; |
3380 | 3380 | ||
3381 | udev->lpm_disable_count++; | 3381 | udev->lpm_disable_count++; |
3382 | if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0)) | 3382 | if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) |
3383 | return 0; | 3383 | return 0; |
3384 | 3384 | ||
3385 | /* If LPM is enabled, attempt to disable it. */ | 3385 | /* If LPM is enabled, attempt to disable it. */ |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index b548cf1dbc62..bdd1c6749d88 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1838,7 +1838,6 @@ free_interfaces: | |||
1838 | intfc = cp->intf_cache[i]; | 1838 | intfc = cp->intf_cache[i]; |
1839 | intf->altsetting = intfc->altsetting; | 1839 | intf->altsetting = intfc->altsetting; |
1840 | intf->num_altsetting = intfc->num_altsetting; | 1840 | intf->num_altsetting = intfc->num_altsetting; |
1841 | intf->intf_assoc = find_iad(dev, cp, i); | ||
1842 | kref_get(&intfc->ref); | 1841 | kref_get(&intfc->ref); |
1843 | 1842 | ||
1844 | alt = usb_altnum_to_altsetting(intf, 0); | 1843 | alt = usb_altnum_to_altsetting(intf, 0); |
@@ -1851,6 +1850,8 @@ free_interfaces: | |||
1851 | if (!alt) | 1850 | if (!alt) |
1852 | alt = &intf->altsetting[0]; | 1851 | alt = &intf->altsetting[0]; |
1853 | 1852 | ||
1853 | intf->intf_assoc = | ||
1854 | find_iad(dev, cp, alt->desc.bInterfaceNumber); | ||
1854 | intf->cur_altsetting = alt; | 1855 | intf->cur_altsetting = alt; |
1855 | usb_enable_interface(dev, intf, true); | 1856 | usb_enable_interface(dev, intf, true); |
1856 | intf->dev.parent = &dev->dev; | 1857 | intf->dev.parent = &dev->dev; |
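Note: the message.c fix looks the interface association up only after the current altsetting is known, keying on the descriptor's bInterfaceNumber rather than the position in the config array; the two differ when interface numbers are not consecutive. A simplified lookup by number in the spirit of find_iad() (helper name and loop are illustrative, not the kernel's implementation):

	#include <linux/usb.h>

	static struct usb_interface_assoc_descriptor *
	demo_find_iad(struct usb_host_config *config, u8 inum)
	{
		int i;

		for (i = 0; i < USB_MAXIADS && config->intf_assoc[i]; i++) {
			struct usb_interface_assoc_descriptor *iad =
							config->intf_assoc[i];

			/* Match on the interface-number range the IAD declares. */
			if (inum >= iad->bFirstInterface &&
			    inum < iad->bFirstInterface + iad->bInterfaceCount)
				return iad;
		}
		return NULL;
	}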
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index e23bf7984aaf..9a9bced813ed 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c | |||
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
599 | 599 | ||
600 | spin_lock_irqsave(&ep->udc->lock, flags); | 600 | spin_lock_irqsave(&ep->udc->lock, flags); |
601 | 601 | ||
602 | if (ep->ep.desc) { | ||
603 | spin_unlock_irqrestore(&ep->udc->lock, flags); | ||
604 | DBG(DBG_ERR, "ep%d already enabled\n", ep->index); | ||
605 | return -EBUSY; | ||
606 | } | ||
607 | |||
608 | ep->ep.desc = desc; | 602 | ep->ep.desc = desc; |
609 | ep->ep.maxpacket = maxpacket; | 603 | ep->ep.maxpacket = maxpacket; |
610 | 604 | ||
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 51881f3bd07a..b09452d6f33a 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c | |||
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep, | |||
1596 | ep = container_of(_ep, struct qe_ep, ep); | 1596 | ep = container_of(_ep, struct qe_ep, ep); |
1597 | 1597 | ||
1598 | /* catch various bogus parameters */ | 1598 | /* catch various bogus parameters */ |
1599 | if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] || | 1599 | if (!_ep || !desc || _ep->name == ep_name[0] || |
1600 | (desc->bDescriptorType != USB_DT_ENDPOINT)) | 1600 | (desc->bDescriptorType != USB_DT_ENDPOINT)) |
1601 | return -EINVAL; | 1601 | return -EINVAL; |
1602 | 1602 | ||
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index 28316858208b..bc6f9bb9994a 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c | |||
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep, | |||
567 | ep = container_of(_ep, struct fsl_ep, ep); | 567 | ep = container_of(_ep, struct fsl_ep, ep); |
568 | 568 | ||
569 | /* catch various bogus parameters */ | 569 | /* catch various bogus parameters */ |
570 | if (!_ep || !desc || ep->ep.desc | 570 | if (!_ep || !desc |
571 | || (desc->bDescriptorType != USB_DT_ENDPOINT)) | 571 | || (desc->bDescriptorType != USB_DT_ENDPOINT)) |
572 | return -EINVAL; | 572 | return -EINVAL; |
573 | 573 | ||
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev) | |||
2575 | /* for ep0: the desc defined here; | 2575 | /* for ep0: the desc defined here; |
2576 | * for other eps, gadget layer called ep_enable with defined desc | 2576 | * for other eps, gadget layer called ep_enable with defined desc |
2577 | */ | 2577 | */ |
2578 | udc_controller->eps[0].desc = &fsl_ep0_desc; | 2578 | udc_controller->eps[0].ep.desc = &fsl_ep0_desc; |
2579 | udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD; | 2579 | udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD; |
2580 | 2580 | ||
2581 | /* setup the udc->eps[] for non-control endpoints and link | 2581 | /* setup the udc->eps[] for non-control endpoints and link |
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h index 5cd7b7e7ddb4..f61a967f7082 100644 --- a/drivers/usb/gadget/fsl_usb2_udc.h +++ b/drivers/usb/gadget/fsl_usb2_udc.h | |||
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length) | |||
568 | /* | 568 | /* |
569 | * ### internal used help routines. | 569 | * ### internal used help routines. |
570 | */ | 570 | */ |
571 | #define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF) | 571 | #define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF) |
572 | #define ep_maxpacket(EP) ((EP)->ep.maxpacket) | 572 | #define ep_maxpacket(EP) ((EP)->ep.maxpacket) |
573 | #define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \ | 573 | #define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \ |
574 | USB_DIR_IN ):((EP)->desc->bEndpointAddress \ | 574 | USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \ |
575 | & USB_DIR_IN)==USB_DIR_IN) | 575 | & USB_DIR_IN)==USB_DIR_IN) |
576 | #define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \ | 576 | #define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \ |
577 | &udc->eps[pipe]) | 577 | &udc->eps[pipe]) |
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c index b241e6c6a7f2..3d28fb976c78 100644 --- a/drivers/usb/gadget/goku_udc.c +++ b/drivers/usb/gadget/goku_udc.c | |||
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
102 | unsigned long flags; | 102 | unsigned long flags; |
103 | 103 | ||
104 | ep = container_of(_ep, struct goku_ep, ep); | 104 | ep = container_of(_ep, struct goku_ep, ep); |
105 | if (!_ep || !desc || ep->ep.desc | 105 | if (!_ep || !desc |
106 | || desc->bDescriptorType != USB_DT_ENDPOINT) | 106 | || desc->bDescriptorType != USB_DT_ENDPOINT) |
107 | return -EINVAL; | 107 | return -EINVAL; |
108 | dev = ep->dev; | 108 | dev = ep->dev; |
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c index dbcd1329495e..117a4bba1b8c 100644 --- a/drivers/usb/gadget/mv_udc_core.c +++ b/drivers/usb/gadget/mv_udc_core.c | |||
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep, | |||
464 | ep = container_of(_ep, struct mv_ep, ep); | 464 | ep = container_of(_ep, struct mv_ep, ep); |
465 | udc = ep->udc; | 465 | udc = ep->udc; |
466 | 466 | ||
467 | if (!_ep || !desc || ep->ep.desc | 467 | if (!_ep || !desc |
468 | || desc->bDescriptorType != USB_DT_ENDPOINT) | 468 | || desc->bDescriptorType != USB_DT_ENDPOINT) |
469 | return -EINVAL; | 469 | return -EINVAL; |
470 | 470 | ||
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 7ba32469c5bd..a460e8c204f4 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
@@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep, | |||
153 | u16 maxp; | 153 | u16 maxp; |
154 | 154 | ||
155 | /* catch various bogus parameters */ | 155 | /* catch various bogus parameters */ |
156 | if (!_ep || !desc || ep->ep.desc | 156 | if (!_ep || !desc |
157 | || desc->bDescriptorType != USB_DT_ENDPOINT | 157 | || desc->bDescriptorType != USB_DT_ENDPOINT |
158 | || ep->bEndpointAddress != desc->bEndpointAddress | 158 | || ep->bEndpointAddress != desc->bEndpointAddress |
159 | || ep->maxpacket < usb_endpoint_maxp(desc)) { | 159 | || ep->maxpacket < usb_endpoint_maxp(desc)) { |
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index d7c8cb3bf759..f7ff9e8e746a 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c | |||
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep, | |||
218 | struct pxa25x_udc *dev; | 218 | struct pxa25x_udc *dev; |
219 | 219 | ||
220 | ep = container_of (_ep, struct pxa25x_ep, ep); | 220 | ep = container_of (_ep, struct pxa25x_ep, ep); |
221 | if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name | 221 | if (!_ep || !desc || _ep->name == ep0name |
222 | || desc->bDescriptorType != USB_DT_ENDPOINT | 222 | || desc->bDescriptorType != USB_DT_ENDPOINT |
223 | || ep->bEndpointAddress != desc->bEndpointAddress | 223 | || ep->bEndpointAddress != desc->bEndpointAddress |
224 | || ep->fifo_size < usb_endpoint_maxp (desc)) { | 224 | || ep->fifo_size < usb_endpoint_maxp (desc)) { |
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c index 36c6836eeb0f..236b271871a0 100644 --- a/drivers/usb/gadget/s3c-hsudc.c +++ b/drivers/usb/gadget/s3c-hsudc.c | |||
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep, | |||
760 | u32 ecr = 0; | 760 | u32 ecr = 0; |
761 | 761 | ||
762 | hsep = our_ep(_ep); | 762 | hsep = our_ep(_ep); |
763 | if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name | 763 | if (!_ep || !desc || _ep->name == ep0name |
764 | || desc->bDescriptorType != USB_DT_ENDPOINT | 764 | || desc->bDescriptorType != USB_DT_ENDPOINT |
765 | || hsep->bEndpointAddress != desc->bEndpointAddress | 765 | || hsep->bEndpointAddress != desc->bEndpointAddress |
766 | || ep_maxpacket(hsep) < usb_endpoint_maxp(desc)) | 766 | || ep_maxpacket(hsep) < usb_endpoint_maxp(desc)) |
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index 3de71d37d75e..f2e51f50e528 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c | |||
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep, | |||
1062 | 1062 | ||
1063 | ep = to_s3c2410_ep(_ep); | 1063 | ep = to_s3c2410_ep(_ep); |
1064 | 1064 | ||
1065 | if (!_ep || !desc || ep->ep.desc | 1065 | if (!_ep || !desc |
1066 | || _ep->name == ep0name | 1066 | || _ep->name == ep0name |
1067 | || desc->bDescriptorType != USB_DT_ENDPOINT) | 1067 | || desc->bDescriptorType != USB_DT_ENDPOINT) |
1068 | return -EINVAL; | 1068 | return -EINVAL; |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b100f5f9f4b6..800be38c78b4 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd) | |||
671 | hw = ehci->async->hw; | 671 | hw = ehci->async->hw; |
672 | hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); | 672 | hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); |
673 | hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); | 673 | hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); |
674 | #if defined(CONFIG_PPC_PS3) | ||
674 | hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ | 675 | hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ |
676 | #endif | ||
675 | hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); | 677 | hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); |
676 | hw->hw_qtd_next = EHCI_LIST_END(ehci); | 678 | hw->hw_qtd_next = EHCI_LIST_END(ehci); |
677 | ehci->async->qh_state = QH_STATE_LINKED; | 679 | ehci->async->qh_state = QH_STATE_LINKED; |
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index a44294d13494..17cfb8a1131c 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/regulator/consumer.h> | 43 | #include <linux/regulator/consumer.h> |
44 | #include <linux/pm_runtime.h> | 44 | #include <linux/pm_runtime.h> |
45 | #include <linux/gpio.h> | 45 | #include <linux/gpio.h> |
46 | #include <linux/clk.h> | ||
46 | 47 | ||
47 | /* EHCI Register Set */ | 48 | /* EHCI Register Set */ |
48 | #define EHCI_INSNREG04 (0xA0) | 49 | #define EHCI_INSNREG04 (0xA0) |
@@ -55,6 +56,15 @@ | |||
55 | #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 | 56 | #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 |
56 | #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 | 57 | #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 |
57 | 58 | ||
59 | /* Erratum i693 */ | ||
60 | static struct clk *utmi_p1_fck; | ||
61 | static struct clk *utmi_p2_fck; | ||
62 | static struct clk *xclk60mhsp1_ck; | ||
63 | static struct clk *xclk60mhsp2_ck; | ||
64 | static struct clk *usbhost_p1_fck; | ||
65 | static struct clk *usbhost_p2_fck; | ||
66 | static struct clk *init_60m_fclk; | ||
67 | |||
58 | /*-------------------------------------------------------------------------*/ | 68 | /*-------------------------------------------------------------------------*/ |
59 | 69 | ||
60 | static const struct hc_driver ehci_omap_hc_driver; | 70 | static const struct hc_driver ehci_omap_hc_driver; |
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg) | |||
70 | return __raw_readl(base + reg); | 80 | return __raw_readl(base + reg); |
71 | } | 81 | } |
72 | 82 | ||
83 | /* Erratum i693 workaround sequence */ | ||
84 | static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) | ||
85 | { | ||
86 | int ret = 0; | ||
87 | |||
88 | /* Switch to the internal 60 MHz clock */ | ||
89 | ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); | ||
90 | if (ret != 0) | ||
91 | ehci_err(ehci, "init_60m_fclk set parent" | ||
92 | "failed error:%d\n", ret); | ||
93 | |||
94 | ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); | ||
95 | if (ret != 0) | ||
96 | ehci_err(ehci, "init_60m_fclk set parent" | ||
97 | "failed error:%d\n", ret); | ||
98 | |||
99 | clk_enable(usbhost_p1_fck); | ||
100 | clk_enable(usbhost_p2_fck); | ||
101 | |||
102 | /* Wait 1ms and switch back to the external clock */ | ||
103 | mdelay(1); | ||
104 | ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); | ||
105 | if (ret != 0) | ||
106 | ehci_err(ehci, "xclk60mhsp1_ck set parent" | ||
107 | "failed error:%d\n", ret); | ||
108 | |||
109 | ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); | ||
110 | if (ret != 0) | ||
111 | ehci_err(ehci, "xclk60mhsp2_ck set parent" | ||
112 | "failed error:%d\n", ret); | ||
113 | |||
114 | clk_disable(usbhost_p1_fck); | ||
115 | clk_disable(usbhost_p2_fck); | ||
116 | } | ||
117 | |||
73 | static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) | 118 | static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) |
74 | { | 119 | { |
75 | struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); | 120 | struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); |
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) | |||
100 | } | 145 | } |
101 | } | 146 | } |
102 | 147 | ||
148 | static int omap_ehci_hub_control( | ||
149 | struct usb_hcd *hcd, | ||
150 | u16 typeReq, | ||
151 | u16 wValue, | ||
152 | u16 wIndex, | ||
153 | char *buf, | ||
154 | u16 wLength | ||
155 | ) | ||
156 | { | ||
157 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
158 | u32 __iomem *status_reg = &ehci->regs->port_status[ | ||
159 | (wIndex & 0xff) - 1]; | ||
160 | u32 temp; | ||
161 | unsigned long flags; | ||
162 | int retval = 0; | ||
163 | |||
164 | spin_lock_irqsave(&ehci->lock, flags); | ||
165 | |||
166 | if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { | ||
167 | temp = ehci_readl(ehci, status_reg); | ||
168 | if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { | ||
169 | retval = -EPIPE; | ||
170 | goto done; | ||
171 | } | ||
172 | |||
173 | temp &= ~PORT_WKCONN_E; | ||
174 | temp |= PORT_WKDISC_E | PORT_WKOC_E; | ||
175 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | ||
176 | |||
177 | omap_ehci_erratum_i693(ehci); | ||
178 | |||
179 | set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); | ||
180 | goto done; | ||
181 | } | ||
182 | |||
183 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
184 | |||
185 | /* Handle the hub control events here */ | ||
186 | return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); | ||
187 | done: | ||
188 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
189 | return retval; | ||
190 | } | ||
191 | |||
103 | static void disable_put_regulator( | 192 | static void disable_put_regulator( |
104 | struct ehci_hcd_omap_platform_data *pdata) | 193 | struct ehci_hcd_omap_platform_data *pdata) |
105 | { | 194 | { |
@@ -264,8 +353,76 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
264 | /* root ports should always stay powered */ | 353 | /* root ports should always stay powered */ |
265 | ehci_port_power(omap_ehci, 1); | 354 | ehci_port_power(omap_ehci, 1); |
266 | 355 | ||
356 | /* get clocks */ | ||
357 | utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); | ||
358 | if (IS_ERR(utmi_p1_fck)) { | ||
359 | ret = PTR_ERR(utmi_p1_fck); | ||
360 | dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); | ||
361 | goto err_add_hcd; | ||
362 | } | ||
363 | |||
364 | xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); | ||
365 | if (IS_ERR(xclk60mhsp1_ck)) { | ||
366 | ret = PTR_ERR(xclk60mhsp1_ck); | ||
367 | dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); | ||
368 | goto err_utmi_p1_fck; | ||
369 | } | ||
370 | |||
371 | utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); | ||
372 | if (IS_ERR(utmi_p2_fck)) { | ||
373 | ret = PTR_ERR(utmi_p2_fck); | ||
374 | dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); | ||
375 | goto err_xclk60mhsp1_ck; | ||
376 | } | ||
377 | |||
378 | xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); | ||
379 | if (IS_ERR(xclk60mhsp2_ck)) { | ||
380 | ret = PTR_ERR(xclk60mhsp2_ck); | ||
381 | dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); | ||
382 | goto err_utmi_p2_fck; | ||
383 | } | ||
384 | |||
385 | usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); | ||
386 | if (IS_ERR(usbhost_p1_fck)) { | ||
387 | ret = PTR_ERR(usbhost_p1_fck); | ||
388 | dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); | ||
389 | goto err_xclk60mhsp2_ck; | ||
390 | } | ||
391 | |||
392 | usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); | ||
393 | if (IS_ERR(usbhost_p2_fck)) { | ||
394 | ret = PTR_ERR(usbhost_p2_fck); | ||
395 | dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); | ||
396 | goto err_usbhost_p1_fck; | ||
397 | } | ||
398 | |||
399 | init_60m_fclk = clk_get(dev, "init_60m_fclk"); | ||
400 | if (IS_ERR(init_60m_fclk)) { | ||
401 | ret = PTR_ERR(init_60m_fclk); | ||
402 | dev_err(dev, "init_60m_fclk failed error:%d\n", ret); | ||
403 | goto err_usbhost_p2_fck; | ||
404 | } | ||
405 | |||
267 | return 0; | 406 | return 0; |
268 | 407 | ||
408 | err_usbhost_p2_fck: | ||
409 | clk_put(usbhost_p2_fck); | ||
410 | |||
411 | err_usbhost_p1_fck: | ||
412 | clk_put(usbhost_p1_fck); | ||
413 | |||
414 | err_xclk60mhsp2_ck: | ||
415 | clk_put(xclk60mhsp2_ck); | ||
416 | |||
417 | err_utmi_p2_fck: | ||
418 | clk_put(utmi_p2_fck); | ||
419 | |||
420 | err_xclk60mhsp1_ck: | ||
421 | clk_put(xclk60mhsp1_ck); | ||
422 | |||
423 | err_utmi_p1_fck: | ||
424 | clk_put(utmi_p1_fck); | ||
425 | |||
269 | err_add_hcd: | 426 | err_add_hcd: |
270 | disable_put_regulator(pdata); | 427 | disable_put_regulator(pdata); |
271 | pm_runtime_put_sync(dev); | 428 | pm_runtime_put_sync(dev); |
@@ -294,6 +451,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) | |||
294 | disable_put_regulator(dev->platform_data); | 451 | disable_put_regulator(dev->platform_data); |
295 | iounmap(hcd->regs); | 452 | iounmap(hcd->regs); |
296 | usb_put_hcd(hcd); | 453 | usb_put_hcd(hcd); |
454 | |||
455 | clk_put(utmi_p1_fck); | ||
456 | clk_put(utmi_p2_fck); | ||
457 | clk_put(xclk60mhsp1_ck); | ||
458 | clk_put(xclk60mhsp2_ck); | ||
459 | clk_put(usbhost_p1_fck); | ||
460 | clk_put(usbhost_p2_fck); | ||
461 | clk_put(init_60m_fclk); | ||
462 | |||
297 | pm_runtime_put_sync(dev); | 463 | pm_runtime_put_sync(dev); |
298 | pm_runtime_disable(dev); | 464 | pm_runtime_disable(dev); |
299 | 465 | ||
@@ -364,7 +530,7 @@ static const struct hc_driver ehci_omap_hc_driver = { | |||
364 | * root hub support | 530 | * root hub support |
365 | */ | 531 | */ |
366 | .hub_status_data = ehci_hub_status_data, | 532 | .hub_status_data = ehci_hub_status_data, |
367 | .hub_control = ehci_hub_control, | 533 | .hub_control = omap_ehci_hub_control, |
368 | .bus_suspend = ehci_bus_suspend, | 534 | .bus_suspend = ehci_bus_suspend, |
369 | .bus_resume = ehci_bus_resume, | 535 | .bus_resume = ehci_bus_resume, |
370 | 536 | ||
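Note: the OMAP EHCI probe now acquires seven clocks for the i693 workaround and unwinds them through a ladder of error labels, each label releasing only what was obtained before the failure; the workaround itself is then just clk_set_parent() to init_60m_fclk, a 1 ms delay, and clk_set_parent() back to the external xclk60mhsp*_ck sources. Cut down to two clocks, the unwinding pattern looks like this (helper name is illustrative; the clock variables mirror the file-scope statics in the hunk):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static struct clk *utmi_p1_fck;		/* as in the hunk */
	static struct clk *xclk60mhsp1_ck;

	static int demo_get_clocks(struct device *dev)	/* illustrative helper */
	{
		int ret;

		utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
		if (IS_ERR(utmi_p1_fck))
			return PTR_ERR(utmi_p1_fck);

		xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
		if (IS_ERR(xclk60mhsp1_ck)) {
			ret = PTR_ERR(xclk60mhsp1_ck);
			goto err_utmi_p1_fck;	/* release only what we hold */
		}

		/* ... five more clk_get() calls, each with its own label ... */
		return 0;

	err_utmi_p1_fck:
		clk_put(utmi_p1_fck);
		return ret;
	}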
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index bc94d7bf072d..123481793a47 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
144 | hcd->has_tt = 1; | 144 | hcd->has_tt = 1; |
145 | tdi_reset(ehci); | 145 | tdi_reset(ehci); |
146 | } | 146 | } |
147 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) { | ||
148 | /* EHCI #1 or #2 on 6 Series/C200 Series chipset */ | ||
149 | if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) { | ||
150 | ehci_info(ehci, "broken D3 during system sleep on ASUS\n"); | ||
151 | hcd->broken_pci_sleep = 1; | ||
152 | device_set_wakeup_capable(&pdev->dev, false); | ||
153 | } | ||
154 | } | ||
155 | break; | 147 | break; |
156 | case PCI_VENDOR_ID_TDI: | 148 | case PCI_VENDOR_ID_TDI: |
157 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { | 149 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { |
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index ca819cdd0c5e..e7cb3925abf8 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c | |||
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) | |||
126 | goto fail_create_hcd; | 126 | goto fail_create_hcd; |
127 | } | 127 | } |
128 | 128 | ||
129 | if (pdev->dev.platform_data != NULL) | 129 | pdata = pdev->dev.platform_data; |
130 | pdata = pdev->dev.platform_data; | ||
131 | 130 | ||
132 | /* initialize hcd */ | 131 | /* initialize hcd */ |
133 | hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, | 132 | hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, |
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index 9c2cc4633894..e9713d589e30 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c | |||
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op) | |||
270 | * | 270 | * |
271 | * Properly shutdown the hcd, call driver's shutdown routine. | 271 | * Properly shutdown the hcd, call driver's shutdown routine. |
272 | */ | 272 | */ |
273 | static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op) | 273 | static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op) |
274 | { | 274 | { |
275 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); | 275 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); |
276 | 276 | ||
277 | if (hcd->driver->shutdown) | 277 | if (hcd->driver->shutdown) |
278 | hcd->driver->shutdown(hcd); | 278 | hcd->driver->shutdown(hcd); |
279 | |||
280 | return 0; | ||
281 | } | 279 | } |
282 | 280 | ||
283 | 281 | ||
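Note: the ehci-xilinx-of change is a prototype fix: struct platform_driver's .shutdown hook returns void, so the old int return value was never seen by the driver core. For reference, the expected shape (demo names are illustrative):

	#include <linux/platform_device.h>

	static void demo_shutdown(struct platform_device *op)
	{
		/* nothing to return; just quiesce the hardware */
	}

	static struct platform_driver demo_driver = {
		.shutdown	= demo_shutdown,
		.driver		= {
			.name	= "demo",
		},
	};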
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 836772dfabd3..2f3619eefefa 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd) | |||
317 | } | 317 | } |
318 | 318 | ||
319 | /* Carry out the final steps of resuming the controller device */ | 319 | /* Carry out the final steps of resuming the controller device */ |
320 | static void ohci_finish_controller_resume(struct usb_hcd *hcd) | 320 | static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd) |
321 | { | 321 | { |
322 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | 322 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
323 | int port; | 323 | int port; |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ec4338eec826..77689bd64cac 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci, | |||
793 | struct xhci_virt_device *virt_dev, | 793 | struct xhci_virt_device *virt_dev, |
794 | int slot_id) | 794 | int slot_id) |
795 | { | 795 | { |
796 | struct list_head *tt; | ||
797 | struct list_head *tt_list_head; | 796 | struct list_head *tt_list_head; |
798 | struct list_head *tt_next; | 797 | struct xhci_tt_bw_info *tt_info, *next; |
799 | struct xhci_tt_bw_info *tt_info; | 798 | bool slot_found = false; |
800 | 799 | ||
801 | /* If the device never made it past the Set Address stage, | 800 | /* If the device never made it past the Set Address stage, |
802 | * it may not have the real_port set correctly. | 801 | * it may not have the real_port set correctly. |
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci, | |||
808 | } | 807 | } |
809 | 808 | ||
810 | tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); | 809 | tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); |
811 | if (list_empty(tt_list_head)) | 810 | list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { |
812 | return; | 811 | /* Multi-TT hubs will have more than one entry */ |
813 | 812 | if (tt_info->slot_id == slot_id) { | |
814 | list_for_each(tt, tt_list_head) { | 813 | slot_found = true; |
815 | tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); | 814 | list_del(&tt_info->tt_list); |
816 | if (tt_info->slot_id == slot_id) | 815 | kfree(tt_info); |
816 | } else if (slot_found) { | ||
817 | break; | 817 | break; |
818 | } | ||
818 | } | 819 | } |
819 | /* Cautionary measure in case the hub was disconnected before we | ||
820 | * stored the TT information. | ||
821 | */ | ||
822 | if (tt_info->slot_id != slot_id) | ||
823 | return; | ||
824 | |||
825 | tt_next = tt->next; | ||
826 | tt_info = list_entry(tt, struct xhci_tt_bw_info, | ||
827 | tt_list); | ||
828 | /* Multi-TT hubs will have more than one entry */ | ||
829 | do { | ||
830 | list_del(tt); | ||
831 | kfree(tt_info); | ||
832 | tt = tt_next; | ||
833 | if (list_empty(tt_list_head)) | ||
834 | break; | ||
835 | tt_next = tt->next; | ||
836 | tt_info = list_entry(tt, struct xhci_tt_bw_info, | ||
837 | tt_list); | ||
838 | } while (tt_info->slot_id == slot_id); | ||
839 | } | 820 | } |
840 | 821 | ||
841 | int xhci_alloc_tt_info(struct xhci_hcd *xhci, | 822 | int xhci_alloc_tt_info(struct xhci_hcd *xhci, |
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1791 | { | 1772 | { |
1792 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 1773 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
1793 | struct dev_info *dev_info, *next; | 1774 | struct dev_info *dev_info, *next; |
1794 | struct list_head *tt_list_head; | ||
1795 | struct list_head *tt; | ||
1796 | struct list_head *endpoints; | ||
1797 | struct list_head *ep, *q; | ||
1798 | struct xhci_tt_bw_info *tt_info; | ||
1799 | struct xhci_interval_bw_table *bwt; | ||
1800 | struct xhci_virt_ep *virt_ep; | ||
1801 | |||
1802 | unsigned long flags; | 1775 | unsigned long flags; |
1803 | int size; | 1776 | int size; |
1804 | int i; | 1777 | int i, j, num_ports; |
1805 | 1778 | ||
1806 | /* Free the Event Ring Segment Table and the actual Event Ring */ | 1779 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
1807 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | 1780 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1860 | } | 1833 | } |
1861 | spin_unlock_irqrestore(&xhci->lock, flags); | 1834 | spin_unlock_irqrestore(&xhci->lock, flags); |
1862 | 1835 | ||
1863 | bwt = &xhci->rh_bw->bw_table; | 1836 | num_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
1864 | for (i = 0; i < XHCI_MAX_INTERVAL; i++) { | 1837 | for (i = 0; i < num_ports; i++) { |
1865 | endpoints = &bwt->interval_bw[i].endpoints; | 1838 | struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; |
1866 | list_for_each_safe(ep, q, endpoints) { | 1839 | for (j = 0; j < XHCI_MAX_INTERVAL; j++) { |
1867 | virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list); | 1840 | struct list_head *ep = &bwt->interval_bw[j].endpoints; |
1868 | list_del(&virt_ep->bw_endpoint_list); | 1841 | while (!list_empty(ep)) |
1869 | kfree(virt_ep); | 1842 | list_del_init(ep->next); |
1870 | } | 1843 | } |
1871 | } | 1844 | } |
1872 | 1845 | ||
1873 | tt_list_head = &xhci->rh_bw->tts; | 1846 | for (i = 0; i < num_ports; i++) { |
1874 | list_for_each_safe(tt, q, tt_list_head) { | 1847 | struct xhci_tt_bw_info *tt, *n; |
1875 | tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); | 1848 | list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { |
1876 | list_del(tt); | 1849 | list_del(&tt->tt_list); |
1877 | kfree(tt_info); | 1850 | kfree(tt); |
1851 | } | ||
1878 | } | 1852 | } |
1879 | 1853 | ||
1880 | xhci->num_usb2_ports = 0; | 1854 | xhci->num_usb2_ports = 0; |
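Note: both xhci-mem.c hunks replace hand-maintained next pointers with list_for_each_entry_safe(), which caches the following element so the current one can be unlinked and freed inside the loop. The idiom in isolation (struct tt_entry is an illustrative stand-in; the real code also stops early once it has left the matching slot's contiguous run of entries):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct tt_entry {			/* illustrative stand-in */
		struct list_head tt_list;
		int slot_id;
	};

	static void demo_free_tts_for_slot(struct list_head *tts, int slot_id)
	{
		struct tt_entry *tt, *next;

		list_for_each_entry_safe(tt, next, tts, tt_list) {
			if (tt->slot_id != slot_id)
				continue;
			list_del(&tt->tt_list);	/* safe: 'next' already saved */
			kfree(tt);
		}
	}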
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index afdc73ee84a6..a979cd0dbe0f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci) | |||
795 | command = xhci_readl(xhci, &xhci->op_regs->command); | 795 | command = xhci_readl(xhci, &xhci->op_regs->command); |
796 | command |= CMD_CSS; | 796 | command |= CMD_CSS; |
797 | xhci_writel(xhci, command, &xhci->op_regs->command); | 797 | xhci_writel(xhci, command, &xhci->op_regs->command); |
798 | if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) { | 798 | if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) { |
799 | xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n"); | 799 | xhci_warn(xhci, "WARN: xHC save state timeout\n"); |
800 | spin_unlock_irq(&xhci->lock); | 800 | spin_unlock_irq(&xhci->lock); |
801 | return -ETIMEDOUT; | 801 | return -ETIMEDOUT; |
802 | } | 802 | } |
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
848 | command |= CMD_CRS; | 848 | command |= CMD_CRS; |
849 | xhci_writel(xhci, command, &xhci->op_regs->command); | 849 | xhci_writel(xhci, command, &xhci->op_regs->command); |
850 | if (handshake(xhci, &xhci->op_regs->status, | 850 | if (handshake(xhci, &xhci->op_regs->status, |
851 | STS_RESTORE, 0, 10*100)) { | 851 | STS_RESTORE, 0, 10 * 1000)) { |
852 | xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n"); | 852 | xhci_warn(xhci, "WARN: xHC restore state timeout\n"); |
853 | spin_unlock_irq(&xhci->lock); | 853 | spin_unlock_irq(&xhci->lock); |
854 | return -ETIMEDOUT; | 854 | return -ETIMEDOUT; |
855 | } | 855 | } |
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, | |||
3906 | default: | 3906 | default: |
3907 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", | 3907 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", |
3908 | __func__); | 3908 | __func__); |
3909 | return -EINVAL; | 3909 | return USB3_LPM_DISABLED; |
3910 | } | 3910 | } |
3911 | 3911 | ||
3912 | if (sel <= max_sel_pel && pel <= max_sel_pel) | 3912 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
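Note: two small xhci.c fixes ride along here: the save/restore handshake timeout grows from 1 ms to 10 ms (the last argument to the driver's handshake() helper is in microseconds), and xhci_get_timeout_no_hub_lpm() stops returning -EINVAL from a u16-returning function, where the error would silently truncate into a large "valid-looking" timeout; USB3_LPM_DISABLED is an explicit in-band off value instead. The truncation is easy to demonstrate in plain C:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t bad_timeout(void)
	{
		return -22;			/* -EINVAL forced into a u16 */
	}

	int main(void)
	{
		/* Prints 65514, indistinguishable from a huge timeout value. */
		printf("%u\n", (unsigned)bad_timeout());
		return 0;
	}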
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 768b4b55c816..9d63ba4d10d6 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
35 | 35 | ||
36 | #include <mach/cputype.h> | 36 | #include <mach/cputype.h> |
37 | #include <mach/hardware.h> | ||
37 | 38 | ||
38 | #include <asm/mach-types.h> | 39 | #include <asm/mach-types.h> |
39 | 40 | ||
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h index 046c84433cad..371baa0ee509 100644 --- a/drivers/usb/musb/davinci.h +++ b/drivers/usb/musb/davinci.h | |||
@@ -15,7 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | /* Integrated highspeed/otg PHY */ | 17 | /* Integrated highspeed/otg PHY */ |
18 | #define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) | 18 | #define USBPHY_CTL_PADDR 0x01c40034 |
19 | #define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ | 19 | #define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ |
20 | #define USBPHY_PHYCLKGD BIT(8) | 20 | #define USBPHY_PHYCLKGD BIT(8) |
21 | #define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ | 21 | #define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ |
@@ -27,7 +27,7 @@ | |||
27 | #define USBPHY_OTGPDWN BIT(1) | 27 | #define USBPHY_OTGPDWN BIT(1) |
28 | #define USBPHY_PHYPDWN BIT(0) | 28 | #define USBPHY_PHYPDWN BIT(0) |
29 | 29 | ||
30 | #define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48) | 30 | #define DM355_DEEPSLEEP_PADDR 0x01c40048 |
31 | #define DRVVBUS_FORCE BIT(2) | 31 | #define DRVVBUS_FORCE BIT(2) |
32 | #define DRVVBUS_OVERRIDE BIT(1) | 32 | #define DRVVBUS_OVERRIDE BIT(1) |
33 | 33 | ||
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index f42c29b11f71..95918dacc99a 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep) | |||
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | musb_ep->desc = NULL; | 1234 | musb_ep->desc = NULL; |
1235 | musb_ep->end_point.desc = NULL; | ||
1235 | 1236 | ||
1236 | /* abort all pending DMA and requests */ | 1237 | /* abort all pending DMA and requests */ |
1237 | nuke(musb_ep, -ESHUTDOWN); | 1238 | nuke(musb_ep, -ESHUTDOWN); |
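Note: the musb_gadget fix clears end_point.desc alongside the driver-private desc when an endpoint is disabled, so a later enable starts from a clean descriptor state instead of seeing a stale pointer. The rule in miniature (struct demo_ep is illustrative):

	#include <linux/kernel.h>
	#include <linux/usb/gadget.h>

	struct demo_ep {			/* illustrative */
		struct usb_ep ep;
		const struct usb_endpoint_descriptor *desc;
	};

	static int demo_ep_disable(struct usb_ep *_ep)
	{
		struct demo_ep *ep = container_of(_ep, struct demo_ep, ep);

		/* ... mask interrupts, flush FIFOs, fail pending requests ... */

		ep->desc = NULL;		/* driver-private copy */
		ep->ep.desc = NULL;		/* the copy the gadget core inspects */
		return 0;
	}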
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 1b1926200ba7..73d25cd8cba5 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = { | |||
82 | { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ | 82 | { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ |
83 | { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ | 83 | { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ |
84 | { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ | 84 | { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ |
85 | { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */ | ||
85 | { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ | 86 | { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ |
86 | { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ | 87 | { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ |
87 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ | 88 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8c084ea34e26..bc912e5a3beb 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = { | |||
737 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, | 737 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, |
738 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, | 738 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, |
739 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, | 739 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, |
740 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, | ||
740 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, | 741 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, |
741 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, | 742 | { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, |
742 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, | 743 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index f3c7c78ede33..5661c7e2d415 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -784,6 +784,7 @@ | |||
784 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ | 784 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ |
785 | #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ | 785 | #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ |
786 | #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ | 786 | #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ |
787 | #define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ | ||
787 | 788 | ||
788 | 789 | ||
789 | /* | 790 | /* |
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 105a6d898ca4..9b026bf7afef 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct"); | |||
39 | 39 | ||
40 | static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ | 40 | static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ |
41 | 41 | ||
42 | /* we want to look at all devices, as the vendor/product id can change | ||
43 | * depending on the command line argument */ | ||
44 | static const struct usb_device_id generic_serial_ids[] = { | ||
45 | {.driver_info = 42}, | ||
46 | {} | ||
47 | }; | ||
48 | |||
49 | /* All of the device info needed for the Generic Serial Converter */ | 42 | /* All of the device info needed for the Generic Serial Converter */ |
50 | struct usb_serial_driver usb_serial_generic_device = { | 43 | struct usb_serial_driver usb_serial_generic_device = { |
51 | .driver = { | 44 | .driver = { |
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug) | |||
79 | USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; | 72 | USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; |
80 | 73 | ||
81 | /* register our generic driver with ourselves */ | 74 | /* register our generic driver with ourselves */ |
82 | retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids); | 75 | retval = usb_serial_register_drivers(serial_drivers, |
76 | "usbserial_generic", generic_device_ids); | ||
83 | #endif | 77 | #endif |
84 | return retval; | 78 | return retval; |
85 | } | 79 | } |
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index d0ec1aa52719..a71fa0aa0406 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c | |||
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial, | |||
309 | MCT_U232_SET_REQUEST_TYPE, | 309 | MCT_U232_SET_REQUEST_TYPE, |
310 | 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, | 310 | 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, |
311 | WDR_TIMEOUT); | 311 | WDR_TIMEOUT); |
312 | if (rc < 0) | 312 | kfree(buf); |
313 | dev_err(&serial->dev->dev, | 313 | |
314 | "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); | ||
315 | dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); | 314 | dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); |
316 | 315 | ||
317 | kfree(buf); | 316 | if (rc < 0) { |
318 | return rc; | 317 | dev_err(&serial->dev->dev, |
318 | "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); | ||
319 | return rc; | ||
320 | } | ||
321 | return 0; | ||
319 | } /* mct_u232_set_modem_ctrl */ | 322 | } /* mct_u232_set_modem_ctrl */ |
320 | 323 | ||
321 | static int mct_u232_get_modem_stat(struct usb_serial *serial, | 324 | static int mct_u232_get_modem_stat(struct usb_serial *serial, |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 29160f8b5101..57eca2448424 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -190,7 +190,7 @@ | |||
190 | 190 | ||
191 | static int device_type; | 191 | static int device_type; |
192 | 192 | ||
193 | static const struct usb_device_id id_table[] __devinitconst = { | 193 | static const struct usb_device_id id_table[] = { |
194 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 194 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
195 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 195 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
196 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)}, | 196 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)}, |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 1aae9028cd0b..e668a2460bd4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -47,6 +47,7 @@ | |||
47 | /* Function prototypes */ | 47 | /* Function prototypes */ |
48 | static int option_probe(struct usb_serial *serial, | 48 | static int option_probe(struct usb_serial *serial, |
49 | const struct usb_device_id *id); | 49 | const struct usb_device_id *id); |
50 | static void option_release(struct usb_serial *serial); | ||
50 | static int option_send_setup(struct usb_serial_port *port); | 51 | static int option_send_setup(struct usb_serial_port *port); |
51 | static void option_instat_callback(struct urb *urb); | 52 | static void option_instat_callback(struct urb *urb); |
52 | 53 | ||
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb); | |||
150 | #define HUAWEI_PRODUCT_E14AC 0x14AC | 151 | #define HUAWEI_PRODUCT_E14AC 0x14AC |
151 | #define HUAWEI_PRODUCT_K3806 0x14AE | 152 | #define HUAWEI_PRODUCT_K3806 0x14AE |
152 | #define HUAWEI_PRODUCT_K4605 0x14C6 | 153 | #define HUAWEI_PRODUCT_K4605 0x14C6 |
154 | #define HUAWEI_PRODUCT_K5005 0x14C8 | ||
153 | #define HUAWEI_PRODUCT_K3770 0x14C9 | 155 | #define HUAWEI_PRODUCT_K3770 0x14C9 |
154 | #define HUAWEI_PRODUCT_K3771 0x14CA | 156 | #define HUAWEI_PRODUCT_K3771 0x14CA |
155 | #define HUAWEI_PRODUCT_K4510 0x14CB | 157 | #define HUAWEI_PRODUCT_K4510 0x14CB |
@@ -425,7 +427,7 @@ static void option_instat_callback(struct urb *urb); | |||
425 | #define SAMSUNG_VENDOR_ID 0x04e8 | 427 | #define SAMSUNG_VENDOR_ID 0x04e8 |
426 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 | 428 | #define SAMSUNG_PRODUCT_GT_B3730 0x6889 |
427 | 429 | ||
428 | /* YUGA products www.yuga-info.com*/ | 430 | /* YUGA products www.yuga-info.com gavin.kx@qq.com */ |
429 | #define YUGA_VENDOR_ID 0x257A | 431 | #define YUGA_VENDOR_ID 0x257A |
430 | #define YUGA_PRODUCT_CEM600 0x1601 | 432 | #define YUGA_PRODUCT_CEM600 0x1601 |
431 | #define YUGA_PRODUCT_CEM610 0x1602 | 433 | #define YUGA_PRODUCT_CEM610 0x1602 |
@@ -442,6 +444,8 @@ static void option_instat_callback(struct urb *urb); | |||
442 | #define YUGA_PRODUCT_CEU516 0x160C | 444 | #define YUGA_PRODUCT_CEU516 0x160C |
443 | #define YUGA_PRODUCT_CEU528 0x160D | 445 | #define YUGA_PRODUCT_CEU528 0x160D |
444 | #define YUGA_PRODUCT_CEU526 0x160F | 446 | #define YUGA_PRODUCT_CEU526 0x160F |
447 | #define YUGA_PRODUCT_CEU881 0x161F | ||
448 | #define YUGA_PRODUCT_CEU882 0x162F | ||
445 | 449 | ||
446 | #define YUGA_PRODUCT_CWM600 0x2601 | 450 | #define YUGA_PRODUCT_CWM600 0x2601 |
447 | #define YUGA_PRODUCT_CWM610 0x2602 | 451 | #define YUGA_PRODUCT_CWM610 0x2602 |
@@ -457,23 +461,26 @@ static void option_instat_callback(struct urb *urb); | |||
457 | #define YUGA_PRODUCT_CWU518 0x260B | 461 | #define YUGA_PRODUCT_CWU518 0x260B |
458 | #define YUGA_PRODUCT_CWU516 0x260C | 462 | #define YUGA_PRODUCT_CWU516 0x260C |
459 | #define YUGA_PRODUCT_CWU528 0x260D | 463 | #define YUGA_PRODUCT_CWU528 0x260D |
464 | #define YUGA_PRODUCT_CWU581 0x260E | ||
460 | #define YUGA_PRODUCT_CWU526 0x260F | 465 | #define YUGA_PRODUCT_CWU526 0x260F |
461 | 466 | #define YUGA_PRODUCT_CWU582 0x261F | |
462 | #define YUGA_PRODUCT_CLM600 0x2601 | 467 | #define YUGA_PRODUCT_CWU583 0x262F |
463 | #define YUGA_PRODUCT_CLM610 0x2602 | 468 | |
464 | #define YUGA_PRODUCT_CLM500 0x2603 | 469 | #define YUGA_PRODUCT_CLM600 0x3601 |
465 | #define YUGA_PRODUCT_CLM510 0x2604 | 470 | #define YUGA_PRODUCT_CLM610 0x3602 |
466 | #define YUGA_PRODUCT_CLM800 0x2605 | 471 | #define YUGA_PRODUCT_CLM500 0x3603 |
467 | #define YUGA_PRODUCT_CLM900 0x2606 | 472 | #define YUGA_PRODUCT_CLM510 0x3604 |
468 | 473 | #define YUGA_PRODUCT_CLM800 0x3605 | |
469 | #define YUGA_PRODUCT_CLU718 0x2607 | 474 | #define YUGA_PRODUCT_CLM900 0x3606 |
470 | #define YUGA_PRODUCT_CLU716 0x2608 | 475 | |
471 | #define YUGA_PRODUCT_CLU728 0x2609 | 476 | #define YUGA_PRODUCT_CLU718 0x3607 |
472 | #define YUGA_PRODUCT_CLU726 0x260A | 477 | #define YUGA_PRODUCT_CLU716 0x3608 |
473 | #define YUGA_PRODUCT_CLU518 0x260B | 478 | #define YUGA_PRODUCT_CLU728 0x3609 |
474 | #define YUGA_PRODUCT_CLU516 0x260C | 479 | #define YUGA_PRODUCT_CLU726 0x360A |
475 | #define YUGA_PRODUCT_CLU528 0x260D | 480 | #define YUGA_PRODUCT_CLU518 0x360B |
476 | #define YUGA_PRODUCT_CLU526 0x260F | 481 | #define YUGA_PRODUCT_CLU516 0x360C |
482 | #define YUGA_PRODUCT_CLU528 0x360D | ||
483 | #define YUGA_PRODUCT_CLU526 0x360F | ||
477 | 484 | ||
478 | /* Viettel products */ | 485 | /* Viettel products */ |
479 | #define VIETTEL_VENDOR_ID 0x2262 | 486 | #define VIETTEL_VENDOR_ID 0x2262 |
@@ -666,6 +673,11 @@ static const struct usb_device_id option_ids[] = { | |||
666 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, | 673 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, |
667 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), | 674 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), |
668 | .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, | 675 | .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, |
676 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) }, | ||
677 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) }, | ||
678 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) }, | ||
679 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) }, | ||
680 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) }, | ||
669 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, | 681 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, |
670 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, | 682 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, |
671 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, | 683 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, |
@@ -1209,6 +1221,11 @@ static const struct usb_device_id option_ids[] = { | |||
1209 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, | 1221 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, |
1210 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, | 1222 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, |
1211 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, | 1223 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, |
1224 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) }, | ||
1225 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) }, | ||
1226 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) }, | ||
1227 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) }, | ||
1228 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) }, | ||
1212 | { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, | 1229 | { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, |
1213 | { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, | 1230 | { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, |
1214 | { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ | 1231 | { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ |
@@ -1245,7 +1262,7 @@ static struct usb_serial_driver option_1port_device = { | |||
1245 | .ioctl = usb_wwan_ioctl, | 1262 | .ioctl = usb_wwan_ioctl, |
1246 | .attach = usb_wwan_startup, | 1263 | .attach = usb_wwan_startup, |
1247 | .disconnect = usb_wwan_disconnect, | 1264 | .disconnect = usb_wwan_disconnect, |
1248 | .release = usb_wwan_release, | 1265 | .release = option_release, |
1249 | .read_int_callback = option_instat_callback, | 1266 | .read_int_callback = option_instat_callback, |
1250 | #ifdef CONFIG_PM | 1267 | #ifdef CONFIG_PM |
1251 | .suspend = usb_wwan_suspend, | 1268 | .suspend = usb_wwan_suspend, |
@@ -1259,35 +1276,6 @@ static struct usb_serial_driver * const serial_drivers[] = { | |||
1259 | 1276 | ||
1260 | static bool debug; | 1277 | static bool debug; |
1261 | 1278 | ||
1262 | /* per port private data */ | ||
1263 | |||
1264 | #define N_IN_URB 4 | ||
1265 | #define N_OUT_URB 4 | ||
1266 | #define IN_BUFLEN 4096 | ||
1267 | #define OUT_BUFLEN 4096 | ||
1268 | |||
1269 | struct option_port_private { | ||
1270 | /* Input endpoints and buffer for this port */ | ||
1271 | struct urb *in_urbs[N_IN_URB]; | ||
1272 | u8 *in_buffer[N_IN_URB]; | ||
1273 | /* Output endpoints and buffer for this port */ | ||
1274 | struct urb *out_urbs[N_OUT_URB]; | ||
1275 | u8 *out_buffer[N_OUT_URB]; | ||
1276 | unsigned long out_busy; /* Bit vector of URBs in use */ | ||
1277 | int opened; | ||
1278 | struct usb_anchor delayed; | ||
1279 | |||
1280 | /* Settings for the port */ | ||
1281 | int rts_state; /* Handshaking pins (outputs) */ | ||
1282 | int dtr_state; | ||
1283 | int cts_state; /* Handshaking pins (inputs) */ | ||
1284 | int dsr_state; | ||
1285 | int dcd_state; | ||
1286 | int ri_state; | ||
1287 | |||
1288 | unsigned long tx_start_time[N_OUT_URB]; | ||
1289 | }; | ||
1290 | |||
1291 | module_usb_serial_driver(serial_drivers, option_ids); | 1279 | module_usb_serial_driver(serial_drivers, option_ids); |
1292 | 1280 | ||
1293 | static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason, | 1281 | static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason, |
@@ -1356,12 +1344,22 @@ static int option_probe(struct usb_serial *serial, | |||
1356 | return 0; | 1344 | return 0; |
1357 | } | 1345 | } |
1358 | 1346 | ||
1347 | static void option_release(struct usb_serial *serial) | ||
1348 | { | ||
1349 | struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); | ||
1350 | |||
1351 | usb_wwan_release(serial); | ||
1352 | |||
1353 | kfree(priv); | ||
1354 | } | ||
1355 | |||
1359 | static void option_instat_callback(struct urb *urb) | 1356 | static void option_instat_callback(struct urb *urb) |
1360 | { | 1357 | { |
1361 | int err; | 1358 | int err; |
1362 | int status = urb->status; | 1359 | int status = urb->status; |
1363 | struct usb_serial_port *port = urb->context; | 1360 | struct usb_serial_port *port = urb->context; |
1364 | struct option_port_private *portdata = usb_get_serial_port_data(port); | 1361 | struct usb_wwan_port_private *portdata = |
1362 | usb_get_serial_port_data(port); | ||
1365 | 1363 | ||
1366 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); | 1364 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); |
1367 | 1365 | ||
@@ -1421,7 +1419,7 @@ static int option_send_setup(struct usb_serial_port *port) | |||
1421 | struct usb_serial *serial = port->serial; | 1419 | struct usb_serial *serial = port->serial; |
1422 | struct usb_wwan_intf_private *intfdata = | 1420 | struct usb_wwan_intf_private *intfdata = |
1423 | (struct usb_wwan_intf_private *) serial->private; | 1421 | (struct usb_wwan_intf_private *) serial->private; |
1424 | struct option_port_private *portdata; | 1422 | struct usb_wwan_port_private *portdata; |
1425 | int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; | 1423 | int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; |
1426 | int val = 0; | 1424 | int val = 0; |
1427 | 1425 | ||
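The option.c changes above switch the per-port state over to the shared usb_wwan_port_private and add option_release(), which runs the generic usb_wwan_release() and then frees the interface-private data the driver allocated itself, closing a leak on disconnect. A hedged sketch of that release-wrapper pattern; generic_release(), my_attach() and my_release() are invented names.

#include <stdio.h>
#include <stdlib.h>

struct intf {
	void *private;		/* set by my_attach(), owned by this driver */
};

static void generic_release(struct intf *i)
{
	/* the shared layer cleans up the resources it owns here */
	printf("generic teardown of %p\n", (void *)i);
}

static int my_attach(struct intf *i)
{
	i->private = malloc(64);
	return i->private ? 0 : -1;
}

static void my_release(struct intf *i)
{
	generic_release(i);	/* shared teardown first */
	free(i->private);	/* then free what attach allocated */
	i->private = NULL;
}

int main(void)
{
	struct intf i = { 0 };

	if (my_attach(&i) == 0)
		my_release(&i);
	return 0;
}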
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 0d5fe59ebb9e..996015c5f1ac 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = { | |||
105 | {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ | 105 | {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ |
106 | {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ | 106 | {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ |
107 | {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ | 107 | {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ |
108 | {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
109 | {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
108 | {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ | 110 | {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ |
111 | {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
112 | {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ | ||
113 | {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */ | ||
114 | {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ | ||
109 | {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ | 115 | {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ |
110 | {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ | 116 | {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ |
111 | { } /* Terminating entry */ | 117 | { } /* Terminating entry */ |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index ba54a0a8235c..d423d36acc04 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = { | |||
294 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ | 294 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ |
295 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 295 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
296 | }, | 296 | }, |
297 | /* AT&T Direct IP LTE modems */ | ||
298 | { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), | ||
299 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | ||
300 | }, | ||
297 | { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ | 301 | { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ |
298 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 302 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
299 | }, | 303 | }, |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 6a1b609a0d94..27483f91a4a3 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -659,12 +659,14 @@ exit: | |||
659 | static struct usb_serial_driver *search_serial_device( | 659 | static struct usb_serial_driver *search_serial_device( |
660 | struct usb_interface *iface) | 660 | struct usb_interface *iface) |
661 | { | 661 | { |
662 | const struct usb_device_id *id; | 662 | const struct usb_device_id *id = NULL; |
663 | struct usb_serial_driver *drv; | 663 | struct usb_serial_driver *drv; |
664 | struct usb_driver *driver = to_usb_driver(iface->dev.driver); | ||
664 | 665 | ||
665 | /* Check if the usb id matches a known device */ | 666 | /* Check if the usb id matches a known device */ |
666 | list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { | 667 | list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { |
667 | id = get_iface_id(drv, iface); | 668 | if (drv->usb_driver == driver) |
669 | id = get_iface_id(drv, iface); | ||
668 | if (id) | 670 | if (id) |
669 | return drv; | 671 | return drv; |
670 | } | 672 | } |
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
755 | 757 | ||
756 | if (retval) { | 758 | if (retval) { |
757 | dbg("sub driver rejected device"); | 759 | dbg("sub driver rejected device"); |
758 | kfree(serial); | 760 | usb_serial_put(serial); |
759 | module_put(type->driver.owner); | 761 | module_put(type->driver.owner); |
760 | return retval; | 762 | return retval; |
761 | } | 763 | } |
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
827 | */ | 829 | */ |
828 | if (num_bulk_in == 0 || num_bulk_out == 0) { | 830 | if (num_bulk_in == 0 || num_bulk_out == 0) { |
829 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); | 831 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); |
830 | kfree(serial); | 832 | usb_serial_put(serial); |
831 | module_put(type->driver.owner); | 833 | module_put(type->driver.owner); |
832 | return -ENODEV; | 834 | return -ENODEV; |
833 | } | 835 | } |
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
841 | if (num_ports == 0) { | 843 | if (num_ports == 0) { |
842 | dev_err(&interface->dev, | 844 | dev_err(&interface->dev, |
843 | "Generic device with no bulk out, not allowed.\n"); | 845 | "Generic device with no bulk out, not allowed.\n"); |
844 | kfree(serial); | 846 | usb_serial_put(serial); |
845 | module_put(type->driver.owner); | 847 | module_put(type->driver.owner); |
846 | return -EIO; | 848 | return -EIO; |
847 | } | 849 | } |
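The usb-serial.c hunks above make search_serial_device() consult only the ID tables of the driver that actually owns the interface, and replace bare kfree(serial) on the probe error paths with usb_serial_put(), so the structure is released through its reference count and normal destructor path. An illustrative user-space refcount sketch; obj_get()/obj_put() are hypothetical names, not the kernel kref API.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
};

static struct obj *obj_create(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refs = 1;
	return o;
}

static void obj_get(struct obj *o)
{
	o->refs++;
}

static void obj_put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("destroying object\n");	/* destructor runs once */
		free(o);
	}
}

int main(void)
{
	struct obj *o = obj_create();

	if (!o)
		return 1;
	obj_get(o);
	obj_put(o);	/* still one reference left */
	obj_put(o);	/* last reference: object is freed here */
	return 0;
}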
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1719886bb9be..caf22bf5f822 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -1107,6 +1107,13 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, | |||
1107 | USB_SC_RBC, USB_PR_BULK, NULL, | 1107 | USB_SC_RBC, USB_PR_BULK, NULL, |
1108 | 0 ), | 1108 | 0 ), |
1109 | 1109 | ||
1110 | /* Feiya QDI U2 DISK, reported by Hans de Goede <hdegoede@redhat.com> */ | ||
1111 | UNUSUAL_DEV( 0x090c, 0x1000, 0x0000, 0xffff, | ||
1112 | "Feiya", | ||
1113 | "QDI U2 DISK", | ||
1114 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1115 | US_FL_NO_READ_CAPACITY_16 ), | ||
1116 | |||
1110 | /* aeb */ | 1117 | /* aeb */ |
1111 | UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, | 1118 | UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, |
1112 | "Feiya", | 1119 | "Feiya", |
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index fa2b03750316..2979292650d6 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig | |||
@@ -88,7 +88,7 @@ config LCD_PLATFORM | |||
88 | 88 | ||
89 | config LCD_TOSA | 89 | config LCD_TOSA |
90 | tristate "Sharp SL-6000 LCD Driver" | 90 | tristate "Sharp SL-6000 LCD Driver" |
91 | depends on SPI && MACH_TOSA | 91 | depends on I2C && SPI && MACH_TOSA |
92 | help | 92 | help |
93 | If you have an Sharp SL-6000 Zaurus say Y to enable a driver | 93 | If you have an Sharp SL-6000 Zaurus say Y to enable a driver |
94 | for its LCD. | 94 | for its LCD. |
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c index 6c9399341bcf..9327cd1b3143 100644 --- a/drivers/video/backlight/ili9320.c +++ b/drivers/video/backlight/ili9320.c | |||
@@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi, | |||
263 | 263 | ||
264 | EXPORT_SYMBOL_GPL(ili9320_probe_spi); | 264 | EXPORT_SYMBOL_GPL(ili9320_probe_spi); |
265 | 265 | ||
266 | int __devexit ili9320_remove(struct ili9320 *ili) | 266 | int ili9320_remove(struct ili9320 *ili) |
267 | { | 267 | { |
268 | ili9320_power(ili, FB_BLANK_POWERDOWN); | 268 | ili9320_power(ili, FB_BLANK_POWERDOWN); |
269 | 269 | ||
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c index 33ea874c87d2..9bdd4b0c18c8 100644 --- a/drivers/video/bfin_adv7393fb.c +++ b/drivers/video/bfin_adv7393fb.c | |||
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off, | |||
353 | 353 | ||
354 | static int | 354 | static int |
355 | adv7393_write_proc(struct file *file, const char __user * buffer, | 355 | adv7393_write_proc(struct file *file, const char __user * buffer, |
356 | unsigned long count, void *data) | 356 | size_t count, void *data) |
357 | { | 357 | { |
358 | struct adv7393fb_device *fbdev = data; | 358 | struct adv7393fb_device *fbdev = data; |
359 | char line[8]; | ||
360 | unsigned int val; | 359 | unsigned int val; |
361 | int ret; | 360 | int ret; |
362 | 361 | ||
363 | ret = copy_from_user(line, buffer, count); | 362 | ret = kstrtouint_from_user(buffer, count, 0, &val); |
364 | if (ret) | 363 | if (ret) |
365 | return -EFAULT; | 364 | return -EFAULT; |
366 | 365 | ||
367 | val = simple_strtoul(line, NULL, 0); | ||
368 | adv7393_write(fbdev->client, val >> 8, val & 0xff); | 366 | adv7393_write(fbdev->client, val >> 8, val & 0xff); |
369 | 367 | ||
370 | return count; | 368 | return count; |
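The bfin_adv7393fb hunk above replaces a fixed 8-byte stack buffer filled by copy_from_user() plus simple_strtoul() with kstrtouint_from_user(), which bounds the copy and rejects malformed or oversized input in one step. A user-space sketch of the same idea built on strtoul(); parse_uint() is an invented helper, only meant to show the bounds and error checks.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_uint(const char *buf, size_t count, unsigned int *val)
{
	char tmp[16];
	char *end;
	unsigned long v;

	if (count == 0 || count >= sizeof(tmp))
		return -EINVAL;		/* refuse oversized input */
	memcpy(tmp, buf, count);
	tmp[count] = '\0';

	errno = 0;
	v = strtoul(tmp, &end, 0);	/* base 0: accepts 0x..., 0..., decimal */
	if (errno || end == tmp || v > UINT_MAX)
		return -EINVAL;
	*val = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int val;
	const char *input = "0x1a2b";

	if (parse_uint(input, strlen(input), &val) == 0)
		printf("reg 0x%x val 0x%x\n", val >> 8, val & 0xff);
	return 0;
}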
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c index 377dde3d5bfc..c95b417d0d41 100644 --- a/drivers/video/broadsheetfb.c +++ b/drivers/video/broadsheetfb.c | |||
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev) | |||
1211 | 1211 | ||
1212 | static struct platform_driver broadsheetfb_driver = { | 1212 | static struct platform_driver broadsheetfb_driver = { |
1213 | .probe = broadsheetfb_probe, | 1213 | .probe = broadsheetfb_probe, |
1214 | .remove = broadsheetfb_remove, | 1214 | .remove = __devexit_p(broadsheetfb_remove), |
1215 | .driver = { | 1215 | .driver = { |
1216 | .owner = THIS_MODULE, | 1216 | .owner = THIS_MODULE, |
1217 | .name = "broadsheetfb", | 1217 | .name = "broadsheetfb", |
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index c2d11fef114b..e2c96d01d8f5 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig | |||
@@ -224,5 +224,19 @@ config FONT_10x18 | |||
224 | big letters. It fits between the sun 12x22 and the normal 8x16 font. | 224 | big letters. It fits between the sun 12x22 and the normal 8x16 font. |
225 | If other fonts are too big or too small for you, say Y, otherwise say N. | 225 | If other fonts are too big or too small for you, say Y, otherwise say N. |
226 | 226 | ||
227 | config FONT_AUTOSELECT | ||
228 | def_bool y | ||
229 | depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON | ||
230 | depends on !FONT_8x8 | ||
231 | depends on !FONT_6x11 | ||
232 | depends on !FONT_7x14 | ||
233 | depends on !FONT_PEARL_8x8 | ||
234 | depends on !FONT_ACORN_8x8 | ||
235 | depends on !FONT_MINI_4x6 | ||
236 | depends on !FONT_SUN8x16 | ||
237 | depends on !FONT_SUN12x22 | ||
238 | depends on !FONT_10x18 | ||
239 | select FONT_8x16 | ||
240 | |||
227 | endmenu | 241 | endmenu |
228 | 242 | ||
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c index ab0a8e527333..85e4f44bfa61 100644 --- a/drivers/video/mbx/mbxfb.c +++ b/drivers/video/mbx/mbxfb.c | |||
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev) | |||
1045 | 1045 | ||
1046 | static struct platform_driver mbxfb_driver = { | 1046 | static struct platform_driver mbxfb_driver = { |
1047 | .probe = mbxfb_probe, | 1047 | .probe = mbxfb_probe, |
1048 | .remove = mbxfb_remove, | 1048 | .remove = __devexit_p(mbxfb_remove), |
1049 | .suspend = mbxfb_suspend, | 1049 | .suspend = mbxfb_suspend, |
1050 | .resume = mbxfb_resume, | 1050 | .resume = mbxfb_resume, |
1051 | .driver = { | 1051 | .driver = { |
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index 2ce9992f403b..901576eb5a84 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c | |||
@@ -526,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev, | |||
526 | { | 526 | { |
527 | struct omap_dss_device *dssdev = to_dss_device(dev); | 527 | struct omap_dss_device *dssdev = to_dss_device(dev); |
528 | struct taal_data *td = dev_get_drvdata(&dssdev->dev); | 528 | struct taal_data *td = dev_get_drvdata(&dssdev->dev); |
529 | u8 errors; | 529 | u8 errors = 0; |
530 | int r; | 530 | int r; |
531 | 531 | ||
532 | mutex_lock(&td->lock); | 532 | mutex_lock(&td->lock); |
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 72ded9cd2cb0..5066eee10ccf 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c | |||
@@ -194,8 +194,7 @@ static inline int dss_initialize_debugfs(void) | |||
194 | static inline void dss_uninitialize_debugfs(void) | 194 | static inline void dss_uninitialize_debugfs(void) |
195 | { | 195 | { |
196 | } | 196 | } |
197 | static inline int dss_debugfs_create_file(const char *name, | 197 | int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) |
198 | void (*write)(struct seq_file *)) | ||
199 | { | 198 | { |
200 | return 0; | 199 | return 0; |
201 | } | 200 | } |
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index ec363d8390ed..ca8382d346e9 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c | |||
@@ -3724,7 +3724,7 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs, | |||
3724 | /* CLKIN4DDR = 16 * TXBYTECLKHS */ | 3724 | /* CLKIN4DDR = 16 * TXBYTECLKHS */ |
3725 | tlp_avail = thsbyte_clk * (blank - trans_lp); | 3725 | tlp_avail = thsbyte_clk * (blank - trans_lp); |
3726 | 3726 | ||
3727 | ttxclkesc = tdsi_fclk / lp_clk_div; | 3727 | ttxclkesc = tdsi_fclk * lp_clk_div; |
3728 | 3728 | ||
3729 | lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - | 3729 | lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - |
3730 | 26) / 16; | 3730 | 26) / 16; |
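The dsi.c fix above swaps a division for a multiplication: lp_clk_div divides the escape-clock frequency, so the escape-clock period is the fclk period multiplied by the divider, not divided by it. A tiny arithmetic illustration with made-up numbers.

#include <stdio.h>

int main(void)
{
	unsigned long long fclk_hz = 173000000ULL;	/* made-up DSI fclk */
	unsigned long long lp_clk_div = 8;
	unsigned long long tdsi_fclk_ps = 1000000000000ULL / fclk_hz;	/* period */
	unsigned long long ttxclkesc_ps = tdsi_fclk_ps * lp_clk_div;	/* "*", not "/" */

	/* dividing the rate by N multiplies the period by N */
	printf("fclk period %llu ps, esc period %llu ps\n",
	       tdsi_fclk_ps, ttxclkesc_ps);
	return 0;
}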
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 6ea1ff149f6f..770632359a17 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
@@ -731,7 +731,7 @@ static void dss_runtime_put(void) | |||
731 | DSSDBG("dss_runtime_put\n"); | 731 | DSSDBG("dss_runtime_put\n"); |
732 | 732 | ||
733 | r = pm_runtime_put_sync(&dss.pdev->dev); | 733 | r = pm_runtime_put_sync(&dss.pdev->dev); |
734 | WARN_ON(r < 0); | 734 | WARN_ON(r < 0 && r != -EBUSY); |
735 | } | 735 | } |
736 | 736 | ||
737 | /* DEBUGFS */ | 737 | /* DEBUGFS */ |
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index 5f9d8e69029e..ea7b661e7229 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c | |||
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) | |||
361 | result = (unsigned int)tmp / 1000; | 361 | result = (unsigned int)tmp / 1000; |
362 | 362 | ||
363 | dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", | 363 | dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", |
364 | pixclk, clk, result, clk / result); | 364 | pixclk, clk, result, result ? clk / result : clk); |
365 | 365 | ||
366 | return result; | 366 | return result; |
367 | } | 367 | } |
@@ -1348,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win) | |||
1348 | writel(0, regs + VIDOSD_A(win, sfb->variant)); | 1348 | writel(0, regs + VIDOSD_A(win, sfb->variant)); |
1349 | writel(0, regs + VIDOSD_B(win, sfb->variant)); | 1349 | writel(0, regs + VIDOSD_B(win, sfb->variant)); |
1350 | writel(0, regs + VIDOSD_C(win, sfb->variant)); | 1350 | writel(0, regs + VIDOSD_C(win, sfb->variant)); |
1351 | reg = readl(regs + SHADOWCON); | 1351 | |
1352 | writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON); | 1352 | if (sfb->variant.has_shadowcon) { |
1353 | reg = readl(sfb->regs + SHADOWCON); | ||
1354 | reg &= ~(SHADOWCON_WINx_PROTECT(win) | | ||
1355 | SHADOWCON_CHx_ENABLE(win) | | ||
1356 | SHADOWCON_CHx_LOCAL_ENABLE(win)); | ||
1357 | writel(reg, sfb->regs + SHADOWCON); | ||
1358 | } | ||
1353 | } | 1359 | } |
1354 | 1360 | ||
1355 | static int __devinit s3c_fb_probe(struct platform_device *pdev) | 1361 | static int __devinit s3c_fb_probe(struct platform_device *pdev) |
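The s3c-fb hunks above guard the debug print against a zero clock divider and, when the variant has a SHADOWCON register, also clear the window's protect and channel-enable bits while clearing the window. A stand-alone version of the divide-by-zero guard used in the print.

#include <stdio.h>

static void show_div(unsigned long clk, unsigned int result)
{
	/* "result ? clk / result : clk" avoids dividing by zero in the log */
	printf("clk=%lu, div=%u (%lu)\n",
	       clk, result, result ? clk / result : clk);
}

int main(void)
{
	show_div(66000000UL, 6);	/* normal case */
	show_div(66000000UL, 0);	/* degenerate divider: no crash */
	return 0;
}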
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c index cee7803a0a1c..f3d3b9ce4751 100644 --- a/drivers/video/savage/savagefb_driver.c +++ b/drivers/video/savage/savagefb_driver.c | |||
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r | |||
1351 | /* following part not present in X11 driver */ | 1351 | /* following part not present in X11 driver */ |
1352 | cr67 = vga_in8(0x3d5, par) & 0xf; | 1352 | cr67 = vga_in8(0x3d5, par) & 0xf; |
1353 | vga_out8(0x3d5, 0x50 | cr67, par); | 1353 | vga_out8(0x3d5, 0x50 | cr67, par); |
1354 | udelay(10000); | 1354 | mdelay(10); |
1355 | vga_out8(0x3d4, 0x67, par); | 1355 | vga_out8(0x3d4, 0x67, par); |
1356 | /* end of part */ | 1356 | /* end of part */ |
1357 | vga_out8(0x3d5, reg->CR67 & ~0x0c, par); | 1357 | vga_out8(0x3d5, reg->CR67 & ~0x0c, par); |
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
1904 | vga_out8(0x3d4, 0x66, par); | 1904 | vga_out8(0x3d4, 0x66, par); |
1905 | cr66 = vga_in8(0x3d5, par); | 1905 | cr66 = vga_in8(0x3d5, par); |
1906 | vga_out8(0x3d5, cr66 | 0x02, par); | 1906 | vga_out8(0x3d5, cr66 | 0x02, par); |
1907 | udelay(10000); | 1907 | mdelay(10); |
1908 | 1908 | ||
1909 | vga_out8(0x3d4, 0x66, par); | 1909 | vga_out8(0x3d4, 0x66, par); |
1910 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ | 1910 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ |
1911 | udelay(10000); | 1911 | mdelay(10); |
1912 | 1912 | ||
1913 | 1913 | ||
1914 | /* | 1914 | /* |
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
1918 | vga_out8(0x3d4, 0x3f, par); | 1918 | vga_out8(0x3d4, 0x3f, par); |
1919 | cr3f = vga_in8(0x3d5, par); | 1919 | cr3f = vga_in8(0x3d5, par); |
1920 | vga_out8(0x3d5, cr3f | 0x08, par); | 1920 | vga_out8(0x3d5, cr3f | 0x08, par); |
1921 | udelay(10000); | 1921 | mdelay(10); |
1922 | 1922 | ||
1923 | vga_out8(0x3d4, 0x3f, par); | 1923 | vga_out8(0x3d4, 0x3f, par); |
1924 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ | 1924 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ |
1925 | udelay(10000); | 1925 | mdelay(10); |
1926 | 1926 | ||
1927 | /* Savage ramdac speeds */ | 1927 | /* Savage ramdac speeds */ |
1928 | par->numClocks = 4; | 1928 | par->numClocks = 4; |
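The savagefb hunks above replace udelay(10000) with mdelay(10): the wait is the same 10 ms, but very long busy waits are better expressed with the millisecond helper, since large udelay() arguments can be limited or miscomputed on some architectures. A user-space sketch of the unit conversion only, using usleep(); it says nothing about the kernel's delay loops.

#include <stdio.h>
#include <unistd.h>

#define USEC_PER_MSEC 1000U

static void wait_ms(unsigned int ms)
{
	usleep(ms * USEC_PER_MSEC);	/* 10 ms == 10000 us */
}

int main(void)
{
	wait_ms(10);
	printf("waited ~10 ms\n");
	return 0;
}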
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index afcd13676542..e4841c36798b 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Watchdog driver for ARM SP805 watchdog module | 4 | * Watchdog driver for ARM SP805 watchdog module |
5 | * | 5 | * |
6 | * Copyright (C) 2010 ST Microelectronics | 6 | * Copyright (C) 2010 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2 or later. This program is licensed "as is" without any | 10 | * License version 2 or later. This program is licensed "as is" without any |
@@ -331,6 +331,6 @@ static struct amba_driver sp805_wdt_driver = { | |||
331 | 331 | ||
332 | module_amba_driver(sp805_wdt_driver); | 332 | module_amba_driver(sp805_wdt_driver); |
333 | 333 | ||
334 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | 334 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
335 | MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); | 335 | MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); |
336 | MODULE_LICENSE("GPL"); | 336 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 6908e4ce2a0d..7595581d032c 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
827 | handle_edge_irq, "event"); | 827 | handle_edge_irq, "event"); |
828 | 828 | ||
829 | xen_irq_info_evtchn_init(irq, evtchn); | 829 | xen_irq_info_evtchn_init(irq, evtchn); |
830 | } else { | ||
831 | struct irq_info *info = info_for_irq(irq); | ||
832 | WARN_ON(info == NULL || info->type != IRQT_EVTCHN); | ||
830 | } | 833 | } |
831 | 834 | ||
832 | out: | 835 | out: |
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
862 | xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); | 865 | xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); |
863 | 866 | ||
864 | bind_evtchn_to_cpu(evtchn, cpu); | 867 | bind_evtchn_to_cpu(evtchn, cpu); |
868 | } else { | ||
869 | struct irq_info *info = info_for_irq(irq); | ||
870 | WARN_ON(info == NULL || info->type != IRQT_IPI); | ||
865 | } | 871 | } |
866 | 872 | ||
867 | out: | 873 | out: |
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
939 | xen_irq_info_virq_init(cpu, irq, evtchn, virq); | 945 | xen_irq_info_virq_init(cpu, irq, evtchn, virq); |
940 | 946 | ||
941 | bind_evtchn_to_cpu(evtchn, cpu); | 947 | bind_evtchn_to_cpu(evtchn, cpu); |
948 | } else { | ||
949 | struct irq_info *info = info_for_irq(irq); | ||
950 | WARN_ON(info == NULL || info->type != IRQT_VIRQ); | ||
942 | } | 951 | } |
943 | 952 | ||
944 | out: | 953 | out: |
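The three xen/events.c hunks above add an else branch to each bind function: when an existing irq is found for the channel, a WARN_ON checks that its irq_info exists and has the expected type (IRQT_EVTCHN, IRQT_IPI or IRQT_VIRQ), catching a channel being rebound as the wrong kind. A toy sketch of that "reuse only if the cached entry has the expected type" check; the enum values and lookup() are invented.

#include <stdio.h>

enum bind_type { BIND_NONE, BIND_EVTCHN, BIND_IPI, BIND_VIRQ };

struct info {
	enum bind_type type;
};

static struct info table[4] = {
	[1] = { BIND_EVTCHN },
	[2] = { BIND_IPI },
};

static struct info *lookup(int irq)
{
	return (irq >= 0 && irq < 4) ? &table[irq] : NULL;
}

static int bind_evtchn(int existing_irq)
{
	struct info *info = lookup(existing_irq);

	/* reuse path: the entry must exist and already be an event channel */
	if (info == NULL || info->type != BIND_EVTCHN)
		fprintf(stderr, "warning: irq %d bound as wrong type\n",
			existing_irq);
	return existing_irq;
}

int main(void)
{
	bind_evtchn(1);		/* ok */
	bind_evtchn(2);		/* warns: bound as an IPI, not an evtchn */
	return 0;
}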
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index b84bf0b6cc34..18fff88254eb 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c | |||
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev) | |||
59 | 59 | ||
60 | #ifdef CONFIG_ACPI | 60 | #ifdef CONFIG_ACPI |
61 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); | 61 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); |
62 | if (!handle) | 62 | if (!handle && pci_dev->bus->bridge) |
63 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); | 63 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); |
64 | #ifdef CONFIG_PCI_IOV | 64 | #ifdef CONFIG_PCI_IOV |
65 | if (!handle && pci_dev->is_virtfn) | 65 | if (!handle && pci_dev->is_virtfn) |
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index dcb79521e6c8..89f264c67420 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) | |||
269 | } | 269 | } |
270 | 270 | ||
271 | /* returns 0 if the page was successfully put into frontswap, -1 if not */ | 271 | /* returns 0 if the page was successfully put into frontswap, -1 if not */ |
272 | static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, | 272 | static int tmem_frontswap_store(unsigned type, pgoff_t offset, |
273 | struct page *page) | 273 | struct page *page) |
274 | { | 274 | { |
275 | u64 ind64 = (u64)offset; | 275 | u64 ind64 = (u64)offset; |
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, | |||
295 | * returns 0 if the page was successfully gotten from frontswap, -1 if | 295 | * returns 0 if the page was successfully gotten from frontswap, -1 if |
296 | * was not present (should never happen!) | 296 | * was not present (should never happen!) |
297 | */ | 297 | */ |
298 | static int tmem_frontswap_get_page(unsigned type, pgoff_t offset, | 298 | static int tmem_frontswap_load(unsigned type, pgoff_t offset, |
299 | struct page *page) | 299 | struct page *page) |
300 | { | 300 | { |
301 | u64 ind64 = (u64)offset; | 301 | u64 ind64 = (u64)offset; |
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s) | |||
362 | __setup("nofrontswap", no_frontswap); | 362 | __setup("nofrontswap", no_frontswap); |
363 | 363 | ||
364 | static struct frontswap_ops __initdata tmem_frontswap_ops = { | 364 | static struct frontswap_ops __initdata tmem_frontswap_ops = { |
365 | .put_page = tmem_frontswap_put_page, | 365 | .store = tmem_frontswap_store, |
366 | .get_page = tmem_frontswap_get_page, | 366 | .load = tmem_frontswap_load, |
367 | .invalidate_page = tmem_frontswap_flush_page, | 367 | .invalidate_page = tmem_frontswap_flush_page, |
368 | .invalidate_area = tmem_frontswap_flush_area, | 368 | .invalidate_area = tmem_frontswap_flush_area, |
369 | .init = tmem_frontswap_init | 369 | .init = tmem_frontswap_init |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 3f75895c919b..7301cdb4b2cb 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -179,60 +179,74 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id, | |||
179 | 179 | ||
180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | 180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, |
181 | struct ulist *parents, int level, | 181 | struct ulist *parents, int level, |
182 | struct btrfs_key *key, u64 wanted_disk_byte, | 182 | struct btrfs_key *key_for_search, u64 time_seq, |
183 | u64 wanted_disk_byte, | ||
183 | const u64 *extent_item_pos) | 184 | const u64 *extent_item_pos) |
184 | { | 185 | { |
185 | int ret; | 186 | int ret = 0; |
186 | int slot = path->slots[level]; | 187 | int slot; |
187 | struct extent_buffer *eb = path->nodes[level]; | 188 | struct extent_buffer *eb; |
189 | struct btrfs_key key; | ||
188 | struct btrfs_file_extent_item *fi; | 190 | struct btrfs_file_extent_item *fi; |
189 | struct extent_inode_elem *eie = NULL; | 191 | struct extent_inode_elem *eie = NULL; |
190 | u64 disk_byte; | 192 | u64 disk_byte; |
191 | u64 wanted_objectid = key->objectid; | ||
192 | 193 | ||
193 | add_parent: | 194 | if (level != 0) { |
194 | if (level == 0 && extent_item_pos) { | 195 | eb = path->nodes[level]; |
195 | fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | 196 | ret = ulist_add(parents, eb->start, 0, GFP_NOFS); |
196 | ret = check_extent_in_eb(key, eb, fi, *extent_item_pos, &eie); | ||
197 | if (ret < 0) | 197 | if (ret < 0) |
198 | return ret; | 198 | return ret; |
199 | } | ||
200 | ret = ulist_add(parents, eb->start, (unsigned long)eie, GFP_NOFS); | ||
201 | if (ret < 0) | ||
202 | return ret; | ||
203 | |||
204 | if (level != 0) | ||
205 | return 0; | 199 | return 0; |
200 | } | ||
206 | 201 | ||
207 | /* | 202 | /* |
208 | * if the current leaf is full with EXTENT_DATA items, we must | 203 | * We normally enter this function with the path already pointing to |
209 | * check the next one if that holds a reference as well. | 204 | * the first item to check. But sometimes, we may enter it with |
210 | * ref->count cannot be used to skip this check. | 205 | * slot==nritems. In that case, go to the next leaf before we continue. |
211 | * repeat this until we don't find any additional EXTENT_DATA items. | ||
212 | */ | 206 | */ |
213 | while (1) { | 207 | if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) |
214 | eie = NULL; | 208 | ret = btrfs_next_old_leaf(root, path, time_seq); |
215 | ret = btrfs_next_leaf(root, path); | ||
216 | if (ret < 0) | ||
217 | return ret; | ||
218 | if (ret) | ||
219 | return 0; | ||
220 | 209 | ||
210 | while (!ret) { | ||
221 | eb = path->nodes[0]; | 211 | eb = path->nodes[0]; |
222 | for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) { | 212 | slot = path->slots[0]; |
223 | btrfs_item_key_to_cpu(eb, key, slot); | 213 | |
224 | if (key->objectid != wanted_objectid || | 214 | btrfs_item_key_to_cpu(eb, &key, slot); |
225 | key->type != BTRFS_EXTENT_DATA_KEY) | 215 | |
226 | return 0; | 216 | if (key.objectid != key_for_search->objectid || |
227 | fi = btrfs_item_ptr(eb, slot, | 217 | key.type != BTRFS_EXTENT_DATA_KEY) |
228 | struct btrfs_file_extent_item); | 218 | break; |
229 | disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); | 219 | |
230 | if (disk_byte == wanted_disk_byte) | 220 | fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); |
231 | goto add_parent; | 221 | disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); |
222 | |||
223 | if (disk_byte == wanted_disk_byte) { | ||
224 | eie = NULL; | ||
225 | if (extent_item_pos) { | ||
226 | ret = check_extent_in_eb(&key, eb, fi, | ||
227 | *extent_item_pos, | ||
228 | &eie); | ||
229 | if (ret < 0) | ||
230 | break; | ||
231 | } | ||
232 | if (!ret) { | ||
233 | ret = ulist_add(parents, eb->start, | ||
234 | (unsigned long)eie, GFP_NOFS); | ||
235 | if (ret < 0) | ||
236 | break; | ||
237 | if (!extent_item_pos) { | ||
238 | ret = btrfs_next_old_leaf(root, path, | ||
239 | time_seq); | ||
240 | continue; | ||
241 | } | ||
242 | } | ||
232 | } | 243 | } |
244 | ret = btrfs_next_old_item(root, path, time_seq); | ||
233 | } | 245 | } |
234 | 246 | ||
235 | return 0; | 247 | if (ret > 0) |
248 | ret = 0; | ||
249 | return ret; | ||
236 | } | 250 | } |
237 | 251 | ||
238 | /* | 252 | /* |
@@ -249,7 +263,6 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
249 | struct btrfs_path *path; | 263 | struct btrfs_path *path; |
250 | struct btrfs_root *root; | 264 | struct btrfs_root *root; |
251 | struct btrfs_key root_key; | 265 | struct btrfs_key root_key; |
252 | struct btrfs_key key = {0}; | ||
253 | struct extent_buffer *eb; | 266 | struct extent_buffer *eb; |
254 | int ret = 0; | 267 | int ret = 0; |
255 | int root_level; | 268 | int root_level; |
@@ -294,19 +307,9 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
294 | goto out; | 307 | goto out; |
295 | } | 308 | } |
296 | 309 | ||
297 | if (level == 0) { | 310 | ret = add_all_parents(root, path, parents, level, &ref->key_for_search, |
298 | if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) { | 311 | time_seq, ref->wanted_disk_byte, |
299 | ret = btrfs_next_leaf(root, path); | 312 | extent_item_pos); |
300 | if (ret) | ||
301 | goto out; | ||
302 | eb = path->nodes[0]; | ||
303 | } | ||
304 | |||
305 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); | ||
306 | } | ||
307 | |||
308 | ret = add_all_parents(root, path, parents, level, &key, | ||
309 | ref->wanted_disk_byte, extent_item_pos); | ||
310 | out: | 313 | out: |
311 | btrfs_free_path(path); | 314 | btrfs_free_path(path); |
312 | return ret; | 315 | return ret; |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index e616f8872e69..12394a90d60f 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #define BTRFS_INODE_IN_DEFRAG 3 | 37 | #define BTRFS_INODE_IN_DEFRAG 3 |
38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 | 38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 |
39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 | 39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 |
40 | #define BTRFS_INODE_HAS_ASYNC_EXTENT 6 | ||
40 | 41 | ||
41 | /* in memory btrfs inode */ | 42 | /* in memory btrfs inode */ |
42 | struct btrfs_inode { | 43 | struct btrfs_inode { |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 9cebb1fd6a3c..da6e9364a5e3 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
@@ -93,6 +93,7 @@ | |||
93 | #include "print-tree.h" | 93 | #include "print-tree.h" |
94 | #include "locking.h" | 94 | #include "locking.h" |
95 | #include "check-integrity.h" | 95 | #include "check-integrity.h" |
96 | #include "rcu-string.h" | ||
96 | 97 | ||
97 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 | 98 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 |
98 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 | 99 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 |
@@ -843,13 +844,14 @@ static int btrfsic_process_superblock_dev_mirror( | |||
843 | superblock_tmp->never_written = 0; | 844 | superblock_tmp->never_written = 0; |
844 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; | 845 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; |
845 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) | 846 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) |
846 | printk(KERN_INFO "New initial S-block (bdev %p, %s)" | 847 | printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)" |
847 | " @%llu (%s/%llu/%d)\n", | 848 | " @%llu (%s/%llu/%d)\n", |
848 | superblock_bdev, device->name, | 849 | superblock_bdev, |
849 | (unsigned long long)dev_bytenr, | 850 | rcu_str_deref(device->name), |
850 | dev_state->name, | 851 | (unsigned long long)dev_bytenr, |
851 | (unsigned long long)dev_bytenr, | 852 | dev_state->name, |
852 | superblock_mirror_num); | 853 | (unsigned long long)dev_bytenr, |
854 | superblock_mirror_num); | ||
853 | list_add(&superblock_tmp->all_blocks_node, | 855 | list_add(&superblock_tmp->all_blocks_node, |
854 | &state->all_blocks_list); | 856 | &state->all_blocks_list); |
855 | btrfsic_block_hashtable_add(superblock_tmp, | 857 | btrfsic_block_hashtable_add(superblock_tmp, |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d7a96cfdc50a..15cbc2bf4ff0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -467,6 +467,15 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, | |||
467 | return 0; | 467 | return 0; |
468 | } | 468 | } |
469 | 469 | ||
470 | /* | ||
471 | * This allocates memory and gets a tree modification sequence number when | ||
472 | * needed. | ||
473 | * | ||
474 | * Returns 0 when no sequence number is needed, < 0 on error. | ||
475 | * Returns 1 when a sequence number was added. In this case, | ||
476 | * fs_info->tree_mod_seq_lock was acquired and must be released by the caller | ||
477 | * after inserting into the rb tree. | ||
478 | */ | ||
470 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | 479 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, |
471 | struct tree_mod_elem **tm_ret) | 480 | struct tree_mod_elem **tm_ret) |
472 | { | 481 | { |
@@ -491,11 +500,11 @@ static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | |||
491 | */ | 500 | */ |
492 | kfree(tm); | 501 | kfree(tm); |
493 | seq = 0; | 502 | seq = 0; |
503 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
494 | } else { | 504 | } else { |
495 | __get_tree_mod_seq(fs_info, &tm->elem); | 505 | __get_tree_mod_seq(fs_info, &tm->elem); |
496 | seq = tm->elem.seq; | 506 | seq = tm->elem.seq; |
497 | } | 507 | } |
498 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
499 | 508 | ||
500 | return seq; | 509 | return seq; |
501 | } | 510 | } |
@@ -521,7 +530,9 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info, | |||
521 | tm->slot = slot; | 530 | tm->slot = slot; |
522 | tm->generation = btrfs_node_ptr_generation(eb, slot); | 531 | tm->generation = btrfs_node_ptr_generation(eb, slot); |
523 | 532 | ||
524 | return __tree_mod_log_insert(fs_info, tm); | 533 | ret = __tree_mod_log_insert(fs_info, tm); |
534 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
535 | return ret; | ||
525 | } | 536 | } |
526 | 537 | ||
527 | static noinline int | 538 | static noinline int |
@@ -559,7 +570,9 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, | |||
559 | tm->move.nr_items = nr_items; | 570 | tm->move.nr_items = nr_items; |
560 | tm->op = MOD_LOG_MOVE_KEYS; | 571 | tm->op = MOD_LOG_MOVE_KEYS; |
561 | 572 | ||
562 | return __tree_mod_log_insert(fs_info, tm); | 573 | ret = __tree_mod_log_insert(fs_info, tm); |
574 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
575 | return ret; | ||
563 | } | 576 | } |
564 | 577 | ||
565 | static noinline int | 578 | static noinline int |
@@ -580,7 +593,9 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, | |||
580 | tm->generation = btrfs_header_generation(old_root); | 593 | tm->generation = btrfs_header_generation(old_root); |
581 | tm->op = MOD_LOG_ROOT_REPLACE; | 594 | tm->op = MOD_LOG_ROOT_REPLACE; |
582 | 595 | ||
583 | return __tree_mod_log_insert(fs_info, tm); | 596 | ret = __tree_mod_log_insert(fs_info, tm); |
597 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
598 | return ret; | ||
584 | } | 599 | } |
585 | 600 | ||
586 | static struct tree_mod_elem * | 601 | static struct tree_mod_elem * |
@@ -1023,6 +1038,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info, | |||
1023 | looped = 1; | 1038 | looped = 1; |
1024 | } | 1039 | } |
1025 | 1040 | ||
1041 | /* if there's no old root to return, return what we found instead */ | ||
1042 | if (!found) | ||
1043 | found = tm; | ||
1044 | |||
1026 | return found; | 1045 | return found; |
1027 | } | 1046 | } |
1028 | 1047 | ||
@@ -1143,22 +1162,36 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, | |||
1143 | return eb_rewin; | 1162 | return eb_rewin; |
1144 | } | 1163 | } |
1145 | 1164 | ||
1165 | /* | ||
1166 | * get_old_root() rewinds the state of @root's root node to the given @time_seq | ||
1167 | * value. If there are no changes, the current root->root_node is returned. If | ||
1168 | * anything changed in between, there's a fresh buffer allocated on which the | ||
1169 | * rewind operations are done. In any case, the returned buffer is read locked. | ||
1170 | * Returns NULL on error (with no locks held). | ||
1171 | */ | ||
1146 | static inline struct extent_buffer * | 1172 | static inline struct extent_buffer * |
1147 | get_old_root(struct btrfs_root *root, u64 time_seq) | 1173 | get_old_root(struct btrfs_root *root, u64 time_seq) |
1148 | { | 1174 | { |
1149 | struct tree_mod_elem *tm; | 1175 | struct tree_mod_elem *tm; |
1150 | struct extent_buffer *eb; | 1176 | struct extent_buffer *eb; |
1151 | struct tree_mod_root *old_root; | 1177 | struct tree_mod_root *old_root = NULL; |
1152 | u64 old_generation; | 1178 | u64 old_generation = 0; |
1179 | u64 logical; | ||
1153 | 1180 | ||
1181 | eb = btrfs_read_lock_root_node(root); | ||
1154 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); | 1182 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); |
1155 | if (!tm) | 1183 | if (!tm) |
1156 | return root->node; | 1184 | return root->node; |
1157 | 1185 | ||
1158 | old_root = &tm->old_root; | 1186 | if (tm->op == MOD_LOG_ROOT_REPLACE) { |
1159 | old_generation = tm->generation; | 1187 | old_root = &tm->old_root; |
1188 | old_generation = tm->generation; | ||
1189 | logical = old_root->logical; | ||
1190 | } else { | ||
1191 | logical = root->node->start; | ||
1192 | } | ||
1160 | 1193 | ||
1161 | tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq); | 1194 | tm = tree_mod_log_search(root->fs_info, logical, time_seq); |
1162 | /* | 1195 | /* |
1163 | * there was an item in the log when __tree_mod_log_oldest_root | 1196 | * there was an item in the log when __tree_mod_log_oldest_root |
1164 | * returned. this one must not go away, because the time_seq passed to | 1197 | * returned. this one must not go away, because the time_seq passed to |
@@ -1166,22 +1199,25 @@ get_old_root(struct btrfs_root *root, u64 time_seq) | |||
1166 | */ | 1199 | */ |
1167 | BUG_ON(!tm); | 1200 | BUG_ON(!tm); |
1168 | 1201 | ||
1169 | if (old_root->logical == root->node->start) { | 1202 | if (old_root) |
1170 | /* there are logged operations for the current root */ | ||
1171 | eb = btrfs_clone_extent_buffer(root->node); | ||
1172 | } else { | ||
1173 | /* there's a root replace operation for the current root */ | ||
1174 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, | 1203 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, |
1175 | root->nodesize); | 1204 | root->nodesize); |
1205 | else | ||
1206 | eb = btrfs_clone_extent_buffer(root->node); | ||
1207 | btrfs_tree_read_unlock(root->node); | ||
1208 | free_extent_buffer(root->node); | ||
1209 | if (!eb) | ||
1210 | return NULL; | ||
1211 | btrfs_tree_read_lock(eb); | ||
1212 | if (old_root) { | ||
1176 | btrfs_set_header_bytenr(eb, eb->start); | 1213 | btrfs_set_header_bytenr(eb, eb->start); |
1177 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); | 1214 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); |
1178 | btrfs_set_header_owner(eb, root->root_key.objectid); | 1215 | btrfs_set_header_owner(eb, root->root_key.objectid); |
1216 | btrfs_set_header_level(eb, old_root->level); | ||
1217 | btrfs_set_header_generation(eb, old_generation); | ||
1179 | } | 1218 | } |
1180 | if (!eb) | ||
1181 | return NULL; | ||
1182 | btrfs_set_header_level(eb, old_root->level); | ||
1183 | btrfs_set_header_generation(eb, old_generation); | ||
1184 | __tree_mod_log_rewind(eb, time_seq, tm); | 1219 | __tree_mod_log_rewind(eb, time_seq, tm); |
1220 | extent_buffer_get(eb); | ||
1185 | 1221 | ||
1186 | return eb; | 1222 | return eb; |
1187 | } | 1223 | } |
@@ -1650,8 +1686,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1650 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) | 1686 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) |
1651 | return 0; | 1687 | return 0; |
1652 | 1688 | ||
1653 | btrfs_header_nritems(mid); | ||
1654 | |||
1655 | left = read_node_slot(root, parent, pslot - 1); | 1689 | left = read_node_slot(root, parent, pslot - 1); |
1656 | if (left) { | 1690 | if (left) { |
1657 | btrfs_tree_lock(left); | 1691 | btrfs_tree_lock(left); |
@@ -1681,7 +1715,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1681 | wret = push_node_left(trans, root, left, mid, 1); | 1715 | wret = push_node_left(trans, root, left, mid, 1); |
1682 | if (wret < 0) | 1716 | if (wret < 0) |
1683 | ret = wret; | 1717 | ret = wret; |
1684 | btrfs_header_nritems(mid); | ||
1685 | } | 1718 | } |
1686 | 1719 | ||
1687 | /* | 1720 | /* |
@@ -2615,9 +2648,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, | |||
2615 | 2648 | ||
2616 | again: | 2649 | again: |
2617 | b = get_old_root(root, time_seq); | 2650 | b = get_old_root(root, time_seq); |
2618 | extent_buffer_get(b); | ||
2619 | level = btrfs_header_level(b); | 2651 | level = btrfs_header_level(b); |
2620 | btrfs_tree_read_lock(b); | ||
2621 | p->locks[level] = BTRFS_READ_LOCK; | 2652 | p->locks[level] = BTRFS_READ_LOCK; |
2622 | 2653 | ||
2623 | while (b) { | 2654 | while (b) { |
@@ -5001,6 +5032,12 @@ next: | |||
5001 | */ | 5032 | */ |
5002 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) | 5033 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) |
5003 | { | 5034 | { |
5035 | return btrfs_next_old_leaf(root, path, 0); | ||
5036 | } | ||
5037 | |||
5038 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
5039 | u64 time_seq) | ||
5040 | { | ||
5004 | int slot; | 5041 | int slot; |
5005 | int level; | 5042 | int level; |
5006 | struct extent_buffer *c; | 5043 | struct extent_buffer *c; |
@@ -5025,7 +5062,10 @@ again: | |||
5025 | path->keep_locks = 1; | 5062 | path->keep_locks = 1; |
5026 | path->leave_spinning = 1; | 5063 | path->leave_spinning = 1; |
5027 | 5064 | ||
5028 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 5065 | if (time_seq) |
5066 | ret = btrfs_search_old_slot(root, &key, path, time_seq); | ||
5067 | else | ||
5068 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
5029 | path->keep_locks = 0; | 5069 | path->keep_locks = 0; |
5030 | 5070 | ||
5031 | if (ret < 0) | 5071 | if (ret < 0) |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0236d03c6732..fa5c45b39075 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -2753,13 +2753,20 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
2753 | } | 2753 | } |
2754 | 2754 | ||
2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2756 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) | 2756 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, |
2757 | u64 time_seq); | ||
2758 | static inline int btrfs_next_old_item(struct btrfs_root *root, | ||
2759 | struct btrfs_path *p, u64 time_seq) | ||
2757 | { | 2760 | { |
2758 | ++p->slots[0]; | 2761 | ++p->slots[0]; |
2759 | if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) | 2762 | if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) |
2760 | return btrfs_next_leaf(root, p); | 2763 | return btrfs_next_old_leaf(root, p, time_seq); |
2761 | return 0; | 2764 | return 0; |
2762 | } | 2765 | } |
2766 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) | ||
2767 | { | ||
2768 | return btrfs_next_old_item(root, p, 0); | ||
2769 | } | ||
2763 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2770 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2764 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); | 2771 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); |
2765 | int __must_check btrfs_drop_snapshot(struct btrfs_root *root, | 2772 | int __must_check btrfs_drop_snapshot(struct btrfs_root *root, |
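The ctree.h hunk above is a small API-versioning pattern worth calling out: btrfs_next_item() keeps its signature and becomes a one-line wrapper around the new btrfs_next_old_item(), which threads a time_seq argument through, so existing callers compile unchanged while tree-mod-log users can replay an older view. A minimal stand-alone sketch of the same wrapper-with-extra-argument refactor (hypothetical cursor/advance names, not btrfs API):

#include <stdio.h>

struct cursor {
        int slot;
        int nritems;
};

/* New entry point: takes the extra sequence argument. */
static int advance_old(struct cursor *c, unsigned long long time_seq)
{
        if (++c->slot >= c->nritems) {
                /* 0 means "current view"; non-zero replays an older one */
                printf("refill cursor at seq %llu\n", time_seq);
                c->slot = 0;
        }
        return c->slot;
}

/* Old entry point: signature unchanged, now a thin wrapper passing 0. */
static int advance(struct cursor *c)
{
        return advance_old(c, 0);
}

int main(void)
{
        struct cursor c = { .slot = 0, .nritems = 2 };

        advance(&c);            /* existing callers keep compiling */
        advance_old(&c, 42);    /* new callers opt in to an explicit seq */
        return 0;
}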
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index c18d0442ae6d..2399f4086915 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
@@ -1879,3 +1879,21 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) | |||
1879 | } | 1879 | } |
1880 | } | 1880 | } |
1881 | } | 1881 | } |
1882 | |||
1883 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root) | ||
1884 | { | ||
1885 | struct btrfs_delayed_root *delayed_root; | ||
1886 | struct btrfs_delayed_node *curr_node, *prev_node; | ||
1887 | |||
1888 | delayed_root = btrfs_get_delayed_root(root); | ||
1889 | |||
1890 | curr_node = btrfs_first_delayed_node(delayed_root); | ||
1891 | while (curr_node) { | ||
1892 | __btrfs_kill_delayed_node(curr_node); | ||
1893 | |||
1894 | prev_node = curr_node; | ||
1895 | curr_node = btrfs_next_delayed_node(curr_node); | ||
1896 | btrfs_release_delayed_node(prev_node); | ||
1897 | } | ||
1898 | } | ||
1899 | |||
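The new btrfs_destroy_delayed_inodes() above walks the delayed-node list with a curr/prev pair: the next node is fetched before the current one is released, because btrfs_release_delayed_node() may drop the last reference and free the node out from under the walker. The traversal discipline in isolation, as a small user-space sketch with plain malloc/free standing in for the refcounted kernel nodes:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int id;
};

/* Dropping the node frees it, so its ->next must not be read afterwards. */
static void release(struct node *n)
{
        printf("releasing node %d\n", n->id);
        free(n);
}

static void destroy_all(struct node *head)
{
        struct node *curr = head, *prev;

        while (curr) {
                prev = curr;
                curr = curr->next;   /* read the link while prev is still valid */
                release(prev);       /* only now is it safe to free prev */
        }
}

int main(void)
{
        struct node *b = malloc(sizeof(*b));
        struct node *a = malloc(sizeof(*a));

        b->next = NULL; b->id = 2;
        a->next = b;    a->id = 1;
        destroy_all(a);
        return 0;
}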
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 7083d08b2a21..f5aa4023d3e1 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h | |||
@@ -124,6 +124,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev); | |||
124 | /* Used for drop dead root */ | 124 | /* Used for drop dead root */ |
125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); | 125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); |
126 | 126 | ||
127 | /* Used for cleaning the transaction */ | ||
128 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root); | ||
129 | |||
127 | /* Used for readdir() */ | 130 | /* Used for readdir() */ |
128 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 131 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, |
129 | struct list_head *del_list); | 132 | struct list_head *del_list); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7ae51decf6d3..7b845ff4af99 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include "free-space-cache.h" | 44 | #include "free-space-cache.h" |
45 | #include "inode-map.h" | 45 | #include "inode-map.h" |
46 | #include "check-integrity.h" | 46 | #include "check-integrity.h" |
47 | #include "rcu-string.h" | ||
47 | 48 | ||
48 | static struct extent_io_ops btree_extent_io_ops; | 49 | static struct extent_io_ops btree_extent_io_ops; |
49 | static void end_workqueue_fn(struct btrfs_work *work); | 50 | static void end_workqueue_fn(struct btrfs_work *work); |
@@ -2118,7 +2119,7 @@ int open_ctree(struct super_block *sb, | |||
2118 | 2119 | ||
2119 | features = btrfs_super_incompat_flags(disk_super); | 2120 | features = btrfs_super_incompat_flags(disk_super); |
2120 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; | 2121 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
2121 | if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) | 2122 | if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) |
2122 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; | 2123 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
2123 | 2124 | ||
2124 | /* | 2125 | /* |
@@ -2575,8 +2576,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | |||
2575 | struct btrfs_device *device = (struct btrfs_device *) | 2576 | struct btrfs_device *device = (struct btrfs_device *) |
2576 | bh->b_private; | 2577 | bh->b_private; |
2577 | 2578 | ||
2578 | printk_ratelimited(KERN_WARNING "lost page write due to " | 2579 | printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to " |
2579 | "I/O error on %s\n", device->name); | 2580 | "I/O error on %s\n", |
2581 | rcu_str_deref(device->name)); | ||
2580 | /* note, we dont' set_buffer_write_io_error because we have | 2582 | /* note, we dont' set_buffer_write_io_error because we have |
2581 | * our own ways of dealing with the IO errors | 2583 | * our own ways of dealing with the IO errors |
2582 | */ | 2584 | */ |
@@ -2749,8 +2751,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
2749 | wait_for_completion(&device->flush_wait); | 2751 | wait_for_completion(&device->flush_wait); |
2750 | 2752 | ||
2751 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { | 2753 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { |
2752 | printk("btrfs: disabling barriers on dev %s\n", | 2754 | printk_in_rcu("btrfs: disabling barriers on dev %s\n", |
2753 | device->name); | 2755 | rcu_str_deref(device->name)); |
2754 | device->nobarriers = 1; | 2756 | device->nobarriers = 1; |
2755 | } | 2757 | } |
2756 | if (!bio_flagged(bio, BIO_UPTODATE)) { | 2758 | if (!bio_flagged(bio, BIO_UPTODATE)) { |
@@ -3400,7 +3402,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
3400 | 3402 | ||
3401 | delayed_refs = &trans->delayed_refs; | 3403 | delayed_refs = &trans->delayed_refs; |
3402 | 3404 | ||
3403 | again: | ||
3404 | spin_lock(&delayed_refs->lock); | 3405 | spin_lock(&delayed_refs->lock); |
3405 | if (delayed_refs->num_entries == 0) { | 3406 | if (delayed_refs->num_entries == 0) { |
3406 | spin_unlock(&delayed_refs->lock); | 3407 | spin_unlock(&delayed_refs->lock); |
@@ -3408,31 +3409,37 @@ again: | |||
3408 | return ret; | 3409 | return ret; |
3409 | } | 3410 | } |
3410 | 3411 | ||
3411 | node = rb_first(&delayed_refs->root); | 3412 | while ((node = rb_first(&delayed_refs->root)) != NULL) { |
3412 | while (node) { | ||
3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | 3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); |
3414 | node = rb_next(node); | ||
3415 | |||
3416 | ref->in_tree = 0; | ||
3417 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
3418 | delayed_refs->num_entries--; | ||
3419 | 3414 | ||
3420 | atomic_set(&ref->refs, 1); | 3415 | atomic_set(&ref->refs, 1); |
3421 | if (btrfs_delayed_ref_is_head(ref)) { | 3416 | if (btrfs_delayed_ref_is_head(ref)) { |
3422 | struct btrfs_delayed_ref_head *head; | 3417 | struct btrfs_delayed_ref_head *head; |
3423 | 3418 | ||
3424 | head = btrfs_delayed_node_to_head(ref); | 3419 | head = btrfs_delayed_node_to_head(ref); |
3425 | spin_unlock(&delayed_refs->lock); | 3420 | if (!mutex_trylock(&head->mutex)) { |
3426 | mutex_lock(&head->mutex); | 3421 | atomic_inc(&ref->refs); |
3422 | spin_unlock(&delayed_refs->lock); | ||
3423 | |||
3424 | /* Need to wait for the delayed ref to run */ | ||
3425 | mutex_lock(&head->mutex); | ||
3426 | mutex_unlock(&head->mutex); | ||
3427 | btrfs_put_delayed_ref(ref); | ||
3428 | |||
3429 | spin_lock(&delayed_refs->lock); | ||
3430 | continue; | ||
3431 | } | ||
3432 | |||
3427 | kfree(head->extent_op); | 3433 | kfree(head->extent_op); |
3428 | delayed_refs->num_heads--; | 3434 | delayed_refs->num_heads--; |
3429 | if (list_empty(&head->cluster)) | 3435 | if (list_empty(&head->cluster)) |
3430 | delayed_refs->num_heads_ready--; | 3436 | delayed_refs->num_heads_ready--; |
3431 | list_del_init(&head->cluster); | 3437 | list_del_init(&head->cluster); |
3432 | mutex_unlock(&head->mutex); | ||
3433 | btrfs_put_delayed_ref(ref); | ||
3434 | goto again; | ||
3435 | } | 3438 | } |
3439 | ref->in_tree = 0; | ||
3440 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
3441 | delayed_refs->num_entries--; | ||
3442 | |||
3436 | spin_unlock(&delayed_refs->lock); | 3443 | spin_unlock(&delayed_refs->lock); |
3437 | btrfs_put_delayed_ref(ref); | 3444 | btrfs_put_delayed_ref(ref); |
3438 | 3445 | ||
@@ -3520,11 +3527,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
3520 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, | 3527 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, |
3521 | offset >> PAGE_CACHE_SHIFT); | 3528 | offset >> PAGE_CACHE_SHIFT); |
3522 | spin_unlock(&dirty_pages->buffer_lock); | 3529 | spin_unlock(&dirty_pages->buffer_lock); |
3523 | if (eb) { | 3530 | if (eb) |
3524 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, | 3531 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, |
3525 | &eb->bflags); | 3532 | &eb->bflags); |
3526 | atomic_set(&eb->refs, 1); | ||
3527 | } | ||
3528 | if (PageWriteback(page)) | 3533 | if (PageWriteback(page)) |
3529 | end_page_writeback(page); | 3534 | end_page_writeback(page); |
3530 | 3535 | ||
@@ -3538,8 +3543,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
3538 | spin_unlock_irq(&page->mapping->tree_lock); | 3543 | spin_unlock_irq(&page->mapping->tree_lock); |
3539 | } | 3544 | } |
3540 | 3545 | ||
3541 | page->mapping->a_ops->invalidatepage(page, 0); | ||
3542 | unlock_page(page); | 3546 | unlock_page(page); |
3547 | page_cache_release(page); | ||
3543 | } | 3548 | } |
3544 | } | 3549 | } |
3545 | 3550 | ||
@@ -3553,8 +3558,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
3553 | u64 start; | 3558 | u64 start; |
3554 | u64 end; | 3559 | u64 end; |
3555 | int ret; | 3560 | int ret; |
3561 | bool loop = true; | ||
3556 | 3562 | ||
3557 | unpin = pinned_extents; | 3563 | unpin = pinned_extents; |
3564 | again: | ||
3558 | while (1) { | 3565 | while (1) { |
3559 | ret = find_first_extent_bit(unpin, 0, &start, &end, | 3566 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
3560 | EXTENT_DIRTY); | 3567 | EXTENT_DIRTY); |
@@ -3572,6 +3579,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
3572 | cond_resched(); | 3579 | cond_resched(); |
3573 | } | 3580 | } |
3574 | 3581 | ||
3582 | if (loop) { | ||
3583 | if (unpin == &root->fs_info->freed_extents[0]) | ||
3584 | unpin = &root->fs_info->freed_extents[1]; | ||
3585 | else | ||
3586 | unpin = &root->fs_info->freed_extents[0]; | ||
3587 | loop = false; | ||
3588 | goto again; | ||
3589 | } | ||
3590 | |||
3575 | return 0; | 3591 | return 0; |
3576 | } | 3592 | } |
3577 | 3593 | ||
@@ -3585,21 +3601,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, | |||
3585 | /* FIXME: cleanup wait for commit */ | 3601 | /* FIXME: cleanup wait for commit */ |
3586 | cur_trans->in_commit = 1; | 3602 | cur_trans->in_commit = 1; |
3587 | cur_trans->blocked = 1; | 3603 | cur_trans->blocked = 1; |
3588 | if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) | 3604 | wake_up(&root->fs_info->transaction_blocked_wait); |
3589 | wake_up(&root->fs_info->transaction_blocked_wait); | ||
3590 | 3605 | ||
3591 | cur_trans->blocked = 0; | 3606 | cur_trans->blocked = 0; |
3592 | if (waitqueue_active(&root->fs_info->transaction_wait)) | 3607 | wake_up(&root->fs_info->transaction_wait); |
3593 | wake_up(&root->fs_info->transaction_wait); | ||
3594 | 3608 | ||
3595 | cur_trans->commit_done = 1; | 3609 | cur_trans->commit_done = 1; |
3596 | if (waitqueue_active(&cur_trans->commit_wait)) | 3610 | wake_up(&cur_trans->commit_wait); |
3597 | wake_up(&cur_trans->commit_wait); | 3611 | |
3612 | btrfs_destroy_delayed_inodes(root); | ||
3613 | btrfs_assert_delayed_root_empty(root); | ||
3598 | 3614 | ||
3599 | btrfs_destroy_pending_snapshots(cur_trans); | 3615 | btrfs_destroy_pending_snapshots(cur_trans); |
3600 | 3616 | ||
3601 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, | 3617 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, |
3602 | EXTENT_DIRTY); | 3618 | EXTENT_DIRTY); |
3619 | btrfs_destroy_pinned_extent(root, | ||
3620 | root->fs_info->pinned_extents); | ||
3603 | 3621 | ||
3604 | /* | 3622 | /* |
3605 | memset(cur_trans, 0, sizeof(*cur_trans)); | 3623 | memset(cur_trans, 0, sizeof(*cur_trans)); |
@@ -3648,6 +3666,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
3648 | if (waitqueue_active(&t->commit_wait)) | 3666 | if (waitqueue_active(&t->commit_wait)) |
3649 | wake_up(&t->commit_wait); | 3667 | wake_up(&t->commit_wait); |
3650 | 3668 | ||
3669 | btrfs_destroy_delayed_inodes(root); | ||
3670 | btrfs_assert_delayed_root_empty(root); | ||
3671 | |||
3651 | btrfs_destroy_pending_snapshots(t); | 3672 | btrfs_destroy_pending_snapshots(t); |
3652 | 3673 | ||
3653 | btrfs_destroy_delalloc_inodes(root); | 3674 | btrfs_destroy_delalloc_inodes(root); |
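The reworked loop in btrfs_destroy_delayed_refs() drops the old goto again restart: when the per-head mutex is contended it takes an extra reference, releases the spinlock, sleeps on the mutex only to wait the current holder out, drops the reference, then re-acquires the spinlock and moves on to the next rb_first() entry. The lock-ordering idea on its own, as a user-space pthread sketch (hypothetical names, not the kernel locking API):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t list_lock;
static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Process one entry whose per-entry mutex may be held by someone else. */
static void drain_one(void)
{
        pthread_spin_lock(&list_lock);

        if (pthread_mutex_trylock(&head_mutex) != 0) {
                /*
                 * Contended: never sleep while holding the outer spinlock.
                 * Drop it, block on the mutex just to wait the holder out,
                 * then let the caller re-take the spinlock and retry.
                 */
                pthread_spin_unlock(&list_lock);
                pthread_mutex_lock(&head_mutex);
                pthread_mutex_unlock(&head_mutex);
                return;
        }

        printf("entry processed\n");
        pthread_mutex_unlock(&head_mutex);
        pthread_spin_unlock(&list_lock);
}

int main(void)
{
        pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
        drain_one();
        pthread_spin_destroy(&list_lock);
        return 0;
}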
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2c8f7b204617..aaa12c1eb348 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "volumes.h" | 20 | #include "volumes.h" |
21 | #include "check-integrity.h" | 21 | #include "check-integrity.h" |
22 | #include "locking.h" | 22 | #include "locking.h" |
23 | #include "rcu-string.h" | ||
23 | 24 | ||
24 | static struct kmem_cache *extent_state_cache; | 25 | static struct kmem_cache *extent_state_cache; |
25 | static struct kmem_cache *extent_buffer_cache; | 26 | static struct kmem_cache *extent_buffer_cache; |
@@ -1917,9 +1918,9 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, | |||
1917 | return -EIO; | 1918 | return -EIO; |
1918 | } | 1919 | } |
1919 | 1920 | ||
1920 | printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s " | 1921 | printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu " |
1921 | "sector %llu)\n", page->mapping->host->i_ino, start, | 1922 | "(dev %s sector %llu)\n", page->mapping->host->i_ino, |
1922 | dev->name, sector); | 1923 | start, rcu_str_deref(dev->name), sector); |
1923 | 1924 | ||
1924 | bio_put(bio); | 1925 | bio_put(bio); |
1925 | return 0; | 1926 | return 0; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f6ab6f5e635a..d8bb0dbc4941 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -830,7 +830,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
830 | if (IS_ERR(trans)) { | 830 | if (IS_ERR(trans)) { |
831 | extent_clear_unlock_delalloc(inode, | 831 | extent_clear_unlock_delalloc(inode, |
832 | &BTRFS_I(inode)->io_tree, | 832 | &BTRFS_I(inode)->io_tree, |
833 | start, end, NULL, | 833 | start, end, locked_page, |
834 | EXTENT_CLEAR_UNLOCK_PAGE | | 834 | EXTENT_CLEAR_UNLOCK_PAGE | |
835 | EXTENT_CLEAR_UNLOCK | | 835 | EXTENT_CLEAR_UNLOCK | |
836 | EXTENT_CLEAR_DELALLOC | | 836 | EXTENT_CLEAR_DELALLOC | |
@@ -963,7 +963,7 @@ out: | |||
963 | out_unlock: | 963 | out_unlock: |
964 | extent_clear_unlock_delalloc(inode, | 964 | extent_clear_unlock_delalloc(inode, |
965 | &BTRFS_I(inode)->io_tree, | 965 | &BTRFS_I(inode)->io_tree, |
966 | start, end, NULL, | 966 | start, end, locked_page, |
967 | EXTENT_CLEAR_UNLOCK_PAGE | | 967 | EXTENT_CLEAR_UNLOCK_PAGE | |
968 | EXTENT_CLEAR_UNLOCK | | 968 | EXTENT_CLEAR_UNLOCK | |
969 | EXTENT_CLEAR_DELALLOC | | 969 | EXTENT_CLEAR_DELALLOC | |
@@ -986,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work) | |||
986 | compress_file_range(async_cow->inode, async_cow->locked_page, | 986 | compress_file_range(async_cow->inode, async_cow->locked_page, |
987 | async_cow->start, async_cow->end, async_cow, | 987 | async_cow->start, async_cow->end, async_cow, |
988 | &num_added); | 988 | &num_added); |
989 | if (num_added == 0) | 989 | if (num_added == 0) { |
990 | btrfs_add_delayed_iput(async_cow->inode); | ||
990 | async_cow->inode = NULL; | 991 | async_cow->inode = NULL; |
992 | } | ||
991 | } | 993 | } |
992 | 994 | ||
993 | /* | 995 | /* |
@@ -1020,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work) | |||
1020 | { | 1022 | { |
1021 | struct async_cow *async_cow; | 1023 | struct async_cow *async_cow; |
1022 | async_cow = container_of(work, struct async_cow, work); | 1024 | async_cow = container_of(work, struct async_cow, work); |
1025 | if (async_cow->inode) | ||
1026 | btrfs_add_delayed_iput(async_cow->inode); | ||
1023 | kfree(async_cow); | 1027 | kfree(async_cow); |
1024 | } | 1028 | } |
1025 | 1029 | ||
@@ -1038,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, | |||
1038 | while (start < end) { | 1042 | while (start < end) { |
1039 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); | 1043 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); |
1040 | BUG_ON(!async_cow); /* -ENOMEM */ | 1044 | BUG_ON(!async_cow); /* -ENOMEM */ |
1041 | async_cow->inode = inode; | 1045 | async_cow->inode = igrab(inode); |
1042 | async_cow->root = root; | 1046 | async_cow->root = root; |
1043 | async_cow->locked_page = locked_page; | 1047 | async_cow->locked_page = locked_page; |
1044 | async_cow->start = start; | 1048 | async_cow->start = start; |
@@ -1136,8 +1140,18 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1136 | u64 ino = btrfs_ino(inode); | 1140 | u64 ino = btrfs_ino(inode); |
1137 | 1141 | ||
1138 | path = btrfs_alloc_path(); | 1142 | path = btrfs_alloc_path(); |
1139 | if (!path) | 1143 | if (!path) { |
1144 | extent_clear_unlock_delalloc(inode, | ||
1145 | &BTRFS_I(inode)->io_tree, | ||
1146 | start, end, locked_page, | ||
1147 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1148 | EXTENT_CLEAR_UNLOCK | | ||
1149 | EXTENT_CLEAR_DELALLOC | | ||
1150 | EXTENT_CLEAR_DIRTY | | ||
1151 | EXTENT_SET_WRITEBACK | | ||
1152 | EXTENT_END_WRITEBACK); | ||
1140 | return -ENOMEM; | 1153 | return -ENOMEM; |
1154 | } | ||
1141 | 1155 | ||
1142 | nolock = btrfs_is_free_space_inode(root, inode); | 1156 | nolock = btrfs_is_free_space_inode(root, inode); |
1143 | 1157 | ||
@@ -1147,6 +1161,15 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1147 | trans = btrfs_join_transaction(root); | 1161 | trans = btrfs_join_transaction(root); |
1148 | 1162 | ||
1149 | if (IS_ERR(trans)) { | 1163 | if (IS_ERR(trans)) { |
1164 | extent_clear_unlock_delalloc(inode, | ||
1165 | &BTRFS_I(inode)->io_tree, | ||
1166 | start, end, locked_page, | ||
1167 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1168 | EXTENT_CLEAR_UNLOCK | | ||
1169 | EXTENT_CLEAR_DELALLOC | | ||
1170 | EXTENT_CLEAR_DIRTY | | ||
1171 | EXTENT_SET_WRITEBACK | | ||
1172 | EXTENT_END_WRITEBACK); | ||
1150 | btrfs_free_path(path); | 1173 | btrfs_free_path(path); |
1151 | return PTR_ERR(trans); | 1174 | return PTR_ERR(trans); |
1152 | } | 1175 | } |
@@ -1327,8 +1350,11 @@ out_check: | |||
1327 | } | 1350 | } |
1328 | btrfs_release_path(path); | 1351 | btrfs_release_path(path); |
1329 | 1352 | ||
1330 | if (cur_offset <= end && cow_start == (u64)-1) | 1353 | if (cur_offset <= end && cow_start == (u64)-1) { |
1331 | cow_start = cur_offset; | 1354 | cow_start = cur_offset; |
1355 | cur_offset = end; | ||
1356 | } | ||
1357 | |||
1332 | if (cow_start != (u64)-1) { | 1358 | if (cow_start != (u64)-1) { |
1333 | ret = cow_file_range(inode, locked_page, cow_start, end, | 1359 | ret = cow_file_range(inode, locked_page, cow_start, end, |
1334 | page_started, nr_written, 1); | 1360 | page_started, nr_written, 1); |
@@ -1347,6 +1373,17 @@ error: | |||
1347 | if (!ret) | 1373 | if (!ret) |
1348 | ret = err; | 1374 | ret = err; |
1349 | 1375 | ||
1376 | if (ret && cur_offset < end) | ||
1377 | extent_clear_unlock_delalloc(inode, | ||
1378 | &BTRFS_I(inode)->io_tree, | ||
1379 | cur_offset, end, locked_page, | ||
1380 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1381 | EXTENT_CLEAR_UNLOCK | | ||
1382 | EXTENT_CLEAR_DELALLOC | | ||
1383 | EXTENT_CLEAR_DIRTY | | ||
1384 | EXTENT_SET_WRITEBACK | | ||
1385 | EXTENT_END_WRITEBACK); | ||
1386 | |||
1350 | btrfs_free_path(path); | 1387 | btrfs_free_path(path); |
1351 | return ret; | 1388 | return ret; |
1352 | } | 1389 | } |
@@ -1361,20 +1398,23 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, | |||
1361 | int ret; | 1398 | int ret; |
1362 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1399 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1363 | 1400 | ||
1364 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) | 1401 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) { |
1365 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1402 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
1366 | page_started, 1, nr_written); | 1403 | page_started, 1, nr_written); |
1367 | else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) | 1404 | } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) { |
1368 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1405 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
1369 | page_started, 0, nr_written); | 1406 | page_started, 0, nr_written); |
1370 | else if (!btrfs_test_opt(root, COMPRESS) && | 1407 | } else if (!btrfs_test_opt(root, COMPRESS) && |
1371 | !(BTRFS_I(inode)->force_compress) && | 1408 | !(BTRFS_I(inode)->force_compress) && |
1372 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) | 1409 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) { |
1373 | ret = cow_file_range(inode, locked_page, start, end, | 1410 | ret = cow_file_range(inode, locked_page, start, end, |
1374 | page_started, nr_written, 1); | 1411 | page_started, nr_written, 1); |
1375 | else | 1412 | } else { |
1413 | set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
1414 | &BTRFS_I(inode)->runtime_flags); | ||
1376 | ret = cow_file_range_async(inode, locked_page, start, end, | 1415 | ret = cow_file_range_async(inode, locked_page, start, end, |
1377 | page_started, nr_written); | 1416 | page_started, nr_written); |
1417 | } | ||
1378 | return ret; | 1418 | return ret; |
1379 | } | 1419 | } |
1380 | 1420 | ||
@@ -7054,10 +7094,13 @@ static void fixup_inode_flags(struct inode *dir, struct inode *inode) | |||
7054 | else | 7094 | else |
7055 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; | 7095 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; |
7056 | 7096 | ||
7057 | if (b_dir->flags & BTRFS_INODE_COMPRESS) | 7097 | if (b_dir->flags & BTRFS_INODE_COMPRESS) { |
7058 | b_inode->flags |= BTRFS_INODE_COMPRESS; | 7098 | b_inode->flags |= BTRFS_INODE_COMPRESS; |
7059 | else | 7099 | b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS; |
7060 | b_inode->flags &= ~BTRFS_INODE_COMPRESS; | 7100 | } else { |
7101 | b_inode->flags &= ~(BTRFS_INODE_COMPRESS | | ||
7102 | BTRFS_INODE_NOCOMPRESS); | ||
7103 | } | ||
7061 | } | 7104 | } |
7062 | 7105 | ||
7063 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | 7106 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
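The async_cow changes above pin the inode with igrab() when the work item is queued and drop it via btrfs_add_delayed_iput() either when compression adds no extents or in async_cow_free(), so the inode cannot be evicted while work is still in flight. The ownership pattern by itself, as a user-space refcount sketch (hypothetical grab/put names):

#include <stdio.h>
#include <stdlib.h>

struct object { int refs; };

static struct object *grab(struct object *o) { o->refs++; return o; }

static void put(struct object *o)
{
        if (--o->refs == 0) {
                printf("object freed\n");
                free(o);
        }
}

struct work { struct object *obj; };

/* Queue side: pin the object for as long as the work item exists. */
static struct work *queue_work_for(struct object *o)
{
        struct work *w = malloc(sizeof(*w));

        w->obj = grab(o);
        return w;
}

/* Completion side: the free handler owns the reference and drops it. */
static void free_work(struct work *w)
{
        if (w->obj)
                put(w->obj);
        free(w);
}

int main(void)
{
        struct object *o = malloc(sizeof(*o));
        struct work *w;

        o->refs = 1;
        w = queue_work_for(o);
        put(o);          /* original owner can drop its reference early */
        free_work(w);    /* object only disappears once the work is done */
        return 0;
}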
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 24b776c08d99..0e92e5763005 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include "locking.h" | 52 | #include "locking.h" |
53 | #include "inode-map.h" | 53 | #include "inode-map.h" |
54 | #include "backref.h" | 54 | #include "backref.h" |
55 | #include "rcu-string.h" | ||
55 | 56 | ||
56 | /* Mask out flags that are inappropriate for the given type of inode. */ | 57 | /* Mask out flags that are inappropriate for the given type of inode. */ |
57 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) | 58 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
@@ -785,39 +786,57 @@ none: | |||
785 | return -ENOENT; | 786 | return -ENOENT; |
786 | } | 787 | } |
787 | 788 | ||
788 | /* | 789 | static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) |
789 | * Validaty check of prev em and next em: | ||
790 | * 1) no prev/next em | ||
791 | * 2) prev/next em is an hole/inline extent | ||
792 | */ | ||
793 | static int check_adjacent_extents(struct inode *inode, struct extent_map *em) | ||
794 | { | 790 | { |
795 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 791 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
796 | struct extent_map *prev = NULL, *next = NULL; | 792 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
797 | int ret = 0; | 793 | struct extent_map *em; |
794 | u64 len = PAGE_CACHE_SIZE; | ||
798 | 795 | ||
796 | /* | ||
797 | * hopefully we have this extent in the tree already, try without | ||
798 | * the full extent lock | ||
799 | */ | ||
799 | read_lock(&em_tree->lock); | 800 | read_lock(&em_tree->lock); |
800 | prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1); | 801 | em = lookup_extent_mapping(em_tree, start, len); |
801 | next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1); | ||
802 | read_unlock(&em_tree->lock); | 802 | read_unlock(&em_tree->lock); |
803 | 803 | ||
804 | if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) && | 804 | if (!em) { |
805 | (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)) | 805 | /* get the big lock and read metadata off disk */ |
806 | ret = 1; | 806 | lock_extent(io_tree, start, start + len - 1); |
807 | free_extent_map(prev); | 807 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); |
808 | free_extent_map(next); | 808 | unlock_extent(io_tree, start, start + len - 1); |
809 | |||
810 | if (IS_ERR(em)) | ||
811 | return NULL; | ||
812 | } | ||
813 | |||
814 | return em; | ||
815 | } | ||
816 | |||
817 | static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) | ||
818 | { | ||
819 | struct extent_map *next; | ||
820 | bool ret = true; | ||
809 | 821 | ||
822 | /* this is the last extent */ | ||
823 | if (em->start + em->len >= i_size_read(inode)) | ||
824 | return false; | ||
825 | |||
826 | next = defrag_lookup_extent(inode, em->start + em->len); | ||
827 | if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) | ||
828 | ret = false; | ||
829 | |||
830 | free_extent_map(next); | ||
810 | return ret; | 831 | return ret; |
811 | } | 832 | } |
812 | 833 | ||
813 | static int should_defrag_range(struct inode *inode, u64 start, u64 len, | 834 | static int should_defrag_range(struct inode *inode, u64 start, int thresh, |
814 | int thresh, u64 *last_len, u64 *skip, | 835 | u64 *last_len, u64 *skip, u64 *defrag_end) |
815 | u64 *defrag_end) | ||
816 | { | 836 | { |
817 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 837 | struct extent_map *em; |
818 | struct extent_map *em = NULL; | ||
819 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
820 | int ret = 1; | 838 | int ret = 1; |
839 | bool next_mergeable = true; | ||
821 | 840 | ||
822 | /* | 841 | /* |
823 | * make sure that once we start defragging an extent, we keep on | 842 | * make sure that once we start defragging an extent, we keep on |
@@ -828,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
828 | 847 | ||
829 | *skip = 0; | 848 | *skip = 0; |
830 | 849 | ||
831 | /* | 850 | em = defrag_lookup_extent(inode, start); |
832 | * hopefully we have this extent in the tree already, try without | 851 | if (!em) |
833 | * the full extent lock | 852 | return 0; |
834 | */ | ||
835 | read_lock(&em_tree->lock); | ||
836 | em = lookup_extent_mapping(em_tree, start, len); | ||
837 | read_unlock(&em_tree->lock); | ||
838 | |||
839 | if (!em) { | ||
840 | /* get the big lock and read metadata off disk */ | ||
841 | lock_extent(io_tree, start, start + len - 1); | ||
842 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | ||
843 | unlock_extent(io_tree, start, start + len - 1); | ||
844 | |||
845 | if (IS_ERR(em)) | ||
846 | return 0; | ||
847 | } | ||
848 | 853 | ||
849 | /* this will cover holes, and inline extents */ | 854 | /* this will cover holes, and inline extents */ |
850 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { | 855 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { |
@@ -852,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
852 | goto out; | 857 | goto out; |
853 | } | 858 | } |
854 | 859 | ||
855 | /* If we have nothing to merge with us, just skip. */ | 860 | next_mergeable = defrag_check_next_extent(inode, em); |
856 | if (check_adjacent_extents(inode, em)) { | ||
857 | ret = 0; | ||
858 | goto out; | ||
859 | } | ||
860 | 861 | ||
861 | /* | 862 | /* |
862 | * we hit a real extent, if it is big don't bother defragging it again | 863 | * we hit a real extent, if it is big or the next extent is not a |
864 | * real extent, don't bother defragging it | ||
863 | */ | 865 | */ |
864 | if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) | 866 | if ((*last_len == 0 || *last_len >= thresh) && |
867 | (em->len >= thresh || !next_mergeable)) | ||
865 | ret = 0; | 868 | ret = 0; |
866 | |||
867 | out: | 869 | out: |
868 | /* | 870 | /* |
869 | * last_len ends up being a counter of how many bytes we've defragged. | 871 | * last_len ends up being a counter of how many bytes we've defragged. |
@@ -1142,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, | |||
1142 | break; | 1144 | break; |
1143 | 1145 | ||
1144 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, | 1146 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, |
1145 | PAGE_CACHE_SIZE, extent_thresh, | 1147 | extent_thresh, &last_len, &skip, |
1146 | &last_len, &skip, &defrag_end)) { | 1148 | &defrag_end)) { |
1147 | unsigned long next; | 1149 | unsigned long next; |
1148 | /* | 1150 | /* |
1149 | * the should_defrag function tells us how much to skip | 1151 | * the should_defrag function tells us how much to skip |
@@ -1304,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1304 | ret = -EINVAL; | 1306 | ret = -EINVAL; |
1305 | goto out_free; | 1307 | goto out_free; |
1306 | } | 1308 | } |
1309 | if (device->fs_devices && device->fs_devices->seeding) { | ||
1310 | printk(KERN_INFO "btrfs: resizer unable to apply on " | ||
1311 | "seeding device %llu\n", | ||
1312 | (unsigned long long)devid); | ||
1313 | ret = -EINVAL; | ||
1314 | goto out_free; | ||
1315 | } | ||
1316 | |||
1307 | if (!strcmp(sizestr, "max")) | 1317 | if (!strcmp(sizestr, "max")) |
1308 | new_size = device->bdev->bd_inode->i_size; | 1318 | new_size = device->bdev->bd_inode->i_size; |
1309 | else { | 1319 | else { |
@@ -1345,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1345 | do_div(new_size, root->sectorsize); | 1355 | do_div(new_size, root->sectorsize); |
1346 | new_size *= root->sectorsize; | 1356 | new_size *= root->sectorsize; |
1347 | 1357 | ||
1348 | printk(KERN_INFO "btrfs: new size for %s is %llu\n", | 1358 | printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n", |
1349 | device->name, (unsigned long long)new_size); | 1359 | rcu_str_deref(device->name), |
1360 | (unsigned long long)new_size); | ||
1350 | 1361 | ||
1351 | if (new_size > old_size) { | 1362 | if (new_size > old_size) { |
1352 | trans = btrfs_start_transaction(root, 0); | 1363 | trans = btrfs_start_transaction(root, 0); |
@@ -2264,7 +2275,12 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) | |||
2264 | di_args->total_bytes = dev->total_bytes; | 2275 | di_args->total_bytes = dev->total_bytes; |
2265 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); | 2276 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); |
2266 | if (dev->name) { | 2277 | if (dev->name) { |
2267 | strncpy(di_args->path, dev->name, sizeof(di_args->path)); | 2278 | struct rcu_string *name; |
2279 | |||
2280 | rcu_read_lock(); | ||
2281 | name = rcu_dereference(dev->name); | ||
2282 | strncpy(di_args->path, name->str, sizeof(di_args->path)); | ||
2283 | rcu_read_unlock(); | ||
2268 | di_args->path[sizeof(di_args->path) - 1] = 0; | 2284 | di_args->path[sizeof(di_args->path) - 1] = 0; |
2269 | } else { | 2285 | } else { |
2270 | di_args->path[0] = '\0'; | 2286 | di_args->path[0] = '\0'; |
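After the ioctl.c rework, should_defrag_range() only skips an extent when the running defrag length is not still below the threshold and the extent is either already large or has no real neighbour to merge with; defrag_check_next_extent() reports false for the last extent of the file and for holes/inline extents. The final decision reduced to a stand-alone predicate (hypothetical struct, thresh and lengths in bytes):

#include <stdbool.h>
#include <stdio.h>

struct extent {
        unsigned long long len;
        bool next_is_real;      /* neighbour is a real, mergeable extent */
};

/* Returns true when the extent should be rewritten by defrag. */
static bool should_defrag(const struct extent *em,
                          unsigned long long last_len,
                          unsigned long long thresh)
{
        bool next_mergeable = em->next_is_real;

        /* skip only if not mid-run and the extent is big or has no neighbour */
        if ((last_len == 0 || last_len >= thresh) &&
            (em->len >= thresh || !next_mergeable))
                return false;
        return true;
}

int main(void)
{
        struct extent big_lonely = { .len = 1 << 20, .next_is_real = false };
        struct extent small      = { .len = 4096,    .next_is_real = true };

        printf("%d %d\n",
               should_defrag(&big_lonely, 0, 256 * 1024),   /* 0: skip */
               should_defrag(&small, 0, 256 * 1024));        /* 1: defrag */
        return 0;
}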
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 9e138cdc36c5..643335a4fe3c 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -627,7 +627,27 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) | |||
627 | /* start IO across the range first to instantiate any delalloc | 627 | /* start IO across the range first to instantiate any delalloc |
628 | * extents | 628 | * extents |
629 | */ | 629 | */ |
630 | filemap_write_and_wait_range(inode->i_mapping, start, orig_end); | 630 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); |
631 | |||
632 | /* | ||
633 | * So with compression we will find and lock a dirty page and clear the | ||
634 | * first one as dirty, setup an async extent, and immediately return | ||
635 | * with the entire range locked but with nobody actually marked with | ||
636 | * writeback. So we can't just filemap_write_and_wait_range() and | ||
637 | * expect it to work since it will just kick off a thread to do the | ||
638 | * actual work. So we need to call filemap_fdatawrite_range _again_ | ||
639 | * since it will wait on the page lock, which won't be unlocked until | ||
640 | * after the pages have been marked as writeback and so we're good to go | ||
641 | * from there. We have to do this otherwise we'll miss the ordered | ||
642 | * extents and that results in badness. Please Josef, do not think you | ||
643 | * know better and pull this out at some point in the future, it is | ||
644 | * right and you are wrong. | ||
645 | */ | ||
646 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
647 | &BTRFS_I(inode)->runtime_flags)) | ||
648 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); | ||
649 | |||
650 | filemap_fdatawait_range(inode->i_mapping, start, orig_end); | ||
631 | 651 | ||
632 | end = orig_end; | 652 | end = orig_end; |
633 | found = 0; | 653 | found = 0; |
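The ordered-data.c change splits filemap_write_and_wait_range() into its two halves and, as the in-code comment explains, issues a second write pass when async/compressed extents may still hold the pages locked without writeback set. Condensed into one helper purely for readability, the sequence is roughly as follows (a sketch assuming kernel/btrfs context and the runtime flag shown in the hunk, not a proposed refactor):

/* Sketch only: assumes kernel context; error handling omitted. */
static void wait_ordered_io(struct inode *inode, loff_t start, loff_t end)
{
        struct address_space *mapping = inode->i_mapping;

        /* kick off writeback for any delalloc in the range */
        filemap_fdatawrite_range(mapping, start, end);

        /*
         * Compression may have returned before any page was marked for
         * writeback; a second pass blocks on the page locks and so only
         * returns once writeback has really been set up.
         */
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags))
                filemap_fdatawrite_range(mapping, start, end);

        /* now it is safe to wait for the I/O itself */
        filemap_fdatawait_range(mapping, start, end);
}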
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h new file mode 100644 index 000000000000..9e111e4576d4 --- /dev/null +++ b/fs/btrfs/rcu-string.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public | ||
6 | * License v2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public | ||
14 | * License along with this program; if not, write to the | ||
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
16 | * Boston, MA 021110-1307, USA. | ||
17 | */ | ||
18 | |||
19 | struct rcu_string { | ||
20 | struct rcu_head rcu; | ||
21 | char str[0]; | ||
22 | }; | ||
23 | |||
24 | static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) | ||
25 | { | ||
26 | size_t len = strlen(src) + 1; | ||
27 | struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) + | ||
28 | (len * sizeof(char)), mask); | ||
29 | if (!ret) | ||
30 | return ret; | ||
31 | strncpy(ret->str, src, len); | ||
32 | return ret; | ||
33 | } | ||
34 | |||
35 | static inline void rcu_string_free(struct rcu_string *str) | ||
36 | { | ||
37 | if (str) | ||
38 | kfree_rcu(str, rcu); | ||
39 | } | ||
40 | |||
41 | #define printk_in_rcu(fmt, ...) do { \ | ||
42 | rcu_read_lock(); \ | ||
43 | printk(fmt, __VA_ARGS__); \ | ||
44 | rcu_read_unlock(); \ | ||
45 | } while (0) | ||
46 | |||
47 | #define printk_ratelimited_in_rcu(fmt, ...) do { \ | ||
48 | rcu_read_lock(); \ | ||
49 | printk_ratelimited(fmt, __VA_ARGS__); \ | ||
50 | rcu_read_unlock(); \ | ||
51 | } while (0) | ||
52 | |||
53 | #define rcu_str_deref(rcu_str) ({ \ | ||
54 | struct rcu_string *__str = rcu_dereference(rcu_str); \ | ||
55 | __str->str; \ | ||
56 | }) | ||
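The new rcu-string.h above carries the string in an RCU-freed wrapper so that readers holding only rcu_read_lock() never race with a rename freeing the buffer. Pieced together from the call sites converted later in this merge (device_list_add, btrfs_ioctl_dev_info, the printk_in_rcu messages), the intended usage is: publish with rcu_assign_pointer(), read via rcu_str_deref() inside an RCU read-side section, and retire the old string with rcu_string_free(), which defers the kfree past a grace period. A condensed kernel-context sketch (hypothetical my_device struct, not the real btrfs_device; writers assumed to be serialized by the caller):

/* Sketch: assumes kernel context plus the helpers from rcu-string.h above. */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include "rcu-string.h"

struct my_device {
        struct rcu_string __rcu *name;
};

/* Writer side: caller must serialize concurrent renames. */
static int my_device_set_name(struct my_device *dev, const char *path)
{
        struct rcu_string *new_name, *old_name;

        new_name = rcu_string_strdup(path, GFP_NOFS);
        if (!new_name)
                return -ENOMEM;

        old_name = rcu_dereference_protected(dev->name, 1);
        rcu_assign_pointer(dev->name, new_name);
        rcu_string_free(old_name);      /* kfree happens after a grace period */
        return 0;
}

/* Reader side: the string stays valid for the whole read-side section. */
static void my_device_print_name(struct my_device *dev)
{
        rcu_read_lock();
        printk(KERN_INFO "device name: %s\n", rcu_str_deref(dev->name));
        rcu_read_unlock();
}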
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index a38cfa4f251e..b223620cd5a6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "backref.h" | 26 | #include "backref.h" |
27 | #include "extent_io.h" | 27 | #include "extent_io.h" |
28 | #include "check-integrity.h" | 28 | #include "check-integrity.h" |
29 | #include "rcu-string.h" | ||
29 | 30 | ||
30 | /* | 31 | /* |
31 | * This is only the first step towards a full-features scrub. It reads all | 32 | * This is only the first step towards a full-features scrub. It reads all |
@@ -320,10 +321,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
320 | * hold all of the paths here | 321 | * hold all of the paths here |
321 | */ | 322 | */ |
322 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) | 323 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) |
323 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 324 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
324 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " | 325 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " |
325 | "length %llu, links %u (path: %s)\n", swarn->errstr, | 326 | "length %llu, links %u (path: %s)\n", swarn->errstr, |
326 | swarn->logical, swarn->dev->name, | 327 | swarn->logical, rcu_str_deref(swarn->dev->name), |
327 | (unsigned long long)swarn->sector, root, inum, offset, | 328 | (unsigned long long)swarn->sector, root, inum, offset, |
328 | min(isize - offset, (u64)PAGE_SIZE), nlink, | 329 | min(isize - offset, (u64)PAGE_SIZE), nlink, |
329 | (char *)(unsigned long)ipath->fspath->val[i]); | 330 | (char *)(unsigned long)ipath->fspath->val[i]); |
@@ -332,10 +333,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
332 | return 0; | 333 | return 0; |
333 | 334 | ||
334 | err: | 335 | err: |
335 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 336 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
336 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " | 337 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " |
337 | "resolving failed with ret=%d\n", swarn->errstr, | 338 | "resolving failed with ret=%d\n", swarn->errstr, |
338 | swarn->logical, swarn->dev->name, | 339 | swarn->logical, rcu_str_deref(swarn->dev->name), |
339 | (unsigned long long)swarn->sector, root, inum, offset, ret); | 340 | (unsigned long long)swarn->sector, root, inum, offset, ret); |
340 | 341 | ||
341 | free_ipath(ipath); | 342 | free_ipath(ipath); |
@@ -390,10 +391,11 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
390 | do { | 391 | do { |
391 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, | 392 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, |
392 | &ref_root, &ref_level); | 393 | &ref_root, &ref_level); |
393 | printk(KERN_WARNING | 394 | printk_in_rcu(KERN_WARNING |
394 | "btrfs: %s at logical %llu on dev %s, " | 395 | "btrfs: %s at logical %llu on dev %s, " |
395 | "sector %llu: metadata %s (level %d) in tree " | 396 | "sector %llu: metadata %s (level %d) in tree " |
396 | "%llu\n", errstr, swarn.logical, dev->name, | 397 | "%llu\n", errstr, swarn.logical, |
398 | rcu_str_deref(dev->name), | ||
397 | (unsigned long long)swarn.sector, | 399 | (unsigned long long)swarn.sector, |
398 | ref_level ? "node" : "leaf", | 400 | ref_level ? "node" : "leaf", |
399 | ret < 0 ? -1 : ref_level, | 401 | ret < 0 ? -1 : ref_level, |
@@ -580,9 +582,11 @@ out: | |||
580 | spin_lock(&sdev->stat_lock); | 582 | spin_lock(&sdev->stat_lock); |
581 | ++sdev->stat.uncorrectable_errors; | 583 | ++sdev->stat.uncorrectable_errors; |
582 | spin_unlock(&sdev->stat_lock); | 584 | spin_unlock(&sdev->stat_lock); |
583 | printk_ratelimited(KERN_ERR | 585 | |
586 | printk_ratelimited_in_rcu(KERN_ERR | ||
584 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", | 587 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", |
585 | (unsigned long long)fixup->logical, sdev->dev->name); | 588 | (unsigned long long)fixup->logical, |
589 | rcu_str_deref(sdev->dev->name)); | ||
586 | } | 590 | } |
587 | 591 | ||
588 | btrfs_free_path(path); | 592 | btrfs_free_path(path); |
@@ -936,18 +940,20 @@ corrected_error: | |||
936 | spin_lock(&sdev->stat_lock); | 940 | spin_lock(&sdev->stat_lock); |
937 | sdev->stat.corrected_errors++; | 941 | sdev->stat.corrected_errors++; |
938 | spin_unlock(&sdev->stat_lock); | 942 | spin_unlock(&sdev->stat_lock); |
939 | printk_ratelimited(KERN_ERR | 943 | printk_ratelimited_in_rcu(KERN_ERR |
940 | "btrfs: fixed up error at logical %llu on dev %s\n", | 944 | "btrfs: fixed up error at logical %llu on dev %s\n", |
941 | (unsigned long long)logical, sdev->dev->name); | 945 | (unsigned long long)logical, |
946 | rcu_str_deref(sdev->dev->name)); | ||
942 | } | 947 | } |
943 | } else { | 948 | } else { |
944 | did_not_correct_error: | 949 | did_not_correct_error: |
945 | spin_lock(&sdev->stat_lock); | 950 | spin_lock(&sdev->stat_lock); |
946 | sdev->stat.uncorrectable_errors++; | 951 | sdev->stat.uncorrectable_errors++; |
947 | spin_unlock(&sdev->stat_lock); | 952 | spin_unlock(&sdev->stat_lock); |
948 | printk_ratelimited(KERN_ERR | 953 | printk_ratelimited_in_rcu(KERN_ERR |
949 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", | 954 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", |
950 | (unsigned long long)logical, sdev->dev->name); | 955 | (unsigned long long)logical, |
956 | rcu_str_deref(sdev->dev->name)); | ||
951 | } | 957 | } |
952 | 958 | ||
953 | out: | 959 | out: |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 96eb9fef7bd2..0eb9a4da069e 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include "version.h" | 54 | #include "version.h" |
55 | #include "export.h" | 55 | #include "export.h" |
56 | #include "compression.h" | 56 | #include "compression.h" |
57 | #include "rcu-string.h" | ||
57 | 58 | ||
58 | #define CREATE_TRACE_POINTS | 59 | #define CREATE_TRACE_POINTS |
59 | #include <trace/events/btrfs.h> | 60 | #include <trace/events/btrfs.h> |
@@ -1482,12 +1483,44 @@ static void btrfs_fs_dirty_inode(struct inode *inode, int flags) | |||
1482 | "error %d\n", btrfs_ino(inode), ret); | 1483 | "error %d\n", btrfs_ino(inode), ret); |
1483 | } | 1484 | } |
1484 | 1485 | ||
1486 | static int btrfs_show_devname(struct seq_file *m, struct dentry *root) | ||
1487 | { | ||
1488 | struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); | ||
1489 | struct btrfs_fs_devices *cur_devices; | ||
1490 | struct btrfs_device *dev, *first_dev = NULL; | ||
1491 | struct list_head *head; | ||
1492 | struct rcu_string *name; | ||
1493 | |||
1494 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | ||
1495 | cur_devices = fs_info->fs_devices; | ||
1496 | while (cur_devices) { | ||
1497 | head = &cur_devices->devices; | ||
1498 | list_for_each_entry(dev, head, dev_list) { | ||
1499 | if (!first_dev || dev->devid < first_dev->devid) | ||
1500 | first_dev = dev; | ||
1501 | } | ||
1502 | cur_devices = cur_devices->seed; | ||
1503 | } | ||
1504 | |||
1505 | if (first_dev) { | ||
1506 | rcu_read_lock(); | ||
1507 | name = rcu_dereference(first_dev->name); | ||
1508 | seq_escape(m, name->str, " \t\n\\"); | ||
1509 | rcu_read_unlock(); | ||
1510 | } else { | ||
1511 | WARN_ON(1); | ||
1512 | } | ||
1513 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
1514 | return 0; | ||
1515 | } | ||
1516 | |||
1485 | static const struct super_operations btrfs_super_ops = { | 1517 | static const struct super_operations btrfs_super_ops = { |
1486 | .drop_inode = btrfs_drop_inode, | 1518 | .drop_inode = btrfs_drop_inode, |
1487 | .evict_inode = btrfs_evict_inode, | 1519 | .evict_inode = btrfs_evict_inode, |
1488 | .put_super = btrfs_put_super, | 1520 | .put_super = btrfs_put_super, |
1489 | .sync_fs = btrfs_sync_fs, | 1521 | .sync_fs = btrfs_sync_fs, |
1490 | .show_options = btrfs_show_options, | 1522 | .show_options = btrfs_show_options, |
1523 | .show_devname = btrfs_show_devname, | ||
1491 | .write_inode = btrfs_write_inode, | 1524 | .write_inode = btrfs_write_inode, |
1492 | .dirty_inode = btrfs_fs_dirty_inode, | 1525 | .dirty_inode = btrfs_fs_dirty_inode, |
1493 | .alloc_inode = btrfs_alloc_inode, | 1526 | .alloc_inode = btrfs_alloc_inode, |
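btrfs_show_devname(), added above and wired into btrfs_super_ops, scans every device list, including seed filesystems chained through ->seed, and reports the device with the smallest devid so the name shown via show_devname is stable regardless of which device was passed to mount. The selection logic on its own, as a stand-alone sketch with plain arrays in place of the kernel lists:

#include <stdio.h>

struct dev {
        unsigned long long devid;
        const char *name;
};

/* Pick the device with the smallest devid across several device groups. */
static const struct dev *first_dev(const struct dev *const groups[],
                                   const int counts[], int ngroups)
{
        const struct dev *best = NULL;
        int g, i;

        for (g = 0; g < ngroups; g++)
                for (i = 0; i < counts[g]; i++)
                        if (!best || groups[g][i].devid < best->devid)
                                best = &groups[g][i];
        return best;
}

int main(void)
{
        const struct dev main_devs[] = { { 3, "/dev/sdc" }, { 1, "/dev/sda" } };
        const struct dev seed_devs[] = { { 2, "/dev/sdb" } };
        const struct dev *const groups[] = { main_devs, seed_devs };
        const int counts[] = { 2, 1 };

        printf("%s\n", first_dev(groups, counts, 2)->name); /* /dev/sda */
        return 0;
}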
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1791c6e3d834..b72b068183ec 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -100,6 +100,10 @@ loop: | |||
100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | 100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); |
101 | cur_trans = fs_info->running_transaction; | 101 | cur_trans = fs_info->running_transaction; |
102 | goto loop; | 102 | goto loop; |
103 | } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | ||
104 | spin_unlock(&root->fs_info->trans_lock); | ||
105 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | ||
106 | return -EROFS; | ||
103 | } | 107 | } |
104 | 108 | ||
105 | atomic_set(&cur_trans->num_writers, 1); | 109 | atomic_set(&cur_trans->num_writers, 1); |
@@ -1213,14 +1217,20 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
1213 | 1217 | ||
1214 | 1218 | ||
1215 | static void cleanup_transaction(struct btrfs_trans_handle *trans, | 1219 | static void cleanup_transaction(struct btrfs_trans_handle *trans, |
1216 | struct btrfs_root *root) | 1220 | struct btrfs_root *root, int err) |
1217 | { | 1221 | { |
1218 | struct btrfs_transaction *cur_trans = trans->transaction; | 1222 | struct btrfs_transaction *cur_trans = trans->transaction; |
1219 | 1223 | ||
1220 | WARN_ON(trans->use_count > 1); | 1224 | WARN_ON(trans->use_count > 1); |
1221 | 1225 | ||
1226 | btrfs_abort_transaction(trans, root, err); | ||
1227 | |||
1222 | spin_lock(&root->fs_info->trans_lock); | 1228 | spin_lock(&root->fs_info->trans_lock); |
1223 | list_del_init(&cur_trans->list); | 1229 | list_del_init(&cur_trans->list); |
1230 | if (cur_trans == root->fs_info->running_transaction) { | ||
1231 | root->fs_info->running_transaction = NULL; | ||
1232 | root->fs_info->trans_no_join = 0; | ||
1233 | } | ||
1224 | spin_unlock(&root->fs_info->trans_lock); | 1234 | spin_unlock(&root->fs_info->trans_lock); |
1225 | 1235 | ||
1226 | btrfs_cleanup_one_transaction(trans->transaction, root); | 1236 | btrfs_cleanup_one_transaction(trans->transaction, root); |
@@ -1526,7 +1536,7 @@ cleanup_transaction: | |||
1526 | // WARN_ON(1); | 1536 | // WARN_ON(1); |
1527 | if (current->journal_info == trans) | 1537 | if (current->journal_info == trans) |
1528 | current->journal_info = NULL; | 1538 | current->journal_info = NULL; |
1529 | cleanup_transaction(trans, root); | 1539 | cleanup_transaction(trans, root, ret); |
1530 | 1540 | ||
1531 | return ret; | 1541 | return ret; |
1532 | } | 1542 | } |
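cleanup_transaction() now receives the error, aborts the transaction with it, and, if the transaction being torn down is still the globally running one, clears running_transaction and trans_no_join under trans_lock so later starters allocate a fresh transaction instead of joining a dead one. The detach-under-lock step in isolation, as a user-space sketch (struct txn and the pthread locking are stand-ins, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct txn { int id; };

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static struct txn *running_transaction;
static int trans_no_join;

/* Tear down cur: detach it from the global state so nobody can join it. */
static void cleanup_transaction(struct txn *cur, int err)
{
        fprintf(stderr, "aborting transaction %d: error %d\n", cur->id, err);

        pthread_mutex_lock(&trans_lock);
        if (cur == running_transaction) {
                running_transaction = NULL;   /* later starters allocate afresh */
                trans_no_join = 0;            /* and are allowed to do so again */
        }
        pthread_mutex_unlock(&trans_lock);
}

int main(void)
{
        struct txn t = { .id = 7 };

        running_transaction = &t;
        trans_no_join = 1;
        cleanup_transaction(&t, -5);
        printf("running=%p no_join=%d\n", (void *)running_transaction,
               trans_no_join);
        return 0;
}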
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7782020996fe..8a3d2594b807 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "volumes.h" | 35 | #include "volumes.h" |
36 | #include "async-thread.h" | 36 | #include "async-thread.h" |
37 | #include "check-integrity.h" | 37 | #include "check-integrity.h" |
38 | #include "rcu-string.h" | ||
38 | 39 | ||
39 | static int init_first_rw_device(struct btrfs_trans_handle *trans, | 40 | static int init_first_rw_device(struct btrfs_trans_handle *trans, |
40 | struct btrfs_root *root, | 41 | struct btrfs_root *root, |
@@ -64,7 +65,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices) | |||
64 | device = list_entry(fs_devices->devices.next, | 65 | device = list_entry(fs_devices->devices.next, |
65 | struct btrfs_device, dev_list); | 66 | struct btrfs_device, dev_list); |
66 | list_del(&device->dev_list); | 67 | list_del(&device->dev_list); |
67 | kfree(device->name); | 68 | rcu_string_free(device->name); |
68 | kfree(device); | 69 | kfree(device); |
69 | } | 70 | } |
70 | kfree(fs_devices); | 71 | kfree(fs_devices); |
@@ -334,8 +335,8 @@ static noinline int device_list_add(const char *path, | |||
334 | { | 335 | { |
335 | struct btrfs_device *device; | 336 | struct btrfs_device *device; |
336 | struct btrfs_fs_devices *fs_devices; | 337 | struct btrfs_fs_devices *fs_devices; |
338 | struct rcu_string *name; | ||
337 | u64 found_transid = btrfs_super_generation(disk_super); | 339 | u64 found_transid = btrfs_super_generation(disk_super); |
338 | char *name; | ||
339 | 340 | ||
340 | fs_devices = find_fsid(disk_super->fsid); | 341 | fs_devices = find_fsid(disk_super->fsid); |
341 | if (!fs_devices) { | 342 | if (!fs_devices) { |
@@ -369,11 +370,13 @@ static noinline int device_list_add(const char *path, | |||
369 | memcpy(device->uuid, disk_super->dev_item.uuid, | 370 | memcpy(device->uuid, disk_super->dev_item.uuid, |
370 | BTRFS_UUID_SIZE); | 371 | BTRFS_UUID_SIZE); |
371 | spin_lock_init(&device->io_lock); | 372 | spin_lock_init(&device->io_lock); |
372 | device->name = kstrdup(path, GFP_NOFS); | 373 | |
373 | if (!device->name) { | 374 | name = rcu_string_strdup(path, GFP_NOFS); |
375 | if (!name) { | ||
374 | kfree(device); | 376 | kfree(device); |
375 | return -ENOMEM; | 377 | return -ENOMEM; |
376 | } | 378 | } |
379 | rcu_assign_pointer(device->name, name); | ||
377 | INIT_LIST_HEAD(&device->dev_alloc_list); | 380 | INIT_LIST_HEAD(&device->dev_alloc_list); |
378 | 381 | ||
379 | /* init readahead state */ | 382 | /* init readahead state */ |
@@ -390,12 +393,12 @@ static noinline int device_list_add(const char *path, | |||
390 | 393 | ||
391 | device->fs_devices = fs_devices; | 394 | device->fs_devices = fs_devices; |
392 | fs_devices->num_devices++; | 395 | fs_devices->num_devices++; |
393 | } else if (!device->name || strcmp(device->name, path)) { | 396 | } else if (!device->name || strcmp(device->name->str, path)) { |
394 | name = kstrdup(path, GFP_NOFS); | 397 | name = rcu_string_strdup(path, GFP_NOFS); |
395 | if (!name) | 398 | if (!name) |
396 | return -ENOMEM; | 399 | return -ENOMEM; |
397 | kfree(device->name); | 400 | rcu_string_free(device->name); |
398 | device->name = name; | 401 | rcu_assign_pointer(device->name, name); |
399 | if (device->missing) { | 402 | if (device->missing) { |
400 | fs_devices->missing_devices--; | 403 | fs_devices->missing_devices--; |
401 | device->missing = 0; | 404 | device->missing = 0; |
@@ -430,15 +433,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) | |||
430 | 433 | ||
431 | /* We have held the volume lock, it is safe to get the devices. */ | 434 | /* We have held the volume lock, it is safe to get the devices. */ |
432 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { | 435 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { |
436 | struct rcu_string *name; | ||
437 | |||
433 | device = kzalloc(sizeof(*device), GFP_NOFS); | 438 | device = kzalloc(sizeof(*device), GFP_NOFS); |
434 | if (!device) | 439 | if (!device) |
435 | goto error; | 440 | goto error; |
436 | 441 | ||
437 | device->name = kstrdup(orig_dev->name, GFP_NOFS); | 442 | /* |
438 | if (!device->name) { | 443 | * This is ok to do without rcu read locked because we hold the |
444 | * uuid mutex so nothing we touch in here is going to disappear. | ||
445 | */ | ||
446 | name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS); | ||
447 | if (!name) { | ||
439 | kfree(device); | 448 | kfree(device); |
440 | goto error; | 449 | goto error; |
441 | } | 450 | } |
451 | rcu_assign_pointer(device->name, name); | ||
442 | 452 | ||
443 | device->devid = orig_dev->devid; | 453 | device->devid = orig_dev->devid; |
444 | device->work.func = pending_bios_fn; | 454 | device->work.func = pending_bios_fn; |
@@ -491,7 +501,7 @@ again: | |||
491 | } | 501 | } |
492 | list_del_init(&device->dev_list); | 502 | list_del_init(&device->dev_list); |
493 | fs_devices->num_devices--; | 503 | fs_devices->num_devices--; |
494 | kfree(device->name); | 504 | rcu_string_free(device->name); |
495 | kfree(device); | 505 | kfree(device); |
496 | } | 506 | } |
497 | 507 | ||
@@ -516,7 +526,7 @@ static void __free_device(struct work_struct *work) | |||
516 | if (device->bdev) | 526 | if (device->bdev) |
517 | blkdev_put(device->bdev, device->mode); | 527 | blkdev_put(device->bdev, device->mode); |
518 | 528 | ||
519 | kfree(device->name); | 529 | rcu_string_free(device->name); |
520 | kfree(device); | 530 | kfree(device); |
521 | } | 531 | } |
522 | 532 | ||
@@ -540,6 +550,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
540 | mutex_lock(&fs_devices->device_list_mutex); | 550 | mutex_lock(&fs_devices->device_list_mutex); |
541 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 551 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
542 | struct btrfs_device *new_device; | 552 | struct btrfs_device *new_device; |
553 | struct rcu_string *name; | ||
543 | 554 | ||
544 | if (device->bdev) | 555 | if (device->bdev) |
545 | fs_devices->open_devices--; | 556 | fs_devices->open_devices--; |
@@ -555,8 +566,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
555 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); | 566 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); |
556 | BUG_ON(!new_device); /* -ENOMEM */ | 567 | BUG_ON(!new_device); /* -ENOMEM */ |
557 | memcpy(new_device, device, sizeof(*new_device)); | 568 | memcpy(new_device, device, sizeof(*new_device)); |
558 | new_device->name = kstrdup(device->name, GFP_NOFS); | 569 | |
559 | BUG_ON(device->name && !new_device->name); /* -ENOMEM */ | 570 | /* Safe because we are under uuid_mutex */ |
571 | name = rcu_string_strdup(device->name->str, GFP_NOFS); | ||
572 | BUG_ON(device->name && !name); /* -ENOMEM */ | ||
573 | rcu_assign_pointer(new_device->name, name); | ||
560 | new_device->bdev = NULL; | 574 | new_device->bdev = NULL; |
561 | new_device->writeable = 0; | 575 | new_device->writeable = 0; |
562 | new_device->in_fs_metadata = 0; | 576 | new_device->in_fs_metadata = 0; |
@@ -621,9 +635,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
621 | if (!device->name) | 635 | if (!device->name) |
622 | continue; | 636 | continue; |
623 | 637 | ||
624 | bdev = blkdev_get_by_path(device->name, flags, holder); | 638 | bdev = blkdev_get_by_path(device->name->str, flags, holder); |
625 | if (IS_ERR(bdev)) { | 639 | if (IS_ERR(bdev)) { |
626 | printk(KERN_INFO "open %s failed\n", device->name); | 640 | printk(KERN_INFO "open %s failed\n", device->name->str); |
627 | goto error; | 641 | goto error; |
628 | } | 642 | } |
629 | filemap_write_and_wait(bdev->bd_inode->i_mapping); | 643 | filemap_write_and_wait(bdev->bd_inode->i_mapping); |
@@ -1632,6 +1646,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1632 | struct block_device *bdev; | 1646 | struct block_device *bdev; |
1633 | struct list_head *devices; | 1647 | struct list_head *devices; |
1634 | struct super_block *sb = root->fs_info->sb; | 1648 | struct super_block *sb = root->fs_info->sb; |
1649 | struct rcu_string *name; | ||
1635 | u64 total_bytes; | 1650 | u64 total_bytes; |
1636 | int seeding_dev = 0; | 1651 | int seeding_dev = 0; |
1637 | int ret = 0; | 1652 | int ret = 0; |
@@ -1671,23 +1686,24 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1671 | goto error; | 1686 | goto error; |
1672 | } | 1687 | } |
1673 | 1688 | ||
1674 | device->name = kstrdup(device_path, GFP_NOFS); | 1689 | name = rcu_string_strdup(device_path, GFP_NOFS); |
1675 | if (!device->name) { | 1690 | if (!name) { |
1676 | kfree(device); | 1691 | kfree(device); |
1677 | ret = -ENOMEM; | 1692 | ret = -ENOMEM; |
1678 | goto error; | 1693 | goto error; |
1679 | } | 1694 | } |
1695 | rcu_assign_pointer(device->name, name); | ||
1680 | 1696 | ||
1681 | ret = find_next_devid(root, &device->devid); | 1697 | ret = find_next_devid(root, &device->devid); |
1682 | if (ret) { | 1698 | if (ret) { |
1683 | kfree(device->name); | 1699 | rcu_string_free(device->name); |
1684 | kfree(device); | 1700 | kfree(device); |
1685 | goto error; | 1701 | goto error; |
1686 | } | 1702 | } |
1687 | 1703 | ||
1688 | trans = btrfs_start_transaction(root, 0); | 1704 | trans = btrfs_start_transaction(root, 0); |
1689 | if (IS_ERR(trans)) { | 1705 | if (IS_ERR(trans)) { |
1690 | kfree(device->name); | 1706 | rcu_string_free(device->name); |
1691 | kfree(device); | 1707 | kfree(device); |
1692 | ret = PTR_ERR(trans); | 1708 | ret = PTR_ERR(trans); |
1693 | goto error; | 1709 | goto error; |
@@ -1796,7 +1812,7 @@ error_trans: | |||
1796 | unlock_chunks(root); | 1812 | unlock_chunks(root); |
1797 | btrfs_abort_transaction(trans, root, ret); | 1813 | btrfs_abort_transaction(trans, root, ret); |
1798 | btrfs_end_transaction(trans, root); | 1814 | btrfs_end_transaction(trans, root); |
1799 | kfree(device->name); | 1815 | rcu_string_free(device->name); |
1800 | kfree(device); | 1816 | kfree(device); |
1801 | error: | 1817 | error: |
1802 | blkdev_put(bdev, FMODE_EXCL); | 1818 | blkdev_put(bdev, FMODE_EXCL); |
@@ -4204,10 +4220,17 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
4204 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; | 4220 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; |
4205 | dev = bbio->stripes[dev_nr].dev; | 4221 | dev = bbio->stripes[dev_nr].dev; |
4206 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { | 4222 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { |
4223 | #ifdef DEBUG | ||
4224 | struct rcu_string *name; | ||
4225 | |||
4226 | rcu_read_lock(); | ||
4227 | name = rcu_dereference(dev->name); | ||
4207 | pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " | 4228 | pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " |
4208 | "(%s id %llu), size=%u\n", rw, | 4229 | "(%s id %llu), size=%u\n", rw, |
4209 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, | 4230 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, |
4210 | dev->name, dev->devid, bio->bi_size); | 4231 | name->str, dev->devid, bio->bi_size); |
4232 | rcu_read_unlock(); | ||
4233 | #endif | ||
4211 | bio->bi_bdev = dev->bdev; | 4234 | bio->bi_bdev = dev->bdev; |
4212 | if (async_submit) | 4235 | if (async_submit) |
4213 | schedule_bio(root, dev, rw, bio); | 4236 | schedule_bio(root, dev, rw, bio); |
@@ -4694,8 +4717,9 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) | |||
4694 | key.offset = device->devid; | 4717 | key.offset = device->devid; |
4695 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); | 4718 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); |
4696 | if (ret) { | 4719 | if (ret) { |
4697 | printk(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", | 4720 | printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", |
4698 | device->name, (unsigned long long)device->devid); | 4721 | rcu_str_deref(device->name), |
4722 | (unsigned long long)device->devid); | ||
4699 | __btrfs_reset_dev_stats(device); | 4723 | __btrfs_reset_dev_stats(device); |
4700 | device->dev_stats_valid = 1; | 4724 | device->dev_stats_valid = 1; |
4701 | btrfs_release_path(path); | 4725 | btrfs_release_path(path); |
@@ -4747,8 +4771,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4747 | BUG_ON(!path); | 4771 | BUG_ON(!path); |
4748 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); | 4772 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); |
4749 | if (ret < 0) { | 4773 | if (ret < 0) { |
4750 | printk(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", | 4774 | printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", |
4751 | ret, device->name); | 4775 | ret, rcu_str_deref(device->name)); |
4752 | goto out; | 4776 | goto out; |
4753 | } | 4777 | } |
4754 | 4778 | ||
@@ -4757,8 +4781,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4757 | /* need to delete old one and insert a new one */ | 4781 | /* need to delete old one and insert a new one */ |
4758 | ret = btrfs_del_item(trans, dev_root, path); | 4782 | ret = btrfs_del_item(trans, dev_root, path); |
4759 | if (ret != 0) { | 4783 | if (ret != 0) { |
4760 | printk(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", | 4784 | printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", |
4761 | device->name, ret); | 4785 | rcu_str_deref(device->name), ret); |
4762 | goto out; | 4786 | goto out; |
4763 | } | 4787 | } |
4764 | ret = 1; | 4788 | ret = 1; |
@@ -4770,8 +4794,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4770 | ret = btrfs_insert_empty_item(trans, dev_root, path, | 4794 | ret = btrfs_insert_empty_item(trans, dev_root, path, |
4771 | &key, sizeof(*ptr)); | 4795 | &key, sizeof(*ptr)); |
4772 | if (ret < 0) { | 4796 | if (ret < 0) { |
4773 | printk(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", | 4797 | printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", |
4774 | device->name, ret); | 4798 | rcu_str_deref(device->name), ret); |
4775 | goto out; | 4799 | goto out; |
4776 | } | 4800 | } |
4777 | } | 4801 | } |
@@ -4823,9 +4847,9 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
4823 | { | 4847 | { |
4824 | if (!dev->dev_stats_valid) | 4848 | if (!dev->dev_stats_valid) |
4825 | return; | 4849 | return; |
4826 | printk_ratelimited(KERN_ERR | 4850 | printk_ratelimited_in_rcu(KERN_ERR |
4827 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4851 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
4828 | dev->name, | 4852 | rcu_str_deref(dev->name), |
4829 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4853 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
4830 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4854 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
4831 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4855 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
@@ -4837,8 +4861,8 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
4837 | 4861 | ||
4838 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) | 4862 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) |
4839 | { | 4863 | { |
4840 | printk(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4864 | printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
4841 | dev->name, | 4865 | rcu_str_deref(dev->name), |
4842 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4866 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
4843 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4867 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
4844 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4868 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 3406a88ca83e..74366f27a76b 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -58,7 +58,7 @@ struct btrfs_device { | |||
58 | /* the mode sent to blkdev_get */ | 58 | /* the mode sent to blkdev_get */ |
59 | fmode_t mode; | 59 | fmode_t mode; |
60 | 60 | ||
61 | char *name; | 61 | struct rcu_string *name; |
62 | 62 | ||
63 | /* the internal btrfs device id */ | 63 | /* the internal btrfs device id */ |
64 | u64 devid; | 64 | u64 devid; |
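The btrfs hunks above convert device->name from a plain char * to an RCU-protected rcu_string, so readers can dereference the name without taking a lock while writers publish a freshly built copy. The following is an illustrative userspace analogue only, using C11 atomics instead of RCU; the struct and function names are hypothetical, not the kernel's rcu_string API.

/*
 * Illustrative analogue of the rcu_string idea: the string body is
 * allocated together with a small header, so publishing one pointer
 * with release semantics hands readers a complete, immutable name.
 * The real kernel type carries a struct rcu_head and relies on RCU.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name_box {
    size_t len;
    char str[];            /* string stored inline after the header */
};

static struct name_box *name_box_dup(const char *s)
{
    size_t len = strlen(s) + 1;
    struct name_box *b = malloc(sizeof(*b) + len);

    if (b) {
        b->len = len;
        memcpy(b->str, s, len);
    }
    return b;
}

int main(void)
{
    _Atomic(struct name_box *) dev_name = NULL;

    /* writer: build the new name fully, then publish it in one store */
    struct name_box *b = name_box_dup("/dev/sdb1");
    atomic_store_explicit(&dev_name, b, memory_order_release);

    /* reader: one acquire load gives a stable header + string */
    struct name_box *cur = atomic_load_explicit(&dev_name,
                                                memory_order_acquire);
    if (cur)
        printf("device name: %s (%zu bytes)\n", cur->str, cur->len);

    free(b);
    return 0;
}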
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 173b1d22e59b..8b67304e4b80 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -54,7 +54,12 @@ | |||
54 | (CONGESTION_ON_THRESH(congestion_kb) - \ | 54 | (CONGESTION_ON_THRESH(congestion_kb) - \ |
55 | (CONGESTION_ON_THRESH(congestion_kb) >> 2)) | 55 | (CONGESTION_ON_THRESH(congestion_kb) >> 2)) |
56 | 56 | ||
57 | 57 | static inline struct ceph_snap_context *page_snap_context(struct page *page) | |
58 | { | ||
59 | if (PagePrivate(page)) | ||
60 | return (void *)page->private; | ||
61 | return NULL; | ||
62 | } | ||
58 | 63 | ||
59 | /* | 64 | /* |
60 | * Dirty a page. Optimistically adjust accounting, on the assumption | 65 | * Dirty a page. Optimistically adjust accounting, on the assumption |
@@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset) | |||
142 | { | 147 | { |
143 | struct inode *inode; | 148 | struct inode *inode; |
144 | struct ceph_inode_info *ci; | 149 | struct ceph_inode_info *ci; |
145 | struct ceph_snap_context *snapc = (void *)page->private; | 150 | struct ceph_snap_context *snapc = page_snap_context(page); |
146 | 151 | ||
147 | BUG_ON(!PageLocked(page)); | 152 | BUG_ON(!PageLocked(page)); |
148 | BUG_ON(!page->private); | ||
149 | BUG_ON(!PagePrivate(page)); | 153 | BUG_ON(!PagePrivate(page)); |
150 | BUG_ON(!page->mapping); | 154 | BUG_ON(!page->mapping); |
151 | 155 | ||
@@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g) | |||
182 | struct inode *inode = page->mapping ? page->mapping->host : NULL; | 186 | struct inode *inode = page->mapping ? page->mapping->host : NULL; |
183 | dout("%p releasepage %p idx %lu\n", inode, page, page->index); | 187 | dout("%p releasepage %p idx %lu\n", inode, page, page->index); |
184 | WARN_ON(PageDirty(page)); | 188 | WARN_ON(PageDirty(page)); |
185 | WARN_ON(page->private); | ||
186 | WARN_ON(PagePrivate(page)); | 189 | WARN_ON(PagePrivate(page)); |
187 | return 0; | 190 | return 0; |
188 | } | 191 | } |
@@ -443,7 +446,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
443 | osdc = &fsc->client->osdc; | 446 | osdc = &fsc->client->osdc; |
444 | 447 | ||
445 | /* verify this is a writeable snap context */ | 448 | /* verify this is a writeable snap context */ |
446 | snapc = (void *)page->private; | 449 | snapc = page_snap_context(page); |
447 | if (snapc == NULL) { | 450 | if (snapc == NULL) { |
448 | dout("writepage %p page %p not dirty?\n", inode, page); | 451 | dout("writepage %p page %p not dirty?\n", inode, page); |
449 | goto out; | 452 | goto out; |
@@ -451,7 +454,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
451 | oldest = get_oldest_context(inode, &snap_size); | 454 | oldest = get_oldest_context(inode, &snap_size); |
452 | if (snapc->seq > oldest->seq) { | 455 | if (snapc->seq > oldest->seq) { |
453 | dout("writepage %p page %p snapc %p not writeable - noop\n", | 456 | dout("writepage %p page %p snapc %p not writeable - noop\n", |
454 | inode, page, (void *)page->private); | 457 | inode, page, snapc); |
455 | /* we should only noop if called by kswapd */ | 458 | /* we should only noop if called by kswapd */ |
456 | WARN_ON((current->flags & PF_MEMALLOC) == 0); | 459 | WARN_ON((current->flags & PF_MEMALLOC) == 0); |
457 | ceph_put_snap_context(oldest); | 460 | ceph_put_snap_context(oldest); |
@@ -591,7 +594,7 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
591 | clear_bdi_congested(&fsc->backing_dev_info, | 594 | clear_bdi_congested(&fsc->backing_dev_info, |
592 | BLK_RW_ASYNC); | 595 | BLK_RW_ASYNC); |
593 | 596 | ||
594 | ceph_put_snap_context((void *)page->private); | 597 | ceph_put_snap_context(page_snap_context(page)); |
595 | page->private = 0; | 598 | page->private = 0; |
596 | ClearPagePrivate(page); | 599 | ClearPagePrivate(page); |
597 | dout("unlocking %d %p\n", i, page); | 600 | dout("unlocking %d %p\n", i, page); |
@@ -795,7 +798,7 @@ get_more_pages: | |||
795 | } | 798 | } |
796 | 799 | ||
797 | /* only if matching snap context */ | 800 | /* only if matching snap context */ |
798 | pgsnapc = (void *)page->private; | 801 | pgsnapc = page_snap_context(page); |
799 | if (pgsnapc->seq > snapc->seq) { | 802 | if (pgsnapc->seq > snapc->seq) { |
800 | dout("page snapc %p %lld > oldest %p %lld\n", | 803 | dout("page snapc %p %lld > oldest %p %lld\n", |
801 | pgsnapc, pgsnapc->seq, snapc, snapc->seq); | 804 | pgsnapc, pgsnapc->seq, snapc, snapc->seq); |
@@ -984,7 +987,7 @@ retry_locked: | |||
984 | BUG_ON(!ci->i_snap_realm); | 987 | BUG_ON(!ci->i_snap_realm); |
985 | down_read(&mdsc->snap_rwsem); | 988 | down_read(&mdsc->snap_rwsem); |
986 | BUG_ON(!ci->i_snap_realm->cached_context); | 989 | BUG_ON(!ci->i_snap_realm->cached_context); |
987 | snapc = (void *)page->private; | 990 | snapc = page_snap_context(page); |
988 | if (snapc && snapc != ci->i_head_snapc) { | 991 | if (snapc && snapc != ci->i_head_snapc) { |
989 | /* | 992 | /* |
990 | * this page is already dirty in another (older) snap | 993 | * this page is already dirty in another (older) snap |
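The ceph change above replaces open-coded casts of page->private with a page_snap_context() helper that first checks PagePrivate. A minimal stand-alone sketch of that accessor pattern follows; the struct names are hypothetical stand-ins for the page and PagePrivate machinery, not real VM types.

/*
 * Sketch of the accessor pattern: only interpret the opaque field when
 * the "present" flag is set, so a stale or zero value can never be
 * mistaken for a snap context pointer.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct snap_ctx {
    long seq;
};

struct fake_page {
    bool has_private;          /* stands in for PagePrivate(page) */
    unsigned long private_val; /* stands in for page->private */
};

static inline struct snap_ctx *page_snap_context(struct fake_page *p)
{
    if (p->has_private)
        return (struct snap_ctx *)p->private_val;
    return NULL;
}

int main(void)
{
    struct snap_ctx ctx = { .seq = 42 };
    struct fake_page page = {
        .has_private = true,
        .private_val = (unsigned long)&ctx,
    };

    struct snap_ctx *snapc = page_snap_context(&page);
    printf("snap seq = %ld\n", snapc ? snapc->seq : -1L);
    return 0;
}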
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 20350a93ed99..6df0cbe1cbc9 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -174,6 +174,7 @@ struct smb_version_operations { | |||
174 | void (*add_credits)(struct TCP_Server_Info *, const unsigned int); | 174 | void (*add_credits)(struct TCP_Server_Info *, const unsigned int); |
175 | void (*set_credits)(struct TCP_Server_Info *, const int); | 175 | void (*set_credits)(struct TCP_Server_Info *, const int); |
176 | int * (*get_credits_field)(struct TCP_Server_Info *); | 176 | int * (*get_credits_field)(struct TCP_Server_Info *); |
177 | __u64 (*get_next_mid)(struct TCP_Server_Info *); | ||
177 | /* data offset from read response message */ | 178 | /* data offset from read response message */ |
178 | unsigned int (*read_data_offset)(char *); | 179 | unsigned int (*read_data_offset)(char *); |
179 | /* data length from read response message */ | 180 | /* data length from read response message */ |
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val) | |||
399 | server->ops->set_credits(server, val); | 400 | server->ops->set_credits(server, val); |
400 | } | 401 | } |
401 | 402 | ||
403 | static inline __u64 | ||
404 | get_next_mid(struct TCP_Server_Info *server) | ||
405 | { | ||
406 | return server->ops->get_next_mid(server); | ||
407 | } | ||
408 | |||
402 | /* | 409 | /* |
403 | * Macros to allow the TCP_Server_Info->net field and related code to drop out | 410 | * Macros to allow the TCP_Server_Info->net field and related code to drop out |
404 | * when CONFIG_NET_NS isn't set. | 411 | * when CONFIG_NET_NS isn't set. |
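The cifsglob.h hunk adds a get_next_mid operation to the per-dialect ops table plus a thin inline wrapper, so common code no longer calls an SMB1-specific function directly. Below is a stand-alone sketch of that indirection under made-up names; the mid allocator here is a deliberately simplified incrementing counter, not the collision-scanning loop the real cifs_get_next_mid() uses.

/*
 * Sketch of the ops-table indirection: dialect-specific behaviour hangs
 * off a table of function pointers, and common code calls a wrapper.
 */
#include <stdint.h>
#include <stdio.h>

struct server;

struct server_ops {
    uint64_t (*get_next_mid)(struct server *srv);
};

struct server {
    const struct server_ops *ops;
    uint64_t current_mid;
};

/* simplified SMB1-style allocator: a wrapping 16-bit counter */
static uint64_t smb1_get_next_mid(struct server *srv)
{
    srv->current_mid = (uint16_t)(srv->current_mid + 1);
    if (srv->current_mid == 0)          /* treat mid 0 as reserved */
        srv->current_mid = 1;
    return srv->current_mid;
}

static const struct server_ops smb1_ops = {
    .get_next_mid = smb1_get_next_mid,
};

/* the wrapper common code calls, mirroring get_next_mid() above */
static inline uint64_t get_next_mid(struct server *srv)
{
    return srv->ops->get_next_mid(srv);
}

int main(void)
{
    struct server srv = { .ops = &smb1_ops, .current_mid = 0 };

    printf("mid %llu\n", (unsigned long long)get_next_mid(&srv));
    printf("mid %llu\n", (unsigned long long)get_next_mid(&srv));
    return 0;
}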
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 5ec21ecf7980..0a6cbfe2761e 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct, | |||
114 | void **request_buf); | 114 | void **request_buf); |
115 | extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, | 115 | extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, |
116 | const struct nls_table *nls_cp); | 116 | const struct nls_table *nls_cp); |
117 | extern __u64 GetNextMid(struct TCP_Server_Info *server); | ||
118 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); | 117 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); |
119 | extern u64 cifs_UnixTimeToNT(struct timespec); | 118 | extern u64 cifs_UnixTimeToNT(struct timespec); |
120 | extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, | 119 | extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b5ad716b2642..5b400730c213 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct, | |||
268 | return rc; | 268 | return rc; |
269 | 269 | ||
270 | buffer = (struct smb_hdr *)*request_buf; | 270 | buffer = (struct smb_hdr *)*request_buf; |
271 | buffer->Mid = GetNextMid(ses->server); | 271 | buffer->Mid = get_next_mid(ses->server); |
272 | if (ses->capabilities & CAP_UNICODE) | 272 | if (ses->capabilities & CAP_UNICODE) |
273 | buffer->Flags2 |= SMBFLG2_UNICODE; | 273 | buffer->Flags2 |= SMBFLG2_UNICODE; |
274 | if (ses->capabilities & CAP_STATUS32) | 274 | if (ses->capabilities & CAP_STATUS32) |
@@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) | |||
402 | 402 | ||
403 | cFYI(1, "secFlags 0x%x", secFlags); | 403 | cFYI(1, "secFlags 0x%x", secFlags); |
404 | 404 | ||
405 | pSMB->hdr.Mid = GetNextMid(server); | 405 | pSMB->hdr.Mid = get_next_mid(server); |
406 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); | 406 | pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); |
407 | 407 | ||
408 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) | 408 | if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) |
@@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses) | |||
782 | return rc; | 782 | return rc; |
783 | } | 783 | } |
784 | 784 | ||
785 | pSMB->hdr.Mid = GetNextMid(ses->server); | 785 | pSMB->hdr.Mid = get_next_mid(ses->server); |
786 | 786 | ||
787 | if (ses->server->sec_mode & | 787 | if (ses->server->sec_mode & |
788 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 788 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
@@ -4762,7 +4762,7 @@ getDFSRetry: | |||
4762 | 4762 | ||
4763 | /* server pointer checked in called function, | 4763 | /* server pointer checked in called function, |
4764 | but should never be null here anyway */ | 4764 | but should never be null here anyway */ |
4765 | pSMB->hdr.Mid = GetNextMid(ses->server); | 4765 | pSMB->hdr.Mid = get_next_mid(ses->server); |
4766 | pSMB->hdr.Tid = ses->ipc_tid; | 4766 | pSMB->hdr.Tid = ses->ipc_tid; |
4767 | pSMB->hdr.Uid = ses->Suid; | 4767 | pSMB->hdr.Uid = ses->Suid; |
4768 | if (ses->capabilities & CAP_STATUS32) | 4768 | if (ses->capabilities & CAP_STATUS32) |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ccafdedd0dbc..78db68a5cf44 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p) | |||
1058 | if (mid_entry != NULL) { | 1058 | if (mid_entry != NULL) { |
1059 | if (!mid_entry->multiRsp || mid_entry->multiEnd) | 1059 | if (!mid_entry->multiRsp || mid_entry->multiEnd) |
1060 | mid_entry->callback(mid_entry); | 1060 | mid_entry->callback(mid_entry); |
1061 | } else if (!server->ops->is_oplock_break(buf, server)) { | 1061 | } else if (!server->ops->is_oplock_break || |
1062 | !server->ops->is_oplock_break(buf, server)) { | ||
1062 | cERROR(1, "No task to wake, unknown frame received! " | 1063 | cERROR(1, "No task to wake, unknown frame received! " |
1063 | "NumMids %d", atomic_read(&midCount)); | 1064 | "NumMids %d", atomic_read(&midCount)); |
1064 | cifs_dump_mem("Received Data is: ", buf, | 1065 | cifs_dump_mem("Received Data is: ", buf, |
1065 | HEADER_SIZE(server)); | 1066 | HEADER_SIZE(server)); |
1066 | #ifdef CONFIG_CIFS_DEBUG2 | 1067 | #ifdef CONFIG_CIFS_DEBUG2 |
1067 | server->ops->dump_detail(buf); | 1068 | if (server->ops->dump_detail) |
1069 | server->ops->dump_detail(buf); | ||
1068 | cifs_dump_mids(server); | 1070 | cifs_dump_mids(server); |
1069 | #endif /* CIFS_DEBUG2 */ | 1071 | #endif /* CIFS_DEBUG2 */ |
1070 | 1072 | ||
@@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, | |||
3938 | header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, | 3940 | header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, |
3939 | NULL /*no tid */ , 4 /*wct */ ); | 3941 | NULL /*no tid */ , 4 /*wct */ ); |
3940 | 3942 | ||
3941 | smb_buffer->Mid = GetNextMid(ses->server); | 3943 | smb_buffer->Mid = get_next_mid(ses->server); |
3942 | smb_buffer->Uid = ses->Suid; | 3944 | smb_buffer->Uid = ses->Suid; |
3943 | pSMB = (TCONX_REQ *) smb_buffer; | 3945 | pSMB = (TCONX_REQ *) smb_buffer; |
3944 | pSMBr = (TCONX_RSP *) smb_buffer_response; | 3946 | pSMBr = (TCONX_RSP *) smb_buffer_response; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 253170dfa716..513adbc211d7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
876 | struct cifsLockInfo *li, *tmp; | 876 | struct cifsLockInfo *li, *tmp; |
877 | struct cifs_tcon *tcon; | 877 | struct cifs_tcon *tcon; |
878 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); | 878 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); |
879 | unsigned int num, max_num; | 879 | unsigned int num, max_num, max_buf; |
880 | LOCKING_ANDX_RANGE *buf, *cur; | 880 | LOCKING_ANDX_RANGE *buf, *cur; |
881 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 881 | int types[] = {LOCKING_ANDX_LARGE_FILES, |
882 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 882 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; |
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
892 | return rc; | 892 | return rc; |
893 | } | 893 | } |
894 | 894 | ||
895 | max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / | 895 | /* |
896 | sizeof(LOCKING_ANDX_RANGE); | 896 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
897 | * and check it for zero before using. | ||
898 | */ | ||
899 | max_buf = tcon->ses->server->maxBuf; | ||
900 | if (!max_buf) { | ||
901 | mutex_unlock(&cinode->lock_mutex); | ||
902 | FreeXid(xid); | ||
903 | return -EINVAL; | ||
904 | } | ||
905 | |||
906 | max_num = (max_buf - sizeof(struct smb_hdr)) / | ||
907 | sizeof(LOCKING_ANDX_RANGE); | ||
897 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); | 908 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
898 | if (!buf) { | 909 | if (!buf) { |
899 | mutex_unlock(&cinode->lock_mutex); | 910 | mutex_unlock(&cinode->lock_mutex); |
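The comment added in this hunk (and repeated in the unlock path below) describes the fix: maxBuf can be rewritten concurrently by cifs_reconnect, so it is read once into a local, rejected if zero, and all sizing uses the snapshot. A small sketch of that snapshot-then-validate pattern follows, with illustrative types and sizes rather than the real CIFS structures.

/*
 * Snapshot-then-validate: read the racy field exactly once, bail out if
 * it is unusable, and derive every later value from the local copy.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct lock_range { unsigned int pid, len, off; };
struct fake_server { volatile unsigned int max_buf; };

static int push_locks(struct fake_server *server, size_t hdr_size)
{
    unsigned int max_buf, max_num;
    struct lock_range *buf;

    max_buf = server->max_buf;        /* one racy read, captured once */
    if (max_buf == 0 || max_buf <= hdr_size)
        return -EINVAL;               /* e.g. server is mid-reconnect */

    max_num = (max_buf - hdr_size) / sizeof(struct lock_range);
    buf = calloc(max_num, sizeof(*buf));
    if (!buf)
        return -ENOMEM;

    printf("room for %u lock ranges per request\n", max_num);
    free(buf);
    return 0;
}

int main(void)
{
    struct fake_server srv = { .max_buf = 4096 };
    return push_locks(&srv, 32) ? 1 : 0;
}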
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1218 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 1229 | int types[] = {LOCKING_ANDX_LARGE_FILES, |
1219 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 1230 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; |
1220 | unsigned int i; | 1231 | unsigned int i; |
1221 | unsigned int max_num, num; | 1232 | unsigned int max_num, num, max_buf; |
1222 | LOCKING_ANDX_RANGE *buf, *cur; | 1233 | LOCKING_ANDX_RANGE *buf, *cur; |
1223 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); | 1234 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
1224 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); | 1235 | struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); |
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1228 | 1239 | ||
1229 | INIT_LIST_HEAD(&tmp_llist); | 1240 | INIT_LIST_HEAD(&tmp_llist); |
1230 | 1241 | ||
1231 | max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / | 1242 | /* |
1232 | sizeof(LOCKING_ANDX_RANGE); | 1243 | * Accessing maxBuf is racy with cifs_reconnect - need to store value |
1244 | * and check it for zero before using. | ||
1245 | */ | ||
1246 | max_buf = tcon->ses->server->maxBuf; | ||
1247 | if (!max_buf) | ||
1248 | return -EINVAL; | ||
1249 | |||
1250 | max_num = (max_buf - sizeof(struct smb_hdr)) / | ||
1251 | sizeof(LOCKING_ANDX_RANGE); | ||
1233 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); | 1252 | buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); |
1234 | if (!buf) | 1253 | if (!buf) |
1235 | return -ENOMEM; | 1254 | return -ENOMEM; |
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1247 | continue; | 1266 | continue; |
1248 | if (types[i] != li->type) | 1267 | if (types[i] != li->type) |
1249 | continue; | 1268 | continue; |
1250 | if (!cinode->can_cache_brlcks) { | 1269 | if (cinode->can_cache_brlcks) { |
1251 | cur->Pid = cpu_to_le16(li->pid); | ||
1252 | cur->LengthLow = cpu_to_le32((u32)li->length); | ||
1253 | cur->LengthHigh = | ||
1254 | cpu_to_le32((u32)(li->length>>32)); | ||
1255 | cur->OffsetLow = cpu_to_le32((u32)li->offset); | ||
1256 | cur->OffsetHigh = | ||
1257 | cpu_to_le32((u32)(li->offset>>32)); | ||
1258 | /* | ||
1259 | * We need to save a lock here to let us add | ||
1260 | * it again to the file's list if the unlock | ||
1261 | * range request fails on the server. | ||
1262 | */ | ||
1263 | list_move(&li->llist, &tmp_llist); | ||
1264 | if (++num == max_num) { | ||
1265 | stored_rc = cifs_lockv(xid, tcon, | ||
1266 | cfile->netfid, | ||
1267 | li->type, num, | ||
1268 | 0, buf); | ||
1269 | if (stored_rc) { | ||
1270 | /* | ||
1271 | * We failed on the unlock range | ||
1272 | * request - add all locks from | ||
1273 | * the tmp list to the head of | ||
1274 | * the file's list. | ||
1275 | */ | ||
1276 | cifs_move_llist(&tmp_llist, | ||
1277 | &cfile->llist); | ||
1278 | rc = stored_rc; | ||
1279 | } else | ||
1280 | /* | ||
1281 | * The unlock range request | ||
1282 | * succeed - free the tmp list. | ||
1283 | */ | ||
1284 | cifs_free_llist(&tmp_llist); | ||
1285 | cur = buf; | ||
1286 | num = 0; | ||
1287 | } else | ||
1288 | cur++; | ||
1289 | } else { | ||
1290 | /* | 1270 | /* |
1291 | * We can cache brlock requests - simply remove | 1271 | * We can cache brlock requests - simply remove |
1292 | * a lock from the file's list. | 1272 | * a lock from the file's list. |
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) | |||
1294 | list_del(&li->llist); | 1274 | list_del(&li->llist); |
1295 | cifs_del_lock_waiters(li); | 1275 | cifs_del_lock_waiters(li); |
1296 | kfree(li); | 1276 | kfree(li); |
1277 | continue; | ||
1297 | } | 1278 | } |
1279 | cur->Pid = cpu_to_le16(li->pid); | ||
1280 | cur->LengthLow = cpu_to_le32((u32)li->length); | ||
1281 | cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); | ||
1282 | cur->OffsetLow = cpu_to_le32((u32)li->offset); | ||
1283 | cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); | ||
1284 | /* | ||
1285 | * We need to save a lock here to let us add it again to | ||
1286 | * the file's list if the unlock range request fails on | ||
1287 | * the server. | ||
1288 | */ | ||
1289 | list_move(&li->llist, &tmp_llist); | ||
1290 | if (++num == max_num) { | ||
1291 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, | ||
1292 | li->type, num, 0, buf); | ||
1293 | if (stored_rc) { | ||
1294 | /* | ||
1295 | * We failed on the unlock range | ||
1296 | * request - add all locks from the tmp | ||
1297 | * list to the head of the file's list. | ||
1298 | */ | ||
1299 | cifs_move_llist(&tmp_llist, | ||
1300 | &cfile->llist); | ||
1301 | rc = stored_rc; | ||
1302 | } else | ||
1303 | /* | ||
1304 | * The unlock range request succeed - | ||
1305 | * free the tmp list. | ||
1306 | */ | ||
1307 | cifs_free_llist(&tmp_llist); | ||
1308 | cur = buf; | ||
1309 | num = 0; | ||
1310 | } else | ||
1311 | cur++; | ||
1298 | } | 1312 | } |
1299 | if (num) { | 1313 | if (num) { |
1300 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, | 1314 | stored_rc = cifs_lockv(xid, tcon, cfile->netfid, |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e2552d2b2e42..557506ae1e2a 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free) | |||
212 | return; | 212 | return; |
213 | } | 213 | } |
214 | 214 | ||
215 | /* | ||
216 | * Find a free multiplex id (SMB mid). Otherwise there could be | ||
217 | * mid collisions which might cause problems, demultiplexing the | ||
218 | * wrong response to this request. Multiplex ids could collide if | ||
219 | * one of a series requests takes much longer than the others, or | ||
220 | * if a very large number of long lived requests (byte range | ||
221 | * locks or FindNotify requests) are pending. No more than | ||
222 | * 64K-1 requests can be outstanding at one time. If no | ||
223 | * mids are available, return zero. A future optimization | ||
224 | * could make the combination of mids and uid the key we use | ||
225 | * to demultiplex on (rather than mid alone). | ||
226 | * In addition to the above check, the cifs demultiplex | ||
227 | * code already used the command code as a secondary | ||
228 | * check of the frame and if signing is negotiated the | ||
229 | * response would be discarded if the mid were the same | ||
230 | * but the signature was wrong. Since the mid is not put in the | ||
231 | * pending queue until later (when it is about to be dispatched) | ||
232 | * we do have to limit the number of outstanding requests | ||
233 | * to somewhat less than 64K-1 although it is hard to imagine | ||
234 | * so many threads being in the vfs at one time. | ||
235 | */ | ||
236 | __u64 GetNextMid(struct TCP_Server_Info *server) | ||
237 | { | ||
238 | __u64 mid = 0; | ||
239 | __u16 last_mid, cur_mid; | ||
240 | bool collision; | ||
241 | |||
242 | spin_lock(&GlobalMid_Lock); | ||
243 | |||
244 | /* mid is 16 bit only for CIFS/SMB */ | ||
245 | cur_mid = (__u16)((server->CurrentMid) & 0xffff); | ||
246 | /* we do not want to loop forever */ | ||
247 | last_mid = cur_mid; | ||
248 | cur_mid++; | ||
249 | |||
250 | /* | ||
251 | * This nested loop looks more expensive than it is. | ||
252 | * In practice the list of pending requests is short, | ||
253 | * fewer than 50, and the mids are likely to be unique | ||
254 | * on the first pass through the loop unless some request | ||
255 | * takes longer than the 64 thousand requests before it | ||
256 | * (and it would also have to have been a request that | ||
257 | * did not time out). | ||
258 | */ | ||
259 | while (cur_mid != last_mid) { | ||
260 | struct mid_q_entry *mid_entry; | ||
261 | unsigned int num_mids; | ||
262 | |||
263 | collision = false; | ||
264 | if (cur_mid == 0) | ||
265 | cur_mid++; | ||
266 | |||
267 | num_mids = 0; | ||
268 | list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { | ||
269 | ++num_mids; | ||
270 | if (mid_entry->mid == cur_mid && | ||
271 | mid_entry->mid_state == MID_REQUEST_SUBMITTED) { | ||
272 | /* This mid is in use, try a different one */ | ||
273 | collision = true; | ||
274 | break; | ||
275 | } | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * if we have more than 32k mids in the list, then something | ||
280 | * is very wrong. Possibly a local user is trying to DoS the | ||
281 | * box by issuing long-running calls and SIGKILL'ing them. If | ||
282 | * we get to 2^16 mids then we're in big trouble as this | ||
283 | * function could loop forever. | ||
284 | * | ||
285 | * Go ahead and assign out the mid in this situation, but force | ||
286 | * an eventual reconnect to clean out the pending_mid_q. | ||
287 | */ | ||
288 | if (num_mids > 32768) | ||
289 | server->tcpStatus = CifsNeedReconnect; | ||
290 | |||
291 | if (!collision) { | ||
292 | mid = (__u64)cur_mid; | ||
293 | server->CurrentMid = mid; | ||
294 | break; | ||
295 | } | ||
296 | cur_mid++; | ||
297 | } | ||
298 | spin_unlock(&GlobalMid_Lock); | ||
299 | return mid; | ||
300 | } | ||
301 | |||
302 | /* NB: MID can not be set if treeCon not passed in, in that | 215 | /* NB: MID can not be set if treeCon not passed in, in that |
303 | case it is responsbility of caller to set the mid */ | 216 | case it is responsbility of caller to set the mid */ |
304 | void | 217 | void |
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
334 | 247 | ||
335 | /* Uid is not converted */ | 248 | /* Uid is not converted */ |
336 | buffer->Uid = treeCon->ses->Suid; | 249 | buffer->Uid = treeCon->ses->Suid; |
337 | buffer->Mid = GetNextMid(treeCon->ses->server); | 250 | buffer->Mid = get_next_mid(treeCon->ses->server); |
338 | } | 251 | } |
339 | if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) | 252 | if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) |
340 | buffer->Flags2 |= SMBFLG2_DFS; | 253 | buffer->Flags2 |= SMBFLG2_DFS; |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index d9d615fbed3f..6dec38f5522d 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server) | |||
125 | return &server->credits; | 125 | return &server->credits; |
126 | } | 126 | } |
127 | 127 | ||
128 | /* | ||
129 | * Find a free multiplex id (SMB mid). Otherwise there could be | ||
130 | * mid collisions which might cause problems, demultiplexing the | ||
131 | * wrong response to this request. Multiplex ids could collide if | ||
132 | * one of a series requests takes much longer than the others, or | ||
133 | * if a very large number of long lived requests (byte range | ||
134 | * locks or FindNotify requests) are pending. No more than | ||
135 | * 64K-1 requests can be outstanding at one time. If no | ||
136 | * mids are available, return zero. A future optimization | ||
137 | * could make the combination of mids and uid the key we use | ||
138 | * to demultiplex on (rather than mid alone). | ||
139 | * In addition to the above check, the cifs demultiplex | ||
140 | * code already used the command code as a secondary | ||
141 | * check of the frame and if signing is negotiated the | ||
142 | * response would be discarded if the mid were the same | ||
143 | * but the signature was wrong. Since the mid is not put in the | ||
144 | * pending queue until later (when it is about to be dispatched) | ||
145 | * we do have to limit the number of outstanding requests | ||
146 | * to somewhat less than 64K-1 although it is hard to imagine | ||
147 | * so many threads being in the vfs at one time. | ||
148 | */ | ||
149 | static __u64 | ||
150 | cifs_get_next_mid(struct TCP_Server_Info *server) | ||
151 | { | ||
152 | __u64 mid = 0; | ||
153 | __u16 last_mid, cur_mid; | ||
154 | bool collision; | ||
155 | |||
156 | spin_lock(&GlobalMid_Lock); | ||
157 | |||
158 | /* mid is 16 bit only for CIFS/SMB */ | ||
159 | cur_mid = (__u16)((server->CurrentMid) & 0xffff); | ||
160 | /* we do not want to loop forever */ | ||
161 | last_mid = cur_mid; | ||
162 | cur_mid++; | ||
163 | |||
164 | /* | ||
165 | * This nested loop looks more expensive than it is. | ||
166 | * In practice the list of pending requests is short, | ||
167 | * fewer than 50, and the mids are likely to be unique | ||
168 | * on the first pass through the loop unless some request | ||
169 | * takes longer than the 64 thousand requests before it | ||
170 | * (and it would also have to have been a request that | ||
171 | * did not time out). | ||
172 | */ | ||
173 | while (cur_mid != last_mid) { | ||
174 | struct mid_q_entry *mid_entry; | ||
175 | unsigned int num_mids; | ||
176 | |||
177 | collision = false; | ||
178 | if (cur_mid == 0) | ||
179 | cur_mid++; | ||
180 | |||
181 | num_mids = 0; | ||
182 | list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { | ||
183 | ++num_mids; | ||
184 | if (mid_entry->mid == cur_mid && | ||
185 | mid_entry->mid_state == MID_REQUEST_SUBMITTED) { | ||
186 | /* This mid is in use, try a different one */ | ||
187 | collision = true; | ||
188 | break; | ||
189 | } | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * if we have more than 32k mids in the list, then something | ||
194 | * is very wrong. Possibly a local user is trying to DoS the | ||
195 | * box by issuing long-running calls and SIGKILL'ing them. If | ||
196 | * we get to 2^16 mids then we're in big trouble as this | ||
197 | * function could loop forever. | ||
198 | * | ||
199 | * Go ahead and assign out the mid in this situation, but force | ||
200 | * an eventual reconnect to clean out the pending_mid_q. | ||
201 | */ | ||
202 | if (num_mids > 32768) | ||
203 | server->tcpStatus = CifsNeedReconnect; | ||
204 | |||
205 | if (!collision) { | ||
206 | mid = (__u64)cur_mid; | ||
207 | server->CurrentMid = mid; | ||
208 | break; | ||
209 | } | ||
210 | cur_mid++; | ||
211 | } | ||
212 | spin_unlock(&GlobalMid_Lock); | ||
213 | return mid; | ||
214 | } | ||
215 | |||
128 | struct smb_version_operations smb1_operations = { | 216 | struct smb_version_operations smb1_operations = { |
129 | .send_cancel = send_nt_cancel, | 217 | .send_cancel = send_nt_cancel, |
130 | .compare_fids = cifs_compare_fids, | 218 | .compare_fids = cifs_compare_fids, |
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = { | |||
133 | .add_credits = cifs_add_credits, | 221 | .add_credits = cifs_add_credits, |
134 | .set_credits = cifs_set_credits, | 222 | .set_credits = cifs_set_credits, |
135 | .get_credits_field = cifs_get_credits_field, | 223 | .get_credits_field = cifs_get_credits_field, |
224 | .get_next_mid = cifs_get_next_mid, | ||
136 | .read_data_offset = cifs_read_data_offset, | 225 | .read_data_offset = cifs_read_data_offset, |
137 | .read_data_length = cifs_read_data_length, | 226 | .read_data_length = cifs_read_data_length, |
138 | .map_error = map_smb_to_linux_error, | 227 | .map_error = map_smb_to_linux_error, |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 1b36ffe6a47b..3097ee58fd7d 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, | |||
779 | 779 | ||
780 | pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; | 780 | pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; |
781 | pSMB->Timeout = 0; | 781 | pSMB->Timeout = 0; |
782 | pSMB->hdr.Mid = GetNextMid(ses->server); | 782 | pSMB->hdr.Mid = get_next_mid(ses->server); |
783 | 783 | ||
784 | return SendReceive(xid, ses, in_buf, out_buf, | 784 | return SendReceive(xid, ses, in_buf, out_buf, |
785 | &bytes_returned, 0); | 785 | &bytes_returned, 0); |
diff --git a/fs/dcache.c b/fs/dcache.c index 85c9e2bff8e6..40469044088d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -683,6 +683,8 @@ EXPORT_SYMBOL(dget_parent); | |||
683 | /** | 683 | /** |
684 | * d_find_alias - grab a hashed alias of inode | 684 | * d_find_alias - grab a hashed alias of inode |
685 | * @inode: inode in question | 685 | * @inode: inode in question |
686 | * @want_discon: flag, used by d_splice_alias, to request | ||
687 | * that only a DISCONNECTED alias be returned. | ||
686 | * | 688 | * |
687 | * If inode has a hashed alias, or is a directory and has any alias, | 689 | * If inode has a hashed alias, or is a directory and has any alias, |
688 | * acquire the reference to alias and return it. Otherwise return NULL. | 690 | * acquire the reference to alias and return it. Otherwise return NULL. |
@@ -691,9 +693,10 @@ EXPORT_SYMBOL(dget_parent); | |||
691 | * of a filesystem. | 693 | * of a filesystem. |
692 | * | 694 | * |
693 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer | 695 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer |
694 | * any other hashed alias over that. | 696 | * any other hashed alias over that one unless @want_discon is set, |
697 | * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. | ||
695 | */ | 698 | */ |
696 | static struct dentry *__d_find_alias(struct inode *inode) | 699 | static struct dentry *__d_find_alias(struct inode *inode, int want_discon) |
697 | { | 700 | { |
698 | struct dentry *alias, *discon_alias; | 701 | struct dentry *alias, *discon_alias; |
699 | 702 | ||
@@ -705,7 +708,7 @@ again: | |||
705 | if (IS_ROOT(alias) && | 708 | if (IS_ROOT(alias) && |
706 | (alias->d_flags & DCACHE_DISCONNECTED)) { | 709 | (alias->d_flags & DCACHE_DISCONNECTED)) { |
707 | discon_alias = alias; | 710 | discon_alias = alias; |
708 | } else { | 711 | } else if (!want_discon) { |
709 | __dget_dlock(alias); | 712 | __dget_dlock(alias); |
710 | spin_unlock(&alias->d_lock); | 713 | spin_unlock(&alias->d_lock); |
711 | return alias; | 714 | return alias; |
@@ -736,7 +739,7 @@ struct dentry *d_find_alias(struct inode *inode) | |||
736 | 739 | ||
737 | if (!list_empty(&inode->i_dentry)) { | 740 | if (!list_empty(&inode->i_dentry)) { |
738 | spin_lock(&inode->i_lock); | 741 | spin_lock(&inode->i_lock); |
739 | de = __d_find_alias(inode); | 742 | de = __d_find_alias(inode, 0); |
740 | spin_unlock(&inode->i_lock); | 743 | spin_unlock(&inode->i_lock); |
741 | } | 744 | } |
742 | return de; | 745 | return de; |
@@ -1647,8 +1650,9 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | |||
1647 | 1650 | ||
1648 | if (inode && S_ISDIR(inode->i_mode)) { | 1651 | if (inode && S_ISDIR(inode->i_mode)) { |
1649 | spin_lock(&inode->i_lock); | 1652 | spin_lock(&inode->i_lock); |
1650 | new = __d_find_any_alias(inode); | 1653 | new = __d_find_alias(inode, 1); |
1651 | if (new) { | 1654 | if (new) { |
1655 | BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); | ||
1652 | spin_unlock(&inode->i_lock); | 1656 | spin_unlock(&inode->i_lock); |
1653 | security_d_instantiate(new, inode); | 1657 | security_d_instantiate(new, inode); |
1654 | d_move(new, dentry); | 1658 | d_move(new, dentry); |
@@ -2478,7 +2482,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2478 | struct dentry *alias; | 2482 | struct dentry *alias; |
2479 | 2483 | ||
2480 | /* Does an aliased dentry already exist? */ | 2484 | /* Does an aliased dentry already exist? */ |
2481 | alias = __d_find_alias(inode); | 2485 | alias = __d_find_alias(inode, 0); |
2482 | if (alias) { | 2486 | if (alias) { |
2483 | actual = alias; | 2487 | actual = alias; |
2484 | write_seqlock(&rename_lock); | 2488 | write_seqlock(&rename_lock); |
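The dcache hunks give __d_find_alias a want_discon flag: by default the first connected (hashed, non-root) alias wins, but d_splice_alias asks for a DISCONNECTED alias only. A toy version of that two-mode walk is sketched below; the alias list and flag layout are made up.

/*
 * Two-mode lookup: return the first connected entry, unless the caller
 * asked for a disconnected one, in which case only that may be returned.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct alias {
    const char *name;
    bool disconnected;
    struct alias *next;
};

static struct alias *find_alias(struct alias *head, bool want_discon)
{
    struct alias *discon = NULL;

    for (struct alias *a = head; a; a = a->next) {
        if (a->disconnected)
            discon = a;             /* remember it, keep looking */
        else if (!want_discon)
            return a;               /* first connected alias wins */
    }
    return discon;                  /* only (or no) disconnected alias */
}

int main(void)
{
    struct alias d = { "discon-root", true,  NULL };
    struct alias c = { "usr/bin",     false, &d   };

    printf("default:     %s\n", find_alias(&c, false)->name);
    printf("want_discon: %s\n", find_alias(&c, true)->name);
    return 0;
}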
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -819,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm) | |||
819 | /* Notify parent that we're no longer interested in the old VM */ | 819 | /* Notify parent that we're no longer interested in the old VM */ |
820 | tsk = current; | 820 | tsk = current; |
821 | old_mm = current->mm; | 821 | old_mm = current->mm; |
822 | sync_mm_rss(old_mm); | ||
823 | mm_release(tsk, old_mm); | 822 | mm_release(tsk, old_mm); |
824 | 823 | ||
825 | if (old_mm) { | 824 | if (old_mm) { |
825 | sync_mm_rss(old_mm); | ||
826 | /* | 826 | /* |
827 | * Make sure that if there is a core dump in progress | 827 | * Make sure that if there is a core dump in progress |
828 | * for the old mm, we get out and die instead of going | 828 | * for the old mm, we get out and die instead of going |
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c index e32bc919e4e3..5a7b691e748b 100644 --- a/fs/exofs/sys.c +++ b/fs/exofs/sys.c | |||
@@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = { | |||
109 | static struct kobj_type uuid_ktype = { | 109 | static struct kobj_type uuid_ktype = { |
110 | }; | 110 | }; |
111 | 111 | ||
112 | void exofs_sysfs_dbg_print() | 112 | void exofs_sysfs_dbg_print(void) |
113 | { | 113 | { |
114 | #ifdef CONFIG_EXOFS_DEBUG | 114 | #ifdef CONFIG_EXOFS_DEBUG |
115 | struct kobject *k_name, *k_tmp; | 115 | struct kobject *k_name, *k_tmp; |
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 99b6324290db..cee7812cc3cf 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
90 | * unusual file system layouts. | 90 | * unusual file system layouts. |
91 | */ | 91 | */ |
92 | if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { | 92 | if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { |
93 | block_cluster = EXT4_B2C(sbi, (start - | 93 | block_cluster = EXT4_B2C(sbi, |
94 | ext4_block_bitmap(sb, gdp))); | 94 | ext4_block_bitmap(sb, gdp) - start); |
95 | if (block_cluster < num_clusters) | 95 | if (block_cluster < num_clusters) |
96 | block_cluster = -1; | 96 | block_cluster = -1; |
97 | else if (block_cluster == num_clusters) { | 97 | else if (block_cluster == num_clusters) { |
@@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
102 | 102 | ||
103 | if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { | 103 | if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { |
104 | inode_cluster = EXT4_B2C(sbi, | 104 | inode_cluster = EXT4_B2C(sbi, |
105 | start - ext4_inode_bitmap(sb, gdp)); | 105 | ext4_inode_bitmap(sb, gdp) - start); |
106 | if (inode_cluster < num_clusters) | 106 | if (inode_cluster < num_clusters) |
107 | inode_cluster = -1; | 107 | inode_cluster = -1; |
108 | else if (inode_cluster == num_clusters) { | 108 | else if (inode_cluster == num_clusters) { |
@@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, | |||
114 | itbl_blk = ext4_inode_table(sb, gdp); | 114 | itbl_blk = ext4_inode_table(sb, gdp); |
115 | for (i = 0; i < sbi->s_itb_per_group; i++) { | 115 | for (i = 0; i < sbi->s_itb_per_group; i++) { |
116 | if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { | 116 | if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { |
117 | c = EXT4_B2C(sbi, start - itbl_blk + i); | 117 | c = EXT4_B2C(sbi, itbl_blk + i - start); |
118 | if ((c < num_clusters) || (c == inode_cluster) || | 118 | if ((c < num_clusters) || (c == inode_cluster) || |
119 | (c == block_cluster) || (c == itbl_cluster)) | 119 | (c == block_cluster) || (c == itbl_cluster)) |
120 | continue; | 120 | continue; |
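The ext4_num_overhead_clusters fixes above reverse the operands of an unsigned subtraction: the cluster index of a metadata block within its group is (block - group_start) converted to clusters, and the old order wrapped around. A worked example with made-up numbers:

/*
 * Worked example of the corrected arithmetic: reversing the operands of
 * the unsigned subtraction yields a huge wrapped value instead of the
 * small in-group cluster index.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t group_start  = 32768;  /* first block of the group */
    uint64_t bitmap_block = 32770;  /* where the block bitmap lives */
    unsigned cluster_bits = 4;      /* 16 blocks per cluster */

    uint64_t good = (bitmap_block - group_start) >> cluster_bits;
    uint64_t bad  = (group_start - bitmap_block) >> cluster_bits;

    printf("correct cluster index: %llu\n", (unsigned long long)good);
    printf("reversed operands:     %llu (wrapped)\n",
           (unsigned long long)bad);
    return 0;
}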
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 8ad112ae0ade..e34deac3f366 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
@@ -123,7 +123,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
123 | else | 123 | else |
124 | ext4_clear_inode_flag(inode, i); | 124 | ext4_clear_inode_flag(inode, i); |
125 | } | 125 | } |
126 | ei->i_flags = flags; | ||
127 | 126 | ||
128 | ext4_set_inode_flags(inode); | 127 | ext4_set_inode_flags(inode); |
129 | inode->i_ctime = ext4_current_time(inode); | 128 | inode->i_ctime = ext4_current_time(inode); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8d2fb8c88cf3..41a3ccff18d8 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb, | |||
664 | /* Wait for I_SYNC. This function drops i_lock... */ | 664 | /* Wait for I_SYNC. This function drops i_lock... */ |
665 | inode_sleep_on_writeback(inode); | 665 | inode_sleep_on_writeback(inode); |
666 | /* Inode may be gone, start again */ | 666 | /* Inode may be gone, start again */ |
667 | spin_lock(&wb->list_lock); | ||
667 | continue; | 668 | continue; |
668 | } | 669 | } |
669 | inode->i_state |= I_SYNC; | 670 | inode->i_state |= I_SYNC; |
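The writeback fix above re-takes wb->list_lock after sleeping, because the top of the loop assumes the lock is held when it continues. A stand-alone pthread analogue of that invariant follows; the item names and "sleep" are stand-ins, not the writeback code.

/*
 * A loop that runs with a lock held must re-acquire the lock on any
 * path that dropped it before reaching 'continue'.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static bool item_is_busy(int i)
{
    return i == 2;                      /* pretend item 2 is being synced */
}

static void wait_for_item(int i)
{
    pthread_mutex_unlock(&list_lock);   /* drop the lock to sleep */
    printf("waiting on item %d\n", i);  /* stand-in for sleeping */
    pthread_mutex_lock(&list_lock);     /* the fix: re-take before continue */
}

int main(void)
{
    pthread_mutex_lock(&list_lock);
    for (int i = 0; i < 5; i++) {
        if (item_is_busy(i)) {
            wait_for_item(i);
            continue;                   /* loop top expects list_lock held */
        }
        printf("writing back item %d\n", i);
    }
    pthread_mutex_unlock(&list_lock);
    return 0;
}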
diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 42593c587d48..03ff5b1eba93 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c | |||
@@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf, | |||
75 | unsigned global_limit) | 75 | unsigned global_limit) |
76 | { | 76 | { |
77 | unsigned long t; | 77 | unsigned long t; |
78 | char tmp[32]; | ||
79 | unsigned limit = (1 << 16) - 1; | 78 | unsigned limit = (1 << 16) - 1; |
80 | int err; | 79 | int err; |
81 | 80 | ||
82 | if (*ppos || count >= sizeof(tmp) - 1) | 81 | if (*ppos) |
83 | return -EINVAL; | ||
84 | |||
85 | if (copy_from_user(tmp, buf, count)) | ||
86 | return -EINVAL; | 82 | return -EINVAL; |
87 | 83 | ||
88 | tmp[count] = '\0'; | 84 | err = kstrtoul_from_user(buf, count, 0, &t); |
89 | |||
90 | err = strict_strtoul(tmp, 0, &t); | ||
91 | if (err) | 85 | if (err) |
92 | return err; | 86 | return err; |
93 | 87 | ||
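The fuse control-file change replaces a manual copy/terminate/strict_strtoul sequence with kstrtoul_from_user(), which bundles the bounds check, copy, termination and conversion. A userspace analogue of that combined helper, built on plain libc:

/*
 * One helper does the bounds check, copy, NUL-termination and numeric
 * conversion, so callers cannot forget a step.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_ulong(const char *buf, size_t count, unsigned long *out)
{
    char tmp[32];
    char *end;

    if (count >= sizeof(tmp))
        return -EINVAL;
    memcpy(tmp, buf, count);
    tmp[count] = '\0';

    errno = 0;
    *out = strtoul(tmp, &end, 0);
    if (errno || end == tmp)
        return -EINVAL;
    return 0;
}

int main(void)
{
    unsigned long limit;

    if (parse_ulong("65535\n", 6, &limit) == 0)
        printf("new limit: %lu\n", limit);
    return 0;
}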
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index df5ac048dc74..334e0b18a014 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, | |||
775 | static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, | 775 | static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, |
776 | struct kstat *stat) | 776 | struct kstat *stat) |
777 | { | 777 | { |
778 | unsigned int blkbits; | ||
779 | |||
778 | stat->dev = inode->i_sb->s_dev; | 780 | stat->dev = inode->i_sb->s_dev; |
779 | stat->ino = attr->ino; | 781 | stat->ino = attr->ino; |
780 | stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); | 782 | stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); |
@@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, | |||
790 | stat->ctime.tv_nsec = attr->ctimensec; | 792 | stat->ctime.tv_nsec = attr->ctimensec; |
791 | stat->size = attr->size; | 793 | stat->size = attr->size; |
792 | stat->blocks = attr->blocks; | 794 | stat->blocks = attr->blocks; |
793 | stat->blksize = (1 << inode->i_blkbits); | 795 | |
796 | if (attr->blksize != 0) | ||
797 | blkbits = ilog2(attr->blksize); | ||
798 | else | ||
799 | blkbits = inode->i_sb->s_blocksize_bits; | ||
800 | |||
801 | stat->blksize = 1 << blkbits; | ||
794 | } | 802 | } |
795 | 803 | ||
796 | static int fuse_do_getattr(struct inode *inode, struct kstat *stat, | 804 | static int fuse_do_getattr(struct inode *inode, struct kstat *stat, |
@@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, | |||
863 | if (stat) { | 871 | if (stat) { |
864 | generic_fillattr(inode, stat); | 872 | generic_fillattr(inode, stat); |
865 | stat->mode = fi->orig_i_mode; | 873 | stat->mode = fi->orig_i_mode; |
874 | stat->ino = fi->orig_ino; | ||
866 | } | 875 | } |
867 | } | 876 | } |
868 | 877 | ||
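fuse_fillattr now reports the block size the server supplied (as a power of two via its log2) and falls back to the superblock's block-size bits when the server reports zero. A small stand-alone version of that fallback is below; the ilog2 stand-in assumes a GCC/Clang builtin.

/*
 * Prefer the server-reported block size, fall back to the filesystem
 * default, and hand stat a power of two either way.
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
    return 31 - __builtin_clz(v);     /* v must be non-zero */
}

static unsigned int stat_blksize(unsigned int attr_blksize,
                                 unsigned int sb_blocksize_bits)
{
    unsigned int blkbits;

    if (attr_blksize != 0)
        blkbits = ilog2_u32(attr_blksize);
    else
        blkbits = sb_blocksize_bits;

    return 1u << blkbits;
}

int main(void)
{
    printf("server reports 4096 -> %u\n", stat_blksize(4096, 12));
    printf("server reports 0    -> %u\n", stat_blksize(0, 12));
    return 0;
}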
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9562109d3a87..b321a688cde7 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -2173,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
2173 | return ret; | 2173 | return ret; |
2174 | } | 2174 | } |
2175 | 2175 | ||
2176 | long fuse_file_fallocate(struct file *file, int mode, loff_t offset, | ||
2177 | loff_t length) | ||
2178 | { | ||
2179 | struct fuse_file *ff = file->private_data; | ||
2180 | struct fuse_conn *fc = ff->fc; | ||
2181 | struct fuse_req *req; | ||
2182 | struct fuse_fallocate_in inarg = { | ||
2183 | .fh = ff->fh, | ||
2184 | .offset = offset, | ||
2185 | .length = length, | ||
2186 | .mode = mode | ||
2187 | }; | ||
2188 | int err; | ||
2189 | |||
2190 | if (fc->no_fallocate) | ||
2191 | return -EOPNOTSUPP; | ||
2192 | |||
2193 | req = fuse_get_req(fc); | ||
2194 | if (IS_ERR(req)) | ||
2195 | return PTR_ERR(req); | ||
2196 | |||
2197 | req->in.h.opcode = FUSE_FALLOCATE; | ||
2198 | req->in.h.nodeid = ff->nodeid; | ||
2199 | req->in.numargs = 1; | ||
2200 | req->in.args[0].size = sizeof(inarg); | ||
2201 | req->in.args[0].value = &inarg; | ||
2202 | fuse_request_send(fc, req); | ||
2203 | err = req->out.h.error; | ||
2204 | if (err == -ENOSYS) { | ||
2205 | fc->no_fallocate = 1; | ||
2206 | err = -EOPNOTSUPP; | ||
2207 | } | ||
2208 | fuse_put_request(fc, req); | ||
2209 | |||
2210 | return err; | ||
2211 | } | ||
2212 | EXPORT_SYMBOL_GPL(fuse_file_fallocate); | ||
2213 | |||
2176 | static const struct file_operations fuse_file_operations = { | 2214 | static const struct file_operations fuse_file_operations = { |
2177 | .llseek = fuse_file_llseek, | 2215 | .llseek = fuse_file_llseek, |
2178 | .read = do_sync_read, | 2216 | .read = do_sync_read, |
@@ -2190,6 +2228,7 @@ static const struct file_operations fuse_file_operations = { | |||
2190 | .unlocked_ioctl = fuse_file_ioctl, | 2228 | .unlocked_ioctl = fuse_file_ioctl, |
2191 | .compat_ioctl = fuse_file_compat_ioctl, | 2229 | .compat_ioctl = fuse_file_compat_ioctl, |
2192 | .poll = fuse_file_poll, | 2230 | .poll = fuse_file_poll, |
2231 | .fallocate = fuse_file_fallocate, | ||
2193 | }; | 2232 | }; |
2194 | 2233 | ||
2195 | static const struct file_operations fuse_direct_io_file_operations = { | 2234 | static const struct file_operations fuse_direct_io_file_operations = { |
@@ -2206,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = { | |||
2206 | .unlocked_ioctl = fuse_file_ioctl, | 2245 | .unlocked_ioctl = fuse_file_ioctl, |
2207 | .compat_ioctl = fuse_file_compat_ioctl, | 2246 | .compat_ioctl = fuse_file_compat_ioctl, |
2208 | .poll = fuse_file_poll, | 2247 | .poll = fuse_file_poll, |
2248 | .fallocate = fuse_file_fallocate, | ||
2209 | /* no splice_read */ | 2249 | /* no splice_read */ |
2210 | }; | 2250 | }; |
2211 | 2251 | ||
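With the fuse_file_fallocate hook wired into both file_operations tables above, fallocate(2) on a FUSE mount either reaches the filesystem's handler or fails cleanly with EOPNOTSUPP once the server reports FUSE_FALLOCATE unimplemented. A userspace view of that, with a hypothetical mount path:

/*
 * Calling fallocate(2) on a FUSE file and handling the "not supported"
 * case the new code path returns.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/mnt/fusefs/preallocated.bin", O_RDWR | O_CREAT, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (fallocate(fd, 0, 0, 1 << 20) < 0) {
        if (errno == EOPNOTSUPP)
            fprintf(stderr, "fallocate not supported by this FUSE fs\n");
        else
            perror("fallocate");
    }

    close(fd);
    return 0;
}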
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 572cefc78012..771fb6322c07 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -82,6 +82,9 @@ struct fuse_inode { | |||
82 | preserve the original mode */ | 82 | preserve the original mode */ |
83 | umode_t orig_i_mode; | 83 | umode_t orig_i_mode; |
84 | 84 | ||
85 | /** 64 bit inode number */ | ||
86 | u64 orig_ino; | ||
87 | |||
85 | /** Version of last attribute change */ | 88 | /** Version of last attribute change */ |
86 | u64 attr_version; | 89 | u64 attr_version; |
87 | 90 | ||
@@ -478,6 +481,9 @@ struct fuse_conn { | |||
478 | /** Are BSD file locking primitives not implemented by fs? */ | 481 | /** Are BSD file locking primitives not implemented by fs? */ |
479 | unsigned no_flock:1; | 482 | unsigned no_flock:1; |
480 | 483 | ||
484 | /** Is fallocate not implemented by fs? */ | ||
485 | unsigned no_fallocate:1; | ||
486 | |||
481 | /** The number of requests waiting for completion */ | 487 | /** The number of requests waiting for completion */ |
482 | atomic_t num_waiting; | 488 | atomic_t num_waiting; |
483 | 489 | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 42678a33b7bb..1cd61652018c 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) | |||
91 | fi->nlookup = 0; | 91 | fi->nlookup = 0; |
92 | fi->attr_version = 0; | 92 | fi->attr_version = 0; |
93 | fi->writectr = 0; | 93 | fi->writectr = 0; |
94 | fi->orig_ino = 0; | ||
94 | INIT_LIST_HEAD(&fi->write_files); | 95 | INIT_LIST_HEAD(&fi->write_files); |
95 | INIT_LIST_HEAD(&fi->queued_writes); | 96 | INIT_LIST_HEAD(&fi->queued_writes); |
96 | INIT_LIST_HEAD(&fi->writepages); | 97 | INIT_LIST_HEAD(&fi->writepages); |
@@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) | |||
139 | return 0; | 140 | return 0; |
140 | } | 141 | } |
141 | 142 | ||
143 | /* | ||
144 | * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down | ||
145 | * so that it will fit. | ||
146 | */ | ||
147 | static ino_t fuse_squash_ino(u64 ino64) | ||
148 | { | ||
149 | ino_t ino = (ino_t) ino64; | ||
150 | if (sizeof(ino_t) < sizeof(u64)) | ||
151 | ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8; | ||
152 | return ino; | ||
153 | } | ||
154 | |||
142 | void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | 155 | void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, |
143 | u64 attr_valid) | 156 | u64 attr_valid) |
144 | { | 157 | { |
@@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | |||
148 | fi->attr_version = ++fc->attr_version; | 161 | fi->attr_version = ++fc->attr_version; |
149 | fi->i_time = attr_valid; | 162 | fi->i_time = attr_valid; |
150 | 163 | ||
151 | inode->i_ino = attr->ino; | 164 | inode->i_ino = fuse_squash_ino(attr->ino); |
152 | inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); | 165 | inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); |
153 | set_nlink(inode, attr->nlink); | 166 | set_nlink(inode, attr->nlink); |
154 | inode->i_uid = attr->uid; | 167 | inode->i_uid = attr->uid; |
@@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, | |||
174 | fi->orig_i_mode = inode->i_mode; | 187 | fi->orig_i_mode = inode->i_mode; |
175 | if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) | 188 | if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) |
176 | inode->i_mode &= ~S_ISVTX; | 189 | inode->i_mode &= ~S_ISVTX; |
190 | |||
191 | fi->orig_ino = attr->ino; | ||
177 | } | 192 | } |
178 | 193 | ||
179 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, | 194 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, |
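fuse_squash_ino above folds a 64-bit server inode number into a 32-bit ino_t by XOR-ing in the high half, so plain truncation alone does not make distinct inodes collide, while the original value is kept in orig_ino for stat. A stand-alone illustration with made-up sample values:

/*
 * XOR-fold a 64-bit inode number into 32 bits; two inodes that differ
 * only in their high halves still squash to different values.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t squash_ino(uint64_t ino64)
{
    uint32_t ino = (uint32_t)ino64;

    ino ^= (uint32_t)(ino64 >> 32);   /* fold in the high 32 bits */
    return ino;
}

int main(void)
{
    uint64_t a = 0x0000000100000005ULL;
    uint64_t b = 0x0000000200000005ULL;

    printf("0x%016llx -> 0x%08x\n", (unsigned long long)a, squash_ino(a));
    printf("0x%016llx -> 0x%08x\n", (unsigned long long)b, squash_ino(b));
    return 0;
}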
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c index c640ba57074b..09addc8615fa 100644 --- a/fs/hfsplus/ioctl.c +++ b/fs/hfsplus/ioctl.c | |||
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) | |||
31 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); | 31 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); |
32 | struct hfsplus_vh *vh = sbi->s_vhdr; | 32 | struct hfsplus_vh *vh = sbi->s_vhdr; |
33 | struct hfsplus_vh *bvh = sbi->s_backup_vhdr; | 33 | struct hfsplus_vh *bvh = sbi->s_backup_vhdr; |
34 | u32 cnid = (unsigned long)dentry->d_fsdata; | ||
34 | 35 | ||
35 | if (!capable(CAP_SYS_ADMIN)) | 36 | if (!capable(CAP_SYS_ADMIN)) |
36 | return -EPERM; | 37 | return -EPERM; |
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) | |||
41 | vh->finder_info[0] = bvh->finder_info[0] = | 42 | vh->finder_info[0] = bvh->finder_info[0] = |
42 | cpu_to_be32(parent_ino(dentry)); | 43 | cpu_to_be32(parent_ino(dentry)); |
43 | 44 | ||
44 | /* Bootloader */ | 45 | /* |
45 | vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino); | 46 | * Bootloader. Just using the inode here breaks in the case of |
47 | * hard links - the firmware wants the ID of the hard link file, | ||
48 | * but the inode points at the indirect inode | ||
49 | */ | ||
50 | vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid); | ||
46 | 51 | ||
47 | /* Per spec, the OS X system folder - same as finder_info[0] here */ | 52 | /* Per spec, the OS X system folder - same as finder_info[0] here */ |
48 | vh->finder_info[5] = bvh->finder_info[5] = | 53 | vh->finder_info[5] = bvh->finder_info[5] = |
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 7daf4b852d1c..90effcccca9a 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c | |||
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, | |||
56 | DECLARE_COMPLETION_ONSTACK(wait); | 56 | DECLARE_COMPLETION_ONSTACK(wait); |
57 | struct bio *bio; | 57 | struct bio *bio; |
58 | int ret = 0; | 58 | int ret = 0; |
59 | unsigned int io_size; | 59 | u64 io_size; |
60 | loff_t start; | 60 | loff_t start; |
61 | int offset; | 61 | int offset; |
62 | 62 | ||
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 970659daa323..23ff18fe080a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
18 | #include <linux/sunrpc/svcauth_gss.h> | 18 | #include <linux/sunrpc/svcauth_gss.h> |
19 | #include <linux/sunrpc/bc_xprt.h> | 19 | #include <linux/sunrpc/bc_xprt.h> |
20 | #include <linux/nsproxy.h> | ||
21 | 20 | ||
22 | #include <net/inet_sock.h> | 21 | #include <net/inet_sock.h> |
23 | 22 | ||
@@ -107,7 +106,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
107 | { | 106 | { |
108 | int ret; | 107 | int ret; |
109 | 108 | ||
110 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET, | 109 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET, |
111 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 110 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
112 | if (ret <= 0) | 111 | if (ret <= 0) |
113 | goto out_err; | 112 | goto out_err; |
@@ -115,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
115 | dprintk("NFS: Callback listener port = %u (af %u)\n", | 114 | dprintk("NFS: Callback listener port = %u (af %u)\n", |
116 | nfs_callback_tcpport, PF_INET); | 115 | nfs_callback_tcpport, PF_INET); |
117 | 116 | ||
118 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6, | 117 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6, |
119 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 118 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
120 | if (ret > 0) { | 119 | if (ret > 0) { |
121 | nfs_callback_tcpport6 = ret; | 120 | nfs_callback_tcpport6 = ret; |
@@ -184,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
184 | * fore channel connection. | 183 | * fore channel connection. |
185 | * Returns the input port (0) and sets the svc_serv bc_xprt on success | 184 | * Returns the input port (0) and sets the svc_serv bc_xprt on success |
186 | */ | 185 | */ |
187 | ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0, | 186 | ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0, |
188 | SVC_SOCK_ANONYMOUS); | 187 | SVC_SOCK_ANONYMOUS); |
189 | if (ret < 0) { | 188 | if (ret < 0) { |
190 | rqstp = ERR_PTR(ret); | 189 | rqstp = ERR_PTR(ret); |
@@ -254,7 +253,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt) | |||
254 | char svc_name[12]; | 253 | char svc_name[12]; |
255 | int ret = 0; | 254 | int ret = 0; |
256 | int minorversion_setup; | 255 | int minorversion_setup; |
257 | struct net *net = current->nsproxy->net_ns; | 256 | struct net *net = &init_net; |
258 | 257 | ||
259 | mutex_lock(&nfs_callback_mutex); | 258 | mutex_lock(&nfs_callback_mutex); |
260 | if (cb_info->users++ || cb_info->task != NULL) { | 259 | if (cb_info->users++ || cb_info->task != NULL) { |
@@ -330,7 +329,7 @@ void nfs_callback_down(int minorversion) | |||
330 | cb_info->users--; | 329 | cb_info->users--; |
331 | if (cb_info->users == 0 && cb_info->task != NULL) { | 330 | if (cb_info->users == 0 && cb_info->task != NULL) { |
332 | kthread_stop(cb_info->task); | 331 | kthread_stop(cb_info->task); |
333 | svc_shutdown_net(cb_info->serv, current->nsproxy->net_ns); | 332 | svc_shutdown_net(cb_info->serv, &init_net); |
334 | svc_exit_thread(cb_info->rqst); | 333 | svc_exit_thread(cb_info->rqst); |
335 | cb_info->serv = NULL; | 334 | cb_info->serv = NULL; |
336 | cb_info->rqst = NULL; | 335 | cb_info->rqst = NULL; |
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 95bfc243992c..e64b01d2a338 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c | |||
@@ -455,9 +455,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, | |||
455 | args->csa_nrclists = ntohl(*p++); | 455 | args->csa_nrclists = ntohl(*p++); |
456 | args->csa_rclists = NULL; | 456 | args->csa_rclists = NULL; |
457 | if (args->csa_nrclists) { | 457 | if (args->csa_nrclists) { |
458 | args->csa_rclists = kmalloc(args->csa_nrclists * | 458 | args->csa_rclists = kmalloc_array(args->csa_nrclists, |
459 | sizeof(*args->csa_rclists), | 459 | sizeof(*args->csa_rclists), |
460 | GFP_KERNEL); | 460 | GFP_KERNEL); |
461 | if (unlikely(args->csa_rclists == NULL)) | 461 | if (unlikely(args->csa_rclists == NULL)) |
462 | goto out; | 462 | goto out; |
463 | 463 | ||
@@ -696,7 +696,7 @@ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, | |||
696 | const struct cb_sequenceres *res) | 696 | const struct cb_sequenceres *res) |
697 | { | 697 | { |
698 | __be32 *p; | 698 | __be32 *p; |
699 | unsigned status = res->csr_status; | 699 | __be32 status = res->csr_status; |
700 | 700 | ||
701 | if (unlikely(status != 0)) | 701 | if (unlikely(status != 0)) |
702 | goto out; | 702 | goto out; |
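The callback_xdr.c change swaps an open-coded kmalloc(n * size) for kmalloc_array(), which fails with NULL instead of silently under-allocating when the multiplication would overflow. A hedged sketch of the pattern; the struct and helper here are invented for illustration:

#include <linux/slab.h>

struct rclist { u32 id; };	/* placeholder element type */

static struct rclist *alloc_rclists(u32 nr)
{
	/*
	 * kmalloc_array(n, size, flags) checks n * size for overflow
	 * and returns NULL on overflow, unlike kmalloc(n * size, flags)
	 * where a wrapped product would allocate too little memory.
	 */
	return kmalloc_array(nr, sizeof(struct rclist), GFP_KERNEL);
}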
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 7d108753af81..f005b5bebdc7 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -207,7 +207,6 @@ error_0: | |||
207 | static void nfs4_shutdown_session(struct nfs_client *clp) | 207 | static void nfs4_shutdown_session(struct nfs_client *clp) |
208 | { | 208 | { |
209 | if (nfs4_has_session(clp)) { | 209 | if (nfs4_has_session(clp)) { |
210 | nfs4_deviceid_purge_client(clp); | ||
211 | nfs4_destroy_session(clp->cl_session); | 210 | nfs4_destroy_session(clp->cl_session); |
212 | nfs4_destroy_clientid(clp); | 211 | nfs4_destroy_clientid(clp); |
213 | } | 212 | } |
@@ -544,8 +543,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init, | |||
544 | 543 | ||
545 | smp_rmb(); | 544 | smp_rmb(); |
546 | 545 | ||
547 | BUG_ON(clp->cl_cons_state != NFS_CS_READY); | ||
548 | |||
549 | dprintk("<-- %s found nfs_client %p for %s\n", | 546 | dprintk("<-- %s found nfs_client %p for %s\n", |
550 | __func__, clp, cl_init->hostname ?: ""); | 547 | __func__, clp, cl_init->hostname ?: ""); |
551 | return clp; | 548 | return clp; |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index ad2775d3e219..9a4cbfc85d81 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -490,6 +490,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) | |||
490 | dreq->error = -EIO; | 490 | dreq->error = -EIO; |
491 | spin_unlock(cinfo.lock); | 491 | spin_unlock(cinfo.lock); |
492 | } | 492 | } |
493 | nfs_release_request(req); | ||
493 | } | 494 | } |
494 | nfs_pageio_complete(&desc); | 495 | nfs_pageio_complete(&desc); |
495 | 496 | ||
@@ -523,9 +524,9 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) | |||
523 | nfs_list_remove_request(req); | 524 | nfs_list_remove_request(req); |
524 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { | 525 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { |
525 | /* Note the rewrite will go through mds */ | 526 | /* Note the rewrite will go through mds */ |
526 | kref_get(&req->wb_kref); | ||
527 | nfs_mark_request_commit(req, NULL, &cinfo); | 527 | nfs_mark_request_commit(req, NULL, &cinfo); |
528 | } | 528 | } else |
529 | nfs_release_request(req); | ||
529 | nfs_unlock_and_release_request(req); | 530 | nfs_unlock_and_release_request(req); |
530 | } | 531 | } |
531 | 532 | ||
@@ -716,12 +717,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
716 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) | 717 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) |
717 | bit = NFS_IOHDR_NEED_RESCHED; | 718 | bit = NFS_IOHDR_NEED_RESCHED; |
718 | else if (dreq->flags == 0) { | 719 | else if (dreq->flags == 0) { |
719 | memcpy(&dreq->verf, &req->wb_verf, | 720 | memcpy(&dreq->verf, hdr->verf, |
720 | sizeof(dreq->verf)); | 721 | sizeof(dreq->verf)); |
721 | bit = NFS_IOHDR_NEED_COMMIT; | 722 | bit = NFS_IOHDR_NEED_COMMIT; |
722 | dreq->flags = NFS_ODIRECT_DO_COMMIT; | 723 | dreq->flags = NFS_ODIRECT_DO_COMMIT; |
723 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { | 724 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { |
724 | if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) { | 725 | if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) { |
725 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; | 726 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; |
726 | bit = NFS_IOHDR_NEED_RESCHED; | 727 | bit = NFS_IOHDR_NEED_RESCHED; |
727 | } else | 728 | } else |
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b5b86a05059c..864c51e4b400 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c | |||
@@ -57,6 +57,11 @@ unsigned int nfs_idmap_cache_timeout = 600; | |||
57 | static const struct cred *id_resolver_cache; | 57 | static const struct cred *id_resolver_cache; |
58 | static struct key_type key_type_id_resolver_legacy; | 58 | static struct key_type key_type_id_resolver_legacy; |
59 | 59 | ||
60 | struct idmap { | ||
61 | struct rpc_pipe *idmap_pipe; | ||
62 | struct key_construction *idmap_key_cons; | ||
63 | struct mutex idmap_mutex; | ||
64 | }; | ||
60 | 65 | ||
61 | /** | 66 | /** |
62 | * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields | 67 | * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields |
@@ -310,9 +315,11 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, | |||
310 | name, namelen, type, data, | 315 | name, namelen, type, data, |
311 | data_size, NULL); | 316 | data_size, NULL); |
312 | if (ret < 0) { | 317 | if (ret < 0) { |
318 | mutex_lock(&idmap->idmap_mutex); | ||
313 | ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, | 319 | ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, |
314 | name, namelen, type, data, | 320 | name, namelen, type, data, |
315 | data_size, idmap); | 321 | data_size, idmap); |
322 | mutex_unlock(&idmap->idmap_mutex); | ||
316 | } | 323 | } |
317 | return ret; | 324 | return ret; |
318 | } | 325 | } |
@@ -354,11 +361,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ | |||
354 | /* idmap classic begins here */ | 361 | /* idmap classic begins here */ |
355 | module_param(nfs_idmap_cache_timeout, int, 0644); | 362 | module_param(nfs_idmap_cache_timeout, int, 0644); |
356 | 363 | ||
357 | struct idmap { | ||
358 | struct rpc_pipe *idmap_pipe; | ||
359 | struct key_construction *idmap_key_cons; | ||
360 | }; | ||
361 | |||
362 | enum { | 364 | enum { |
363 | Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err | 365 | Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err |
364 | }; | 366 | }; |
@@ -469,6 +471,7 @@ nfs_idmap_new(struct nfs_client *clp) | |||
469 | return error; | 471 | return error; |
470 | } | 472 | } |
471 | idmap->idmap_pipe = pipe; | 473 | idmap->idmap_pipe = pipe; |
474 | mutex_init(&idmap->idmap_mutex); | ||
472 | 475 | ||
473 | clp->cl_idmap = idmap; | 476 | clp->cl_idmap = idmap; |
474 | return 0; | 477 | return 0; |
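The idmap change moves the struct definition above its first user so a new idmap_mutex member can serialize the legacy request_key upcall fallback; only the slow path takes the lock, the keyring fast path stays lock-free. A rough sketch of that shape, with placeholder names for the context and helpers:

#include <linux/mutex.h>

struct idmap_ctx {
	struct mutex upcall_mutex;	/* serializes slow-path upcalls */
};

/* hypothetical stand-ins for the keyring lookup and the daemon upcall */
int fast_lookup(const char *name);
int slow_upcall(struct idmap_ctx *ctx, const char *name);

static void idmap_ctx_init(struct idmap_ctx *ctx)
{
	mutex_init(&ctx->upcall_mutex);
}

static int lookup_id(struct idmap_ctx *ctx, const char *name)
{
	int ret = fast_lookup(name);		/* lock-free fast path */

	if (ret < 0) {
		/* Only one legacy upcall in flight at a time. */
		mutex_lock(&ctx->upcall_mutex);
		ret = slow_upcall(ctx, name);
		mutex_unlock(&ctx->upcall_mutex);
	}
	return ret;
}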
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index e605d695dbcb..f7296983eba6 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -1530,7 +1530,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) | |||
1530 | nfsi->delegation_state = 0; | 1530 | nfsi->delegation_state = 0; |
1531 | init_rwsem(&nfsi->rwsem); | 1531 | init_rwsem(&nfsi->rwsem); |
1532 | nfsi->layout = NULL; | 1532 | nfsi->layout = NULL; |
1533 | atomic_set(&nfsi->commit_info.rpcs_out, 0); | ||
1534 | #endif | 1533 | #endif |
1535 | } | 1534 | } |
1536 | 1535 | ||
@@ -1545,6 +1544,7 @@ static void init_once(void *foo) | |||
1545 | INIT_LIST_HEAD(&nfsi->commit_info.list); | 1544 | INIT_LIST_HEAD(&nfsi->commit_info.list); |
1546 | nfsi->npages = 0; | 1545 | nfsi->npages = 0; |
1547 | nfsi->commit_info.ncommit = 0; | 1546 | nfsi->commit_info.ncommit = 0; |
1547 | atomic_set(&nfsi->commit_info.rpcs_out, 0); | ||
1548 | atomic_set(&nfsi->silly_count, 1); | 1548 | atomic_set(&nfsi->silly_count, 1); |
1549 | INIT_HLIST_HEAD(&nfsi->silly_list); | 1549 | INIT_HLIST_HEAD(&nfsi->silly_list); |
1550 | init_waitqueue_head(&nfsi->waitqueue); | 1550 | init_waitqueue_head(&nfsi->waitqueue); |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index c6827f93ab57..cc5900ac61b5 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -295,7 +295,7 @@ is_ds_client(struct nfs_client *clp) | |||
295 | 295 | ||
296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; | 296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; |
297 | 297 | ||
298 | extern const u32 nfs4_fattr_bitmap[2]; | 298 | extern const u32 nfs4_fattr_bitmap[3]; |
299 | extern const u32 nfs4_statfs_bitmap[2]; | 299 | extern const u32 nfs4_statfs_bitmap[2]; |
300 | extern const u32 nfs4_pathconf_bitmap[2]; | 300 | extern const u32 nfs4_pathconf_bitmap[2]; |
301 | extern const u32 nfs4_fsinfo_bitmap[3]; | 301 | extern const u32 nfs4_fsinfo_bitmap[3]; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d48dbefa0e71..15fc7e4664ed 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -105,6 +105,8 @@ static int nfs4_map_errors(int err) | |||
105 | return -EINVAL; | 105 | return -EINVAL; |
106 | case -NFS4ERR_SHARE_DENIED: | 106 | case -NFS4ERR_SHARE_DENIED: |
107 | return -EACCES; | 107 | return -EACCES; |
108 | case -NFS4ERR_MINOR_VERS_MISMATCH: | ||
109 | return -EPROTONOSUPPORT; | ||
108 | default: | 110 | default: |
109 | dprintk("%s could not handle NFSv4 error %d\n", | 111 | dprintk("%s could not handle NFSv4 error %d\n", |
110 | __func__, -err); | 112 | __func__, -err); |
@@ -116,7 +118,7 @@ static int nfs4_map_errors(int err) | |||
116 | /* | 118 | /* |
117 | * This is our standard bitmap for GETATTR requests. | 119 | * This is our standard bitmap for GETATTR requests. |
118 | */ | 120 | */ |
119 | const u32 nfs4_fattr_bitmap[2] = { | 121 | const u32 nfs4_fattr_bitmap[3] = { |
120 | FATTR4_WORD0_TYPE | 122 | FATTR4_WORD0_TYPE |
121 | | FATTR4_WORD0_CHANGE | 123 | | FATTR4_WORD0_CHANGE |
122 | | FATTR4_WORD0_SIZE | 124 | | FATTR4_WORD0_SIZE |
@@ -133,6 +135,24 @@ const u32 nfs4_fattr_bitmap[2] = { | |||
133 | | FATTR4_WORD1_TIME_MODIFY | 135 | | FATTR4_WORD1_TIME_MODIFY |
134 | }; | 136 | }; |
135 | 137 | ||
138 | static const u32 nfs4_pnfs_open_bitmap[3] = { | ||
139 | FATTR4_WORD0_TYPE | ||
140 | | FATTR4_WORD0_CHANGE | ||
141 | | FATTR4_WORD0_SIZE | ||
142 | | FATTR4_WORD0_FSID | ||
143 | | FATTR4_WORD0_FILEID, | ||
144 | FATTR4_WORD1_MODE | ||
145 | | FATTR4_WORD1_NUMLINKS | ||
146 | | FATTR4_WORD1_OWNER | ||
147 | | FATTR4_WORD1_OWNER_GROUP | ||
148 | | FATTR4_WORD1_RAWDEV | ||
149 | | FATTR4_WORD1_SPACE_USED | ||
150 | | FATTR4_WORD1_TIME_ACCESS | ||
151 | | FATTR4_WORD1_TIME_METADATA | ||
152 | | FATTR4_WORD1_TIME_MODIFY, | ||
153 | FATTR4_WORD2_MDSTHRESHOLD | ||
154 | }; | ||
155 | |||
136 | const u32 nfs4_statfs_bitmap[2] = { | 156 | const u32 nfs4_statfs_bitmap[2] = { |
137 | FATTR4_WORD0_FILES_AVAIL | 157 | FATTR4_WORD0_FILES_AVAIL |
138 | | FATTR4_WORD0_FILES_FREE | 158 | | FATTR4_WORD0_FILES_FREE |
@@ -844,6 +864,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, | |||
844 | p->o_arg.name = &dentry->d_name; | 864 | p->o_arg.name = &dentry->d_name; |
845 | p->o_arg.server = server; | 865 | p->o_arg.server = server; |
846 | p->o_arg.bitmask = server->attr_bitmask; | 866 | p->o_arg.bitmask = server->attr_bitmask; |
867 | p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; | ||
847 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; | 868 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; |
848 | if (attrs != NULL && attrs->ia_valid != 0) { | 869 | if (attrs != NULL && attrs->ia_valid != 0) { |
849 | __be32 verf[2]; | 870 | __be32 verf[2]; |
@@ -1820,6 +1841,7 @@ static int _nfs4_do_open(struct inode *dir, | |||
1820 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); | 1841 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); |
1821 | if (!opendata->f_attr.mdsthreshold) | 1842 | if (!opendata->f_attr.mdsthreshold) |
1822 | goto err_opendata_put; | 1843 | goto err_opendata_put; |
1844 | opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; | ||
1823 | } | 1845 | } |
1824 | if (dentry->d_inode != NULL) | 1846 | if (dentry->d_inode != NULL) |
1825 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); | 1847 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); |
@@ -1880,6 +1902,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, | |||
1880 | struct nfs4_state *res; | 1902 | struct nfs4_state *res; |
1881 | int status; | 1903 | int status; |
1882 | 1904 | ||
1905 | fmode &= FMODE_READ|FMODE_WRITE; | ||
1883 | do { | 1906 | do { |
1884 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, | 1907 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, |
1885 | &res, ctx_th); | 1908 | &res, ctx_th); |
@@ -2526,6 +2549,14 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
2526 | 2549 | ||
2527 | nfs_fattr_init(fattr); | 2550 | nfs_fattr_init(fattr); |
2528 | 2551 | ||
2552 | /* Deal with open(O_TRUNC) */ | ||
2553 | if (sattr->ia_valid & ATTR_OPEN) | ||
2554 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
2555 | |||
2556 | /* Optimization: if the end result is no change, don't RPC */ | ||
2557 | if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) | ||
2558 | return 0; | ||
2559 | |||
2529 | /* Search for an existing open(O_WRITE) file */ | 2560 | /* Search for an existing open(O_WRITE) file */ |
2530 | if (sattr->ia_valid & ATTR_FILE) { | 2561 | if (sattr->ia_valid & ATTR_FILE) { |
2531 | struct nfs_open_context *ctx; | 2562 | struct nfs_open_context *ctx; |
@@ -2537,10 +2568,6 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
2537 | } | 2568 | } |
2538 | } | 2569 | } |
2539 | 2570 | ||
2540 | /* Deal with open(O_TRUNC) */ | ||
2541 | if (sattr->ia_valid & ATTR_OPEN) | ||
2542 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
2543 | |||
2544 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); | 2571 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); |
2545 | if (status == 0) | 2572 | if (status == 0) |
2546 | nfs_setattr_update_inode(inode, sattr); | 2573 | nfs_setattr_update_inode(inode, sattr); |
@@ -5275,7 +5302,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, | |||
5275 | 5302 | ||
5276 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5303 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
5277 | if (status) | 5304 | if (status) |
5278 | pr_warn("NFS: Got error %d from the server %s on " | 5305 | dprintk("NFS: Got error %d from the server %s on " |
5279 | "DESTROY_CLIENTID.", status, clp->cl_hostname); | 5306 | "DESTROY_CLIENTID.", status, clp->cl_hostname); |
5280 | return status; | 5307 | return status; |
5281 | } | 5308 | } |
@@ -5746,8 +5773,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session, | |||
5746 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5773 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
5747 | 5774 | ||
5748 | if (status) | 5775 | if (status) |
5749 | printk(KERN_WARNING | 5776 | dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " |
5750 | "NFS: Got error %d from the server on DESTROY_SESSION. " | ||
5751 | "Session has been destroyed regardless...\n", status); | 5777 | "Session has been destroyed regardless...\n", status); |
5752 | 5778 | ||
5753 | dprintk("<-- nfs4_proc_destroy_session\n"); | 5779 | dprintk("<-- nfs4_proc_destroy_session\n"); |
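Among the nfs4proc.c hunks, the setattr change hoists the ATTR_OPEN handling above the open-context search and adds an early return when, after stripping the O_TRUNC bookkeeping bits, no attribute change remains, so no SETATTR RPC is sent at all. The control flow, roughly (the ia_valid bits are the real VFS ones, the function body is a sketch and send_setattr_rpc is hypothetical):

#include <linux/fs.h>

int send_setattr_rpc(struct inode *inode, struct iattr *sattr);

static int do_setattr(struct inode *inode, struct iattr *sattr)
{
	/*
	 * open(O_TRUNC) tags the iattr with ATTR_OPEN plus ctime/mtime
	 * updates that the server performs itself; drop them first.
	 */
	if (sattr->ia_valid & ATTR_OPEN)
		sattr->ia_valid &= ~(ATTR_MTIME | ATTR_CTIME | ATTR_OPEN);

	/*
	 * If only ATTR_FILE (the "came in via an open file" hint) is
	 * left, there is nothing to change on the server - skip the RPC.
	 */
	if ((sattr->ia_valid & ~ATTR_FILE) == 0)
		return 0;

	return send_setattr_rpc(inode, sattr);
}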
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c679b9ecef63..f38300e9f171 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -244,6 +244,16 @@ static int nfs4_begin_drain_session(struct nfs_client *clp) | |||
244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); | 244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); |
245 | } | 245 | } |
246 | 246 | ||
247 | static void nfs41_finish_session_reset(struct nfs_client *clp) | ||
248 | { | ||
249 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | ||
250 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | ||
251 | /* create_session negotiated new slot table */ | ||
252 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
253 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
254 | nfs41_setup_state_renewal(clp); | ||
255 | } | ||
256 | |||
247 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) | 257 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) |
248 | { | 258 | { |
249 | int status; | 259 | int status; |
@@ -259,8 +269,7 @@ do_confirm: | |||
259 | status = nfs4_proc_create_session(clp, cred); | 269 | status = nfs4_proc_create_session(clp, cred); |
260 | if (status != 0) | 270 | if (status != 0) |
261 | goto out; | 271 | goto out; |
262 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | 272 | nfs41_finish_session_reset(clp); |
263 | nfs41_setup_state_renewal(clp); | ||
264 | nfs_mark_client_ready(clp, NFS_CS_READY); | 273 | nfs_mark_client_ready(clp, NFS_CS_READY); |
265 | out: | 274 | out: |
266 | return status; | 275 | return status; |
@@ -1772,16 +1781,9 @@ static int nfs4_reset_session(struct nfs_client *clp) | |||
1772 | status = nfs4_handle_reclaim_lease_error(clp, status); | 1781 | status = nfs4_handle_reclaim_lease_error(clp, status); |
1773 | goto out; | 1782 | goto out; |
1774 | } | 1783 | } |
1775 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | 1784 | nfs41_finish_session_reset(clp); |
1776 | /* create_session negotiated new slot table */ | ||
1777 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
1778 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
1779 | dprintk("%s: session reset was successful for server %s!\n", | 1785 | dprintk("%s: session reset was successful for server %s!\n", |
1780 | __func__, clp->cl_hostname); | 1786 | __func__, clp->cl_hostname); |
1781 | |||
1782 | /* Let the state manager reestablish state */ | ||
1783 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) | ||
1784 | nfs41_setup_state_renewal(clp); | ||
1785 | out: | 1787 | out: |
1786 | if (cred) | 1788 | if (cred) |
1787 | put_rpccred(cred); | 1789 | put_rpccred(cred); |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ee4a74db95d0..18fae29b0301 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -1198,12 +1198,13 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c | |||
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, | 1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, |
1201 | const u32 *open_bitmap, | ||
1201 | struct compound_hdr *hdr) | 1202 | struct compound_hdr *hdr) |
1202 | { | 1203 | { |
1203 | encode_getattr_three(xdr, | 1204 | encode_getattr_three(xdr, |
1204 | bitmask[0] & nfs4_fattr_bitmap[0], | 1205 | bitmask[0] & open_bitmap[0], |
1205 | bitmask[1] & nfs4_fattr_bitmap[1], | 1206 | bitmask[1] & open_bitmap[1], |
1206 | bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD, | 1207 | bitmask[2] & open_bitmap[2], |
1207 | hdr); | 1208 | hdr); |
1208 | } | 1209 | } |
1209 | 1210 | ||
@@ -2221,7 +2222,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
2221 | encode_putfh(xdr, args->fh, &hdr); | 2222 | encode_putfh(xdr, args->fh, &hdr); |
2222 | encode_open(xdr, args, &hdr); | 2223 | encode_open(xdr, args, &hdr); |
2223 | encode_getfh(xdr, &hdr); | 2224 | encode_getfh(xdr, &hdr); |
2224 | encode_getfattr_open(xdr, args->bitmask, &hdr); | 2225 | encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr); |
2225 | encode_nops(&hdr); | 2226 | encode_nops(&hdr); |
2226 | } | 2227 | } |
2227 | 2228 | ||
@@ -4359,7 +4360,10 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
4359 | 4360 | ||
4360 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) | 4361 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) |
4361 | return -EIO; | 4362 | return -EIO; |
4362 | if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) { | 4363 | if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) { |
4364 | /* Did the server return an unrequested attribute? */ | ||
4365 | if (unlikely(res == NULL)) | ||
4366 | return -EREMOTEIO; | ||
4363 | p = xdr_inline_decode(xdr, 4); | 4367 | p = xdr_inline_decode(xdr, 4); |
4364 | if (unlikely(!p)) | 4368 | if (unlikely(!p)) |
4365 | goto out_overflow; | 4369 | goto out_overflow; |
@@ -4372,6 +4376,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
4372 | __func__); | 4376 | __func__); |
4373 | 4377 | ||
4374 | status = decode_first_threshold_item4(xdr, res); | 4378 | status = decode_first_threshold_item4(xdr, res); |
4379 | bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD; | ||
4375 | } | 4380 | } |
4376 | return status; | 4381 | return status; |
4377 | out_overflow: | 4382 | out_overflow: |
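decode_attr_mdsthreshold gains two defensive tweaks: it fails with -EREMOTEIO if the server returns an attribute the client never asked for (res == NULL), and it clears the bit from the bitmap once the attribute has been consumed so later decoders see an accurate remaining mask. In skeleton form, with an invented attribute bit and helpers:

#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>

#define FATTR4_WORD2_FOO	0x1	/* hypothetical attribute bit */

struct foo_res;
int decode_foo_body(struct xdr_stream *xdr, struct foo_res *res);	/* hypothetical */

static int decode_attr_foo(struct xdr_stream *xdr, u32 *bitmap,
			   struct foo_res *res)
{
	int status = 0;

	if (bitmap[2] & FATTR4_WORD2_FOO) {
		if (res == NULL)
			return -EREMOTEIO;	/* unrequested attribute */
		status = decode_foo_body(xdr, res);
		bitmap[2] &= ~FATTR4_WORD2_FOO;	/* mark it consumed */
	}
	return status;
}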
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index b8323aa7b543..bbc49caa7a82 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -70,6 +70,10 @@ find_pnfs_driver(u32 id) | |||
70 | 70 | ||
71 | spin_lock(&pnfs_spinlock); | 71 | spin_lock(&pnfs_spinlock); |
72 | local = find_pnfs_driver_locked(id); | 72 | local = find_pnfs_driver_locked(id); |
73 | if (local != NULL && !try_module_get(local->owner)) { | ||
74 | dprintk("%s: Could not grab reference on module\n", __func__); | ||
75 | local = NULL; | ||
76 | } | ||
73 | spin_unlock(&pnfs_spinlock); | 77 | spin_unlock(&pnfs_spinlock); |
74 | return local; | 78 | return local; |
75 | } | 79 | } |
@@ -80,6 +84,9 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss) | |||
80 | if (nfss->pnfs_curr_ld) { | 84 | if (nfss->pnfs_curr_ld) { |
81 | if (nfss->pnfs_curr_ld->clear_layoutdriver) | 85 | if (nfss->pnfs_curr_ld->clear_layoutdriver) |
82 | nfss->pnfs_curr_ld->clear_layoutdriver(nfss); | 86 | nfss->pnfs_curr_ld->clear_layoutdriver(nfss); |
87 | /* Decrement the MDS count. Purge the deviceid cache if zero */ | ||
88 | if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count)) | ||
89 | nfs4_deviceid_purge_client(nfss->nfs_client); | ||
83 | module_put(nfss->pnfs_curr_ld->owner); | 90 | module_put(nfss->pnfs_curr_ld->owner); |
84 | } | 91 | } |
85 | nfss->pnfs_curr_ld = NULL; | 92 | nfss->pnfs_curr_ld = NULL; |
@@ -115,10 +122,6 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, | |||
115 | goto out_no_driver; | 122 | goto out_no_driver; |
116 | } | 123 | } |
117 | } | 124 | } |
118 | if (!try_module_get(ld_type->owner)) { | ||
119 | dprintk("%s: Could not grab reference on module\n", __func__); | ||
120 | goto out_no_driver; | ||
121 | } | ||
122 | server->pnfs_curr_ld = ld_type; | 125 | server->pnfs_curr_ld = ld_type; |
123 | if (ld_type->set_layoutdriver | 126 | if (ld_type->set_layoutdriver |
124 | && ld_type->set_layoutdriver(server, mntfh)) { | 127 | && ld_type->set_layoutdriver(server, mntfh)) { |
@@ -127,6 +130,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, | |||
127 | module_put(ld_type->owner); | 130 | module_put(ld_type->owner); |
128 | goto out_no_driver; | 131 | goto out_no_driver; |
129 | } | 132 | } |
133 | /* Bump the MDS count */ | ||
134 | atomic_inc(&server->nfs_client->cl_mds_count); | ||
130 | 135 | ||
131 | dprintk("%s: pNFS module for %u set\n", __func__, id); | 136 | dprintk("%s: pNFS module for %u set\n", __func__, id); |
132 | return; | 137 | return; |
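The pnfs.c change moves try_module_get() into find_pnfs_driver(), under pnfs_spinlock, so the layout driver module cannot be unloaded between the lookup and the reference grab; it also purges the client's deviceid cache only when the last MDS user drops away. The lookup-with-reference idiom in isolation, using placeholder type and lookup names:

#include <linux/module.h>
#include <linux/spinlock.h>

struct layout_driver {
	u32		id;
	struct module	*owner;
};

static DEFINE_SPINLOCK(driver_lock);

struct layout_driver *__find_driver_locked(u32 id);	/* hypothetical */

static struct layout_driver *find_driver_get(u32 id)
{
	struct layout_driver *ld;

	spin_lock(&driver_lock);
	ld = __find_driver_locked(id);
	/*
	 * Take the module reference while still holding the lock so the
	 * driver cannot go away between lookup and use.
	 */
	if (ld && !try_module_get(ld->owner))
		ld = NULL;
	spin_unlock(&driver_lock);
	return ld;	/* caller drops it with module_put(ld->owner) */
}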
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 29fd23c0efdc..64f90d845f6a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -365,7 +365,7 @@ static inline bool | |||
365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, | 365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, |
366 | struct nfs_server *nfss) | 366 | struct nfs_server *nfss) |
367 | { | 367 | { |
368 | return (dst && src && src->bm != 0 && | 368 | return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld && |
369 | nfss->pnfs_curr_ld->id == src->l_type); | 369 | nfss->pnfs_curr_ld->id == src->l_type); |
370 | } | 370 | } |
371 | 371 | ||
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index a706b6bcc286..617c7419a08e 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -651,7 +651,7 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) | |||
651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 | 651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 |
652 | * as it is guaranteed to always return the file attributes | 652 | * as it is guaranteed to always return the file attributes |
653 | */ | 653 | */ |
654 | if (data->args.offset + data->args.count >= data->res.fattr->size) | 654 | if (data->args.offset + data->res.count >= data->res.fattr->size) |
655 | data->res.eof = 1; | 655 | data->res.eof = 1; |
656 | } | 656 | } |
657 | return 0; | 657 | return 0; |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ff656c022684..906f09c7d842 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options, | |||
1867 | if (data == NULL) | 1867 | if (data == NULL) |
1868 | goto out_no_data; | 1868 | goto out_no_data; |
1869 | 1869 | ||
1870 | args->version = NFS_DEFAULT_VERSION; | ||
1870 | switch (data->version) { | 1871 | switch (data->version) { |
1871 | case 1: | 1872 | case 1: |
1872 | data->namlen = 0; | 1873 | data->namlen = 0; |
@@ -2637,6 +2638,8 @@ static int nfs4_validate_mount_data(void *options, | |||
2637 | if (data == NULL) | 2638 | if (data == NULL) |
2638 | goto out_no_data; | 2639 | goto out_no_data; |
2639 | 2640 | ||
2641 | args->version = 4; | ||
2642 | |||
2640 | switch (data->version) { | 2643 | switch (data->version) { |
2641 | case 1: | 2644 | case 1: |
2642 | if (data->host_addrlen > sizeof(args->nfs_server.address)) | 2645 | if (data->host_addrlen > sizeof(args->nfs_server.address)) |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e6fe3d69d14c..4d6861c0dc14 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -80,6 +80,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void) | |||
80 | INIT_LIST_HEAD(&hdr->rpc_list); | 80 | INIT_LIST_HEAD(&hdr->rpc_list); |
81 | spin_lock_init(&hdr->lock); | 81 | spin_lock_init(&hdr->lock); |
82 | atomic_set(&hdr->refcnt, 0); | 82 | atomic_set(&hdr->refcnt, 0); |
83 | hdr->verf = &p->verf; | ||
83 | } | 84 | } |
84 | return p; | 85 | return p; |
85 | } | 86 | } |
@@ -619,6 +620,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
619 | goto next; | 620 | goto next; |
620 | } | 621 | } |
621 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { | 622 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { |
623 | memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf)); | ||
622 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); | 624 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); |
623 | goto next; | 625 | goto next; |
624 | } | 626 | } |
@@ -1255,15 +1257,14 @@ static void nfs_writeback_release_common(void *calldata) | |||
1255 | struct nfs_write_data *data = calldata; | 1257 | struct nfs_write_data *data = calldata; |
1256 | struct nfs_pgio_header *hdr = data->header; | 1258 | struct nfs_pgio_header *hdr = data->header; |
1257 | int status = data->task.tk_status; | 1259 | int status = data->task.tk_status; |
1258 | struct nfs_page *req = hdr->req; | ||
1259 | 1260 | ||
1260 | if ((status >= 0) && nfs_write_need_commit(data)) { | 1261 | if ((status >= 0) && nfs_write_need_commit(data)) { |
1261 | spin_lock(&hdr->lock); | 1262 | spin_lock(&hdr->lock); |
1262 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) | 1263 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) |
1263 | ; /* Do nothing */ | 1264 | ; /* Do nothing */ |
1264 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) | 1265 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) |
1265 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1266 | memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf)); |
1266 | else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) | 1267 | else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf))) |
1267 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); | 1268 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); |
1268 | spin_unlock(&hdr->lock); | 1269 | spin_unlock(&hdr->lock); |
1269 | } | 1270 | } |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 8fdc9ec5c5d3..94effd5bc4a1 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -900,7 +900,7 @@ static void free_session(struct kref *kref) | |||
900 | struct nfsd4_session *ses; | 900 | struct nfsd4_session *ses; |
901 | int mem; | 901 | int mem; |
902 | 902 | ||
903 | BUG_ON(!spin_is_locked(&client_lock)); | 903 | lockdep_assert_held(&client_lock); |
904 | ses = container_of(kref, struct nfsd4_session, se_ref); | 904 | ses = container_of(kref, struct nfsd4_session, se_ref); |
905 | nfsd4_del_conns(ses); | 905 | nfsd4_del_conns(ses); |
906 | spin_lock(&nfsd_drc_lock); | 906 | spin_lock(&nfsd_drc_lock); |
@@ -1080,7 +1080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) | |||
1080 | static inline void | 1080 | static inline void |
1081 | free_client(struct nfs4_client *clp) | 1081 | free_client(struct nfs4_client *clp) |
1082 | { | 1082 | { |
1083 | BUG_ON(!spin_is_locked(&client_lock)); | 1083 | lockdep_assert_held(&client_lock); |
1084 | while (!list_empty(&clp->cl_sessions)) { | 1084 | while (!list_empty(&clp->cl_sessions)) { |
1085 | struct nfsd4_session *ses; | 1085 | struct nfsd4_session *ses; |
1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | 1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, |
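The nfsd hunks replace BUG_ON(!spin_is_locked(&client_lock)) with lockdep_assert_held(&client_lock). spin_is_locked() is a poor assertion: on !CONFIG_SMP builds spinlocks compile away and it can report the lock as free, and it cannot tell whether the *current* context is the holder. lockdep_assert_held() checks ownership when lockdep is enabled and costs nothing otherwise. For example:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct client;

static DEFINE_SPINLOCK(client_lock);

/* Must be called with client_lock held. */
static void free_client_locked(struct client *clp)
{
	lockdep_assert_held(&client_lock);	/* debug-only owner check */
	/* ... tear down clp ... */
}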
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 08a07a218d26..57ceaf33d177 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c | |||
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) | |||
191 | while (!list_empty(head)) { | 191 | while (!list_empty(head)) { |
192 | ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); | 192 | ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); |
193 | list_del_init(&ii->i_dirty); | 193 | list_del_init(&ii->i_dirty); |
194 | truncate_inode_pages(&ii->vfs_inode.i_data, 0); | ||
195 | nilfs_btnode_cache_clear(&ii->i_btnode_cache); | ||
194 | iput(&ii->vfs_inode); | 196 | iput(&ii->vfs_inode); |
195 | } | 197 | } |
196 | } | 198 | } |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 0e72ad6f22aa..88e11fb346b6 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) | |||
2309 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) | 2309 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) |
2310 | continue; | 2310 | continue; |
2311 | list_del_init(&ii->i_dirty); | 2311 | list_del_init(&ii->i_dirty); |
2312 | truncate_inode_pages(&ii->vfs_inode.i_data, 0); | ||
2313 | nilfs_btnode_cache_clear(&ii->i_btnode_cache); | ||
2312 | iput(&ii->vfs_inode); | 2314 | iput(&ii->vfs_inode); |
2313 | } | 2315 | } |
2314 | } | 2316 | } |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 616f41a7cde6..437195f204e1 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
1803 | rcu_read_lock(); | 1803 | rcu_read_lock(); |
1804 | file = fcheck_files(files, fd); | 1804 | file = fcheck_files(files, fd); |
1805 | if (file) { | 1805 | if (file) { |
1806 | unsigned i_mode, f_mode = file->f_mode; | 1806 | unsigned f_mode = file->f_mode; |
1807 | 1807 | ||
1808 | rcu_read_unlock(); | 1808 | rcu_read_unlock(); |
1809 | put_files_struct(files); | 1809 | put_files_struct(files); |
@@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
1819 | inode->i_gid = GLOBAL_ROOT_GID; | 1819 | inode->i_gid = GLOBAL_ROOT_GID; |
1820 | } | 1820 | } |
1821 | 1821 | ||
1822 | i_mode = S_IFLNK; | 1822 | if (S_ISLNK(inode->i_mode)) { |
1823 | if (f_mode & FMODE_READ) | 1823 | unsigned i_mode = S_IFLNK; |
1824 | i_mode |= S_IRUSR | S_IXUSR; | 1824 | if (f_mode & FMODE_READ) |
1825 | if (f_mode & FMODE_WRITE) | 1825 | i_mode |= S_IRUSR | S_IXUSR; |
1826 | i_mode |= S_IWUSR | S_IXUSR; | 1826 | if (f_mode & FMODE_WRITE) |
1827 | inode->i_mode = i_mode; | 1827 | i_mode |= S_IWUSR | S_IXUSR; |
1828 | inode->i_mode = i_mode; | ||
1829 | } | ||
1828 | 1830 | ||
1829 | security_task_to_inode(task, inode); | 1831 | security_task_to_inode(task, inode); |
1830 | put_task_struct(task); | 1832 | put_task_struct(task); |
@@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, | |||
1859 | ei = PROC_I(inode); | 1861 | ei = PROC_I(inode); |
1860 | ei->fd = fd; | 1862 | ei->fd = fd; |
1861 | 1863 | ||
1864 | inode->i_mode = S_IFLNK; | ||
1862 | inode->i_op = &proc_pid_link_inode_operations; | 1865 | inode->i_op = &proc_pid_link_inode_operations; |
1863 | inode->i_size = 64; | 1866 | inode->i_size = 64; |
1864 | ei->op.proc_get_link = proc_fd_link; | 1867 | ei->op.proc_get_link = proc_fd_link; |
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index be4614f24a2f..6b3ff045fe6e 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c | |||
@@ -102,20 +102,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason) | |||
102 | * as we can from the end of the buffer. | 102 | * as we can from the end of the buffer. |
103 | */ | 103 | */ |
104 | static void pstore_dump(struct kmsg_dumper *dumper, | 104 | static void pstore_dump(struct kmsg_dumper *dumper, |
105 | enum kmsg_dump_reason reason, | 105 | enum kmsg_dump_reason reason) |
106 | const char *s1, unsigned long l1, | ||
107 | const char *s2, unsigned long l2) | ||
108 | { | 106 | { |
109 | unsigned long s1_start, s2_start; | 107 | unsigned long total = 0; |
110 | unsigned long l1_cpy, l2_cpy; | ||
111 | unsigned long size, total = 0; | ||
112 | char *dst; | ||
113 | const char *why; | 108 | const char *why; |
114 | u64 id; | 109 | u64 id; |
115 | int hsize, ret; | ||
116 | unsigned int part = 1; | 110 | unsigned int part = 1; |
117 | unsigned long flags = 0; | 111 | unsigned long flags = 0; |
118 | int is_locked = 0; | 112 | int is_locked = 0; |
113 | int ret; | ||
119 | 114 | ||
120 | why = get_reason_str(reason); | 115 | why = get_reason_str(reason); |
121 | 116 | ||
@@ -127,30 +122,25 @@ static void pstore_dump(struct kmsg_dumper *dumper, | |||
127 | spin_lock_irqsave(&psinfo->buf_lock, flags); | 122 | spin_lock_irqsave(&psinfo->buf_lock, flags); |
128 | oopscount++; | 123 | oopscount++; |
129 | while (total < kmsg_bytes) { | 124 | while (total < kmsg_bytes) { |
125 | char *dst; | ||
126 | unsigned long size; | ||
127 | int hsize; | ||
128 | size_t len; | ||
129 | |||
130 | dst = psinfo->buf; | 130 | dst = psinfo->buf; |
131 | hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); | 131 | hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); |
132 | size = psinfo->bufsize - hsize; | 132 | size = psinfo->bufsize - hsize; |
133 | dst += hsize; | 133 | dst += hsize; |
134 | 134 | ||
135 | l2_cpy = min(l2, size); | 135 | if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len)) |
136 | l1_cpy = min(l1, size - l2_cpy); | ||
137 | |||
138 | if (l1_cpy + l2_cpy == 0) | ||
139 | break; | 136 | break; |
140 | 137 | ||
141 | s2_start = l2 - l2_cpy; | ||
142 | s1_start = l1 - l1_cpy; | ||
143 | |||
144 | memcpy(dst, s1 + s1_start, l1_cpy); | ||
145 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); | ||
146 | |||
147 | ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, | 138 | ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, |
148 | hsize + l1_cpy + l2_cpy, psinfo); | 139 | hsize + len, psinfo); |
149 | if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) | 140 | if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) |
150 | pstore_new_entry = 1; | 141 | pstore_new_entry = 1; |
151 | l1 -= l1_cpy; | 142 | |
152 | l2 -= l2_cpy; | 143 | total += hsize + len; |
153 | total += l1_cpy + l2_cpy; | ||
154 | part++; | 144 | part++; |
155 | } | 145 | } |
156 | if (in_nmi()) { | 146 | if (in_nmi()) { |
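The pstore dumper is converted from the old two-segment (s1/l1, s2/l2) kmsg_dump callback to the 3.5-era kmsg_dump_get_buffer() helper, which copies as many of the newest remaining log records as fit into the caller's buffer and reports how many bytes it wrote. A trimmed-down loop showing the intended use; the per-record header and the pstore backend call are elided, and write_record() is a made-up stand-in:

#include <linux/kmsg_dump.h>
#include <linux/types.h>

void write_record(unsigned int part, const char *buf, size_t len);	/* hypothetical */

static void dump_oops(struct kmsg_dumper *dumper, char *buf, size_t bufsize)
{
	size_t len;
	unsigned int part = 1;

	/*
	 * Each call fills buf with the next chunk of the kernel log,
	 * newest records first; it returns false once nothing is left.
	 */
	while (kmsg_dump_get_buffer(dumper, true, buf, bufsize, &len)) {
		write_record(part, buf, len);
		part++;
	}
}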
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 685a83756b2b..84a7e6f3c046 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
@@ -2918,6 +2918,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) | |||
2918 | struct dentry *dent; | 2918 | struct dentry *dent; |
2919 | struct ubifs_debug_info *d = c->dbg; | 2919 | struct ubifs_debug_info *d = c->dbg; |
2920 | 2920 | ||
2921 | if (!IS_ENABLED(DEBUG_FS)) | ||
2922 | return 0; | ||
2923 | |||
2921 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, | 2924 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, |
2922 | c->vi.ubi_num, c->vi.vol_id); | 2925 | c->vi.ubi_num, c->vi.vol_id); |
2923 | if (n == UBIFS_DFS_DIR_LEN) { | 2926 | if (n == UBIFS_DFS_DIR_LEN) { |
@@ -3010,7 +3013,8 @@ out: | |||
3010 | */ | 3013 | */ |
3011 | void dbg_debugfs_exit_fs(struct ubifs_info *c) | 3014 | void dbg_debugfs_exit_fs(struct ubifs_info *c) |
3012 | { | 3015 | { |
3013 | debugfs_remove_recursive(c->dbg->dfs_dir); | 3016 | if (IS_ENABLED(DEBUG_FS)) |
3017 | debugfs_remove_recursive(c->dbg->dfs_dir); | ||
3014 | } | 3018 | } |
3015 | 3019 | ||
3016 | struct ubifs_global_debug_info ubifs_dbg; | 3020 | struct ubifs_global_debug_info ubifs_dbg; |
@@ -3095,6 +3099,9 @@ int dbg_debugfs_init(void) | |||
3095 | const char *fname; | 3099 | const char *fname; |
3096 | struct dentry *dent; | 3100 | struct dentry *dent; |
3097 | 3101 | ||
3102 | if (!IS_ENABLED(DEBUG_FS)) | ||
3103 | return 0; | ||
3104 | |||
3098 | fname = "ubifs"; | 3105 | fname = "ubifs"; |
3099 | dent = debugfs_create_dir(fname, NULL); | 3106 | dent = debugfs_create_dir(fname, NULL); |
3100 | if (IS_ERR_OR_NULL(dent)) | 3107 | if (IS_ERR_OR_NULL(dent)) |
@@ -3159,7 +3166,8 @@ out: | |||
3159 | */ | 3166 | */ |
3160 | void dbg_debugfs_exit(void) | 3167 | void dbg_debugfs_exit(void) |
3161 | { | 3168 | { |
3162 | debugfs_remove_recursive(dfs_rootdir); | 3169 | if (IS_ENABLED(DEBUG_FS)) |
3170 | debugfs_remove_recursive(dfs_rootdir); | ||
3163 | } | 3171 | } |
3164 | 3172 | ||
3165 | /** | 3173 | /** |
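The UBIFS debugfs hooks are wrapped in IS_ENABLED() checks so they collapse to early returns when debugfs is configured out, instead of relying on #ifdef blocks. One thing worth noting when borrowing the pattern: IS_ENABLED() expects the full Kconfig symbol, i.e. IS_ENABLED(CONFIG_DEBUG_FS); with a bare DEBUG_FS the test always evaluates to false. A minimal illustration:

#include <linux/kconfig.h>
#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *my_dfs_dir;	/* illustrative */

static int my_debugfs_init(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;	/* whole body is compiled away when off */

	my_dfs_dir = debugfs_create_dir("example", NULL);
	return IS_ERR_OR_NULL(my_dfs_dir) ? -ENODEV : 0;
}

static void my_debugfs_exit(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		debugfs_remove_recursive(my_dfs_dir);
}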
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 229641fb8e67..9d1aeb7e2734 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -1080,6 +1080,7 @@ restart: | |||
1080 | goto restart; | 1080 | goto restart; |
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); | ||
1083 | trace_xfs_alloc_size_neither(args); | 1084 | trace_xfs_alloc_size_neither(args); |
1084 | args->agbno = NULLAGBLOCK; | 1085 | args->agbno = NULLAGBLOCK; |
1085 | return 0; | 1086 | return 0; |
@@ -2441,7 +2442,7 @@ xfs_alloc_vextent( | |||
2441 | DECLARE_COMPLETION_ONSTACK(done); | 2442 | DECLARE_COMPLETION_ONSTACK(done); |
2442 | 2443 | ||
2443 | args->done = &done; | 2444 | args->done = &done; |
2444 | INIT_WORK(&args->work, xfs_alloc_vextent_worker); | 2445 | INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker); |
2445 | queue_work(xfs_alloc_wq, &args->work); | 2446 | queue_work(xfs_alloc_wq, &args->work); |
2446 | wait_for_completion(&done); | 2447 | wait_for_completion(&done); |
2447 | return args->result; | 2448 | return args->result; |
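xfs_alloc_vextent() queues a work item that lives on the caller's stack, so the hunk switches INIT_WORK() to INIT_WORK_ONSTACK(), which sets up the debug-objects/lockdep annotations appropriately for on-stack work structs. The surrounding pattern looks roughly like this, with a placeholder args structure and worker:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct alloc_args {
	struct work_struct	work;
	struct completion	*done;
	int			result;
};

static void alloc_worker(struct work_struct *work)
{
	struct alloc_args *args = container_of(work, struct alloc_args, work);

	args->result = 0;		/* do the real allocation here */
	complete(args->done);
}

static int alloc_in_worker(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct alloc_args args = { .done = &done };

	/*
	 * On-stack work items need the _ONSTACK initializer so the
	 * object-debugging machinery treats them correctly.
	 */
	INIT_WORK_ONSTACK(&args.work, alloc_worker);
	schedule_work(&args.work);
	wait_for_completion(&done);
	return args.result;
}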
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index ae31c313a79e..8dad722c0041 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -981,10 +981,15 @@ xfs_vm_writepage( | |||
981 | imap_valid = 0; | 981 | imap_valid = 0; |
982 | } | 982 | } |
983 | } else { | 983 | } else { |
984 | if (PageUptodate(page)) { | 984 | if (PageUptodate(page)) |
985 | ASSERT(buffer_mapped(bh)); | 985 | ASSERT(buffer_mapped(bh)); |
986 | imap_valid = 0; | 986 | /* |
987 | } | 987 | * This buffer is not uptodate and will not be |
988 | * written to disk. Ensure that we will put any | ||
989 | * subsequent writeable buffers into a new | ||
990 | * ioend. | ||
991 | */ | ||
992 | imap_valid = 0; | ||
988 | continue; | 993 | continue; |
989 | } | 994 | } |
990 | 995 | ||
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 172d3cc8f8cb..a4beb421018a 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -201,14 +201,7 @@ xfs_buf_alloc( | |||
201 | bp->b_length = numblks; | 201 | bp->b_length = numblks; |
202 | bp->b_io_length = numblks; | 202 | bp->b_io_length = numblks; |
203 | bp->b_flags = flags; | 203 | bp->b_flags = flags; |
204 | 204 | bp->b_bn = blkno; | |
205 | /* | ||
206 | * We do not set the block number here in the buffer because we have not | ||
207 | * finished initialising the buffer. We insert the buffer into the cache | ||
208 | * in this state, so this ensures that we are unable to do IO on a | ||
209 | * buffer that hasn't been fully initialised. | ||
210 | */ | ||
211 | bp->b_bn = XFS_BUF_DADDR_NULL; | ||
212 | atomic_set(&bp->b_pin_count, 0); | 205 | atomic_set(&bp->b_pin_count, 0); |
213 | init_waitqueue_head(&bp->b_waiters); | 206 | init_waitqueue_head(&bp->b_waiters); |
214 | 207 | ||
@@ -567,11 +560,6 @@ xfs_buf_get( | |||
567 | if (bp != new_bp) | 560 | if (bp != new_bp) |
568 | xfs_buf_free(new_bp); | 561 | xfs_buf_free(new_bp); |
569 | 562 | ||
570 | /* | ||
571 | * Now we have a workable buffer, fill in the block number so | ||
572 | * that we can do IO on it. | ||
573 | */ | ||
574 | bp->b_bn = blkno; | ||
575 | bp->b_io_length = bp->b_length; | 563 | bp->b_io_length = bp->b_length; |
576 | 564 | ||
577 | found: | 565 | found: |
@@ -772,7 +760,7 @@ xfs_buf_get_uncached( | |||
772 | int error, i; | 760 | int error, i; |
773 | xfs_buf_t *bp; | 761 | xfs_buf_t *bp; |
774 | 762 | ||
775 | bp = xfs_buf_alloc(target, 0, numblks, 0); | 763 | bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0); |
776 | if (unlikely(bp == NULL)) | 764 | if (unlikely(bp == NULL)) |
777 | goto fail; | 765 | goto fail; |
778 | 766 | ||
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 6cdbf90c6f7b..d041d47d9d86 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -505,6 +505,14 @@ xfs_inode_item_push( | |||
505 | } | 505 | } |
506 | 506 | ||
507 | /* | 507 | /* |
508 | * Stale inode items should force out the iclog. | ||
509 | */ | ||
510 | if (ip->i_flags & XFS_ISTALE) { | ||
511 | rval = XFS_ITEM_PINNED; | ||
512 | goto out_unlock; | ||
513 | } | ||
514 | |||
515 | /* | ||
508 | * Someone else is already flushing the inode. Nothing we can do | 516 | * Someone else is already flushing the inode. Nothing we can do |
509 | * here but wait for the flush to finish and remove the item from | 517 | * here but wait for the flush to finish and remove the item from |
510 | * the AIL. | 518 | * the AIL. |
@@ -514,15 +522,6 @@ xfs_inode_item_push( | |||
514 | goto out_unlock; | 522 | goto out_unlock; |
515 | } | 523 | } |
516 | 524 | ||
517 | /* | ||
518 | * Stale inode items should force out the iclog. | ||
519 | */ | ||
520 | if (ip->i_flags & XFS_ISTALE) { | ||
521 | xfs_ifunlock(ip); | ||
522 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
523 | return XFS_ITEM_PINNED; | ||
524 | } | ||
525 | |||
526 | ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); | 525 | ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); |
527 | ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); | 526 | ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); |
528 | 527 | ||
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index f30d9807dc48..d90d4a388609 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -38,13 +38,21 @@ | |||
38 | kmem_zone_t *xfs_log_ticket_zone; | 38 | kmem_zone_t *xfs_log_ticket_zone; |
39 | 39 | ||
40 | /* Local miscellaneous function prototypes */ | 40 | /* Local miscellaneous function prototypes */ |
41 | STATIC int xlog_commit_record(struct log *log, struct xlog_ticket *ticket, | 41 | STATIC int |
42 | xlog_in_core_t **, xfs_lsn_t *); | 42 | xlog_commit_record( |
43 | struct xlog *log, | ||
44 | struct xlog_ticket *ticket, | ||
45 | struct xlog_in_core **iclog, | ||
46 | xfs_lsn_t *commitlsnp); | ||
47 | |||
43 | STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, | 48 | STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, |
44 | xfs_buftarg_t *log_target, | 49 | xfs_buftarg_t *log_target, |
45 | xfs_daddr_t blk_offset, | 50 | xfs_daddr_t blk_offset, |
46 | int num_bblks); | 51 | int num_bblks); |
47 | STATIC int xlog_space_left(struct log *log, atomic64_t *head); | 52 | STATIC int |
53 | xlog_space_left( | ||
54 | struct xlog *log, | ||
55 | atomic64_t *head); | ||
48 | STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); | 56 | STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); |
49 | STATIC void xlog_dealloc_log(xlog_t *log); | 57 | STATIC void xlog_dealloc_log(xlog_t *log); |
50 | 58 | ||
@@ -64,8 +72,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t *log, | |||
64 | int eventual_size); | 72 | int eventual_size); |
65 | STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); | 73 | STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); |
66 | 74 | ||
67 | STATIC void xlog_grant_push_ail(struct log *log, | 75 | STATIC void |
68 | int need_bytes); | 76 | xlog_grant_push_ail( |
77 | struct xlog *log, | ||
78 | int need_bytes); | ||
69 | STATIC void xlog_regrant_reserve_log_space(xlog_t *log, | 79 | STATIC void xlog_regrant_reserve_log_space(xlog_t *log, |
70 | xlog_ticket_t *ticket); | 80 | xlog_ticket_t *ticket); |
71 | STATIC void xlog_ungrant_log_space(xlog_t *log, | 81 | STATIC void xlog_ungrant_log_space(xlog_t *log, |
@@ -73,7 +83,9 @@ STATIC void xlog_ungrant_log_space(xlog_t *log, | |||
73 | 83 | ||
74 | #if defined(DEBUG) | 84 | #if defined(DEBUG) |
75 | STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); | 85 | STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); |
76 | STATIC void xlog_verify_grant_tail(struct log *log); | 86 | STATIC void |
87 | xlog_verify_grant_tail( | ||
88 | struct xlog *log); | ||
77 | STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, | 89 | STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, |
78 | int count, boolean_t syncing); | 90 | int count, boolean_t syncing); |
79 | STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, | 91 | STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, |
@@ -89,9 +101,9 @@ STATIC int xlog_iclogs_empty(xlog_t *log); | |||
89 | 101 | ||
90 | static void | 102 | static void |
91 | xlog_grant_sub_space( | 103 | xlog_grant_sub_space( |
92 | struct log *log, | 104 | struct xlog *log, |
93 | atomic64_t *head, | 105 | atomic64_t *head, |
94 | int bytes) | 106 | int bytes) |
95 | { | 107 | { |
96 | int64_t head_val = atomic64_read(head); | 108 | int64_t head_val = atomic64_read(head); |
97 | int64_t new, old; | 109 | int64_t new, old; |
@@ -115,9 +127,9 @@ xlog_grant_sub_space( | |||
115 | 127 | ||
116 | static void | 128 | static void |
117 | xlog_grant_add_space( | 129 | xlog_grant_add_space( |
118 | struct log *log, | 130 | struct xlog *log, |
119 | atomic64_t *head, | 131 | atomic64_t *head, |
120 | int bytes) | 132 | int bytes) |
121 | { | 133 | { |
122 | int64_t head_val = atomic64_read(head); | 134 | int64_t head_val = atomic64_read(head); |
123 | int64_t new, old; | 135 | int64_t new, old; |
@@ -165,7 +177,7 @@ xlog_grant_head_wake_all( | |||
165 | 177 | ||
166 | static inline int | 178 | static inline int |
167 | xlog_ticket_reservation( | 179 | xlog_ticket_reservation( |
168 | struct log *log, | 180 | struct xlog *log, |
169 | struct xlog_grant_head *head, | 181 | struct xlog_grant_head *head, |
170 | struct xlog_ticket *tic) | 182 | struct xlog_ticket *tic) |
171 | { | 183 | { |
@@ -182,7 +194,7 @@ xlog_ticket_reservation( | |||
182 | 194 | ||
183 | STATIC bool | 195 | STATIC bool |
184 | xlog_grant_head_wake( | 196 | xlog_grant_head_wake( |
185 | struct log *log, | 197 | struct xlog *log, |
186 | struct xlog_grant_head *head, | 198 | struct xlog_grant_head *head, |
187 | int *free_bytes) | 199 | int *free_bytes) |
188 | { | 200 | { |
@@ -204,7 +216,7 @@ xlog_grant_head_wake( | |||
204 | 216 | ||
205 | STATIC int | 217 | STATIC int |
206 | xlog_grant_head_wait( | 218 | xlog_grant_head_wait( |
207 | struct log *log, | 219 | struct xlog *log, |
208 | struct xlog_grant_head *head, | 220 | struct xlog_grant_head *head, |
209 | struct xlog_ticket *tic, | 221 | struct xlog_ticket *tic, |
210 | int need_bytes) | 222 | int need_bytes) |
@@ -256,7 +268,7 @@ shutdown: | |||
256 | */ | 268 | */ |
257 | STATIC int | 269 | STATIC int |
258 | xlog_grant_head_check( | 270 | xlog_grant_head_check( |
259 | struct log *log, | 271 | struct xlog *log, |
260 | struct xlog_grant_head *head, | 272 | struct xlog_grant_head *head, |
261 | struct xlog_ticket *tic, | 273 | struct xlog_ticket *tic, |
262 | int *need_bytes) | 274 | int *need_bytes) |
@@ -323,7 +335,7 @@ xfs_log_regrant( | |||
323 | struct xfs_mount *mp, | 335 | struct xfs_mount *mp, |
324 | struct xlog_ticket *tic) | 336 | struct xlog_ticket *tic) |
325 | { | 337 | { |
326 | struct log *log = mp->m_log; | 338 | struct xlog *log = mp->m_log; |
327 | int need_bytes; | 339 | int need_bytes; |
328 | int error = 0; | 340 | int error = 0; |
329 | 341 | ||
@@ -389,7 +401,7 @@ xfs_log_reserve( | |||
389 | bool permanent, | 401 | bool permanent, |
390 | uint t_type) | 402 | uint t_type) |
391 | { | 403 | { |
392 | struct log *log = mp->m_log; | 404 | struct xlog *log = mp->m_log; |
393 | struct xlog_ticket *tic; | 405 | struct xlog_ticket *tic; |
394 | int need_bytes; | 406 | int need_bytes; |
395 | int error = 0; | 407 | int error = 0; |
@@ -465,7 +477,7 @@ xfs_log_done( | |||
465 | struct xlog_in_core **iclog, | 477 | struct xlog_in_core **iclog, |
466 | uint flags) | 478 | uint flags) |
467 | { | 479 | { |
468 | struct log *log = mp->m_log; | 480 | struct xlog *log = mp->m_log; |
469 | xfs_lsn_t lsn = 0; | 481 | xfs_lsn_t lsn = 0; |
470 | 482 | ||
471 | if (XLOG_FORCED_SHUTDOWN(log) || | 483 | if (XLOG_FORCED_SHUTDOWN(log) || |
@@ -810,6 +822,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
810 | void | 822 | void |
811 | xfs_log_unmount(xfs_mount_t *mp) | 823 | xfs_log_unmount(xfs_mount_t *mp) |
812 | { | 824 | { |
825 | cancel_delayed_work_sync(&mp->m_sync_work); | ||
813 | xfs_trans_ail_destroy(mp); | 826 | xfs_trans_ail_destroy(mp); |
814 | xlog_dealloc_log(mp->m_log); | 827 | xlog_dealloc_log(mp->m_log); |
815 | } | 828 | } |
@@ -838,7 +851,7 @@ void | |||
838 | xfs_log_space_wake( | 851 | xfs_log_space_wake( |
839 | struct xfs_mount *mp) | 852 | struct xfs_mount *mp) |
840 | { | 853 | { |
841 | struct log *log = mp->m_log; | 854 | struct xlog *log = mp->m_log; |
842 | int free_bytes; | 855 | int free_bytes; |
843 | 856 | ||
844 | if (XLOG_FORCED_SHUTDOWN(log)) | 857 | if (XLOG_FORCED_SHUTDOWN(log)) |
@@ -916,7 +929,7 @@ xfs_lsn_t | |||
916 | xlog_assign_tail_lsn_locked( | 929 | xlog_assign_tail_lsn_locked( |
917 | struct xfs_mount *mp) | 930 | struct xfs_mount *mp) |
918 | { | 931 | { |
919 | struct log *log = mp->m_log; | 932 | struct xlog *log = mp->m_log; |
920 | struct xfs_log_item *lip; | 933 | struct xfs_log_item *lip; |
921 | xfs_lsn_t tail_lsn; | 934 | xfs_lsn_t tail_lsn; |
922 | 935 | ||
@@ -965,7 +978,7 @@ xlog_assign_tail_lsn( | |||
965 | */ | 978 | */ |
966 | STATIC int | 979 | STATIC int |
967 | xlog_space_left( | 980 | xlog_space_left( |
968 | struct log *log, | 981 | struct xlog *log, |
969 | atomic64_t *head) | 982 | atomic64_t *head) |
970 | { | 983 | { |
971 | int free_bytes; | 984 | int free_bytes; |
@@ -1277,7 +1290,7 @@ out: | |||
1277 | */ | 1290 | */ |
1278 | STATIC int | 1291 | STATIC int |
1279 | xlog_commit_record( | 1292 | xlog_commit_record( |
1280 | struct log *log, | 1293 | struct xlog *log, |
1281 | struct xlog_ticket *ticket, | 1294 | struct xlog_ticket *ticket, |
1282 | struct xlog_in_core **iclog, | 1295 | struct xlog_in_core **iclog, |
1283 | xfs_lsn_t *commitlsnp) | 1296 | xfs_lsn_t *commitlsnp) |
@@ -1311,7 +1324,7 @@ xlog_commit_record( | |||
1311 | */ | 1324 | */ |
1312 | STATIC void | 1325 | STATIC void |
1313 | xlog_grant_push_ail( | 1326 | xlog_grant_push_ail( |
1314 | struct log *log, | 1327 | struct xlog *log, |
1315 | int need_bytes) | 1328 | int need_bytes) |
1316 | { | 1329 | { |
1317 | xfs_lsn_t threshold_lsn = 0; | 1330 | xfs_lsn_t threshold_lsn = 0; |
@@ -1790,7 +1803,7 @@ xlog_write_start_rec( | |||
1790 | 1803 | ||
1791 | static xlog_op_header_t * | 1804 | static xlog_op_header_t * |
1792 | xlog_write_setup_ophdr( | 1805 | xlog_write_setup_ophdr( |
1793 | struct log *log, | 1806 | struct xlog *log, |
1794 | struct xlog_op_header *ophdr, | 1807 | struct xlog_op_header *ophdr, |
1795 | struct xlog_ticket *ticket, | 1808 | struct xlog_ticket *ticket, |
1796 | uint flags) | 1809 | uint flags) |
@@ -1873,7 +1886,7 @@ xlog_write_setup_copy( | |||
1873 | 1886 | ||
1874 | static int | 1887 | static int |
1875 | xlog_write_copy_finish( | 1888 | xlog_write_copy_finish( |
1876 | struct log *log, | 1889 | struct xlog *log, |
1877 | struct xlog_in_core *iclog, | 1890 | struct xlog_in_core *iclog, |
1878 | uint flags, | 1891 | uint flags, |
1879 | int *record_cnt, | 1892 | int *record_cnt, |
@@ -1958,7 +1971,7 @@ xlog_write_copy_finish( | |||
1958 | */ | 1971 | */ |
1959 | int | 1972 | int |
1960 | xlog_write( | 1973 | xlog_write( |
1961 | struct log *log, | 1974 | struct xlog *log, |
1962 | struct xfs_log_vec *log_vector, | 1975 | struct xfs_log_vec *log_vector, |
1963 | struct xlog_ticket *ticket, | 1976 | struct xlog_ticket *ticket, |
1964 | xfs_lsn_t *start_lsn, | 1977 | xfs_lsn_t *start_lsn, |
@@ -2821,7 +2834,7 @@ _xfs_log_force( | |||
2821 | uint flags, | 2834 | uint flags, |
2822 | int *log_flushed) | 2835 | int *log_flushed) |
2823 | { | 2836 | { |
2824 | struct log *log = mp->m_log; | 2837 | struct xlog *log = mp->m_log; |
2825 | struct xlog_in_core *iclog; | 2838 | struct xlog_in_core *iclog; |
2826 | xfs_lsn_t lsn; | 2839 | xfs_lsn_t lsn; |
2827 | 2840 | ||
@@ -2969,7 +2982,7 @@ _xfs_log_force_lsn( | |||
2969 | uint flags, | 2982 | uint flags, |
2970 | int *log_flushed) | 2983 | int *log_flushed) |
2971 | { | 2984 | { |
2972 | struct log *log = mp->m_log; | 2985 | struct xlog *log = mp->m_log; |
2973 | struct xlog_in_core *iclog; | 2986 | struct xlog_in_core *iclog; |
2974 | int already_slept = 0; | 2987 | int already_slept = 0; |
2975 | 2988 | ||
@@ -3147,7 +3160,7 @@ xfs_log_ticket_get( | |||
3147 | */ | 3160 | */ |
3148 | xlog_ticket_t * | 3161 | xlog_ticket_t * |
3149 | xlog_ticket_alloc( | 3162 | xlog_ticket_alloc( |
3150 | struct log *log, | 3163 | struct xlog *log, |
3151 | int unit_bytes, | 3164 | int unit_bytes, |
3152 | int cnt, | 3165 | int cnt, |
3153 | char client, | 3166 | char client, |
@@ -3278,7 +3291,7 @@ xlog_ticket_alloc( | |||
3278 | */ | 3291 | */ |
3279 | void | 3292 | void |
3280 | xlog_verify_dest_ptr( | 3293 | xlog_verify_dest_ptr( |
3281 | struct log *log, | 3294 | struct xlog *log, |
3282 | char *ptr) | 3295 | char *ptr) |
3283 | { | 3296 | { |
3284 | int i; | 3297 | int i; |
@@ -3307,7 +3320,7 @@ xlog_verify_dest_ptr( | |||
3307 | */ | 3320 | */ |
3308 | STATIC void | 3321 | STATIC void |
3309 | xlog_verify_grant_tail( | 3322 | xlog_verify_grant_tail( |
3310 | struct log *log) | 3323 | struct xlog *log) |
3311 | { | 3324 | { |
3312 | int tail_cycle, tail_blocks; | 3325 | int tail_cycle, tail_blocks; |
3313 | int cycle, space; | 3326 | int cycle, space; |
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 7d6197c58493..ddc4529d07d3 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -44,7 +44,7 @@ | |||
44 | */ | 44 | */ |
45 | static struct xlog_ticket * | 45 | static struct xlog_ticket * |
46 | xlog_cil_ticket_alloc( | 46 | xlog_cil_ticket_alloc( |
47 | struct log *log) | 47 | struct xlog *log) |
48 | { | 48 | { |
49 | struct xlog_ticket *tic; | 49 | struct xlog_ticket *tic; |
50 | 50 | ||
@@ -72,7 +72,7 @@ xlog_cil_ticket_alloc( | |||
72 | */ | 72 | */ |
73 | void | 73 | void |
74 | xlog_cil_init_post_recovery( | 74 | xlog_cil_init_post_recovery( |
75 | struct log *log) | 75 | struct xlog *log) |
76 | { | 76 | { |
77 | log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); | 77 | log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); |
78 | log->l_cilp->xc_ctx->sequence = 1; | 78 | log->l_cilp->xc_ctx->sequence = 1; |
@@ -182,7 +182,7 @@ xlog_cil_prepare_log_vecs( | |||
182 | */ | 182 | */ |
183 | STATIC void | 183 | STATIC void |
184 | xfs_cil_prepare_item( | 184 | xfs_cil_prepare_item( |
185 | struct log *log, | 185 | struct xlog *log, |
186 | struct xfs_log_vec *lv, | 186 | struct xfs_log_vec *lv, |
187 | int *len, | 187 | int *len, |
188 | int *diff_iovecs) | 188 | int *diff_iovecs) |
@@ -231,7 +231,7 @@ xfs_cil_prepare_item( | |||
231 | */ | 231 | */ |
232 | static void | 232 | static void |
233 | xlog_cil_insert_items( | 233 | xlog_cil_insert_items( |
234 | struct log *log, | 234 | struct xlog *log, |
235 | struct xfs_log_vec *log_vector, | 235 | struct xfs_log_vec *log_vector, |
236 | struct xlog_ticket *ticket) | 236 | struct xlog_ticket *ticket) |
237 | { | 237 | { |
@@ -373,7 +373,7 @@ xlog_cil_committed( | |||
373 | */ | 373 | */ |
374 | STATIC int | 374 | STATIC int |
375 | xlog_cil_push( | 375 | xlog_cil_push( |
376 | struct log *log) | 376 | struct xlog *log) |
377 | { | 377 | { |
378 | struct xfs_cil *cil = log->l_cilp; | 378 | struct xfs_cil *cil = log->l_cilp; |
379 | struct xfs_log_vec *lv; | 379 | struct xfs_log_vec *lv; |
@@ -601,7 +601,7 @@ xlog_cil_push_work( | |||
601 | */ | 601 | */ |
602 | static void | 602 | static void |
603 | xlog_cil_push_background( | 603 | xlog_cil_push_background( |
604 | struct log *log) | 604 | struct xlog *log) |
605 | { | 605 | { |
606 | struct xfs_cil *cil = log->l_cilp; | 606 | struct xfs_cil *cil = log->l_cilp; |
607 | 607 | ||
@@ -629,7 +629,7 @@ xlog_cil_push_background( | |||
629 | 629 | ||
630 | static void | 630 | static void |
631 | xlog_cil_push_foreground( | 631 | xlog_cil_push_foreground( |
632 | struct log *log, | 632 | struct xlog *log, |
633 | xfs_lsn_t push_seq) | 633 | xfs_lsn_t push_seq) |
634 | { | 634 | { |
635 | struct xfs_cil *cil = log->l_cilp; | 635 | struct xfs_cil *cil = log->l_cilp; |
@@ -683,7 +683,7 @@ xfs_log_commit_cil( | |||
683 | xfs_lsn_t *commit_lsn, | 683 | xfs_lsn_t *commit_lsn, |
684 | int flags) | 684 | int flags) |
685 | { | 685 | { |
686 | struct log *log = mp->m_log; | 686 | struct xlog *log = mp->m_log; |
687 | int log_flags = 0; | 687 | int log_flags = 0; |
688 | struct xfs_log_vec *log_vector; | 688 | struct xfs_log_vec *log_vector; |
689 | 689 | ||
@@ -754,7 +754,7 @@ xfs_log_commit_cil( | |||
754 | */ | 754 | */ |
755 | xfs_lsn_t | 755 | xfs_lsn_t |
756 | xlog_cil_force_lsn( | 756 | xlog_cil_force_lsn( |
757 | struct log *log, | 757 | struct xlog *log, |
758 | xfs_lsn_t sequence) | 758 | xfs_lsn_t sequence) |
759 | { | 759 | { |
760 | struct xfs_cil *cil = log->l_cilp; | 760 | struct xfs_cil *cil = log->l_cilp; |
@@ -833,7 +833,7 @@ xfs_log_item_in_current_chkpt( | |||
833 | */ | 833 | */ |
834 | int | 834 | int |
835 | xlog_cil_init( | 835 | xlog_cil_init( |
836 | struct log *log) | 836 | struct xlog *log) |
837 | { | 837 | { |
838 | struct xfs_cil *cil; | 838 | struct xfs_cil *cil; |
839 | struct xfs_cil_ctx *ctx; | 839 | struct xfs_cil_ctx *ctx; |
@@ -869,7 +869,7 @@ xlog_cil_init( | |||
869 | 869 | ||
870 | void | 870 | void |
871 | xlog_cil_destroy( | 871 | xlog_cil_destroy( |
872 | struct log *log) | 872 | struct xlog *log) |
873 | { | 873 | { |
874 | if (log->l_cilp->xc_ctx) { | 874 | if (log->l_cilp->xc_ctx) { |
875 | if (log->l_cilp->xc_ctx->ticket) | 875 | if (log->l_cilp->xc_ctx->ticket) |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 5bc33261f5be..72eba2201b14 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #define __XFS_LOG_PRIV_H__ | 19 | #define __XFS_LOG_PRIV_H__ |
20 | 20 | ||
21 | struct xfs_buf; | 21 | struct xfs_buf; |
22 | struct log; | 22 | struct xlog; |
23 | struct xlog_ticket; | 23 | struct xlog_ticket; |
24 | struct xfs_mount; | 24 | struct xfs_mount; |
25 | 25 | ||
@@ -352,7 +352,7 @@ typedef struct xlog_in_core { | |||
352 | struct xlog_in_core *ic_next; | 352 | struct xlog_in_core *ic_next; |
353 | struct xlog_in_core *ic_prev; | 353 | struct xlog_in_core *ic_prev; |
354 | struct xfs_buf *ic_bp; | 354 | struct xfs_buf *ic_bp; |
355 | struct log *ic_log; | 355 | struct xlog *ic_log; |
356 | int ic_size; | 356 | int ic_size; |
357 | int ic_offset; | 357 | int ic_offset; |
358 | int ic_bwritecnt; | 358 | int ic_bwritecnt; |
@@ -409,7 +409,7 @@ struct xfs_cil_ctx { | |||
409 | * operations almost as efficient as the old logging methods. | 409 | * operations almost as efficient as the old logging methods. |
410 | */ | 410 | */ |
411 | struct xfs_cil { | 411 | struct xfs_cil { |
412 | struct log *xc_log; | 412 | struct xlog *xc_log; |
413 | struct list_head xc_cil; | 413 | struct list_head xc_cil; |
414 | spinlock_t xc_cil_lock; | 414 | spinlock_t xc_cil_lock; |
415 | struct xfs_cil_ctx *xc_ctx; | 415 | struct xfs_cil_ctx *xc_ctx; |
@@ -487,7 +487,7 @@ struct xlog_grant_head { | |||
487 | * overflow 31 bits worth of byte offset, so using a byte number will mean | 487 | * overflow 31 bits worth of byte offset, so using a byte number will mean |
488 | * that round off problems won't occur when releasing partial reservations. | 488 | * that round off problems won't occur when releasing partial reservations. |
489 | */ | 489 | */ |
490 | typedef struct log { | 490 | typedef struct xlog { |
491 | /* The following fields don't need locking */ | 491 | /* The following fields don't need locking */ |
492 | struct xfs_mount *l_mp; /* mount point */ | 492 | struct xfs_mount *l_mp; /* mount point */ |
493 | struct xfs_ail *l_ailp; /* AIL log is working with */ | 493 | struct xfs_ail *l_ailp; /* AIL log is working with */ |
@@ -553,9 +553,14 @@ extern int xlog_recover_finish(xlog_t *log); | |||
553 | extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); | 553 | extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); |
554 | 554 | ||
555 | extern kmem_zone_t *xfs_log_ticket_zone; | 555 | extern kmem_zone_t *xfs_log_ticket_zone; |
556 | struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes, | 556 | struct xlog_ticket * |
557 | int count, char client, bool permanent, | 557 | xlog_ticket_alloc( |
558 | xfs_km_flags_t alloc_flags); | 558 | struct xlog *log, |
559 | int unit_bytes, | ||
560 | int count, | ||
561 | char client, | ||
562 | bool permanent, | ||
563 | xfs_km_flags_t alloc_flags); | ||
559 | 564 | ||
560 | 565 | ||
561 | static inline void | 566 | static inline void |
@@ -567,9 +572,14 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes) | |||
567 | } | 572 | } |
568 | 573 | ||
569 | void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); | 574 | void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); |
570 | int xlog_write(struct log *log, struct xfs_log_vec *log_vector, | 575 | int |
571 | struct xlog_ticket *tic, xfs_lsn_t *start_lsn, | 576 | xlog_write( |
572 | xlog_in_core_t **commit_iclog, uint flags); | 577 | struct xlog *log, |
578 | struct xfs_log_vec *log_vector, | ||
579 | struct xlog_ticket *tic, | ||
580 | xfs_lsn_t *start_lsn, | ||
581 | struct xlog_in_core **commit_iclog, | ||
582 | uint flags); | ||
573 | 583 | ||
574 | /* | 584 | /* |
575 | * When we crack an atomic LSN, we sample it first so that the value will not | 585 | * When we crack an atomic LSN, we sample it first so that the value will not |
@@ -629,17 +639,23 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space) | |||
629 | /* | 639 | /* |
630 | * Committed Item List interfaces | 640 | * Committed Item List interfaces |
631 | */ | 641 | */ |
632 | int xlog_cil_init(struct log *log); | 642 | int |
633 | void xlog_cil_init_post_recovery(struct log *log); | 643 | xlog_cil_init(struct xlog *log); |
634 | void xlog_cil_destroy(struct log *log); | 644 | void |
645 | xlog_cil_init_post_recovery(struct xlog *log); | ||
646 | void | ||
647 | xlog_cil_destroy(struct xlog *log); | ||
635 | 648 | ||
636 | /* | 649 | /* |
637 | * CIL force routines | 650 | * CIL force routines |
638 | */ | 651 | */ |
639 | xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence); | 652 | xfs_lsn_t |
653 | xlog_cil_force_lsn( | ||
654 | struct xlog *log, | ||
655 | xfs_lsn_t sequence); | ||
640 | 656 | ||
641 | static inline void | 657 | static inline void |
642 | xlog_cil_force(struct log *log) | 658 | xlog_cil_force(struct xlog *log) |
643 | { | 659 | { |
644 | xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); | 660 | xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); |
645 | } | 661 | } |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index ca386909131a..a7be98abd6a9 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -1471,8 +1471,8 @@ xlog_recover_add_item( | |||
1471 | 1471 | ||
1472 | STATIC int | 1472 | STATIC int |
1473 | xlog_recover_add_to_cont_trans( | 1473 | xlog_recover_add_to_cont_trans( |
1474 | struct log *log, | 1474 | struct xlog *log, |
1475 | xlog_recover_t *trans, | 1475 | struct xlog_recover *trans, |
1476 | xfs_caddr_t dp, | 1476 | xfs_caddr_t dp, |
1477 | int len) | 1477 | int len) |
1478 | { | 1478 | { |
@@ -1517,8 +1517,8 @@ xlog_recover_add_to_cont_trans( | |||
1517 | */ | 1517 | */ |
1518 | STATIC int | 1518 | STATIC int |
1519 | xlog_recover_add_to_trans( | 1519 | xlog_recover_add_to_trans( |
1520 | struct log *log, | 1520 | struct xlog *log, |
1521 | xlog_recover_t *trans, | 1521 | struct xlog_recover *trans, |
1522 | xfs_caddr_t dp, | 1522 | xfs_caddr_t dp, |
1523 | int len) | 1523 | int len) |
1524 | { | 1524 | { |
@@ -1588,8 +1588,8 @@ xlog_recover_add_to_trans( | |||
1588 | */ | 1588 | */ |
1589 | STATIC int | 1589 | STATIC int |
1590 | xlog_recover_reorder_trans( | 1590 | xlog_recover_reorder_trans( |
1591 | struct log *log, | 1591 | struct xlog *log, |
1592 | xlog_recover_t *trans, | 1592 | struct xlog_recover *trans, |
1593 | int pass) | 1593 | int pass) |
1594 | { | 1594 | { |
1595 | xlog_recover_item_t *item, *n; | 1595 | xlog_recover_item_t *item, *n; |
@@ -1642,8 +1642,8 @@ xlog_recover_reorder_trans( | |||
1642 | */ | 1642 | */ |
1643 | STATIC int | 1643 | STATIC int |
1644 | xlog_recover_buffer_pass1( | 1644 | xlog_recover_buffer_pass1( |
1645 | struct log *log, | 1645 | struct xlog *log, |
1646 | xlog_recover_item_t *item) | 1646 | struct xlog_recover_item *item) |
1647 | { | 1647 | { |
1648 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; | 1648 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
1649 | struct list_head *bucket; | 1649 | struct list_head *bucket; |
@@ -1696,7 +1696,7 @@ xlog_recover_buffer_pass1( | |||
1696 | */ | 1696 | */ |
1697 | STATIC int | 1697 | STATIC int |
1698 | xlog_check_buffer_cancelled( | 1698 | xlog_check_buffer_cancelled( |
1699 | struct log *log, | 1699 | struct xlog *log, |
1700 | xfs_daddr_t blkno, | 1700 | xfs_daddr_t blkno, |
1701 | uint len, | 1701 | uint len, |
1702 | ushort flags) | 1702 | ushort flags) |
@@ -2689,9 +2689,9 @@ xlog_recover_free_trans( | |||
2689 | 2689 | ||
2690 | STATIC int | 2690 | STATIC int |
2691 | xlog_recover_commit_pass1( | 2691 | xlog_recover_commit_pass1( |
2692 | struct log *log, | 2692 | struct xlog *log, |
2693 | struct xlog_recover *trans, | 2693 | struct xlog_recover *trans, |
2694 | xlog_recover_item_t *item) | 2694 | struct xlog_recover_item *item) |
2695 | { | 2695 | { |
2696 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); | 2696 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); |
2697 | 2697 | ||
@@ -2716,10 +2716,10 @@ xlog_recover_commit_pass1( | |||
2716 | 2716 | ||
2717 | STATIC int | 2717 | STATIC int |
2718 | xlog_recover_commit_pass2( | 2718 | xlog_recover_commit_pass2( |
2719 | struct log *log, | 2719 | struct xlog *log, |
2720 | struct xlog_recover *trans, | 2720 | struct xlog_recover *trans, |
2721 | struct list_head *buffer_list, | 2721 | struct list_head *buffer_list, |
2722 | xlog_recover_item_t *item) | 2722 | struct xlog_recover_item *item) |
2723 | { | 2723 | { |
2724 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); | 2724 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); |
2725 | 2725 | ||
@@ -2753,7 +2753,7 @@ xlog_recover_commit_pass2( | |||
2753 | */ | 2753 | */ |
2754 | STATIC int | 2754 | STATIC int |
2755 | xlog_recover_commit_trans( | 2755 | xlog_recover_commit_trans( |
2756 | struct log *log, | 2756 | struct xlog *log, |
2757 | struct xlog_recover *trans, | 2757 | struct xlog_recover *trans, |
2758 | int pass) | 2758 | int pass) |
2759 | { | 2759 | { |
@@ -2793,8 +2793,8 @@ out: | |||
2793 | 2793 | ||
2794 | STATIC int | 2794 | STATIC int |
2795 | xlog_recover_unmount_trans( | 2795 | xlog_recover_unmount_trans( |
2796 | struct log *log, | 2796 | struct xlog *log, |
2797 | xlog_recover_t *trans) | 2797 | struct xlog_recover *trans) |
2798 | { | 2798 | { |
2799 | /* Do nothing now */ | 2799 | /* Do nothing now */ |
2800 | xfs_warn(log->l_mp, "%s: Unmount LR", __func__); | 2800 | xfs_warn(log->l_mp, "%s: Unmount LR", __func__); |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 8b89c5ac72d9..90c1fc9eaea4 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -53,7 +53,7 @@ typedef struct xfs_trans_reservations { | |||
53 | 53 | ||
54 | #include "xfs_sync.h" | 54 | #include "xfs_sync.h" |
55 | 55 | ||
56 | struct log; | 56 | struct xlog; |
57 | struct xfs_mount_args; | 57 | struct xfs_mount_args; |
58 | struct xfs_inode; | 58 | struct xfs_inode; |
59 | struct xfs_bmbt_irec; | 59 | struct xfs_bmbt_irec; |
@@ -133,7 +133,7 @@ typedef struct xfs_mount { | |||
133 | uint m_readio_blocks; /* min read size blocks */ | 133 | uint m_readio_blocks; /* min read size blocks */ |
134 | uint m_writeio_log; /* min write size log bytes */ | 134 | uint m_writeio_log; /* min write size log bytes */ |
135 | uint m_writeio_blocks; /* min write size blocks */ | 135 | uint m_writeio_blocks; /* min write size blocks */ |
136 | struct log *m_log; /* log specific stuff */ | 136 | struct xlog *m_log; /* log specific stuff */ |
137 | int m_logbufs; /* number of log buffers */ | 137 | int m_logbufs; /* number of log buffers */ |
138 | int m_logbsize; /* size of each log buffer */ | 138 | int m_logbsize; /* size of each log buffer */ |
139 | uint m_rsumlevels; /* rt summary levels */ | 139 | uint m_rsumlevels; /* rt summary levels */ |
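xfs_mount.h only ever touches the log through a pointer, so the rename needs nothing more than an updated forward declaration; the struct layout stays private to xfs_log_priv.h. A minimal user-space sketch of the same opaque-pointer pattern (the "widget" names are invented for illustration, not taken from XFS):

/* opaque.h -- consumers see only a forward declaration */
struct widget;                          /* layout is private to widget.c */

struct widget *widget_create(int id);
int widget_id(const struct widget *w);
void widget_destroy(struct widget *w);

/* widget.c -- the only file that knows the layout */
#include <stdlib.h>

struct widget {
        int id;
};

struct widget *widget_create(int id)
{
        struct widget *w = malloc(sizeof(*w));

        if (w)
                w->id = id;
        return w;
}

int widget_id(const struct widget *w)
{
        return w->id;
}

void widget_destroy(struct widget *w)
{
        free(w);
}

Because no includer sees the members, renaming or reshaping the struct only requires recompiling the files that define it, which is what makes the struct log to struct xlog rename above so mechanical.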
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index c9d3409c5ca3..1e9ee064dbb2 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c | |||
@@ -386,23 +386,23 @@ xfs_sync_worker( | |||
386 | * We shouldn't write/force the log if we are in the mount/unmount | 386 | * We shouldn't write/force the log if we are in the mount/unmount |
387 | * process or on a read only filesystem. The workqueue still needs to be | 387 | * process or on a read only filesystem. The workqueue still needs to be |
388 | * active in both cases, however, because it is used for inode reclaim | 388 | * active in both cases, however, because it is used for inode reclaim |
389 | * during these times. Use the s_umount semaphore to provide exclusion | 389 | * during these times. Use the MS_ACTIVE flag to avoid doing anything |
390 | * with unmount. | 390 | * during mount. Doing work during unmount is avoided by calling |
391 | * cancel_delayed_work_sync on this work queue before tearing down | ||
392 | * the ail and the log in xfs_log_unmount. | ||
391 | */ | 393 | */ |
392 | if (down_read_trylock(&mp->m_super->s_umount)) { | 394 | if (!(mp->m_super->s_flags & MS_ACTIVE) && |
393 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { | 395 | !(mp->m_flags & XFS_MOUNT_RDONLY)) { |
394 | /* dgc: errors ignored here */ | 396 | /* dgc: errors ignored here */ |
395 | if (mp->m_super->s_frozen == SB_UNFROZEN && | 397 | if (mp->m_super->s_frozen == SB_UNFROZEN && |
396 | xfs_log_need_covered(mp)) | 398 | xfs_log_need_covered(mp)) |
397 | error = xfs_fs_log_dummy(mp); | 399 | error = xfs_fs_log_dummy(mp); |
398 | else | 400 | else |
399 | xfs_log_force(mp, 0); | 401 | xfs_log_force(mp, 0); |
400 | 402 | ||
401 | /* start pushing all the metadata that is currently | 403 | /* start pushing all the metadata that is currently |
402 | * dirty */ | 404 | * dirty */ |
403 | xfs_ail_push_all(mp->m_ail); | 405 | xfs_ail_push_all(mp->m_ail); |
404 | } | ||
405 | up_read(&mp->m_super->s_umount); | ||
406 | } | 406 | } |
407 | 407 | ||
408 | /* queue us up again */ | 408 | /* queue us up again */ |
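The xfs_sync.c change and the cancel_delayed_work_sync() call added to xfs_log_unmount() above work as a pair: the worker no longer takes s_umount, and unmount instead cancels and waits for it before tearing down the AIL and the log it touches. A hedged sketch of that teardown ordering with a self-rearming delayed work item (module and symbol names here are invented, not XFS code):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_worker(struct work_struct *work)
{
        /* ... periodic background work against shared state ... */
        schedule_delayed_work(&demo_work, 5 * HZ);      /* rearm */
}

static int __init demo_init(void)
{
        INIT_DELAYED_WORK(&demo_work, demo_worker);
        schedule_delayed_work(&demo_work, 5 * HZ);
        return 0;
}

static void __exit demo_exit(void)
{
        /*
         * Cancel and wait for the worker *before* freeing anything it
         * touches; a plain cancel_delayed_work() could return while the
         * callback is still running or about to requeue itself.
         */
        cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");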
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 7cf9d3529e51..caf5dabfd553 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
@@ -32,7 +32,7 @@ struct xfs_da_node_entry; | |||
32 | struct xfs_dquot; | 32 | struct xfs_dquot; |
33 | struct xfs_log_item; | 33 | struct xfs_log_item; |
34 | struct xlog_ticket; | 34 | struct xlog_ticket; |
35 | struct log; | 35 | struct xlog; |
36 | struct xlog_recover; | 36 | struct xlog_recover; |
37 | struct xlog_recover_item; | 37 | struct xlog_recover_item; |
38 | struct xfs_buf_log_format; | 38 | struct xfs_buf_log_format; |
@@ -762,7 +762,7 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_force); | |||
762 | DEFINE_DQUOT_EVENT(xfs_dqflush_done); | 762 | DEFINE_DQUOT_EVENT(xfs_dqflush_done); |
763 | 763 | ||
764 | DECLARE_EVENT_CLASS(xfs_loggrant_class, | 764 | DECLARE_EVENT_CLASS(xfs_loggrant_class, |
765 | TP_PROTO(struct log *log, struct xlog_ticket *tic), | 765 | TP_PROTO(struct xlog *log, struct xlog_ticket *tic), |
766 | TP_ARGS(log, tic), | 766 | TP_ARGS(log, tic), |
767 | TP_STRUCT__entry( | 767 | TP_STRUCT__entry( |
768 | __field(dev_t, dev) | 768 | __field(dev_t, dev) |
@@ -830,7 +830,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, | |||
830 | 830 | ||
831 | #define DEFINE_LOGGRANT_EVENT(name) \ | 831 | #define DEFINE_LOGGRANT_EVENT(name) \ |
832 | DEFINE_EVENT(xfs_loggrant_class, name, \ | 832 | DEFINE_EVENT(xfs_loggrant_class, name, \ |
833 | TP_PROTO(struct log *log, struct xlog_ticket *tic), \ | 833 | TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \ |
834 | TP_ARGS(log, tic)) | 834 | TP_ARGS(log, tic)) |
835 | DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); | 835 | DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); |
836 | DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); | 836 | DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); |
@@ -1664,7 +1664,7 @@ DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before); | |||
1664 | DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); | 1664 | DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); |
1665 | 1665 | ||
1666 | DECLARE_EVENT_CLASS(xfs_log_recover_item_class, | 1666 | DECLARE_EVENT_CLASS(xfs_log_recover_item_class, |
1667 | TP_PROTO(struct log *log, struct xlog_recover *trans, | 1667 | TP_PROTO(struct xlog *log, struct xlog_recover *trans, |
1668 | struct xlog_recover_item *item, int pass), | 1668 | struct xlog_recover_item *item, int pass), |
1669 | TP_ARGS(log, trans, item, pass), | 1669 | TP_ARGS(log, trans, item, pass), |
1670 | TP_STRUCT__entry( | 1670 | TP_STRUCT__entry( |
@@ -1698,7 +1698,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class, | |||
1698 | 1698 | ||
1699 | #define DEFINE_LOG_RECOVER_ITEM(name) \ | 1699 | #define DEFINE_LOG_RECOVER_ITEM(name) \ |
1700 | DEFINE_EVENT(xfs_log_recover_item_class, name, \ | 1700 | DEFINE_EVENT(xfs_log_recover_item_class, name, \ |
1701 | TP_PROTO(struct log *log, struct xlog_recover *trans, \ | 1701 | TP_PROTO(struct xlog *log, struct xlog_recover *trans, \ |
1702 | struct xlog_recover_item *item, int pass), \ | 1702 | struct xlog_recover_item *item, int pass), \ |
1703 | TP_ARGS(log, trans, item, pass)) | 1703 | TP_ARGS(log, trans, item, pass)) |
1704 | 1704 | ||
@@ -1709,7 +1709,7 @@ DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail); | |||
1709 | DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); | 1709 | DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); |
1710 | 1710 | ||
1711 | DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, | 1711 | DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, |
1712 | TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), | 1712 | TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), |
1713 | TP_ARGS(log, buf_f), | 1713 | TP_ARGS(log, buf_f), |
1714 | TP_STRUCT__entry( | 1714 | TP_STRUCT__entry( |
1715 | __field(dev_t, dev) | 1715 | __field(dev_t, dev) |
@@ -1739,7 +1739,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, | |||
1739 | 1739 | ||
1740 | #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ | 1740 | #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ |
1741 | DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ | 1741 | DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ |
1742 | TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \ | 1742 | TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \ |
1743 | TP_ARGS(log, buf_f)) | 1743 | TP_ARGS(log, buf_f)) |
1744 | 1744 | ||
1745 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); | 1745 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); |
@@ -1752,7 +1752,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf); | |||
1752 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); | 1752 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); |
1753 | 1753 | ||
1754 | DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, | 1754 | DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, |
1755 | TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), | 1755 | TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), |
1756 | TP_ARGS(log, in_f), | 1756 | TP_ARGS(log, in_f), |
1757 | TP_STRUCT__entry( | 1757 | TP_STRUCT__entry( |
1758 | __field(dev_t, dev) | 1758 | __field(dev_t, dev) |
@@ -1790,7 +1790,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, | |||
1790 | ) | 1790 | ) |
1791 | #define DEFINE_LOG_RECOVER_INO_ITEM(name) \ | 1791 | #define DEFINE_LOG_RECOVER_INO_ITEM(name) \ |
1792 | DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ | 1792 | DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ |
1793 | TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \ | 1793 | TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \ |
1794 | TP_ARGS(log, in_f)) | 1794 | TP_ARGS(log, in_f)) |
1795 | 1795 | ||
1796 | DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); | 1796 | DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index b0d62820ada1..9e6e1c6eb60a 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -440,8 +440,8 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) | |||
440 | 440 | ||
441 | #else /* CONFIG_ACPI */ | 441 | #else /* CONFIG_ACPI */ |
442 | 442 | ||
443 | static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } | 443 | static inline int register_acpi_bus_type(void *bus) { return 0; } |
444 | static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } | 444 | static inline int unregister_acpi_bus_type(void *bus) { return 0; } |
445 | 445 | ||
446 | #endif /* CONFIG_ACPI */ | 446 | #endif /* CONFIG_ACPI */ |
447 | 447 | ||
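The acpi_bus.h stubs show the usual rule for config-off helpers in headers: they must be static inline so every includer does not end up with an unused static function, and here they also take void *, presumably so the stub still builds where struct acpi_bus_type is not declared. A small hedged example of the pattern around an invented CONFIG_FOO option:

/* foo.h */
#ifndef _FOO_H
#define _FOO_H

struct foo_driver;

#ifdef CONFIG_FOO
int foo_register(struct foo_driver *drv);
#else
/*
 * A static (non-inline) stub in a header triggers "defined but not
 * used" warnings in every file that includes it; static inline does not.
 */
static inline int foo_register(void *drv) { return 0; }
#endif

#endif /* _FOO_H */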
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 2520a6e241dc..506ec19a3736 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
@@ -31,6 +31,9 @@ struct bug_entry { | |||
31 | 31 | ||
32 | #endif /* CONFIG_GENERIC_BUG */ | 32 | #endif /* CONFIG_GENERIC_BUG */ |
33 | 33 | ||
34 | #ifndef __ASSEMBLY__ | ||
35 | #include <linux/kernel.h> | ||
36 | |||
34 | /* | 37 | /* |
35 | * Don't use BUG() or BUG_ON() unless there's really no way out; one | 38 | * Don't use BUG() or BUG_ON() unless there's really no way out; one |
36 | * example might be detecting data structure corruption in the middle | 39 | * example might be detecting data structure corruption in the middle |
@@ -60,7 +63,6 @@ struct bug_entry { | |||
60 | * to provide better diagnostics. | 63 | * to provide better diagnostics. |
61 | */ | 64 | */ |
62 | #ifndef __WARN_TAINT | 65 | #ifndef __WARN_TAINT |
63 | #ifndef __ASSEMBLY__ | ||
64 | extern __printf(3, 4) | 66 | extern __printf(3, 4) |
65 | void warn_slowpath_fmt(const char *file, const int line, | 67 | void warn_slowpath_fmt(const char *file, const int line, |
66 | const char *fmt, ...); | 68 | const char *fmt, ...); |
@@ -69,7 +71,6 @@ void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, | |||
69 | const char *fmt, ...); | 71 | const char *fmt, ...); |
70 | extern void warn_slowpath_null(const char *file, const int line); | 72 | extern void warn_slowpath_null(const char *file, const int line); |
71 | #define WANT_WARN_ON_SLOWPATH | 73 | #define WANT_WARN_ON_SLOWPATH |
72 | #endif | ||
73 | #define __WARN() warn_slowpath_null(__FILE__, __LINE__) | 74 | #define __WARN() warn_slowpath_null(__FILE__, __LINE__) |
74 | #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) | 75 | #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) |
75 | #define __WARN_printf_taint(taint, arg...) \ | 76 | #define __WARN_printf_taint(taint, arg...) \ |
@@ -202,4 +203,6 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
202 | # define WARN_ON_SMP(x) ({0;}) | 203 | # define WARN_ON_SMP(x) ({0;}) |
203 | #endif | 204 | #endif |
204 | 205 | ||
206 | #endif /* __ASSEMBLY__ */ | ||
207 | |||
205 | #endif | 208 | #endif |
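The bug.h change moves the C-only declarations, plus the new linux/kernel.h include, under a single #ifndef __ASSEMBLY__ block so the header stays safe to include from assembly sources that only want the macros. A hedged sketch of that split with invented names:

/* demo_defs.h -- shared between C and assembly sources */
#ifndef _DEMO_DEFS_H
#define _DEMO_DEFS_H

#define DEMO_MAGIC      0x5a5a          /* plain numbers work from .S files */

#ifndef __ASSEMBLY__                    /* defined by the build for .S files */
#include <linux/types.h>

struct demo_record {
        u32 magic;
        u32 value;
};

void demo_report(const struct demo_record *rec);
#endif /* __ASSEMBLY__ */

#endif /* _DEMO_DEFS_H */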
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 6f2b45a9b6bc..ff4947b7a976 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -484,6 +484,16 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) | |||
484 | /* | 484 | /* |
485 | * The barrier will stabilize the pmdval in a register or on | 485 | * The barrier will stabilize the pmdval in a register or on |
486 | * the stack so that it will stop changing under the code. | 486 | * the stack so that it will stop changing under the code. |
487 | * | ||
488 | * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, | ||
489 | * pmd_read_atomic is allowed to return a not atomic pmdval | ||
490 | * (for example pointing to an hugepage that has never been | ||
491 | * mapped in the pmd). The below checks will only care about | ||
492 | * the low part of the pmd with 32bit PAE x86 anyway, with the | ||
493 | * exception of pmd_none(). So the important thing is that if | ||
494 | * the low part of the pmd is found null, the high part will | ||
495 | * be also null or the pmd_none() check below would be | ||
496 | * confused. | ||
487 | */ | 497 | */ |
488 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 498 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
489 | barrier(); | 499 | barrier(); |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 73e45600f95d..bac55c215113 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -54,7 +54,7 @@ struct drm_mode_object { | |||
54 | struct drm_object_properties *properties; | 54 | struct drm_object_properties *properties; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | #define DRM_OBJECT_MAX_PROPERTY 16 | 57 | #define DRM_OBJECT_MAX_PROPERTY 24 |
58 | struct drm_object_properties { | 58 | struct drm_object_properties { |
59 | int count; | 59 | int count; |
60 | uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; | 60 | uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 58d0bdab68dd..a7aec391b7b7 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -1,7 +1,3 @@ | |||
1 | /* | ||
2 | This file is auto-generated from the drm_pciids.txt in the DRM CVS | ||
3 | Please contact dri-devel@lists.sf.net to add new cards to this list | ||
4 | */ | ||
5 | #define radeon_PCI_IDS \ | 1 | #define radeon_PCI_IDS \ |
6 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ | 2 | {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
7 | {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 3 | {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
@@ -181,6 +177,7 @@ | |||
181 | {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 177 | {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
182 | {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 178 | {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
183 | {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 179 | {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
180 | {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | ||
184 | {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 181 | {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
185 | {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 182 | {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
186 | {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ | 183 | {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ |
@@ -198,6 +195,7 @@ | |||
198 | {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 195 | {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
199 | {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 196 | {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
200 | {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 197 | {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
198 | {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | ||
201 | {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 199 | {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
202 | {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 200 | {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
203 | {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ | 201 | {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ |
@@ -229,10 +227,11 @@ | |||
229 | {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 227 | {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
230 | {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 228 | {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
231 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 229 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
230 | {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
232 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 231 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
233 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 232 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
234 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 233 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
235 | {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 234 | {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
236 | {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 235 | {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
237 | {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 236 | {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
238 | {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ | 237 | {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
@@ -531,6 +530,7 @@ | |||
531 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 530 | {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
532 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 531 | {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
533 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | 532 | {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ |
533 | {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ | ||
534 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 534 | {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
535 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 535 | {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
536 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 536 | {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
@@ -550,6 +550,7 @@ | |||
550 | {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 550 | {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
551 | {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 551 | {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
552 | {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 552 | {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
553 | {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
553 | {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 554 | {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
554 | {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 555 | {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
555 | {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 556 | {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
@@ -561,11 +562,19 @@ | |||
561 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 562 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
562 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 563 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
563 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 564 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
565 | {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
566 | {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
567 | {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
568 | {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
569 | {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
564 | {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 570 | {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
565 | {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 571 | {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
566 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 572 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
567 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 573 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
568 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 574 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
575 | {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
576 | {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
577 | {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
569 | {0, 0, 0} | 578 | {0, 0, 0} |
570 | 579 | ||
571 | #define r128_PCI_IDS \ | 580 | #define r128_PCI_IDS \ |
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index b6d7ce92eadd..68733587e700 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h | |||
@@ -64,6 +64,7 @@ struct drm_exynos_gem_map_off { | |||
64 | * A structure for mapping buffer. | 64 | * A structure for mapping buffer. |
65 | * | 65 | * |
66 | * @handle: a handle to gem object created. | 66 | * @handle: a handle to gem object created. |
67 | * @pad: just padding to be 64-bit aligned. | ||
67 | * @size: memory size to be mapped. | 68 | * @size: memory size to be mapped. |
68 | * @mapped: having user virtual address mmaped. | 69 | * @mapped: having user virtual address mmaped. |
69 | * - this variable would be filled by exynos gem module | 70 | * - this variable would be filled by exynos gem module |
@@ -72,7 +73,8 @@ struct drm_exynos_gem_map_off { | |||
72 | */ | 73 | */ |
73 | struct drm_exynos_gem_mmap { | 74 | struct drm_exynos_gem_mmap { |
74 | unsigned int handle; | 75 | unsigned int handle; |
75 | unsigned int size; | 76 | unsigned int pad; |
77 | uint64_t size; | ||
76 | uint64_t mapped; | 78 | uint64_t mapped; |
77 | }; | 79 | }; |
78 | 80 | ||
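The exynos_drm.h change widens size to 64 bits and adds an explicit pad so that both __u64 members start on an 8-byte boundary on every ABI, instead of relying on implicit compiler padding that can differ between 32-bit userspace and a 64-bit kernel. A standalone check of that layout (struct copied from the hunk above, with fixed-width types substituted for the kernel ones):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct drm_exynos_gem_mmap {
        unsigned int handle;
        unsigned int pad;       /* keeps 'size' 8-byte aligned everywhere */
        uint64_t size;
        uint64_t mapped;
};

int main(void)
{
        printf("sizeof = %zu\n", sizeof(struct drm_exynos_gem_mmap));
        printf("size   @ %zu\n", offsetof(struct drm_exynos_gem_mmap, size));
        printf("mapped @ %zu\n", offsetof(struct drm_exynos_gem_mmap, mapped));
        return 0;       /* expect 24, 8, 16 on common ABIs */
}

Without the explicit pad, a 32-bit x86 compiler may place size at offset 4 while a 64-bit one places it at offset 8, which breaks an ioctl structure shared across that boundary.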
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 81e803e90aa4..acba894374a1 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
@@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch, | |||
132 | struct clock_event_device *evt); | 132 | struct clock_event_device *evt); |
133 | extern void clockevents_register_device(struct clock_event_device *dev); | 133 | extern void clockevents_register_device(struct clock_event_device *dev); |
134 | 134 | ||
135 | extern void clockevents_config(struct clock_event_device *dev, u32 freq); | ||
135 | extern void clockevents_config_and_register(struct clock_event_device *dev, | 136 | extern void clockevents_config_and_register(struct clock_event_device *dev, |
136 | u32 freq, unsigned long min_delta, | 137 | u32 freq, unsigned long min_delta, |
137 | unsigned long max_delta); | 138 | unsigned long max_delta); |
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index e988037abd2a..51a90b7f2d60 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _LINUX_COMPACTION_H | 1 | #ifndef _LINUX_COMPACTION_H |
2 | #define _LINUX_COMPACTION_H | 2 | #define _LINUX_COMPACTION_H |
3 | 3 | ||
4 | #include <linux/node.h> | ||
5 | |||
6 | /* Return values for compact_zone() and try_to_compact_pages() */ | 4 | /* Return values for compact_zone() and try_to_compact_pages() */ |
7 | /* compaction didn't start as it was not possible or direct reclaim was more suitable */ | 5 | /* compaction didn't start as it was not possible or direct reclaim was more suitable */ |
8 | #define COMPACT_SKIPPED 0 | 6 | #define COMPACT_SKIPPED 0 |
@@ -13,23 +11,6 @@ | |||
13 | /* The full zone was compacted */ | 11 | /* The full zone was compacted */ |
14 | #define COMPACT_COMPLETE 3 | 12 | #define COMPACT_COMPLETE 3 |
15 | 13 | ||
16 | /* | ||
17 | * compaction supports three modes | ||
18 | * | ||
19 | * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans | ||
20 | * MIGRATE_MOVABLE pageblocks as migration sources and targets. | ||
21 | * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans | ||
22 | * MIGRATE_MOVABLE pageblocks as migration sources. | ||
23 | * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration | ||
24 | * targets and convers them to MIGRATE_MOVABLE if possible | ||
25 | * COMPACT_SYNC uses synchronous migration and scans all pageblocks | ||
26 | */ | ||
27 | enum compact_mode { | ||
28 | COMPACT_ASYNC_MOVABLE, | ||
29 | COMPACT_ASYNC_UNMOVABLE, | ||
30 | COMPACT_SYNC, | ||
31 | }; | ||
32 | |||
33 | #ifdef CONFIG_COMPACTION | 14 | #ifdef CONFIG_COMPACTION |
34 | extern int sysctl_compact_memory; | 15 | extern int sysctl_compact_memory; |
35 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, | 16 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index e5834aa24b9e..6a6d7aefe12d 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -47,9 +47,9 @@ | |||
47 | */ | 47 | */ |
48 | #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ | 48 | #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ |
49 | !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) | 49 | !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
50 | # define inline inline __attribute__((always_inline)) | 50 | # define inline inline __attribute__((always_inline)) notrace |
51 | # define __inline__ __inline__ __attribute__((always_inline)) | 51 | # define __inline__ __inline__ __attribute__((always_inline)) notrace |
52 | # define __inline __inline __attribute__((always_inline)) | 52 | # define __inline __inline __attribute__((always_inline)) notrace |
53 | #else | 53 | #else |
54 | /* A lot of inline functions can cause havoc with function tracing */ | 54 | /* A lot of inline functions can cause havoc with function tracing */ |
55 | # define inline inline notrace | 55 | # define inline inline notrace |
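The compiler-gcc.h hunk adds notrace alongside always_inline, so that force-inlined functions are also kept out of function instrumentation. A hedged user-space approximation of the macro (in the kernel, notrace is defined elsewhere and expands to __attribute__((no_instrument_function))):

/* Approximation of the kernel's definition, for illustration only. */
#define notrace __attribute__((no_instrument_function))
#define __always_inline_notrace \
        inline __attribute__((always_inline)) notrace

static __always_inline_notrace int add_one(int x)
{
        return x + 1;
}

int main(void)
{
        /* Builds cleanly even with -finstrument-functions; add_one is inlined. */
        return add_one(41) - 42;
}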
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h new file mode 100644 index 000000000000..0e4e2eec5c1d --- /dev/null +++ b/include/linux/frontswap.h | |||
@@ -0,0 +1,127 @@ | |||
1 | #ifndef _LINUX_FRONTSWAP_H | ||
2 | #define _LINUX_FRONTSWAP_H | ||
3 | |||
4 | #include <linux/swap.h> | ||
5 | #include <linux/mm.h> | ||
6 | #include <linux/bitops.h> | ||
7 | |||
8 | struct frontswap_ops { | ||
9 | void (*init)(unsigned); | ||
10 | int (*store)(unsigned, pgoff_t, struct page *); | ||
11 | int (*load)(unsigned, pgoff_t, struct page *); | ||
12 | void (*invalidate_page)(unsigned, pgoff_t); | ||
13 | void (*invalidate_area)(unsigned); | ||
14 | }; | ||
15 | |||
16 | extern bool frontswap_enabled; | ||
17 | extern struct frontswap_ops | ||
18 | frontswap_register_ops(struct frontswap_ops *ops); | ||
19 | extern void frontswap_shrink(unsigned long); | ||
20 | extern unsigned long frontswap_curr_pages(void); | ||
21 | extern void frontswap_writethrough(bool); | ||
22 | |||
23 | extern void __frontswap_init(unsigned type); | ||
24 | extern int __frontswap_store(struct page *page); | ||
25 | extern int __frontswap_load(struct page *page); | ||
26 | extern void __frontswap_invalidate_page(unsigned, pgoff_t); | ||
27 | extern void __frontswap_invalidate_area(unsigned); | ||
28 | |||
29 | #ifdef CONFIG_FRONTSWAP | ||
30 | |||
31 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) | ||
32 | { | ||
33 | bool ret = false; | ||
34 | |||
35 | if (frontswap_enabled && sis->frontswap_map) | ||
36 | ret = test_bit(offset, sis->frontswap_map); | ||
37 | return ret; | ||
38 | } | ||
39 | |||
40 | static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) | ||
41 | { | ||
42 | if (frontswap_enabled && sis->frontswap_map) | ||
43 | set_bit(offset, sis->frontswap_map); | ||
44 | } | ||
45 | |||
46 | static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) | ||
47 | { | ||
48 | if (frontswap_enabled && sis->frontswap_map) | ||
49 | clear_bit(offset, sis->frontswap_map); | ||
50 | } | ||
51 | |||
52 | static inline void frontswap_map_set(struct swap_info_struct *p, | ||
53 | unsigned long *map) | ||
54 | { | ||
55 | p->frontswap_map = map; | ||
56 | } | ||
57 | |||
58 | static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) | ||
59 | { | ||
60 | return p->frontswap_map; | ||
61 | } | ||
62 | #else | ||
63 | /* all inline routines become no-ops and all externs are ignored */ | ||
64 | |||
65 | #define frontswap_enabled (0) | ||
66 | |||
67 | static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) | ||
68 | { | ||
69 | return false; | ||
70 | } | ||
71 | |||
72 | static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) | ||
73 | { | ||
74 | } | ||
75 | |||
76 | static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) | ||
77 | { | ||
78 | } | ||
79 | |||
80 | static inline void frontswap_map_set(struct swap_info_struct *p, | ||
81 | unsigned long *map) | ||
82 | { | ||
83 | } | ||
84 | |||
85 | static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) | ||
86 | { | ||
87 | return NULL; | ||
88 | } | ||
89 | #endif | ||
90 | |||
91 | static inline int frontswap_store(struct page *page) | ||
92 | { | ||
93 | int ret = -1; | ||
94 | |||
95 | if (frontswap_enabled) | ||
96 | ret = __frontswap_store(page); | ||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | static inline int frontswap_load(struct page *page) | ||
101 | { | ||
102 | int ret = -1; | ||
103 | |||
104 | if (frontswap_enabled) | ||
105 | ret = __frontswap_load(page); | ||
106 | return ret; | ||
107 | } | ||
108 | |||
109 | static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) | ||
110 | { | ||
111 | if (frontswap_enabled) | ||
112 | __frontswap_invalidate_page(type, offset); | ||
113 | } | ||
114 | |||
115 | static inline void frontswap_invalidate_area(unsigned type) | ||
116 | { | ||
117 | if (frontswap_enabled) | ||
118 | __frontswap_invalidate_area(type); | ||
119 | } | ||
120 | |||
121 | static inline void frontswap_init(unsigned type) | ||
122 | { | ||
123 | if (frontswap_enabled) | ||
124 | __frontswap_init(type); | ||
125 | } | ||
126 | |||
127 | #endif /* _LINUX_FRONTSWAP_H */ | ||
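The new header splits frontswap into inline fast paths gated on frontswap_enabled and out-of-line __frontswap_* slow paths, with a backend plugging in through frontswap_register_ops(). A hedged sketch of a do-nothing backend built against this header (backend names are invented; the 0/-1 return convention is assumed from the inline wrappers above):

#include <linux/module.h>
#include <linux/frontswap.h>

static void demo_fs_init(unsigned type) { }

static int demo_fs_store(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* refuse everything: the page goes to the real swap device */
}

static int demo_fs_load(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* never hit, since nothing was ever stored */
}

static void demo_fs_invalidate_page(unsigned type, pgoff_t offset) { }
static void demo_fs_invalidate_area(unsigned type) { }

static struct frontswap_ops demo_fs_ops = {
        .init            = demo_fs_init,
        .store           = demo_fs_store,
        .load            = demo_fs_load,
        .invalidate_page = demo_fs_invalidate_page,
        .invalidate_area = demo_fs_invalidate_area,
};

static int __init demo_frontswap_init(void)
{
        /* Previous ops are returned by value; ignored in this sketch. */
        frontswap_register_ops(&demo_fs_ops);
        return 0;
}
module_init(demo_frontswap_init);
MODULE_LICENSE("GPL");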
diff --git a/include/linux/fs.h b/include/linux/fs.h index 51978ed43e97..17fd887c798f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -802,13 +802,14 @@ struct inode { | |||
802 | unsigned int __i_nlink; | 802 | unsigned int __i_nlink; |
803 | }; | 803 | }; |
804 | dev_t i_rdev; | 804 | dev_t i_rdev; |
805 | loff_t i_size; | ||
805 | struct timespec i_atime; | 806 | struct timespec i_atime; |
806 | struct timespec i_mtime; | 807 | struct timespec i_mtime; |
807 | struct timespec i_ctime; | 808 | struct timespec i_ctime; |
808 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ | 809 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ |
809 | unsigned short i_bytes; | 810 | unsigned short i_bytes; |
811 | unsigned int i_blkbits; | ||
810 | blkcnt_t i_blocks; | 812 | blkcnt_t i_blocks; |
811 | loff_t i_size; | ||
812 | 813 | ||
813 | #ifdef __NEED_I_SIZE_ORDERED | 814 | #ifdef __NEED_I_SIZE_ORDERED |
814 | seqcount_t i_size_seqcount; | 815 | seqcount_t i_size_seqcount; |
@@ -828,9 +829,8 @@ struct inode { | |||
828 | struct list_head i_dentry; | 829 | struct list_head i_dentry; |
829 | struct rcu_head i_rcu; | 830 | struct rcu_head i_rcu; |
830 | }; | 831 | }; |
831 | atomic_t i_count; | ||
832 | unsigned int i_blkbits; | ||
833 | u64 i_version; | 832 | u64 i_version; |
833 | atomic_t i_count; | ||
834 | atomic_t i_dio_count; | 834 | atomic_t i_dio_count; |
835 | atomic_t i_writecount; | 835 | atomic_t i_writecount; |
836 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ | 836 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ |
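The inode hunk regroups members (i_size moves next to the timestamps, i_blkbits next to i_bytes, i_version next to i_count), the kind of reshuffle that changes how much alignment padding a structure carries and which fields share cache lines. A standalone illustration of the general padding effect, not of struct inode's actual layout:

#include <stdio.h>
#include <stdint.h>

/* Same members, two orders: interleaving small and large fields adds holes. */
struct scattered {
        uint16_t a;
        uint64_t b;
        uint32_t c;
        uint64_t d;
        uint8_t  e;
};

struct grouped {
        uint64_t b;
        uint64_t d;
        uint32_t c;
        uint16_t a;
        uint8_t  e;
};

int main(void)
{
        printf("scattered: %zu bytes\n", sizeof(struct scattered)); /* typically 40 */
        printf("grouped:   %zu bytes\n", sizeof(struct grouped));   /* typically 24 */
        return 0;
}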
diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 8f2ab8fef929..9303348965fb 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h | |||
@@ -54,6 +54,9 @@ | |||
54 | * 7.18 | 54 | * 7.18 |
55 | * - add FUSE_IOCTL_DIR flag | 55 | * - add FUSE_IOCTL_DIR flag |
56 | * - add FUSE_NOTIFY_DELETE | 56 | * - add FUSE_NOTIFY_DELETE |
57 | * | ||
58 | * 7.19 | ||
59 | * - add FUSE_FALLOCATE | ||
57 | */ | 60 | */ |
58 | 61 | ||
59 | #ifndef _LINUX_FUSE_H | 62 | #ifndef _LINUX_FUSE_H |
@@ -85,7 +88,7 @@ | |||
85 | #define FUSE_KERNEL_VERSION 7 | 88 | #define FUSE_KERNEL_VERSION 7 |
86 | 89 | ||
87 | /** Minor version number of this interface */ | 90 | /** Minor version number of this interface */ |
88 | #define FUSE_KERNEL_MINOR_VERSION 18 | 91 | #define FUSE_KERNEL_MINOR_VERSION 19 |
89 | 92 | ||
90 | /** The node ID of the root inode */ | 93 | /** The node ID of the root inode */ |
91 | #define FUSE_ROOT_ID 1 | 94 | #define FUSE_ROOT_ID 1 |
@@ -278,6 +281,7 @@ enum fuse_opcode { | |||
278 | FUSE_POLL = 40, | 281 | FUSE_POLL = 40, |
279 | FUSE_NOTIFY_REPLY = 41, | 282 | FUSE_NOTIFY_REPLY = 41, |
280 | FUSE_BATCH_FORGET = 42, | 283 | FUSE_BATCH_FORGET = 42, |
284 | FUSE_FALLOCATE = 43, | ||
281 | 285 | ||
282 | /* CUSE specific operations */ | 286 | /* CUSE specific operations */ |
283 | CUSE_INIT = 4096, | 287 | CUSE_INIT = 4096, |
@@ -571,6 +575,14 @@ struct fuse_notify_poll_wakeup_out { | |||
571 | __u64 kh; | 575 | __u64 kh; |
572 | }; | 576 | }; |
573 | 577 | ||
578 | struct fuse_fallocate_in { | ||
579 | __u64 fh; | ||
580 | __u64 offset; | ||
581 | __u64 length; | ||
582 | __u32 mode; | ||
583 | __u32 padding; | ||
584 | }; | ||
585 | |||
574 | struct fuse_in_header { | 586 | struct fuse_in_header { |
575 | __u32 len; | 587 | __u32 len; |
576 | __u32 opcode; | 588 | __u32 opcode; |
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h new file mode 100644 index 000000000000..a65c86429e84 --- /dev/null +++ b/include/linux/i2c-mux-pinctrl.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * i2c-mux-pinctrl platform data | ||
3 | * | ||
4 | * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #ifndef _LINUX_I2C_MUX_PINCTRL_H | ||
20 | #define _LINUX_I2C_MUX_PINCTRL_H | ||
21 | |||
22 | /** | ||
23 | * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl | ||
24 | * @parent_bus_num: Parent I2C bus number | ||
25 | * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic. | ||
26 | * @bus_count: Number of child busses. Also the number of elements in | ||
27 | * @pinctrl_states | ||
28 | * @pinctrl_states: The names of the pinctrl state to select for each child bus | ||
29 | * @pinctrl_state_idle: The pinctrl state to select when no child bus is being | ||
30 | * accessed. If NULL, the most recently used pinctrl state will be left | ||
31 | * selected. | ||
32 | */ | ||
33 | struct i2c_mux_pinctrl_platform_data { | ||
34 | int parent_bus_num; | ||
35 | int base_bus_num; | ||
36 | int bus_count; | ||
37 | const char **pinctrl_states; | ||
38 | const char *pinctrl_state_idle; | ||
39 | }; | ||
40 | |||
41 | #endif | ||
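For boards that do not use device tree, the platform data above would be supplied from board code. A minimal sketch, with made-up bus numbers and pinctrl state names, and the platform_device registration itself elided:

#include <linux/i2c-mux-pinctrl.h>

/* Example only: two child buses selected by the "i2c-a"/"i2c-b" pinctrl states. */
static const char *demo_i2c_mux_states[] = { "i2c-a", "i2c-b" };

static struct i2c_mux_pinctrl_platform_data demo_i2c_mux_pdata = {
	.parent_bus_num		= 0,
	.base_bus_num		= 0,		/* 0 => dynamic child bus numbering */
	.bus_count		= 2,		/* matches the entries above */
	.pinctrl_states		= demo_i2c_mux_states,
	.pinctrl_state_idle	= NULL,		/* leave the last-used state selected */
};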
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e4baff5f7ff4..9e65eff6af3b 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -149,6 +149,7 @@ extern struct cred init_cred; | |||
149 | .normal_prio = MAX_PRIO-20, \ | 149 | .normal_prio = MAX_PRIO-20, \ |
150 | .policy = SCHED_NORMAL, \ | 150 | .policy = SCHED_NORMAL, \ |
151 | .cpus_allowed = CPU_MASK_ALL, \ | 151 | .cpus_allowed = CPU_MASK_ALL, \ |
152 | .nr_cpus_allowed= NR_CPUS, \ | ||
152 | .mm = NULL, \ | 153 | .mm = NULL, \ |
153 | .active_mm = &init_mm, \ | 154 | .active_mm = &init_mm, \ |
154 | .se = { \ | 155 | .se = { \ |
@@ -157,7 +158,6 @@ extern struct cred init_cred; | |||
157 | .rt = { \ | 158 | .rt = { \ |
158 | .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ | 159 | .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ |
159 | .time_slice = RR_TIMESLICE, \ | 160 | .time_slice = RR_TIMESLICE, \ |
160 | .nr_cpus_allowed = NR_CPUS, \ | ||
161 | }, \ | 161 | }, \ |
162 | .tasks = LIST_HEAD_INIT(tsk.tasks), \ | 162 | .tasks = LIST_HEAD_INIT(tsk.tasks), \ |
163 | INIT_PUSHABLE_TASKS(tsk) \ | 163 | INIT_PUSHABLE_TASKS(tsk) \ |
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 35f7237ec972..d6bd50110ec2 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h | |||
@@ -21,6 +21,7 @@ | |||
21 | * is passed to the kernel. | 21 | * is passed to the kernel. |
22 | */ | 22 | */ |
23 | enum kmsg_dump_reason { | 23 | enum kmsg_dump_reason { |
24 | KMSG_DUMP_UNDEF, | ||
24 | KMSG_DUMP_PANIC, | 25 | KMSG_DUMP_PANIC, |
25 | KMSG_DUMP_OOPS, | 26 | KMSG_DUMP_OOPS, |
26 | KMSG_DUMP_EMERG, | 27 | KMSG_DUMP_EMERG, |
@@ -31,23 +32,37 @@ enum kmsg_dump_reason { | |||
31 | 32 | ||
32 | /** | 33 | /** |
33 | * struct kmsg_dumper - kernel crash message dumper structure | 34 | * struct kmsg_dumper - kernel crash message dumper structure |
34 | * @dump: The callback which gets called on crashes. The buffer is passed | ||
35 | * as two sections, where s1 (length l1) contains the older | ||
36 | * messages and s2 (length l2) contains the newer. | ||
37 | * @list: Entry in the dumper list (private) | 35 | * @list: Entry in the dumper list (private) |
36 | * @dump: Call into dumping code which will retrieve the data | ||
37 | * through the record iterator | ||
38 | * @max_reason: filter for highest reason number that should be dumped | ||
38 | * @registered: Flag that specifies if this is already registered | 39 | * @registered: Flag that specifies if this is already registered |
39 | */ | 40 | */ |
40 | struct kmsg_dumper { | 41 | struct kmsg_dumper { |
41 | void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason, | ||
42 | const char *s1, unsigned long l1, | ||
43 | const char *s2, unsigned long l2); | ||
44 | struct list_head list; | 42 | struct list_head list; |
45 | int registered; | 43 | void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); |
44 | enum kmsg_dump_reason max_reason; | ||
45 | bool active; | ||
46 | bool registered; | ||
47 | |||
48 | /* private state of the kmsg iterator */ | ||
49 | u32 cur_idx; | ||
50 | u32 next_idx; | ||
51 | u64 cur_seq; | ||
52 | u64 next_seq; | ||
46 | }; | 53 | }; |
47 | 54 | ||
48 | #ifdef CONFIG_PRINTK | 55 | #ifdef CONFIG_PRINTK |
49 | void kmsg_dump(enum kmsg_dump_reason reason); | 56 | void kmsg_dump(enum kmsg_dump_reason reason); |
50 | 57 | ||
58 | bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, | ||
59 | char *line, size_t size, size_t *len); | ||
60 | |||
61 | bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, | ||
62 | char *buf, size_t size, size_t *len); | ||
63 | |||
64 | void kmsg_dump_rewind(struct kmsg_dumper *dumper); | ||
65 | |||
51 | int kmsg_dump_register(struct kmsg_dumper *dumper); | 66 | int kmsg_dump_register(struct kmsg_dumper *dumper); |
52 | 67 | ||
53 | int kmsg_dump_unregister(struct kmsg_dumper *dumper); | 68 | int kmsg_dump_unregister(struct kmsg_dumper *dumper); |
@@ -56,6 +71,22 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason) | |||
56 | { | 71 | { |
57 | } | 72 | } |
58 | 73 | ||
74 | static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, | ||
75 | const char *line, size_t size, size_t *len) | ||
76 | { | ||
77 | return false; | ||
78 | } | ||
79 | |||
80 | static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, | ||
81 | char *buf, size_t size, size_t *len) | ||
82 | { | ||
83 | return false; | ||
84 | } | ||
85 | |||
86 | static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper) | ||
87 | { | ||
88 | } | ||
89 | |||
59 | static inline int kmsg_dump_register(struct kmsg_dumper *dumper) | 90 | static inline int kmsg_dump_register(struct kmsg_dumper *dumper) |
60 | { | 91 | { |
61 | return -EINVAL; | 92 | return -EINVAL; |
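With the (s1, l1, s2, l2) buffer arguments gone, a dumper now pulls log records itself through the iterator helpers declared above. A rough sketch of the new callback shape (emit_line() is a stand-in for whatever a real dumper does with each record):

#include <linux/kmsg_dump.h>
#include <linux/module.h>

static void emit_line(const char *line, size_t len)
{
	/* Stand-in sink: a real dumper would push this to persistent storage. */
	(void)line;
	(void)len;
}

static void demo_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	static char line[1024];
	size_t len;

	/* Walk the records one line at a time, oldest first. */
	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
		emit_line(line, len);
}

static struct kmsg_dumper demo_dumper = {
	.dump		= demo_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* only dump for oops and panic */
};

static int __init demo_dumper_init(void)
{
	return kmsg_dump_register(&demo_dumper);
}
module_init(demo_dumper_init);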
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index dad95bdd06d7..704a626d94a0 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -57,8 +57,18 @@ struct page { | |||
57 | }; | 57 | }; |
58 | 58 | ||
59 | union { | 59 | union { |
60 | #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ | ||
61 | defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) | ||
60 | /* Used for cmpxchg_double in slub */ | 62 | /* Used for cmpxchg_double in slub */ |
61 | unsigned long counters; | 63 | unsigned long counters; |
64 | #else | ||
65 | /* | ||
66 | * Keep _count separate from slub cmpxchg_double data. | ||
67 | * As the rest of the double word is protected by | ||
68 | * slab_lock but _count is not. | ||
69 | */ | ||
70 | unsigned counters; | ||
71 | #endif | ||
62 | 72 | ||
63 | struct { | 73 | struct { |
64 | 74 | ||
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h index 5cdc96da9dd5..e78c0e236e9d 100644 --- a/include/linux/mmc/sdhci-spear.h +++ b/include/linux/mmc/sdhci-spear.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * SDHCI declarations specific to ST SPEAr platform | 4 | * SDHCI declarations specific to ST SPEAr platform |
5 | * | 5 | * |
6 | * Copyright (C) 2010 ST Microelectronics | 6 | * Copyright (C) 2010 ST Microelectronics |
7 | * Viresh Kumar<viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index c9fe66c58f8f..17446d3c3602 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h | |||
@@ -98,7 +98,9 @@ | |||
98 | 98 | ||
99 | #define SDIO_CCCR_IF 0x07 /* bus interface controls */ | 99 | #define SDIO_CCCR_IF 0x07 /* bus interface controls */ |
100 | 100 | ||
101 | #define SDIO_BUS_WIDTH_MASK 0x03 /* data bus width setting */ | ||
101 | #define SDIO_BUS_WIDTH_1BIT 0x00 | 102 | #define SDIO_BUS_WIDTH_1BIT 0x00 |
103 | #define SDIO_BUS_WIDTH_RESERVED 0x01 | ||
102 | #define SDIO_BUS_WIDTH_4BIT 0x02 | 104 | #define SDIO_BUS_WIDTH_4BIT 0x02 |
103 | #define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ | 105 | #define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ |
104 | #define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ | 106 | #define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ |
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 1b14d25162cb..d6a58065c09c 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -128,7 +128,7 @@ struct kparam_array | |||
128 | * The ops can have NULL set or get functions. | 128 | * The ops can have NULL set or get functions. |
129 | */ | 129 | */ |
130 | #define module_param_cb(name, ops, arg, perm) \ | 130 | #define module_param_cb(name, ops, arg, perm) \ |
131 | __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0) | 131 | __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) |
132 | 132 | ||
133 | /** | 133 | /** |
134 | * <level>_param_cb - general callback for a module/cmdline parameter | 134 | * <level>_param_cb - general callback for a module/cmdline parameter |
@@ -192,7 +192,7 @@ struct kparam_array | |||
192 | { (void *)set, (void *)get }; \ | 192 | { (void *)set, (void *)get }; \ |
193 | __module_param_call(MODULE_PARAM_PREFIX, \ | 193 | __module_param_call(MODULE_PARAM_PREFIX, \ |
194 | name, &__param_ops_##name, arg, \ | 194 | name, &__param_ops_##name, arg, \ |
195 | (perm) + sizeof(__check_old_set_param(set))*0, 0) | 195 | (perm) + sizeof(__check_old_set_param(set))*0, -1) |
196 | 196 | ||
197 | /* We don't get oldget: it's often a new-style param_get_uint, etc. */ | 197 | /* We don't get oldget: it's often a new-style param_get_uint, etc. */ |
198 | static inline int | 198 | static inline int |
@@ -272,7 +272,7 @@ static inline void __kernel_param_unlock(void) | |||
272 | */ | 272 | */ |
273 | #define core_param(name, var, type, perm) \ | 273 | #define core_param(name, var, type, perm) \ |
274 | param_check_##type(name, &(var)); \ | 274 | param_check_##type(name, &(var)); \ |
275 | __module_param_call("", name, &param_ops_##type, &var, perm, 0) | 275 | __module_param_call("", name, &param_ops_##type, &var, perm, -1) |
276 | #endif /* !MODULE */ | 276 | #endif /* !MODULE */ |
277 | 277 | ||
278 | /** | 278 | /** |
@@ -290,7 +290,7 @@ static inline void __kernel_param_unlock(void) | |||
290 | = { len, string }; \ | 290 | = { len, string }; \ |
291 | __module_param_call(MODULE_PARAM_PREFIX, name, \ | 291 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
292 | &param_ops_string, \ | 292 | &param_ops_string, \ |
293 | .str = &__param_string_##name, perm, 0); \ | 293 | .str = &__param_string_##name, perm, -1); \ |
294 | __MODULE_PARM_TYPE(name, "string") | 294 | __MODULE_PARM_TYPE(name, "string") |
295 | 295 | ||
296 | /** | 296 | /** |
@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); | |||
432 | __module_param_call(MODULE_PARAM_PREFIX, name, \ | 432 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
433 | &param_array_ops, \ | 433 | &param_array_ops, \ |
434 | .arr = &__param_arr_##name, \ | 434 | .arr = &__param_arr_##name, \ |
435 | perm, 0); \ | 435 | perm, -1); \ |
436 | __MODULE_PARM_TYPE(name, "array of " #type) | 436 | __MODULE_PARM_TYPE(name, "array of " #type) |
437 | 437 | ||
438 | extern struct kernel_param_ops param_array_ops; | 438 | extern struct kernel_param_ops param_array_ops; |
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h index abb1650940d2..826fc5807577 100644 --- a/include/linux/netfilter/xt_HMARK.h +++ b/include/linux/netfilter/xt_HMARK.h | |||
@@ -27,7 +27,12 @@ union hmark_ports { | |||
27 | __u16 src; | 27 | __u16 src; |
28 | __u16 dst; | 28 | __u16 dst; |
29 | } p16; | 29 | } p16; |
30 | struct { | ||
31 | __be16 src; | ||
32 | __be16 dst; | ||
33 | } b16; | ||
30 | __u32 v32; | 34 | __u32 v32; |
35 | __be32 b32; | ||
31 | }; | 36 | }; |
32 | 37 | ||
33 | struct xt_hmark_info { | 38 | struct xt_hmark_info { |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index fbb78fb09bd2..f58325a1d8fb 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -25,6 +25,7 @@ struct nfs41_impl_id; | |||
25 | */ | 25 | */ |
26 | struct nfs_client { | 26 | struct nfs_client { |
27 | atomic_t cl_count; | 27 | atomic_t cl_count; |
28 | atomic_t cl_mds_count; | ||
28 | int cl_cons_state; /* current construction state (-ve: init error) */ | 29 | int cl_cons_state; /* current construction state (-ve: init error) */ |
29 | #define NFS_CS_READY 0 /* ready to be used */ | 30 | #define NFS_CS_READY 0 /* ready to be used */ |
30 | #define NFS_CS_INITING 1 /* busy initialising */ | 31 | #define NFS_CS_INITING 1 /* busy initialising */ |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index d1a7bf51c326..8aadd90b808a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -348,6 +348,7 @@ struct nfs_openargs { | |||
348 | const struct qstr * name; | 348 | const struct qstr * name; |
349 | const struct nfs_server *server; /* Needed for ID mapping */ | 349 | const struct nfs_server *server; /* Needed for ID mapping */ |
350 | const u32 * bitmask; | 350 | const u32 * bitmask; |
351 | const u32 * open_bitmap; | ||
351 | __u32 claim; | 352 | __u32 claim; |
352 | struct nfs4_sequence_args seq_args; | 353 | struct nfs4_sequence_args seq_args; |
353 | }; | 354 | }; |
@@ -1236,6 +1237,7 @@ struct nfs_pgio_header { | |||
1236 | struct list_head rpc_list; | 1237 | struct list_head rpc_list; |
1237 | atomic_t refcnt; | 1238 | atomic_t refcnt; |
1238 | struct nfs_page *req; | 1239 | struct nfs_page *req; |
1240 | struct nfs_writeverf *verf; | ||
1239 | struct pnfs_layout_segment *lseg; | 1241 | struct pnfs_layout_segment *lseg; |
1240 | loff_t io_start; | 1242 | loff_t io_start; |
1241 | const struct rpc_call_ops *mds_ops; | 1243 | const struct rpc_call_ops *mds_ops; |
@@ -1273,6 +1275,7 @@ struct nfs_write_data { | |||
1273 | struct nfs_write_header { | 1275 | struct nfs_write_header { |
1274 | struct nfs_pgio_header header; | 1276 | struct nfs_pgio_header header; |
1275 | struct nfs_write_data rpc_data; | 1277 | struct nfs_write_data rpc_data; |
1278 | struct nfs_writeverf verf; | ||
1276 | }; | 1279 | }; |
1277 | 1280 | ||
1278 | struct nfs_mds_commit_info { | 1281 | struct nfs_mds_commit_info { |
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h index a6ee9aa898bb..a7b4fc386e63 100644 --- a/include/linux/pata_arasan_cf_data.h +++ b/include/linux/pata_arasan_cf_data.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Arasan Compact Flash host controller platform data header file | 4 | * Arasan Compact Flash host controller platform data header file |
5 | * | 5 | * |
6 | * Copyright (C) 2011 ST Microelectronics | 6 | * Copyright (C) 2011 ST Microelectronics |
7 | * Viresh Kumar <viresh.kumar@st.com> | 7 | * Viresh Kumar <viresh.linux@gmail.com> |
8 | * | 8 | * |
9 | * This file is licensed under the terms of the GNU General Public | 9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any | 10 | * License version 2. This program is licensed "as is" without any |
diff --git a/include/linux/pci.h b/include/linux/pci.h index d8c379dba6ad..fefb4e19bf6a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -176,6 +176,8 @@ enum pci_dev_flags { | |||
176 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, | 176 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, |
177 | /* Provide indication device is assigned by a Virtual Machine Manager */ | 177 | /* Provide indication device is assigned by a Virtual Machine Manager */ |
178 | PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, | 178 | PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, |
179 | /* Device causes system crash if in D3 during S3 sleep */ | ||
180 | PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8, | ||
179 | }; | 181 | }; |
180 | 182 | ||
181 | enum pci_irq_reroute_variant { | 183 | enum pci_irq_reroute_variant { |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f32578634d9d..45db49f64bb4 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -555,6 +555,8 @@ enum perf_event_type { | |||
555 | PERF_RECORD_MAX, /* non-ABI */ | 555 | PERF_RECORD_MAX, /* non-ABI */ |
556 | }; | 556 | }; |
557 | 557 | ||
558 | #define PERF_MAX_STACK_DEPTH 127 | ||
559 | |||
558 | enum perf_callchain_context { | 560 | enum perf_callchain_context { |
559 | PERF_CONTEXT_HV = (__u64)-32, | 561 | PERF_CONTEXT_HV = (__u64)-32, |
560 | PERF_CONTEXT_KERNEL = (__u64)-128, | 562 | PERF_CONTEXT_KERNEL = (__u64)-128, |
@@ -609,8 +611,6 @@ struct perf_guest_info_callbacks { | |||
609 | #include <linux/sysfs.h> | 611 | #include <linux/sysfs.h> |
610 | #include <asm/local.h> | 612 | #include <asm/local.h> |
611 | 613 | ||
612 | #define PERF_MAX_STACK_DEPTH 255 | ||
613 | |||
614 | struct perf_callchain_entry { | 614 | struct perf_callchain_entry { |
615 | __u64 nr; | 615 | __u64 nr; |
616 | __u64 ip[PERF_MAX_STACK_DEPTH]; | 616 | __u64 ip[PERF_MAX_STACK_DEPTH]; |
diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 711e0a30aacc..3988012255dc 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h | |||
@@ -127,8 +127,8 @@ | |||
127 | #define PR_SET_PTRACER 0x59616d61 | 127 | #define PR_SET_PTRACER 0x59616d61 |
128 | # define PR_SET_PTRACER_ANY ((unsigned long)-1) | 128 | # define PR_SET_PTRACER_ANY ((unsigned long)-1) |
129 | 129 | ||
130 | #define PR_SET_CHILD_SUBREAPER 36 | 130 | #define PR_SET_CHILD_SUBREAPER 36 |
131 | #define PR_GET_CHILD_SUBREAPER 37 | 131 | #define PR_GET_CHILD_SUBREAPER 37 |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * If no_new_privs is set, then operations that grant new privileges (i.e. | 134 | * If no_new_privs is set, then operations that grant new privileges (i.e. |
@@ -142,7 +142,9 @@ | |||
142 | * asking selinux for a specific new context (e.g. with runcon) will result | 142 | * asking selinux for a specific new context (e.g. with runcon) will result |
143 | * in execve returning -EPERM. | 143 | * in execve returning -EPERM. |
144 | */ | 144 | */ |
145 | #define PR_SET_NO_NEW_PRIVS 38 | 145 | #define PR_SET_NO_NEW_PRIVS 38 |
146 | #define PR_GET_NO_NEW_PRIVS 39 | 146 | #define PR_GET_NO_NEW_PRIVS 39 |
147 | |||
148 | #define PR_GET_TID_ADDRESS 40 | ||
147 | 149 | ||
148 | #endif /* _LINUX_PRCTL_H */ | 150 | #endif /* _LINUX_PRCTL_H */ |
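The prctl additions are userspace-visible. A small sketch of how a program might exercise the new PR_GET_NO_NEW_PRIVS and PR_GET_TID_ADDRESS queries (PR_GET_TID_ADDRESS only succeeds on kernels built with the corresponding option; the fallback defines are just in case the libc headers lag behind):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif
#ifndef PR_GET_TID_ADDRESS
#define PR_GET_TID_ADDRESS 40
#endif

int main(void)
{
	int *tid_addr = NULL;

	/* Once set, no_new_privs can never be cleared for this task. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == 0)
		printf("no_new_privs now: %d\n",
		       prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));

	/* Reads back the clear_child_tid address set via set_tid_address(2). */
	if (prctl(PR_GET_TID_ADDRESS, &tid_addr, 0, 0, 0) == 0)
		printf("clear_child_tid address: %p\n", (void *)tid_addr);

	return 0;
}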
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 44835fb39793..f36632061c66 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h | |||
@@ -160,7 +160,9 @@ enum pxa_ssp_type { | |||
160 | PXA25x_SSP, /* pxa 210, 250, 255, 26x */ | 160 | PXA25x_SSP, /* pxa 210, 250, 255, 26x */ |
161 | PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ | 161 | PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ |
162 | PXA27x_SSP, | 162 | PXA27x_SSP, |
163 | PXA3xx_SSP, | ||
163 | PXA168_SSP, | 164 | PXA168_SSP, |
165 | PXA910_SSP, | ||
164 | CE4100_SSP, | 166 | CE4100_SSP, |
165 | }; | 167 | }; |
166 | 168 | ||
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 0d04cd69ab9b..ffc444c38b0a 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) | |||
368 | iter->index++; | 368 | iter->index++; |
369 | if (likely(*slot)) | 369 | if (likely(*slot)) |
370 | return slot; | 370 | return slot; |
371 | if (flags & RADIX_TREE_ITER_CONTIG) | 371 | if (flags & RADIX_TREE_ITER_CONTIG) { |
372 | /* forbid switching to the next chunk */ | ||
373 | iter->next_index = 0; | ||
372 | break; | 374 | break; |
375 | } | ||
373 | } | 376 | } |
374 | } | 377 | } |
375 | return NULL; | 378 | return NULL; |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index adb5e5a38cae..854dc4c5c271 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -87,8 +87,9 @@ static inline void kfree_call_rcu(struct rcu_head *head, | |||
87 | 87 | ||
88 | #ifdef CONFIG_TINY_RCU | 88 | #ifdef CONFIG_TINY_RCU |
89 | 89 | ||
90 | static inline int rcu_needs_cpu(int cpu) | 90 | static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
91 | { | 91 | { |
92 | *delta_jiffies = ULONG_MAX; | ||
92 | return 0; | 93 | return 0; |
93 | } | 94 | } |
94 | 95 | ||
@@ -96,8 +97,9 @@ static inline int rcu_needs_cpu(int cpu) | |||
96 | 97 | ||
97 | int rcu_preempt_needs_cpu(void); | 98 | int rcu_preempt_needs_cpu(void); |
98 | 99 | ||
99 | static inline int rcu_needs_cpu(int cpu) | 100 | static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
100 | { | 101 | { |
102 | *delta_jiffies = ULONG_MAX; | ||
101 | return rcu_preempt_needs_cpu(); | 103 | return rcu_preempt_needs_cpu(); |
102 | } | 104 | } |
103 | 105 | ||
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3c6083cde4fc..952b79339304 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | extern void rcu_init(void); | 33 | extern void rcu_init(void); |
34 | extern void rcu_note_context_switch(int cpu); | 34 | extern void rcu_note_context_switch(int cpu); |
35 | extern int rcu_needs_cpu(int cpu); | 35 | extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); |
36 | extern void rcu_cpu_stall_reset(void); | 36 | extern void rcu_cpu_stall_reset(void); |
37 | 37 | ||
38 | /* | 38 | /* |
diff --git a/include/linux/sched.h b/include/linux/sched.h index f34437e835a7..4059c0f33f07 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void); | |||
145 | 145 | ||
146 | 146 | ||
147 | extern void calc_global_load(unsigned long ticks); | 147 | extern void calc_global_load(unsigned long ticks); |
148 | extern void update_cpu_load_nohz(void); | ||
148 | 149 | ||
149 | extern unsigned long get_parent_ip(unsigned long addr); | 150 | extern unsigned long get_parent_ip(unsigned long addr); |
150 | 151 | ||
@@ -438,6 +439,7 @@ extern int get_dumpable(struct mm_struct *mm); | |||
438 | /* leave room for more dump flags */ | 439 | /* leave room for more dump flags */ |
439 | #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ | 440 | #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ |
440 | #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ | 441 | #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ |
442 | #define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ | ||
441 | 443 | ||
442 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) | 444 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) |
443 | 445 | ||
@@ -875,6 +877,8 @@ struct sched_group_power { | |||
875 | * Number of busy cpus in this group. | 877 | * Number of busy cpus in this group. |
876 | */ | 878 | */ |
877 | atomic_t nr_busy_cpus; | 879 | atomic_t nr_busy_cpus; |
880 | |||
881 | unsigned long cpumask[0]; /* iteration mask */ | ||
878 | }; | 882 | }; |
879 | 883 | ||
880 | struct sched_group { | 884 | struct sched_group { |
@@ -899,6 +903,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | |||
899 | return to_cpumask(sg->cpumask); | 903 | return to_cpumask(sg->cpumask); |
900 | } | 904 | } |
901 | 905 | ||
906 | /* | ||
907 | * cpumask masking which cpus in the group are allowed to iterate up the domain | ||
908 | * tree. | ||
909 | */ | ||
910 | static inline struct cpumask *sched_group_mask(struct sched_group *sg) | ||
911 | { | ||
912 | return to_cpumask(sg->sgp->cpumask); | ||
913 | } | ||
914 | |||
902 | /** | 915 | /** |
903 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. | 916 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. |
904 | * @group: The group whose first cpu is to be returned. | 917 | * @group: The group whose first cpu is to be returned. |
@@ -1187,7 +1200,6 @@ struct sched_rt_entity { | |||
1187 | struct list_head run_list; | 1200 | struct list_head run_list; |
1188 | unsigned long timeout; | 1201 | unsigned long timeout; |
1189 | unsigned int time_slice; | 1202 | unsigned int time_slice; |
1190 | int nr_cpus_allowed; | ||
1191 | 1203 | ||
1192 | struct sched_rt_entity *back; | 1204 | struct sched_rt_entity *back; |
1193 | #ifdef CONFIG_RT_GROUP_SCHED | 1205 | #ifdef CONFIG_RT_GROUP_SCHED |
@@ -1252,6 +1264,7 @@ struct task_struct { | |||
1252 | #endif | 1264 | #endif |
1253 | 1265 | ||
1254 | unsigned int policy; | 1266 | unsigned int policy; |
1267 | int nr_cpus_allowed; | ||
1255 | cpumask_t cpus_allowed; | 1268 | cpumask_t cpus_allowed; |
1256 | 1269 | ||
1257 | #ifdef CONFIG_PREEMPT_RCU | 1270 | #ifdef CONFIG_PREEMPT_RCU |
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index d3e1075f7b60..c73d1445c77e 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h | |||
@@ -43,7 +43,7 @@ struct pxa2xx_spi_chip { | |||
43 | void (*cs_control)(u32 command); | 43 | void (*cs_control)(u32 command); |
44 | }; | 44 | }; |
45 | 45 | ||
46 | #ifdef CONFIG_ARCH_PXA | 46 | #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) |
47 | 47 | ||
48 | #include <linux/clk.h> | 48 | #include <linux/clk.h> |
49 | #include <mach/dma.h> | 49 | #include <mach/dma.h> |
diff --git a/include/linux/swap.h b/include/linux/swap.h index b6661933e252..c84ec68eaec9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -197,6 +197,10 @@ struct swap_info_struct { | |||
197 | struct block_device *bdev; /* swap device or bdev of swap file */ | 197 | struct block_device *bdev; /* swap device or bdev of swap file */ |
198 | struct file *swap_file; /* seldom referenced */ | 198 | struct file *swap_file; /* seldom referenced */ |
199 | unsigned int old_block_size; /* seldom referenced */ | 199 | unsigned int old_block_size; /* seldom referenced */ |
200 | #ifdef CONFIG_FRONTSWAP | ||
201 | unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ | ||
202 | atomic_t frontswap_pages; /* frontswap pages in-use counter */ | ||
203 | #endif | ||
200 | }; | 204 | }; |
201 | 205 | ||
202 | struct swap_list_t { | 206 | struct swap_list_t { |
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h new file mode 100644 index 000000000000..e282624e8c10 --- /dev/null +++ b/include/linux/swapfile.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _LINUX_SWAPFILE_H | ||
2 | #define _LINUX_SWAPFILE_H | ||
3 | |||
4 | /* | ||
5 | * these were static in swapfile.c but frontswap.c needs them and we don't | ||
6 | * want to expose them to the dozens of source files that include swap.h | ||
7 | */ | ||
8 | extern spinlock_t swap_lock; | ||
9 | extern struct swap_list_t swap_list; | ||
10 | extern struct swap_info_struct *swap_info[]; | ||
11 | extern int try_to_unuse(unsigned int, bool, unsigned long); | ||
12 | |||
13 | #endif /* _LINUX_SWAPFILE_H */ | ||
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 792d16d9cbc7..47ead515c811 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
@@ -9,13 +9,15 @@ | |||
9 | * get good packing density in that tree, so the index should be dense in | 9 | * get good packing density in that tree, so the index should be dense in |
10 | * the low-order bits. | 10 | * the low-order bits. |
11 | * | 11 | * |
12 | * We arrange the `type' and `offset' fields so that `type' is at the five | 12 | * We arrange the `type' and `offset' fields so that `type' is at the seven |
13 | * high-order bits of the swp_entry_t and `offset' is right-aligned in the | 13 | * high-order bits of the swp_entry_t and `offset' is right-aligned in the |
14 | * remaining bits. | 14 | * remaining bits. Although `type' itself needs only five bits, we allow for |
15 | * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry(). | ||
15 | * | 16 | * |
16 | * swp_entry_t's are *never* stored anywhere in their arch-dependent format. | 17 | * swp_entry_t's are *never* stored anywhere in their arch-dependent format. |
17 | */ | 18 | */ |
18 | #define SWP_TYPE_SHIFT(e) (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT) | 19 | #define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ |
20 | (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) | ||
19 | #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) | 21 | #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) |
20 | 22 | ||
21 | /* | 23 | /* |
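To make the new shift concrete: on a 64-bit build, MAX_SWAPFILES_SHIFT is 5 and RADIX_TREE_EXCEPTIONAL_SHIFT is 2, so the type field now starts at bit 64 - (5 + 2) = 57 rather than 59. A tiny userspace sketch mirroring the macros with those constants hard-coded for illustration:

#include <stdio.h>

/* Illustration only: 64-bit swp_entry_t, MAX_SWAPFILES_SHIFT = 5,
 * RADIX_TREE_EXCEPTIONAL_SHIFT = 2, hence a type shift of 57. */
#define DEMO_TYPE_SHIFT		(64 - (5 + 2))
#define DEMO_OFFSET_MASK	((1UL << DEMO_TYPE_SHIFT) - 1)

int main(void)
{
	unsigned long val = (3UL << DEMO_TYPE_SHIFT) | 0x1234;	/* type 3, offset 0x1234 */

	printf("type=%lu offset=%#lx\n",
	       val >> DEMO_TYPE_SHIFT, val & DEMO_OFFSET_MASK);
	return 0;
}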
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4c5b63283377..5f359dbfcdce 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -69,16 +69,16 @@ union tcp_word_hdr { | |||
69 | #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) | 69 | #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) |
70 | 70 | ||
71 | enum { | 71 | enum { |
72 | TCP_FLAG_CWR = __cpu_to_be32(0x00800000), | 72 | TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000), |
73 | TCP_FLAG_ECE = __cpu_to_be32(0x00400000), | 73 | TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000), |
74 | TCP_FLAG_URG = __cpu_to_be32(0x00200000), | 74 | TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000), |
75 | TCP_FLAG_ACK = __cpu_to_be32(0x00100000), | 75 | TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000), |
76 | TCP_FLAG_PSH = __cpu_to_be32(0x00080000), | 76 | TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000), |
77 | TCP_FLAG_RST = __cpu_to_be32(0x00040000), | 77 | TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000), |
78 | TCP_FLAG_SYN = __cpu_to_be32(0x00020000), | 78 | TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000), |
79 | TCP_FLAG_FIN = __cpu_to_be32(0x00010000), | 79 | TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000), |
80 | TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), | 80 | TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000), |
81 | TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) | 81 | TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000) |
82 | }; | 82 | }; |
83 | 83 | ||
84 | /* | 84 | /* |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 7f855d50cdf5..49b3ac29726a 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
@@ -126,8 +126,6 @@ struct usb_hcd { | |||
126 | unsigned wireless:1; /* Wireless USB HCD */ | 126 | unsigned wireless:1; /* Wireless USB HCD */ |
127 | unsigned authorized_default:1; | 127 | unsigned authorized_default:1; |
128 | unsigned has_tt:1; /* Integrated TT in root hub */ | 128 | unsigned has_tt:1; /* Integrated TT in root hub */ |
129 | unsigned broken_pci_sleep:1; /* Don't put the | ||
130 | controller in PCI-D3 for system sleep */ | ||
131 | 129 | ||
132 | unsigned int irq; /* irq allocated */ | 130 | unsigned int irq; /* irq allocated */ |
133 | void __iomem *regs; /* device memory/io */ | 131 | void __iomem *regs; /* device memory/io */ |
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b455c7c212eb..ddb419cf4530 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h | |||
@@ -7,11 +7,19 @@ | |||
7 | * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs | 7 | * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef _LINUX_VGA_SWITCHEROO_H_ | ||
11 | #define _LINUX_VGA_SWITCHEROO_H_ | ||
12 | |||
10 | #include <linux/fb.h> | 13 | #include <linux/fb.h> |
11 | 14 | ||
15 | struct pci_dev; | ||
16 | |||
12 | enum vga_switcheroo_state { | 17 | enum vga_switcheroo_state { |
13 | VGA_SWITCHEROO_OFF, | 18 | VGA_SWITCHEROO_OFF, |
14 | VGA_SWITCHEROO_ON, | 19 | VGA_SWITCHEROO_ON, |
20 | /* below are referred only from vga_switcheroo_get_client_state() */ | ||
21 | VGA_SWITCHEROO_INIT, | ||
22 | VGA_SWITCHEROO_NOT_FOUND, | ||
15 | }; | 23 | }; |
16 | 24 | ||
17 | enum vga_switcheroo_client_id { | 25 | enum vga_switcheroo_client_id { |
@@ -50,6 +58,8 @@ void vga_switcheroo_unregister_handler(void); | |||
50 | 58 | ||
51 | int vga_switcheroo_process_delayed_switch(void); | 59 | int vga_switcheroo_process_delayed_switch(void); |
52 | 60 | ||
61 | int vga_switcheroo_get_client_state(struct pci_dev *dev); | ||
62 | |||
53 | #else | 63 | #else |
54 | 64 | ||
55 | static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} | 65 | static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} |
@@ -62,5 +72,8 @@ static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, | |||
62 | int id, bool active) { return 0; } | 72 | int id, bool active) { return 0; } |
63 | static inline void vga_switcheroo_unregister_handler(void) {} | 73 | static inline void vga_switcheroo_unregister_handler(void) {} |
64 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } | 74 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } |
75 | static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } | ||
76 | |||
65 | 77 | ||
66 | #endif | 78 | #endif |
79 | #endif /* _LINUX_VGA_SWITCHEROO_H_ */ | ||
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index b94765e38e80..2040bff945d4 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
@@ -40,7 +40,10 @@ struct inet_peer { | |||
40 | u32 pmtu_orig; | 40 | u32 pmtu_orig; |
41 | u32 pmtu_learned; | 41 | u32 pmtu_learned; |
42 | struct inetpeer_addr_base redirect_learned; | 42 | struct inetpeer_addr_base redirect_learned; |
43 | struct list_head gc_list; | 43 | union { |
44 | struct list_head gc_list; | ||
45 | struct rcu_head gc_rcu; | ||
46 | }; | ||
44 | /* | 47 | /* |
45 | * Once inet_peer is queued for deletion (refcnt == -1), following fields | 48 | * Once inet_peer is queued for deletion (refcnt == -1), following fields |
46 | * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp | 49 | * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp |
diff --git a/include/net/route.h b/include/net/route.h index ed2b78e2375d..98705468ac03 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -130,9 +130,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, | |||
130 | { | 130 | { |
131 | struct flowi4 fl4 = { | 131 | struct flowi4 fl4 = { |
132 | .flowi4_oif = oif, | 132 | .flowi4_oif = oif, |
133 | .flowi4_tos = tos, | ||
133 | .daddr = daddr, | 134 | .daddr = daddr, |
134 | .saddr = saddr, | 135 | .saddr = saddr, |
135 | .flowi4_tos = tos, | ||
136 | }; | 136 | }; |
137 | return ip_route_output_key(net, &fl4); | 137 | return ip_route_output_key(net, &fl4); |
138 | } | 138 | } |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 55ce96b53b09..9d7d54a00e63 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -220,13 +220,16 @@ struct tcf_proto { | |||
220 | 220 | ||
221 | struct qdisc_skb_cb { | 221 | struct qdisc_skb_cb { |
222 | unsigned int pkt_len; | 222 | unsigned int pkt_len; |
223 | unsigned char data[24]; | 223 | u16 bond_queue_mapping; |
224 | u16 _pad; | ||
225 | unsigned char data[20]; | ||
224 | }; | 226 | }; |
225 | 227 | ||
226 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) | 228 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
227 | { | 229 | { |
228 | struct qdisc_skb_cb *qcb; | 230 | struct qdisc_skb_cb *qcb; |
229 | BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz); | 231 | |
232 | BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); | ||
230 | BUILD_BUG_ON(sizeof(qcb->data) < sz); | 233 | BUILD_BUG_ON(sizeof(qcb->data) < sz); |
231 | } | 234 | } |
232 | 235 | ||
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 116959933f46..c78a23333c4f 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -47,6 +47,7 @@ struct target_core_fabric_ops { | |||
47 | */ | 47 | */ |
48 | int (*check_stop_free)(struct se_cmd *); | 48 | int (*check_stop_free)(struct se_cmd *); |
49 | void (*release_cmd)(struct se_cmd *); | 49 | void (*release_cmd)(struct se_cmd *); |
50 | void (*put_session)(struct se_session *); | ||
50 | /* | 51 | /* |
51 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. | 52 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. |
52 | */ | 53 | */ |
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 1480900c511c..d274734b2aa4 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h | |||
@@ -289,6 +289,7 @@ TRACE_EVENT(rcu_dyntick, | |||
289 | * "In holdoff": Nothing to do, holding off after unsuccessful attempt. | 289 | * "In holdoff": Nothing to do, holding off after unsuccessful attempt. |
290 | * "Begin holdoff": Attempt failed, don't retry until next jiffy. | 290 | * "Begin holdoff": Attempt failed, don't retry until next jiffy. |
291 | * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. | 291 | * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. |
292 | * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks. | ||
292 | * "More callbacks": Still more callbacks, try again to clear them out. | 293 | * "More callbacks": Still more callbacks, try again to clear them out. |
293 | * "Callbacks drained": All callbacks processed, off to dyntick idle! | 294 | * "Callbacks drained": All callbacks processed, off to dyntick idle! |
294 | * "Timer": Timer fired to cause CPU to continue processing callbacks. | 295 | * "Timer": Timer fired to cause CPU to continue processing callbacks. |
diff --git a/init/main.c b/init/main.c index 1ca6b32c4828..b5cc0a7c4708 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -508,7 +508,7 @@ asmlinkage void __init start_kernel(void) | |||
508 | parse_early_param(); | 508 | parse_early_param(); |
509 | parse_args("Booting kernel", static_command_line, __start___param, | 509 | parse_args("Booting kernel", static_command_line, __start___param, |
510 | __stop___param - __start___param, | 510 | __stop___param - __start___param, |
511 | 0, 0, &unknown_bootoption); | 511 | -1, -1, &unknown_bootoption); |
512 | 512 | ||
513 | jump_label_init(); | 513 | jump_label_init(); |
514 | 514 | ||
@@ -755,13 +755,8 @@ static void __init do_initcalls(void) | |||
755 | { | 755 | { |
756 | int level; | 756 | int level; |
757 | 757 | ||
758 | for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) { | 758 | for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) |
759 | pr_info("initlevel:%d=%s, %d registered initcalls\n", | ||
760 | level, initcall_level_names[level], | ||
761 | (int) (initcall_levels[level+1] | ||
762 | - initcall_levels[level])); | ||
763 | do_initcall_level(level); | 759 | do_initcall_level(level); |
764 | } | ||
765 | } | 760 | } |
766 | 761 | ||
767 | /* | 762 | /* |
@@ -393,6 +393,16 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) | |||
393 | return sfd->file->f_op->fsync(sfd->file, start, end, datasync); | 393 | return sfd->file->f_op->fsync(sfd->file, start, end, datasync); |
394 | } | 394 | } |
395 | 395 | ||
396 | static long shm_fallocate(struct file *file, int mode, loff_t offset, | ||
397 | loff_t len) | ||
398 | { | ||
399 | struct shm_file_data *sfd = shm_file_data(file); | ||
400 | |||
401 | if (!sfd->file->f_op->fallocate) | ||
402 | return -EOPNOTSUPP; | ||
403 | return sfd->file->f_op->fallocate(file, mode, offset, len); | ||
404 | } | ||
405 | |||
396 | static unsigned long shm_get_unmapped_area(struct file *file, | 406 | static unsigned long shm_get_unmapped_area(struct file *file, |
397 | unsigned long addr, unsigned long len, unsigned long pgoff, | 407 | unsigned long addr, unsigned long len, unsigned long pgoff, |
398 | unsigned long flags) | 408 | unsigned long flags) |
@@ -410,6 +420,7 @@ static const struct file_operations shm_file_operations = { | |||
410 | .get_unmapped_area = shm_get_unmapped_area, | 420 | .get_unmapped_area = shm_get_unmapped_area, |
411 | #endif | 421 | #endif |
412 | .llseek = noop_llseek, | 422 | .llseek = noop_llseek, |
423 | .fallocate = shm_fallocate, | ||
413 | }; | 424 | }; |
414 | 425 | ||
415 | static const struct file_operations shm_file_operations_huge = { | 426 | static const struct file_operations shm_file_operations_huge = { |
@@ -418,6 +429,7 @@ static const struct file_operations shm_file_operations_huge = { | |||
418 | .release = shm_release, | 429 | .release = shm_release, |
419 | .get_unmapped_area = shm_get_unmapped_area, | 430 | .get_unmapped_area = shm_get_unmapped_area, |
420 | .llseek = noop_llseek, | 431 | .llseek = noop_llseek, |
432 | .fallocate = shm_fallocate, | ||
421 | }; | 433 | }; |
422 | 434 | ||
423 | int is_file_shm_hugepages(struct file *file) | 435 | int is_file_shm_hugepages(struct file *file) |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0f3527d6184a..2097684cf194 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -255,12 +255,17 @@ int cgroup_lock_is_held(void) | |||
255 | 255 | ||
256 | EXPORT_SYMBOL_GPL(cgroup_lock_is_held); | 256 | EXPORT_SYMBOL_GPL(cgroup_lock_is_held); |
257 | 257 | ||
258 | static int css_unbias_refcnt(int refcnt) | ||
259 | { | ||
260 | return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS; | ||
261 | } | ||
262 | |||
258 | /* the current nr of refs, always >= 0 whether @css is deactivated or not */ | 263 | /* the current nr of refs, always >= 0 whether @css is deactivated or not */ |
259 | static int css_refcnt(struct cgroup_subsys_state *css) | 264 | static int css_refcnt(struct cgroup_subsys_state *css) |
260 | { | 265 | { |
261 | int v = atomic_read(&css->refcnt); | 266 | int v = atomic_read(&css->refcnt); |
262 | 267 | ||
263 | return v >= 0 ? v : v - CSS_DEACT_BIAS; | 268 | return css_unbias_refcnt(v); |
264 | } | 269 | } |
265 | 270 | ||
266 | /* convenient tests for these bits */ | 271 | /* convenient tests for these bits */ |
@@ -896,10 +901,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) | |||
896 | mutex_unlock(&cgroup_mutex); | 901 | mutex_unlock(&cgroup_mutex); |
897 | 902 | ||
898 | /* | 903 | /* |
899 | * Drop the active superblock reference that we took when we | 904 | * We want to drop the active superblock reference from the |
900 | * created the cgroup | 905 | * cgroup creation after all the dentry refs are gone - |
906 | * kill_sb gets mighty unhappy otherwise. Mark | ||
907 | * dentry->d_fsdata with cgroup_diput() to tell | ||
908 | * cgroup_d_release() to call deactivate_super(). | ||
901 | */ | 909 | */ |
902 | deactivate_super(cgrp->root->sb); | 910 | dentry->d_fsdata = cgroup_diput; |
903 | 911 | ||
904 | /* | 912 | /* |
905 | * if we're getting rid of the cgroup, refcount should ensure | 913 | * if we're getting rid of the cgroup, refcount should ensure |
@@ -925,6 +933,13 @@ static int cgroup_delete(const struct dentry *d) | |||
925 | return 1; | 933 | return 1; |
926 | } | 934 | } |
927 | 935 | ||
936 | static void cgroup_d_release(struct dentry *dentry) | ||
937 | { | ||
938 | /* did cgroup_diput() tell me to deactivate super? */ | ||
939 | if (dentry->d_fsdata == cgroup_diput) | ||
940 | deactivate_super(dentry->d_sb); | ||
941 | } | ||
942 | |||
928 | static void remove_dir(struct dentry *d) | 943 | static void remove_dir(struct dentry *d) |
929 | { | 944 | { |
930 | struct dentry *parent = dget(d->d_parent); | 945 | struct dentry *parent = dget(d->d_parent); |
@@ -1532,6 +1547,7 @@ static int cgroup_get_rootdir(struct super_block *sb) | |||
1532 | static const struct dentry_operations cgroup_dops = { | 1547 | static const struct dentry_operations cgroup_dops = { |
1533 | .d_iput = cgroup_diput, | 1548 | .d_iput = cgroup_diput, |
1534 | .d_delete = cgroup_delete, | 1549 | .d_delete = cgroup_delete, |
1550 | .d_release = cgroup_d_release, | ||
1535 | }; | 1551 | }; |
1536 | 1552 | ||
1537 | struct inode *inode = | 1553 | struct inode *inode = |
@@ -4971,10 +4987,12 @@ EXPORT_SYMBOL_GPL(__css_tryget); | |||
4971 | void __css_put(struct cgroup_subsys_state *css) | 4987 | void __css_put(struct cgroup_subsys_state *css) |
4972 | { | 4988 | { |
4973 | struct cgroup *cgrp = css->cgroup; | 4989 | struct cgroup *cgrp = css->cgroup; |
4990 | int v; | ||
4974 | 4991 | ||
4975 | rcu_read_lock(); | 4992 | rcu_read_lock(); |
4976 | atomic_dec(&css->refcnt); | 4993 | v = css_unbias_refcnt(atomic_dec_return(&css->refcnt)); |
4977 | switch (css_refcnt(css)) { | 4994 | |
4995 | switch (v) { | ||
4978 | case 1: | 4996 | case 1: |
4979 | if (notify_on_release(cgrp)) { | 4997 | if (notify_on_release(cgrp)) { |
4980 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | 4998 | set_bit(CGRP_RELEASABLE, &cgrp->flags); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 5b06cbbf6931..d7d71d6ec972 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -253,9 +253,9 @@ perf_cgroup_match(struct perf_event *event) | |||
253 | return !event->cgrp || event->cgrp == cpuctx->cgrp; | 253 | return !event->cgrp || event->cgrp == cpuctx->cgrp; |
254 | } | 254 | } |
255 | 255 | ||
256 | static inline void perf_get_cgroup(struct perf_event *event) | 256 | static inline bool perf_tryget_cgroup(struct perf_event *event) |
257 | { | 257 | { |
258 | css_get(&event->cgrp->css); | 258 | return css_tryget(&event->cgrp->css); |
259 | } | 259 | } |
260 | 260 | ||
261 | static inline void perf_put_cgroup(struct perf_event *event) | 261 | static inline void perf_put_cgroup(struct perf_event *event) |
@@ -484,7 +484,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, | |||
484 | event->cgrp = cgrp; | 484 | event->cgrp = cgrp; |
485 | 485 | ||
486 | /* must be done before we fput() the file */ | 486 | /* must be done before we fput() the file */ |
487 | perf_get_cgroup(event); | 487 | if (!perf_tryget_cgroup(event)) { |
488 | event->cgrp = NULL; | ||
489 | ret = -ENOENT; | ||
490 | goto out; | ||
491 | } | ||
488 | 492 | ||
489 | /* | 493 | /* |
490 | * all events in a group must monitor | 494 | * all events in a group must monitor |
@@ -3181,7 +3185,6 @@ static void perf_event_for_each(struct perf_event *event, | |||
3181 | event = event->group_leader; | 3185 | event = event->group_leader; |
3182 | 3186 | ||
3183 | perf_event_for_each_child(event, func); | 3187 | perf_event_for_each_child(event, func); |
3184 | func(event); | ||
3185 | list_for_each_entry(sibling, &event->sibling_list, group_entry) | 3188 | list_for_each_entry(sibling, &event->sibling_list, group_entry) |
3186 | perf_event_for_each_child(sibling, func); | 3189 | perf_event_for_each_child(sibling, func); |
3187 | mutex_unlock(&ctx->mutex); | 3190 | mutex_unlock(&ctx->mutex); |
diff --git a/kernel/exit.c b/kernel/exit.c index 34867cc5b42a..2f59cc334516 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -72,6 +72,18 @@ static void __unhash_process(struct task_struct *p, bool group_dead) | |||
72 | list_del_rcu(&p->tasks); | 72 | list_del_rcu(&p->tasks); |
73 | list_del_init(&p->sibling); | 73 | list_del_init(&p->sibling); |
74 | __this_cpu_dec(process_counts); | 74 | __this_cpu_dec(process_counts); |
75 | /* | ||
76 | * If we are the last child process in a pid namespace to be | ||
77 | * reaped, notify the reaper sleeping in zap_pid_ns_processes(). | ||
78 | */ | ||
79 | if (IS_ENABLED(CONFIG_PID_NS)) { | ||
80 | struct task_struct *parent = p->real_parent; | ||
81 | |||
82 | if ((task_active_pid_ns(parent)->child_reaper == parent) && | ||
83 | list_empty(&parent->children) && | ||
84 | (parent->flags & PF_EXITING)) | ||
85 | wake_up_process(parent); | ||
86 | } | ||
75 | } | 87 | } |
76 | list_del_rcu(&p->thread_group); | 88 | list_del_rcu(&p->thread_group); |
77 | } | 89 | } |
@@ -643,6 +655,7 @@ static void exit_mm(struct task_struct * tsk) | |||
643 | mm_release(tsk, mm); | 655 | mm_release(tsk, mm); |
644 | if (!mm) | 656 | if (!mm) |
645 | return; | 657 | return; |
658 | sync_mm_rss(mm); | ||
646 | /* | 659 | /* |
647 | * Serialize with any possible pending coredump. | 660 | * Serialize with any possible pending coredump. |
648 | * We must hold mmap_sem around checking core_state | 661 | * We must hold mmap_sem around checking core_state |
@@ -719,12 +732,6 @@ static struct task_struct *find_new_reaper(struct task_struct *father) | |||
719 | 732 | ||
720 | zap_pid_ns_processes(pid_ns); | 733 | zap_pid_ns_processes(pid_ns); |
721 | write_lock_irq(&tasklist_lock); | 734 | write_lock_irq(&tasklist_lock); |
722 | /* | ||
723 | * We can not clear ->child_reaper or leave it alone. | ||
724 | * There may by stealth EXIT_DEAD tasks on ->children, | ||
725 | * forget_original_parent() must move them somewhere. | ||
726 | */ | ||
727 | pid_ns->child_reaper = init_pid_ns.child_reaper; | ||
728 | } else if (father->signal->has_child_subreaper) { | 735 | } else if (father->signal->has_child_subreaper) { |
729 | struct task_struct *reaper; | 736 | struct task_struct *reaper; |
730 | 737 | ||
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index fc275e4f629b..eebd6d5cfb44 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq) | |||
275 | kstat_incr_irqs_this_cpu(irq, desc); | 275 | kstat_incr_irqs_this_cpu(irq, desc); |
276 | 276 | ||
277 | action = desc->action; | 277 | action = desc->action; |
278 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) | 278 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { |
279 | desc->istate |= IRQS_PENDING; | ||
279 | goto out_unlock; | 280 | goto out_unlock; |
281 | } | ||
280 | 282 | ||
281 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); | 283 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
282 | raw_spin_unlock_irq(&desc->lock); | 284 | raw_spin_unlock_irq(&desc->lock); |
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
324 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 326 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
325 | kstat_incr_irqs_this_cpu(irq, desc); | 327 | kstat_incr_irqs_this_cpu(irq, desc); |
326 | 328 | ||
327 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) | 329 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
330 | desc->istate |= IRQS_PENDING; | ||
328 | goto out_unlock; | 331 | goto out_unlock; |
332 | } | ||
329 | 333 | ||
330 | handle_irq_event(desc); | 334 | handle_irq_event(desc); |
331 | 335 | ||
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 8e5c56b3b7d9..001fa5bab490 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); | |||
101 | 101 | ||
102 | extern void irq_set_thread_affinity(struct irq_desc *desc); | 102 | extern void irq_set_thread_affinity(struct irq_desc *desc); |
103 | 103 | ||
104 | extern int irq_do_set_affinity(struct irq_data *data, | ||
105 | const struct cpumask *dest, bool force); | ||
106 | |||
104 | /* Inline functions for support of irq chips on slow busses */ | 107 | /* Inline functions for support of irq chips on slow busses */ |
105 | static inline void chip_bus_lock(struct irq_desc *desc) | 108 | static inline void chip_bus_lock(struct irq_desc *desc) |
106 | { | 109 | { |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ea0c6c2ae6f7..8c548232ba39 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -142,6 +142,25 @@ static inline void | |||
142 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | 142 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | ||
146 | bool force) | ||
147 | { | ||
148 | struct irq_desc *desc = irq_data_to_desc(data); | ||
149 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
150 | int ret; | ||
151 | |||
152 | ret = chip->irq_set_affinity(data, mask, false); | ||
153 | switch (ret) { | ||
154 | case IRQ_SET_MASK_OK: | ||
155 | cpumask_copy(data->affinity, mask); | ||
156 | case IRQ_SET_MASK_OK_NOCOPY: | ||
157 | irq_set_thread_affinity(desc); | ||
158 | ret = 0; | ||
159 | } | ||
160 | |||
161 | return ret; | ||
162 | } | ||
163 | |||
145 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | 164 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) |
146 | { | 165 | { |
147 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 166 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
@@ -152,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | |||
152 | return -EINVAL; | 171 | return -EINVAL; |
153 | 172 | ||
154 | if (irq_can_move_pcntxt(data)) { | 173 | if (irq_can_move_pcntxt(data)) { |
155 | ret = chip->irq_set_affinity(data, mask, false); | 174 | ret = irq_do_set_affinity(data, mask, false); |
156 | switch (ret) { | ||
157 | case IRQ_SET_MASK_OK: | ||
158 | cpumask_copy(data->affinity, mask); | ||
159 | case IRQ_SET_MASK_OK_NOCOPY: | ||
160 | irq_set_thread_affinity(desc); | ||
161 | ret = 0; | ||
162 | } | ||
163 | } else { | 175 | } else { |
164 | irqd_set_move_pending(data); | 176 | irqd_set_move_pending(data); |
165 | irq_copy_pending(desc, mask); | 177 | irq_copy_pending(desc, mask); |
@@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | |||
283 | static int | 295 | static int |
284 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 296 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
285 | { | 297 | { |
286 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
287 | struct cpumask *set = irq_default_affinity; | 298 | struct cpumask *set = irq_default_affinity; |
288 | int ret, node = desc->irq_data.node; | 299 | int node = desc->irq_data.node; |
289 | 300 | ||
290 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | 301 | /* Excludes PER_CPU and NO_BALANCE interrupts */ |
291 | if (!irq_can_set_affinity(irq)) | 302 | if (!irq_can_set_affinity(irq)) |
@@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |||
311 | if (cpumask_intersects(mask, nodemask)) | 322 | if (cpumask_intersects(mask, nodemask)) |
312 | cpumask_and(mask, mask, nodemask); | 323 | cpumask_and(mask, mask, nodemask); |
313 | } | 324 | } |
314 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); | 325 | irq_do_set_affinity(&desc->irq_data, mask, false); |
315 | switch (ret) { | ||
316 | case IRQ_SET_MASK_OK: | ||
317 | cpumask_copy(desc->irq_data.affinity, mask); | ||
318 | case IRQ_SET_MASK_OK_NOCOPY: | ||
319 | irq_set_thread_affinity(desc); | ||
320 | } | ||
321 | return 0; | 326 | return 0; |
322 | } | 327 | } |
323 | #else | 328 | #else |
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index c3c89751b327..ca3f4aaff707 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata) | |||
42 | * For correct operation this depends on the caller | 42 | * For correct operation this depends on the caller |
43 | * masking the irqs. | 43 | * masking the irqs. |
44 | */ | 44 | */ |
45 | if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) | 45 | if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) |
46 | < nr_cpu_ids)) { | 46 | irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); |
47 | int ret = chip->irq_set_affinity(&desc->irq_data, | ||
48 | desc->pending_mask, false); | ||
49 | switch (ret) { | ||
50 | case IRQ_SET_MASK_OK: | ||
51 | cpumask_copy(desc->irq_data.affinity, desc->pending_mask); | ||
52 | case IRQ_SET_MASK_OK_NOCOPY: | ||
53 | irq_set_thread_affinity(desc); | ||
54 | } | ||
55 | } | ||
56 | 47 | ||
57 | cpumask_clear(desc->pending_mask); | 48 | cpumask_clear(desc->pending_mask); |
58 | } | 49 | } |
diff --git a/kernel/panic.c b/kernel/panic.c index 8ed89a175d79..d2a5f4ecc6dd 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #define PANIC_TIMER_STEP 100 | 27 | #define PANIC_TIMER_STEP 100 |
28 | #define PANIC_BLINK_SPD 18 | 28 | #define PANIC_BLINK_SPD 18 |
29 | 29 | ||
30 | int panic_on_oops; | 30 | int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE; |
31 | static unsigned long tainted_mask; | 31 | static unsigned long tainted_mask; |
32 | static int pause_on_oops; | 32 | static int pause_on_oops; |
33 | static int pause_on_oops_flag; | 33 | static int pause_on_oops_flag; |
@@ -108,8 +108,6 @@ void panic(const char *fmt, ...) | |||
108 | */ | 108 | */ |
109 | crash_kexec(NULL); | 109 | crash_kexec(NULL); |
110 | 110 | ||
111 | kmsg_dump(KMSG_DUMP_PANIC); | ||
112 | |||
113 | /* | 111 | /* |
114 | * Note smp_send_stop is the usual smp shutdown function, which | 112 | * Note smp_send_stop is the usual smp shutdown function, which |
115 | * unfortunately means it may not be hardened to work in a panic | 113 | * unfortunately means it may not be hardened to work in a panic |
@@ -117,6 +115,8 @@ void panic(const char *fmt, ...) | |||
117 | */ | 115 | */ |
118 | smp_send_stop(); | 116 | smp_send_stop(); |
119 | 117 | ||
118 | kmsg_dump(KMSG_DUMP_PANIC); | ||
119 | |||
120 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); | 120 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); |
121 | 121 | ||
122 | bust_spinlocks(0); | 122 | bust_spinlocks(0); |
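
The panic() hunks only move the kmsg_dump() call so that it runs after smp_send_stop(), i.e. after the other CPUs have been quiesced. Condensed, the resulting ordering inside panic() is (surrounding statements elided, shown purely for orientation):

	crash_kexec(NULL);		/* give a crash kernel the first shot */
	smp_send_stop();		/* stop the other CPUs ...             */
	kmsg_dump(KMSG_DUMP_PANIC);	/* ... then let dumpers snapshot the log */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
	bust_spinlocks(0);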
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 16b20e38c4a1..b3c7fd554250 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -184,11 +184,31 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
184 | } | 184 | } |
185 | read_unlock(&tasklist_lock); | 185 | read_unlock(&tasklist_lock); |
186 | 186 | ||
187 | /* Firstly reap the EXIT_ZOMBIE children we may have. */ | ||
187 | do { | 188 | do { |
188 | clear_thread_flag(TIF_SIGPENDING); | 189 | clear_thread_flag(TIF_SIGPENDING); |
189 | rc = sys_wait4(-1, NULL, __WALL, NULL); | 190 | rc = sys_wait4(-1, NULL, __WALL, NULL); |
190 | } while (rc != -ECHILD); | 191 | } while (rc != -ECHILD); |
191 | 192 | ||
193 | /* | ||
194 | * sys_wait4() above can't reap the TASK_DEAD children. | ||
195 | * Make sure they all go away, see __unhash_process(). | ||
196 | */ | ||
197 | for (;;) { | ||
198 | bool need_wait = false; | ||
199 | |||
200 | read_lock(&tasklist_lock); | ||
201 | if (!list_empty(¤t->children)) { | ||
202 | __set_current_state(TASK_UNINTERRUPTIBLE); | ||
203 | need_wait = true; | ||
204 | } | ||
205 | read_unlock(&tasklist_lock); | ||
206 | |||
207 | if (!need_wait) | ||
208 | break; | ||
209 | schedule(); | ||
210 | } | ||
211 | |||
192 | if (pid_ns->reboot) | 212 | if (pid_ns->reboot) |
193 | current->signal->group_exit_code = pid_ns->reboot; | 213 | current->signal->group_exit_code = pid_ns->reboot; |
194 | 214 | ||
diff --git a/kernel/printk.c b/kernel/printk.c index 32462d2b364a..a2276b916769 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -227,10 +227,10 @@ static u32 clear_idx; | |||
227 | #define LOG_LINE_MAX 1024 | 227 | #define LOG_LINE_MAX 1024 |
228 | 228 | ||
229 | /* record buffer */ | 229 | /* record buffer */ |
230 | #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | 230 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) |
231 | #define LOG_ALIGN 4 | 231 | #define LOG_ALIGN 4 |
232 | #else | 232 | #else |
233 | #define LOG_ALIGN 8 | 233 | #define LOG_ALIGN __alignof__(struct log) |
234 | #endif | 234 | #endif |
235 | #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) | 235 | #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) |
236 | static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); | 236 | static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); |
@@ -414,7 +414,9 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, | |||
414 | if (!user) | 414 | if (!user) |
415 | return -EBADF; | 415 | return -EBADF; |
416 | 416 | ||
417 | mutex_lock(&user->lock); | 417 | ret = mutex_lock_interruptible(&user->lock); |
418 | if (ret) | ||
419 | return ret; | ||
418 | raw_spin_lock(&logbuf_lock); | 420 | raw_spin_lock(&logbuf_lock); |
419 | while (user->seq == log_next_seq) { | 421 | while (user->seq == log_next_seq) { |
420 | if (file->f_flags & O_NONBLOCK) { | 422 | if (file->f_flags & O_NONBLOCK) { |
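
devkmsg_read() now takes the per-open mutex with mutex_lock_interruptible(), so a reader stuck waiting for another reader of the same file no longer sleeps uninterruptibly. For context, a minimal user-space sketch of the kind of reader this path serves; the new /dev/kmsg interface returns one formatted record per read(), and with O_NONBLOCK the read fails once the reader has caught up (buffer size and error handling here are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char rec[8192];		/* one formatted record per read() */
	ssize_t n;
	int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	/* Dump records until we catch up with the log head. */
	while ((n = read(fd, rec, sizeof(rec))) > 0)
		fwrite(rec, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}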
@@ -878,7 +880,9 @@ static int syslog_print(char __user *buf, int size) | |||
878 | syslog_seq++; | 880 | syslog_seq++; |
879 | raw_spin_unlock_irq(&logbuf_lock); | 881 | raw_spin_unlock_irq(&logbuf_lock); |
880 | 882 | ||
881 | if (len > 0 && copy_to_user(buf, text, len)) | 883 | if (len > size) |
884 | len = -EINVAL; | ||
885 | else if (len > 0 && copy_to_user(buf, text, len)) | ||
882 | len = -EFAULT; | 886 | len = -EFAULT; |
883 | 887 | ||
884 | kfree(text); | 888 | kfree(text); |
@@ -909,7 +913,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) | |||
909 | /* | 913 | /* |
910 | * Find first record that fits, including all following records, | 914 | * Find first record that fits, including all following records, |
911 | * into the user-provided buffer for this dump. | 915 | * into the user-provided buffer for this dump. |
912 | */ | 916 | */ |
913 | seq = clear_seq; | 917 | seq = clear_seq; |
914 | idx = clear_idx; | 918 | idx = clear_idx; |
915 | while (seq < log_next_seq) { | 919 | while (seq < log_next_seq) { |
@@ -919,6 +923,8 @@ static int syslog_print_all(char __user *buf, int size, bool clear) | |||
919 | idx = log_next(idx); | 923 | idx = log_next(idx); |
920 | seq++; | 924 | seq++; |
921 | } | 925 | } |
926 | |||
927 | /* move first record forward until length fits into the buffer */ | ||
922 | seq = clear_seq; | 928 | seq = clear_seq; |
923 | idx = clear_idx; | 929 | idx = clear_idx; |
924 | while (len > size && seq < log_next_seq) { | 930 | while (len > size && seq < log_next_seq) { |
@@ -929,7 +935,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) | |||
929 | seq++; | 935 | seq++; |
930 | } | 936 | } |
931 | 937 | ||
932 | /* last message in this dump */ | 938 | /* last message fitting into this dump */ |
933 | next_seq = log_next_seq; | 939 | next_seq = log_next_seq; |
934 | 940 | ||
935 | len = 0; | 941 | len = 0; |
@@ -974,6 +980,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
974 | { | 980 | { |
975 | bool clear = false; | 981 | bool clear = false; |
976 | static int saved_console_loglevel = -1; | 982 | static int saved_console_loglevel = -1; |
983 | static DEFINE_MUTEX(syslog_mutex); | ||
977 | int error; | 984 | int error; |
978 | 985 | ||
979 | error = check_syslog_permissions(type, from_file); | 986 | error = check_syslog_permissions(type, from_file); |
@@ -1000,11 +1007,17 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) | |||
1000 | error = -EFAULT; | 1007 | error = -EFAULT; |
1001 | goto out; | 1008 | goto out; |
1002 | } | 1009 | } |
1010 | error = mutex_lock_interruptible(&syslog_mutex); | ||
1011 | if (error) | ||
1012 | goto out; | ||
1003 | error = wait_event_interruptible(log_wait, | 1013 | error = wait_event_interruptible(log_wait, |
1004 | syslog_seq != log_next_seq); | 1014 | syslog_seq != log_next_seq); |
1005 | if (error) | 1015 | if (error) { |
1016 | mutex_unlock(&syslog_mutex); | ||
1006 | goto out; | 1017 | goto out; |
1018 | } | ||
1007 | error = syslog_print(buf, len); | 1019 | error = syslog_print(buf, len); |
1020 | mutex_unlock(&syslog_mutex); | ||
1008 | break; | 1021 | break; |
1009 | /* Read/clear last kernel messages */ | 1022 | /* Read/clear last kernel messages */ |
1010 | case SYSLOG_ACTION_READ_CLEAR: | 1023 | case SYSLOG_ACTION_READ_CLEAR: |
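
The SYSLOG_ACTION_READ branch is now serialized by a local syslog_mutex, and the mutex is dropped on every exit path, including an interrupted wait_event_interruptible(). This is the path behind a blocking syslog(2)/klogctl(3) read; a minimal sketch of such a caller, shown only to illustrate which user-visible operation the new locking protects (the action constant is defined locally because glibc does not export the SYSLOG_ACTION_* names):

#include <stdio.h>
#include <sys/klog.h>

#define SYSLOG_ACTION_READ 2	/* matches the case handled above */

int main(void)
{
	char buf[4096];
	/* Blocks until new kernel messages arrive (needs CAP_SYSLOG), then copies them out. */
	int n = klogctl(SYSLOG_ACTION_READ, buf, sizeof(buf));

	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	return n < 0;
}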
@@ -2300,48 +2313,210 @@ module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR); | |||
2300 | * kmsg_dump - dump kernel log to kernel message dumpers. | 2313 | * kmsg_dump - dump kernel log to kernel message dumpers. |
2301 | * @reason: the reason (oops, panic etc) for dumping | 2314 | * @reason: the reason (oops, panic etc) for dumping |
2302 | * | 2315 | * |
2303 | * Iterate through each of the dump devices and call the oops/panic | 2316 | * Call each of the registered dumper's dump() callback, which can |
2304 | * callbacks with the log buffer. | 2317 | * retrieve the kmsg records with kmsg_dump_get_line() or |
2318 | * kmsg_dump_get_buffer(). | ||
2305 | */ | 2319 | */ |
2306 | void kmsg_dump(enum kmsg_dump_reason reason) | 2320 | void kmsg_dump(enum kmsg_dump_reason reason) |
2307 | { | 2321 | { |
2308 | u64 idx; | ||
2309 | struct kmsg_dumper *dumper; | 2322 | struct kmsg_dumper *dumper; |
2310 | const char *s1, *s2; | ||
2311 | unsigned long l1, l2; | ||
2312 | unsigned long flags; | 2323 | unsigned long flags; |
2313 | 2324 | ||
2314 | if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) | 2325 | if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) |
2315 | return; | 2326 | return; |
2316 | 2327 | ||
2317 | /* Theoretically, the log could move on after we do this, but | 2328 | rcu_read_lock(); |
2318 | there's not a lot we can do about that. The new messages | 2329 | list_for_each_entry_rcu(dumper, &dump_list, list) { |
2319 | will overwrite the start of what we dump. */ | 2330 | if (dumper->max_reason && reason > dumper->max_reason) |
2331 | continue; | ||
2332 | |||
2333 | /* initialize iterator with data about the stored records */ | ||
2334 | dumper->active = true; | ||
2335 | |||
2336 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2337 | dumper->cur_seq = clear_seq; | ||
2338 | dumper->cur_idx = clear_idx; | ||
2339 | dumper->next_seq = log_next_seq; | ||
2340 | dumper->next_idx = log_next_idx; | ||
2341 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2342 | |||
2343 | /* invoke dumper which will iterate over records */ | ||
2344 | dumper->dump(dumper, reason); | ||
2345 | |||
2346 | /* reset iterator */ | ||
2347 | dumper->active = false; | ||
2348 | } | ||
2349 | rcu_read_unlock(); | ||
2350 | } | ||
2351 | |||
2352 | /** | ||
2353 | * kmsg_dump_get_line - retrieve one kmsg log line | ||
2354 | * @dumper: registered kmsg dumper | ||
2355 | * @syslog: include the "<4>" prefixes | ||
2356 | * @line: buffer to copy the line to | ||
2357 | * @size: maximum size of the buffer | ||
2358 | * @len: length of line placed into buffer | ||
2359 | * | ||
2360 | * Start at the beginning of the kmsg buffer, with the oldest kmsg | ||
2361 | * record, and copy one record into the provided buffer. | ||
2362 | * | ||
2363 | * Consecutive calls will return the next available record moving | ||
2364 | * towards the end of the buffer with the youngest messages. | ||
2365 | * | ||
2366 | * A return value of FALSE indicates that there are no more records to | ||
2367 | * read. | ||
2368 | */ | ||
2369 | bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, | ||
2370 | char *line, size_t size, size_t *len) | ||
2371 | { | ||
2372 | unsigned long flags; | ||
2373 | struct log *msg; | ||
2374 | size_t l = 0; | ||
2375 | bool ret = false; | ||
2376 | |||
2377 | if (!dumper->active) | ||
2378 | goto out; | ||
2320 | 2379 | ||
2321 | raw_spin_lock_irqsave(&logbuf_lock, flags); | 2380 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
2322 | if (syslog_seq < log_first_seq) | 2381 | if (dumper->cur_seq < log_first_seq) { |
2323 | idx = syslog_idx; | 2382 | /* messages are gone, move to first available one */ |
2324 | else | 2383 | dumper->cur_seq = log_first_seq; |
2325 | idx = log_first_idx; | 2384 | dumper->cur_idx = log_first_idx; |
2385 | } | ||
2386 | |||
2387 | /* last entry */ | ||
2388 | if (dumper->cur_seq >= log_next_seq) { | ||
2389 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2390 | goto out; | ||
2391 | } | ||
2326 | 2392 | ||
2327 | if (idx > log_next_idx) { | 2393 | msg = log_from_idx(dumper->cur_idx); |
2328 | s1 = log_buf; | 2394 | l = msg_print_text(msg, syslog, |
2329 | l1 = log_next_idx; | 2395 | line, size); |
2330 | 2396 | ||
2331 | s2 = log_buf + idx; | 2397 | dumper->cur_idx = log_next(dumper->cur_idx); |
2332 | l2 = log_buf_len - idx; | 2398 | dumper->cur_seq++; |
2333 | } else { | 2399 | ret = true; |
2334 | s1 = ""; | 2400 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
2335 | l1 = 0; | 2401 | out: |
2402 | if (len) | ||
2403 | *len = l; | ||
2404 | return ret; | ||
2405 | } | ||
2406 | EXPORT_SYMBOL_GPL(kmsg_dump_get_line); | ||
2407 | |||
2408 | /** | ||
2409 | * kmsg_dump_get_buffer - copy kmsg log lines | ||
2410 | * @dumper: registered kmsg dumper | ||
2411 | * @syslog: include the "<4>" prefixes | ||
2412 | * @buf: buffer to copy the lines to | ||
2413 | * @size: maximum size of the buffer | ||
2414 | * @len: length of data placed into the buffer | ||
2415 | * | ||
2416 | * Start at the end of the kmsg buffer and fill the provided buffer | ||
2417 | * with as many of the *youngest* kmsg records that fit into it. | ||
2418 | * If the buffer is large enough, all available kmsg records will be | ||
2419 | * copied with a single call. | ||
2420 | * | ||
2421 | * Consecutive calls will fill the buffer with the next block of | ||
2422 | * available older records, not including the earlier retrieved ones. | ||
2423 | * | ||
2424 | * A return value of FALSE indicates that there are no more records to | ||
2425 | * read. | ||
2426 | */ | ||
2427 | bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, | ||
2428 | char *buf, size_t size, size_t *len) | ||
2429 | { | ||
2430 | unsigned long flags; | ||
2431 | u64 seq; | ||
2432 | u32 idx; | ||
2433 | u64 next_seq; | ||
2434 | u32 next_idx; | ||
2435 | size_t l = 0; | ||
2436 | bool ret = false; | ||
2437 | |||
2438 | if (!dumper->active) | ||
2439 | goto out; | ||
2440 | |||
2441 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2442 | if (dumper->cur_seq < log_first_seq) { | ||
2443 | /* messages are gone, move to first available one */ | ||
2444 | dumper->cur_seq = log_first_seq; | ||
2445 | dumper->cur_idx = log_first_idx; | ||
2446 | } | ||
2447 | |||
2448 | /* last entry */ | ||
2449 | if (dumper->cur_seq >= dumper->next_seq) { | ||
2450 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2451 | goto out; | ||
2452 | } | ||
2453 | |||
2454 | /* calculate length of entire buffer */ | ||
2455 | seq = dumper->cur_seq; | ||
2456 | idx = dumper->cur_idx; | ||
2457 | while (seq < dumper->next_seq) { | ||
2458 | struct log *msg = log_from_idx(idx); | ||
2459 | |||
2460 | l += msg_print_text(msg, true, NULL, 0); | ||
2461 | idx = log_next(idx); | ||
2462 | seq++; | ||
2463 | } | ||
2336 | 2464 | ||
2337 | s2 = log_buf + idx; | 2465 | /* move first record forward until length fits into the buffer */ |
2338 | l2 = log_next_idx - idx; | 2466 | seq = dumper->cur_seq; |
2467 | idx = dumper->cur_idx; | ||
2468 | while (l > size && seq < dumper->next_seq) { | ||
2469 | struct log *msg = log_from_idx(idx); | ||
2470 | |||
2471 | l -= msg_print_text(msg, true, NULL, 0); | ||
2472 | idx = log_next(idx); | ||
2473 | seq++; | ||
2474 | } | ||
2475 | |||
2476 | /* last message in next iteration */ | ||
2477 | next_seq = seq; | ||
2478 | next_idx = idx; | ||
2479 | |||
2480 | l = 0; | ||
2481 | while (seq < dumper->next_seq) { | ||
2482 | struct log *msg = log_from_idx(idx); | ||
2483 | |||
2484 | l += msg_print_text(msg, syslog, | ||
2485 | buf + l, size - l); | ||
2486 | |||
2487 | idx = log_next(idx); | ||
2488 | seq++; | ||
2339 | } | 2489 | } |
2490 | |||
2491 | dumper->next_seq = next_seq; | ||
2492 | dumper->next_idx = next_idx; | ||
2493 | ret = true; | ||
2340 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | 2494 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
2495 | out: | ||
2496 | if (len) | ||
2497 | *len = l; | ||
2498 | return ret; | ||
2499 | } | ||
2500 | EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); | ||
2341 | 2501 | ||
2342 | rcu_read_lock(); | 2502 | /** |
2343 | list_for_each_entry_rcu(dumper, &dump_list, list) | 2503 | * kmsg_dump_rewind - reset the iterator |
2344 | dumper->dump(dumper, reason, s1, l1, s2, l2); | 2504 | * @dumper: registered kmsg dumper |
2345 | rcu_read_unlock(); | 2505 | * |
2506 | * Reset the dumper's iterator so that kmsg_dump_get_line() and | ||
2507 | * kmsg_dump_get_buffer() can be called again and used multiple | ||
2508 | * times within the same dumper.dump() callback. | ||
2509 | */ | ||
2510 | void kmsg_dump_rewind(struct kmsg_dumper *dumper) | ||
2511 | { | ||
2512 | unsigned long flags; | ||
2513 | |||
2514 | raw_spin_lock_irqsave(&logbuf_lock, flags); | ||
2515 | dumper->cur_seq = clear_seq; | ||
2516 | dumper->cur_idx = clear_idx; | ||
2517 | dumper->next_seq = log_next_seq; | ||
2518 | dumper->next_idx = log_next_idx; | ||
2519 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | ||
2346 | } | 2520 | } |
2521 | EXPORT_SYMBOL_GPL(kmsg_dump_rewind); | ||
2347 | #endif | 2522 | #endif |
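
Taken together, the printk.c changes replace the old dump(dumper, reason, s1, l1, s2, l2) contract with an iterator-style API: kmsg_dump() seeds the per-dumper cursor and invokes ->dump(dumper, reason), which then pulls records via kmsg_dump_get_line() or kmsg_dump_get_buffer() and may restart with kmsg_dump_rewind(). A hedged sketch of a dumper written against the new interface; kmsg_dump_register() and the struct fields are taken from the existing kmsg_dump API and the code above, while backend_write() is purely hypothetical:

#include <linux/kmsg_dump.h>
#include <linux/module.h>

static char dump_buf[4096];

/* Hypothetical storage hook -- stands in for whatever a real dumper
 * (pstore, mtdoops, ramoops, ...) would write to. */
static void backend_write(const char *buf, size_t len)
{
}

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t len;

	/* Grab the youngest records, as many as fit per call ... */
	while (kmsg_dump_get_buffer(dumper, false, dump_buf,
				    sizeof(dump_buf), &len))
		backend_write(dump_buf, len);

	/* ... or rewind and walk the log again, one record at a time. */
	kmsg_dump_rewind(dumper);
	while (kmsg_dump_get_line(dumper, true, dump_buf,
				  sizeof(dump_buf), &len))
		backend_write(dump_buf, len);
}

static struct kmsg_dumper example_dumper = {
	.dump	= example_dump,
	/* .max_reason would limit which events reach us; see the check in kmsg_dump(). */
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}
module_init(example_dumper_init);
MODULE_LICENSE("GPL");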
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 0da7b88d92d0..3b0f1337f75b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -1397,6 +1397,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
1397 | rdp->qlen_lazy += rsp->qlen_lazy; | 1397 | rdp->qlen_lazy += rsp->qlen_lazy; |
1398 | rdp->qlen += rsp->qlen; | 1398 | rdp->qlen += rsp->qlen; |
1399 | rdp->n_cbs_adopted += rsp->qlen; | 1399 | rdp->n_cbs_adopted += rsp->qlen; |
1400 | if (rsp->qlen_lazy != rsp->qlen) | ||
1401 | rcu_idle_count_callbacks_posted(); | ||
1400 | rsp->qlen_lazy = 0; | 1402 | rsp->qlen_lazy = 0; |
1401 | rsp->qlen = 0; | 1403 | rsp->qlen = 0; |
1402 | 1404 | ||
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 7f5d138dedf5..ea056495783e 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -84,6 +84,20 @@ struct rcu_dynticks { | |||
84 | /* Process level is worth LLONG_MAX/2. */ | 84 | /* Process level is worth LLONG_MAX/2. */ |
85 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ | 85 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ |
86 | atomic_t dynticks; /* Even value for idle, else odd. */ | 86 | atomic_t dynticks; /* Even value for idle, else odd. */ |
87 | #ifdef CONFIG_RCU_FAST_NO_HZ | ||
88 | int dyntick_drain; /* Prepare-for-idle state variable. */ | ||
89 | unsigned long dyntick_holdoff; | ||
90 | /* No retries for the jiffy of failure. */ | ||
91 | struct timer_list idle_gp_timer; | ||
92 | /* Wake up CPU sleeping with callbacks. */ | ||
93 | unsigned long idle_gp_timer_expires; | ||
94 | /* When to wake up CPU (for repost). */ | ||
95 | bool idle_first_pass; /* First pass of attempt to go idle? */ | ||
96 | unsigned long nonlazy_posted; | ||
97 | /* # times non-lazy CBs posted to CPU. */ | ||
98 | unsigned long nonlazy_posted_snap; | ||
99 | /* idle-period nonlazy_posted snapshot. */ | ||
100 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | ||
87 | }; | 101 | }; |
88 | 102 | ||
89 | /* RCU's kthread states for tracing. */ | 103 | /* RCU's kthread states for tracing. */ |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 2411000d9869..5271a020887e 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -1886,8 +1886,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu) | |||
1886 | * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs | 1886 | * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs |
1887 | * any flavor of RCU. | 1887 | * any flavor of RCU. |
1888 | */ | 1888 | */ |
1889 | int rcu_needs_cpu(int cpu) | 1889 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
1890 | { | 1890 | { |
1891 | *delta_jiffies = ULONG_MAX; | ||
1891 | return rcu_cpu_has_callbacks(cpu); | 1892 | return rcu_cpu_has_callbacks(cpu); |
1892 | } | 1893 | } |
1893 | 1894 | ||
@@ -1962,41 +1963,6 @@ static void rcu_idle_count_callbacks_posted(void) | |||
1962 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ | 1963 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ |
1963 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ | 1964 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ |
1964 | 1965 | ||
1965 | /* Loop counter for rcu_prepare_for_idle(). */ | ||
1966 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); | ||
1967 | /* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */ | ||
1968 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | ||
1969 | /* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */ | ||
1970 | static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer); | ||
1971 | /* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */ | ||
1972 | static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires); | ||
1973 | /* Enable special processing on first attempt to enter dyntick-idle mode. */ | ||
1974 | static DEFINE_PER_CPU(bool, rcu_idle_first_pass); | ||
1975 | /* Running count of non-lazy callbacks posted, never decremented. */ | ||
1976 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted); | ||
1977 | /* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */ | ||
1978 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap); | ||
1979 | |||
1980 | /* | ||
1981 | * Allow the CPU to enter dyntick-idle mode if either: (1) There are no | ||
1982 | * callbacks on this CPU, (2) this CPU has not yet attempted to enter | ||
1983 | * dyntick-idle mode, or (3) this CPU is in the process of attempting to | ||
1984 | * enter dyntick-idle mode. Otherwise, if we have recently tried and failed | ||
1985 | * to enter dyntick-idle mode, we refuse to try to enter it. After all, | ||
1986 | * it is better to incur scheduling-clock interrupts than to spin | ||
1987 | * continuously for the same time duration! | ||
1988 | */ | ||
1989 | int rcu_needs_cpu(int cpu) | ||
1990 | { | ||
1991 | /* Flag a new idle sojourn to the idle-entry state machine. */ | ||
1992 | per_cpu(rcu_idle_first_pass, cpu) = 1; | ||
1993 | /* If no callbacks, RCU doesn't need the CPU. */ | ||
1994 | if (!rcu_cpu_has_callbacks(cpu)) | ||
1995 | return 0; | ||
1996 | /* Otherwise, RCU needs the CPU only if it recently tried and failed. */ | ||
1997 | return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies; | ||
1998 | } | ||
1999 | |||
2000 | /* | 1966 | /* |
2001 | * Does the specified flavor of RCU have non-lazy callbacks pending on | 1967 | * Does the specified flavor of RCU have non-lazy callbacks pending on |
2002 | * the specified CPU? Both RCU flavor and CPU are specified by the | 1968 | * the specified CPU? Both RCU flavor and CPU are specified by the |
@@ -2040,6 +2006,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu) | |||
2040 | } | 2006 | } |
2041 | 2007 | ||
2042 | /* | 2008 | /* |
2009 | * Allow the CPU to enter dyntick-idle mode if either: (1) There are no | ||
2010 | * callbacks on this CPU, (2) this CPU has not yet attempted to enter | ||
2011 | * dyntick-idle mode, or (3) this CPU is in the process of attempting to | ||
2012 | * enter dyntick-idle mode. Otherwise, if we have recently tried and failed | ||
2013 | * to enter dyntick-idle mode, we refuse to try to enter it. After all, | ||
2014 | * it is better to incur scheduling-clock interrupts than to spin | ||
2015 | * continuously for the same time duration! | ||
2016 | * | ||
2017 | * The delta_jiffies argument is used to store the time when RCU is | ||
2018 | * going to need the CPU again if it still has callbacks. The reason | ||
2019 | * for this is that rcu_prepare_for_idle() might need to post a timer, | ||
2020 | * but if so, it will do so after tick_nohz_stop_sched_tick() has set | ||
2021 | * the wakeup time for this CPU. This means that RCU's timer can be | ||
2022 | * delayed until the wakeup time, which defeats the purpose of posting | ||
2023 | * a timer. | ||
2024 | */ | ||
2025 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) | ||
2026 | { | ||
2027 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
2028 | |||
2029 | /* Flag a new idle sojourn to the idle-entry state machine. */ | ||
2030 | rdtp->idle_first_pass = 1; | ||
2031 | /* If no callbacks, RCU doesn't need the CPU. */ | ||
2032 | if (!rcu_cpu_has_callbacks(cpu)) { | ||
2033 | *delta_jiffies = ULONG_MAX; | ||
2034 | return 0; | ||
2035 | } | ||
2036 | if (rdtp->dyntick_holdoff == jiffies) { | ||
2037 | /* RCU recently tried and failed, so don't try again. */ | ||
2038 | *delta_jiffies = 1; | ||
2039 | return 1; | ||
2040 | } | ||
2041 | /* Set up for the possibility that RCU will post a timer. */ | ||
2042 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) | ||
2043 | *delta_jiffies = RCU_IDLE_GP_DELAY; | ||
2044 | else | ||
2045 | *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY; | ||
2046 | return 0; | ||
2047 | } | ||
2048 | |||
2049 | /* | ||
2043 | * Handler for smp_call_function_single(). The only point of this | 2050 | * Handler for smp_call_function_single(). The only point of this |
2044 | * handler is to wake the CPU up, so the handler does only tracing. | 2051 | * handler is to wake the CPU up, so the handler does only tracing. |
2045 | */ | 2052 | */ |
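
rcu_needs_cpu() now reports, through *delta_jiffies, how long the CPU may stay tickless before RCU wants it back, so the nohz code can bound its sleep up front instead of relying on a timer that rcu_prepare_for_idle() would only post after the wakeup time has been chosen. A rough sketch of how an idle-path caller might fold that hint in; the function and its name are illustrative, not the actual tick_nohz_stop_sched_tick() code:

/*
 * Illustrative only: clamp the next idle sleep, in jiffies, to what
 * RCU can tolerate on this CPU.
 */
static unsigned long idle_sleep_jiffies(int cpu, unsigned long max_sleep)
{
	unsigned long rcu_delta;

	if (rcu_needs_cpu(cpu, &rcu_delta))
		return 0;	/* RCU needs the scheduling-clock tick right now */

	/* Otherwise sleep no longer than RCU's lazy/non-lazy deadline. */
	return min(max_sleep, rcu_delta);
}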
@@ -2075,21 +2082,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in) | |||
2075 | */ | 2082 | */ |
2076 | static void rcu_prepare_for_idle_init(int cpu) | 2083 | static void rcu_prepare_for_idle_init(int cpu) |
2077 | { | 2084 | { |
2078 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 2085 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2079 | setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), | 2086 | |
2080 | rcu_idle_gp_timer_func, cpu); | 2087 | rdtp->dyntick_holdoff = jiffies - 1; |
2081 | per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1; | 2088 | setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu); |
2082 | per_cpu(rcu_idle_first_pass, cpu) = 1; | 2089 | rdtp->idle_gp_timer_expires = jiffies - 1; |
2090 | rdtp->idle_first_pass = 1; | ||
2083 | } | 2091 | } |
2084 | 2092 | ||
2085 | /* | 2093 | /* |
2086 | * Clean up for exit from idle. Because we are exiting from idle, there | 2094 | * Clean up for exit from idle. Because we are exiting from idle, there |
2087 | * is no longer any point to rcu_idle_gp_timer, so cancel it. This will | 2095 | * is no longer any point to ->idle_gp_timer, so cancel it. This will |
2088 | * do nothing if this timer is not active, so just cancel it unconditionally. | 2096 | * do nothing if this timer is not active, so just cancel it unconditionally. |
2089 | */ | 2097 | */ |
2090 | static void rcu_cleanup_after_idle(int cpu) | 2098 | static void rcu_cleanup_after_idle(int cpu) |
2091 | { | 2099 | { |
2092 | del_timer(&per_cpu(rcu_idle_gp_timer, cpu)); | 2100 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2101 | |||
2102 | del_timer(&rdtp->idle_gp_timer); | ||
2093 | trace_rcu_prep_idle("Cleanup after idle"); | 2103 | trace_rcu_prep_idle("Cleanup after idle"); |
2094 | } | 2104 | } |
2095 | 2105 | ||
@@ -2108,42 +2118,41 @@ static void rcu_cleanup_after_idle(int cpu) | |||
2108 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | 2118 | * Because it is not legal to invoke rcu_process_callbacks() with irqs |
2109 | * disabled, we do one pass of force_quiescent_state(), then do a | 2119 | * disabled, we do one pass of force_quiescent_state(), then do a |
2110 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked | 2120 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked |
2111 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. | 2121 | * later. The ->dyntick_drain field controls the sequencing. |
2112 | * | 2122 | * |
2113 | * The caller must have disabled interrupts. | 2123 | * The caller must have disabled interrupts. |
2114 | */ | 2124 | */ |
2115 | static void rcu_prepare_for_idle(int cpu) | 2125 | static void rcu_prepare_for_idle(int cpu) |
2116 | { | 2126 | { |
2117 | struct timer_list *tp; | 2127 | struct timer_list *tp; |
2128 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
2118 | 2129 | ||
2119 | /* | 2130 | /* |
2120 | * If this is an idle re-entry, for example, due to use of | 2131 | * If this is an idle re-entry, for example, due to use of |
2121 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle | 2132 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle |
2122 | * loop, then don't take any state-machine actions, unless the | 2133 | * loop, then don't take any state-machine actions, unless the |
2123 | * momentary exit from idle queued additional non-lazy callbacks. | 2134 | * momentary exit from idle queued additional non-lazy callbacks. |
2124 | * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks | 2135 | * Instead, repost the ->idle_gp_timer if this CPU has callbacks |
2125 | * pending. | 2136 | * pending. |
2126 | */ | 2137 | */ |
2127 | if (!per_cpu(rcu_idle_first_pass, cpu) && | 2138 | if (!rdtp->idle_first_pass && |
2128 | (per_cpu(rcu_nonlazy_posted, cpu) == | 2139 | (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) { |
2129 | per_cpu(rcu_nonlazy_posted_snap, cpu))) { | ||
2130 | if (rcu_cpu_has_callbacks(cpu)) { | 2140 | if (rcu_cpu_has_callbacks(cpu)) { |
2131 | tp = &per_cpu(rcu_idle_gp_timer, cpu); | 2141 | tp = &rdtp->idle_gp_timer; |
2132 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2142 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); |
2133 | } | 2143 | } |
2134 | return; | 2144 | return; |
2135 | } | 2145 | } |
2136 | per_cpu(rcu_idle_first_pass, cpu) = 0; | 2146 | rdtp->idle_first_pass = 0; |
2137 | per_cpu(rcu_nonlazy_posted_snap, cpu) = | 2147 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1; |
2138 | per_cpu(rcu_nonlazy_posted, cpu) - 1; | ||
2139 | 2148 | ||
2140 | /* | 2149 | /* |
2141 | * If there are no callbacks on this CPU, enter dyntick-idle mode. | 2150 | * If there are no callbacks on this CPU, enter dyntick-idle mode. |
2142 | * Also reset state to avoid prejudicing later attempts. | 2151 | * Also reset state to avoid prejudicing later attempts. |
2143 | */ | 2152 | */ |
2144 | if (!rcu_cpu_has_callbacks(cpu)) { | 2153 | if (!rcu_cpu_has_callbacks(cpu)) { |
2145 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 2154 | rdtp->dyntick_holdoff = jiffies - 1; |
2146 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 2155 | rdtp->dyntick_drain = 0; |
2147 | trace_rcu_prep_idle("No callbacks"); | 2156 | trace_rcu_prep_idle("No callbacks"); |
2148 | return; | 2157 | return; |
2149 | } | 2158 | } |
@@ -2152,36 +2161,37 @@ static void rcu_prepare_for_idle(int cpu) | |||
2152 | * If in holdoff mode, just return. We will presumably have | 2161 | * If in holdoff mode, just return. We will presumably have |
2153 | * refrained from disabling the scheduling-clock tick. | 2162 | * refrained from disabling the scheduling-clock tick. |
2154 | */ | 2163 | */ |
2155 | if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { | 2164 | if (rdtp->dyntick_holdoff == jiffies) { |
2156 | trace_rcu_prep_idle("In holdoff"); | 2165 | trace_rcu_prep_idle("In holdoff"); |
2157 | return; | 2166 | return; |
2158 | } | 2167 | } |
2159 | 2168 | ||
2160 | /* Check and update the rcu_dyntick_drain sequencing. */ | 2169 | /* Check and update the ->dyntick_drain sequencing. */ |
2161 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 2170 | if (rdtp->dyntick_drain <= 0) { |
2162 | /* First time through, initialize the counter. */ | 2171 | /* First time through, initialize the counter. */ |
2163 | per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; | 2172 | rdtp->dyntick_drain = RCU_IDLE_FLUSHES; |
2164 | } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && | 2173 | } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES && |
2165 | !rcu_pending(cpu) && | 2174 | !rcu_pending(cpu) && |
2166 | !local_softirq_pending()) { | 2175 | !local_softirq_pending()) { |
2167 | /* Can we go dyntick-idle despite still having callbacks? */ | 2176 | /* Can we go dyntick-idle despite still having callbacks? */ |
2168 | trace_rcu_prep_idle("Dyntick with callbacks"); | 2177 | rdtp->dyntick_drain = 0; |
2169 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 2178 | rdtp->dyntick_holdoff = jiffies; |
2170 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; | 2179 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) { |
2171 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) | 2180 | trace_rcu_prep_idle("Dyntick with callbacks"); |
2172 | per_cpu(rcu_idle_gp_timer_expires, cpu) = | 2181 | rdtp->idle_gp_timer_expires = |
2173 | jiffies + RCU_IDLE_GP_DELAY; | 2182 | jiffies + RCU_IDLE_GP_DELAY; |
2174 | else | 2183 | } else { |
2175 | per_cpu(rcu_idle_gp_timer_expires, cpu) = | 2184 | rdtp->idle_gp_timer_expires = |
2176 | jiffies + RCU_IDLE_LAZY_GP_DELAY; | 2185 | jiffies + RCU_IDLE_LAZY_GP_DELAY; |
2177 | tp = &per_cpu(rcu_idle_gp_timer, cpu); | 2186 | trace_rcu_prep_idle("Dyntick with lazy callbacks"); |
2178 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2187 | } |
2179 | per_cpu(rcu_nonlazy_posted_snap, cpu) = | 2188 | tp = &rdtp->idle_gp_timer; |
2180 | per_cpu(rcu_nonlazy_posted, cpu); | 2189 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); |
2190 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; | ||
2181 | return; /* Nothing more to do immediately. */ | 2191 | return; /* Nothing more to do immediately. */ |
2182 | } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 2192 | } else if (--(rdtp->dyntick_drain) <= 0) { |
2183 | /* We have hit the limit, so time to give up. */ | 2193 | /* We have hit the limit, so time to give up. */ |
2184 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; | 2194 | rdtp->dyntick_holdoff = jiffies; |
2185 | trace_rcu_prep_idle("Begin holdoff"); | 2195 | trace_rcu_prep_idle("Begin holdoff"); |
2186 | invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ | 2196 | invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ |
2187 | return; | 2197 | return; |
@@ -2227,7 +2237,7 @@ static void rcu_prepare_for_idle(int cpu) | |||
2227 | */ | 2237 | */ |
2228 | static void rcu_idle_count_callbacks_posted(void) | 2238 | static void rcu_idle_count_callbacks_posted(void) |
2229 | { | 2239 | { |
2230 | __this_cpu_add(rcu_nonlazy_posted, 1); | 2240 | __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); |
2231 | } | 2241 | } |
2232 | 2242 | ||
2233 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ | 2243 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
@@ -2238,11 +2248,12 @@ static void rcu_idle_count_callbacks_posted(void) | |||
2238 | 2248 | ||
2239 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) | 2249 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) |
2240 | { | 2250 | { |
2241 | struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu); | 2251 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2252 | struct timer_list *tltp = &rdtp->idle_gp_timer; | ||
2242 | 2253 | ||
2243 | sprintf(cp, "drain=%d %c timer=%lu", | 2254 | sprintf(cp, "drain=%d %c timer=%lu", |
2244 | per_cpu(rcu_dyntick_drain, cpu), | 2255 | rdtp->dyntick_drain, |
2245 | per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.', | 2256 | rdtp->dyntick_holdoff == jiffies ? 'H' : '.', |
2246 | timer_pending(tltp) ? tltp->expires - jiffies : -1); | 2257 | timer_pending(tltp) ? tltp->expires - jiffies : -1); |
2247 | } | 2258 | } |
2248 | 2259 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 39eb6011bc38..d5594a4268d4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -142,9 +142,8 @@ const_debug unsigned int sysctl_sched_features = | |||
142 | #define SCHED_FEAT(name, enabled) \ | 142 | #define SCHED_FEAT(name, enabled) \ |
143 | #name , | 143 | #name , |
144 | 144 | ||
145 | static __read_mostly char *sched_feat_names[] = { | 145 | static const char * const sched_feat_names[] = { |
146 | #include "features.h" | 146 | #include "features.h" |
147 | NULL | ||
148 | }; | 147 | }; |
149 | 148 | ||
150 | #undef SCHED_FEAT | 149 | #undef SCHED_FEAT |
@@ -2517,25 +2516,32 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load, | |||
2517 | sched_avg_update(this_rq); | 2516 | sched_avg_update(this_rq); |
2518 | } | 2517 | } |
2519 | 2518 | ||
2519 | #ifdef CONFIG_NO_HZ | ||
2520 | /* | ||
2521 | * There is no sane way to deal with nohz on smp when using jiffies because the | ||
2522 | * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading | ||
2523 | * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. | ||
2524 | * | ||
2525 | * Therefore we cannot use the delta approach from the regular tick since that | ||
2526 | * would seriously skew the load calculation. However we'll make do for those | ||
2527 | * updates happening while idle (nohz_idle_balance) or coming out of idle | ||
2528 | * (tick_nohz_idle_exit). | ||
2529 | * | ||
2530 | * This means we might still be one tick off for nohz periods. | ||
2531 | */ | ||
2532 | |||
2520 | /* | 2533 | /* |
2521 | * Called from nohz_idle_balance() to update the load ratings before doing the | 2534 | * Called from nohz_idle_balance() to update the load ratings before doing the |
2522 | * idle balance. | 2535 | * idle balance. |
2523 | */ | 2536 | */ |
2524 | void update_idle_cpu_load(struct rq *this_rq) | 2537 | void update_idle_cpu_load(struct rq *this_rq) |
2525 | { | 2538 | { |
2526 | unsigned long curr_jiffies = jiffies; | 2539 | unsigned long curr_jiffies = ACCESS_ONCE(jiffies); |
2527 | unsigned long load = this_rq->load.weight; | 2540 | unsigned long load = this_rq->load.weight; |
2528 | unsigned long pending_updates; | 2541 | unsigned long pending_updates; |
2529 | 2542 | ||
2530 | /* | 2543 | /* |
2531 | * Bloody broken means of dealing with nohz, but better than nothing.. | 2544 | * bail if there's load or we're actually up-to-date. |
2532 | * jiffies is updated by one cpu, another cpu can drift wrt the jiffy | ||
2533 | * update and see 0 difference the one time and 2 the next, even though | ||
2534 | * we ticked at roughtly the same rate. | ||
2535 | * | ||
2536 | * Hence we only use this from nohz_idle_balance() and skip this | ||
2537 | * nonsense when called from the scheduler_tick() since that's | ||
2538 | * guaranteed a stable rate. | ||
2539 | */ | 2545 | */ |
2540 | if (load || curr_jiffies == this_rq->last_load_update_tick) | 2546 | if (load || curr_jiffies == this_rq->last_load_update_tick) |
2541 | return; | 2547 | return; |
@@ -2547,12 +2553,38 @@ void update_idle_cpu_load(struct rq *this_rq) | |||
2547 | } | 2553 | } |
2548 | 2554 | ||
2549 | /* | 2555 | /* |
2556 | * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed. | ||
2557 | */ | ||
2558 | void update_cpu_load_nohz(void) | ||
2559 | { | ||
2560 | struct rq *this_rq = this_rq(); | ||
2561 | unsigned long curr_jiffies = ACCESS_ONCE(jiffies); | ||
2562 | unsigned long pending_updates; | ||
2563 | |||
2564 | if (curr_jiffies == this_rq->last_load_update_tick) | ||
2565 | return; | ||
2566 | |||
2567 | raw_spin_lock(&this_rq->lock); | ||
2568 | pending_updates = curr_jiffies - this_rq->last_load_update_tick; | ||
2569 | if (pending_updates) { | ||
2570 | this_rq->last_load_update_tick = curr_jiffies; | ||
2571 | /* | ||
2572 | * We were idle, this means load 0, the current load might be | ||
2573 | * !0 due to remote wakeups and the sort. | ||
2574 | */ | ||
2575 | __update_cpu_load(this_rq, 0, pending_updates); | ||
2576 | } | ||
2577 | raw_spin_unlock(&this_rq->lock); | ||
2578 | } | ||
2579 | #endif /* CONFIG_NO_HZ */ | ||
2580 | |||
2581 | /* | ||
2550 | * Called from scheduler_tick() | 2582 | * Called from scheduler_tick() |
2551 | */ | 2583 | */ |
2552 | static void update_cpu_load_active(struct rq *this_rq) | 2584 | static void update_cpu_load_active(struct rq *this_rq) |
2553 | { | 2585 | { |
2554 | /* | 2586 | /* |
2555 | * See the mess in update_idle_cpu_load(). | 2587 | * See the mess around update_idle_cpu_load() / update_cpu_load_nohz(). |
2556 | */ | 2588 | */ |
2557 | this_rq->last_load_update_tick = jiffies; | 2589 | this_rq->last_load_update_tick = jiffies; |
2558 | __update_cpu_load(this_rq, this_rq->load.weight, 1); | 2590 | __update_cpu_load(this_rq, this_rq->load.weight, 1); |
@@ -4982,7 +5014,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) | |||
4982 | p->sched_class->set_cpus_allowed(p, new_mask); | 5014 | p->sched_class->set_cpus_allowed(p, new_mask); |
4983 | 5015 | ||
4984 | cpumask_copy(&p->cpus_allowed, new_mask); | 5016 | cpumask_copy(&p->cpus_allowed, new_mask); |
4985 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); | 5017 | p->nr_cpus_allowed = cpumask_weight(new_mask); |
4986 | } | 5018 | } |
4987 | 5019 | ||
4988 | /* | 5020 | /* |
@@ -5524,15 +5556,20 @@ static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ | |||
5524 | 5556 | ||
5525 | #ifdef CONFIG_SCHED_DEBUG | 5557 | #ifdef CONFIG_SCHED_DEBUG |
5526 | 5558 | ||
5527 | static __read_mostly int sched_domain_debug_enabled; | 5559 | static __read_mostly int sched_debug_enabled; |
5528 | 5560 | ||
5529 | static int __init sched_domain_debug_setup(char *str) | 5561 | static int __init sched_debug_setup(char *str) |
5530 | { | 5562 | { |
5531 | sched_domain_debug_enabled = 1; | 5563 | sched_debug_enabled = 1; |
5532 | 5564 | ||
5533 | return 0; | 5565 | return 0; |
5534 | } | 5566 | } |
5535 | early_param("sched_debug", sched_domain_debug_setup); | 5567 | early_param("sched_debug", sched_debug_setup); |
5568 | |||
5569 | static inline bool sched_debug(void) | ||
5570 | { | ||
5571 | return sched_debug_enabled; | ||
5572 | } | ||
5536 | 5573 | ||
5537 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 5574 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
5538 | struct cpumask *groupmask) | 5575 | struct cpumask *groupmask) |
@@ -5572,7 +5609,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
5572 | break; | 5609 | break; |
5573 | } | 5610 | } |
5574 | 5611 | ||
5575 | if (!group->sgp->power) { | 5612 | /* |
5613 | * Even though we initialize ->power to something semi-sane, | ||
5614 | * we leave power_orig unset. This allows us to detect if | ||
5615 | * domain iteration is still funny without causing /0 traps. | ||
5616 | */ | ||
5617 | if (!group->sgp->power_orig) { | ||
5576 | printk(KERN_CONT "\n"); | 5618 | printk(KERN_CONT "\n"); |
5577 | printk(KERN_ERR "ERROR: domain->cpu_power not " | 5619 | printk(KERN_ERR "ERROR: domain->cpu_power not " |
5578 | "set\n"); | 5620 | "set\n"); |
@@ -5620,7 +5662,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
5620 | { | 5662 | { |
5621 | int level = 0; | 5663 | int level = 0; |
5622 | 5664 | ||
5623 | if (!sched_domain_debug_enabled) | 5665 | if (!sched_debug_enabled) |
5624 | return; | 5666 | return; |
5625 | 5667 | ||
5626 | if (!sd) { | 5668 | if (!sd) { |
@@ -5641,6 +5683,10 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
5641 | } | 5683 | } |
5642 | #else /* !CONFIG_SCHED_DEBUG */ | 5684 | #else /* !CONFIG_SCHED_DEBUG */ |
5643 | # define sched_domain_debug(sd, cpu) do { } while (0) | 5685 | # define sched_domain_debug(sd, cpu) do { } while (0) |
5686 | static inline bool sched_debug(void) | ||
5687 | { | ||
5688 | return false; | ||
5689 | } | ||
5644 | #endif /* CONFIG_SCHED_DEBUG */ | 5690 | #endif /* CONFIG_SCHED_DEBUG */ |
5645 | 5691 | ||
5646 | static int sd_degenerate(struct sched_domain *sd) | 5692 | static int sd_degenerate(struct sched_domain *sd) |
@@ -5962,6 +6008,44 @@ struct sched_domain_topology_level { | |||
5962 | struct sd_data data; | 6008 | struct sd_data data; |
5963 | }; | 6009 | }; |
5964 | 6010 | ||
6011 | /* | ||
6012 | * Build an iteration mask that can exclude certain CPUs from the upwards | ||
6013 | * domain traversal. | ||
6014 | * | ||
6015 | * Asymmetric node setups can result in situations where the domain tree is of | ||
6016 | * unequal depth, make sure to skip domains that already cover the entire | ||
6017 | * range. | ||
6018 | * | ||
6019 | * In that case build_sched_domains() will have terminated the iteration early | ||
6020 | * and our sibling sd spans will be empty. Domains should always include the | ||
6021 | * cpu they're built on, so check that. | ||
6022 | * | ||
6023 | */ | ||
6024 | static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) | ||
6025 | { | ||
6026 | const struct cpumask *span = sched_domain_span(sd); | ||
6027 | struct sd_data *sdd = sd->private; | ||
6028 | struct sched_domain *sibling; | ||
6029 | int i; | ||
6030 | |||
6031 | for_each_cpu(i, span) { | ||
6032 | sibling = *per_cpu_ptr(sdd->sd, i); | ||
6033 | if (!cpumask_test_cpu(i, sched_domain_span(sibling))) | ||
6034 | continue; | ||
6035 | |||
6036 | cpumask_set_cpu(i, sched_group_mask(sg)); | ||
6037 | } | ||
6038 | } | ||
6039 | |||
6040 | /* | ||
6041 | * Return the canonical balance cpu for this group, this is the first cpu | ||
6042 | * of this group that's also in the iteration mask. | ||
6043 | */ | ||
6044 | int group_balance_cpu(struct sched_group *sg) | ||
6045 | { | ||
6046 | return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); | ||
6047 | } | ||
6048 | |||
5965 | static int | 6049 | static int |
5966 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) | 6050 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) |
5967 | { | 6051 | { |
@@ -5980,6 +6064,12 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
5980 | if (cpumask_test_cpu(i, covered)) | 6064 | if (cpumask_test_cpu(i, covered)) |
5981 | continue; | 6065 | continue; |
5982 | 6066 | ||
6067 | child = *per_cpu_ptr(sdd->sd, i); | ||
6068 | |||
6069 | /* See the comment near build_group_mask(). */ | ||
6070 | if (!cpumask_test_cpu(i, sched_domain_span(child))) | ||
6071 | continue; | ||
6072 | |||
5983 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), | 6073 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
5984 | GFP_KERNEL, cpu_to_node(cpu)); | 6074 | GFP_KERNEL, cpu_to_node(cpu)); |
5985 | 6075 | ||
@@ -5987,8 +6077,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
5987 | goto fail; | 6077 | goto fail; |
5988 | 6078 | ||
5989 | sg_span = sched_group_cpus(sg); | 6079 | sg_span = sched_group_cpus(sg); |
5990 | |||
5991 | child = *per_cpu_ptr(sdd->sd, i); | ||
5992 | if (child->child) { | 6080 | if (child->child) { |
5993 | child = child->child; | 6081 | child = child->child; |
5994 | cpumask_copy(sg_span, sched_domain_span(child)); | 6082 | cpumask_copy(sg_span, sched_domain_span(child)); |
@@ -5997,10 +6085,24 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) | |||
5997 | 6085 | ||
5998 | cpumask_or(covered, covered, sg_span); | 6086 | cpumask_or(covered, covered, sg_span); |
5999 | 6087 | ||
6000 | sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span)); | 6088 | sg->sgp = *per_cpu_ptr(sdd->sgp, i); |
6001 | atomic_inc(&sg->sgp->ref); | 6089 | if (atomic_inc_return(&sg->sgp->ref) == 1) |
6090 | build_group_mask(sd, sg); | ||
6002 | 6091 | ||
6003 | if (cpumask_test_cpu(cpu, sg_span)) | 6092 | /* |
6093 | * Initialize sgp->power such that even if we mess up the | ||
6094 | * domains and no possible iteration will get us here, we won't | ||
6095 | * die on a /0 trap. | ||
6096 | */ | ||
6097 | sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); | ||
6098 | |||
6099 | /* | ||
6100 | * Make sure the first group of this domain contains the | ||
6101 | * canonical balance cpu. Otherwise the sched_domain iteration | ||
6102 | * breaks. See update_sg_lb_stats(). | ||
6103 | */ | ||
6104 | if ((!groups && cpumask_test_cpu(cpu, sg_span)) || | ||
6105 | group_balance_cpu(sg) == cpu) | ||
6004 | groups = sg; | 6106 | groups = sg; |
6005 | 6107 | ||
6006 | if (!first) | 6108 | if (!first) |
@@ -6074,6 +6176,7 @@ build_sched_groups(struct sched_domain *sd, int cpu) | |||
6074 | 6176 | ||
6075 | cpumask_clear(sched_group_cpus(sg)); | 6177 | cpumask_clear(sched_group_cpus(sg)); |
6076 | sg->sgp->power = 0; | 6178 | sg->sgp->power = 0; |
6179 | cpumask_setall(sched_group_mask(sg)); | ||
6077 | 6180 | ||
6078 | for_each_cpu(j, span) { | 6181 | for_each_cpu(j, span) { |
6079 | if (get_group(j, sdd, NULL) != group) | 6182 | if (get_group(j, sdd, NULL) != group) |
@@ -6115,7 +6218,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
6115 | sg = sg->next; | 6218 | sg = sg->next; |
6116 | } while (sg != sd->groups); | 6219 | } while (sg != sd->groups); |
6117 | 6220 | ||
6118 | if (cpu != group_first_cpu(sg)) | 6221 | if (cpu != group_balance_cpu(sg)) |
6119 | return; | 6222 | return; |
6120 | 6223 | ||
6121 | update_group_power(sd, cpu); | 6224 | update_group_power(sd, cpu); |
@@ -6165,11 +6268,8 @@ int sched_domain_level_max; | |||
6165 | 6268 | ||
6166 | static int __init setup_relax_domain_level(char *str) | 6269 | static int __init setup_relax_domain_level(char *str) |
6167 | { | 6270 | { |
6168 | unsigned long val; | 6271 | if (kstrtoint(str, 0, &default_relax_domain_level)) |
6169 | 6272 | pr_warn("Unable to set relax_domain_level\n"); | |
6170 | val = simple_strtoul(str, NULL, 0); | ||
6171 | if (val < sched_domain_level_max) | ||
6172 | default_relax_domain_level = val; | ||
6173 | 6273 | ||
6174 | return 1; | 6274 | return 1; |
6175 | } | 6275 | } |
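
setup_relax_domain_level() now parses with kstrtoint(), which rejects malformed input and warns about it, rather than simple_strtoul() silently yielding 0; the comparison against sched_domain_level_max is dropped, presumably because that limit is not yet known when early params are parsed. The same conversion idiom as a standalone illustration, with a hypothetical parameter name:

static int example_level;

static int __init example_level_setup(char *str)
{
	/* kstrtoint() returns -EINVAL/-ERANGE on bad input instead of faking a 0. */
	if (kstrtoint(str, 0, &example_level))
		pr_warn("Unable to set example_level\n");
	return 1;
}
early_param("example_level", example_level_setup);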
@@ -6279,14 +6379,13 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol | |||
6279 | #ifdef CONFIG_NUMA | 6379 | #ifdef CONFIG_NUMA |
6280 | 6380 | ||
6281 | static int sched_domains_numa_levels; | 6381 | static int sched_domains_numa_levels; |
6282 | static int sched_domains_numa_scale; | ||
6283 | static int *sched_domains_numa_distance; | 6382 | static int *sched_domains_numa_distance; |
6284 | static struct cpumask ***sched_domains_numa_masks; | 6383 | static struct cpumask ***sched_domains_numa_masks; |
6285 | static int sched_domains_curr_level; | 6384 | static int sched_domains_curr_level; |
6286 | 6385 | ||
6287 | static inline int sd_local_flags(int level) | 6386 | static inline int sd_local_flags(int level) |
6288 | { | 6387 | { |
6289 | if (sched_domains_numa_distance[level] > REMOTE_DISTANCE) | 6388 | if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE) |
6290 | return 0; | 6389 | return 0; |
6291 | 6390 | ||
6292 | return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; | 6391 | return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; |
@@ -6344,6 +6443,42 @@ static const struct cpumask *sd_numa_mask(int cpu) | |||
6344 | return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; | 6443 | return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; |
6345 | } | 6444 | } |
6346 | 6445 | ||
6446 | static void sched_numa_warn(const char *str) | ||
6447 | { | ||
6448 | static int done = false; | ||
6449 | int i,j; | ||
6450 | |||
6451 | if (done) | ||
6452 | return; | ||
6453 | |||
6454 | done = true; | ||
6455 | |||
6456 | printk(KERN_WARNING "ERROR: %s\n\n", str); | ||
6457 | |||
6458 | for (i = 0; i < nr_node_ids; i++) { | ||
6459 | printk(KERN_WARNING " "); | ||
6460 | for (j = 0; j < nr_node_ids; j++) | ||
6461 | printk(KERN_CONT "%02d ", node_distance(i,j)); | ||
6462 | printk(KERN_CONT "\n"); | ||
6463 | } | ||
6464 | printk(KERN_WARNING "\n"); | ||
6465 | } | ||
6466 | |||
6467 | static bool find_numa_distance(int distance) | ||
6468 | { | ||
6469 | int i; | ||
6470 | |||
6471 | if (distance == node_distance(0, 0)) | ||
6472 | return true; | ||
6473 | |||
6474 | for (i = 0; i < sched_domains_numa_levels; i++) { | ||
6475 | if (sched_domains_numa_distance[i] == distance) | ||
6476 | return true; | ||
6477 | } | ||
6478 | |||
6479 | return false; | ||
6480 | } | ||
6481 | |||
6347 | static void sched_init_numa(void) | 6482 | static void sched_init_numa(void) |
6348 | { | 6483 | { |
6349 | int next_distance, curr_distance = node_distance(0, 0); | 6484 | int next_distance, curr_distance = node_distance(0, 0); |
@@ -6351,7 +6486,6 @@ static void sched_init_numa(void) | |||
6351 | int level = 0; | 6486 | int level = 0; |
6352 | int i, j, k; | 6487 | int i, j, k; |
6353 | 6488 | ||
6354 | sched_domains_numa_scale = curr_distance; | ||
6355 | sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); | 6489 | sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); |
6356 | if (!sched_domains_numa_distance) | 6490 | if (!sched_domains_numa_distance) |
6357 | return; | 6491 | return; |
@@ -6362,23 +6496,41 @@ static void sched_init_numa(void) | |||
6362 | * | 6496 | * |
6363 | * Assumes node_distance(0,j) includes all distances in | 6497 | * Assumes node_distance(0,j) includes all distances in |
6364 | * node_distance(i,j) in order to avoid cubic time. | 6498 | * node_distance(i,j) in order to avoid cubic time. |
6365 | * | ||
6366 | * XXX: could be optimized to O(n log n) by using sort() | ||
6367 | */ | 6499 | */ |
6368 | next_distance = curr_distance; | 6500 | next_distance = curr_distance; |
6369 | for (i = 0; i < nr_node_ids; i++) { | 6501 | for (i = 0; i < nr_node_ids; i++) { |
6370 | for (j = 0; j < nr_node_ids; j++) { | 6502 | for (j = 0; j < nr_node_ids; j++) { |
6371 | int distance = node_distance(0, j); | 6503 | for (k = 0; k < nr_node_ids; k++) { |
6372 | if (distance > curr_distance && | 6504 | int distance = node_distance(i, k); |
6373 | (distance < next_distance || | 6505 | |
6374 | next_distance == curr_distance)) | 6506 | if (distance > curr_distance && |
6375 | next_distance = distance; | 6507 | (distance < next_distance || |
6508 | next_distance == curr_distance)) | ||
6509 | next_distance = distance; | ||
6510 | |||
6511 | /* | ||
6512 | * While not a strong assumption it would be nice to know | ||
6513 | * about cases where if node A is connected to B, B is not | ||
6514 | * equally connected to A. | ||
6515 | */ | ||
6516 | if (sched_debug() && node_distance(k, i) != distance) | ||
6517 | sched_numa_warn("Node-distance not symmetric"); | ||
6518 | |||
6519 | if (sched_debug() && i && !find_numa_distance(distance)) | ||
6520 | sched_numa_warn("Node-0 not representative"); | ||
6521 | } | ||
6522 | if (next_distance != curr_distance) { | ||
6523 | sched_domains_numa_distance[level++] = next_distance; | ||
6524 | sched_domains_numa_levels = level; | ||
6525 | curr_distance = next_distance; | ||
6526 | } else break; | ||
6376 | } | 6527 | } |
6377 | if (next_distance != curr_distance) { | 6528 | |
6378 | sched_domains_numa_distance[level++] = next_distance; | 6529 | /* |
6379 | sched_domains_numa_levels = level; | 6530 | * In case of sched_debug() we verify the above assumption. |
6380 | curr_distance = next_distance; | 6531 | */ |
6381 | } else break; | 6532 | if (!sched_debug()) |
6533 | break; | ||
6382 | } | 6534 | } |
6383 | /* | 6535 | /* |
6384 | * 'level' contains the number of unique distances, excluding the | 6536 | * 'level' contains the number of unique distances, excluding the |
@@ -6403,7 +6555,7 @@ static void sched_init_numa(void) | |||
6403 | return; | 6555 | return; |
6404 | 6556 | ||
6405 | for (j = 0; j < nr_node_ids; j++) { | 6557 | for (j = 0; j < nr_node_ids; j++) { |
6406 | struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j); | 6558 | struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); |
6407 | if (!mask) | 6559 | if (!mask) |
6408 | return; | 6560 | return; |
6409 | 6561 | ||
@@ -6490,7 +6642,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map) | |||
6490 | 6642 | ||
6491 | *per_cpu_ptr(sdd->sg, j) = sg; | 6643 | *per_cpu_ptr(sdd->sg, j) = sg; |
6492 | 6644 | ||
6493 | sgp = kzalloc_node(sizeof(struct sched_group_power), | 6645 | sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(), |
6494 | GFP_KERNEL, cpu_to_node(j)); | 6646 | GFP_KERNEL, cpu_to_node(j)); |
6495 | if (!sgp) | 6647 | if (!sgp) |
6496 | return -ENOMEM; | 6648 | return -ENOMEM; |
@@ -6543,7 +6695,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, | |||
6543 | if (!sd) | 6695 | if (!sd) |
6544 | return child; | 6696 | return child; |
6545 | 6697 | ||
6546 | set_domain_attribute(sd, attr); | ||
6547 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); | 6698 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); |
6548 | if (child) { | 6699 | if (child) { |
6549 | sd->level = child->level + 1; | 6700 | sd->level = child->level + 1; |
@@ -6551,6 +6702,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, | |||
6551 | child->parent = sd; | 6702 | child->parent = sd; |
6552 | } | 6703 | } |
6553 | sd->child = child; | 6704 | sd->child = child; |
6705 | set_domain_attribute(sd, attr); | ||
6554 | 6706 | ||
6555 | return sd; | 6707 | return sd; |
6556 | } | 6708 | } |
@@ -6691,7 +6843,6 @@ static int init_sched_domains(const struct cpumask *cpu_map) | |||
6691 | if (!doms_cur) | 6843 | if (!doms_cur) |
6692 | doms_cur = &fallback_doms; | 6844 | doms_cur = &fallback_doms; |
6693 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); | 6845 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
6694 | dattr_cur = NULL; | ||
6695 | err = build_sched_domains(doms_cur[0], NULL); | 6846 | err = build_sched_domains(doms_cur[0], NULL); |
6696 | register_sched_domain_sysctl(); | 6847 | register_sched_domain_sysctl(); |
6697 | 6848 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 940e6d17cf96..c099cc6eebe3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | |||
2703 | int want_sd = 1; | 2703 | int want_sd = 1; |
2704 | int sync = wake_flags & WF_SYNC; | 2704 | int sync = wake_flags & WF_SYNC; |
2705 | 2705 | ||
2706 | if (p->rt.nr_cpus_allowed == 1) | 2706 | if (p->nr_cpus_allowed == 1) |
2707 | return prev_cpu; | 2707 | return prev_cpu; |
2708 | 2708 | ||
2709 | if (sd_flag & SD_BALANCE_WAKE) { | 2709 | if (sd_flag & SD_BALANCE_WAKE) { |
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) | |||
3503 | unsigned long scale_rt_power(int cpu) | 3503 | unsigned long scale_rt_power(int cpu) |
3504 | { | 3504 | { |
3505 | struct rq *rq = cpu_rq(cpu); | 3505 | struct rq *rq = cpu_rq(cpu); |
3506 | u64 total, available; | 3506 | u64 total, available, age_stamp, avg; |
3507 | 3507 | ||
3508 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | 3508 | /* |
3509 | * Since we're reading these variables without serialization make sure | ||
3510 | * we read them once before doing sanity checks on them. | ||
3511 | */ | ||
3512 | age_stamp = ACCESS_ONCE(rq->age_stamp); | ||
3513 | avg = ACCESS_ONCE(rq->rt_avg); | ||
3514 | |||
3515 | total = sched_avg_period() + (rq->clock - age_stamp); | ||
3509 | 3516 | ||
3510 | if (unlikely(total < rq->rt_avg)) { | 3517 | if (unlikely(total < avg)) { |
3511 | /* Ensures that power won't end up being negative */ | 3518 | /* Ensures that power won't end up being negative */ |
3512 | available = 0; | 3519 | available = 0; |
3513 | } else { | 3520 | } else { |
3514 | available = total - rq->rt_avg; | 3521 | available = total - avg; |
3515 | } | 3522 | } |
3516 | 3523 | ||
3517 | if (unlikely((s64)total < SCHED_POWER_SCALE)) | 3524 | if (unlikely((s64)total < SCHED_POWER_SCALE)) |
@@ -3574,13 +3581,28 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
3574 | 3581 | ||
3575 | power = 0; | 3582 | power = 0; |
3576 | 3583 | ||
3577 | group = child->groups; | 3584 | if (child->flags & SD_OVERLAP) { |
3578 | do { | 3585 | /* |
3579 | power += group->sgp->power; | 3586 | * SD_OVERLAP domains cannot assume that child groups |
3580 | group = group->next; | 3587 | * span the current group. |
3581 | } while (group != child->groups); | 3588 | */ |
3582 | 3589 | ||
3583 | sdg->sgp->power = power; | 3590 | for_each_cpu(cpu, sched_group_cpus(sdg)) |
3591 | power += power_of(cpu); | ||
3592 | } else { | ||
3593 | /* | ||
3594 | * !SD_OVERLAP domains can assume that child groups | ||
3595 | * span the current group. | ||
3596 | */ | ||
3597 | |||
3598 | group = child->groups; | ||
3599 | do { | ||
3600 | power += group->sgp->power; | ||
3601 | group = group->next; | ||
3602 | } while (group != child->groups); | ||
3603 | } | ||
3604 | |||
3605 | sdg->sgp->power_orig = sdg->sgp->power = power; | ||
3584 | } | 3606 | } |
3585 | 3607 | ||
3586 | /* | 3608 | /* |
@@ -3610,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | |||
3610 | 3632 | ||
3611 | /** | 3633 | /** |
3612 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. | 3634 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. |
3613 | * @sd: The sched_domain whose statistics are to be updated. | 3635 | * @env: The load balancing environment. |
3614 | * @group: sched_group whose statistics are to be updated. | 3636 | * @group: sched_group whose statistics are to be updated. |
3615 | * @load_idx: Load index of sched_domain of this_cpu for load calc. | 3637 | * @load_idx: Load index of sched_domain of this_cpu for load calc. |
3616 | * @local_group: Does group contain this_cpu. | 3638 | * @local_group: Does group contain this_cpu. |
@@ -3630,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
3630 | int i; | 3652 | int i; |
3631 | 3653 | ||
3632 | if (local_group) | 3654 | if (local_group) |
3633 | balance_cpu = group_first_cpu(group); | 3655 | balance_cpu = group_balance_cpu(group); |
3634 | 3656 | ||
3635 | /* Tally up the load of all CPUs in the group */ | 3657 | /* Tally up the load of all CPUs in the group */ |
3636 | max_cpu_load = 0; | 3658 | max_cpu_load = 0; |
@@ -3645,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
3645 | 3667 | ||
3646 | /* Bias balancing toward cpus of our domain */ | 3668 | /* Bias balancing toward cpus of our domain */ |
3647 | if (local_group) { | 3669 | if (local_group) { |
3648 | if (idle_cpu(i) && !first_idle_cpu) { | 3670 | if (idle_cpu(i) && !first_idle_cpu && |
3671 | cpumask_test_cpu(i, sched_group_mask(group))) { | ||
3649 | first_idle_cpu = 1; | 3672 | first_idle_cpu = 1; |
3650 | balance_cpu = i; | 3673 | balance_cpu = i; |
3651 | } | 3674 | } |
@@ -3719,11 +3742,10 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
3719 | 3742 | ||
3720 | /** | 3743 | /** |
3721 | * update_sd_pick_busiest - return 1 on busiest group | 3744 | * update_sd_pick_busiest - return 1 on busiest group |
3722 | * @sd: sched_domain whose statistics are to be checked | 3745 | * @env: The load balancing environment. |
3723 | * @sds: sched_domain statistics | 3746 | * @sds: sched_domain statistics |
3724 | * @sg: sched_group candidate to be checked for being the busiest | 3747 | * @sg: sched_group candidate to be checked for being the busiest |
3725 | * @sgs: sched_group statistics | 3748 | * @sgs: sched_group statistics |
3726 | * @this_cpu: the current cpu | ||
3727 | * | 3749 | * |
3728 | * Determine if @sg is a busier group than the previously selected | 3750 | * Determine if @sg is a busier group than the previously selected |
3729 | * busiest group. | 3751 | * busiest group. |
@@ -3761,9 +3783,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, | |||
3761 | 3783 | ||
3762 | /** | 3784 | /** |
3763 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. | 3785 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. |
3764 | * @sd: sched_domain whose statistics are to be updated. | 3786 | * @env: The load balancing environment. |
3765 | * @this_cpu: Cpu for which load balance is currently performed. | ||
3766 | * @idle: Idle status of this_cpu | ||
3767 | * @cpus: Set of cpus considered for load balancing. | 3787 | * @cpus: Set of cpus considered for load balancing. |
3768 | * @balance: Should we balance. | 3788 | * @balance: Should we balance. |
3769 | * @sds: variable to hold the statistics for this sched_domain. | 3789 | * @sds: variable to hold the statistics for this sched_domain. |
@@ -3852,10 +3872,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, | |||
3852 | * Returns 1 when packing is required and a task should be moved to | 3872 | * Returns 1 when packing is required and a task should be moved to |
3853 | * this CPU. The amount of the imbalance is returned in *imbalance. | 3873 | * this CPU. The amount of the imbalance is returned in *imbalance. |
3854 | * | 3874 | * |
3855 | * @sd: The sched_domain whose packing is to be checked. | 3875 | * @env: The load balancing environment. |
3856 | * @sds: Statistics of the sched_domain which is to be packed | 3876 | * @sds: Statistics of the sched_domain which is to be packed |
3857 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | ||
3858 | * @imbalance: returns amount of imbalanced due to packing. | ||
3859 | */ | 3877 | */ |
3860 | static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) | 3878 | static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) |
3861 | { | 3879 | { |
@@ -3881,9 +3899,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) | |||
3881 | * fix_small_imbalance - Calculate the minor imbalance that exists | 3899 | * fix_small_imbalance - Calculate the minor imbalance that exists |
3882 | * amongst the groups of a sched_domain, during | 3900 | * amongst the groups of a sched_domain, during |
3883 | * load balancing. | 3901 | * load balancing. |
3902 | * @env: The load balancing environment. | ||
3884 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. | 3903 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. |
3885 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | ||
3886 | * @imbalance: Variable to store the imbalance. | ||
3887 | */ | 3904 | */ |
3888 | static inline | 3905 | static inline |
3889 | void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) | 3906 | void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) |
@@ -4026,11 +4043,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s | |||
4026 | * Also calculates the amount of weighted load which should be moved | 4043 | * Also calculates the amount of weighted load which should be moved |
4027 | * to restore balance. | 4044 | * to restore balance. |
4028 | * | 4045 | * |
4029 | * @sd: The sched_domain whose busiest group is to be returned. | 4046 | * @env: The load balancing environment. |
4030 | * @this_cpu: The cpu for which load balancing is currently being performed. | ||
4031 | * @imbalance: Variable which stores amount of weighted load which should | ||
4032 | * be moved to restore balance/put a group to idle. | ||
4033 | * @idle: The idle status of this_cpu. | ||
4034 | * @cpus: The set of CPUs under consideration for load-balancing. | 4047 | * @cpus: The set of CPUs under consideration for load-balancing. |
4035 | * @balance: Pointer to a variable indicating if this_cpu | 4048 | * @balance: Pointer to a variable indicating if this_cpu |
4036 | * is the appropriate cpu to perform load balancing at this_level. | 4049 | * is the appropriate cpu to perform load balancing at this_level. |
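The scale_rt_power() hunk above snapshots rq->age_stamp and rq->rt_avg exactly once before using them, so a concurrent update cannot make the "total < avg" sanity check compare a new average against an old timestamp. A minimal sketch of that snapshot-then-check pattern, using stand-in names rather than the kernel's ACCESS_ONCE() and struct rq:

    /* Snapshot-then-check, as in the scale_rt_power() change above.
     * READ_ONCE_U64 and struct sample are illustrative stand-ins. */
    #include <stdint.h>

    #define READ_ONCE_U64(x) (*(volatile uint64_t *)&(x))

    struct sample { uint64_t age_stamp; uint64_t rt_avg; };

    uint64_t available_power(struct sample *s, uint64_t clock, uint64_t period)
    {
        /* Read both shared fields once; later math sees one consistent pair. */
        uint64_t age = READ_ONCE_U64(s->age_stamp);
        uint64_t avg = READ_ONCE_U64(s->rt_avg);
        uint64_t total = period + (clock - age);

        return total < avg ? 0 : total - avg;   /* never "negative" */
    }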
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index c5565c3c515f..573e1ca01102 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq) | |||
274 | 274 | ||
275 | static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | 275 | static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) |
276 | { | 276 | { |
277 | struct task_struct *p; | ||
278 | |||
277 | if (!rt_entity_is_task(rt_se)) | 279 | if (!rt_entity_is_task(rt_se)) |
278 | return; | 280 | return; |
279 | 281 | ||
282 | p = rt_task_of(rt_se); | ||
280 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; | 283 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; |
281 | 284 | ||
282 | rt_rq->rt_nr_total++; | 285 | rt_rq->rt_nr_total++; |
283 | if (rt_se->nr_cpus_allowed > 1) | 286 | if (p->nr_cpus_allowed > 1) |
284 | rt_rq->rt_nr_migratory++; | 287 | rt_rq->rt_nr_migratory++; |
285 | 288 | ||
286 | update_rt_migration(rt_rq); | 289 | update_rt_migration(rt_rq); |
@@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | |||
288 | 291 | ||
289 | static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | 292 | static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) |
290 | { | 293 | { |
294 | struct task_struct *p; | ||
295 | |||
291 | if (!rt_entity_is_task(rt_se)) | 296 | if (!rt_entity_is_task(rt_se)) |
292 | return; | 297 | return; |
293 | 298 | ||
299 | p = rt_task_of(rt_se); | ||
294 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; | 300 | rt_rq = &rq_of_rt_rq(rt_rq)->rt; |
295 | 301 | ||
296 | rt_rq->rt_nr_total--; | 302 | rt_rq->rt_nr_total--; |
297 | if (rt_se->nr_cpus_allowed > 1) | 303 | if (p->nr_cpus_allowed > 1) |
298 | rt_rq->rt_nr_migratory--; | 304 | rt_rq->rt_nr_migratory--; |
299 | 305 | ||
300 | update_rt_migration(rt_rq); | 306 | update_rt_migration(rt_rq); |
@@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) | |||
1161 | 1167 | ||
1162 | enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); | 1168 | enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); |
1163 | 1169 | ||
1164 | if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) | 1170 | if (!task_current(rq, p) && p->nr_cpus_allowed > 1) |
1165 | enqueue_pushable_task(rq, p); | 1171 | enqueue_pushable_task(rq, p); |
1166 | 1172 | ||
1167 | inc_nr_running(rq); | 1173 | inc_nr_running(rq); |
@@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | |||
1225 | 1231 | ||
1226 | cpu = task_cpu(p); | 1232 | cpu = task_cpu(p); |
1227 | 1233 | ||
1228 | if (p->rt.nr_cpus_allowed == 1) | 1234 | if (p->nr_cpus_allowed == 1) |
1229 | goto out; | 1235 | goto out; |
1230 | 1236 | ||
1231 | /* For anything but wake ups, just return the task_cpu */ | 1237 | /* For anything but wake ups, just return the task_cpu */ |
@@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | |||
1260 | * will have to sort it out. | 1266 | * will have to sort it out. |
1261 | */ | 1267 | */ |
1262 | if (curr && unlikely(rt_task(curr)) && | 1268 | if (curr && unlikely(rt_task(curr)) && |
1263 | (curr->rt.nr_cpus_allowed < 2 || | 1269 | (curr->nr_cpus_allowed < 2 || |
1264 | curr->prio <= p->prio) && | 1270 | curr->prio <= p->prio) && |
1265 | (p->rt.nr_cpus_allowed > 1)) { | 1271 | (p->nr_cpus_allowed > 1)) { |
1266 | int target = find_lowest_rq(p); | 1272 | int target = find_lowest_rq(p); |
1267 | 1273 | ||
1268 | if (target != -1) | 1274 | if (target != -1) |
@@ -1276,10 +1282,10 @@ out: | |||
1276 | 1282 | ||
1277 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | 1283 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) |
1278 | { | 1284 | { |
1279 | if (rq->curr->rt.nr_cpus_allowed == 1) | 1285 | if (rq->curr->nr_cpus_allowed == 1) |
1280 | return; | 1286 | return; |
1281 | 1287 | ||
1282 | if (p->rt.nr_cpus_allowed != 1 | 1288 | if (p->nr_cpus_allowed != 1 |
1283 | && cpupri_find(&rq->rd->cpupri, p, NULL)) | 1289 | && cpupri_find(&rq->rd->cpupri, p, NULL)) |
1284 | return; | 1290 | return; |
1285 | 1291 | ||
@@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
1395 | * The previous task needs to be made eligible for pushing | 1401 | * The previous task needs to be made eligible for pushing |
1396 | * if it is still active | 1402 | * if it is still active |
1397 | */ | 1403 | */ |
1398 | if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1) | 1404 | if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) |
1399 | enqueue_pushable_task(rq, p); | 1405 | enqueue_pushable_task(rq, p); |
1400 | } | 1406 | } |
1401 | 1407 | ||
@@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | |||
1408 | { | 1414 | { |
1409 | if (!task_running(rq, p) && | 1415 | if (!task_running(rq, p) && |
1410 | (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && | 1416 | (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && |
1411 | (p->rt.nr_cpus_allowed > 1)) | 1417 | (p->nr_cpus_allowed > 1)) |
1412 | return 1; | 1418 | return 1; |
1413 | return 0; | 1419 | return 0; |
1414 | } | 1420 | } |
@@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task) | |||
1464 | if (unlikely(!lowest_mask)) | 1470 | if (unlikely(!lowest_mask)) |
1465 | return -1; | 1471 | return -1; |
1466 | 1472 | ||
1467 | if (task->rt.nr_cpus_allowed == 1) | 1473 | if (task->nr_cpus_allowed == 1) |
1468 | return -1; /* No other targets possible */ | 1474 | return -1; /* No other targets possible */ |
1469 | 1475 | ||
1470 | if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) | 1476 | if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) |
@@ -1556,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1556 | task_running(rq, task) || | 1562 | task_running(rq, task) || |
1557 | !task->on_rq)) { | 1563 | !task->on_rq)) { |
1558 | 1564 | ||
1559 | raw_spin_unlock(&lowest_rq->lock); | 1565 | double_unlock_balance(rq, lowest_rq); |
1560 | lowest_rq = NULL; | 1566 | lowest_rq = NULL; |
1561 | break; | 1567 | break; |
1562 | } | 1568 | } |
@@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq) | |||
1586 | 1592 | ||
1587 | BUG_ON(rq->cpu != task_cpu(p)); | 1593 | BUG_ON(rq->cpu != task_cpu(p)); |
1588 | BUG_ON(task_current(rq, p)); | 1594 | BUG_ON(task_current(rq, p)); |
1589 | BUG_ON(p->rt.nr_cpus_allowed <= 1); | 1595 | BUG_ON(p->nr_cpus_allowed <= 1); |
1590 | 1596 | ||
1591 | BUG_ON(!p->on_rq); | 1597 | BUG_ON(!p->on_rq); |
1592 | BUG_ON(!rt_task(p)); | 1598 | BUG_ON(!rt_task(p)); |
@@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) | |||
1793 | if (!task_running(rq, p) && | 1799 | if (!task_running(rq, p) && |
1794 | !test_tsk_need_resched(rq->curr) && | 1800 | !test_tsk_need_resched(rq->curr) && |
1795 | has_pushable_tasks(rq) && | 1801 | has_pushable_tasks(rq) && |
1796 | p->rt.nr_cpus_allowed > 1 && | 1802 | p->nr_cpus_allowed > 1 && |
1797 | rt_task(rq->curr) && | 1803 | rt_task(rq->curr) && |
1798 | (rq->curr->rt.nr_cpus_allowed < 2 || | 1804 | (rq->curr->nr_cpus_allowed < 2 || |
1799 | rq->curr->prio <= p->prio)) | 1805 | rq->curr->prio <= p->prio)) |
1800 | push_rt_tasks(rq); | 1806 | push_rt_tasks(rq); |
1801 | } | 1807 | } |
@@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, | |||
1817 | * Only update if the process changes its state from whether it | 1823 | * Only update if the process changes its state from whether it |
1818 | * can migrate or not. | 1824 | * can migrate or not. |
1819 | */ | 1825 | */ |
1820 | if ((p->rt.nr_cpus_allowed > 1) == (weight > 1)) | 1826 | if ((p->nr_cpus_allowed > 1) == (weight > 1)) |
1821 | return; | 1827 | return; |
1822 | 1828 | ||
1823 | rq = task_rq(p); | 1829 | rq = task_rq(p); |
@@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p) | |||
1979 | 1985 | ||
1980 | static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) | 1986 | static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) |
1981 | { | 1987 | { |
1988 | struct sched_rt_entity *rt_se = &p->rt; | ||
1989 | |||
1982 | update_curr_rt(rq); | 1990 | update_curr_rt(rq); |
1983 | 1991 | ||
1984 | watchdog(rq, p); | 1992 | watchdog(rq, p); |
@@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) | |||
1996 | p->rt.time_slice = RR_TIMESLICE; | 2004 | p->rt.time_slice = RR_TIMESLICE; |
1997 | 2005 | ||
1998 | /* | 2006 | /* |
1999 | * Requeue to the end of queue if we are not the only element | 2007 | * Requeue to the end of queue unless we (and all of our ancestors) are |
2000 | * on the queue: | 2008 | * the only element on our queue |
2001 | */ | 2009 | */ |
2002 | if (p->rt.run_list.prev != p->rt.run_list.next) { | 2010 | for_each_sched_rt_entity(rt_se) { |
2003 | requeue_task_rt(rq, p, 0); | 2011 | if (rt_se->run_list.prev != rt_se->run_list.next) { |
2004 | set_tsk_need_resched(p); | 2012 | requeue_task_rt(rq, p, 0); |
2013 | set_tsk_need_resched(p); | ||
2014 | return; | ||
2015 | } | ||
2005 | } | 2016 | } |
2006 | } | 2017 | } |
2007 | 2018 | ||
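With group scheduling, the task_tick_rt() change above must look at every level of the sched_rt_entity chain: a SCHED_RR task can be alone on its own group's runqueue while an ancestor entity still shares a queue with siblings, and the round-robin requeue has to happen in that case too. A simplified sketch of the walk, with stand-in types instead of the kernel's:

    /* Hierarchy walk behind the task_tick_rt() hunk above. rt_node stands in
     * for sched_rt_entity; has_sibling stands for run_list.prev != run_list.next. */
    struct rt_node {
        struct rt_node *parent;     /* group-scheduling ancestor, NULL at the top */
        int has_sibling;
    };

    int needs_rr_requeue(struct rt_node *se)
    {
        for (; se; se = se->parent)
            if (se->has_sibling)
                return 1;           /* something else is queued at this level */
        return 0;                   /* alone at every level: keep running */
    }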
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ba9dccfd24ce..6d52cea7f33d 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -526,6 +526,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag) | |||
526 | DECLARE_PER_CPU(struct sched_domain *, sd_llc); | 526 | DECLARE_PER_CPU(struct sched_domain *, sd_llc); |
527 | DECLARE_PER_CPU(int, sd_llc_id); | 527 | DECLARE_PER_CPU(int, sd_llc_id); |
528 | 528 | ||
529 | extern int group_balance_cpu(struct sched_group *sg); | ||
530 | |||
529 | #endif /* CONFIG_SMP */ | 531 | #endif /* CONFIG_SMP */ |
530 | 532 | ||
531 | #include "stats.h" | 533 | #include "stats.h" |
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index e1a797e028a3..98f60c5caa1b 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void) | |||
31 | per_cpu(idle_threads, smp_processor_id()) = current; | 31 | per_cpu(idle_threads, smp_processor_id()) = current; |
32 | } | 32 | } |
33 | 33 | ||
34 | /** | ||
35 | * idle_init - Initialize the idle thread for a cpu | ||
36 | * @cpu: The cpu for which the idle thread should be initialized | ||
37 | * | ||
38 | * Creates the thread if it does not exist. | ||
39 | */ | ||
34 | static inline void idle_init(unsigned int cpu) | 40 | static inline void idle_init(unsigned int cpu) |
35 | { | 41 | { |
36 | struct task_struct *tsk = per_cpu(idle_threads, cpu); | 42 | struct task_struct *tsk = per_cpu(idle_threads, cpu); |
@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu) | |||
45 | } | 51 | } |
46 | 52 | ||
47 | /** | 53 | /** |
48 | * idle_thread_init - Initialize the idle thread for a cpu | 54 | * idle_threads_init - Initialize idle threads for all cpus |
49 | * @cpu: The cpu for which the idle thread should be initialized | ||
50 | * | ||
51 | * Creates the thread if it does not exist. | ||
52 | */ | 55 | */ |
53 | void __init idle_threads_init(void) | 56 | void __init idle_threads_init(void) |
54 | { | 57 | { |
55 | unsigned int cpu; | 58 | unsigned int cpu, boot_cpu; |
59 | |||
60 | boot_cpu = smp_processor_id(); | ||
56 | 61 | ||
57 | for_each_possible_cpu(cpu) { | 62 | for_each_possible_cpu(cpu) { |
58 | if (cpu != smp_processor_id()) | 63 | if (cpu != boot_cpu) |
59 | idle_init(cpu); | 64 | idle_init(cpu); |
60 | } | 65 | } |
61 | } | 66 | } |
diff --git a/kernel/sys.c b/kernel/sys.c index 9ff89cb9657a..e0c8ffc50d7f 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1786,27 +1786,13 @@ SYSCALL_DEFINE1(umask, int, mask) | |||
1786 | } | 1786 | } |
1787 | 1787 | ||
1788 | #ifdef CONFIG_CHECKPOINT_RESTORE | 1788 | #ifdef CONFIG_CHECKPOINT_RESTORE |
1789 | static bool vma_flags_mismatch(struct vm_area_struct *vma, | ||
1790 | unsigned long required, | ||
1791 | unsigned long banned) | ||
1792 | { | ||
1793 | return (vma->vm_flags & required) != required || | ||
1794 | (vma->vm_flags & banned); | ||
1795 | } | ||
1796 | |||
1797 | static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) | 1789 | static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) |
1798 | { | 1790 | { |
1791 | struct vm_area_struct *vma; | ||
1799 | struct file *exe_file; | 1792 | struct file *exe_file; |
1800 | struct dentry *dentry; | 1793 | struct dentry *dentry; |
1801 | int err; | 1794 | int err; |
1802 | 1795 | ||
1803 | /* | ||
1804 | * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's | ||
1805 | * remain. So perform a quick test first. | ||
1806 | */ | ||
1807 | if (mm->num_exe_file_vmas) | ||
1808 | return -EBUSY; | ||
1809 | |||
1810 | exe_file = fget(fd); | 1796 | exe_file = fget(fd); |
1811 | if (!exe_file) | 1797 | if (!exe_file) |
1812 | return -EBADF; | 1798 | return -EBADF; |
@@ -1827,17 +1813,30 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) | |||
1827 | if (err) | 1813 | if (err) |
1828 | goto exit; | 1814 | goto exit; |
1829 | 1815 | ||
1816 | down_write(&mm->mmap_sem); | ||
1817 | |||
1818 | /* | ||
1819 | * Forbid mm->exe_file change if other files are still mapped. | ||
1820 | */ | ||
1821 | err = -EBUSY; | ||
1822 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | ||
1823 | if (vma->vm_file && !path_equal(&vma->vm_file->f_path, | ||
1824 | &exe_file->f_path)) | ||
1825 | goto exit_unlock; | ||
1826 | } | ||
1827 | |||
1830 | /* | 1828 | /* |
1831 | * The symlink can be changed only once, just to disallow arbitrary | 1829 | * The symlink can be changed only once, just to disallow arbitrary |
1832 | * transitions malicious software might bring in. This means one | 1830 | * transitions malicious software might bring in. This means one |
1833 | * could make a snapshot over all processes running and monitor | 1831 | * could make a snapshot over all processes running and monitor |
1834 | * /proc/pid/exe changes to notice unusual activity if needed. | 1832 | * /proc/pid/exe changes to notice unusual activity if needed. |
1835 | */ | 1833 | */ |
1836 | down_write(&mm->mmap_sem); | 1834 | err = -EPERM; |
1837 | if (likely(!mm->exe_file)) | 1835 | if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags)) |
1838 | set_mm_exe_file(mm, exe_file); | 1836 | goto exit_unlock; |
1839 | else | 1837 | |
1840 | err = -EBUSY; | 1838 | set_mm_exe_file(mm, exe_file); |
1839 | exit_unlock: | ||
1841 | up_write(&mm->mmap_sem); | 1840 | up_write(&mm->mmap_sem); |
1842 | 1841 | ||
1843 | exit: | 1842 | exit: |
@@ -1862,7 +1861,7 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
1862 | if (opt == PR_SET_MM_EXE_FILE) | 1861 | if (opt == PR_SET_MM_EXE_FILE) |
1863 | return prctl_set_mm_exe_file(mm, (unsigned int)addr); | 1862 | return prctl_set_mm_exe_file(mm, (unsigned int)addr); |
1864 | 1863 | ||
1865 | if (addr >= TASK_SIZE) | 1864 | if (addr >= TASK_SIZE || addr < mmap_min_addr) |
1866 | return -EINVAL; | 1865 | return -EINVAL; |
1867 | 1866 | ||
1868 | error = -EINVAL; | 1867 | error = -EINVAL; |
@@ -1924,12 +1923,6 @@ static int prctl_set_mm(int opt, unsigned long addr, | |||
1924 | error = -EFAULT; | 1923 | error = -EFAULT; |
1925 | goto out; | 1924 | goto out; |
1926 | } | 1925 | } |
1927 | #ifdef CONFIG_STACK_GROWSUP | ||
1928 | if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0)) | ||
1929 | #else | ||
1930 | if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0)) | ||
1931 | #endif | ||
1932 | goto out; | ||
1933 | if (opt == PR_SET_MM_START_STACK) | 1926 | if (opt == PR_SET_MM_START_STACK) |
1934 | mm->start_stack = addr; | 1927 | mm->start_stack = addr; |
1935 | else if (opt == PR_SET_MM_ARG_START) | 1928 | else if (opt == PR_SET_MM_ARG_START) |
@@ -1981,12 +1974,22 @@ out: | |||
1981 | up_read(&mm->mmap_sem); | 1974 | up_read(&mm->mmap_sem); |
1982 | return error; | 1975 | return error; |
1983 | } | 1976 | } |
1977 | |||
1978 | static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) | ||
1979 | { | ||
1980 | return put_user(me->clear_child_tid, tid_addr); | ||
1981 | } | ||
1982 | |||
1984 | #else /* CONFIG_CHECKPOINT_RESTORE */ | 1983 | #else /* CONFIG_CHECKPOINT_RESTORE */ |
1985 | static int prctl_set_mm(int opt, unsigned long addr, | 1984 | static int prctl_set_mm(int opt, unsigned long addr, |
1986 | unsigned long arg4, unsigned long arg5) | 1985 | unsigned long arg4, unsigned long arg5) |
1987 | { | 1986 | { |
1988 | return -EINVAL; | 1987 | return -EINVAL; |
1989 | } | 1988 | } |
1989 | static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) | ||
1990 | { | ||
1991 | return -EINVAL; | ||
1992 | } | ||
1990 | #endif | 1993 | #endif |
1991 | 1994 | ||
1992 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | 1995 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
@@ -2141,6 +2144,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
2141 | case PR_SET_MM: | 2144 | case PR_SET_MM: |
2142 | error = prctl_set_mm(arg2, arg3, arg4, arg5); | 2145 | error = prctl_set_mm(arg2, arg3, arg4, arg5); |
2143 | break; | 2146 | break; |
2147 | case PR_GET_TID_ADDRESS: | ||
2148 | error = prctl_get_tid_address(me, (int __user **)arg2); | ||
2149 | break; | ||
2144 | case PR_SET_CHILD_SUBREAPER: | 2150 | case PR_SET_CHILD_SUBREAPER: |
2145 | me->signal->is_child_subreaper = !!arg2; | 2151 | me->signal->is_child_subreaper = !!arg2; |
2146 | error = 0; | 2152 | error = 0; |
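The kernel/sys.c hunks above add a PR_GET_TID_ADDRESS prctl option and rework the PR_SET_MM_EXE_FILE checks. A small userspace probe of the new option could look like the sketch below; it assumes prctl headers that define PR_GET_TID_ADDRESS and a kernel built with CONFIG_CHECKPOINT_RESTORE, otherwise the call fails with EINVAL:

    /* Userspace sketch for the PR_GET_TID_ADDRESS option added above. */
    #include <stdio.h>
    #include <sys/prctl.h>      /* assumed to provide a PR_GET_TID_ADDRESS definition */

    int main(void)
    {
        int *tid_addr = NULL;

        /* Reports the clear_child_tid pointer registered via set_tid_address(2). */
        if (prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0) != 0) {
            perror("PR_GET_TID_ADDRESS");
            return 1;
        }
        printf("clear_child_tid address: %p\n", (void *)tid_addr);
        return 0;
    }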
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 9cd928f7a7c6..7e1ce012a851 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
297 | } | 297 | } |
298 | EXPORT_SYMBOL_GPL(clockevents_register_device); | 298 | EXPORT_SYMBOL_GPL(clockevents_register_device); |
299 | 299 | ||
300 | static void clockevents_config(struct clock_event_device *dev, | 300 | void clockevents_config(struct clock_event_device *dev, u32 freq) |
301 | u32 freq) | ||
302 | { | 301 | { |
303 | u64 sec; | 302 | u64 sec; |
304 | 303 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6a3a5b9ff561..869997833928 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -274,6 +274,7 @@ EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); | |||
274 | static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | 274 | static void tick_nohz_stop_sched_tick(struct tick_sched *ts) |
275 | { | 275 | { |
276 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; | 276 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; |
277 | unsigned long rcu_delta_jiffies; | ||
277 | ktime_t last_update, expires, now; | 278 | ktime_t last_update, expires, now; |
278 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | 279 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; |
279 | u64 time_delta; | 280 | u64 time_delta; |
@@ -322,7 +323,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | |||
322 | time_delta = timekeeping_max_deferment(); | 323 | time_delta = timekeeping_max_deferment(); |
323 | } while (read_seqretry(&xtime_lock, seq)); | 324 | } while (read_seqretry(&xtime_lock, seq)); |
324 | 325 | ||
325 | if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || | 326 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || |
326 | arch_needs_cpu(cpu)) { | 327 | arch_needs_cpu(cpu)) { |
327 | next_jiffies = last_jiffies + 1; | 328 | next_jiffies = last_jiffies + 1; |
328 | delta_jiffies = 1; | 329 | delta_jiffies = 1; |
@@ -330,6 +331,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | |||
330 | /* Get the next timer wheel timer */ | 331 | /* Get the next timer wheel timer */ |
331 | next_jiffies = get_next_timer_interrupt(last_jiffies); | 332 | next_jiffies = get_next_timer_interrupt(last_jiffies); |
332 | delta_jiffies = next_jiffies - last_jiffies; | 333 | delta_jiffies = next_jiffies - last_jiffies; |
334 | if (rcu_delta_jiffies < delta_jiffies) { | ||
335 | next_jiffies = last_jiffies + rcu_delta_jiffies; | ||
336 | delta_jiffies = rcu_delta_jiffies; | ||
337 | } | ||
333 | } | 338 | } |
334 | /* | 339 | /* |
335 | * Do not stop the tick, if we are only one off | 340 | * Do not stop the tick, if we are only one off |
@@ -576,6 +581,7 @@ void tick_nohz_idle_exit(void) | |||
576 | /* Update jiffies first */ | 581 | /* Update jiffies first */ |
577 | select_nohz_load_balancer(0); | 582 | select_nohz_load_balancer(0); |
578 | tick_do_update_jiffies64(now); | 583 | tick_do_update_jiffies64(now); |
584 | update_cpu_load_nohz(); | ||
579 | 585 | ||
580 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 586 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
581 | /* | 587 | /* |
@@ -814,6 +820,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |||
814 | return HRTIMER_RESTART; | 820 | return HRTIMER_RESTART; |
815 | } | 821 | } |
816 | 822 | ||
823 | static int sched_skew_tick; | ||
824 | |||
825 | static int __init skew_tick(char *str) | ||
826 | { | ||
827 | get_option(&str, &sched_skew_tick); | ||
828 | |||
829 | return 0; | ||
830 | } | ||
831 | early_param("skew_tick", skew_tick); | ||
832 | |||
817 | /** | 833 | /** |
818 | * tick_setup_sched_timer - setup the tick emulation timer | 834 | * tick_setup_sched_timer - setup the tick emulation timer |
819 | */ | 835 | */ |
@@ -831,6 +847,14 @@ void tick_setup_sched_timer(void) | |||
831 | /* Get the next period (per cpu) */ | 847 | /* Get the next period (per cpu) */ |
832 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); | 848 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
833 | 849 | ||
850 | /* Offset the tick to avert xtime_lock contention. */ | ||
851 | if (sched_skew_tick) { | ||
852 | u64 offset = ktime_to_ns(tick_period) >> 1; | ||
853 | do_div(offset, num_possible_cpus()); | ||
854 | offset *= smp_processor_id(); | ||
855 | hrtimer_add_expires_ns(&ts->sched_timer, offset); | ||
856 | } | ||
857 | |||
834 | for (;;) { | 858 | for (;;) { |
835 | hrtimer_forward(&ts->sched_timer, now, tick_period); | 859 | hrtimer_forward(&ts->sched_timer, now, tick_period); |
836 | hrtimer_start_expires(&ts->sched_timer, | 860 | hrtimer_start_expires(&ts->sched_timer, |
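The skew_tick= handling above staggers each CPU's sched_timer by offset = (tick_period / 2) / num_possible_cpus() * cpu, so the per-CPU ticks no longer expire in lock-step and pile up on xtime_lock. A worked example of that arithmetic with assumed values (a 4 ms tick, i.e. HZ=250, and 8 possible CPUs):

    /* Reproduces the offset computation from tick_setup_sched_timer() above
     * with assumed, illustrative values. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t tick_period_ns = 4000000;   /* assumed: HZ == 250 */
        unsigned int ncpus = 8;              /* assumed: num_possible_cpus() */

        for (unsigned int cpu = 0; cpu < ncpus; cpu++) {
            uint64_t offset = (tick_period_ns >> 1) / ncpus * cpu;
            printf("cpu%u tick offset: %llu ns\n",
                   cpu, (unsigned long long)offset);   /* 0, 250000, 500000, ... */
        }
        return 0;
    }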
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 6e46cacf5969..6f46a00a1e8a 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -962,6 +962,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) | |||
962 | timekeeper.xtime.tv_sec++; | 962 | timekeeper.xtime.tv_sec++; |
963 | leap = second_overflow(timekeeper.xtime.tv_sec); | 963 | leap = second_overflow(timekeeper.xtime.tv_sec); |
964 | timekeeper.xtime.tv_sec += leap; | 964 | timekeeper.xtime.tv_sec += leap; |
965 | timekeeper.wall_to_monotonic.tv_sec -= leap; | ||
965 | } | 966 | } |
966 | 967 | ||
967 | /* Accumulate raw time */ | 968 | /* Accumulate raw time */ |
@@ -1077,6 +1078,7 @@ static void update_wall_time(void) | |||
1077 | timekeeper.xtime.tv_sec++; | 1078 | timekeeper.xtime.tv_sec++; |
1078 | leap = second_overflow(timekeeper.xtime.tv_sec); | 1079 | leap = second_overflow(timekeeper.xtime.tv_sec); |
1079 | timekeeper.xtime.tv_sec += leap; | 1080 | timekeeper.xtime.tv_sec += leap; |
1081 | timekeeper.wall_to_monotonic.tv_sec -= leap; | ||
1080 | } | 1082 | } |
1081 | 1083 | ||
1082 | timekeeping_update(false); | 1084 | timekeeping_update(false); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 68032c6177db..49249c28690d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(tracing_on); | |||
371 | void tracing_off(void) | 371 | void tracing_off(void) |
372 | { | 372 | { |
373 | if (global_trace.buffer) | 373 | if (global_trace.buffer) |
374 | ring_buffer_record_on(global_trace.buffer); | 374 | ring_buffer_record_off(global_trace.buffer); |
375 | /* | 375 | /* |
376 | * This flag is only looked at when buffers haven't been | 376 | * This flag is only looked at when buffers haven't been |
377 | * allocated yet. We don't really care about the race | 377 | * allocated yet. We don't really care about the race |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index e5e1d85b8c7c..4b1dfba70f7c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -372,6 +372,13 @@ static int watchdog(void *unused) | |||
372 | 372 | ||
373 | 373 | ||
374 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 374 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
375 | /* | ||
376 | * People like the simple clean cpu node info on boot. | ||
377 | * Reduce the watchdog noise by only printing messages | ||
378 | * that are different from what cpu0 displayed. | ||
379 | */ | ||
380 | static unsigned long cpu0_err; | ||
381 | |||
375 | static int watchdog_nmi_enable(int cpu) | 382 | static int watchdog_nmi_enable(int cpu) |
376 | { | 383 | { |
377 | struct perf_event_attr *wd_attr; | 384 | struct perf_event_attr *wd_attr; |
@@ -390,11 +397,21 @@ static int watchdog_nmi_enable(int cpu) | |||
390 | 397 | ||
391 | /* Try to register using hardware perf events */ | 398 | /* Try to register using hardware perf events */ |
392 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | 399 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); |
400 | |||
401 | /* save cpu0 error for future comparison */ | ||
402 | if (cpu == 0 && IS_ERR(event)) | ||
403 | cpu0_err = PTR_ERR(event); | ||
404 | |||
393 | if (!IS_ERR(event)) { | 405 | if (!IS_ERR(event)) { |
394 | pr_info("enabled, takes one hw-pmu counter.\n"); | 406 | /* only print for cpu0 or different than cpu0 */ |
407 | if (cpu == 0 || cpu0_err) | ||
408 | pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); | ||
395 | goto out_save; | 409 | goto out_save; |
396 | } | 410 | } |
397 | 411 | ||
412 | /* skip displaying the same error again */ | ||
413 | if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) | ||
414 | return PTR_ERR(event); | ||
398 | 415 | ||
399 | /* vary the KERN level based on the returned errno */ | 416 | /* vary the KERN level based on the returned errno */ |
400 | if (PTR_ERR(event) == -EOPNOTSUPP) | 417 | if (PTR_ERR(event) == -EOPNOTSUPP) |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a42d3ae39648..ff5bdee4716d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -241,6 +241,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | |||
241 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | 241 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC |
242 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | 242 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC |
243 | 243 | ||
244 | config PANIC_ON_OOPS | ||
245 | bool "Panic on Oops" if EXPERT | ||
246 | default n | ||
247 | help | ||
248 | Say Y here to enable the kernel to panic when it oopses. This | ||
249 | has the same effect as setting oops=panic on the kernel command | ||
250 | line. | ||
251 | |||
252 | This feature is useful to ensure that the kernel does not do | ||
253 | anything erroneous after an oops which could result in data | ||
254 | corruption or other issues. | ||
255 | |||
256 | Say N if unsure. | ||
257 | |||
258 | config PANIC_ON_OOPS_VALUE | ||
259 | int | ||
260 | range 0 1 | ||
261 | default 0 if !PANIC_ON_OOPS | ||
262 | default 1 if PANIC_ON_OOPS | ||
263 | |||
244 | config DETECT_HUNG_TASK | 264 | config DETECT_HUNG_TASK |
245 | bool "Detect Hung Tasks" | 265 | bool "Detect Hung Tasks" |
246 | depends on DEBUG_KERNEL | 266 | depends on DEBUG_KERNEL |
diff --git a/lib/btree.c b/lib/btree.c index e5ec1e9c1aa5..f9a484676cb6 100644 --- a/lib/btree.c +++ b/lib/btree.c | |||
@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, | |||
319 | 319 | ||
320 | if (head->height == 0) | 320 | if (head->height == 0) |
321 | return NULL; | 321 | return NULL; |
322 | retry: | ||
323 | longcpy(key, __key, geo->keylen); | 322 | longcpy(key, __key, geo->keylen); |
323 | retry: | ||
324 | dec_key(geo, key); | 324 | dec_key(geo, key); |
325 | 325 | ||
326 | node = head->node; | 326 | node = head->node; |
@@ -351,7 +351,7 @@ retry: | |||
351 | } | 351 | } |
352 | miss: | 352 | miss: |
353 | if (retry_key) { | 353 | if (retry_key) { |
354 | __key = retry_key; | 354 | longcpy(key, retry_key, geo->keylen); |
355 | retry_key = NULL; | 355 | retry_key = NULL; |
356 | goto retry; | 356 | goto retry; |
357 | } | 357 | } |
@@ -509,6 +509,7 @@ retry: | |||
509 | int btree_insert(struct btree_head *head, struct btree_geo *geo, | 509 | int btree_insert(struct btree_head *head, struct btree_geo *geo, |
510 | unsigned long *key, void *val, gfp_t gfp) | 510 | unsigned long *key, void *val, gfp_t gfp) |
511 | { | 511 | { |
512 | BUG_ON(!val); | ||
512 | return btree_insert_level(head, geo, key, val, 1, gfp); | 513 | return btree_insert_level(head, geo, key, val, 1, gfp); |
513 | } | 514 | } |
514 | EXPORT_SYMBOL_GPL(btree_insert); | 515 | EXPORT_SYMBOL_GPL(btree_insert); |
diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 6805453c18e7..f7210ad6cffd 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c | |||
@@ -101,6 +101,10 @@ static inline bool fail_stacktrace(struct fault_attr *attr) | |||
101 | 101 | ||
102 | bool should_fail(struct fault_attr *attr, ssize_t size) | 102 | bool should_fail(struct fault_attr *attr, ssize_t size) |
103 | { | 103 | { |
104 | /* No need to check any other properties if the probability is 0 */ | ||
105 | if (attr->probability == 0) | ||
106 | return false; | ||
107 | |||
104 | if (attr->task_filter && !fail_task(attr, current)) | 108 | if (attr->task_filter && !fail_task(attr, current)) |
105 | return false; | 109 | return false; |
106 | 110 | ||
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index d7c878cc006c..e7964296fd50 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -686,6 +686,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, | |||
686 | * during iterating; it can be zero only at the beginning. | 686 | * during iterating; it can be zero only at the beginning. |
687 | * And we cannot overflow iter->next_index in a single step, | 687 | * And we cannot overflow iter->next_index in a single step, |
688 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. | 688 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. |
689 | * | ||
690 | * This condition is also used by radix_tree_next_slot() to stop | ||
691 | * contiguous iteration, and forbids switching to the next chunk. | ||
689 | */ | 692 | */ |
690 | index = iter->next_index; | 693 | index = iter->next_index; |
691 | if (!index && iter->index) | 694 | if (!index && iter->index) |
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c index 1805a5cc5daa..a95bccb8497d 100644 --- a/lib/raid6/recov.c +++ b/lib/raid6/recov.c | |||
@@ -22,8 +22,8 @@ | |||
22 | #include <linux/raid/pq.h> | 22 | #include <linux/raid/pq.h> |
23 | 23 | ||
24 | /* Recover two failed data blocks. */ | 24 | /* Recover two failed data blocks. */ |
25 | void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, | 25 | static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, |
26 | void **ptrs) | 26 | int failb, void **ptrs) |
27 | { | 27 | { |
28 | u8 *p, *q, *dp, *dq; | 28 | u8 *p, *q, *dp, *dq; |
29 | u8 px, qx, db; | 29 | u8 px, qx, db; |
@@ -66,7 +66,8 @@ void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, | |||
66 | } | 66 | } |
67 | 67 | ||
68 | /* Recover failure of one data block plus the P block */ | 68 | /* Recover failure of one data block plus the P block */ |
69 | void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs) | 69 | static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, |
70 | void **ptrs) | ||
70 | { | 71 | { |
71 | u8 *p, *q, *dq; | 72 | u8 *p, *q, *dq; |
72 | const u8 *qmul; /* Q multiplier table */ | 73 | const u8 *qmul; /* Q multiplier table */ |
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c index 37ae61930559..ecb710c0b4d9 100644 --- a/lib/raid6/recov_ssse3.c +++ b/lib/raid6/recov_ssse3.c | |||
@@ -19,8 +19,8 @@ static int raid6_has_ssse3(void) | |||
19 | boot_cpu_has(X86_FEATURE_SSSE3); | 19 | boot_cpu_has(X86_FEATURE_SSSE3); |
20 | } | 20 | } |
21 | 21 | ||
22 | void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, | 22 | static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, |
23 | void **ptrs) | 23 | int failb, void **ptrs) |
24 | { | 24 | { |
25 | u8 *p, *q, *dp, *dq; | 25 | u8 *p, *q, *dp, *dq; |
26 | const u8 *pbmul; /* P multiplier table for B data */ | 26 | const u8 *pbmul; /* P multiplier table for B data */ |
@@ -194,7 +194,8 @@ void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, | |||
194 | } | 194 | } |
195 | 195 | ||
196 | 196 | ||
197 | void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs) | 197 | static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, |
198 | void **ptrs) | ||
198 | { | 199 | { |
199 | u8 *p, *q, *dq; | 200 | u8 *p, *q, *dq; |
200 | const u8 *qmul; /* Q multiplier table */ | 201 | const u8 *qmul; /* Q multiplier table */ |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index d0ec4f3d1593..e91fbc23fff1 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
@@ -118,7 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock) | |||
118 | /* lockup suspected: */ | 118 | /* lockup suspected: */ |
119 | if (print_once) { | 119 | if (print_once) { |
120 | print_once = 0; | 120 | print_once = 0; |
121 | spin_dump(lock, "lockup"); | 121 | spin_dump(lock, "lockup suspected"); |
122 | #ifdef CONFIG_SMP | 122 | #ifdef CONFIG_SMP |
123 | trigger_all_cpu_backtrace(); | 123 | trigger_all_cpu_backtrace(); |
124 | #endif | 124 | #endif |
diff --git a/mm/Kconfig b/mm/Kconfig index b2176374b98e..82fed4eb2b6f 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -389,3 +389,20 @@ config CLEANCACHE | |||
389 | in a negligible performance hit. | 389 | in a negligible performance hit. |
390 | 390 | ||
391 | If unsure, say Y to enable cleancache | 391 | If unsure, say Y to enable cleancache |
392 | |||
393 | config FRONTSWAP | ||
394 | bool "Enable frontswap to cache swap pages if tmem is present" | ||
395 | depends on SWAP | ||
396 | default n | ||
397 | help | ||
398 | Frontswap is so named because it can be thought of as the opposite | ||
399 | of a "backing" store for a swap device. The data is stored into | ||
400 | "transcendent memory", memory that is not directly accessible or | ||
401 | addressable by the kernel and is of unknown and possibly | ||
402 | time-varying size. When space in transcendent memory is available, | ||
403 | a significant swap I/O reduction may be achieved. When none is | ||
404 | available, all frontswap calls are reduced to a single pointer- | ||
405 | compare-against-NULL resulting in a negligible performance hit | ||
406 | and swap data is stored as normal on the matching swap device. | ||
407 | |||
408 | If unsure, say Y to enable frontswap. | ||
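The help text above notes that, while no backend is registered, every frontswap hook collapses to a single pointer-compare-against-NULL before falling back to the normal swap path. A minimal sketch of that shape, with illustrative names rather than the actual mm/frontswap.c API:

    /* Fast-path shape only; not the real frontswap interface. */
    struct fs_backend {
        int (*store)(unsigned type, unsigned long offset, void *page);
    };

    static struct fs_backend *fs_backend;    /* NULL until a backend registers */

    static int maybe_store(unsigned type, unsigned long offset, void *page)
    {
        if (!fs_backend)         /* the pointer-compare-against-NULL case */
            return -1;           /* caller writes to the swap device as usual */
        return fs_backend->store(type, offset, page);
    }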
diff --git a/mm/Makefile b/mm/Makefile index a156285ce88d..2e2fbbefb99f 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o | |||
29 | 29 | ||
30 | obj-$(CONFIG_BOUNCE) += bounce.o | 30 | obj-$(CONFIG_BOUNCE) += bounce.o |
31 | obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o | 31 | obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o |
32 | obj-$(CONFIG_FRONTSWAP) += frontswap.o | ||
32 | obj-$(CONFIG_HAS_DMA) += dmapool.o | 33 | obj-$(CONFIG_HAS_DMA) += dmapool.o |
33 | obj-$(CONFIG_HUGETLBFS) += hugetlb.o | 34 | obj-$(CONFIG_HUGETLBFS) += hugetlb.o |
34 | obj-$(CONFIG_NUMA) += mempolicy.o | 35 | obj-$(CONFIG_NUMA) += mempolicy.o |
diff --git a/mm/compaction.c b/mm/compaction.c index 4ac338af5120..7ea259d82a99 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
236 | */ | 236 | */ |
237 | while (unlikely(too_many_isolated(zone))) { | 237 | while (unlikely(too_many_isolated(zone))) { |
238 | /* async migration should just abort */ | 238 | /* async migration should just abort */ |
239 | if (cc->mode != COMPACT_SYNC) | 239 | if (!cc->sync) |
240 | return 0; | 240 | return 0; |
241 | 241 | ||
242 | congestion_wait(BLK_RW_ASYNC, HZ/10); | 242 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
304 | * satisfies the allocation | 304 | * satisfies the allocation |
305 | */ | 305 | */ |
306 | pageblock_nr = low_pfn >> pageblock_order; | 306 | pageblock_nr = low_pfn >> pageblock_order; |
307 | if (cc->mode != COMPACT_SYNC && | 307 | if (!cc->sync && last_pageblock_nr != pageblock_nr && |
308 | last_pageblock_nr != pageblock_nr && | ||
309 | !migrate_async_suitable(get_pageblock_migratetype(page))) { | 308 | !migrate_async_suitable(get_pageblock_migratetype(page))) { |
310 | low_pfn += pageblock_nr_pages; | 309 | low_pfn += pageblock_nr_pages; |
311 | low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; | 310 | low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; |
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
326 | continue; | 325 | continue; |
327 | } | 326 | } |
328 | 327 | ||
329 | if (cc->mode != COMPACT_SYNC) | 328 | if (!cc->sync) |
330 | mode |= ISOLATE_ASYNC_MIGRATE; | 329 | mode |= ISOLATE_ASYNC_MIGRATE; |
331 | 330 | ||
332 | lruvec = mem_cgroup_page_lruvec(page, zone); | 331 | lruvec = mem_cgroup_page_lruvec(page, zone); |
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
361 | 360 | ||
362 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ | 361 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ |
363 | #ifdef CONFIG_COMPACTION | 362 | #ifdef CONFIG_COMPACTION |
364 | /* | ||
365 | * Returns true if MIGRATE_UNMOVABLE pageblock was successfully | ||
366 | * converted to MIGRATE_MOVABLE type, false otherwise. | ||
367 | */ | ||
368 | static bool rescue_unmovable_pageblock(struct page *page) | ||
369 | { | ||
370 | unsigned long pfn, start_pfn, end_pfn; | ||
371 | struct page *start_page, *end_page; | ||
372 | |||
373 | pfn = page_to_pfn(page); | ||
374 | start_pfn = pfn & ~(pageblock_nr_pages - 1); | ||
375 | end_pfn = start_pfn + pageblock_nr_pages; | ||
376 | |||
377 | start_page = pfn_to_page(start_pfn); | ||
378 | end_page = pfn_to_page(end_pfn); | ||
379 | |||
380 | /* Do not deal with pageblocks that overlap zones */ | ||
381 | if (page_zone(start_page) != page_zone(end_page)) | ||
382 | return false; | ||
383 | |||
384 | for (page = start_page, pfn = start_pfn; page < end_page; pfn++, | ||
385 | page++) { | ||
386 | if (!pfn_valid_within(pfn)) | ||
387 | continue; | ||
388 | |||
389 | if (PageBuddy(page)) { | ||
390 | int order = page_order(page); | ||
391 | |||
392 | pfn += (1 << order) - 1; | ||
393 | page += (1 << order) - 1; | ||
394 | |||
395 | continue; | ||
396 | } else if (page_count(page) == 0 || PageLRU(page)) | ||
397 | continue; | ||
398 | |||
399 | return false; | ||
400 | } | ||
401 | |||
402 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); | ||
403 | move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE); | ||
404 | return true; | ||
405 | } | ||
406 | 363 | ||
407 | enum smt_result { | 364 | /* Returns true if the page is within a block suitable for migration to */ |
408 | GOOD_AS_MIGRATION_TARGET, | 365 | static bool suitable_migration_target(struct page *page) |
409 | FAIL_UNMOVABLE_TARGET, | ||
410 | FAIL_BAD_TARGET, | ||
411 | }; | ||
412 | |||
413 | /* | ||
414 | * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block | ||
415 | * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page | ||
416 | * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise. | ||
417 | */ | ||
418 | static enum smt_result suitable_migration_target(struct page *page, | ||
419 | struct compact_control *cc) | ||
420 | { | 366 | { |
421 | 367 | ||
422 | int migratetype = get_pageblock_migratetype(page); | 368 | int migratetype = get_pageblock_migratetype(page); |
423 | 369 | ||
424 | /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ | 370 | /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ |
425 | if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) | 371 | if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) |
426 | return FAIL_BAD_TARGET; | 372 | return false; |
427 | 373 | ||
428 | /* If the page is a large free page, then allow migration */ | 374 | /* If the page is a large free page, then allow migration */ |
429 | if (PageBuddy(page) && page_order(page) >= pageblock_order) | 375 | if (PageBuddy(page) && page_order(page) >= pageblock_order) |
430 | return GOOD_AS_MIGRATION_TARGET; | 376 | return true; |
431 | 377 | ||
432 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ | 378 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ |
433 | if (cc->mode != COMPACT_ASYNC_UNMOVABLE && | 379 | if (migrate_async_suitable(migratetype)) |
434 | migrate_async_suitable(migratetype)) | 380 | return true; |
435 | return GOOD_AS_MIGRATION_TARGET; | ||
436 | |||
437 | if (cc->mode == COMPACT_ASYNC_MOVABLE && | ||
438 | migratetype == MIGRATE_UNMOVABLE) | ||
439 | return FAIL_UNMOVABLE_TARGET; | ||
440 | |||
441 | if (cc->mode != COMPACT_ASYNC_MOVABLE && | ||
442 | migratetype == MIGRATE_UNMOVABLE && | ||
443 | rescue_unmovable_pageblock(page)) | ||
444 | return GOOD_AS_MIGRATION_TARGET; | ||
445 | 381 | ||
446 | /* Otherwise skip the block */ | 382 | /* Otherwise skip the block */ |
447 | return FAIL_BAD_TARGET; | 383 | return false; |
448 | } | 384 | } |
449 | 385 | ||
450 | /* | 386 | /* |
@@ -478,13 +414,6 @@ static void isolate_freepages(struct zone *zone, | |||
478 | zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; | 414 | zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; |
479 | 415 | ||
480 | /* | 416 | /* |
481 | * isolate_freepages() may be called more than once during | ||
482 | * compact_zone_order() run and we want only the most recent | ||
483 | * count. | ||
484 | */ | ||
485 | cc->nr_pageblocks_skipped = 0; | ||
486 | |||
487 | /* | ||
488 | * Isolate free pages until enough are available to migrate the | 417 | * Isolate free pages until enough are available to migrate the |
489 | * pages on cc->migratepages. We stop searching if the migrate | 418 | * pages on cc->migratepages. We stop searching if the migrate |
490 | * and free page scanners meet or enough free pages are isolated. | 419 | * and free page scanners meet or enough free pages are isolated. |
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone, | |||
492 | for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; | 421 | for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; |
493 | pfn -= pageblock_nr_pages) { | 422 | pfn -= pageblock_nr_pages) { |
494 | unsigned long isolated; | 423 | unsigned long isolated; |
495 | enum smt_result ret; | ||
496 | 424 | ||
497 | if (!pfn_valid(pfn)) | 425 | if (!pfn_valid(pfn)) |
498 | continue; | 426 | continue; |
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone, | |||
509 | continue; | 437 | continue; |
510 | 438 | ||
511 | /* Check the block is suitable for migration */ | 439 | /* Check the block is suitable for migration */ |
512 | ret = suitable_migration_target(page, cc); | 440 | if (!suitable_migration_target(page)) |
513 | if (ret != GOOD_AS_MIGRATION_TARGET) { | ||
514 | if (ret == FAIL_UNMOVABLE_TARGET) | ||
515 | cc->nr_pageblocks_skipped++; | ||
516 | continue; | 441 | continue; |
517 | } | 442 | |
518 | /* | 443 | /* |
519 | * Found a block suitable for isolating free pages from. Now | 444 | * Found a block suitable for isolating free pages from. Now |
520 | * we disabled interrupts, double check things are ok and | 445 | * we disabled interrupts, double check things are ok and |
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone, | |||
523 | */ | 448 | */ |
524 | isolated = 0; | 449 | isolated = 0; |
525 | spin_lock_irqsave(&zone->lock, flags); | 450 | spin_lock_irqsave(&zone->lock, flags); |
526 | ret = suitable_migration_target(page, cc); | 451 | if (suitable_migration_target(page)) { |
527 | if (ret == GOOD_AS_MIGRATION_TARGET) { | ||
528 | end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); | 452 | end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); |
529 | isolated = isolate_freepages_block(pfn, end_pfn, | 453 | isolated = isolate_freepages_block(pfn, end_pfn, |
530 | freelist, false); | 454 | freelist, false); |
531 | nr_freepages += isolated; | 455 | nr_freepages += isolated; |
532 | } else if (ret == FAIL_UNMOVABLE_TARGET) | 456 | } |
533 | cc->nr_pageblocks_skipped++; | ||
534 | spin_unlock_irqrestore(&zone->lock, flags); | 457 | spin_unlock_irqrestore(&zone->lock, flags); |
535 | 458 | ||
536 | /* | 459 | /* |
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
762 | 685 | ||
763 | nr_migrate = cc->nr_migratepages; | 686 | nr_migrate = cc->nr_migratepages; |
764 | err = migrate_pages(&cc->migratepages, compaction_alloc, | 687 | err = migrate_pages(&cc->migratepages, compaction_alloc, |
765 | (unsigned long)&cc->freepages, false, | 688 | (unsigned long)cc, false, |
766 | (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT | 689 | cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC); |
767 | : MIGRATE_ASYNC); | ||
768 | update_nr_listpages(cc); | 690 | update_nr_listpages(cc); |
769 | nr_remaining = cc->nr_migratepages; | 691 | nr_remaining = cc->nr_migratepages; |
770 | 692 | ||
@@ -793,8 +715,7 @@ out: | |||
793 | 715 | ||
794 | static unsigned long compact_zone_order(struct zone *zone, | 716 | static unsigned long compact_zone_order(struct zone *zone, |
795 | int order, gfp_t gfp_mask, | 717 | int order, gfp_t gfp_mask, |
796 | enum compact_mode mode, | 718 | bool sync) |
797 | unsigned long *nr_pageblocks_skipped) | ||
798 | { | 719 | { |
799 | struct compact_control cc = { | 720 | struct compact_control cc = { |
800 | .nr_freepages = 0, | 721 | .nr_freepages = 0, |
@@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone, | |||
802 | .order = order, | 723 | .order = order, |
803 | .migratetype = allocflags_to_migratetype(gfp_mask), | 724 | .migratetype = allocflags_to_migratetype(gfp_mask), |
804 | .zone = zone, | 725 | .zone = zone, |
805 | .mode = mode, | 726 | .sync = sync, |
806 | }; | 727 | }; |
807 | unsigned long rc; | ||
808 | |||
809 | INIT_LIST_HEAD(&cc.freepages); | 728 | INIT_LIST_HEAD(&cc.freepages); |
810 | INIT_LIST_HEAD(&cc.migratepages); | 729 | INIT_LIST_HEAD(&cc.migratepages); |
811 | 730 | ||
812 | rc = compact_zone(zone, &cc); | 731 | return compact_zone(zone, &cc); |
813 | *nr_pageblocks_skipped = cc.nr_pageblocks_skipped; | ||
814 | |||
815 | return rc; | ||
816 | } | 732 | } |
817 | 733 | ||
818 | int sysctl_extfrag_threshold = 500; | 734 | int sysctl_extfrag_threshold = 500; |
@@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
837 | struct zoneref *z; | 753 | struct zoneref *z; |
838 | struct zone *zone; | 754 | struct zone *zone; |
839 | int rc = COMPACT_SKIPPED; | 755 | int rc = COMPACT_SKIPPED; |
840 | unsigned long nr_pageblocks_skipped; | ||
841 | enum compact_mode mode; | ||
842 | 756 | ||
843 | /* | 757 | /* |
844 | * Check whether it is worth even starting compaction. The order check is | 758 | * Check whether it is worth even starting compaction. The order check is |
@@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
855 | nodemask) { | 769 | nodemask) { |
856 | int status; | 770 | int status; |
857 | 771 | ||
858 | mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE; | 772 | status = compact_zone_order(zone, order, gfp_mask, sync); |
859 | retry: | ||
860 | status = compact_zone_order(zone, order, gfp_mask, mode, | ||
861 | &nr_pageblocks_skipped); | ||
862 | rc = max(status, rc); | 773 | rc = max(status, rc); |
863 | 774 | ||
864 | /* If a normal allocation would succeed, stop compacting */ | 775 | /* If a normal allocation would succeed, stop compacting */ |
865 | if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) | 776 | if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) |
866 | break; | 777 | break; |
867 | |||
868 | if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) { | ||
869 | if (nr_pageblocks_skipped) { | ||
870 | mode = COMPACT_ASYNC_UNMOVABLE; | ||
871 | goto retry; | ||
872 | } | ||
873 | } | ||
874 | } | 778 | } |
875 | 779 | ||
876 | return rc; | 780 | return rc; |
@@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) | |||
904 | if (ok && cc->order > zone->compact_order_failed) | 808 | if (ok && cc->order > zone->compact_order_failed) |
905 | zone->compact_order_failed = cc->order + 1; | 809 | zone->compact_order_failed = cc->order + 1; |
906 | /* Currently async compaction is never deferred. */ | 810 | /* Currently async compaction is never deferred. */ |
907 | else if (!ok && cc->mode == COMPACT_SYNC) | 811 | else if (!ok && cc->sync) |
908 | defer_compaction(zone, cc->order); | 812 | defer_compaction(zone, cc->order); |
909 | } | 813 | } |
910 | 814 | ||
@@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order) | |||
919 | { | 823 | { |
920 | struct compact_control cc = { | 824 | struct compact_control cc = { |
921 | .order = order, | 825 | .order = order, |
922 | .mode = COMPACT_ASYNC_MOVABLE, | 826 | .sync = false, |
923 | }; | 827 | }; |
924 | 828 | ||
925 | return __compact_pgdat(pgdat, &cc); | 829 | return __compact_pgdat(pgdat, &cc); |
@@ -929,7 +833,7 @@ static int compact_node(int nid) | |||
929 | { | 833 | { |
930 | struct compact_control cc = { | 834 | struct compact_control cc = { |
931 | .order = -1, | 835 | .order = -1, |
932 | .mode = COMPACT_SYNC, | 836 | .sync = true, |
933 | }; | 837 | }; |
934 | 838 | ||
935 | return __compact_pgdat(NODE_DATA(nid), &cc); | 839 | return __compact_pgdat(NODE_DATA(nid), &cc); |
diff --git a/mm/frontswap.c b/mm/frontswap.c new file mode 100644 index 000000000000..e25025574a02 --- /dev/null +++ b/mm/frontswap.c | |||
@@ -0,0 +1,314 @@ | |||
1 | /* | ||
2 | * Frontswap frontend | ||
3 | * | ||
4 | * This code provides the generic "frontend" layer to call a matching | ||
5 | * "backend" driver implementation of frontswap. See | ||
6 | * Documentation/vm/frontswap.txt for more information. | ||
7 | * | ||
8 | * Copyright (C) 2009-2012 Oracle Corp. All rights reserved. | ||
9 | * Author: Dan Magenheimer | ||
10 | * | ||
11 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
12 | */ | ||
13 | |||
14 | #include <linux/mm.h> | ||
15 | #include <linux/mman.h> | ||
16 | #include <linux/swap.h> | ||
17 | #include <linux/swapops.h> | ||
18 | #include <linux/proc_fs.h> | ||
19 | #include <linux/security.h> | ||
20 | #include <linux/capability.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/frontswap.h> | ||
25 | #include <linux/swapfile.h> | ||
26 | |||
27 | /* | ||
28 | * frontswap_ops is set by frontswap_register_ops to contain the pointers | ||
29 | * to the frontswap "backend" implementation functions. | ||
30 | */ | ||
31 | static struct frontswap_ops frontswap_ops __read_mostly; | ||
32 | |||
33 | /* | ||
34 | * This global enablement flag reduces overhead on systems where frontswap_ops | ||
35 | * has not been registered, so is preferred to the slower alternative: a | ||
36 | * function call that checks a non-global. | ||
37 | */ | ||
38 | bool frontswap_enabled __read_mostly; | ||
39 | EXPORT_SYMBOL(frontswap_enabled); | ||
40 | |||
41 | /* | ||
42 | * If enabled, frontswap_store will return failure even on success. As | ||
43 | * a result, the swap subsystem will always write the page to swap, in | ||
44 | * effect converting frontswap into a writethrough cache. In this mode, | ||
45 | * there is no direct reduction in swap writes, but a frontswap backend | ||
46 | * can unilaterally "reclaim" any pages in use with no data loss, thus | ||
47 | * providing increased control over maximum memory usage due to frontswap. | ||
48 | */ | ||
49 | static bool frontswap_writethrough_enabled __read_mostly; | ||
50 | |||
51 | #ifdef CONFIG_DEBUG_FS | ||
52 | /* | ||
53 | * Counters available via /sys/kernel/debug/frontswap (if debugfs is | ||
54 | * properly configured). These are for information only so are not protected | ||
55 | * against increment races. | ||
56 | */ | ||
57 | static u64 frontswap_loads; | ||
58 | static u64 frontswap_succ_stores; | ||
59 | static u64 frontswap_failed_stores; | ||
60 | static u64 frontswap_invalidates; | ||
61 | |||
62 | static inline void inc_frontswap_loads(void) { | ||
63 | frontswap_loads++; | ||
64 | } | ||
65 | static inline void inc_frontswap_succ_stores(void) { | ||
66 | frontswap_succ_stores++; | ||
67 | } | ||
68 | static inline void inc_frontswap_failed_stores(void) { | ||
69 | frontswap_failed_stores++; | ||
70 | } | ||
71 | static inline void inc_frontswap_invalidates(void) { | ||
72 | frontswap_invalidates++; | ||
73 | } | ||
74 | #else | ||
75 | static inline void inc_frontswap_loads(void) { } | ||
76 | static inline void inc_frontswap_succ_stores(void) { } | ||
77 | static inline void inc_frontswap_failed_stores(void) { } | ||
78 | static inline void inc_frontswap_invalidates(void) { } | ||
79 | #endif | ||
80 | /* | ||
81 | * Register operations for frontswap, returning the previous ops to allow | ||
82 | * detection of multiple backends and possible nesting. | ||
83 | */ | ||
84 | struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops) | ||
85 | { | ||
86 | struct frontswap_ops old = frontswap_ops; | ||
87 | |||
88 | frontswap_ops = *ops; | ||
89 | frontswap_enabled = true; | ||
90 | return old; | ||
91 | } | ||
92 | EXPORT_SYMBOL(frontswap_register_ops); | ||
93 | |||
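The five function pointers this file dereferences (init, store, load, invalidate_page and invalidate_area) make up the whole backend contract. A minimal sketch of a backend registration follows; the signatures are inferred from the call sites in this file rather than quoted from linux/frontswap.h, so treat it as illustrative only:

#include <linux/frontswap.h>
#include <linux/module.h>

/* Hypothetical no-op backend: refuses every store, so swap I/O proceeds normally. */
static void toy_init(unsigned type) { }
static int toy_store(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* non-zero: page not taken, caller writes it to swap */
}
static int toy_load(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* non-zero: not found, caller reads from the swap device */
}
static void toy_invalidate_page(unsigned type, pgoff_t offset) { }
static void toy_invalidate_area(unsigned type) { }

static struct frontswap_ops toy_ops = {
        .init            = toy_init,
        .store           = toy_store,
        .load            = toy_load,
        .invalidate_page = toy_invalidate_page,
        .invalidate_area = toy_invalidate_area,
};

static int __init toy_frontswap_init(void)
{
        struct frontswap_ops old = frontswap_register_ops(&toy_ops);

        if (old.store)  /* another backend had already registered */
                pr_warn("toy_frontswap: replaced an existing backend\n");
        return 0;
}
module_init(toy_frontswap_init);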
94 | /* | ||
95 | * Enable/disable frontswap writethrough (see above). | ||
96 | */ | ||
97 | void frontswap_writethrough(bool enable) | ||
98 | { | ||
99 | frontswap_writethrough_enabled = enable; | ||
100 | } | ||
101 | EXPORT_SYMBOL(frontswap_writethrough); | ||
102 | |||
103 | /* | ||
104 | * Called when a swap device is swapon'd. | ||
105 | */ | ||
106 | void __frontswap_init(unsigned type) | ||
107 | { | ||
108 | struct swap_info_struct *sis = swap_info[type]; | ||
109 | |||
110 | BUG_ON(sis == NULL); | ||
111 | if (sis->frontswap_map == NULL) | ||
112 | return; | ||
113 | if (frontswap_enabled) | ||
114 | (*frontswap_ops.init)(type); | ||
115 | } | ||
116 | EXPORT_SYMBOL(__frontswap_init); | ||
117 | |||
118 | /* | ||
119 | * "Store" data from a page to frontswap and associate it with the page's | ||
120 | * swaptype and offset. Page must be locked and in the swap cache. | ||
121 | * If frontswap already contains a page with matching swaptype and | ||
122 | * offset, the frontswap implementation may either overwrite the data and | ||
123 | * return success or invalidate the page from frontswap and return failure. | ||
124 | */ | ||
125 | int __frontswap_store(struct page *page) | ||
126 | { | ||
127 | int ret = -1, dup = 0; | ||
128 | swp_entry_t entry = { .val = page_private(page), }; | ||
129 | int type = swp_type(entry); | ||
130 | struct swap_info_struct *sis = swap_info[type]; | ||
131 | pgoff_t offset = swp_offset(entry); | ||
132 | |||
133 | BUG_ON(!PageLocked(page)); | ||
134 | BUG_ON(sis == NULL); | ||
135 | if (frontswap_test(sis, offset)) | ||
136 | dup = 1; | ||
137 | ret = (*frontswap_ops.store)(type, offset, page); | ||
138 | if (ret == 0) { | ||
139 | frontswap_set(sis, offset); | ||
140 | inc_frontswap_succ_stores(); | ||
141 | if (!dup) | ||
142 | atomic_inc(&sis->frontswap_pages); | ||
143 | } else if (dup) { | ||
144 | /* | ||
145 | * failed dup always results in automatic invalidate of | ||
146 | * the (older) page from frontswap | ||
147 | */ | ||
148 | frontswap_clear(sis, offset); | ||
149 | atomic_dec(&sis->frontswap_pages); | ||
150 | inc_frontswap_failed_stores(); | ||
151 | } else | ||
152 | inc_frontswap_failed_stores(); | ||
153 | if (frontswap_writethrough_enabled) | ||
154 | /* report failure so swap also writes to swap device */ | ||
155 | ret = -1; | ||
156 | return ret; | ||
157 | } | ||
158 | EXPORT_SYMBOL(__frontswap_store); | ||
159 | |||
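The return value is the entire contract with the swap path: 0 means the backend now holds the page's data (so, unless writethrough is enabled, the write to the swap device can be skipped), while any non-zero value means the page must still be written out normally. The swap_writepage() hook added in mm/page_io.c later in this diff consumes it in roughly this shape (condensed from that hunk; frontswap_store() is presumably the linux/frontswap.h wrapper over __frontswap_store() above):

        if (frontswap_store(page) == 0) {
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);       /* "writeback" completes without block I/O */
                goto out;
        }
        /* otherwise fall through and submit a bio to the swap device */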
160 | /* | ||
161 | * "Get" data from frontswap associated with swaptype and offset that were | ||
162 | * specified when the data was put to frontswap and use it to fill the | ||
163 | * specified page with data. Page must be locked and in the swap cache. | ||
164 | */ | ||
165 | int __frontswap_load(struct page *page) | ||
166 | { | ||
167 | int ret = -1; | ||
168 | swp_entry_t entry = { .val = page_private(page), }; | ||
169 | int type = swp_type(entry); | ||
170 | struct swap_info_struct *sis = swap_info[type]; | ||
171 | pgoff_t offset = swp_offset(entry); | ||
172 | |||
173 | BUG_ON(!PageLocked(page)); | ||
174 | BUG_ON(sis == NULL); | ||
175 | if (frontswap_test(sis, offset)) | ||
176 | ret = (*frontswap_ops.load)(type, offset, page); | ||
177 | if (ret == 0) | ||
178 | inc_frontswap_loads(); | ||
179 | return ret; | ||
180 | } | ||
181 | EXPORT_SYMBOL(__frontswap_load); | ||
182 | |||
183 | /* | ||
184 | * Invalidate any data from frontswap associated with the specified swaptype | ||
185 | * and offset so that a subsequent "get" will fail. | ||
186 | */ | ||
187 | void __frontswap_invalidate_page(unsigned type, pgoff_t offset) | ||
188 | { | ||
189 | struct swap_info_struct *sis = swap_info[type]; | ||
190 | |||
191 | BUG_ON(sis == NULL); | ||
192 | if (frontswap_test(sis, offset)) { | ||
193 | (*frontswap_ops.invalidate_page)(type, offset); | ||
194 | atomic_dec(&sis->frontswap_pages); | ||
195 | frontswap_clear(sis, offset); | ||
196 | inc_frontswap_invalidates(); | ||
197 | } | ||
198 | } | ||
199 | EXPORT_SYMBOL(__frontswap_invalidate_page); | ||
200 | |||
201 | /* | ||
202 | * Invalidate all data from frontswap associated with all offsets for the | ||
203 | * specified swaptype. | ||
204 | */ | ||
205 | void __frontswap_invalidate_area(unsigned type) | ||
206 | { | ||
207 | struct swap_info_struct *sis = swap_info[type]; | ||
208 | |||
209 | BUG_ON(sis == NULL); | ||
210 | if (sis->frontswap_map == NULL) | ||
211 | return; | ||
212 | (*frontswap_ops.invalidate_area)(type); | ||
213 | atomic_set(&sis->frontswap_pages, 0); | ||
214 | memset(sis->frontswap_map, 0, sis->max / sizeof(long)); | ||
215 | } | ||
216 | EXPORT_SYMBOL(__frontswap_invalidate_area); | ||
217 | |||
218 | /* | ||
219 | * Frontswap, like a true swap device, may unnecessarily retain pages | ||
220 | * under certain circumstances; "shrink" frontswap is essentially a | ||
221 | * "partial swapoff" and works by calling try_to_unuse to unuse | ||
222 | * enough frontswap pages to reduce, subject to memory | ||
223 | * constraints, the number of pages in frontswap to the | ||
224 | * number given in the parameter target_pages. | ||
225 | */ | ||
226 | void frontswap_shrink(unsigned long target_pages) | ||
227 | { | ||
228 | struct swap_info_struct *si = NULL; | ||
229 | int si_frontswap_pages; | ||
230 | unsigned long total_pages = 0, total_pages_to_unuse; | ||
231 | unsigned long pages = 0, pages_to_unuse = 0; | ||
232 | int type; | ||
233 | bool locked = false; | ||
234 | |||
235 | /* | ||
236 | * we don't want to hold swap_lock while doing a very | ||
237 | * lengthy try_to_unuse, but swap_list may change | ||
238 | * so restart scan from swap_list.head each time | ||
239 | */ | ||
240 | spin_lock(&swap_lock); | ||
241 | locked = true; | ||
242 | total_pages = 0; | ||
243 | for (type = swap_list.head; type >= 0; type = si->next) { | ||
244 | si = swap_info[type]; | ||
245 | total_pages += atomic_read(&si->frontswap_pages); | ||
246 | } | ||
247 | if (total_pages <= target_pages) | ||
248 | goto out; | ||
249 | total_pages_to_unuse = total_pages - target_pages; | ||
250 | for (type = swap_list.head; type >= 0; type = si->next) { | ||
251 | si = swap_info[type]; | ||
252 | si_frontswap_pages = atomic_read(&si->frontswap_pages); | ||
253 | if (total_pages_to_unuse < si_frontswap_pages) | ||
254 | pages = pages_to_unuse = total_pages_to_unuse; | ||
255 | else { | ||
256 | pages = si_frontswap_pages; | ||
257 | pages_to_unuse = 0; /* unuse all */ | ||
258 | } | ||
259 | /* ensure there is enough RAM to fetch pages from frontswap */ | ||
260 | if (security_vm_enough_memory_mm(current->mm, pages)) | ||
261 | continue; | ||
262 | vm_unacct_memory(pages); | ||
263 | break; | ||
264 | } | ||
265 | if (type < 0) | ||
266 | goto out; | ||
267 | locked = false; | ||
268 | spin_unlock(&swap_lock); | ||
269 | try_to_unuse(type, true, pages_to_unuse); | ||
270 | out: | ||
271 | if (locked) | ||
272 | spin_unlock(&swap_lock); | ||
273 | return; | ||
274 | } | ||
275 | EXPORT_SYMBOL(frontswap_shrink); | ||
276 | |||
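No caller of frontswap_shrink() appears in this diff, so as a purely hypothetical usage sketch, a backend under memory pressure could ask frontswap to give back about half of what it currently holds, using frontswap_curr_pages() defined just below:

        unsigned long cur = frontswap_curr_pages();

        if (cur)
                frontswap_shrink(cur / 2);      /* unuse pages until roughly cur/2 remain */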
277 | /* | ||
278 | * Count and return the number of frontswap pages across all | ||
279 | * swap devices. This is exported so that backend drivers can | ||
280 | * determine current usage without reading debugfs. | ||
281 | */ | ||
282 | unsigned long frontswap_curr_pages(void) | ||
283 | { | ||
284 | int type; | ||
285 | unsigned long totalpages = 0; | ||
286 | struct swap_info_struct *si = NULL; | ||
287 | |||
288 | spin_lock(&swap_lock); | ||
289 | for (type = swap_list.head; type >= 0; type = si->next) { | ||
290 | si = swap_info[type]; | ||
291 | totalpages += atomic_read(&si->frontswap_pages); | ||
292 | } | ||
293 | spin_unlock(&swap_lock); | ||
294 | return totalpages; | ||
295 | } | ||
296 | EXPORT_SYMBOL(frontswap_curr_pages); | ||
297 | |||
298 | static int __init init_frontswap(void) | ||
299 | { | ||
300 | #ifdef CONFIG_DEBUG_FS | ||
301 | struct dentry *root = debugfs_create_dir("frontswap", NULL); | ||
302 | if (root == NULL) | ||
303 | return -ENXIO; | ||
304 | debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads); | ||
305 | debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores); | ||
306 | debugfs_create_u64("failed_stores", S_IRUGO, root, | ||
307 | &frontswap_failed_stores); | ||
308 | debugfs_create_u64("invalidates", S_IRUGO, | ||
309 | root, &frontswap_invalidates); | ||
310 | #endif | ||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | module_init(init_frontswap); | ||
diff --git a/mm/internal.h b/mm/internal.h index 5cbb78190041..2ba87fbfb75b 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page); | |||
94 | /* | 94 | /* |
95 | * in mm/page_alloc.c | 95 | * in mm/page_alloc.c |
96 | */ | 96 | */ |
97 | extern void set_pageblock_migratetype(struct page *page, int migratetype); | ||
98 | extern int move_freepages_block(struct zone *zone, struct page *page, | ||
99 | int migratetype); | ||
100 | extern void __free_pages_bootmem(struct page *page, unsigned int order); | 97 | extern void __free_pages_bootmem(struct page *page, unsigned int order); |
101 | extern void prep_compound_page(struct page *page, unsigned long order); | 98 | extern void prep_compound_page(struct page *page, unsigned long order); |
102 | #ifdef CONFIG_MEMORY_FAILURE | 99 | #ifdef CONFIG_MEMORY_FAILURE |
@@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page); | |||
104 | #endif | 101 | #endif |
105 | 102 | ||
106 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA | 103 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
107 | #include <linux/compaction.h> | ||
108 | 104 | ||
109 | /* | 105 | /* |
110 | * in mm/compaction.c | 106 | * in mm/compaction.c |
@@ -123,14 +119,11 @@ struct compact_control { | |||
123 | unsigned long nr_migratepages; /* Number of pages to migrate */ | 119 | unsigned long nr_migratepages; /* Number of pages to migrate */ |
124 | unsigned long free_pfn; /* isolate_freepages search base */ | 120 | unsigned long free_pfn; /* isolate_freepages search base */ |
125 | unsigned long migrate_pfn; /* isolate_migratepages search base */ | 121 | unsigned long migrate_pfn; /* isolate_migratepages search base */ |
126 | enum compact_mode mode; /* Compaction mode */ | 122 | bool sync; /* Synchronous migration */ |
127 | 123 | ||
128 | int order; /* order a direct compactor needs */ | 124 | int order; /* order a direct compactor needs */ |
129 | int migratetype; /* MOVABLE, RECLAIMABLE etc */ | 125 | int migratetype; /* MOVABLE, RECLAIMABLE etc */ |
130 | struct zone *zone; | 126 | struct zone *zone; |
131 | |||
132 | /* Number of UNMOVABLE destination pageblocks skipped during scan */ | ||
133 | unsigned long nr_pageblocks_skipped; | ||
134 | }; | 127 | }; |
135 | 128 | ||
136 | unsigned long | 129 | unsigned long |
diff --git a/mm/memblock.c b/mm/memblock.c index 952123eba433..d4382095f8bd 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -184,7 +184,24 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u | |||
184 | } | 184 | } |
185 | } | 185 | } |
186 | 186 | ||
187 | static int __init_memblock memblock_double_array(struct memblock_type *type) | 187 | /** |
188 | * memblock_double_array - double the size of the memblock regions array | ||
189 | * @type: memblock type of the regions array being doubled | ||
190 | * @new_area_start: starting address of memory range to avoid overlap with | ||
191 | * @new_area_size: size of memory range to avoid overlap with | ||
192 | * | ||
193 | * Double the size of the @type regions array. If memblock is being used to | ||
194 | * allocate memory for a new reserved regions array and there is a previously | ||
195 | * allocated memory range [@new_area_start,@new_area_start+@new_area_size] | ||
196 | * waiting to be reserved, ensure the memory used by the new array does | ||
197 | * not overlap. | ||
198 | * | ||
199 | * RETURNS: | ||
200 | * 0 on success, -1 on failure. | ||
201 | */ | ||
202 | static int __init_memblock memblock_double_array(struct memblock_type *type, | ||
203 | phys_addr_t new_area_start, | ||
204 | phys_addr_t new_area_size) | ||
188 | { | 205 | { |
189 | struct memblock_region *new_array, *old_array; | 206 | struct memblock_region *new_array, *old_array; |
190 | phys_addr_t old_size, new_size, addr; | 207 | phys_addr_t old_size, new_size, addr; |
@@ -222,7 +239,18 @@ static int __init_memblock memblock_double_array(struct memblock_type *type) | |||
222 | new_array = kmalloc(new_size, GFP_KERNEL); | 239 | new_array = kmalloc(new_size, GFP_KERNEL); |
223 | addr = new_array ? __pa(new_array) : 0; | 240 | addr = new_array ? __pa(new_array) : 0; |
224 | } else { | 241 | } else { |
225 | addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t)); | 242 | /* only exclude range when trying to double reserved.regions */ |
243 | if (type != &memblock.reserved) | ||
244 | new_area_start = new_area_size = 0; | ||
245 | |||
246 | addr = memblock_find_in_range(new_area_start + new_area_size, | ||
247 | memblock.current_limit, | ||
248 | new_size, sizeof(phys_addr_t)); | ||
249 | if (!addr && new_area_size) | ||
250 | addr = memblock_find_in_range(0, | ||
251 | min(new_area_start, memblock.current_limit), | ||
252 | new_size, sizeof(phys_addr_t)); | ||
253 | |||
226 | new_array = addr ? __va(addr) : 0; | 254 | new_array = addr ? __va(addr) : 0; |
227 | } | 255 | } |
228 | if (!addr) { | 256 | if (!addr) { |
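To make the new search order concrete with illustrative numbers: suppose reserved.regions needs doubling while a just-found range [1 GB, 1 GB + 64 KB) is still waiting to be reserved and memblock.current_limit is 4 GB. The first memblock_find_in_range() call looks for new_size bytes in [1 GB + 64 KB, 4 GB); only if that fails does the second call fall back to [0, 1 GB), so the enlarged array can never be placed on top of the pending reservation.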
@@ -399,7 +427,7 @@ repeat: | |||
399 | */ | 427 | */ |
400 | if (!insert) { | 428 | if (!insert) { |
401 | while (type->cnt + nr_new > type->max) | 429 | while (type->cnt + nr_new > type->max) |
402 | if (memblock_double_array(type) < 0) | 430 | if (memblock_double_array(type, obase, size) < 0) |
403 | return -ENOMEM; | 431 | return -ENOMEM; |
404 | insert = true; | 432 | insert = true; |
405 | goto repeat; | 433 | goto repeat; |
@@ -450,7 +478,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type, | |||
450 | 478 | ||
451 | /* we'll create at most two more regions */ | 479 | /* we'll create at most two more regions */ |
452 | while (type->cnt + 2 > type->max) | 480 | while (type->cnt + 2 > type->max) |
453 | if (memblock_double_array(type) < 0) | 481 | if (memblock_double_array(type, base, size) < 0) |
454 | return -ENOMEM; | 482 | return -ENOMEM; |
455 | 483 | ||
456 | for (i = 0; i < type->cnt; i++) { | 484 | for (i = 0; i < type->cnt; i++) { |
@@ -540,9 +568,9 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) | |||
540 | * __next_free_mem_range - next function for for_each_free_mem_range() | 568 | * __next_free_mem_range - next function for for_each_free_mem_range() |
541 | * @idx: pointer to u64 loop variable | 569 | * @idx: pointer to u64 loop variable |
542 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes | 570 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes |
543 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | 571 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
544 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | 572 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
545 | * @p_nid: ptr to int for nid of the range, can be %NULL | 573 | * @out_nid: ptr to int for nid of the range, can be %NULL |
546 | * | 574 | * |
547 | * Find the first free area from *@idx which matches @nid, fill the out | 575 | * Find the first free area from *@idx which matches @nid, fill the out |
548 | * parameters, and update *@idx for the next iteration. The lower 32bit of | 576 | * parameters, and update *@idx for the next iteration. The lower 32bit of |
@@ -616,9 +644,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, | |||
616 | * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() | 644 | * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() |
617 | * @idx: pointer to u64 loop variable | 645 | * @idx: pointer to u64 loop variable |
618 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes | 646 | * @nid: nid: node selector, %MAX_NUMNODES for all nodes |
619 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | 647 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
620 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | 648 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
621 | * @p_nid: ptr to int for nid of the range, can be %NULL | 649 | * @out_nid: ptr to int for nid of the range, can be %NULL |
622 | * | 650 | * |
623 | * Reverse of __next_free_mem_range(). | 651 | * Reverse of __next_free_mem_range(). |
624 | */ | 652 | */ |
@@ -867,6 +895,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr) | |||
867 | return memblock_search(&memblock.memory, addr) != -1; | 895 | return memblock_search(&memblock.memory, addr) != -1; |
868 | } | 896 | } |
869 | 897 | ||
898 | /** | ||
899 | * memblock_is_region_memory - check if a region is a subset of memory | ||
900 | * @base: base of region to check | ||
901 | * @size: size of region to check | ||
902 | * | ||
903 | * Check if the region [@base, @base+@size) is a subset of a memory block. | ||
904 | * | ||
905 | * RETURNS: | ||
906 | * 0 if false, non-zero if true | ||
907 | */ | ||
870 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) | 908 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
871 | { | 909 | { |
872 | int idx = memblock_search(&memblock.memory, base); | 910 | int idx = memblock_search(&memblock.memory, base); |
@@ -879,6 +917,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size | |||
879 | memblock.memory.regions[idx].size) >= end; | 917 | memblock.memory.regions[idx].size) >= end; |
880 | } | 918 | } |
881 | 919 | ||
920 | /** | ||
921 | * memblock_is_region_reserved - check if a region intersects reserved memory | ||
922 | * @base: base of region to check | ||
923 | * @size: size of region to check | ||
924 | * | ||
925 | * Check if the region [@base, @base+@size) intersects a reserved memory block. | ||
926 | * | ||
927 | * RETURNS: | ||
928 | * 0 if false, non-zero if true | ||
929 | */ | ||
882 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) | 930 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
883 | { | 931 | { |
884 | memblock_cap_size(base, &size); | 932 | memblock_cap_size(base, &size); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ac35bccadb7b..f72b5e52451a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1148,7 +1148,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, | |||
1148 | { | 1148 | { |
1149 | if (root_memcg == memcg) | 1149 | if (root_memcg == memcg) |
1150 | return true; | 1150 | return true; |
1151 | if (!root_memcg->use_hierarchy) | 1151 | if (!root_memcg->use_hierarchy || !memcg) |
1152 | return false; | 1152 | return false; |
1153 | return css_is_ancestor(&memcg->css, &root_memcg->css); | 1153 | return css_is_ancestor(&memcg->css, &root_memcg->css); |
1154 | } | 1154 | } |
@@ -1234,7 +1234,7 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec) | |||
1234 | 1234 | ||
1235 | /** | 1235 | /** |
1236 | * mem_cgroup_margin - calculate chargeable space of a memory cgroup | 1236 | * mem_cgroup_margin - calculate chargeable space of a memory cgroup |
1237 | * @mem: the memory cgroup | 1237 | * @memcg: the memory cgroup |
1238 | * | 1238 | * |
1239 | * Returns the maximum amount of memory @mem can be charged with, in | 1239 | * Returns the maximum amount of memory @mem can be charged with, in |
1240 | * pages. | 1240 | * pages. |
@@ -1508,7 +1508,7 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, | |||
1508 | 1508 | ||
1509 | /** | 1509 | /** |
1510 | * test_mem_cgroup_node_reclaimable | 1510 | * test_mem_cgroup_node_reclaimable |
1511 | * @mem: the target memcg | 1511 | * @memcg: the target memcg |
1512 | * @nid: the node ID to be checked. | 1512 | * @nid: the node ID to be checked. |
1513 | * @noswap : specify true here if the user wants file only information. | 1513 | * @noswap : specify true here if the user wants file only information. |
1514 | * | 1514 | * |
diff --git a/mm/memory.c b/mm/memory.c index 1b7dc662bf9f..2466d1250231 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1225,7 +1225,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, | |||
1225 | next = pmd_addr_end(addr, end); | 1225 | next = pmd_addr_end(addr, end); |
1226 | if (pmd_trans_huge(*pmd)) { | 1226 | if (pmd_trans_huge(*pmd)) { |
1227 | if (next - addr != HPAGE_PMD_SIZE) { | 1227 | if (next - addr != HPAGE_PMD_SIZE) { |
1228 | VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); | 1228 | #ifdef CONFIG_DEBUG_VM |
1229 | if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { | ||
1230 | pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", | ||
1231 | __func__, addr, end, | ||
1232 | vma->vm_start, | ||
1233 | vma->vm_end); | ||
1234 | BUG(); | ||
1235 | } | ||
1236 | #endif | ||
1229 | split_huge_page_pmd(vma->vm_mm, pmd); | 1237 | split_huge_page_pmd(vma->vm_mm, pmd); |
1230 | } else if (zap_huge_pmd(tlb, vma, pmd, addr)) | 1238 | } else if (zap_huge_pmd(tlb, vma, pmd, addr)) |
1231 | goto next; | 1239 | goto next; |
@@ -1366,7 +1374,7 @@ void unmap_vmas(struct mmu_gather *tlb, | |||
1366 | /** | 1374 | /** |
1367 | * zap_page_range - remove user pages in a given range | 1375 | * zap_page_range - remove user pages in a given range |
1368 | * @vma: vm_area_struct holding the applicable pages | 1376 | * @vma: vm_area_struct holding the applicable pages |
1369 | * @address: starting address of pages to zap | 1377 | * @start: starting address of pages to zap |
1370 | * @size: number of bytes to zap | 1378 | * @size: number of bytes to zap |
1371 | * @details: details of nonlinear truncation or shared cache invalidation | 1379 | * @details: details of nonlinear truncation or shared cache invalidation |
1372 | * | 1380 | * |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f15c1b24ca18..1d771e4200d2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1177,7 +1177,7 @@ static long do_mbind(unsigned long start, unsigned long len, | |||
1177 | if (!list_empty(&pagelist)) { | 1177 | if (!list_empty(&pagelist)) { |
1178 | nr_failed = migrate_pages(&pagelist, new_vma_page, | 1178 | nr_failed = migrate_pages(&pagelist, new_vma_page, |
1179 | (unsigned long)vma, | 1179 | (unsigned long)vma, |
1180 | false, true); | 1180 | false, MIGRATE_SYNC); |
1181 | if (nr_failed) | 1181 | if (nr_failed) |
1182 | putback_lru_pages(&pagelist); | 1182 | putback_lru_pages(&pagelist); |
1183 | } | 1183 | } |
diff --git a/mm/migrate.c b/mm/migrate.c index ab81d482ae6f..be26d5cbe56b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page) | |||
436 | * is actually a signal that all of the page has become dirty. | 436 | * is actually a signal that all of the page has become dirty. |
437 | * Whereas only part of our page may be dirty. | 437 | * Whereas only part of our page may be dirty. |
438 | */ | 438 | */ |
439 | __set_page_dirty_nobuffers(newpage); | 439 | if (PageSwapBacked(page)) |
440 | SetPageDirty(newpage); | ||
441 | else | ||
442 | __set_page_dirty_nobuffers(newpage); | ||
440 | } | 443 | } |
441 | 444 | ||
442 | mlock_migrate_page(newpage, page); | 445 | mlock_migrate_page(newpage, page); |
diff --git a/mm/nommu.c b/mm/nommu.c index c4acfbc09972..d4b0c10872de 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1486,7 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, | |||
1486 | 1486 | ||
1487 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | 1487 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); |
1488 | 1488 | ||
1489 | ret = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); | 1489 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
1490 | 1490 | ||
1491 | if (file) | 1491 | if (file) |
1492 | fput(file); | 1492 | fput(file); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ed0e19677360..ac300c99baf6 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -183,7 +183,8 @@ static bool oom_unkillable_task(struct task_struct *p, | |||
183 | unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, | 183 | unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, |
184 | const nodemask_t *nodemask, unsigned long totalpages) | 184 | const nodemask_t *nodemask, unsigned long totalpages) |
185 | { | 185 | { |
186 | unsigned long points; | 186 | long points; |
187 | long adj; | ||
187 | 188 | ||
188 | if (oom_unkillable_task(p, memcg, nodemask)) | 189 | if (oom_unkillable_task(p, memcg, nodemask)) |
189 | return 0; | 190 | return 0; |
@@ -192,7 +193,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, | |||
192 | if (!p) | 193 | if (!p) |
193 | return 0; | 194 | return 0; |
194 | 195 | ||
195 | if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) { | 196 | adj = p->signal->oom_score_adj; |
197 | if (adj == OOM_SCORE_ADJ_MIN) { | ||
196 | task_unlock(p); | 198 | task_unlock(p); |
197 | return 0; | 199 | return 0; |
198 | } | 200 | } |
@@ -210,20 +212,17 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, | |||
210 | * implementation used by LSMs. | 212 | * implementation used by LSMs. |
211 | */ | 213 | */ |
212 | if (has_capability_noaudit(p, CAP_SYS_ADMIN)) | 214 | if (has_capability_noaudit(p, CAP_SYS_ADMIN)) |
213 | points -= 30 * totalpages / 1000; | 215 | adj -= 30; |
214 | 216 | ||
215 | /* | 217 | /* Normalize to oom_score_adj units */ |
216 | * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may | 218 | adj *= totalpages / 1000; |
217 | * either completely disable oom killing or always prefer a certain | 219 | points += adj; |
218 | * task. | ||
219 | */ | ||
220 | points += p->signal->oom_score_adj * totalpages / 1000; | ||
221 | 220 | ||
222 | /* | 221 | /* |
223 | * Never return 0 for an eligible task regardless of the root bonus and | 222 | * Never return 0 for an eligible task regardless of the root bonus and |
224 | * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). | 223 | * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). |
225 | */ | 224 | */ |
226 | return points ? points : 1; | 225 | return points > 0 ? points : 1; |
227 | } | 226 | } |
228 | 227 | ||
229 | /* | 228 | /* |
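The refactored arithmetic gives the same bias as the old expressions, up to integer-division rounding, just folded into one signed adjustment before scaling. With illustrative numbers, totalpages = 1,000,000 and oom_score_adj = 500: adj = 500, scaled to 500 * (1,000,000 / 1000) = 500,000 extra points, matching the old 'oom_score_adj * totalpages / 1000' term; for a task with CAP_SYS_ADMIN, the 30 subtracted from adj likewise scales to the old '30 * totalpages / 1000' discount. Making points signed lets a strongly negative oom_score_adj pull the sum below zero, which the final 'return points > 0 ? points : 1' clamps to 1 for eligible tasks.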
@@ -366,7 +365,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, | |||
366 | 365 | ||
367 | /** | 366 | /** |
368 | * dump_tasks - dump current memory state of all system tasks | 367 | * dump_tasks - dump current memory state of all system tasks |
369 | * @mem: current's memory controller, if constrained | 368 | * @memcg: current's memory controller, if constrained |
370 | * @nodemask: nodemask passed to page allocator for mempolicy ooms | 369 | * @nodemask: nodemask passed to page allocator for mempolicy ooms |
371 | * | 370 | * |
372 | * Dumps the current memory state of all eligible tasks. Tasks not in the same | 371 | * Dumps the current memory state of all eligible tasks. Tasks not in the same |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6092f331b32e..44030096da63 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes); | |||
219 | 219 | ||
220 | int page_group_by_mobility_disabled __read_mostly; | 220 | int page_group_by_mobility_disabled __read_mostly; |
221 | 221 | ||
222 | void set_pageblock_migratetype(struct page *page, int migratetype) | 222 | static void set_pageblock_migratetype(struct page *page, int migratetype) |
223 | { | 223 | { |
224 | 224 | ||
225 | if (unlikely(page_group_by_mobility_disabled)) | 225 | if (unlikely(page_group_by_mobility_disabled)) |
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone, | |||
954 | return pages_moved; | 954 | return pages_moved; |
955 | } | 955 | } |
956 | 956 | ||
957 | int move_freepages_block(struct zone *zone, struct page *page, | 957 | static int move_freepages_block(struct zone *zone, struct page *page, |
958 | int migratetype) | 958 | int migratetype) |
959 | { | 959 | { |
960 | unsigned long start_pfn, end_pfn; | 960 | unsigned long start_pfn, end_pfn; |
961 | struct page *start_page, *end_page; | 961 | struct page *start_page, *end_page; |
@@ -5651,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) | |||
5651 | .nr_migratepages = 0, | 5651 | .nr_migratepages = 0, |
5652 | .order = -1, | 5652 | .order = -1, |
5653 | .zone = page_zone(pfn_to_page(start)), | 5653 | .zone = page_zone(pfn_to_page(start)), |
5654 | .mode = COMPACT_SYNC, | 5654 | .sync = true, |
5655 | }; | 5655 | }; |
5656 | INIT_LIST_HEAD(&cc.migratepages); | 5656 | INIT_LIST_HEAD(&cc.migratepages); |
5657 | 5657 | ||
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 1ccbd714059c..eb750f851395 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c | |||
@@ -392,7 +392,7 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, | |||
392 | 392 | ||
393 | /** | 393 | /** |
394 | * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. | 394 | * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. |
395 | * @end: swap entry to be cmpxchged | 395 | * @ent: swap entry to be cmpxchged |
396 | * @old: old id | 396 | * @old: old id |
397 | * @new: new id | 397 | * @new: new id |
398 | * | 398 | * |
@@ -422,7 +422,7 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, | |||
422 | /** | 422 | /** |
423 | * swap_cgroup_record - record mem_cgroup for this swp_entry. | 423 | * swap_cgroup_record - record mem_cgroup for this swp_entry. |
424 | * @ent: swap entry to be recorded into | 424 | * @ent: swap entry to be recorded into |
425 | * @mem: mem_cgroup to be recorded | 425 | * @id: mem_cgroup to be recorded |
426 | * | 426 | * |
427 | * Returns old value at success, 0 at failure. | 427 | * Returns old value at success, 0 at failure. |
428 | * (Of course, old value can be 0.) | 428 | * (Of course, old value can be 0.) |
diff --git a/mm/page_io.c b/mm/page_io.c index dc76b4d0611e..34f02923744c 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/bio.h> | 18 | #include <linux/bio.h> |
19 | #include <linux/swapops.h> | 19 | #include <linux/swapops.h> |
20 | #include <linux/writeback.h> | 20 | #include <linux/writeback.h> |
21 | #include <linux/frontswap.h> | ||
21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
22 | 23 | ||
23 | static struct bio *get_swap_bio(gfp_t gfp_flags, | 24 | static struct bio *get_swap_bio(gfp_t gfp_flags, |
@@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) | |||
98 | unlock_page(page); | 99 | unlock_page(page); |
99 | goto out; | 100 | goto out; |
100 | } | 101 | } |
102 | if (frontswap_store(page) == 0) { | ||
103 | set_page_writeback(page); | ||
104 | unlock_page(page); | ||
105 | end_page_writeback(page); | ||
106 | goto out; | ||
107 | } | ||
101 | bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); | 108 | bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); |
102 | if (bio == NULL) { | 109 | if (bio == NULL) { |
103 | set_page_dirty(page); | 110 | set_page_dirty(page); |
@@ -122,6 +129,11 @@ int swap_readpage(struct page *page) | |||
122 | 129 | ||
123 | VM_BUG_ON(!PageLocked(page)); | 130 | VM_BUG_ON(!PageLocked(page)); |
124 | VM_BUG_ON(PageUptodate(page)); | 131 | VM_BUG_ON(PageUptodate(page)); |
132 | if (frontswap_load(page) == 0) { | ||
133 | SetPageUptodate(page); | ||
134 | unlock_page(page); | ||
135 | goto out; | ||
136 | } | ||
125 | bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); | 137 | bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); |
126 | if (bio == NULL) { | 138 | if (bio == NULL) { |
127 | unlock_page(page); | 139 | unlock_page(page); |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index aa9701e12714..6c118d012bb5 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
@@ -162,7 +162,6 @@ static int walk_hugetlb_range(struct vm_area_struct *vma, | |||
162 | 162 | ||
163 | /** | 163 | /** |
164 | * walk_page_range - walk a memory map's page tables with a callback | 164 | * walk_page_range - walk a memory map's page tables with a callback |
165 | * @mm: memory map to walk | ||
166 | * @addr: starting address | 165 | * @addr: starting address |
167 | * @end: ending address | 166 | * @end: ending address |
168 | * @walk: set of callbacks to invoke for each level of the tree | 167 | * @walk: set of callbacks to invoke for each level of the tree |
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index 405d331804c3..3707c71ae4cd 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c | |||
@@ -360,7 +360,6 @@ err_free: | |||
360 | * @chunk: chunk to depopulate | 360 | * @chunk: chunk to depopulate |
361 | * @off: offset to the area to depopulate | 361 | * @off: offset to the area to depopulate |
362 | * @size: size of the area to depopulate in bytes | 362 | * @size: size of the area to depopulate in bytes |
363 | * @flush: whether to flush cache and tlb or not | ||
364 | * | 363 | * |
365 | * For each cpu, depopulate and unmap pages [@page_start,@page_end) | 364 | * For each cpu, depopulate and unmap pages [@page_start,@page_end) |
366 | * from @chunk. If @flush is true, vcache is flushed before unmapping | 365 | * from @chunk. If @flush is true, vcache is flushed before unmapping |
diff --git a/mm/shmem.c b/mm/shmem.c index 585bd220a21e..a15a466d0d1d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, | |||
683 | mutex_lock(&shmem_swaplist_mutex); | 683 | mutex_lock(&shmem_swaplist_mutex); |
684 | /* | 684 | /* |
685 | * We needed to drop mutex to make that restrictive page | 685 | * We needed to drop mutex to make that restrictive page |
686 | * allocation; but the inode might already be freed by now, | 686 | * allocation, but the inode might have been freed while we |
687 | * and we cannot refer to inode or mapping or info to check. | 687 | * dropped it: although a racing shmem_evict_inode() cannot |
688 | * However, we do hold page lock on the PageSwapCache page, | 688 | * complete without emptying the radix_tree, our page lock |
689 | * so can check if that still has our reference remaining. | 689 | * on this swapcache page is not enough to prevent that - |
690 | * free_swap_and_cache() of our swap entry will only | ||
691 | * trylock_page(), removing swap from radix_tree whatever. | ||
692 | * | ||
693 | * We must not proceed to shmem_add_to_page_cache() if the | ||
694 | * inode has been freed, but of course we cannot rely on | ||
695 | * inode or mapping or info to check that. However, we can | ||
696 | * safely check if our swap entry is still in use (and here | ||
697 | * it can't have got reused for another page): if it's still | ||
698 | * in use, then the inode cannot have been freed yet, and we | ||
699 | * can safely proceed (if it's no longer in use, that tells | ||
700 | * nothing about the inode, but we don't need to unuse swap). | ||
690 | */ | 701 | */ |
691 | if (!page_swapcount(*pagep)) | 702 | if (!page_swapcount(*pagep)) |
692 | error = -ENOENT; | 703 | error = -ENOENT; |
@@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page) | |||
730 | 741 | ||
731 | /* | 742 | /* |
732 | * There's a faint possibility that swap page was replaced before | 743 | * There's a faint possibility that swap page was replaced before |
733 | * caller locked it: it will come back later with the right page. | 744 | * caller locked it: caller will come back later with the right page. |
734 | */ | 745 | */ |
735 | if (unlikely(!PageSwapCache(page))) | 746 | if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) |
736 | goto out; | 747 | goto out; |
737 | 748 | ||
738 | /* | 749 | /* |
@@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, | |||
995 | newpage = shmem_alloc_page(gfp, info, index); | 1006 | newpage = shmem_alloc_page(gfp, info, index); |
996 | if (!newpage) | 1007 | if (!newpage) |
997 | return -ENOMEM; | 1008 | return -ENOMEM; |
998 | VM_BUG_ON(shmem_should_replace_page(newpage, gfp)); | ||
999 | 1009 | ||
1000 | *pagep = newpage; | ||
1001 | page_cache_get(newpage); | 1010 | page_cache_get(newpage); |
1002 | copy_highpage(newpage, oldpage); | 1011 | copy_highpage(newpage, oldpage); |
1012 | flush_dcache_page(newpage); | ||
1003 | 1013 | ||
1004 | VM_BUG_ON(!PageLocked(oldpage)); | ||
1005 | __set_page_locked(newpage); | 1014 | __set_page_locked(newpage); |
1006 | VM_BUG_ON(!PageUptodate(oldpage)); | ||
1007 | SetPageUptodate(newpage); | 1015 | SetPageUptodate(newpage); |
1008 | VM_BUG_ON(!PageSwapBacked(oldpage)); | ||
1009 | SetPageSwapBacked(newpage); | 1016 | SetPageSwapBacked(newpage); |
1010 | VM_BUG_ON(!swap_index); | ||
1011 | set_page_private(newpage, swap_index); | 1017 | set_page_private(newpage, swap_index); |
1012 | VM_BUG_ON(!PageSwapCache(oldpage)); | ||
1013 | SetPageSwapCache(newpage); | 1018 | SetPageSwapCache(newpage); |
1014 | 1019 | ||
1015 | /* | 1020 | /* |
@@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, | |||
1019 | spin_lock_irq(&swap_mapping->tree_lock); | 1024 | spin_lock_irq(&swap_mapping->tree_lock); |
1020 | error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, | 1025 | error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, |
1021 | newpage); | 1026 | newpage); |
1022 | __inc_zone_page_state(newpage, NR_FILE_PAGES); | 1027 | if (!error) { |
1023 | __dec_zone_page_state(oldpage, NR_FILE_PAGES); | 1028 | __inc_zone_page_state(newpage, NR_FILE_PAGES); |
1029 | __dec_zone_page_state(oldpage, NR_FILE_PAGES); | ||
1030 | } | ||
1024 | spin_unlock_irq(&swap_mapping->tree_lock); | 1031 | spin_unlock_irq(&swap_mapping->tree_lock); |
1025 | BUG_ON(error); | ||
1026 | 1032 | ||
1027 | mem_cgroup_replace_page_cache(oldpage, newpage); | 1033 | if (unlikely(error)) { |
1028 | lru_cache_add_anon(newpage); | 1034 | /* |
1035 | * Is this possible? I think not, now that our callers check | ||
1036 | * both PageSwapCache and page_private after getting page lock; | ||
1037 | * but be defensive. Reverse old to newpage for clear and free. | ||
1038 | */ | ||
1039 | oldpage = newpage; | ||
1040 | } else { | ||
1041 | mem_cgroup_replace_page_cache(oldpage, newpage); | ||
1042 | lru_cache_add_anon(newpage); | ||
1043 | *pagep = newpage; | ||
1044 | } | ||
1029 | 1045 | ||
1030 | ClearPageSwapCache(oldpage); | 1046 | ClearPageSwapCache(oldpage); |
1031 | set_page_private(oldpage, 0); | 1047 | set_page_private(oldpage, 0); |
@@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, | |||
1033 | unlock_page(oldpage); | 1049 | unlock_page(oldpage); |
1034 | page_cache_release(oldpage); | 1050 | page_cache_release(oldpage); |
1035 | page_cache_release(oldpage); | 1051 | page_cache_release(oldpage); |
1036 | return 0; | 1052 | return error; |
1037 | } | 1053 | } |
1038 | 1054 | ||
1039 | /* | 1055 | /* |
@@ -1107,7 +1123,8 @@ repeat: | |||
1107 | 1123 | ||
1108 | /* We have to do this with page locked to prevent races */ | 1124 | /* We have to do this with page locked to prevent races */ |
1109 | lock_page(page); | 1125 | lock_page(page); |
1110 | if (!PageSwapCache(page) || page->mapping) { | 1126 | if (!PageSwapCache(page) || page_private(page) != swap.val || |
1127 | page->mapping) { | ||
1111 | error = -EEXIST; /* try again */ | 1128 | error = -EEXIST; /* try again */ |
1112 | goto failed; | 1129 | goto failed; |
1113 | } | 1130 | } |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 457b10baef59..71373d03fcee 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include <linux/memcontrol.h> | 31 | #include <linux/memcontrol.h> |
32 | #include <linux/poll.h> | 32 | #include <linux/poll.h> |
33 | #include <linux/oom.h> | 33 | #include <linux/oom.h> |
34 | #include <linux/frontswap.h> | ||
35 | #include <linux/swapfile.h> | ||
34 | 36 | ||
35 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
36 | #include <asm/tlbflush.h> | 38 | #include <asm/tlbflush.h> |
@@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, | |||
42 | static void free_swap_count_continuations(struct swap_info_struct *); | 44 | static void free_swap_count_continuations(struct swap_info_struct *); |
43 | static sector_t map_swap_entry(swp_entry_t, struct block_device**); | 45 | static sector_t map_swap_entry(swp_entry_t, struct block_device**); |
44 | 46 | ||
45 | static DEFINE_SPINLOCK(swap_lock); | 47 | DEFINE_SPINLOCK(swap_lock); |
46 | static unsigned int nr_swapfiles; | 48 | static unsigned int nr_swapfiles; |
47 | long nr_swap_pages; | 49 | long nr_swap_pages; |
48 | long total_swap_pages; | 50 | long total_swap_pages; |
@@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry "; | |||
53 | static const char Bad_offset[] = "Bad swap offset entry "; | 55 | static const char Bad_offset[] = "Bad swap offset entry "; |
54 | static const char Unused_offset[] = "Unused swap offset entry "; | 56 | static const char Unused_offset[] = "Unused swap offset entry "; |
55 | 57 | ||
56 | static struct swap_list_t swap_list = {-1, -1}; | 58 | struct swap_list_t swap_list = {-1, -1}; |
57 | 59 | ||
58 | static struct swap_info_struct *swap_info[MAX_SWAPFILES]; | 60 | struct swap_info_struct *swap_info[MAX_SWAPFILES]; |
59 | 61 | ||
60 | static DEFINE_MUTEX(swapon_mutex); | 62 | static DEFINE_MUTEX(swapon_mutex); |
61 | 63 | ||
@@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, | |||
556 | swap_list.next = p->type; | 558 | swap_list.next = p->type; |
557 | nr_swap_pages++; | 559 | nr_swap_pages++; |
558 | p->inuse_pages--; | 560 | p->inuse_pages--; |
561 | frontswap_invalidate_page(p->type, offset); | ||
559 | if ((p->flags & SWP_BLKDEV) && | 562 | if ((p->flags & SWP_BLKDEV) && |
560 | disk->fops->swap_slot_free_notify) | 563 | disk->fops->swap_slot_free_notify) |
561 | disk->fops->swap_slot_free_notify(p->bdev, offset); | 564 | disk->fops->swap_slot_free_notify(p->bdev, offset); |
@@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm, | |||
985 | } | 988 | } |
986 | 989 | ||
987 | /* | 990 | /* |
988 | * Scan swap_map from current position to next entry still in use. | 991 | * Scan swap_map (or frontswap_map if frontswap parameter is true) |
992 | * from current position to next entry still in use. | ||
989 | * Recycle to start on reaching the end, returning 0 when empty. | 993 | * Recycle to start on reaching the end, returning 0 when empty. |
990 | */ | 994 | */ |
991 | static unsigned int find_next_to_unuse(struct swap_info_struct *si, | 995 | static unsigned int find_next_to_unuse(struct swap_info_struct *si, |
992 | unsigned int prev) | 996 | unsigned int prev, bool frontswap) |
993 | { | 997 | { |
994 | unsigned int max = si->max; | 998 | unsigned int max = si->max; |
995 | unsigned int i = prev; | 999 | unsigned int i = prev; |
@@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, | |||
1015 | prev = 0; | 1019 | prev = 0; |
1016 | i = 1; | 1020 | i = 1; |
1017 | } | 1021 | } |
1022 | if (frontswap) { | ||
1023 | if (frontswap_test(si, i)) | ||
1024 | break; | ||
1025 | else | ||
1026 | continue; | ||
1027 | } | ||
1018 | count = si->swap_map[i]; | 1028 | count = si->swap_map[i]; |
1019 | if (count && swap_count(count) != SWAP_MAP_BAD) | 1029 | if (count && swap_count(count) != SWAP_MAP_BAD) |
1020 | break; | 1030 | break; |
@@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, | |||
1026 | * We completely avoid races by reading each swap page in advance, | 1036 | * We completely avoid races by reading each swap page in advance, |
1027 | * and then search for the process using it. All the necessary | 1037 | * and then search for the process using it. All the necessary |
1028 | * page table adjustments can then be made atomically. | 1038 | * page table adjustments can then be made atomically. |
1039 | * | ||
1040 | * if the boolean frontswap is true, only unuse pages_to_unuse pages; | ||
1041 | * pages_to_unuse==0 means all pages; ignored if frontswap is false | ||
1029 | */ | 1042 | */ |
1030 | static int try_to_unuse(unsigned int type) | 1043 | int try_to_unuse(unsigned int type, bool frontswap, |
1044 | unsigned long pages_to_unuse) | ||
1031 | { | 1045 | { |
1032 | struct swap_info_struct *si = swap_info[type]; | 1046 | struct swap_info_struct *si = swap_info[type]; |
1033 | struct mm_struct *start_mm; | 1047 | struct mm_struct *start_mm; |
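Both modes of the widened signature are exercised elsewhere in this diff: sys_swapoff() below calls it to empty the device regardless of frontswap, while frontswap_shrink() in mm/frontswap.c above restricts the scan to pages actually present in frontswap and stops after a quota:

        try_to_unuse(type, false, 0);                   /* swapoff: unuse everything */
        try_to_unuse(type, true, pages_to_unuse);       /* frontswap: only frontswap pages, up to the count (0 = all) */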
@@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type) | |||
1060 | * one pass through swap_map is enough, but not necessarily: | 1074 | * one pass through swap_map is enough, but not necessarily: |
1061 | * there are races when an instance of an entry might be missed. | 1075 | * there are races when an instance of an entry might be missed. |
1062 | */ | 1076 | */ |
1063 | while ((i = find_next_to_unuse(si, i)) != 0) { | 1077 | while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { |
1064 | if (signal_pending(current)) { | 1078 | if (signal_pending(current)) { |
1065 | retval = -EINTR; | 1079 | retval = -EINTR; |
1066 | break; | 1080 | break; |
@@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type) | |||
1227 | * interactive performance. | 1241 | * interactive performance. |
1228 | */ | 1242 | */ |
1229 | cond_resched(); | 1243 | cond_resched(); |
1244 | if (frontswap && pages_to_unuse > 0) { | ||
1245 | if (!--pages_to_unuse) | ||
1246 | break; | ||
1247 | } | ||
1230 | } | 1248 | } |
1231 | 1249 | ||
1232 | mmput(start_mm); | 1250 | mmput(start_mm); |
@@ -1486,7 +1504,8 @@ bad_bmap: | |||
1486 | } | 1504 | } |
1487 | 1505 | ||
1488 | static void enable_swap_info(struct swap_info_struct *p, int prio, | 1506 | static void enable_swap_info(struct swap_info_struct *p, int prio, |
1489 | unsigned char *swap_map) | 1507 | unsigned char *swap_map, |
1508 | unsigned long *frontswap_map) | ||
1490 | { | 1509 | { |
1491 | int i, prev; | 1510 | int i, prev; |
1492 | 1511 | ||
@@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, | |||
1496 | else | 1515 | else |
1497 | p->prio = --least_priority; | 1516 | p->prio = --least_priority; |
1498 | p->swap_map = swap_map; | 1517 | p->swap_map = swap_map; |
1518 | frontswap_map_set(p, frontswap_map); | ||
1499 | p->flags |= SWP_WRITEOK; | 1519 | p->flags |= SWP_WRITEOK; |
1500 | nr_swap_pages += p->pages; | 1520 | nr_swap_pages += p->pages; |
1501 | total_swap_pages += p->pages; | 1521 | total_swap_pages += p->pages; |
@@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, | |||
1512 | swap_list.head = swap_list.next = p->type; | 1532 | swap_list.head = swap_list.next = p->type; |
1513 | else | 1533 | else |
1514 | swap_info[prev]->next = p->type; | 1534 | swap_info[prev]->next = p->type; |
1535 | frontswap_init(p->type); | ||
1515 | spin_unlock(&swap_lock); | 1536 | spin_unlock(&swap_lock); |
1516 | } | 1537 | } |
1517 | 1538 | ||
@@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
1585 | spin_unlock(&swap_lock); | 1606 | spin_unlock(&swap_lock); |
1586 | 1607 | ||
1587 | oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); | 1608 | oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); |
1588 | err = try_to_unuse(type); | 1609 | err = try_to_unuse(type, false, 0); /* force all pages to be unused */ |
1589 | compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj); | 1610 | compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj); |
1590 | 1611 | ||
1591 | if (err) { | 1612 | if (err) { |
@@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
1596 | * sys_swapoff for this swap_info_struct at this point. | 1617 | * sys_swapoff for this swap_info_struct at this point. |
1597 | */ | 1618 | */ |
1598 | /* re-insert swap space back into swap_list */ | 1619 | /* re-insert swap space back into swap_list */ |
1599 | enable_swap_info(p, p->prio, p->swap_map); | 1620 | enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p)); |
1600 | goto out_dput; | 1621 | goto out_dput; |
1601 | } | 1622 | } |
1602 | 1623 | ||
@@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
1622 | swap_map = p->swap_map; | 1643 | swap_map = p->swap_map; |
1623 | p->swap_map = NULL; | 1644 | p->swap_map = NULL; |
1624 | p->flags = 0; | 1645 | p->flags = 0; |
1646 | frontswap_invalidate_area(type); | ||
1625 | spin_unlock(&swap_lock); | 1647 | spin_unlock(&swap_lock); |
1626 | mutex_unlock(&swapon_mutex); | 1648 | mutex_unlock(&swapon_mutex); |
1627 | vfree(swap_map); | 1649 | vfree(swap_map); |
1650 | vfree(frontswap_map_get(p)); | ||
1628 | /* Destroy swap account information */ | 1651 | /* Destroy swap account information */ |
1629 | swap_cgroup_swapoff(type); | 1652 | swap_cgroup_swapoff(type); |
1630 | 1653 | ||
@@ -1893,24 +1916,20 @@ static unsigned long read_swap_header(struct swap_info_struct *p, | |||
1893 | 1916 | ||
1894 | /* | 1917 | /* |
1895 | * Find out how many pages are allowed for a single swap | 1918 | * Find out how many pages are allowed for a single swap |
1896 | * device. There are three limiting factors: 1) the number | 1919 | * device. There are two limiting factors: 1) the number |
1897 | * of bits for the swap offset in the swp_entry_t type, and | 1920 | * of bits for the swap offset in the swp_entry_t type, and |
1898 | * 2) the number of bits in the swap pte as defined by the | 1921 | * 2) the number of bits in the swap pte as defined by the |
1899 | * the different architectures, and 3) the number of free bits | 1922 | * different architectures. In order to find the |
1900 | * in an exceptional radix_tree entry. In order to find the | ||
1901 | * largest possible bit mask, a swap entry with swap type 0 | 1923 | * largest possible bit mask, a swap entry with swap type 0 |
1902 | * and swap offset ~0UL is created, encoded to a swap pte, | 1924 | * and swap offset ~0UL is created, encoded to a swap pte, |
1903 | * decoded to a swp_entry_t again, and finally the swap | 1925 | * decoded to a swp_entry_t again, and finally the swap |
1904 | * offset is extracted. This will mask all the bits from | 1926 | * offset is extracted. This will mask all the bits from |
1905 | * the initial ~0UL mask that can't be encoded in either | 1927 | * the initial ~0UL mask that can't be encoded in either |
1906 | * the swp_entry_t or the architecture definition of a | 1928 | * the swp_entry_t or the architecture definition of a |
1907 | * swap pte. Then the same is done for a radix_tree entry. | 1929 | * swap pte. |
1908 | */ | 1930 | */ |
1909 | maxpages = swp_offset(pte_to_swp_entry( | 1931 | maxpages = swp_offset(pte_to_swp_entry( |
1910 | swp_entry_to_pte(swp_entry(0, ~0UL)))); | 1932 | swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; |
1911 | maxpages = swp_offset(radix_to_swp_entry( | ||
1912 | swp_to_radix_entry(swp_entry(0, maxpages)))) + 1; | ||
1913 | |||
1914 | if (maxpages > swap_header->info.last_page) { | 1933 | if (maxpages > swap_header->info.last_page) { |
1915 | maxpages = swap_header->info.last_page + 1; | 1934 | maxpages = swap_header->info.last_page + 1; |
1916 | /* p->max is an unsigned int: don't overflow it */ | 1935 | /* p->max is an unsigned int: don't overflow it */ |
@@ -1988,6 +2007,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
1988 | sector_t span; | 2007 | sector_t span; |
1989 | unsigned long maxpages; | 2008 | unsigned long maxpages; |
1990 | unsigned char *swap_map = NULL; | 2009 | unsigned char *swap_map = NULL; |
2010 | unsigned long *frontswap_map = NULL; | ||
1991 | struct page *page = NULL; | 2011 | struct page *page = NULL; |
1992 | struct inode *inode = NULL; | 2012 | struct inode *inode = NULL; |
1993 | 2013 | ||
@@ -2071,6 +2091,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
2071 | error = nr_extents; | 2091 | error = nr_extents; |
2072 | goto bad_swap; | 2092 | goto bad_swap; |
2073 | } | 2093 | } |
2094 | /* frontswap enabled? set up bit-per-page map for frontswap */ | ||
2095 | if (frontswap_enabled) | ||
2096 | frontswap_map = vzalloc(maxpages / sizeof(long)); | ||
2074 | 2097 | ||
2075 | if (p->bdev) { | 2098 | if (p->bdev) { |
2076 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { | 2099 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { |
@@ -2086,14 +2109,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
2086 | if (swap_flags & SWAP_FLAG_PREFER) | 2109 | if (swap_flags & SWAP_FLAG_PREFER) |
2087 | prio = | 2110 | prio = |
2088 | (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; | 2111 | (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; |
2089 | enable_swap_info(p, prio, swap_map); | 2112 | enable_swap_info(p, prio, swap_map, frontswap_map); |
2090 | 2113 | ||
2091 | printk(KERN_INFO "Adding %uk swap on %s. " | 2114 | printk(KERN_INFO "Adding %uk swap on %s. " |
2092 | "Priority:%d extents:%d across:%lluk %s%s\n", | 2115 | "Priority:%d extents:%d across:%lluk %s%s%s\n", |
2093 | p->pages<<(PAGE_SHIFT-10), name, p->prio, | 2116 | p->pages<<(PAGE_SHIFT-10), name, p->prio, |
2094 | nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), | 2117 | nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), |
2095 | (p->flags & SWP_SOLIDSTATE) ? "SS" : "", | 2118 | (p->flags & SWP_SOLIDSTATE) ? "SS" : "", |
2096 | (p->flags & SWP_DISCARDABLE) ? "D" : ""); | 2119 | (p->flags & SWP_DISCARDABLE) ? "D" : "", |
2120 | (frontswap_map) ? "FS" : ""); | ||
2097 | 2121 | ||
2098 | mutex_unlock(&swapon_mutex); | 2122 | mutex_unlock(&swapon_mutex); |
2099 | atomic_inc(&proc_poll_event); | 2123 | atomic_inc(&proc_poll_event); |
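The swapon path above allocates a bit-per-page map when frontswap is enabled, and enable_swap_info() now publishes that map alongside the swap_map. The following is a minimal userspace sketch of sizing and using such a per-page bitmap; it is only an illustration of the idea, not the kernel's allocation, and the helper names are invented for the example.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG_EX  (sizeof(unsigned long) * CHAR_BIT)
    #define LONGS_FOR_BITS(n) (((n) + BITS_PER_LONG_EX - 1) / BITS_PER_LONG_EX)

    /* One bit per swap page: set when the page is held by the front-end cache. */
    static void map_set(unsigned long *map, unsigned long pgoff)
    {
        map[pgoff / BITS_PER_LONG_EX] |= 1UL << (pgoff % BITS_PER_LONG_EX);
    }

    static int map_test(const unsigned long *map, unsigned long pgoff)
    {
        return (map[pgoff / BITS_PER_LONG_EX] >> (pgoff % BITS_PER_LONG_EX)) & 1;
    }

    int main(void)
    {
        unsigned long maxpages = 100000;   /* hypothetical swap size in pages */
        unsigned long *map;

        /* zero-filled allocation, in the spirit of vzalloc() in the hunk above */
        map = calloc(LONGS_FOR_BITS(maxpages), sizeof(*map));
        if (!map)
            return 1;

        map_set(map, 12345);
        printf("page 12345 cached: %d, page 42 cached: %d\n",
               map_test(map, 12345), map_test(map, 42));
        free(map);
        return 0;
    }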
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 5af18d11b518..2a167658bb95 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -192,10 +192,10 @@ static int pack_sg_list(struct scatterlist *sg, int start, | |||
192 | s = rest_of_page(data); | 192 | s = rest_of_page(data); |
193 | if (s > count) | 193 | if (s > count) |
194 | s = count; | 194 | s = count; |
195 | BUG_ON(index > limit); | ||
195 | sg_set_buf(&sg[index++], data, s); | 196 | sg_set_buf(&sg[index++], data, s); |
196 | count -= s; | 197 | count -= s; |
197 | data += s; | 198 | data += s; |
198 | BUG_ON(index > limit); | ||
199 | } | 199 | } |
200 | 200 | ||
201 | return index-start; | 201 | return index-start; |
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 0301b328cf0f..86852963b7f7 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1208 | if (addr->sat_addr.s_node == ATADDR_BCAST && | 1208 | if (addr->sat_addr.s_node == ATADDR_BCAST && |
1209 | !sock_flag(sk, SOCK_BROADCAST)) { | 1209 | !sock_flag(sk, SOCK_BROADCAST)) { |
1210 | #if 1 | 1210 | #if 1 |
1211 | printk(KERN_WARNING "%s is broken and did not set " | 1211 | pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n", |
1212 | "SO_BROADCAST. It will break when 2.2 is " | ||
1213 | "released.\n", | ||
1214 | current->comm); | 1212 | current->comm); |
1215 | #else | 1213 | #else |
1216 | return -EACCES; | 1214 | return -EACCES; |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 46e7f86acfc9..3e18af4dadc4 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) | |||
210 | } | 210 | } |
211 | 211 | ||
212 | if (sk->sk_state == BT_CONNECTED || !newsock || | 212 | if (sk->sk_state == BT_CONNECTED || !newsock || |
213 | test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) { | 213 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { |
214 | bt_accept_unlink(sk); | 214 | bt_accept_unlink(sk); |
215 | if (newsock) | 215 | if (newsock) |
216 | sock_graft(sk, newsock); | 216 | sock_graft(sk, newsock); |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index a776f751edbf..ba4323bce0e9 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -504,13 +504,6 @@ void ceph_destroy_client(struct ceph_client *client) | |||
504 | /* unmount */ | 504 | /* unmount */ |
505 | ceph_osdc_stop(&client->osdc); | 505 | ceph_osdc_stop(&client->osdc); |
506 | 506 | ||
507 | /* | ||
508 | * make sure osd connections close out before destroying the | ||
509 | * auth module, which is needed to free those connections' | ||
510 | * ceph_authorizers. | ||
511 | */ | ||
512 | ceph_msgr_flush(); | ||
513 | |||
514 | ceph_monc_stop(&client->monc); | 507 | ceph_monc_stop(&client->monc); |
515 | 508 | ||
516 | ceph_debugfs_client_cleanup(client); | 509 | ceph_debugfs_client_cleanup(client); |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 524f4e4f598b..b332c3d76059 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -563,6 +563,10 @@ static void prepare_write_message(struct ceph_connection *con) | |||
563 | m->hdr.seq = cpu_to_le64(++con->out_seq); | 563 | m->hdr.seq = cpu_to_le64(++con->out_seq); |
564 | m->needs_out_seq = false; | 564 | m->needs_out_seq = false; |
565 | } | 565 | } |
566 | #ifdef CONFIG_BLOCK | ||
567 | else | ||
568 | m->bio_iter = NULL; | ||
569 | #endif | ||
566 | 570 | ||
567 | dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", | 571 | dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", |
568 | m, con->out_seq, le16_to_cpu(m->hdr.type), | 572 | m, con->out_seq, le16_to_cpu(m->hdr.type), |
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 10d6008d31f2..d0649a9655be 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
@@ -847,6 +847,14 @@ void ceph_monc_stop(struct ceph_mon_client *monc) | |||
847 | 847 | ||
848 | mutex_unlock(&monc->mutex); | 848 | mutex_unlock(&monc->mutex); |
849 | 849 | ||
850 | /* | ||
851 | * flush msgr queue before we destroy ourselves to ensure that: | ||
852 | * - any work that references our embedded con is finished. | ||
853 | * - any osd_client or other work that may reference an authorizer | ||
854 | * finishes before we shut down the auth subsystem. | ||
855 | */ | ||
856 | ceph_msgr_flush(); | ||
857 | |||
850 | ceph_auth_destroy(monc->auth); | 858 | ceph_auth_destroy(monc->auth); |
851 | 859 | ||
852 | ceph_msg_put(monc->m_auth); | 860 | ceph_msg_put(monc->m_auth); |
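The ceph change moves the messenger flush into ceph_monc_stop() so that any queued work that might still reference an authorizer completes before the auth module is torn down. A rough userspace analogy, using a plain pthread worker in place of the kernel workqueue (all names invented), is:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct auth_state {          /* stands in for the ceph auth module */
        int secret;
    };

    static struct auth_state *auth;

    static void *worker(void *arg)
    {
        (void)arg;
        /* queued work that still dereferences the shared auth state */
        printf("worker sees secret %d\n", auth->secret);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        auth = malloc(sizeof(*auth));
        auth->secret = 42;

        pthread_create(&t, NULL, worker, NULL);

        /* "flush" outstanding work first ... */
        pthread_join(t, NULL);

        /* ... only then tear down what that work referenced */
        free(auth);
        return 0;
    }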
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 1ffebed5ce0f..ca59e66c9787 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -139,15 +139,15 @@ void ceph_osdc_release_request(struct kref *kref) | |||
139 | 139 | ||
140 | if (req->r_request) | 140 | if (req->r_request) |
141 | ceph_msg_put(req->r_request); | 141 | ceph_msg_put(req->r_request); |
142 | if (req->r_reply) | ||
143 | ceph_msg_put(req->r_reply); | ||
144 | if (req->r_con_filling_msg) { | 142 | if (req->r_con_filling_msg) { |
145 | dout("release_request revoking pages %p from con %p\n", | 143 | dout("release_request revoking pages %p from con %p\n", |
146 | req->r_pages, req->r_con_filling_msg); | 144 | req->r_pages, req->r_con_filling_msg); |
147 | ceph_con_revoke_message(req->r_con_filling_msg, | 145 | ceph_con_revoke_message(req->r_con_filling_msg, |
148 | req->r_reply); | 146 | req->r_reply); |
149 | ceph_con_put(req->r_con_filling_msg); | 147 | req->r_con_filling_msg->ops->put(req->r_con_filling_msg); |
150 | } | 148 | } |
149 | if (req->r_reply) | ||
150 | ceph_msg_put(req->r_reply); | ||
151 | if (req->r_own_pages) | 151 | if (req->r_own_pages) |
152 | ceph_release_page_vector(req->r_pages, | 152 | ceph_release_page_vector(req->r_pages, |
153 | req->r_num_pages); | 153 | req->r_num_pages); |
@@ -1216,7 +1216,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
1216 | if (req->r_con_filling_msg == con && req->r_reply == msg) { | 1216 | if (req->r_con_filling_msg == con && req->r_reply == msg) { |
1217 | dout(" dropping con_filling_msg ref %p\n", con); | 1217 | dout(" dropping con_filling_msg ref %p\n", con); |
1218 | req->r_con_filling_msg = NULL; | 1218 | req->r_con_filling_msg = NULL; |
1219 | ceph_con_put(con); | 1219 | con->ops->put(con); |
1220 | } | 1220 | } |
1221 | 1221 | ||
1222 | if (!req->r_got_reply) { | 1222 | if (!req->r_got_reply) { |
@@ -2028,7 +2028,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, | |||
2028 | dout("get_reply revoking msg %p from old con %p\n", | 2028 | dout("get_reply revoking msg %p from old con %p\n", |
2029 | req->r_reply, req->r_con_filling_msg); | 2029 | req->r_reply, req->r_con_filling_msg); |
2030 | ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); | 2030 | ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); |
2031 | ceph_con_put(req->r_con_filling_msg); | 2031 | req->r_con_filling_msg->ops->put(req->r_con_filling_msg); |
2032 | req->r_con_filling_msg = NULL; | 2032 | req->r_con_filling_msg = NULL; |
2033 | } | 2033 | } |
2034 | 2034 | ||
@@ -2063,7 +2063,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, | |||
2063 | #endif | 2063 | #endif |
2064 | } | 2064 | } |
2065 | *skip = 0; | 2065 | *skip = 0; |
2066 | req->r_con_filling_msg = ceph_con_get(con); | 2066 | req->r_con_filling_msg = con->ops->get(con); |
2067 | dout("get_reply tid %lld %p\n", tid, m); | 2067 | dout("get_reply tid %lld %p\n", tid, m); |
2068 | 2068 | ||
2069 | out: | 2069 | out: |
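The osd_client hunks replace the removed ceph_con_get()/ceph_con_put() wrappers with direct calls through the connection's ops table. A standalone sketch of that pattern, with hypothetical type and field names, follows: the object carries a reference count plus an ops structure whose get/put callbacks manage its lifetime.

    #include <stdio.h>
    #include <stdlib.h>

    struct conn;

    struct conn_ops {
        struct conn *(*get)(struct conn *c);
        void (*put)(struct conn *c);
    };

    struct conn {
        int refcount;
        const struct conn_ops *ops;
    };

    static struct conn *conn_get(struct conn *c)
    {
        c->refcount++;
        return c;
    }

    static void conn_put(struct conn *c)
    {
        if (--c->refcount == 0) {
            printf("last reference dropped, freeing connection\n");
            free(c);
        }
    }

    static const struct conn_ops default_ops = {
        .get = conn_get,
        .put = conn_put,
    };

    int main(void)
    {
        struct conn *c = malloc(sizeof(*c));
        struct conn *filling;

        c->refcount = 1;
        c->ops = &default_ops;

        filling = c->ops->get(c);   /* e.g. the reply-filling con takes a ref */
        c->ops->put(filling);       /* ... and drops it when done */
        c->ops->put(c);             /* original owner drops the last ref */
        return 0;
    }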
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index ea5fb9fcc3f5..d23b6682f4e9 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -36,9 +36,6 @@ | |||
36 | #define TRACE_ON 1 | 36 | #define TRACE_ON 1 |
37 | #define TRACE_OFF 0 | 37 | #define TRACE_OFF 0 |
38 | 38 | ||
39 | static void send_dm_alert(struct work_struct *unused); | ||
40 | |||
41 | |||
42 | /* | 39 | /* |
43 | * Globals, our netlink socket pointer | 40 | * Globals, our netlink socket pointer |
44 | * and the work handle that will send up | 41 | * and the work handle that will send up |
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF; | |||
48 | static DEFINE_MUTEX(trace_state_mutex); | 45 | static DEFINE_MUTEX(trace_state_mutex); |
49 | 46 | ||
50 | struct per_cpu_dm_data { | 47 | struct per_cpu_dm_data { |
51 | struct work_struct dm_alert_work; | 48 | spinlock_t lock; |
52 | struct sk_buff __rcu *skb; | 49 | struct sk_buff *skb; |
53 | atomic_t dm_hit_count; | 50 | struct work_struct dm_alert_work; |
54 | struct timer_list send_timer; | 51 | struct timer_list send_timer; |
55 | int cpu; | ||
56 | }; | 52 | }; |
57 | 53 | ||
58 | struct dm_hw_stat_delta { | 54 | struct dm_hw_stat_delta { |
@@ -78,13 +74,13 @@ static int dm_delay = 1; | |||
78 | static unsigned long dm_hw_check_delta = 2*HZ; | 74 | static unsigned long dm_hw_check_delta = 2*HZ; |
79 | static LIST_HEAD(hw_stats_list); | 75 | static LIST_HEAD(hw_stats_list); |
80 | 76 | ||
81 | static void reset_per_cpu_data(struct per_cpu_dm_data *data) | 77 | static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) |
82 | { | 78 | { |
83 | size_t al; | 79 | size_t al; |
84 | struct net_dm_alert_msg *msg; | 80 | struct net_dm_alert_msg *msg; |
85 | struct nlattr *nla; | 81 | struct nlattr *nla; |
86 | struct sk_buff *skb; | 82 | struct sk_buff *skb; |
87 | struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1); | 83 | unsigned long flags; |
88 | 84 | ||
89 | al = sizeof(struct net_dm_alert_msg); | 85 | al = sizeof(struct net_dm_alert_msg); |
90 | al += dm_hit_limit * sizeof(struct net_dm_drop_point); | 86 | al += dm_hit_limit * sizeof(struct net_dm_drop_point); |
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data) | |||
99 | sizeof(struct net_dm_alert_msg)); | 95 | sizeof(struct net_dm_alert_msg)); |
100 | msg = nla_data(nla); | 96 | msg = nla_data(nla); |
101 | memset(msg, 0, al); | 97 | memset(msg, 0, al); |
102 | } else | 98 | } else { |
103 | schedule_work_on(data->cpu, &data->dm_alert_work); | 99 | mod_timer(&data->send_timer, jiffies + HZ / 10); |
104 | |||
105 | /* | ||
106 | * Don't need to lock this, since we are guaranteed to only | ||
107 | * run this on a single cpu at a time. | ||
108 | * Note also that we only update data->skb if the old and new skb | ||
109 | * pointers don't match. This ensures that we don't continually call | ||
110 | * synchornize_rcu if we repeatedly fail to alloc a new netlink message. | ||
111 | */ | ||
112 | if (skb != oskb) { | ||
113 | rcu_assign_pointer(data->skb, skb); | ||
114 | |||
115 | synchronize_rcu(); | ||
116 | |||
117 | atomic_set(&data->dm_hit_count, dm_hit_limit); | ||
118 | } | 100 | } |
119 | 101 | ||
102 | spin_lock_irqsave(&data->lock, flags); | ||
103 | swap(data->skb, skb); | ||
104 | spin_unlock_irqrestore(&data->lock, flags); | ||
105 | |||
106 | return skb; | ||
120 | } | 107 | } |
121 | 108 | ||
122 | static void send_dm_alert(struct work_struct *unused) | 109 | static void send_dm_alert(struct work_struct *work) |
123 | { | 110 | { |
124 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
125 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 112 | struct per_cpu_dm_data *data; |
126 | 113 | ||
127 | WARN_ON_ONCE(data->cpu != smp_processor_id()); | 114 | data = container_of(work, struct per_cpu_dm_data, dm_alert_work); |
128 | 115 | ||
129 | /* | 116 | skb = reset_per_cpu_data(data); |
130 | * Grab the skb we're about to send | ||
131 | */ | ||
132 | skb = rcu_dereference_protected(data->skb, 1); | ||
133 | |||
134 | /* | ||
135 | * Replace it with a new one | ||
136 | */ | ||
137 | reset_per_cpu_data(data); | ||
138 | 117 | ||
139 | /* | ||
140 | * Ship it! | ||
141 | */ | ||
142 | if (skb) | 118 | if (skb) |
143 | genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); | 119 | genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); |
144 | |||
145 | put_cpu_var(dm_cpu_data); | ||
146 | } | 120 | } |
147 | 121 | ||
148 | /* | 122 | /* |
149 | * This is the timer function to delay the sending of an alert | 123 | * This is the timer function to delay the sending of an alert |
150 | * in the event that more drops will arrive during the | 124 | * in the event that more drops will arrive during the |
151 | * hysteresis period. Note that it operates under the timer interrupt | 125 | * hysteresis period. |
152 | * so we don't need to disable preemption here | ||
153 | */ | 126 | */ |
154 | static void sched_send_work(unsigned long unused) | 127 | static void sched_send_work(unsigned long _data) |
155 | { | 128 | { |
156 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 129 | struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data; |
157 | |||
158 | schedule_work_on(smp_processor_id(), &data->dm_alert_work); | ||
159 | 130 | ||
160 | put_cpu_var(dm_cpu_data); | 131 | schedule_work(&data->dm_alert_work); |
161 | } | 132 | } |
162 | 133 | ||
163 | static void trace_drop_common(struct sk_buff *skb, void *location) | 134 | static void trace_drop_common(struct sk_buff *skb, void *location) |
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location) | |||
167 | struct nlattr *nla; | 138 | struct nlattr *nla; |
168 | int i; | 139 | int i; |
169 | struct sk_buff *dskb; | 140 | struct sk_buff *dskb; |
170 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 141 | struct per_cpu_dm_data *data; |
171 | 142 | unsigned long flags; | |
172 | 143 | ||
173 | rcu_read_lock(); | 144 | local_irq_save(flags); |
174 | dskb = rcu_dereference(data->skb); | 145 | data = &__get_cpu_var(dm_cpu_data); |
146 | spin_lock(&data->lock); | ||
147 | dskb = data->skb; | ||
175 | 148 | ||
176 | if (!dskb) | 149 | if (!dskb) |
177 | goto out; | 150 | goto out; |
178 | 151 | ||
179 | if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { | ||
180 | /* | ||
181 | * we're already at zero, discard this hit | ||
182 | */ | ||
183 | goto out; | ||
184 | } | ||
185 | |||
186 | nlh = (struct nlmsghdr *)dskb->data; | 152 | nlh = (struct nlmsghdr *)dskb->data; |
187 | nla = genlmsg_data(nlmsg_data(nlh)); | 153 | nla = genlmsg_data(nlmsg_data(nlh)); |
188 | msg = nla_data(nla); | 154 | msg = nla_data(nla); |
189 | for (i = 0; i < msg->entries; i++) { | 155 | for (i = 0; i < msg->entries; i++) { |
190 | if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { | 156 | if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { |
191 | msg->points[i].count++; | 157 | msg->points[i].count++; |
192 | atomic_inc(&data->dm_hit_count); | ||
193 | goto out; | 158 | goto out; |
194 | } | 159 | } |
195 | } | 160 | } |
196 | 161 | if (msg->entries == dm_hit_limit) | |
162 | goto out; | ||
197 | /* | 163 | /* |
198 | * We need to create a new entry | 164 | * We need to create a new entry |
199 | */ | 165 | */ |
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location) | |||
205 | 171 | ||
206 | if (!timer_pending(&data->send_timer)) { | 172 | if (!timer_pending(&data->send_timer)) { |
207 | data->send_timer.expires = jiffies + dm_delay * HZ; | 173 | data->send_timer.expires = jiffies + dm_delay * HZ; |
208 | add_timer_on(&data->send_timer, smp_processor_id()); | 174 | add_timer(&data->send_timer); |
209 | } | 175 | } |
210 | 176 | ||
211 | out: | 177 | out: |
212 | rcu_read_unlock(); | 178 | spin_unlock_irqrestore(&data->lock, flags); |
213 | put_cpu_var(dm_cpu_data); | ||
214 | return; | ||
215 | } | 179 | } |
216 | 180 | ||
217 | static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) | 181 | static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) |
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void) | |||
418 | 382 | ||
419 | for_each_possible_cpu(cpu) { | 383 | for_each_possible_cpu(cpu) { |
420 | data = &per_cpu(dm_cpu_data, cpu); | 384 | data = &per_cpu(dm_cpu_data, cpu); |
421 | data->cpu = cpu; | ||
422 | INIT_WORK(&data->dm_alert_work, send_dm_alert); | 385 | INIT_WORK(&data->dm_alert_work, send_dm_alert); |
423 | init_timer(&data->send_timer); | 386 | init_timer(&data->send_timer); |
424 | data->send_timer.data = cpu; | 387 | data->send_timer.data = (unsigned long)data; |
425 | data->send_timer.function = sched_send_work; | 388 | data->send_timer.function = sched_send_work; |
389 | spin_lock_init(&data->lock); | ||
426 | reset_per_cpu_data(data); | 390 | reset_per_cpu_data(data); |
427 | } | 391 | } |
428 | 392 | ||
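The drop_monitor rework replaces the RCU-protected skb pointer and atomic hit counter with a per-CPU structure whose skb is swapped out under a spinlock, so the trace hook and the alert worker never operate on the same buffer at the same time. The sketch below shows that swap-under-lock idea in userspace, with a pthread mutex standing in for the irq-safe spinlock; the names are illustrative only.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dm_data {
        pthread_mutex_t lock;   /* stands in for spin_lock_irqsave() */
        int *buf;               /* stands in for the per-CPU alert skb */
    };

    /* Producer side: record a hit into whatever buffer is current. */
    static void record_hit(struct dm_data *d)
    {
        pthread_mutex_lock(&d->lock);
        if (d->buf)
            (*d->buf)++;
        pthread_mutex_unlock(&d->lock);
    }

    /* Consumer side: install a fresh buffer and take the old one out. */
    static int *swap_out(struct dm_data *d, int *fresh)
    {
        int *old;

        pthread_mutex_lock(&d->lock);
        old = d->buf;
        d->buf = fresh;
        pthread_mutex_unlock(&d->lock);
        return old;   /* now privately owned: safe to send/free unlocked */
    }

    int main(void)
    {
        struct dm_data d;
        int *old;

        pthread_mutex_init(&d.lock, NULL);
        d.buf = calloc(1, sizeof(int));

        record_hit(&d);
        record_hit(&d);
        old = swap_out(&d, calloc(1, sizeof(int)));
        printf("sent alert with %d hits\n", *old);
        free(old);
        free(d.buf);
        return 0;
    }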
diff --git a/net/core/filter.c b/net/core/filter.c index a3eddb515d1b..d4ce2dc712e3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp) | |||
616 | /** | 616 | /** |
617 | * sk_unattached_filter_create - create an unattached filter | 617 | * sk_unattached_filter_create - create an unattached filter |
618 | * @fprog: the filter program | 618 | * @fprog: the filter program |
619 | * @sk: the socket to use | 619 | * @pfp: the unattached filter that is created |
620 | * | 620 | * |
621 | * Create a filter independent ofr any socket. We first run some | 621 | * Create a filter independent of any socket. We first run some |
622 | * sanity checks on it to make sure it does not explode on us later. | 622 | * sanity checks on it to make sure it does not explode on us later. |
623 | * If an error occurs or there is insufficient memory for the filter | 623 | * If an error occurs or there is insufficient memory for the filter |
624 | * a negative errno code is returned. On success the return is zero. | 624 | * a negative errno code is returned. On success the return is zero. |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index eb09f8bbbf07..d81d026138f0 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
2219 | rcu_read_lock_bh(); | 2219 | rcu_read_lock_bh(); |
2220 | nht = rcu_dereference_bh(tbl->nht); | 2220 | nht = rcu_dereference_bh(tbl->nht); |
2221 | 2221 | ||
2222 | for (h = 0; h < (1 << nht->hash_shift); h++) { | 2222 | for (h = s_h; h < (1 << nht->hash_shift); h++) { |
2223 | if (h < s_h) | ||
2224 | continue; | ||
2225 | if (h > s_h) | 2223 | if (h > s_h) |
2226 | s_idx = 0; | 2224 | s_idx = 0; |
2227 | for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; | 2225 | for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; |
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
2260 | 2258 | ||
2261 | read_lock_bh(&tbl->lock); | 2259 | read_lock_bh(&tbl->lock); |
2262 | 2260 | ||
2263 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { | 2261 | for (h = s_h; h <= PNEIGH_HASHMASK; h++) { |
2264 | if (h < s_h) | ||
2265 | continue; | ||
2266 | if (h > s_h) | 2262 | if (h > s_h) |
2267 | s_idx = 0; | 2263 | s_idx = 0; |
2268 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { | 2264 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { |
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
2297 | struct neigh_table *tbl; | 2293 | struct neigh_table *tbl; |
2298 | int t, family, s_t; | 2294 | int t, family, s_t; |
2299 | int proxy = 0; | 2295 | int proxy = 0; |
2300 | int err = 0; | 2296 | int err; |
2301 | 2297 | ||
2302 | read_lock(&neigh_tbl_lock); | 2298 | read_lock(&neigh_tbl_lock); |
2303 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; | 2299 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; |
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
2311 | 2307 | ||
2312 | s_t = cb->args[0]; | 2308 | s_t = cb->args[0]; |
2313 | 2309 | ||
2314 | for (tbl = neigh_tables, t = 0; tbl && (err >= 0); | 2310 | for (tbl = neigh_tables, t = 0; tbl; |
2315 | tbl = tbl->next, t++) { | 2311 | tbl = tbl->next, t++) { |
2316 | if (t < s_t || (family && tbl->family != family)) | 2312 | if (t < s_t || (family && tbl->family != family)) |
2317 | continue; | 2313 | continue; |
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
2322 | err = pneigh_dump_table(tbl, skb, cb); | 2318 | err = pneigh_dump_table(tbl, skb, cb); |
2323 | else | 2319 | else |
2324 | err = neigh_dump_table(tbl, skb, cb); | 2320 | err = neigh_dump_table(tbl, skb, cb); |
2321 | if (err < 0) | ||
2322 | break; | ||
2325 | } | 2323 | } |
2326 | read_unlock(&neigh_tbl_lock); | 2324 | read_unlock(&neigh_tbl_lock); |
2327 | 2325 | ||
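The neighbour-table dump fix starts each hash walk at the saved bucket (h = s_h) instead of scanning from zero and skipping, and it stops the outer table loop as soon as a dump callback reports an error. A small standalone sketch of that resumable-iteration pattern follows; the cursor structure and budget logic are hypothetical.

    #include <stdio.h>

    #define NBUCKETS 8

    struct dump_cursor {
        int s_h;     /* bucket to resume from */
        int s_idx;   /* index within that bucket */
    };

    /* Returns <0 when the output "buffer" is full and the walk must stop. */
    static int dump_bucket(int h, int start_idx, int budget_left)
    {
        (void)h;
        (void)start_idx;
        return budget_left > 0 ? 0 : -1;
    }

    static int dump_table(struct dump_cursor *c, int budget)
    {
        int h;

        for (h = c->s_h; h < NBUCKETS; h++) {   /* resume, don't re-scan */
            if (h > c->s_h)
                c->s_idx = 0;                   /* fresh bucket, fresh index */
            if (dump_bucket(h, c->s_idx, budget--) < 0) {
                c->s_h = h;                     /* remember where to resume */
                return -1;
            }
        }
        c->s_h = NBUCKETS;
        return 0;
    }

    int main(void)
    {
        struct dump_cursor c = { 0, 0 };

        while (dump_table(&c, 3) < 0)
            printf("partial dump, resuming at bucket %d\n", c.s_h);
        printf("dump complete\n");
        return 0;
    }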
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 3d84fb9d8873..f9f40b932e4b 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev); | |||
362 | 362 | ||
363 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | 363 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) |
364 | { | 364 | { |
365 | int total_len, eth_len, ip_len, udp_len; | 365 | int total_len, ip_len, udp_len; |
366 | struct sk_buff *skb; | 366 | struct sk_buff *skb; |
367 | struct udphdr *udph; | 367 | struct udphdr *udph; |
368 | struct iphdr *iph; | 368 | struct iphdr *iph; |
369 | struct ethhdr *eth; | 369 | struct ethhdr *eth; |
370 | 370 | ||
371 | udp_len = len + sizeof(*udph); | 371 | udp_len = len + sizeof(*udph); |
372 | ip_len = eth_len = udp_len + sizeof(*iph); | 372 | ip_len = udp_len + sizeof(*iph); |
373 | total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; | 373 | total_len = ip_len + LL_RESERVED_SPACE(np->dev); |
374 | 374 | ||
375 | skb = find_skb(np, total_len, total_len - len); | 375 | skb = find_skb(np, total_len + np->dev->needed_tailroom, |
376 | total_len - len); | ||
376 | if (!skb) | 377 | if (!skb) |
377 | return; | 378 | return; |
378 | 379 | ||
379 | skb_copy_to_linear_data(skb, msg, len); | 380 | skb_copy_to_linear_data(skb, msg, len); |
380 | skb->len += len; | 381 | skb_put(skb, len); |
381 | 382 | ||
382 | skb_push(skb, sizeof(*udph)); | 383 | skb_push(skb, sizeof(*udph)); |
383 | skb_reset_transport_header(skb); | 384 | skb_reset_transport_header(skb); |
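The netpoll change sizes the buffer from the device's real link-layer reserve and tailroom and uses skb_put() so that the data pointer and the recorded length stay in sync. The toy buffer below, with invented names, illustrates the same headroom-reserve / put / push bookkeeping in plain C; it is not the skb API.

    #include <stdio.h>
    #include <string.h>

    struct buf {
        unsigned char data[256];
        size_t head;   /* start of payload (after reserved headroom) */
        size_t tail;   /* end of payload */
    };

    /* Reserve headroom up front so headers can later be prepended cheaply. */
    static void buf_reserve(struct buf *b, size_t headroom)
    {
        b->head = b->tail = headroom;
    }

    /* Append payload and keep the length accounting in one place. */
    static unsigned char *buf_put(struct buf *b, size_t len)
    {
        unsigned char *p = b->data + b->tail;

        b->tail += len;
        return p;
    }

    /* Prepend a header into the reserved headroom. */
    static unsigned char *buf_push(struct buf *b, size_t len)
    {
        b->head -= len;
        return b->data + b->head;
    }

    int main(void)
    {
        struct buf b;
        const char msg[] = "ping";

        buf_reserve(&b, 64);                      /* like a link-layer reserve */
        memcpy(buf_put(&b, sizeof(msg)), msg, sizeof(msg));
        memcpy(buf_push(&b, 8), "UDPHDR__", 8);   /* pretend transport header */
        printf("payload length %zu, total on wire %zu\n",
               sizeof(msg), b.tail - b.head);
        return 0;
    }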
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 016694d62484..d78671e9d545 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial); | |||
3361 | * @to: prior buffer | 3361 | * @to: prior buffer |
3362 | * @from: buffer to add | 3362 | * @from: buffer to add |
3363 | * @fragstolen: pointer to boolean | 3363 | * @fragstolen: pointer to boolean |
3364 | * | 3364 | * @delta_truesize: how much more was allocated than was requested |
3365 | */ | 3365 | */ |
3366 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, | 3366 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
3367 | bool *fragstolen, int *delta_truesize) | 3367 | bool *fragstolen, int *delta_truesize) |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index d4d61b694fab..dfba343b2509 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) | |||
560 | } | 560 | } |
561 | EXPORT_SYMBOL(inet_peer_xrlim_allow); | 561 | EXPORT_SYMBOL(inet_peer_xrlim_allow); |
562 | 562 | ||
563 | static void inetpeer_inval_rcu(struct rcu_head *head) | ||
564 | { | ||
565 | struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu); | ||
566 | |||
567 | spin_lock_bh(&gc_lock); | ||
568 | list_add_tail(&p->gc_list, &gc_list); | ||
569 | spin_unlock_bh(&gc_lock); | ||
570 | |||
571 | schedule_delayed_work(&gc_work, gc_delay); | ||
572 | } | ||
573 | |||
563 | void inetpeer_invalidate_tree(int family) | 574 | void inetpeer_invalidate_tree(int family) |
564 | { | 575 | { |
565 | struct inet_peer *old, *new, *prev; | 576 | struct inet_peer *old, *new, *prev; |
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family) | |||
576 | prev = cmpxchg(&base->root, old, new); | 587 | prev = cmpxchg(&base->root, old, new); |
577 | if (prev == old) { | 588 | if (prev == old) { |
578 | base->total = 0; | 589 | base->total = 0; |
579 | spin_lock(&gc_lock); | 590 | call_rcu(&prev->gc_rcu, inetpeer_inval_rcu); |
580 | list_add_tail(&prev->gc_list, &gc_list); | ||
581 | spin_unlock(&gc_lock); | ||
582 | schedule_delayed_work(&gc_work, gc_delay); | ||
583 | } | 591 | } |
584 | 592 | ||
585 | out: | 593 | out: |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index e5c44fc586ab..ab09b126423c 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
44 | struct ip_options *opt = &(IPCB(skb)->opt); | 44 | struct ip_options *opt = &(IPCB(skb)->opt); |
45 | 45 | ||
46 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 46 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
47 | IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
47 | 48 | ||
48 | if (unlikely(opt->optlen)) | 49 | if (unlikely(opt->optlen)) |
49 | ip_forward_options(skb); | 50 | ip_forward_options(skb); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index a9e519ad6db5..c94bbc6f2ba3 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1574 | struct ip_options *opt = &(IPCB(skb)->opt); | 1574 | struct ip_options *opt = &(IPCB(skb)->opt); |
1575 | 1575 | ||
1576 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 1576 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
1577 | IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
1577 | 1578 | ||
1578 | if (unlikely(opt->optlen)) | 1579 | if (unlikely(opt->optlen)) |
1579 | ip_forward_options(skb); | 1580 | ip_forward_options(skb); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 0c220a416626..74c21b924a79 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg) | |||
1561 | neigh_flags = neigh->flags; | 1561 | neigh_flags = neigh->flags; |
1562 | neigh_release(neigh); | 1562 | neigh_release(neigh); |
1563 | } | 1563 | } |
1564 | if (neigh_flags & NTF_ROUTER) { | 1564 | if (!(neigh_flags & NTF_ROUTER)) { |
1565 | RT6_TRACE("purging route %p via non-router but gateway\n", | 1565 | RT6_TRACE("purging route %p via non-router but gateway\n", |
1566 | rt); | 1566 | rt); |
1567 | return -1; | 1567 | return -1; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 17b8c67998bb..decc21d19c53 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb) | |||
526 | hdr->hop_limit--; | 526 | hdr->hop_limit--; |
527 | 527 | ||
528 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); | 528 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
529 | IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
529 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, | 530 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, |
530 | ip6_forward_finish); | 531 | ip6_forward_finish); |
531 | 532 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index b15dc08643a4..461e47c8e956 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) | |||
1886 | { | 1886 | { |
1887 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | 1887 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), |
1888 | IPSTATS_MIB_OUTFORWDATAGRAMS); | 1888 | IPSTATS_MIB_OUTFORWDATAGRAMS); |
1889 | IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | ||
1890 | IPSTATS_MIB_OUTOCTETS, skb->len); | ||
1889 | return dst_output(skb); | 1891 | return dst_output(skb); |
1890 | } | 1892 | } |
1891 | 1893 | ||
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 443591d629ca..185f12f4a5fa 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
@@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session) | |||
162 | if (dev) { | 162 | if (dev) { |
163 | unregister_netdev(dev); | 163 | unregister_netdev(dev); |
164 | spriv->dev = NULL; | 164 | spriv->dev = NULL; |
165 | module_put(THIS_MODULE); | ||
165 | } | 166 | } |
166 | } | 167 | } |
167 | } | 168 | } |
@@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p | |||
249 | if (rc < 0) | 250 | if (rc < 0) |
250 | goto out_del_dev; | 251 | goto out_del_dev; |
251 | 252 | ||
253 | __module_get(THIS_MODULE); | ||
252 | /* Must be done after register_netdev() */ | 254 | /* Must be done after register_netdev() */ |
253 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | 255 | strlcpy(session->ifname, dev->name, IFNAMSIZ); |
254 | 256 | ||
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 70614e7affab..61d8b75d2686 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
464 | sk->sk_bound_dev_if); | 464 | sk->sk_bound_dev_if); |
465 | if (IS_ERR(rt)) | 465 | if (IS_ERR(rt)) |
466 | goto no_route; | 466 | goto no_route; |
467 | if (connected) | 467 | if (connected) { |
468 | sk_setup_caps(sk, &rt->dst); | 468 | sk_setup_caps(sk, &rt->dst); |
469 | else | 469 | } else { |
470 | dst_release(&rt->dst); /* safe since we hold rcu_read_lock */ | 470 | skb_dst_set(skb, &rt->dst); |
471 | goto xmit; | ||
472 | } | ||
471 | } | 473 | } |
472 | 474 | ||
473 | /* We dont need to clone dst here, it is guaranteed to not disappear. | 475 | /* We dont need to clone dst here, it is guaranteed to not disappear. |
@@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
475 | */ | 477 | */ |
476 | skb_dst_set_noref(skb, &rt->dst); | 478 | skb_dst_set_noref(skb, &rt->dst); |
477 | 479 | ||
480 | xmit: | ||
478 | /* Queue the packet to IP for output */ | 481 | /* Queue the packet to IP for output */ |
479 | rc = ip_queue_xmit(skb, &inet->cork.fl); | 482 | rc = ip_queue_xmit(skb, &inet->cork.fl); |
480 | rcu_read_unlock(); | 483 | rcu_read_unlock(); |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 26ddb699d693..c649188314cc 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) | |||
145 | struct tid_ampdu_rx *tid_rx; | 145 | struct tid_ampdu_rx *tid_rx; |
146 | unsigned long timeout; | 146 | unsigned long timeout; |
147 | 147 | ||
148 | rcu_read_lock(); | ||
148 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); | 149 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); |
149 | if (!tid_rx) | 150 | if (!tid_rx) { |
151 | rcu_read_unlock(); | ||
150 | return; | 152 | return; |
153 | } | ||
151 | 154 | ||
152 | timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); | 155 | timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); |
153 | if (time_is_after_jiffies(timeout)) { | 156 | if (time_is_after_jiffies(timeout)) { |
154 | mod_timer(&tid_rx->session_timer, timeout); | 157 | mod_timer(&tid_rx->session_timer, timeout); |
158 | rcu_read_unlock(); | ||
155 | return; | 159 | return; |
156 | } | 160 | } |
161 | rcu_read_unlock(); | ||
157 | 162 | ||
158 | #ifdef CONFIG_MAC80211_HT_DEBUG | 163 | #ifdef CONFIG_MAC80211_HT_DEBUG |
159 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 164 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
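The agg-rx timer fix wraps the rcu_dereference() of tid_rx in rcu_read_lock()/rcu_read_unlock() and is careful to drop the lock on every early-return path. As a loose userspace analogy (a reader-writer lock instead of RCU, invented names), the shape of that pattern is:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock;
    static int *shared;   /* may be replaced or freed by a writer */

    static void timer_cb(void)
    {
        pthread_rwlock_rdlock(&lock);   /* protect the whole access window */
        if (!shared) {
            pthread_rwlock_unlock(&lock);   /* early exit: still unlock */
            return;
        }
        if (*shared > 100) {
            printf("still active, rearming\n");
            pthread_rwlock_unlock(&lock);   /* early exit: still unlock */
            return;
        }
        pthread_rwlock_unlock(&lock);
        printf("session expired\n");
    }

    int main(void)
    {
        int value = 42;

        pthread_rwlock_init(&lock, NULL);
        shared = &value;
        timer_cb();
        shared = NULL;
        timer_cb();
        pthread_rwlock_destroy(&lock);
        return 0;
    }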
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 495831ee48f1..e9cecca5c44d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, | |||
533 | sinfo.filled = 0; | 533 | sinfo.filled = 0; |
534 | sta_set_sinfo(sta, &sinfo); | 534 | sta_set_sinfo(sta, &sinfo); |
535 | 535 | ||
536 | if (sinfo.filled | STATION_INFO_TX_BITRATE) | 536 | if (sinfo.filled & STATION_INFO_TX_BITRATE) |
537 | data[i] = 100000 * | 537 | data[i] = 100000 * |
538 | cfg80211_calculate_bitrate(&sinfo.txrate); | 538 | cfg80211_calculate_bitrate(&sinfo.txrate); |
539 | i++; | 539 | i++; |
540 | if (sinfo.filled | STATION_INFO_RX_BITRATE) | 540 | if (sinfo.filled & STATION_INFO_RX_BITRATE) |
541 | data[i] = 100000 * | 541 | data[i] = 100000 * |
542 | cfg80211_calculate_bitrate(&sinfo.rxrate); | 542 | cfg80211_calculate_bitrate(&sinfo.rxrate); |
543 | i++; | 543 | i++; |
544 | 544 | ||
545 | if (sinfo.filled | STATION_INFO_SIGNAL_AVG) | 545 | if (sinfo.filled & STATION_INFO_SIGNAL_AVG) |
546 | data[i] = (u8)sinfo.signal_avg; | 546 | data[i] = (u8)sinfo.signal_avg; |
547 | i++; | 547 | i++; |
548 | } else { | 548 | } else { |
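The cfg.c fix is the classic bitmask slip: sinfo.filled | STATION_INFO_TX_BITRATE is always non-zero, so the test passed even when the field was absent, whereas & checks the actual bit. A tiny standalone illustration:

    #include <stdio.h>

    #define FLAG_TX_BITRATE  (1u << 0)
    #define FLAG_RX_BITRATE  (1u << 1)

    int main(void)
    {
        unsigned int filled = FLAG_RX_BITRATE;   /* TX bit deliberately not set */

        /* Buggy form: OR-ing in a non-zero constant is always "true". */
        printf("with |: %s\n",
               (filled | FLAG_TX_BITRATE) ? "looks set (wrong)" : "clear");

        /* Correct form: AND tests whether the bit is actually present. */
        printf("with &: %s\n",
               (filled & FLAG_TX_BITRATE) ? "set" : "clear (right)");
        return 0;
    }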
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d4c19a7773db..8664111d0566 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
637 | ieee80211_configure_filter(local); | 637 | ieee80211_configure_filter(local); |
638 | break; | 638 | break; |
639 | default: | 639 | default: |
640 | mutex_lock(&local->mtx); | ||
641 | if (local->hw_roc_dev == sdata->dev && | ||
642 | local->hw_roc_channel) { | ||
643 | /* ignore return value since this is racy */ | ||
644 | drv_cancel_remain_on_channel(local); | ||
645 | ieee80211_queue_work(&local->hw, &local->hw_roc_done); | ||
646 | } | ||
647 | mutex_unlock(&local->mtx); | ||
648 | |||
649 | flush_work(&local->hw_roc_start); | ||
650 | flush_work(&local->hw_roc_done); | ||
651 | |||
640 | flush_work(&sdata->work); | 652 | flush_work(&sdata->work); |
641 | /* | 653 | /* |
642 | * When we get here, the interface is marked down. | 654 | * When we get here, the interface is marked down. |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 04c306308987..91d84cc77bbf 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |||
1220 | sdata->vif.bss_conf.qos = true; | 1220 | sdata->vif.bss_conf.qos = true; |
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) | ||
1224 | { | ||
1225 | lockdep_assert_held(&sdata->local->mtx); | ||
1226 | |||
1227 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | ||
1228 | IEEE80211_STA_BEACON_POLL); | ||
1229 | ieee80211_run_deferred_scan(sdata->local); | ||
1230 | } | ||
1231 | |||
1232 | static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) | ||
1233 | { | ||
1234 | mutex_lock(&sdata->local->mtx); | ||
1235 | __ieee80211_stop_poll(sdata); | ||
1236 | mutex_unlock(&sdata->local->mtx); | ||
1237 | } | ||
1238 | |||
1223 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | 1239 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, |
1224 | u16 capab, bool erp_valid, u8 erp) | 1240 | u16 capab, bool erp_valid, u8 erp) |
1225 | { | 1241 | { |
@@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
1285 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; | 1301 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; |
1286 | 1302 | ||
1287 | /* just to be sure */ | 1303 | /* just to be sure */ |
1288 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 1304 | ieee80211_stop_poll(sdata); |
1289 | IEEE80211_STA_BEACON_POLL); | ||
1290 | 1305 | ||
1291 | ieee80211_led_assoc(local, 1); | 1306 | ieee80211_led_assoc(local, 1); |
1292 | 1307 | ||
@@ -1456,8 +1471,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) | |||
1456 | return; | 1471 | return; |
1457 | } | 1472 | } |
1458 | 1473 | ||
1459 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 1474 | __ieee80211_stop_poll(sdata); |
1460 | IEEE80211_STA_BEACON_POLL); | ||
1461 | 1475 | ||
1462 | mutex_lock(&local->iflist_mtx); | 1476 | mutex_lock(&local->iflist_mtx); |
1463 | ieee80211_recalc_ps(local, -1); | 1477 | ieee80211_recalc_ps(local, -1); |
@@ -1477,7 +1491,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) | |||
1477 | round_jiffies_up(jiffies + | 1491 | round_jiffies_up(jiffies + |
1478 | IEEE80211_CONNECTION_IDLE_TIME)); | 1492 | IEEE80211_CONNECTION_IDLE_TIME)); |
1479 | out: | 1493 | out: |
1480 | ieee80211_run_deferred_scan(local); | ||
1481 | mutex_unlock(&local->mtx); | 1494 | mutex_unlock(&local->mtx); |
1482 | } | 1495 | } |
1483 | 1496 | ||
@@ -2408,7 +2421,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2408 | net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", | 2421 | net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", |
2409 | sdata->name); | 2422 | sdata->name); |
2410 | #endif | 2423 | #endif |
2424 | mutex_lock(&local->mtx); | ||
2411 | ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; | 2425 | ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; |
2426 | ieee80211_run_deferred_scan(local); | ||
2427 | mutex_unlock(&local->mtx); | ||
2428 | |||
2412 | mutex_lock(&local->iflist_mtx); | 2429 | mutex_lock(&local->iflist_mtx); |
2413 | ieee80211_recalc_ps(local, -1); | 2430 | ieee80211_recalc_ps(local, -1); |
2414 | mutex_unlock(&local->iflist_mtx); | 2431 | mutex_unlock(&local->iflist_mtx); |
@@ -2595,8 +2612,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, | |||
2595 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2612 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2596 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; | 2613 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; |
2597 | 2614 | ||
2598 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 2615 | ieee80211_stop_poll(sdata); |
2599 | IEEE80211_STA_BEACON_POLL); | ||
2600 | 2616 | ||
2601 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, | 2617 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, |
2602 | false, frame_buf); | 2618 | false, frame_buf); |
@@ -2874,8 +2890,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | |||
2874 | u32 flags; | 2890 | u32 flags; |
2875 | 2891 | ||
2876 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 2892 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
2877 | sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | | 2893 | __ieee80211_stop_poll(sdata); |
2878 | IEEE80211_STA_CONNECTION_POLL); | ||
2879 | 2894 | ||
2880 | /* let's probe the connection once */ | 2895 | /* let's probe the connection once */ |
2881 | flags = sdata->local->hw.flags; | 2896 | flags = sdata->local->hw.flags; |
@@ -2944,7 +2959,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) | |||
2944 | if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) | 2959 | if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) |
2945 | add_timer(&ifmgd->chswitch_timer); | 2960 | add_timer(&ifmgd->chswitch_timer); |
2946 | ieee80211_sta_reset_beacon_monitor(sdata); | 2961 | ieee80211_sta_reset_beacon_monitor(sdata); |
2962 | |||
2963 | mutex_lock(&sdata->local->mtx); | ||
2947 | ieee80211_restart_sta_timer(sdata); | 2964 | ieee80211_restart_sta_timer(sdata); |
2965 | mutex_unlock(&sdata->local->mtx); | ||
2948 | } | 2966 | } |
2949 | #endif | 2967 | #endif |
2950 | 2968 | ||
@@ -3106,7 +3124,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
3106 | } | 3124 | } |
3107 | 3125 | ||
3108 | local->oper_channel = cbss->channel; | 3126 | local->oper_channel = cbss->channel; |
3109 | ieee80211_hw_config(local, 0); | 3127 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); |
3110 | 3128 | ||
3111 | if (!have_sta) { | 3129 | if (!have_sta) { |
3112 | u32 rates = 0, basic_rates = 0; | 3130 | u32 rates = 0, basic_rates = 0; |
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index f054e94901a2..935aa4b6deee 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work) | |||
234 | return; | 234 | return; |
235 | } | 235 | } |
236 | 236 | ||
237 | /* was never transmitted */ | ||
238 | if (local->hw_roc_skb) { | ||
239 | u64 cookie; | ||
240 | |||
241 | cookie = local->hw_roc_cookie ^ 2; | ||
242 | |||
243 | cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie, | ||
244 | local->hw_roc_skb->data, | ||
245 | local->hw_roc_skb->len, false, | ||
246 | GFP_KERNEL); | ||
247 | |||
248 | kfree_skb(local->hw_roc_skb); | ||
249 | local->hw_roc_skb = NULL; | ||
250 | local->hw_roc_skb_for_status = NULL; | ||
251 | } | ||
252 | |||
237 | if (!local->hw_roc_for_tx) | 253 | if (!local->hw_roc_for_tx) |
238 | cfg80211_remain_on_channel_expired(local->hw_roc_dev, | 254 | cfg80211_remain_on_channel_expired(local->hw_roc_dev, |
239 | local->hw_roc_cookie, | 255 | local->hw_roc_cookie, |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index f5b1638fbf80..de455f8bbb91 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) | |||
378 | /* make the station visible */ | 378 | /* make the station visible */ |
379 | sta_info_hash_add(local, sta); | 379 | sta_info_hash_add(local, sta); |
380 | 380 | ||
381 | list_add(&sta->list, &local->sta_list); | 381 | list_add_rcu(&sta->list, &local->sta_list); |
382 | 382 | ||
383 | set_sta_flag(sta, WLAN_STA_INSERTED); | 383 | set_sta_flag(sta, WLAN_STA_INSERTED); |
384 | 384 | ||
@@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
688 | if (ret) | 688 | if (ret) |
689 | return ret; | 689 | return ret; |
690 | 690 | ||
691 | list_del(&sta->list); | 691 | list_del_rcu(&sta->list); |
692 | 692 | ||
693 | mutex_lock(&local->key_mtx); | 693 | mutex_lock(&local->key_mtx); |
694 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) | 694 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 847215bb2a6f..e453212fa17f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1737 | __le16 fc; | 1737 | __le16 fc; |
1738 | struct ieee80211_hdr hdr; | 1738 | struct ieee80211_hdr hdr; |
1739 | struct ieee80211s_hdr mesh_hdr __maybe_unused; | 1739 | struct ieee80211s_hdr mesh_hdr __maybe_unused; |
1740 | struct mesh_path __maybe_unused *mppath = NULL; | 1740 | struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; |
1741 | const u8 *encaps_data; | 1741 | const u8 *encaps_data; |
1742 | int encaps_len, skip_header_bytes; | 1742 | int encaps_len, skip_header_bytes; |
1743 | int nh_pos, h_pos; | 1743 | int nh_pos, h_pos; |
@@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1803 | goto fail; | 1803 | goto fail; |
1804 | } | 1804 | } |
1805 | rcu_read_lock(); | 1805 | rcu_read_lock(); |
1806 | if (!is_multicast_ether_addr(skb->data)) | 1806 | if (!is_multicast_ether_addr(skb->data)) { |
1807 | mppath = mpp_path_lookup(skb->data, sdata); | 1807 | mpath = mesh_path_lookup(skb->data, sdata); |
1808 | if (!mpath) | ||
1809 | mppath = mpp_path_lookup(skb->data, sdata); | ||
1810 | } | ||
1808 | 1811 | ||
1809 | /* | 1812 | /* |
1810 | * Use address extension if it is a packet from | 1813 | * Use address extension if it is a packet from |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a44c6807df01..8dd4712620ff 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1271 | enum ieee80211_sta_state state; | 1271 | enum ieee80211_sta_state state; |
1272 | 1272 | ||
1273 | for (state = IEEE80211_STA_NOTEXIST; | 1273 | for (state = IEEE80211_STA_NOTEXIST; |
1274 | state < sta->sta_state - 1; state++) | 1274 | state < sta->sta_state; state++) |
1275 | WARN_ON(drv_sta_state(local, sta->sdata, sta, | 1275 | WARN_ON(drv_sta_state(local, sta->sdata, sta, |
1276 | state, state + 1)); | 1276 | state, state + 1)); |
1277 | } | 1277 | } |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 46d69d7f1bb4..31f50bc3a312 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, | |||
270 | return 0; | 270 | return 0; |
271 | 271 | ||
272 | /* RTP port is even */ | 272 | /* RTP port is even */ |
273 | port &= htons(~1); | 273 | rtp_port = port & ~htons(1); |
274 | rtp_port = port; | 274 | rtcp_port = port | htons(1); |
275 | rtcp_port = htons(ntohs(port) + 1); | ||
276 | 275 | ||
277 | /* Create expect for RTP */ | 276 | /* Create expect for RTP */ |
278 | if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) | 277 | if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) |
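The H.323 helper now derives the even RTP port and the matching odd RTCP port directly in network byte order: the low bit of the port number lives in the same byte whether the value is stored big- or little-endian, so port & ~htons(1) and port | htons(1) avoid the ntohs/htons round trip. A standalone check of that arithmetic:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t port_host = 30007;              /* odd source port */
        uint16_t port_be   = htons(port_host);   /* as carried on the wire */

        /* RTP uses the even port, RTCP the matching odd one; masking works
         * directly on the big-endian value. */
        uint16_t rtp_be  = port_be & ~htons(1);
        uint16_t rtcp_be = port_be |  htons(1);

        printf("RTP  port %u (expected %u)\n", ntohs(rtp_be),  port_host & ~1u);
        printf("RTCP port %u (expected %u)\n", ntohs(rtcp_be), port_host | 1u);
        return 0;
    }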
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 0a96a43108ed..1686ca1b53a1 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c | |||
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK"); | |||
32 | MODULE_ALIAS("ip6t_HMARK"); | 32 | MODULE_ALIAS("ip6t_HMARK"); |
33 | 33 | ||
34 | struct hmark_tuple { | 34 | struct hmark_tuple { |
35 | u32 src; | 35 | __be32 src; |
36 | u32 dst; | 36 | __be32 dst; |
37 | union hmark_ports uports; | 37 | union hmark_ports uports; |
38 | uint8_t proto; | 38 | u8 proto; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) | 41 | static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask) |
42 | { | 42 | { |
43 | return (addr32[0] & mask[0]) ^ | 43 | return (addr32[0] & mask[0]) ^ |
44 | (addr32[1] & mask[1]) ^ | 44 | (addr32[1] & mask[1]) ^ |
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) | |||
46 | (addr32[3] & mask[3]); | 46 | (addr32[3] & mask[3]); |
47 | } | 47 | } |
48 | 48 | ||
49 | static inline u32 | 49 | static inline __be32 |
50 | hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) | 50 | hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask) |
51 | { | 51 | { |
52 | switch (l3num) { | 52 | switch (l3num) { |
53 | case AF_INET: | 53 | case AF_INET: |
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) | |||
58 | return 0; | 58 | return 0; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline void hmark_swap_ports(union hmark_ports *uports, | ||
62 | const struct xt_hmark_info *info) | ||
63 | { | ||
64 | union hmark_ports hp; | ||
65 | u16 src, dst; | ||
66 | |||
67 | hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32; | ||
68 | src = ntohs(hp.b16.src); | ||
69 | dst = ntohs(hp.b16.dst); | ||
70 | |||
71 | if (dst > src) | ||
72 | uports->v32 = (dst << 16) | src; | ||
73 | else | ||
74 | uports->v32 = (src << 16) | dst; | ||
75 | } | ||
76 | |||
61 | static int | 77 | static int |
62 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | 78 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, |
63 | const struct xt_hmark_info *info) | 79 | const struct xt_hmark_info *info) |
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | |||
74 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | 90 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
75 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 91 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
76 | 92 | ||
77 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, | 93 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6, |
78 | info->src_mask.all); | 94 | info->src_mask.ip6); |
79 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, | 95 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6, |
80 | info->dst_mask.all); | 96 | info->dst_mask.ip6); |
81 | 97 | ||
82 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 98 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
83 | return 0; | 99 | return 0; |
84 | 100 | ||
85 | t->proto = nf_ct_protonum(ct); | 101 | t->proto = nf_ct_protonum(ct); |
86 | if (t->proto != IPPROTO_ICMP) { | 102 | if (t->proto != IPPROTO_ICMP) { |
87 | t->uports.p16.src = otuple->src.u.all; | 103 | t->uports.b16.src = otuple->src.u.all; |
88 | t->uports.p16.dst = rtuple->src.u.all; | 104 | t->uports.b16.dst = rtuple->src.u.all; |
89 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 105 | hmark_swap_ports(&t->uports, info); |
90 | info->port_set.v32; | ||
91 | if (t->uports.p16.dst < t->uports.p16.src) | ||
92 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
93 | } | 106 | } |
94 | 107 | ||
95 | return 0; | 108 | return 0; |
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | |||
98 | #endif | 111 | #endif |
99 | } | 112 | } |
100 | 113 | ||
114 | /* This hash function is endian independent, to ensure consistent hashing if | ||
115 | * the cluster is composed of big and little endian systems. */ | ||
101 | static inline u32 | 116 | static inline u32 |
102 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) | 117 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) |
103 | { | 118 | { |
104 | u32 hash; | 119 | u32 hash; |
120 | u32 src = ntohl(t->src); | ||
121 | u32 dst = ntohl(t->dst); | ||
105 | 122 | ||
106 | if (t->dst < t->src) | 123 | if (dst < src) |
107 | swap(t->src, t->dst); | 124 | swap(src, dst); |
108 | 125 | ||
109 | hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); | 126 | hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); |
110 | hash = hash ^ (t->proto & info->proto_mask); | 127 | hash = hash ^ (t->proto & info->proto_mask); |
111 | 128 | ||
112 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; | 129 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; |
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, | |||
126 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) | 143 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) |
127 | return; | 144 | return; |
128 | 145 | ||
129 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 146 | hmark_swap_ports(&t->uports, info); |
130 | info->port_set.v32; | ||
131 | |||
132 | if (t->uports.p16.dst < t->uports.p16.src) | ||
133 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
134 | } | 147 | } |
135 | 148 | ||
136 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 149 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, | |||
178 | return -1; | 191 | return -1; |
179 | } | 192 | } |
180 | noicmp: | 193 | noicmp: |
181 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); | 194 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6); |
182 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); | 195 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6); |
183 | 196 | ||
184 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 197 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
185 | return 0; | 198 | return 0; |
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, | |||
255 | } | 268 | } |
256 | } | 269 | } |
257 | 270 | ||
258 | t->src = (__force u32) ip->saddr; | 271 | t->src = ip->saddr & info->src_mask.ip; |
259 | t->dst = (__force u32) ip->daddr; | 272 | t->dst = ip->daddr & info->dst_mask.ip; |
260 | |||
261 | t->src &= info->src_mask.ip; | ||
262 | t->dst &= info->dst_mask.ip; | ||
263 | 273 | ||
264 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 274 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
265 | return 0; | 275 | return 0; |
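
The xt_HMARK hunks above fold the port mask/set/swap into a hmark_swap_ports() helper and convert the addresses with ntohl() before hashing, so the jhash_3words() input, and therefore the mark, is the same on big- and little-endian cluster members. A minimal stand-alone sketch of that canonicalize-then-hash idea (the multiply/xor mixer below is only a placeholder for jhash_3words(); this is not the kernel code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Order the pair so both directions of a flow hash to the same value. */
static void canonicalize(uint32_t *src, uint32_t *dst)
{
	if (*dst < *src) {
		uint32_t tmp = *src;
		*src = *dst;
		*dst = tmp;
	}
}

/* Convert to host order first, so big- and little-endian hosts agree.
 * The mix below is a placeholder for jhash_3words(). */
static uint32_t toy_hash(uint32_t src_be, uint32_t dst_be, uint32_t ports)
{
	uint32_t src = ntohl(src_be);
	uint32_t dst = ntohl(dst_be);

	canonicalize(&src, &dst);
	return (src * 2654435761u) ^ (dst * 2246822519u) ^ ports;
}

int main(void)
{
	uint32_t a = htonl(0xc0a80001);	/* 192.168.0.1 */
	uint32_t b = htonl(0xc0a80002);	/* 192.168.0.2 */

	/* Same hash input regardless of packet direction. */
	printf("%08x\n", (unsigned)toy_hash(a, b, 0x0050d431));
	printf("%08x\n", (unsigned)toy_hash(b, a, 0x0050d431));
	return 0;
}
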
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 3f339b19d140..17a707db40eb 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, | |||
292 | 292 | ||
293 | pr_debug("%p\n", sk); | 293 | pr_debug("%p\n", sk); |
294 | 294 | ||
295 | if (llcp_sock == NULL) | ||
296 | return -EBADFD; | ||
297 | |||
295 | addr->sa_family = AF_NFC; | 298 | addr->sa_family = AF_NFC; |
296 | *len = sizeof(struct sockaddr_nfc_llcp); | 299 | *len = sizeof(struct sockaddr_nfc_llcp); |
297 | 300 | ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 04040476082e..21fde99e5c56 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -71,7 +71,9 @@ static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head, | |||
71 | msg->errno = err; | 71 | msg->errno = err; |
72 | destroy_msg(msg); | 72 | destroy_msg(msg); |
73 | } while (!list_empty(head)); | 73 | } while (!list_empty(head)); |
74 | wake_up(waitq); | 74 | |
75 | if (waitq) | ||
76 | wake_up(waitq); | ||
75 | } | 77 | } |
76 | 78 | ||
77 | static void | 79 | static void |
@@ -91,11 +93,9 @@ rpc_timeout_upcall_queue(struct work_struct *work) | |||
91 | } | 93 | } |
92 | dentry = dget(pipe->dentry); | 94 | dentry = dget(pipe->dentry); |
93 | spin_unlock(&pipe->lock); | 95 | spin_unlock(&pipe->lock); |
94 | if (dentry) { | 96 | rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL, |
95 | rpc_purge_list(&RPC_I(dentry->d_inode)->waitq, | 97 | &free_list, destroy_msg, -ETIMEDOUT); |
96 | &free_list, destroy_msg, -ETIMEDOUT); | 98 | dput(dentry); |
97 | dput(dentry); | ||
98 | } | ||
99 | } | 99 | } |
100 | 100 | ||
101 | ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, | 101 | ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, |
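
The rpc_pipe change above teaches rpc_purge_list() to accept a NULL wait queue, so the caller can drop its surrounding if (dentry) block and pass dentry ? ...waitq : NULL unconditionally (dput(NULL) is already a no-op). A small user-space sketch of a purge helper that only wakes a waiter when one exists; the integer counter merely stands in for wake_up():

#include <stddef.h>
#include <stdio.h>

struct msg {
	int error;
	struct msg *next;
};

/* Destroy every queued message; the wait queue is optional, so callers
 * without a live pipe simply pass NULL and no wake-up happens. */
static void purge_list(int *waitq, struct msg **head,
		       void (*destroy)(struct msg *), int err)
{
	while (*head) {
		struct msg *m = *head;

		*head = m->next;
		m->error = err;
		destroy(m);
	}
	if (waitq)
		(*waitq)++;	/* stand-in for wake_up(waitq) */
}

static void destroy_msg(struct msg *m)
{
	printf("dropped message, error %d\n", m->error);
}

int main(void)
{
	struct msg b = { 0, NULL };
	struct msg a = { 0, &b };
	struct msg *head = &a;

	purge_list(NULL, &head, destroy_msg, -110);	/* timed-out upcalls, no waiter */
	return 0;
}
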
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 7e9baaa1e543..3ee7461926d8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -1374,7 +1374,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
1374 | sizeof(req->rq_snd_buf)); | 1374 | sizeof(req->rq_snd_buf)); |
1375 | return bc_send(req); | 1375 | return bc_send(req); |
1376 | } else { | 1376 | } else { |
1377 | /* Nothing to do to drop request */ | 1377 | /* drop request */ |
1378 | xprt_free_bc_request(req); | ||
1378 | return 0; | 1379 | return 0; |
1379 | } | 1380 | } |
1380 | } | 1381 | } |
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index d2a19b0ff71f..89baa3328411 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
42 | cfg80211_hold_bss(bss_from_pub(bss)); | 42 | cfg80211_hold_bss(bss_from_pub(bss)); |
43 | wdev->current_bss = bss_from_pub(bss); | 43 | wdev->current_bss = bss_from_pub(bss); |
44 | 44 | ||
45 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
45 | cfg80211_upload_connect_keys(wdev); | 46 | cfg80211_upload_connect_keys(wdev); |
46 | 47 | ||
47 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, | 48 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, |
@@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | |||
60 | struct cfg80211_event *ev; | 61 | struct cfg80211_event *ev; |
61 | unsigned long flags; | 62 | unsigned long flags; |
62 | 63 | ||
63 | CFG80211_DEV_WARN_ON(!wdev->ssid_len); | 64 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); |
64 | 65 | ||
65 | ev = kzalloc(sizeof(*ev), gfp); | 66 | ev = kzalloc(sizeof(*ev), gfp); |
66 | if (!ev) | 67 | if (!ev) |
@@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
115 | #ifdef CONFIG_CFG80211_WEXT | 116 | #ifdef CONFIG_CFG80211_WEXT |
116 | wdev->wext.ibss.channel = params->channel; | 117 | wdev->wext.ibss.channel = params->channel; |
117 | #endif | 118 | #endif |
119 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
118 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | 120 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); |
119 | if (err) { | 121 | if (err) { |
120 | wdev->connect_keys = NULL; | 122 | wdev->connect_keys = NULL; |
123 | wdev->sme_state = CFG80211_SME_IDLE; | ||
121 | return err; | 124 | return err; |
122 | } | 125 | } |
123 | 126 | ||
@@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | |||
169 | } | 172 | } |
170 | 173 | ||
171 | wdev->current_bss = NULL; | 174 | wdev->current_bss = NULL; |
175 | wdev->sme_state = CFG80211_SME_IDLE; | ||
172 | wdev->ssid_len = 0; | 176 | wdev->ssid_len = 0; |
173 | #ifdef CONFIG_CFG80211_WEXT | 177 | #ifdef CONFIG_CFG80211_WEXT |
174 | if (!nowext) | 178 | if (!nowext) |
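
The cfg80211 IBSS hunks above start tracking wdev->sme_state across join, joined and leave, warning if a "joined" event arrives while the device is not in the CONNECTING state. A stand-alone sketch of that small state machine (simplified, no locking):

#include <assert.h>
#include <stdio.h>

enum sme_state { SME_IDLE, SME_CONNECTING, SME_CONNECTED };

struct wdev {
	enum sme_state sme_state;
};

/* Join: mark CONNECTING before calling the driver, roll back on failure. */
static int join_ibss(struct wdev *w, int driver_err)
{
	w->sme_state = SME_CONNECTING;
	if (driver_err) {
		w->sme_state = SME_IDLE;
		return driver_err;
	}
	return 0;
}

/* Joined event: only expected while CONNECTING. */
static void ibss_joined(struct wdev *w)
{
	if (w->sme_state != SME_CONNECTING)
		fprintf(stderr, "warning: joined event in unexpected state\n");
	w->sme_state = SME_CONNECTED;
}

static void clear_ibss(struct wdev *w)
{
	w->sme_state = SME_IDLE;
}

int main(void)
{
	struct wdev w = { SME_IDLE };

	if (!join_ibss(&w, 0))
		ibss_joined(&w);
	clear_ibss(&w);
	assert(w.sme_state == SME_IDLE);
	return 0;
}
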
diff --git a/net/wireless/util.c b/net/wireless/util.c index 55d99466babb..8f2d68fc3a44 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
935 | enum nl80211_iftype iftype) | 935 | enum nl80211_iftype iftype) |
936 | { | 936 | { |
937 | struct wireless_dev *wdev_iter; | 937 | struct wireless_dev *wdev_iter; |
938 | u32 used_iftypes = BIT(iftype); | ||
938 | int num[NUM_NL80211_IFTYPES]; | 939 | int num[NUM_NL80211_IFTYPES]; |
939 | int total = 1; | 940 | int total = 1; |
940 | int i, j; | 941 | int i, j; |
@@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
961 | 962 | ||
962 | num[wdev_iter->iftype]++; | 963 | num[wdev_iter->iftype]++; |
963 | total++; | 964 | total++; |
965 | used_iftypes |= BIT(wdev_iter->iftype); | ||
964 | } | 966 | } |
965 | mutex_unlock(&rdev->devlist_mtx); | 967 | mutex_unlock(&rdev->devlist_mtx); |
966 | 968 | ||
@@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
970 | for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { | 972 | for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { |
971 | const struct ieee80211_iface_combination *c; | 973 | const struct ieee80211_iface_combination *c; |
972 | struct ieee80211_iface_limit *limits; | 974 | struct ieee80211_iface_limit *limits; |
975 | u32 all_iftypes = 0; | ||
973 | 976 | ||
974 | c = &rdev->wiphy.iface_combinations[i]; | 977 | c = &rdev->wiphy.iface_combinations[i]; |
975 | 978 | ||
@@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
984 | if (rdev->wiphy.software_iftypes & BIT(iftype)) | 987 | if (rdev->wiphy.software_iftypes & BIT(iftype)) |
985 | continue; | 988 | continue; |
986 | for (j = 0; j < c->n_limits; j++) { | 989 | for (j = 0; j < c->n_limits; j++) { |
990 | all_iftypes |= limits[j].types; | ||
987 | if (!(limits[j].types & BIT(iftype))) | 991 | if (!(limits[j].types & BIT(iftype))) |
988 | continue; | 992 | continue; |
989 | if (limits[j].max < num[iftype]) | 993 | if (limits[j].max < num[iftype]) |
@@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
991 | limits[j].max -= num[iftype]; | 995 | limits[j].max -= num[iftype]; |
992 | } | 996 | } |
993 | } | 997 | } |
994 | /* yay, it fits */ | 998 | |
999 | /* | ||
1000 | * Finally check that all iftypes that we're currently | ||
1001 | * using are actually part of this combination. If they | ||
1002 | * aren't then we can't use this combination and have | ||
1003 | * to continue to the next. | ||
1004 | */ | ||
1005 | if ((all_iftypes & used_iftypes) != used_iftypes) | ||
1006 | goto cont; | ||
1007 | |||
1008 | /* | ||
1009 | * This combination covered all interface types and | ||
1010 | * supported the requested numbers, so we're good. | ||
1011 | */ | ||
995 | kfree(limits); | 1012 | kfree(limits); |
996 | return 0; | 1013 | return 0; |
997 | cont: | 1014 | cont: |
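
The cfg80211_can_change_interface() hunks above collect two bitmasks: used_iftypes, the interface types currently in use plus the requested one, and all_iftypes, the union of the types named by a combination's limits. A combination is accepted only if it covers every used type. A stand-alone sketch of that coverage test (the bit positions below are hypothetical, not the real NL80211_IFTYPE values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A combination is usable only if the union of the types named in its
 * limits covers every interface type currently in use. */
static bool combination_covers(const uint32_t *limit_types, int n_limits,
			       uint32_t used_iftypes)
{
	uint32_t all_iftypes = 0;
	int i;

	for (i = 0; i < n_limits; i++)
		all_iftypes |= limit_types[i];

	return (all_iftypes & used_iftypes) == used_iftypes;
}

int main(void)
{
	uint32_t limits[] = { 1u << 2 /* "station" */, 1u << 3 /* "AP" */ };
	uint32_t used = (1u << 2) | (1u << 5);	/* station plus an uncovered type */

	printf("%s\n", combination_covers(limits, 2, used) ? "fits" : "does not fit");
	return 0;
}
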
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 0948c6b5a321..8b673dd4627f 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -83,6 +83,8 @@ push(@signature_tags, "Signed-off-by:"); | |||
83 | push(@signature_tags, "Reviewed-by:"); | 83 | push(@signature_tags, "Reviewed-by:"); |
84 | push(@signature_tags, "Acked-by:"); | 84 | push(@signature_tags, "Acked-by:"); |
85 | 85 | ||
86 | my $signature_pattern = "\(" . join("|", @signature_tags) . "\)"; | ||
87 | |||
86 | # rfc822 email address - preloaded methods go here. | 88 | # rfc822 email address - preloaded methods go here. |
87 | my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])"; | 89 | my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])"; |
88 | my $rfc822_char = '[\\000-\\377]'; | 90 | my $rfc822_char = '[\\000-\\377]'; |
@@ -473,7 +475,6 @@ my @subsystem = (); | |||
473 | my @status = (); | 475 | my @status = (); |
474 | my %deduplicate_name_hash = (); | 476 | my %deduplicate_name_hash = (); |
475 | my %deduplicate_address_hash = (); | 477 | my %deduplicate_address_hash = (); |
476 | my $signature_pattern; | ||
477 | 478 | ||
478 | my @maintainers = get_maintainers(); | 479 | my @maintainers = get_maintainers(); |
479 | 480 | ||
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index a68aed7fce02..ec2118d0e27a 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
@@ -502,10 +502,8 @@ static int snd_compr_pause(struct snd_compr_stream *stream) | |||
502 | if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) | 502 | if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) |
503 | return -EPERM; | 503 | return -EPERM; |
504 | retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); | 504 | retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); |
505 | if (!retval) { | 505 | if (!retval) |
506 | stream->runtime->state = SNDRV_PCM_STATE_PAUSED; | 506 | stream->runtime->state = SNDRV_PCM_STATE_PAUSED; |
507 | wake_up(&stream->runtime->sleep); | ||
508 | } | ||
509 | return retval; | 507 | return retval; |
510 | } | 508 | } |
511 | 509 | ||
@@ -544,6 +542,10 @@ static int snd_compr_stop(struct snd_compr_stream *stream) | |||
544 | if (!retval) { | 542 | if (!retval) { |
545 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; | 543 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; |
546 | wake_up(&stream->runtime->sleep); | 544 | wake_up(&stream->runtime->sleep); |
545 | stream->runtime->hw_pointer = 0; | ||
546 | stream->runtime->app_pointer = 0; | ||
547 | stream->runtime->total_bytes_available = 0; | ||
548 | stream->runtime->total_bytes_transferred = 0; | ||
547 | } | 549 | } |
548 | return retval; | 550 | return retval; |
549 | } | 551 | } |
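
The compress-offload change above stops waking sleepers on PAUSE and, on STOP, clears the stream's hardware/application pointers and byte counters so a subsequent start does not inherit stale bookkeeping. A trivial sketch of that reset-on-stop idea:

#include <stdint.h>
#include <stdio.h>

struct runtime {
	uint64_t hw_pointer, app_pointer;
	uint64_t total_bytes_available, total_bytes_transferred;
};

/* On STOP, clear all stream bookkeeping so the next start begins from a
 * clean state instead of reusing stale pointers and byte counts. */
static void reset_runtime(struct runtime *rt)
{
	rt->hw_pointer = 0;
	rt->app_pointer = 0;
	rt->total_bytes_available = 0;
	rt->total_bytes_transferred = 0;
}

int main(void)
{
	struct runtime rt = { 10, 20, 4096, 4096 };

	reset_runtime(&rt);
	printf("%llu %llu\n", (unsigned long long)rt.hw_pointer,
	       (unsigned long long)rt.total_bytes_available);
	return 0;
}
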
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 41ca803a1fff..7504e62188d6 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -4393,20 +4393,19 @@ void snd_hda_update_power_acct(struct hda_codec *codec) | |||
4393 | codec->power_jiffies += delta; | 4393 | codec->power_jiffies += delta; |
4394 | } | 4394 | } |
4395 | 4395 | ||
4396 | /** | 4396 | /* Transition to powered up, if wait_power_down then wait for a pending |
4397 | * snd_hda_power_up - Power-up the codec | 4397 | * transition to D3 to complete. A pending D3 transition is indicated |
4398 | * @codec: HD-audio codec | 4398 | * with power_transition == -1. */ |
4399 | * | 4399 | static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down) |
4400 | * Increment the power-up counter and power up the hardware really when | ||
4401 | * not turned on yet. | ||
4402 | */ | ||
4403 | void snd_hda_power_up(struct hda_codec *codec) | ||
4404 | { | 4400 | { |
4405 | struct hda_bus *bus = codec->bus; | 4401 | struct hda_bus *bus = codec->bus; |
4406 | 4402 | ||
4407 | spin_lock(&codec->power_lock); | 4403 | spin_lock(&codec->power_lock); |
4408 | codec->power_count++; | 4404 | codec->power_count++; |
4409 | if (codec->power_on || codec->power_transition > 0) { | 4405 | /* Return if power_on or transitioning to power_on, unless currently |
4406 | * powering down. */ | ||
4407 | if ((codec->power_on || codec->power_transition > 0) && | ||
4408 | !(wait_power_down && codec->power_transition < 0)) { | ||
4410 | spin_unlock(&codec->power_lock); | 4409 | spin_unlock(&codec->power_lock); |
4411 | return; | 4410 | return; |
4412 | } | 4411 | } |
@@ -4430,8 +4429,37 @@ void snd_hda_power_up(struct hda_codec *codec) | |||
4430 | codec->power_transition = 0; | 4429 | codec->power_transition = 0; |
4431 | spin_unlock(&codec->power_lock); | 4430 | spin_unlock(&codec->power_lock); |
4432 | } | 4431 | } |
4432 | |||
4433 | /** | ||
4434 | * snd_hda_power_up - Power-up the codec | ||
4435 | * @codec: HD-audio codec | ||
4436 | * | ||
4437 | * Increment the power-up counter and power up the hardware really when | ||
4438 | * not turned on yet. | ||
4439 | */ | ||
4440 | void snd_hda_power_up(struct hda_codec *codec) | ||
4441 | { | ||
4442 | __snd_hda_power_up(codec, false); | ||
4443 | } | ||
4433 | EXPORT_SYMBOL_HDA(snd_hda_power_up); | 4444 | EXPORT_SYMBOL_HDA(snd_hda_power_up); |
4434 | 4445 | ||
4446 | /** | ||
4447 | * snd_hda_power_up_d3wait - Power-up the codec after waiting for any pending | ||
4448 | * D3 transition to complete. This differs from snd_hda_power_up() when | ||
4449 | * power_transition == -1. snd_hda_power_up sees this case as a nop, | ||
4450 | * snd_hda_power_up_d3wait waits for the D3 transition to complete then powers | ||
4451 | * back up. | ||
4452 | * @codec: HD-audio codec | ||
4453 | * | ||
4454 | * Cancel any power down operation hapenning on the work queue, then power up. | ||
4454 | * Cancel any power down operation happening on the work queue, then power up. | ||
4455 | */ | ||
4456 | void snd_hda_power_up_d3wait(struct hda_codec *codec) | ||
4457 | { | ||
4458 | /* This will cancel and wait for pending power_work to complete. */ | ||
4459 | __snd_hda_power_up(codec, true); | ||
4460 | } | ||
4461 | EXPORT_SYMBOL_HDA(snd_hda_power_up_d3wait); | ||
4462 | |||
4435 | #define power_save(codec) \ | 4463 | #define power_save(codec) \ |
4436 | ((codec)->bus->power_save ? *(codec)->bus->power_save : 0) | 4464 | ((codec)->bus->power_save ? *(codec)->bus->power_save : 0) |
4437 | 4465 | ||
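
The hda_codec.c hunks above split the power-up path into a shared __snd_hda_power_up(codec, wait_power_down) helper: plain snd_hda_power_up() still treats a pending D3 transition (power_transition == -1) as "already on", while the new snd_hda_power_up_d3wait() waits that transition out and powers back up. A simplified, lock-free sketch of the flag-controlled early return:

#include <stdbool.h>
#include <stdio.h>

struct codec {
	int power_count;
	bool power_on;
	int power_transition;	/* >0: powering up, <0: powering down (D3) */
};

/* Shared helper: both entry points bump the refcount; the only difference
 * is whether a pending power-down (power_transition < 0) is waited out. */
static void power_up_common(struct codec *c, bool wait_power_down)
{
	c->power_count++;
	if ((c->power_on || c->power_transition > 0) &&
	    !(wait_power_down && c->power_transition < 0))
		return;		/* already on or already powering up */

	/* ...a real driver would cancel/flush the pending power-down here... */
	c->power_on = true;
	c->power_transition = 0;
}

static void power_up(struct codec *c)        { power_up_common(c, false); }
static void power_up_d3wait(struct codec *c) { power_up_common(c, true); }

int main(void)
{
	struct codec c = { .power_count = 0, .power_on = true,
			   .power_transition = -1 };	/* D3 pending */

	power_up(&c);		/* treats the pending D3 as "already on" */
	power_up_d3wait(&c);	/* waits it out and powers back up */
	printf("count=%d on=%d\n", c.power_count, c.power_on);
	return 0;
}
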
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 4fc3960c8591..2fdaadbb4326 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h | |||
@@ -1056,10 +1056,12 @@ const char *snd_hda_get_jack_location(u32 cfg); | |||
1056 | */ | 1056 | */ |
1057 | #ifdef CONFIG_SND_HDA_POWER_SAVE | 1057 | #ifdef CONFIG_SND_HDA_POWER_SAVE |
1058 | void snd_hda_power_up(struct hda_codec *codec); | 1058 | void snd_hda_power_up(struct hda_codec *codec); |
1059 | void snd_hda_power_up_d3wait(struct hda_codec *codec); | ||
1059 | void snd_hda_power_down(struct hda_codec *codec); | 1060 | void snd_hda_power_down(struct hda_codec *codec); |
1060 | void snd_hda_update_power_acct(struct hda_codec *codec); | 1061 | void snd_hda_update_power_acct(struct hda_codec *codec); |
1061 | #else | 1062 | #else |
1062 | static inline void snd_hda_power_up(struct hda_codec *codec) {} | 1063 | static inline void snd_hda_power_up(struct hda_codec *codec) {} |
1064 | static inline void snd_hda_power_up_d3wait(struct hda_codec *codec) {} | ||
1063 | static inline void snd_hda_power_down(struct hda_codec *codec) {} | 1065 | static inline void snd_hda_power_down(struct hda_codec *codec) {} |
1064 | #endif | 1066 | #endif |
1065 | 1067 | ||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2b6392be451c..7757536b9d5f 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -1766,7 +1766,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream) | |||
1766 | buff_step); | 1766 | buff_step); |
1767 | snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, | 1767 | snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, |
1768 | buff_step); | 1768 | buff_step); |
1769 | snd_hda_power_up(apcm->codec); | 1769 | snd_hda_power_up_d3wait(apcm->codec); |
1770 | err = hinfo->ops.open(hinfo, apcm->codec, substream); | 1770 | err = hinfo->ops.open(hinfo, apcm->codec, substream); |
1771 | if (err < 0) { | 1771 | if (err < 0) { |
1772 | azx_release_device(azx_dev); | 1772 | azx_release_device(azx_dev); |
@@ -2484,9 +2484,9 @@ static void azx_notifier_unregister(struct azx *chip) | |||
2484 | static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); | 2484 | static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); |
2485 | static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); | 2485 | static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); |
2486 | 2486 | ||
2487 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
2487 | static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); | 2488 | static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); |
2488 | 2489 | ||
2489 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
2490 | static void azx_vs_set_state(struct pci_dev *pci, | 2490 | static void azx_vs_set_state(struct pci_dev *pci, |
2491 | enum vga_switcheroo_state state) | 2491 | enum vga_switcheroo_state state) |
2492 | { | 2492 | { |
@@ -2578,6 +2578,7 @@ static int __devinit register_vga_switcheroo(struct azx *chip) | |||
2578 | #else | 2578 | #else |
2579 | #define init_vga_switcheroo(chip) /* NOP */ | 2579 | #define init_vga_switcheroo(chip) /* NOP */ |
2580 | #define register_vga_switcheroo(chip) 0 | 2580 | #define register_vga_switcheroo(chip) 0 |
2581 | #define check_hdmi_disabled(pci) false | ||
2581 | #endif /* SUPPORT_VGA_SWITCHER */ | 2582 | #endif /* SUPPORT_VGA_SWITCHER */ |
2582 | 2583 | ||
2583 | /* | 2584 | /* |
@@ -2638,6 +2639,7 @@ static int azx_dev_free(struct snd_device *device) | |||
2638 | return azx_free(device->device_data); | 2639 | return azx_free(device->device_data); |
2639 | } | 2640 | } |
2640 | 2641 | ||
2642 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
2641 | /* | 2643 | /* |
2642 | * Check of disabled HDMI controller by vga-switcheroo | 2644 | * Check of disabled HDMI controller by vga-switcheroo |
2643 | */ | 2645 | */ |
@@ -2670,12 +2672,13 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci) | |||
2670 | struct pci_dev *p = get_bound_vga(pci); | 2672 | struct pci_dev *p = get_bound_vga(pci); |
2671 | 2673 | ||
2672 | if (p) { | 2674 | if (p) { |
2673 | if (vga_default_device() && p != vga_default_device()) | 2675 | if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF) |
2674 | vga_inactive = true; | 2676 | vga_inactive = true; |
2675 | pci_dev_put(p); | 2677 | pci_dev_put(p); |
2676 | } | 2678 | } |
2677 | return vga_inactive; | 2679 | return vga_inactive; |
2678 | } | 2680 | } |
2681 | #endif /* SUPPORT_VGA_SWITCHEROO */ | ||
2679 | 2682 | ||
2680 | /* | 2683 | /* |
2681 | * white/black-listing for position_fix | 2684 | * white/black-listing for position_fix |
@@ -3351,6 +3354,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
3351 | { PCI_DEVICE(0x6549, 0x1200), | 3354 | { PCI_DEVICE(0x6549, 0x1200), |
3352 | .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, | 3355 | .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, |
3353 | /* Creative X-Fi (CA0110-IBG) */ | 3356 | /* Creative X-Fi (CA0110-IBG) */ |
3357 | /* CTHDA chips */ | ||
3358 | { PCI_DEVICE(0x1102, 0x0010), | ||
3359 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3360 | { PCI_DEVICE(0x1102, 0x0012), | ||
3361 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3354 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) | 3362 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) |
3355 | /* the following entry conflicts with snd-ctxfi driver, | 3363 | /* the following entry conflicts with snd-ctxfi driver, |
3356 | * as ctxfi driver mutates from HD-audio to native mode with | 3364 | * as ctxfi driver mutates from HD-audio to native mode with |
@@ -3367,11 +3375,6 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
3367 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | | 3375 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | |
3368 | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, | 3376 | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, |
3369 | #endif | 3377 | #endif |
3370 | /* CTHDA chips */ | ||
3371 | { PCI_DEVICE(0x1102, 0x0010), | ||
3372 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3373 | { PCI_DEVICE(0x1102, 0x0012), | ||
3374 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3375 | /* Vortex86MX */ | 3378 | /* Vortex86MX */ |
3376 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, | 3379 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, |
3377 | /* VMware HDAudio */ | 3380 | /* VMware HDAudio */ |
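
In hda_intel.c the vga-switcheroo helpers are now fenced by SUPPORT_VGA_SWITCHEROO, with a check_hdmi_disabled(pci) stub defined to false when the feature is compiled out, so the probe path needs no #ifdefs of its own. A minimal illustration of that stub-macro pattern (the names are reused here only for the example):

#include <stdbool.h>
#include <stdio.h>

/* #define SUPPORT_VGA_SWITCHEROO 1 */

#ifdef SUPPORT_VGA_SWITCHEROO
static bool check_hdmi_disabled(int dev)
{
	/* a real implementation would query the switcheroo client state */
	return dev < 0;
}
#else
/* Compiled-out case: a stub that keeps call sites free of #ifdefs. */
#define check_hdmi_disabled(dev)	false
#endif

int main(void)
{
	if (check_hdmi_disabled(0))
		printf("deferring probe: HDMI controller is switched off\n");
	else
		printf("probing audio controller\n");
	return 0;
}
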
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 3acb5824ad39..172370b3793b 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -4061,7 +4061,7 @@ static void cx_auto_init_digital(struct hda_codec *codec) | |||
4061 | static int cx_auto_init(struct hda_codec *codec) | 4061 | static int cx_auto_init(struct hda_codec *codec) |
4062 | { | 4062 | { |
4063 | struct conexant_spec *spec = codec->spec; | 4063 | struct conexant_spec *spec = codec->spec; |
4064 | /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/ | 4064 | snd_hda_gen_apply_verbs(codec); |
4065 | cx_auto_init_output(codec); | 4065 | cx_auto_init_output(codec); |
4066 | cx_auto_init_input(codec); | 4066 | cx_auto_init_input(codec); |
4067 | cx_auto_init_digital(codec); | 4067 | cx_auto_init_digital(codec); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 224410e8e9e7..f8f4906e498d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1896,6 +1896,7 @@ static int alc_init(struct hda_codec *codec) | |||
1896 | alc_fix_pll(codec); | 1896 | alc_fix_pll(codec); |
1897 | alc_auto_init_amp(codec, spec->init_amp); | 1897 | alc_auto_init_amp(codec, spec->init_amp); |
1898 | 1898 | ||
1899 | snd_hda_gen_apply_verbs(codec); | ||
1899 | alc_init_special_input_src(codec); | 1900 | alc_init_special_input_src(codec); |
1900 | alc_auto_init_std(codec); | 1901 | alc_auto_init_std(codec); |
1901 | 1902 | ||
@@ -6439,6 +6440,7 @@ enum { | |||
6439 | ALC662_FIXUP_ASUS_MODE7, | 6440 | ALC662_FIXUP_ASUS_MODE7, |
6440 | ALC662_FIXUP_ASUS_MODE8, | 6441 | ALC662_FIXUP_ASUS_MODE8, |
6441 | ALC662_FIXUP_NO_JACK_DETECT, | 6442 | ALC662_FIXUP_NO_JACK_DETECT, |
6443 | ALC662_FIXUP_ZOTAC_Z68, | ||
6442 | }; | 6444 | }; |
6443 | 6445 | ||
6444 | static const struct alc_fixup alc662_fixups[] = { | 6446 | static const struct alc_fixup alc662_fixups[] = { |
@@ -6588,6 +6590,13 @@ static const struct alc_fixup alc662_fixups[] = { | |||
6588 | .type = ALC_FIXUP_FUNC, | 6590 | .type = ALC_FIXUP_FUNC, |
6589 | .v.func = alc_fixup_no_jack_detect, | 6591 | .v.func = alc_fixup_no_jack_detect, |
6590 | }, | 6592 | }, |
6593 | [ALC662_FIXUP_ZOTAC_Z68] = { | ||
6594 | .type = ALC_FIXUP_PINS, | ||
6595 | .v.pins = (const struct alc_pincfg[]) { | ||
6596 | { 0x1b, 0x02214020 }, /* Front HP */ | ||
6597 | { } | ||
6598 | } | ||
6599 | }, | ||
6591 | }; | 6600 | }; |
6592 | 6601 | ||
6593 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { | 6602 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
@@ -6601,6 +6610,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { | |||
6601 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), | 6610 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), |
6602 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), | 6611 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
6603 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), | 6612 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), |
6613 | SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), | ||
6604 | SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), | 6614 | SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), |
6605 | 6615 | ||
6606 | #if 0 | 6616 | #if 0 |
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a75c3766aede..0418fa11e6bd 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c | |||
@@ -99,8 +99,9 @@ static void wm2000_reset(struct wm2000_priv *wm2000) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static int wm2000_poll_bit(struct i2c_client *i2c, | 101 | static int wm2000_poll_bit(struct i2c_client *i2c, |
102 | unsigned int reg, u8 mask, int timeout) | 102 | unsigned int reg, u8 mask) |
103 | { | 103 | { |
104 | int timeout = 4000; | ||
104 | int val; | 105 | int val; |
105 | 106 | ||
106 | val = wm2000_read(i2c, reg); | 107 | val = wm2000_read(i2c, reg); |
@@ -119,7 +120,7 @@ static int wm2000_poll_bit(struct i2c_client *i2c, | |||
119 | static int wm2000_power_up(struct i2c_client *i2c, int analogue) | 120 | static int wm2000_power_up(struct i2c_client *i2c, int analogue) |
120 | { | 121 | { |
121 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 122 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
122 | int ret, timeout; | 123 | int ret; |
123 | 124 | ||
124 | BUG_ON(wm2000->anc_mode != ANC_OFF); | 125 | BUG_ON(wm2000->anc_mode != ANC_OFF); |
125 | 126 | ||
@@ -140,13 +141,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
140 | 141 | ||
141 | /* Wait for ANC engine to become ready */ | 142 | /* Wait for ANC engine to become ready */ |
142 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 143 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
143 | WM2000_ANC_ENG_IDLE, 1)) { | 144 | WM2000_ANC_ENG_IDLE)) { |
144 | dev_err(&i2c->dev, "ANC engine failed to reset\n"); | 145 | dev_err(&i2c->dev, "ANC engine failed to reset\n"); |
145 | return -ETIMEDOUT; | 146 | return -ETIMEDOUT; |
146 | } | 147 | } |
147 | 148 | ||
148 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 149 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
149 | WM2000_STATUS_BOOT_COMPLETE, 1)) { | 150 | WM2000_STATUS_BOOT_COMPLETE)) { |
150 | dev_err(&i2c->dev, "ANC engine failed to initialise\n"); | 151 | dev_err(&i2c->dev, "ANC engine failed to initialise\n"); |
151 | return -ETIMEDOUT; | 152 | return -ETIMEDOUT; |
152 | } | 153 | } |
@@ -173,16 +174,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
173 | dev_dbg(&i2c->dev, "Download complete\n"); | 174 | dev_dbg(&i2c->dev, "Download complete\n"); |
174 | 175 | ||
175 | if (analogue) { | 176 | if (analogue) { |
176 | timeout = 248; | 177 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); |
177 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); | ||
178 | 178 | ||
179 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 179 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
180 | WM2000_MODE_ANA_SEQ_INCLUDE | | 180 | WM2000_MODE_ANA_SEQ_INCLUDE | |
181 | WM2000_MODE_MOUSE_ENABLE | | 181 | WM2000_MODE_MOUSE_ENABLE | |
182 | WM2000_MODE_THERMAL_ENABLE); | 182 | WM2000_MODE_THERMAL_ENABLE); |
183 | } else { | 183 | } else { |
184 | timeout = 10; | ||
185 | |||
186 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 184 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
187 | WM2000_MODE_MOUSE_ENABLE | | 185 | WM2000_MODE_MOUSE_ENABLE | |
188 | WM2000_MODE_THERMAL_ENABLE); | 186 | WM2000_MODE_THERMAL_ENABLE); |
@@ -201,9 +199,8 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
201 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 199 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
202 | 200 | ||
203 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 201 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
204 | WM2000_STATUS_MOUSE_ACTIVE, timeout)) { | 202 | WM2000_STATUS_MOUSE_ACTIVE)) { |
205 | dev_err(&i2c->dev, "Timed out waiting for device after %dms\n", | 203 | dev_err(&i2c->dev, "Timed out waiting for device\n"); |
206 | timeout * 10); | ||
207 | return -ETIMEDOUT; | 204 | return -ETIMEDOUT; |
208 | } | 205 | } |
209 | 206 | ||
@@ -218,28 +215,25 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
218 | static int wm2000_power_down(struct i2c_client *i2c, int analogue) | 215 | static int wm2000_power_down(struct i2c_client *i2c, int analogue) |
219 | { | 216 | { |
220 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 217 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
221 | int timeout; | ||
222 | 218 | ||
223 | if (analogue) { | 219 | if (analogue) { |
224 | timeout = 248; | 220 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); |
225 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); | ||
226 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 221 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
227 | WM2000_MODE_ANA_SEQ_INCLUDE | | 222 | WM2000_MODE_ANA_SEQ_INCLUDE | |
228 | WM2000_MODE_POWER_DOWN); | 223 | WM2000_MODE_POWER_DOWN); |
229 | } else { | 224 | } else { |
230 | timeout = 10; | ||
231 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 225 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
232 | WM2000_MODE_POWER_DOWN); | 226 | WM2000_MODE_POWER_DOWN); |
233 | } | 227 | } |
234 | 228 | ||
235 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 229 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
236 | WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) { | 230 | WM2000_STATUS_POWER_DOWN_COMPLETE)) { |
237 | dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); | 231 | dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); |
238 | return -ETIMEDOUT; | 232 | return -ETIMEDOUT; |
239 | } | 233 | } |
240 | 234 | ||
241 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 235 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
242 | WM2000_ANC_ENG_IDLE, 1)) { | 236 | WM2000_ANC_ENG_IDLE)) { |
243 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); | 237 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); |
244 | return -ETIMEDOUT; | 238 | return -ETIMEDOUT; |
245 | } | 239 | } |
@@ -268,13 +262,13 @@ static int wm2000_enter_bypass(struct i2c_client *i2c, int analogue) | |||
268 | } | 262 | } |
269 | 263 | ||
270 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 264 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
271 | WM2000_STATUS_ANC_DISABLED, 10)) { | 265 | WM2000_STATUS_ANC_DISABLED)) { |
272 | dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); | 266 | dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); |
273 | return -ETIMEDOUT; | 267 | return -ETIMEDOUT; |
274 | } | 268 | } |
275 | 269 | ||
276 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 270 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
277 | WM2000_ANC_ENG_IDLE, 1)) { | 271 | WM2000_ANC_ENG_IDLE)) { |
278 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); | 272 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); |
279 | return -ETIMEDOUT; | 273 | return -ETIMEDOUT; |
280 | } | 274 | } |
@@ -311,7 +305,7 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) | |||
311 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 305 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
312 | 306 | ||
313 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 307 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
314 | WM2000_STATUS_MOUSE_ACTIVE, 10)) { | 308 | WM2000_STATUS_MOUSE_ACTIVE)) { |
315 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); | 309 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); |
316 | return -ETIMEDOUT; | 310 | return -ETIMEDOUT; |
317 | } | 311 | } |
@@ -325,38 +319,32 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) | |||
325 | static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) | 319 | static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) |
326 | { | 320 | { |
327 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 321 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
328 | int timeout; | ||
329 | 322 | ||
330 | BUG_ON(wm2000->anc_mode != ANC_ACTIVE); | 323 | BUG_ON(wm2000->anc_mode != ANC_ACTIVE); |
331 | 324 | ||
332 | if (analogue) { | 325 | if (analogue) { |
333 | timeout = 248; | 326 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); |
334 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); | ||
335 | 327 | ||
336 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 328 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
337 | WM2000_MODE_ANA_SEQ_INCLUDE | | 329 | WM2000_MODE_ANA_SEQ_INCLUDE | |
338 | WM2000_MODE_THERMAL_ENABLE | | 330 | WM2000_MODE_THERMAL_ENABLE | |
339 | WM2000_MODE_STANDBY_ENTRY); | 331 | WM2000_MODE_STANDBY_ENTRY); |
340 | } else { | 332 | } else { |
341 | timeout = 10; | ||
342 | |||
343 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 333 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
344 | WM2000_MODE_THERMAL_ENABLE | | 334 | WM2000_MODE_THERMAL_ENABLE | |
345 | WM2000_MODE_STANDBY_ENTRY); | 335 | WM2000_MODE_STANDBY_ENTRY); |
346 | } | 336 | } |
347 | 337 | ||
348 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 338 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
349 | WM2000_STATUS_ANC_DISABLED, timeout)) { | 339 | WM2000_STATUS_ANC_DISABLED)) { |
350 | dev_err(&i2c->dev, | 340 | dev_err(&i2c->dev, |
351 | "Timed out waiting for ANC disable after 1ms\n"); | 341 | "Timed out waiting for ANC disable after 1ms\n"); |
352 | return -ETIMEDOUT; | 342 | return -ETIMEDOUT; |
353 | } | 343 | } |
354 | 344 | ||
355 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE, | 345 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) { |
356 | 1)) { | ||
357 | dev_err(&i2c->dev, | 346 | dev_err(&i2c->dev, |
358 | "Timed out waiting for standby after %dms\n", | 347 | "Timed out waiting for standby\n"); |
359 | timeout * 10); | ||
360 | return -ETIMEDOUT; | 348 | return -ETIMEDOUT; |
361 | } | 349 | } |
362 | 350 | ||
@@ -374,23 +362,19 @@ static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) | |||
374 | static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) | 362 | static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) |
375 | { | 363 | { |
376 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 364 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
377 | int timeout; | ||
378 | 365 | ||
379 | BUG_ON(wm2000->anc_mode != ANC_STANDBY); | 366 | BUG_ON(wm2000->anc_mode != ANC_STANDBY); |
380 | 367 | ||
381 | wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); | 368 | wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); |
382 | 369 | ||
383 | if (analogue) { | 370 | if (analogue) { |
384 | timeout = 248; | 371 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); |
385 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); | ||
386 | 372 | ||
387 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 373 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
388 | WM2000_MODE_ANA_SEQ_INCLUDE | | 374 | WM2000_MODE_ANA_SEQ_INCLUDE | |
389 | WM2000_MODE_THERMAL_ENABLE | | 375 | WM2000_MODE_THERMAL_ENABLE | |
390 | WM2000_MODE_MOUSE_ENABLE); | 376 | WM2000_MODE_MOUSE_ENABLE); |
391 | } else { | 377 | } else { |
392 | timeout = 10; | ||
393 | |||
394 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 378 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
395 | WM2000_MODE_THERMAL_ENABLE | | 379 | WM2000_MODE_THERMAL_ENABLE | |
396 | WM2000_MODE_MOUSE_ENABLE); | 380 | WM2000_MODE_MOUSE_ENABLE); |
@@ -400,9 +384,8 @@ static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) | |||
400 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 384 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
401 | 385 | ||
402 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 386 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
403 | WM2000_STATUS_MOUSE_ACTIVE, timeout)) { | 387 | WM2000_STATUS_MOUSE_ACTIVE)) { |
404 | dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n", | 388 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); |
405 | timeout * 10); | ||
406 | return -ETIMEDOUT; | 389 | return -ETIMEDOUT; |
407 | } | 390 | } |
408 | 391 | ||
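
The wm2000 changes above drop the per-call timeout argument: wm2000_poll_bit() now carries a fixed retry budget internally (4000 iterations), which removes the assorted timeout bookkeeping from its callers and the now-misleading "%dms" messages. A stand-alone sketch of such a poll helper, with a fake status source standing in for the I2C register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Poll a status source until the requested bits are set, with the retry
 * budget fixed inside the helper so callers no longer pass a timeout. */
static bool poll_bit(unsigned int (*read_status)(void), unsigned int mask)
{
	int timeout = 4000;
	unsigned int val = read_status();

	while (!(val & mask) && --timeout) {
		usleep(1000);	/* stand-in for msleep(1) */
		val = read_status();
	}
	return timeout != 0;
}

static unsigned int fake_status(void)
{
	static int calls;

	return ++calls > 3 ? 0x1 : 0x0;	/* bit comes up after a few reads */
}

int main(void)
{
	printf("ready: %d\n", poll_bit(fake_status, 0x1));
	return 0;
}
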
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 65d525d74c54..812acd83fb48 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c | |||
@@ -1863,6 +1863,7 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec, | |||
1863 | return ret; | 1863 | return ret; |
1864 | } | 1864 | } |
1865 | 1865 | ||
1866 | regcache_cache_only(wm8904->regmap, false); | ||
1866 | regcache_sync(wm8904->regmap); | 1867 | regcache_sync(wm8904->regmap); |
1867 | 1868 | ||
1868 | /* Enable bias */ | 1869 | /* Enable bias */ |
@@ -1899,14 +1900,8 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec, | |||
1899 | snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0, | 1900 | snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0, |
1900 | WM8904_BIAS_ENA, 0); | 1901 | WM8904_BIAS_ENA, 0); |
1901 | 1902 | ||
1902 | #ifdef CONFIG_REGULATOR | 1903 | regcache_cache_only(wm8904->regmap, true); |
1903 | /* Post 2.6.34 we will be able to get a callback when | 1904 | regcache_mark_dirty(wm8904->regmap); |
1904 | * the regulators are disabled which we can use but | ||
1905 | * for now just assume that the power will be cut if | ||
1906 | * the regulator API is in use. | ||
1907 | */ | ||
1908 | codec->cache_sync = 1; | ||
1909 | #endif | ||
1910 | 1905 | ||
1911 | regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), | 1906 | regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), |
1912 | wm8904->supplies); | 1907 | wm8904->supplies); |
@@ -2084,10 +2079,8 @@ static int wm8904_probe(struct snd_soc_codec *codec) | |||
2084 | { | 2079 | { |
2085 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); | 2080 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); |
2086 | struct wm8904_pdata *pdata = wm8904->pdata; | 2081 | struct wm8904_pdata *pdata = wm8904->pdata; |
2087 | u16 *reg_cache = codec->reg_cache; | ||
2088 | int ret, i; | 2082 | int ret, i; |
2089 | 2083 | ||
2090 | codec->cache_sync = 1; | ||
2091 | codec->control_data = wm8904->regmap; | 2084 | codec->control_data = wm8904->regmap; |
2092 | 2085 | ||
2093 | switch (wm8904->devtype) { | 2086 | switch (wm8904->devtype) { |
@@ -2150,6 +2143,7 @@ static int wm8904_probe(struct snd_soc_codec *codec) | |||
2150 | goto err_enable; | 2143 | goto err_enable; |
2151 | } | 2144 | } |
2152 | 2145 | ||
2146 | regcache_cache_only(wm8904->regmap, true); | ||
2153 | /* Change some default settings - latch VU and enable ZC */ | 2147 | /* Change some default settings - latch VU and enable ZC */ |
2154 | snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT, | 2148 | snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT, |
2155 | WM8904_ADC_VU, WM8904_ADC_VU); | 2149 | WM8904_ADC_VU, WM8904_ADC_VU); |
@@ -2180,14 +2174,18 @@ static int wm8904_probe(struct snd_soc_codec *codec) | |||
2180 | if (!pdata->gpio_cfg[i]) | 2174 | if (!pdata->gpio_cfg[i]) |
2181 | continue; | 2175 | continue; |
2182 | 2176 | ||
2183 | reg_cache[WM8904_GPIO_CONTROL_1 + i] | 2177 | regmap_update_bits(wm8904->regmap, |
2184 | = pdata->gpio_cfg[i] & 0xffff; | 2178 | WM8904_GPIO_CONTROL_1 + i, |
2179 | 0xffff, | ||
2180 | pdata->gpio_cfg[i]); | ||
2185 | } | 2181 | } |
2186 | 2182 | ||
2187 | /* Zero is the default value for these anyway */ | 2183 | /* Zero is the default value for these anyway */ |
2188 | for (i = 0; i < WM8904_MIC_REGS; i++) | 2184 | for (i = 0; i < WM8904_MIC_REGS; i++) |
2189 | reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i] | 2185 | regmap_update_bits(wm8904->regmap, |
2190 | = pdata->mic_cfg[i]; | 2186 | WM8904_MIC_BIAS_CONTROL_0 + i, |
2187 | 0xffff, | ||
2188 | pdata->mic_cfg[i]); | ||
2191 | } | 2189 | } |
2192 | 2190 | ||
2193 | /* Set Class W by default - this will be managed by the Class | 2191 | /* Set Class W by default - this will be managed by the Class |
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 993639d694ce..aa8c98b628da 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
@@ -46,6 +46,39 @@ | |||
46 | #define WM8994_NUM_DRC 3 | 46 | #define WM8994_NUM_DRC 3 |
47 | #define WM8994_NUM_EQ 3 | 47 | #define WM8994_NUM_EQ 3 |
48 | 48 | ||
49 | static struct { | ||
50 | unsigned int reg; | ||
51 | unsigned int mask; | ||
52 | } wm8994_vu_bits[] = { | ||
53 | { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, | ||
54 | { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, | ||
55 | { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, | ||
56 | { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, | ||
57 | { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU }, | ||
58 | { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU }, | ||
59 | { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, | ||
60 | { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, | ||
61 | { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU }, | ||
62 | { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU }, | ||
63 | |||
64 | { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU }, | ||
65 | { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU }, | ||
66 | { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU }, | ||
67 | { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU }, | ||
68 | { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU }, | ||
69 | { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU }, | ||
70 | { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU }, | ||
71 | { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU }, | ||
72 | { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
73 | { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
74 | { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU }, | ||
75 | { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
76 | { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU }, | ||
77 | { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU }, | ||
78 | { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU }, | ||
79 | { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU }, | ||
80 | }; | ||
81 | |||
49 | static int wm8994_drc_base[] = { | 82 | static int wm8994_drc_base[] = { |
50 | WM8994_AIF1_DRC1_1, | 83 | WM8994_AIF1_DRC1_1, |
51 | WM8994_AIF1_DRC2_1, | 84 | WM8994_AIF1_DRC2_1, |
@@ -989,6 +1022,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, | |||
989 | struct snd_soc_codec *codec = w->codec; | 1022 | struct snd_soc_codec *codec = w->codec; |
990 | struct wm8994 *control = codec->control_data; | 1023 | struct wm8994 *control = codec->control_data; |
991 | int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; | 1024 | int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; |
1025 | int i; | ||
992 | int dac; | 1026 | int dac; |
993 | int adc; | 1027 | int adc; |
994 | int val; | 1028 | int val; |
@@ -1047,6 +1081,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, | |||
1047 | WM8994_AIF1DAC2L_ENA); | 1081 | WM8994_AIF1DAC2L_ENA); |
1048 | break; | 1082 | break; |
1049 | 1083 | ||
1084 | case SND_SOC_DAPM_POST_PMU: | ||
1085 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) | ||
1086 | snd_soc_write(codec, wm8994_vu_bits[i].reg, | ||
1087 | snd_soc_read(codec, | ||
1088 | wm8994_vu_bits[i].reg)); | ||
1089 | break; | ||
1090 | |||
1050 | case SND_SOC_DAPM_PRE_PMD: | 1091 | case SND_SOC_DAPM_PRE_PMD: |
1051 | case SND_SOC_DAPM_POST_PMD: | 1092 | case SND_SOC_DAPM_POST_PMD: |
1052 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | 1093 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, |
@@ -1072,6 +1113,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, | |||
1072 | struct snd_kcontrol *kcontrol, int event) | 1113 | struct snd_kcontrol *kcontrol, int event) |
1073 | { | 1114 | { |
1074 | struct snd_soc_codec *codec = w->codec; | 1115 | struct snd_soc_codec *codec = w->codec; |
1116 | int i; | ||
1075 | int dac; | 1117 | int dac; |
1076 | int adc; | 1118 | int adc; |
1077 | int val; | 1119 | int val; |
@@ -1122,6 +1164,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, | |||
1122 | WM8994_AIF2DACR_ENA); | 1164 | WM8994_AIF2DACR_ENA); |
1123 | break; | 1165 | break; |
1124 | 1166 | ||
1167 | case SND_SOC_DAPM_POST_PMU: | ||
1168 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) | ||
1169 | snd_soc_write(codec, wm8994_vu_bits[i].reg, | ||
1170 | snd_soc_read(codec, | ||
1171 | wm8994_vu_bits[i].reg)); | ||
1172 | break; | ||
1173 | |||
1125 | case SND_SOC_DAPM_PRE_PMD: | 1174 | case SND_SOC_DAPM_PRE_PMD: |
1126 | case SND_SOC_DAPM_POST_PMD: | 1175 | case SND_SOC_DAPM_POST_PMD: |
1127 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | 1176 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, |
@@ -1190,17 +1239,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w, | |||
1190 | switch (event) { | 1239 | switch (event) { |
1191 | case SND_SOC_DAPM_PRE_PMU: | 1240 | case SND_SOC_DAPM_PRE_PMU: |
1192 | if (wm8994->aif1clk_enable) { | 1241 | if (wm8994->aif1clk_enable) { |
1193 | aif1clk_ev(w, kcontrol, event); | 1242 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); |
1194 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1243 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
1195 | WM8994_AIF1CLK_ENA_MASK, | 1244 | WM8994_AIF1CLK_ENA_MASK, |
1196 | WM8994_AIF1CLK_ENA); | 1245 | WM8994_AIF1CLK_ENA); |
1246 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); | ||
1197 | wm8994->aif1clk_enable = 0; | 1247 | wm8994->aif1clk_enable = 0; |
1198 | } | 1248 | } |
1199 | if (wm8994->aif2clk_enable) { | 1249 | if (wm8994->aif2clk_enable) { |
1200 | aif2clk_ev(w, kcontrol, event); | 1250 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); |
1201 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1251 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
1202 | WM8994_AIF2CLK_ENA_MASK, | 1252 | WM8994_AIF2CLK_ENA_MASK, |
1203 | WM8994_AIF2CLK_ENA); | 1253 | WM8994_AIF2CLK_ENA); |
1254 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); | ||
1204 | wm8994->aif2clk_enable = 0; | 1255 | wm8994->aif2clk_enable = 0; |
1205 | } | 1256 | } |
1206 | break; | 1257 | break; |
@@ -1221,15 +1272,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w, | |||
1221 | switch (event) { | 1272 | switch (event) { |
1222 | case SND_SOC_DAPM_POST_PMD: | 1273 | case SND_SOC_DAPM_POST_PMD: |
1223 | if (wm8994->aif1clk_disable) { | 1274 | if (wm8994->aif1clk_disable) { |
1275 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); | ||
1224 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1276 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
1225 | WM8994_AIF1CLK_ENA_MASK, 0); | 1277 | WM8994_AIF1CLK_ENA_MASK, 0); |
1226 | aif1clk_ev(w, kcontrol, event); | 1278 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); |
1227 | wm8994->aif1clk_disable = 0; | 1279 | wm8994->aif1clk_disable = 0; |
1228 | } | 1280 | } |
1229 | if (wm8994->aif2clk_disable) { | 1281 | if (wm8994->aif2clk_disable) { |
1282 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); | ||
1230 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1283 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
1231 | WM8994_AIF2CLK_ENA_MASK, 0); | 1284 | WM8994_AIF2CLK_ENA_MASK, 0); |
1232 | aif2clk_ev(w, kcontrol, event); | 1285 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); |
1233 | wm8994->aif2clk_disable = 0; | 1286 | wm8994->aif2clk_disable = 0; |
1234 | } | 1287 | } |
1235 | break; | 1288 | break; |
@@ -1527,9 +1580,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) | |||
1527 | 1580 | ||
1528 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { | 1581 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { |
1529 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, | 1582 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, |
1530 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), | 1583 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | |
1584 | SND_SOC_DAPM_PRE_PMD), | ||
1531 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, | 1585 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, |
1532 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), | 1586 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | |
1587 | SND_SOC_DAPM_PRE_PMD), | ||
1533 | SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), | 1588 | SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), |
1534 | SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, | 1589 | SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, |
1535 | left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), | 1590 | left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), |
@@ -3879,39 +3934,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3879 | 3934 | ||
3880 | pm_runtime_put(codec->dev); | 3935 | pm_runtime_put(codec->dev); |
3881 | 3936 | ||
3882 | /* Latch volume updates (right only; we always do left then right). */ | 3937 | /* Latch volume update bits */ |
3883 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME, | 3938 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) |
3884 | WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); | 3939 | snd_soc_update_bits(codec, wm8994_vu_bits[i].reg, |
3885 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME, | 3940 | wm8994_vu_bits[i].mask, |
3886 | WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); | 3941 | wm8994_vu_bits[i].mask); |
3887 | snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME, | ||
3888 | WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); | ||
3889 | snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME, | ||
3890 | WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); | ||
3891 | snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME, | ||
3892 | WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); | ||
3893 | snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME, | ||
3894 | WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); | ||
3895 | snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME, | ||
3896 | WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); | ||
3897 | snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME, | ||
3898 | WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); | ||
3899 | snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME, | ||
3900 | WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); | ||
3901 | snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME, | ||
3902 | WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); | ||
3903 | snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME, | ||
3904 | WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); | ||
3905 | snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME, | ||
3906 | WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); | ||
3907 | snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME, | ||
3908 | WM8994_DAC1_VU, WM8994_DAC1_VU); | ||
3909 | snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME, | ||
3910 | WM8994_DAC1_VU, WM8994_DAC1_VU); | ||
3911 | snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME, | ||
3912 | WM8994_DAC2_VU, WM8994_DAC2_VU); | ||
3913 | snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME, | ||
3914 | WM8994_DAC2_VU, WM8994_DAC2_VU); | ||
3915 | 3942 | ||
3916 | /* Set the low bit of the 3D stereo depth so TLV matches */ | 3943 | /* Set the low bit of the 3D stereo depth so TLV matches */ |
3917 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2, | 3944 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2, |
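
The wm8994 probe hunks above replace a long run of per-register snd_soc_update_bits() calls with a wm8994_vu_bits[] table of {register, VU bit} pairs; the same table is walked again in the AIF clock POST_PMU events to re-latch volumes after power-up. A stand-alone sketch of the table-driven update (register numbers and the update helper are made up for the example):

#include <stdio.h>

static struct {
	unsigned int reg;
	unsigned int mask;
} vu_bits[] = {
	{ 0x610, 0x0100 },	/* hypothetical register/VU-bit pairs */
	{ 0x611, 0x0100 },
	{ 0x612, 0x0100 },
};

static unsigned int regs[0x1000];

static void update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
	regs[reg] = (regs[reg] & ~mask) | (val & mask);
}

int main(void)
{
	size_t i;

	/* One loop over a table replaces a long run of per-register calls,
	 * and the same table can be reused to re-latch after power-up. */
	for (i = 0; i < sizeof(vu_bits) / sizeof(vu_bits[0]); i++)
		update_bits(vu_bits[i].reg, vu_bits[i].mask, vu_bits[i].mask);

	printf("0x%04x\n", regs[0x610]);
	return 0;
}
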
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c index 8af422e38fd0..dc9b42b7fc4d 100644 --- a/sound/soc/codecs/wm8996.c +++ b/sound/soc/codecs/wm8996.c | |||
@@ -2837,8 +2837,6 @@ static int wm8996_probe(struct snd_soc_codec *codec) | |||
2837 | } | 2837 | } |
2838 | } | 2838 | } |
2839 | 2839 | ||
2840 | regcache_cache_only(codec->control_data, true); | ||
2841 | |||
2842 | /* Apply platform data settings */ | 2840 | /* Apply platform data settings */ |
2843 | snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL, | 2841 | snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL, |
2844 | WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK, | 2842 | WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK, |
@@ -3051,7 +3049,6 @@ static int wm8996_remove(struct snd_soc_codec *codec) | |||
3051 | for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) | 3049 | for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) |
3052 | regulator_unregister_notifier(wm8996->supplies[i].consumer, | 3050 | regulator_unregister_notifier(wm8996->supplies[i].consumer, |
3053 | &wm8996->disable_nb[i]); | 3051 | &wm8996->disable_nb[i]); |
3054 | regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies); | ||
3055 | 3052 | ||
3056 | return 0; | 3053 | return 0; |
3057 | } | 3054 | } |
@@ -3206,14 +3203,15 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c, | |||
3206 | dev_info(&i2c->dev, "revision %c\n", | 3203 | dev_info(&i2c->dev, "revision %c\n", |
3207 | (reg & WM8996_CHIP_REV_MASK) + 'A'); | 3204 | (reg & WM8996_CHIP_REV_MASK) + 'A'); |
3208 | 3205 | ||
3209 | regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies); | ||
3210 | |||
3211 | ret = wm8996_reset(wm8996); | 3206 | ret = wm8996_reset(wm8996); |
3212 | if (ret < 0) { | 3207 | if (ret < 0) { |
3213 | dev_err(&i2c->dev, "Failed to issue reset\n"); | 3208 | dev_err(&i2c->dev, "Failed to issue reset\n"); |
3214 | goto err_regmap; | 3209 | goto err_regmap; |
3215 | } | 3210 | } |
3216 | 3211 | ||
3212 | regcache_cache_only(wm8996->regmap, true); | ||
3213 | regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies); | ||
3214 | |||
3217 | wm8996_init_gpio(wm8996); | 3215 | wm8996_init_gpio(wm8996); |
3218 | 3216 | ||
3219 | ret = snd_soc_register_codec(&i2c->dev, | 3217 | ret = snd_soc_register_codec(&i2c->dev, |
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c index f23700359c67..080327414c6b 100644 --- a/sound/soc/fsl/imx-audmux.c +++ b/sound/soc/fsl/imx-audmux.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/pinctrl/consumer.h> | ||
29 | 30 | ||
30 | #include "imx-audmux.h" | 31 | #include "imx-audmux.h" |
31 | 32 | ||
@@ -249,6 +250,7 @@ EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port); | |||
249 | static int __devinit imx_audmux_probe(struct platform_device *pdev) | 250 | static int __devinit imx_audmux_probe(struct platform_device *pdev) |
250 | { | 251 | { |
251 | struct resource *res; | 252 | struct resource *res; |
253 | struct pinctrl *pinctrl; | ||
252 | const struct of_device_id *of_id = | 254 | const struct of_device_id *of_id = |
253 | of_match_device(imx_audmux_dt_ids, &pdev->dev); | 255 | of_match_device(imx_audmux_dt_ids, &pdev->dev); |
254 | 256 | ||
@@ -257,6 +259,12 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev) | |||
257 | if (!audmux_base) | 259 | if (!audmux_base) |
258 | return -EADDRNOTAVAIL; | 260 | return -EADDRNOTAVAIL; |
259 | 261 | ||
262 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
263 | if (IS_ERR(pinctrl)) { | ||
264 | dev_err(&pdev->dev, "setup pinctrl failed!"); | ||
265 | return PTR_ERR(pinctrl); | ||
266 | } | ||
267 | |||
260 | audmux_clk = clk_get(&pdev->dev, "audmux"); | 268 | audmux_clk = clk_get(&pdev->dev, "audmux"); |
261 | if (IS_ERR(audmux_clk)) { | 269 | if (IS_ERR(audmux_clk)) { |
262 | dev_dbg(&pdev->dev, "cannot get clock: %ld\n", | 270 | dev_dbg(&pdev->dev, "cannot get clock: %ld\n", |
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index 1c2aa7fab3fd..4da5fc55c7ee 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include <mach/hardware.h> | 34 | #include <mach/hardware.h> |
35 | #include <mach/dma.h> | 35 | #include <mach/dma.h> |
36 | #include <mach/audio.h> | ||
37 | 36 | ||
38 | #include "../../arm/pxa2xx-pcm.h" | 37 | #include "../../arm/pxa2xx-pcm.h" |
39 | #include "pxa-ssp.h" | 38 | #include "pxa-ssp.h" |
@@ -194,7 +193,7 @@ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div) | |||
194 | { | 193 | { |
195 | u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); | 194 | u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); |
196 | 195 | ||
197 | if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) { | 196 | if (ssp->type == PXA25x_SSP) { |
198 | sscr0 &= ~0x0000ff00; | 197 | sscr0 &= ~0x0000ff00; |
199 | sscr0 |= ((div - 2)/2) << 8; /* 2..512 */ | 198 | sscr0 |= ((div - 2)/2) << 8; /* 2..512 */ |
200 | } else { | 199 | } else { |
@@ -212,7 +211,7 @@ static u32 pxa_ssp_get_scr(struct ssp_device *ssp) | |||
212 | u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); | 211 | u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); |
213 | u32 div; | 212 | u32 div; |
214 | 213 | ||
215 | if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) | 214 | if (ssp->type == PXA25x_SSP) |
216 | div = ((sscr0 >> 8) & 0xff) * 2 + 2; | 215 | div = ((sscr0 >> 8) & 0xff) * 2 + 2; |
217 | else | 216 | else |
218 | div = ((sscr0 >> 8) & 0xfff) + 1; | 217 | div = ((sscr0 >> 8) & 0xfff) + 1; |
@@ -242,7 +241,7 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai, | |||
242 | break; | 241 | break; |
243 | case PXA_SSP_CLK_PLL: | 242 | case PXA_SSP_CLK_PLL: |
244 | /* Internal PLL is fixed */ | 243 | /* Internal PLL is fixed */ |
245 | if (cpu_is_pxa25x()) | 244 | if (ssp->type == PXA25x_SSP) |
246 | priv->sysclk = 1843200; | 245 | priv->sysclk = 1843200; |
247 | else | 246 | else |
248 | priv->sysclk = 13000000; | 247 | priv->sysclk = 13000000; |
@@ -266,11 +265,11 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai, | |||
266 | 265 | ||
267 | /* The SSP clock must be disabled when changing SSP clock mode | 266 | /* The SSP clock must be disabled when changing SSP clock mode |
268 | * on PXA2xx. On PXA3xx it must be enabled when doing so. */ | 267 | * on PXA2xx. On PXA3xx it must be enabled when doing so. */ |
269 | if (!cpu_is_pxa3xx()) | 268 | if (ssp->type != PXA3xx_SSP) |
270 | clk_disable(ssp->clk); | 269 | clk_disable(ssp->clk); |
271 | val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0; | 270 | val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0; |
272 | pxa_ssp_write_reg(ssp, SSCR0, val); | 271 | pxa_ssp_write_reg(ssp, SSCR0, val); |
273 | if (!cpu_is_pxa3xx()) | 272 | if (ssp->type != PXA3xx_SSP) |
274 | clk_enable(ssp->clk); | 273 | clk_enable(ssp->clk); |
275 | 274 | ||
276 | return 0; | 275 | return 0; |
@@ -294,24 +293,20 @@ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai, | |||
294 | case PXA_SSP_AUDIO_DIV_SCDB: | 293 | case PXA_SSP_AUDIO_DIV_SCDB: |
295 | val = pxa_ssp_read_reg(ssp, SSACD); | 294 | val = pxa_ssp_read_reg(ssp, SSACD); |
296 | val &= ~SSACD_SCDB; | 295 | val &= ~SSACD_SCDB; |
297 | #if defined(CONFIG_PXA3xx) | 296 | if (ssp->type == PXA3xx_SSP) |
298 | if (cpu_is_pxa3xx()) | ||
299 | val &= ~SSACD_SCDX8; | 297 | val &= ~SSACD_SCDX8; |
300 | #endif | ||
301 | switch (div) { | 298 | switch (div) { |
302 | case PXA_SSP_CLK_SCDB_1: | 299 | case PXA_SSP_CLK_SCDB_1: |
303 | val |= SSACD_SCDB; | 300 | val |= SSACD_SCDB; |
304 | break; | 301 | break; |
305 | case PXA_SSP_CLK_SCDB_4: | 302 | case PXA_SSP_CLK_SCDB_4: |
306 | break; | 303 | break; |
307 | #if defined(CONFIG_PXA3xx) | ||
308 | case PXA_SSP_CLK_SCDB_8: | 304 | case PXA_SSP_CLK_SCDB_8: |
309 | if (cpu_is_pxa3xx()) | 305 | if (ssp->type == PXA3xx_SSP) |
310 | val |= SSACD_SCDX8; | 306 | val |= SSACD_SCDX8; |
311 | else | 307 | else |
312 | return -EINVAL; | 308 | return -EINVAL; |
313 | break; | 309 | break; |
314 | #endif | ||
315 | default: | 310 | default: |
316 | return -EINVAL; | 311 | return -EINVAL; |
317 | } | 312 | } |
@@ -337,10 +332,8 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id, | |||
337 | struct ssp_device *ssp = priv->ssp; | 332 | struct ssp_device *ssp = priv->ssp; |
338 | u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70; | 333 | u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70; |
339 | 334 | ||
340 | #if defined(CONFIG_PXA3xx) | 335 | if (ssp->type == PXA3xx_SSP) |
341 | if (cpu_is_pxa3xx()) | ||
342 | pxa_ssp_write_reg(ssp, SSACDD, 0); | 336 | pxa_ssp_write_reg(ssp, SSACDD, 0); |
343 | #endif | ||
344 | 337 | ||
345 | switch (freq_out) { | 338 | switch (freq_out) { |
346 | case 5622000: | 339 | case 5622000: |
@@ -365,11 +358,10 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id, | |||
365 | break; | 358 | break; |
366 | 359 | ||
367 | default: | 360 | default: |
368 | #ifdef CONFIG_PXA3xx | ||
369 | /* PXA3xx has a clock ditherer which can be used to generate | 361 | /* PXA3xx has a clock ditherer which can be used to generate |
370 | * a wider range of frequencies - calculate a value for it. | 362 | * a wider range of frequencies - calculate a value for it. |
371 | */ | 363 | */ |
372 | if (cpu_is_pxa3xx()) { | 364 | if (ssp->type == PXA3xx_SSP) { |
373 | u32 val; | 365 | u32 val; |
374 | u64 tmp = 19968; | 366 | u64 tmp = 19968; |
375 | tmp *= 1000000; | 367 | tmp *= 1000000; |
@@ -386,7 +378,6 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id, | |||
386 | val, freq_out); | 378 | val, freq_out); |
387 | break; | 379 | break; |
388 | } | 380 | } |
389 | #endif | ||
390 | 381 | ||
391 | return -EINVAL; | 382 | return -EINVAL; |
392 | } | 383 | } |
@@ -590,10 +581,8 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, | |||
590 | /* bit size */ | 581 | /* bit size */ |
591 | switch (params_format(params)) { | 582 | switch (params_format(params)) { |
592 | case SNDRV_PCM_FORMAT_S16_LE: | 583 | case SNDRV_PCM_FORMAT_S16_LE: |
593 | #ifdef CONFIG_PXA3xx | 584 | if (ssp->type == PXA3xx_SSP) |
594 | if (cpu_is_pxa3xx()) | ||
595 | sscr0 |= SSCR0_FPCKE; | 585 | sscr0 |= SSCR0_FPCKE; |
596 | #endif | ||
597 | sscr0 |= SSCR0_DataSize(16); | 586 | sscr0 |= SSCR0_DataSize(16); |
598 | break; | 587 | break; |
599 | case SNDRV_PCM_FORMAT_S24_LE: | 588 | case SNDRV_PCM_FORMAT_S24_LE: |
@@ -618,9 +607,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, | |||
618 | * trying and failing a lot; some of the registers | 607 | * trying and failing a lot; some of the registers |
619 | * needed for that mode are only available on PXA3xx. | 608 | * needed for that mode are only available on PXA3xx. |
620 | */ | 609 | */ |
621 | 610 | if (ssp->type != PXA3xx_SSP) | |
622 | #ifdef CONFIG_PXA3xx | ||
623 | if (!cpu_is_pxa3xx()) | ||
624 | return -EINVAL; | 611 | return -EINVAL; |
625 | 612 | ||
626 | sspsp |= SSPSP_SFRMWDTH(width * 2); | 613 | sspsp |= SSPSP_SFRMWDTH(width * 2); |
@@ -628,9 +615,6 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, | |||
628 | sspsp |= SSPSP_EDMYSTOP(3); | 615 | sspsp |= SSPSP_EDMYSTOP(3); |
629 | sspsp |= SSPSP_DMYSTOP(3); | 616 | sspsp |= SSPSP_DMYSTOP(3); |
630 | sspsp |= SSPSP_DMYSTRT(1); | 617 | sspsp |= SSPSP_DMYSTRT(1); |
631 | #else | ||
632 | return -EINVAL; | ||
633 | #endif | ||
634 | } else { | 618 | } else { |
635 | /* The frame width is the width the LRCLK is | 619 | /* The frame width is the width the LRCLK is |
636 | * asserted for; the delay is expressed in | 620 | * asserted for; the delay is expressed in |
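The pxa-ssp hunks above replace compile-time cpu_is_pxa25x()/cpu_is_pxa3xx() checks and CONFIG_PXA3xx ifdefs with runtime tests on ssp->type, so one binary serves both SSP generations. A stand-alone sketch of that dispatch, using the divider formulas visible in the diff; the enum and helper are illustrative, not the kernel's definitions:

    #include <stdio.h>

    enum ssp_type { PXA25x_SSP, PXA27x_SSP, PXA3xx_SSP };

    /* Decode the serial clock divider from SSCR0 based on port type,
     * mirroring the runtime check that replaces the old #ifdef blocks. */
    static unsigned int scr_to_div(enum ssp_type type, unsigned int sscr0)
    {
    	if (type == PXA25x_SSP)
    		return ((sscr0 >> 8) & 0xff) * 2 + 2;	/* narrow PXA25x field */
    	return ((sscr0 >> 8) & 0xfff) + 1;		/* wider field elsewhere */
    }

    int main(void)
    {
    	printf("%u %u\n", scr_to_div(PXA25x_SSP, 0x0300),
    			  scr_to_div(PXA3xx_SSP, 0x0300));
    	return 0;
    }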
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 90ee77d2409d..89eae93445cf 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -913,7 +913,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
913 | /* do we need to add this widget to the list ? */ | 913 | /* do we need to add this widget to the list ? */ |
914 | if (list) { | 914 | if (list) { |
915 | int err; | 915 | int err; |
916 | err = dapm_list_add_widget(list, path->sink); | 916 | err = dapm_list_add_widget(list, path->source); |
917 | if (err < 0) { | 917 | if (err < 0) { |
918 | dev_err(widget->dapm->dev, "could not add widget %s\n", | 918 | dev_err(widget->dapm->dev, "could not add widget %s\n", |
919 | widget->name); | 919 | widget->name); |
@@ -954,7 +954,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, | |||
954 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) | 954 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) |
955 | paths = is_connected_output_ep(dai->playback_widget, list); | 955 | paths = is_connected_output_ep(dai->playback_widget, list); |
956 | else | 956 | else |
957 | paths = is_connected_input_ep(dai->playback_widget, list); | 957 | paths = is_connected_input_ep(dai->capture_widget, list); |
958 | 958 | ||
959 | trace_snd_soc_dapm_connected(paths, stream); | 959 | trace_snd_soc_dapm_connected(paths, stream); |
960 | dapm_clear_walk(&card->dapm); | 960 | dapm_clear_walk(&card->dapm); |
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index bedd1717a373..48fd15b312c1 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
@@ -794,6 +794,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, | |||
794 | for (i = 0; i < card->num_links; i++) { | 794 | for (i = 0; i < card->num_links; i++) { |
795 | be = &card->rtd[i]; | 795 | be = &card->rtd[i]; |
796 | 796 | ||
797 | if (!be->dai_link->no_pcm) | ||
798 | continue; | ||
799 | |||
797 | if (be->cpu_dai->playback_widget == widget || | 800 | if (be->cpu_dai->playback_widget == widget || |
798 | be->codec_dai->playback_widget == widget) | 801 | be->codec_dai->playback_widget == widget) |
799 | return be; | 802 | return be; |
@@ -803,6 +806,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, | |||
803 | for (i = 0; i < card->num_links; i++) { | 806 | for (i = 0; i < card->num_links; i++) { |
804 | be = &card->rtd[i]; | 807 | be = &card->rtd[i]; |
805 | 808 | ||
809 | if (!be->dai_link->no_pcm) | ||
810 | continue; | ||
811 | |||
806 | if (be->cpu_dai->capture_widget == widget || | 812 | if (be->cpu_dai->capture_widget == widget || |
807 | be->codec_dai->capture_widget == widget) | 813 | be->codec_dai->capture_widget == widget) |
808 | return be; | 814 | return be; |
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 57cd419f743e..f43edb364a18 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c | |||
@@ -629,3 +629,4 @@ MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); | |||
629 | MODULE_DESCRIPTION("Tegra30 AHUB driver"); | 629 | MODULE_DESCRIPTION("Tegra30 AHUB driver"); |
630 | MODULE_LICENSE("GPL v2"); | 630 | MODULE_LICENSE("GPL v2"); |
631 | MODULE_ALIAS("platform:" DRV_NAME); | 631 | MODULE_ALIAS("platform:" DRV_NAME); |
632 | MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match); | ||
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c index 0b0df49d9d33..3b6da91188a9 100644 --- a/sound/soc/tegra/tegra_wm8903.c +++ b/sound/soc/tegra/tegra_wm8903.c | |||
@@ -346,6 +346,17 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) | |||
346 | return 0; | 346 | return 0; |
347 | } | 347 | } |
348 | 348 | ||
349 | static int tegra_wm8903_remove(struct snd_soc_card *card) | ||
350 | { | ||
351 | struct snd_soc_pcm_runtime *rtd = &(card->rtd[0]); | ||
352 | struct snd_soc_dai *codec_dai = rtd->codec_dai; | ||
353 | struct snd_soc_codec *codec = codec_dai->codec; | ||
354 | |||
355 | wm8903_mic_detect(codec, NULL, 0, 0); | ||
356 | |||
357 | return 0; | ||
358 | } | ||
359 | |||
349 | static struct snd_soc_dai_link tegra_wm8903_dai = { | 360 | static struct snd_soc_dai_link tegra_wm8903_dai = { |
350 | .name = "WM8903", | 361 | .name = "WM8903", |
351 | .stream_name = "WM8903 PCM", | 362 | .stream_name = "WM8903 PCM", |
@@ -363,6 +374,8 @@ static struct snd_soc_card snd_soc_tegra_wm8903 = { | |||
363 | .dai_link = &tegra_wm8903_dai, | 374 | .dai_link = &tegra_wm8903_dai, |
364 | .num_links = 1, | 375 | .num_links = 1, |
365 | 376 | ||
377 | .remove = tegra_wm8903_remove, | ||
378 | |||
366 | .controls = tegra_wm8903_controls, | 379 | .controls = tegra_wm8903_controls, |
367 | .num_controls = ARRAY_SIZE(tegra_wm8903_controls), | 380 | .num_controls = ARRAY_SIZE(tegra_wm8903_controls), |
368 | .dapm_widgets = tegra_wm8903_dapm_widgets, | 381 | .dapm_widgets = tegra_wm8903_dapm_widgets, |
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c index 6f9715ab32fe..56ad923bf6b5 100644 --- a/sound/usb/6fire/firmware.c +++ b/sound/usb/6fire/firmware.c | |||
@@ -209,7 +209,7 @@ static int usb6fire_fw_ezusb_upload( | |||
209 | int ret; | 209 | int ret; |
210 | u8 data; | 210 | u8 data; |
211 | struct usb_device *device = interface_to_usbdev(intf); | 211 | struct usb_device *device = interface_to_usbdev(intf); |
212 | const struct firmware *fw = 0; | 212 | const struct firmware *fw = NULL; |
213 | struct ihex_record *rec = kmalloc(sizeof(struct ihex_record), | 213 | struct ihex_record *rec = kmalloc(sizeof(struct ihex_record), |
214 | GFP_KERNEL); | 214 | GFP_KERNEL); |
215 | 215 | ||
diff --git a/sound/usb/card.h b/sound/usb/card.h index 0d37238b8457..2b9fffff23b6 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h | |||
@@ -119,6 +119,7 @@ struct snd_usb_substream { | |||
119 | unsigned long unlink_mask; /* bitmask of unlinked urbs */ | 119 | unsigned long unlink_mask; /* bitmask of unlinked urbs */ |
120 | 120 | ||
121 | /* data and sync endpoints for this stream */ | 121 | /* data and sync endpoints for this stream */ |
122 | unsigned int ep_num; /* the endpoint number */ | ||
122 | struct snd_usb_endpoint *data_endpoint; | 123 | struct snd_usb_endpoint *data_endpoint; |
123 | struct snd_usb_endpoint *sync_endpoint; | 124 | struct snd_usb_endpoint *sync_endpoint; |
124 | unsigned long flags; | 125 | unsigned long flags; |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index cdf8b7601973..54607f8c4f66 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -354,17 +354,21 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) | |||
354 | (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && | 354 | (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && |
355 | get_endpoint(alts, 1)->bSynchAddress != 0 && | 355 | get_endpoint(alts, 1)->bSynchAddress != 0 && |
356 | !implicit_fb)) { | 356 | !implicit_fb)) { |
357 | snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n", | 357 | snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n", |
358 | dev->devnum, fmt->iface, fmt->altsetting); | 358 | dev->devnum, fmt->iface, fmt->altsetting, |
359 | get_endpoint(alts, 1)->bmAttributes, | ||
360 | get_endpoint(alts, 1)->bLength, | ||
361 | get_endpoint(alts, 1)->bSynchAddress); | ||
359 | return -EINVAL; | 362 | return -EINVAL; |
360 | } | 363 | } |
361 | ep = get_endpoint(alts, 1)->bEndpointAddress; | 364 | ep = get_endpoint(alts, 1)->bEndpointAddress; |
362 | if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && | 365 | if (!implicit_fb && |
366 | get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && | ||
363 | (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) || | 367 | (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) || |
364 | (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)) || | 368 | (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) { |
365 | ( is_playback && !implicit_fb))) { | 369 | snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n", |
366 | snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n", | 370 | dev->devnum, fmt->iface, fmt->altsetting, |
367 | dev->devnum, fmt->iface, fmt->altsetting); | 371 | is_playback, ep, get_endpoint(alts, 0)->bSynchAddress); |
368 | return -EINVAL; | 372 | return -EINVAL; |
369 | } | 373 | } |
370 | 374 | ||
@@ -1147,7 +1151,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea | |||
1147 | return -EINVAL; | 1151 | return -EINVAL; |
1148 | } | 1152 | } |
1149 | 1153 | ||
1150 | int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd) | 1154 | static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, |
1155 | int cmd) | ||
1151 | { | 1156 | { |
1152 | int err; | 1157 | int err; |
1153 | struct snd_usb_substream *subs = substream->runtime->private_data; | 1158 | struct snd_usb_substream *subs = substream->runtime->private_data; |
diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 6b7d7a2b7baa..083ed81160e5 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c | |||
@@ -97,6 +97,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, | |||
97 | subs->formats |= fp->formats; | 97 | subs->formats |= fp->formats; |
98 | subs->num_formats++; | 98 | subs->num_formats++; |
99 | subs->fmt_type = fp->fmt_type; | 99 | subs->fmt_type = fp->fmt_type; |
100 | subs->ep_num = fp->endpoint; | ||
100 | } | 101 | } |
101 | 102 | ||
102 | /* | 103 | /* |
@@ -119,9 +120,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, | |||
119 | if (as->fmt_type != fp->fmt_type) | 120 | if (as->fmt_type != fp->fmt_type) |
120 | continue; | 121 | continue; |
121 | subs = &as->substream[stream]; | 122 | subs = &as->substream[stream]; |
122 | if (!subs->data_endpoint) | 123 | if (subs->ep_num == fp->endpoint) { |
123 | continue; | ||
124 | if (subs->data_endpoint->ep_num == fp->endpoint) { | ||
125 | list_add_tail(&fp->list, &subs->fmt_list); | 124 | list_add_tail(&fp->list, &subs->fmt_list); |
126 | subs->num_formats++; | 125 | subs->num_formats++; |
127 | subs->formats |= fp->formats; | 126 | subs->formats |= fp->formats; |
@@ -134,7 +133,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, | |||
134 | if (as->fmt_type != fp->fmt_type) | 133 | if (as->fmt_type != fp->fmt_type) |
135 | continue; | 134 | continue; |
136 | subs = &as->substream[stream]; | 135 | subs = &as->substream[stream]; |
137 | if (subs->data_endpoint) | 136 | if (subs->ep_num) |
138 | continue; | 137 | continue; |
139 | err = snd_pcm_new_stream(as->pcm, stream, 1); | 138 | err = snd_pcm_new_stream(as->pcm, stream, 1); |
140 | if (err < 0) | 139 | if (err < 0) |
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index 146fd6147e84..d9834b362943 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c | |||
@@ -701,14 +701,18 @@ int main(void) | |||
701 | pfd.fd = fd; | 701 | pfd.fd = fd; |
702 | 702 | ||
703 | while (1) { | 703 | while (1) { |
704 | struct sockaddr *addr_p = (struct sockaddr *) &addr; | ||
705 | socklen_t addr_l = sizeof(addr); | ||
704 | pfd.events = POLLIN; | 706 | pfd.events = POLLIN; |
705 | pfd.revents = 0; | 707 | pfd.revents = 0; |
706 | poll(&pfd, 1, -1); | 708 | poll(&pfd, 1, -1); |
707 | 709 | ||
708 | len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0); | 710 | len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0, |
711 | addr_p, &addr_l); | ||
709 | 712 | ||
710 | if (len < 0) { | 713 | if (len < 0 || addr.nl_pid) { |
711 | syslog(LOG_ERR, "recv failed; error:%d", len); | 714 | syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s", |
715 | addr.nl_pid, errno, strerror(errno)); | ||
712 | close(fd); | 716 | close(fd); |
713 | return -1; | 717 | return -1; |
714 | } | 718 | } |
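The hv_kvp_daemon hunk switches from recv() to recvfrom() so the daemon can check where the netlink datagram came from: kernel-originated netlink messages carry nl_pid == 0, so anything else is rejected. A hedged user-space sketch of that check, with the helper name chosen here for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>

    /* Read one netlink datagram and reject anything not sent by the kernel. */
    static ssize_t recv_from_kernel(int fd, void *buf, size_t len)
    {
    	struct sockaddr_nl addr = { 0 };
    	socklen_t addr_l = sizeof(addr);
    	ssize_t n = recvfrom(fd, buf, len, 0,
    			     (struct sockaddr *)&addr, &addr_l);

    	if (n < 0 || addr.nl_pid) {
    		fprintf(stderr, "recvfrom failed; pid:%u error:%d %s\n",
    			addr.nl_pid, errno, strerror(errno));
    		return -1;
    	}
    	return n;
    }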
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 5476bc0a1eac..b4b572e8c100 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST | |||
@@ -1,4 +1,6 @@ | |||
1 | tools/perf | 1 | tools/perf |
2 | tools/scripts | ||
3 | tools/lib/traceevent | ||
2 | include/linux/const.h | 4 | include/linux/const.h |
3 | include/linux/perf_event.h | 5 | include/linux/perf_event.h |
4 | include/linux/rbtree.h | 6 | include/linux/rbtree.h |
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 8c767c6bca91..25249f76329d 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -152,7 +152,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, | |||
152 | 152 | ||
153 | if (symbol_conf.use_callchain) { | 153 | if (symbol_conf.use_callchain) { |
154 | err = callchain_append(he->callchain, | 154 | err = callchain_append(he->callchain, |
155 | &evsel->hists.callchain_cursor, | 155 | &callchain_cursor, |
156 | sample->period); | 156 | sample->period); |
157 | if (err) | 157 | if (err) |
158 | return err; | 158 | return err; |
@@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, | |||
162 | * so we don't allocated the extra space needed because the stdio | 162 | * so we don't allocated the extra space needed because the stdio |
163 | * code will not use it. | 163 | * code will not use it. |
164 | */ | 164 | */ |
165 | if (al->sym != NULL && use_browser > 0) { | 165 | if (he->ms.sym != NULL && use_browser > 0) { |
166 | struct annotation *notes = symbol__annotation(he->ms.sym); | 166 | struct annotation *notes = symbol__annotation(he->ms.sym); |
167 | 167 | ||
168 | assert(evsel != NULL); | 168 | assert(evsel != NULL); |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 62ae30d34fa6..07b5c7703dd1 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -1129,7 +1129,7 @@ static int add_default_attributes(void) | |||
1129 | return 0; | 1129 | return 0; |
1130 | 1130 | ||
1131 | if (!evsel_list->nr_entries) { | 1131 | if (!evsel_list->nr_entries) { |
1132 | if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) | 1132 | if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) |
1133 | return -1; | 1133 | return -1; |
1134 | } | 1134 | } |
1135 | 1135 | ||
@@ -1139,21 +1139,21 @@ static int add_default_attributes(void) | |||
1139 | return 0; | 1139 | return 0; |
1140 | 1140 | ||
1141 | /* Append detailed run extra attributes: */ | 1141 | /* Append detailed run extra attributes: */ |
1142 | if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) | 1142 | if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) |
1143 | return -1; | 1143 | return -1; |
1144 | 1144 | ||
1145 | if (detailed_run < 2) | 1145 | if (detailed_run < 2) |
1146 | return 0; | 1146 | return 0; |
1147 | 1147 | ||
1148 | /* Append very detailed run extra attributes: */ | 1148 | /* Append very detailed run extra attributes: */ |
1149 | if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) | 1149 | if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) |
1150 | return -1; | 1150 | return -1; |
1151 | 1151 | ||
1152 | if (detailed_run < 3) | 1152 | if (detailed_run < 3) |
1153 | return 0; | 1153 | return 0; |
1154 | 1154 | ||
1155 | /* Append very, very detailed run extra attributes: */ | 1155 | /* Append very, very detailed run extra attributes: */ |
1156 | return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); | 1156 | return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | int cmd_stat(int argc, const char **argv, const char *prefix __used) | 1159 | int cmd_stat(int argc, const char **argv, const char *prefix __used) |
@@ -1179,6 +1179,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
1179 | fprintf(stderr, "cannot use both --output and --log-fd\n"); | 1179 | fprintf(stderr, "cannot use both --output and --log-fd\n"); |
1180 | usage_with_options(stat_usage, options); | 1180 | usage_with_options(stat_usage, options); |
1181 | } | 1181 | } |
1182 | |||
1183 | if (output_fd < 0) { | ||
1184 | fprintf(stderr, "argument to --log-fd must be a > 0\n"); | ||
1185 | usage_with_options(stat_usage, options); | ||
1186 | } | ||
1187 | |||
1182 | if (!output) { | 1188 | if (!output) { |
1183 | struct timespec tm; | 1189 | struct timespec tm; |
1184 | mode = append_file ? "a" : "w"; | 1190 | mode = append_file ? "a" : "w"; |
@@ -1190,7 +1196,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
1190 | } | 1196 | } |
1191 | clock_gettime(CLOCK_REALTIME, &tm); | 1197 | clock_gettime(CLOCK_REALTIME, &tm); |
1192 | fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); | 1198 | fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); |
1193 | } else if (output_fd != 2) { | 1199 | } else if (output_fd > 0) { |
1194 | mode = append_file ? "a" : "w"; | 1200 | mode = append_file ? "a" : "w"; |
1195 | output = fdopen(output_fd, mode); | 1201 | output = fdopen(output_fd, mode); |
1196 | if (!output) { | 1202 | if (!output) { |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 871b540293e1..6bb0277b7dfe 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -787,7 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool, | |||
787 | } | 787 | } |
788 | 788 | ||
789 | if (symbol_conf.use_callchain) { | 789 | if (symbol_conf.use_callchain) { |
790 | err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, | 790 | err = callchain_append(he->callchain, &callchain_cursor, |
791 | sample->period); | 791 | sample->period); |
792 | if (err) | 792 | if (err) |
793 | return; | 793 | return; |
diff --git a/tools/perf/design.txt b/tools/perf/design.txt index bd0bb1b1279b..67e5d0cace85 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt | |||
@@ -409,14 +409,15 @@ Counters can be enabled and disabled in two ways: via ioctl and via | |||
409 | prctl. When a counter is disabled, it doesn't count or generate | 409 | prctl. When a counter is disabled, it doesn't count or generate |
410 | events but does continue to exist and maintain its count value. | 410 | events but does continue to exist and maintain its count value. |
411 | 411 | ||
412 | An individual counter or counter group can be enabled with | 412 | An individual counter can be enabled with |
413 | 413 | ||
414 | ioctl(fd, PERF_EVENT_IOC_ENABLE); | 414 | ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); |
415 | 415 | ||
416 | or disabled with | 416 | or disabled with |
417 | 417 | ||
418 | ioctl(fd, PERF_EVENT_IOC_DISABLE); | 418 | ioctl(fd, PERF_EVENT_IOC_DISABLE, 0); |
419 | 419 | ||
420 | For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument. | ||
420 | Enabling or disabling the leader of a group enables or disables the | 421 | Enabling or disabling the leader of a group enables or disables the |
421 | whole group; that is, while the group leader is disabled, none of the | 422 | whole group; that is, while the group leader is disabled, none of the |
422 | counters in the group will count. Enabling or disabling a member of a | 423 | counters in the group will count. Enabling or disabling a member of a |
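The design.txt change documents that these ioctls take a third argument and that PERF_IOC_FLAG_GROUP applies the operation to a whole counter group. A minimal sketch of the calling convention; fd and group_fd are assumed to come from perf_event_open() and are not shown here:

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static void toggle(int fd, int group_fd)
    {
    	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* a single counter        */
    	ioctl(group_fd, PERF_EVENT_IOC_DISABLE,
    	      PERF_IOC_FLAG_GROUP);		/* the leader's whole group */
    }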
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 4deea6aaf927..34b1c46eaf42 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c | |||
@@ -668,7 +668,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx, | |||
668 | "q/ESC/CTRL+C Exit\n\n" | 668 | "q/ESC/CTRL+C Exit\n\n" |
669 | "-> Go to target\n" | 669 | "-> Go to target\n" |
670 | "<- Exit\n" | 670 | "<- Exit\n" |
671 | "h Cycle thru hottest instructions\n" | 671 | "H Cycle thru hottest instructions\n" |
672 | "j Toggle showing jump to target arrows\n" | 672 | "j Toggle showing jump to target arrows\n" |
673 | "J Toggle showing number of jump sources on targets\n" | 673 | "J Toggle showing number of jump sources on targets\n" |
674 | "n Search next string\n" | 674 | "n Search next string\n" |
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN index ad73300f7bac..95264f304179 100755 --- a/tools/perf/util/PERF-VERSION-GEN +++ b/tools/perf/util/PERF-VERSION-GEN | |||
@@ -12,7 +12,7 @@ LF=' | |||
12 | # First check if there is a .git to get the version from git describe | 12 | # First check if there is a .git to get the version from git describe |
13 | # otherwise try to get the version from the kernel makefile | 13 | # otherwise try to get the version from the kernel makefile |
14 | if test -d ../../.git -o -f ../../.git && | 14 | if test -d ../../.git -o -f ../../.git && |
15 | VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && | 15 | VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && |
16 | case "$VN" in | 16 | case "$VN" in |
17 | *$LF*) (exit 1) ;; | 17 | *$LF*) (exit 1) ;; |
18 | v[0-9]*) | 18 | v[0-9]*) |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 9f7106a8d9a4..3a6bff47614f 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include "util.h" | 18 | #include "util.h" |
19 | #include "callchain.h" | 19 | #include "callchain.h" |
20 | 20 | ||
21 | __thread struct callchain_cursor callchain_cursor; | ||
22 | |||
21 | bool ip_callchain__valid(struct ip_callchain *chain, | 23 | bool ip_callchain__valid(struct ip_callchain *chain, |
22 | const union perf_event *event) | 24 | const union perf_event *event) |
23 | { | 25 | { |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 7f9c0f1ae3a9..3bdb407f9cd9 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
@@ -76,6 +76,8 @@ struct callchain_cursor { | |||
76 | struct callchain_cursor_node *curr; | 76 | struct callchain_cursor_node *curr; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | extern __thread struct callchain_cursor callchain_cursor; | ||
80 | |||
79 | static inline void callchain_init(struct callchain_root *root) | 81 | static inline void callchain_init(struct callchain_root *root) |
80 | { | 82 | { |
81 | INIT_LIST_HEAD(&root->node.siblings); | 83 | INIT_LIST_HEAD(&root->node.siblings); |
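The callchain changes above drop the per-hists cursor in favour of a single __thread callchain_cursor, so every thread transparently gets its own instance. A small stand-alone illustration of that storage class (GCC/Clang thread-local storage); the variable and function names are made up for the example:

    #include <stdio.h>
    #include <pthread.h>

    /* Each thread sees its own copy: shared by name, not by storage. */
    static __thread int tls_counter;

    static void *worker(void *arg)
    {
    	(void)arg;
    	tls_counter++;
    	printf("worker copy: %d\n", tls_counter);
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_create(&t, NULL, worker, NULL);
    	pthread_join(t, NULL);
    	printf("main copy:   %d\n", tls_counter);	/* still 0 */
    	return 0;
    }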
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 4ac5f5ae4ce9..7400fb3fc50c 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -159,6 +159,17 @@ out_delete_partial_list: | |||
159 | return -1; | 159 | return -1; |
160 | } | 160 | } |
161 | 161 | ||
162 | int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, | ||
163 | struct perf_event_attr *attrs, size_t nr_attrs) | ||
164 | { | ||
165 | size_t i; | ||
166 | |||
167 | for (i = 0; i < nr_attrs; i++) | ||
168 | event_attr_init(attrs + i); | ||
169 | |||
170 | return perf_evlist__add_attrs(evlist, attrs, nr_attrs); | ||
171 | } | ||
172 | |||
162 | static int trace_event__id(const char *evname) | 173 | static int trace_event__id(const char *evname) |
163 | { | 174 | { |
164 | char *filename, *colon; | 175 | char *filename, *colon; |
@@ -263,7 +274,8 @@ void perf_evlist__disable(struct perf_evlist *evlist) | |||
263 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 274 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { |
264 | list_for_each_entry(pos, &evlist->entries, node) { | 275 | list_for_each_entry(pos, &evlist->entries, node) { |
265 | for (thread = 0; thread < evlist->threads->nr; thread++) | 276 | for (thread = 0; thread < evlist->threads->nr; thread++) |
266 | ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); | 277 | ioctl(FD(pos, cpu, thread), |
278 | PERF_EVENT_IOC_DISABLE, 0); | ||
267 | } | 279 | } |
268 | } | 280 | } |
269 | } | 281 | } |
@@ -276,7 +288,8 @@ void perf_evlist__enable(struct perf_evlist *evlist) | |||
276 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | 288 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { |
277 | list_for_each_entry(pos, &evlist->entries, node) { | 289 | list_for_each_entry(pos, &evlist->entries, node) { |
278 | for (thread = 0; thread < evlist->threads->nr; thread++) | 290 | for (thread = 0; thread < evlist->threads->nr; thread++) |
279 | ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); | 291 | ioctl(FD(pos, cpu, thread), |
292 | PERF_EVENT_IOC_ENABLE, 0); | ||
280 | } | 293 | } |
281 | } | 294 | } |
282 | } | 295 | } |
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 58abb63ac13a..989bee9624c2 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -54,6 +54,8 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); | |||
54 | int perf_evlist__add_default(struct perf_evlist *evlist); | 54 | int perf_evlist__add_default(struct perf_evlist *evlist); |
55 | int perf_evlist__add_attrs(struct perf_evlist *evlist, | 55 | int perf_evlist__add_attrs(struct perf_evlist *evlist, |
56 | struct perf_event_attr *attrs, size_t nr_attrs); | 56 | struct perf_event_attr *attrs, size_t nr_attrs); |
57 | int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, | ||
58 | struct perf_event_attr *attrs, size_t nr_attrs); | ||
57 | int perf_evlist__add_tracepoints(struct perf_evlist *evlist, | 59 | int perf_evlist__add_tracepoints(struct perf_evlist *evlist, |
58 | const char *tracepoints[], size_t nr_tracepoints); | 60 | const char *tracepoints[], size_t nr_tracepoints); |
59 | int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, | 61 | int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, |
@@ -62,6 +64,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, | |||
62 | 64 | ||
63 | #define perf_evlist__add_attrs_array(evlist, array) \ | 65 | #define perf_evlist__add_attrs_array(evlist, array) \ |
64 | perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) | 66 | perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) |
67 | #define perf_evlist__add_default_attrs(evlist, array) \ | ||
68 | __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) | ||
65 | 69 | ||
66 | #define perf_evlist__add_tracepoints_array(evlist, array) \ | 70 | #define perf_evlist__add_tracepoints_array(evlist, array) \ |
67 | perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) | 71 | perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 91d19138f3ec..9f6cebd798ee 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -494,16 +494,24 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel, | |||
494 | } | 494 | } |
495 | 495 | ||
496 | static int perf_event__parse_id_sample(const union perf_event *event, u64 type, | 496 | static int perf_event__parse_id_sample(const union perf_event *event, u64 type, |
497 | struct perf_sample *sample) | 497 | struct perf_sample *sample, |
498 | bool swapped) | ||
498 | { | 499 | { |
499 | const u64 *array = event->sample.array; | 500 | const u64 *array = event->sample.array; |
501 | union u64_swap u; | ||
500 | 502 | ||
501 | array += ((event->header.size - | 503 | array += ((event->header.size - |
502 | sizeof(event->header)) / sizeof(u64)) - 1; | 504 | sizeof(event->header)) / sizeof(u64)) - 1; |
503 | 505 | ||
504 | if (type & PERF_SAMPLE_CPU) { | 506 | if (type & PERF_SAMPLE_CPU) { |
505 | u32 *p = (u32 *)array; | 507 | u.val64 = *array; |
506 | sample->cpu = *p; | 508 | if (swapped) { |
509 | /* undo swap of u64, then swap on individual u32s */ | ||
510 | u.val64 = bswap_64(u.val64); | ||
511 | u.val32[0] = bswap_32(u.val32[0]); | ||
512 | } | ||
513 | |||
514 | sample->cpu = u.val32[0]; | ||
507 | array--; | 515 | array--; |
508 | } | 516 | } |
509 | 517 | ||
@@ -523,9 +531,16 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type, | |||
523 | } | 531 | } |
524 | 532 | ||
525 | if (type & PERF_SAMPLE_TID) { | 533 | if (type & PERF_SAMPLE_TID) { |
526 | u32 *p = (u32 *)array; | 534 | u.val64 = *array; |
527 | sample->pid = p[0]; | 535 | if (swapped) { |
528 | sample->tid = p[1]; | 536 | /* undo swap of u64, then swap on individual u32s */ |
537 | u.val64 = bswap_64(u.val64); | ||
538 | u.val32[0] = bswap_32(u.val32[0]); | ||
539 | u.val32[1] = bswap_32(u.val32[1]); | ||
540 | } | ||
541 | |||
542 | sample->pid = u.val32[0]; | ||
543 | sample->tid = u.val32[1]; | ||
529 | } | 544 | } |
530 | 545 | ||
531 | return 0; | 546 | return 0; |
@@ -562,7 +577,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
562 | if (event->header.type != PERF_RECORD_SAMPLE) { | 577 | if (event->header.type != PERF_RECORD_SAMPLE) { |
563 | if (!sample_id_all) | 578 | if (!sample_id_all) |
564 | return 0; | 579 | return 0; |
565 | return perf_event__parse_id_sample(event, type, data); | 580 | return perf_event__parse_id_sample(event, type, data, swapped); |
566 | } | 581 | } |
567 | 582 | ||
568 | array = event->sample.array; | 583 | array = event->sample.array; |
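The evsel.c hunks handle cross-endian files by undoing the blanket 64-bit swap and then swapping the two packed 32-bit halves (pid/tid, cpu) individually. A self-contained sketch of that unpacking step, with an example value chosen purely for demonstration:

    #include <stdio.h>
    #include <stdint.h>
    #include <byteswap.h>

    union u64_swap {
    	uint64_t val64;
    	uint32_t val32[2];
    };

    /* Two u32 fields packed in one u64 that was byte-swapped as a whole:
     * undo the 64-bit swap, then swap each 32-bit half on its own. */
    static void unpack_swapped_pair(uint64_t raw, uint32_t *lo, uint32_t *hi)
    {
    	union u64_swap u = { .val64 = bswap_64(raw) };

    	*lo = bswap_32(u.val32[0]);
    	*hi = bswap_32(u.val32[1]);
    }

    int main(void)
    {
    	uint32_t pid, tid;

    	/* On a little-endian host this prints "123 456". */
    	unpack_swapped_pair(0x0000007b000001c8ULL, &pid, &tid);
    	printf("%u %u\n", pid, tid);
    	return 0;
    }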
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 2dd5edf161b7..e909d43cf542 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -1942,7 +1942,6 @@ int perf_file_header__read(struct perf_file_header *header, | |||
1942 | else | 1942 | else |
1943 | return -1; | 1943 | return -1; |
1944 | } else if (ph->needs_swap) { | 1944 | } else if (ph->needs_swap) { |
1945 | unsigned int i; | ||
1946 | /* | 1945 | /* |
1947 | * feature bitmap is declared as an array of unsigned longs -- | 1946 | * feature bitmap is declared as an array of unsigned longs -- |
1948 | * not good since its size can differ between the host that | 1947 | * not good since its size can differ between the host that |
@@ -1958,14 +1957,17 @@ int perf_file_header__read(struct perf_file_header *header, | |||
1958 | * file), punt and fallback to the original behavior -- | 1957 | * file), punt and fallback to the original behavior -- |
1959 | * clearing all feature bits and setting buildid. | 1958 | * clearing all feature bits and setting buildid. |
1960 | */ | 1959 | */ |
1961 | for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) | 1960 | mem_bswap_64(&header->adds_features, |
1962 | header->adds_features[i] = bswap_64(header->adds_features[i]); | 1961 | BITS_TO_U64(HEADER_FEAT_BITS)); |
1963 | 1962 | ||
1964 | if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { | 1963 | if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { |
1965 | for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) { | 1964 | /* unswap as u64 */ |
1966 | header->adds_features[i] = bswap_64(header->adds_features[i]); | 1965 | mem_bswap_64(&header->adds_features, |
1967 | header->adds_features[i] = bswap_32(header->adds_features[i]); | 1966 | BITS_TO_U64(HEADER_FEAT_BITS)); |
1968 | } | 1967 | |
1968 | /* unswap as u32 */ | ||
1969 | mem_bswap_32(&header->adds_features, | ||
1970 | BITS_TO_U32(HEADER_FEAT_BITS)); | ||
1969 | } | 1971 | } |
1970 | 1972 | ||
1971 | if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { | 1973 | if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { |
@@ -2091,6 +2093,35 @@ static int read_attr(int fd, struct perf_header *ph, | |||
2091 | return ret <= 0 ? -1 : 0; | 2093 | return ret <= 0 ? -1 : 0; |
2092 | } | 2094 | } |
2093 | 2095 | ||
2096 | static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel) | ||
2097 | { | ||
2098 | struct event_format *event = trace_find_event(evsel->attr.config); | ||
2099 | char bf[128]; | ||
2100 | |||
2101 | if (event == NULL) | ||
2102 | return -1; | ||
2103 | |||
2104 | snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); | ||
2105 | evsel->name = strdup(bf); | ||
2106 | if (event->name == NULL) | ||
2107 | return -1; | ||
2108 | |||
2109 | return 0; | ||
2110 | } | ||
2111 | |||
2112 | static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist) | ||
2113 | { | ||
2114 | struct perf_evsel *pos; | ||
2115 | |||
2116 | list_for_each_entry(pos, &evlist->entries, node) { | ||
2117 | if (pos->attr.type == PERF_TYPE_TRACEPOINT && | ||
2118 | perf_evsel__set_tracepoint_name(pos)) | ||
2119 | return -1; | ||
2120 | } | ||
2121 | |||
2122 | return 0; | ||
2123 | } | ||
2124 | |||
2094 | int perf_session__read_header(struct perf_session *session, int fd) | 2125 | int perf_session__read_header(struct perf_session *session, int fd) |
2095 | { | 2126 | { |
2096 | struct perf_header *header = &session->header; | 2127 | struct perf_header *header = &session->header; |
@@ -2172,6 +2203,9 @@ int perf_session__read_header(struct perf_session *session, int fd) | |||
2172 | 2203 | ||
2173 | lseek(fd, header->data_offset, SEEK_SET); | 2204 | lseek(fd, header->data_offset, SEEK_SET); |
2174 | 2205 | ||
2206 | if (perf_evlist__set_tracepoint_names(session->evlist)) | ||
2207 | goto out_delete_evlist; | ||
2208 | |||
2175 | header->frozen = 1; | 2209 | header->frozen = 1; |
2176 | return 0; | 2210 | return 0; |
2177 | out_errno: | 2211 | out_errno: |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 1293b5ebea4d..514e2a4b367d 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -378,7 +378,7 @@ void hist_entry__free(struct hist_entry *he) | |||
378 | * collapse the histogram | 378 | * collapse the histogram |
379 | */ | 379 | */ |
380 | 380 | ||
381 | static bool hists__collapse_insert_entry(struct hists *hists, | 381 | static bool hists__collapse_insert_entry(struct hists *hists __used, |
382 | struct rb_root *root, | 382 | struct rb_root *root, |
383 | struct hist_entry *he) | 383 | struct hist_entry *he) |
384 | { | 384 | { |
@@ -397,8 +397,9 @@ static bool hists__collapse_insert_entry(struct hists *hists, | |||
397 | iter->period += he->period; | 397 | iter->period += he->period; |
398 | iter->nr_events += he->nr_events; | 398 | iter->nr_events += he->nr_events; |
399 | if (symbol_conf.use_callchain) { | 399 | if (symbol_conf.use_callchain) { |
400 | callchain_cursor_reset(&hists->callchain_cursor); | 400 | callchain_cursor_reset(&callchain_cursor); |
401 | callchain_merge(&hists->callchain_cursor, iter->callchain, | 401 | callchain_merge(&callchain_cursor, |
402 | iter->callchain, | ||
402 | he->callchain); | 403 | he->callchain); |
403 | } | 404 | } |
404 | hist_entry__free(he); | 405 | hist_entry__free(he); |
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index cfc64e293f90..34bb556d6219 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -67,8 +67,6 @@ struct hists { | |||
67 | struct events_stats stats; | 67 | struct events_stats stats; |
68 | u64 event_stream; | 68 | u64 event_stream; |
69 | u16 col_len[HISTC_NR_COLS]; | 69 | u16 col_len[HISTC_NR_COLS]; |
70 | /* Best would be to reuse the session callchain cursor */ | ||
71 | struct callchain_cursor callchain_cursor; | ||
72 | }; | 70 | }; |
73 | 71 | ||
74 | struct hist_entry *__hists__add_entry(struct hists *self, | 72 | struct hist_entry *__hists__add_entry(struct hists *self, |
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index f1584833bd22..587a230d2075 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h | |||
@@ -8,6 +8,8 @@ | |||
8 | #define BITS_PER_LONG __WORDSIZE | 8 | #define BITS_PER_LONG __WORDSIZE |
9 | #define BITS_PER_BYTE 8 | 9 | #define BITS_PER_BYTE 8 |
10 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) | 10 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
11 | #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) | ||
12 | #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) | ||
11 | 13 | ||
12 | #define for_each_set_bit(bit, addr, size) \ | 14 | #define for_each_set_bit(bit, addr, size) \ |
13 | for ((bit) = find_first_bit((addr), (size)); \ | 15 | for ((bit) = find_first_bit((addr), (size)); \ |
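The new BITS_TO_U64/BITS_TO_U32 helpers round a bit count up to the number of fixed-size words needed, which is what lets the feature bitmap be byte-swapped in u64 or u32 units regardless of the host's long size. A tiny illustration, with DIV_ROUND_UP spelled out locally since it lives in another perf header, and 256 used as an example bitmap size:

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
    #define BITS_PER_BYTE		8
    #define BITS_TO_U64(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint64_t))
    #define BITS_TO_U32(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint32_t))

    int main(void)
    {
    	/* A 256-bit feature bitmap needs 4 u64 words or 8 u32 words. */
    	printf("%zu u64, %zu u32\n", BITS_TO_U64(256), BITS_TO_U32(256));
    	return 0;
    }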
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c index 1915de20dcac..3322b8446e89 100644 --- a/tools/perf/util/pager.c +++ b/tools/perf/util/pager.c | |||
@@ -57,6 +57,10 @@ void setup_pager(void) | |||
57 | } | 57 | } |
58 | if (!pager) | 58 | if (!pager) |
59 | pager = getenv("PAGER"); | 59 | pager = getenv("PAGER"); |
60 | if (!pager) { | ||
61 | if (!access("/usr/bin/pager", X_OK)) | ||
62 | pager = "/usr/bin/pager"; | ||
63 | } | ||
60 | if (!pager) | 64 | if (!pager) |
61 | pager = "less"; | 65 | pager = "less"; |
62 | else if (!*pager || !strcmp(pager, "cat")) | 66 | else if (!*pager || !strcmp(pager, "cat")) |
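The pager.c hunk adds a fallback to /usr/bin/pager (a common distro alternatives path) when $PAGER is unset and before defaulting to "less". A sketch of the resulting lookup order; the earlier perf-specific environment checks are omitted and the function name is illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static const char *resolve_pager(void)
    {
    	const char *pager = getenv("PAGER");

    	if (!pager && !access("/usr/bin/pager", X_OK))
    		pager = "/usr/bin/pager";
    	if (!pager)
    		pager = "less";
    	return pager;
    }

    int main(void)
    {
    	printf("%s\n", resolve_pager());
    	return 0;
    }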
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 59dccc98b554..0dda25d82d06 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -2164,16 +2164,12 @@ int del_perf_probe_events(struct strlist *dellist) | |||
2164 | 2164 | ||
2165 | error: | 2165 | error: |
2166 | if (kfd >= 0) { | 2166 | if (kfd >= 0) { |
2167 | if (namelist) | 2167 | strlist__delete(namelist); |
2168 | strlist__delete(namelist); | ||
2169 | |||
2170 | close(kfd); | 2168 | close(kfd); |
2171 | } | 2169 | } |
2172 | 2170 | ||
2173 | if (ufd >= 0) { | 2171 | if (ufd >= 0) { |
2174 | if (unamelist) | 2172 | strlist__delete(unamelist); |
2175 | strlist__delete(unamelist); | ||
2176 | |||
2177 | close(ufd); | 2173 | close(ufd); |
2178 | } | 2174 | } |
2179 | 2175 | ||
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 93d355d27109..c3e399bcf18d 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -288,7 +288,8 @@ struct branch_info *machine__resolve_bstack(struct machine *self, | |||
288 | return bi; | 288 | return bi; |
289 | } | 289 | } |
290 | 290 | ||
291 | int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | 291 | int machine__resolve_callchain(struct machine *self, |
292 | struct perf_evsel *evsel __used, | ||
292 | struct thread *thread, | 293 | struct thread *thread, |
293 | struct ip_callchain *chain, | 294 | struct ip_callchain *chain, |
294 | struct symbol **parent) | 295 | struct symbol **parent) |
@@ -297,7 +298,12 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | |||
297 | unsigned int i; | 298 | unsigned int i; |
298 | int err; | 299 | int err; |
299 | 300 | ||
300 | callchain_cursor_reset(&evsel->hists.callchain_cursor); | 301 | callchain_cursor_reset(&callchain_cursor); |
302 | |||
303 | if (chain->nr > PERF_MAX_STACK_DEPTH) { | ||
304 | pr_warning("corrupted callchain. skipping...\n"); | ||
305 | return 0; | ||
306 | } | ||
301 | 307 | ||
302 | for (i = 0; i < chain->nr; i++) { | 308 | for (i = 0; i < chain->nr; i++) { |
303 | u64 ip; | 309 | u64 ip; |
@@ -317,7 +323,14 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | |||
317 | case PERF_CONTEXT_USER: | 323 | case PERF_CONTEXT_USER: |
318 | cpumode = PERF_RECORD_MISC_USER; break; | 324 | cpumode = PERF_RECORD_MISC_USER; break; |
319 | default: | 325 | default: |
320 | break; | 326 | pr_debug("invalid callchain context: " |
327 | "%"PRId64"\n", (s64) ip); | ||
328 | /* | ||
329 | * It seems the callchain is corrupted. | ||
330 | * Discard all. | ||
331 | */ | ||
332 | callchain_cursor_reset(&callchain_cursor); | ||
333 | return 0; | ||
321 | } | 334 | } |
322 | continue; | 335 | continue; |
323 | } | 336 | } |
@@ -333,7 +346,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, | |||
333 | break; | 346 | break; |
334 | } | 347 | } |
335 | 348 | ||
336 | err = callchain_cursor_append(&evsel->hists.callchain_cursor, | 349 | err = callchain_cursor_append(&callchain_cursor, |
337 | ip, al.map, al.sym); | 350 | ip, al.map, al.sym); |
338 | if (err) | 351 | if (err) |
339 | return err; | 352 | return err; |
@@ -429,6 +442,16 @@ static void perf_tool__fill_defaults(struct perf_tool *tool) | |||
429 | tool->finished_round = process_finished_round_stub; | 442 | tool->finished_round = process_finished_round_stub; |
430 | } | 443 | } |
431 | } | 444 | } |
445 | |||
446 | void mem_bswap_32(void *src, int byte_size) | ||
447 | { | ||
448 | u32 *m = src; | ||
449 | while (byte_size > 0) { | ||
450 | *m = bswap_32(*m); | ||
451 | byte_size -= sizeof(u32); | ||
452 | ++m; | ||
453 | } | ||
454 | } | ||
432 | 455 | ||
433 | void mem_bswap_64(void *src, int byte_size) | 456 | void mem_bswap_64(void *src, int byte_size) |
434 | { | 457 | { |
@@ -441,37 +464,65 @@ void mem_bswap_64(void *src, int byte_size) | |||
441 | } | 464 | } |
442 | } | 465 | } |
443 | 466 | ||
444 | static void perf_event__all64_swap(union perf_event *event) | 467 | static void swap_sample_id_all(union perf_event *event, void *data) |
468 | { | ||
469 | void *end = (void *) event + event->header.size; | ||
470 | int size = end - data; | ||
471 | |||
472 | BUG_ON(size % sizeof(u64)); | ||
473 | mem_bswap_64(data, size); | ||
474 | } | ||
475 | |||
476 | static void perf_event__all64_swap(union perf_event *event, | ||
477 | bool sample_id_all __used) | ||
445 | { | 478 | { |
446 | struct perf_event_header *hdr = &event->header; | 479 | struct perf_event_header *hdr = &event->header; |
447 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); | 480 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); |
448 | } | 481 | } |
449 | 482 | ||
450 | static void perf_event__comm_swap(union perf_event *event) | 483 | static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) |
451 | { | 484 | { |
452 | event->comm.pid = bswap_32(event->comm.pid); | 485 | event->comm.pid = bswap_32(event->comm.pid); |
453 | event->comm.tid = bswap_32(event->comm.tid); | 486 | event->comm.tid = bswap_32(event->comm.tid); |
487 | |||
488 | if (sample_id_all) { | ||
489 | void *data = &event->comm.comm; | ||
490 | |||
491 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | ||
492 | swap_sample_id_all(event, data); | ||
493 | } | ||
454 | } | 494 | } |
455 | 495 | ||
456 | static void perf_event__mmap_swap(union perf_event *event) | 496 | static void perf_event__mmap_swap(union perf_event *event, |
497 | bool sample_id_all) | ||
457 | { | 498 | { |
458 | event->mmap.pid = bswap_32(event->mmap.pid); | 499 | event->mmap.pid = bswap_32(event->mmap.pid); |
459 | event->mmap.tid = bswap_32(event->mmap.tid); | 500 | event->mmap.tid = bswap_32(event->mmap.tid); |
460 | event->mmap.start = bswap_64(event->mmap.start); | 501 | event->mmap.start = bswap_64(event->mmap.start); |
461 | event->mmap.len = bswap_64(event->mmap.len); | 502 | event->mmap.len = bswap_64(event->mmap.len); |
462 | event->mmap.pgoff = bswap_64(event->mmap.pgoff); | 503 | event->mmap.pgoff = bswap_64(event->mmap.pgoff); |
504 | |||
505 | if (sample_id_all) { | ||
506 | void *data = &event->mmap.filename; | ||
507 | |||
508 | data += ALIGN(strlen(data) + 1, sizeof(u64)); | ||
509 | swap_sample_id_all(event, data); | ||
510 | } | ||
463 | } | 511 | } |
464 | 512 | ||
465 | static void perf_event__task_swap(union perf_event *event) | 513 | static void perf_event__task_swap(union perf_event *event, bool sample_id_all) |
466 | { | 514 | { |
467 | event->fork.pid = bswap_32(event->fork.pid); | 515 | event->fork.pid = bswap_32(event->fork.pid); |
468 | event->fork.tid = bswap_32(event->fork.tid); | 516 | event->fork.tid = bswap_32(event->fork.tid); |
469 | event->fork.ppid = bswap_32(event->fork.ppid); | 517 | event->fork.ppid = bswap_32(event->fork.ppid); |
470 | event->fork.ptid = bswap_32(event->fork.ptid); | 518 | event->fork.ptid = bswap_32(event->fork.ptid); |
471 | event->fork.time = bswap_64(event->fork.time); | 519 | event->fork.time = bswap_64(event->fork.time); |
520 | |||
521 | if (sample_id_all) | ||
522 | swap_sample_id_all(event, &event->fork + 1); | ||
472 | } | 523 | } |
473 | 524 | ||
474 | static void perf_event__read_swap(union perf_event *event) | 525 | static void perf_event__read_swap(union perf_event *event, bool sample_id_all) |
475 | { | 526 | { |
476 | event->read.pid = bswap_32(event->read.pid); | 527 | event->read.pid = bswap_32(event->read.pid); |
477 | event->read.tid = bswap_32(event->read.tid); | 528 | event->read.tid = bswap_32(event->read.tid); |
@@ -479,6 +530,9 @@ static void perf_event__read_swap(union perf_event *event) | |||
479 | event->read.time_enabled = bswap_64(event->read.time_enabled); | 530 | event->read.time_enabled = bswap_64(event->read.time_enabled); |
480 | event->read.time_running = bswap_64(event->read.time_running); | 531 | event->read.time_running = bswap_64(event->read.time_running); |
481 | event->read.id = bswap_64(event->read.id); | 532 | event->read.id = bswap_64(event->read.id); |
533 | |||
534 | if (sample_id_all) | ||
535 | swap_sample_id_all(event, &event->read + 1); | ||
482 | } | 536 | } |
483 | 537 | ||
484 | static u8 revbyte(u8 b) | 538 | static u8 revbyte(u8 b) |
@@ -530,7 +584,8 @@ void perf_event__attr_swap(struct perf_event_attr *attr) | |||
530 | swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); | 584 | swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); |
531 | } | 585 | } |
532 | 586 | ||
533 | static void perf_event__hdr_attr_swap(union perf_event *event) | 587 | static void perf_event__hdr_attr_swap(union perf_event *event, |
588 | bool sample_id_all __used) | ||
534 | { | 589 | { |
535 | size_t size; | 590 | size_t size; |
536 | 591 | ||
@@ -541,18 +596,21 @@ static void perf_event__hdr_attr_swap(union perf_event *event) | |||
541 | mem_bswap_64(event->attr.id, size); | 596 | mem_bswap_64(event->attr.id, size); |
542 | } | 597 | } |
543 | 598 | ||
544 | static void perf_event__event_type_swap(union perf_event *event) | 599 | static void perf_event__event_type_swap(union perf_event *event, |
600 | bool sample_id_all __used) | ||
545 | { | 601 | { |
546 | event->event_type.event_type.event_id = | 602 | event->event_type.event_type.event_id = |
547 | bswap_64(event->event_type.event_type.event_id); | 603 | bswap_64(event->event_type.event_type.event_id); |
548 | } | 604 | } |
549 | 605 | ||
550 | static void perf_event__tracing_data_swap(union perf_event *event) | 606 | static void perf_event__tracing_data_swap(union perf_event *event, |
607 | bool sample_id_all __used) | ||
551 | { | 608 | { |
552 | event->tracing_data.size = bswap_32(event->tracing_data.size); | 609 | event->tracing_data.size = bswap_32(event->tracing_data.size); |
553 | } | 610 | } |
554 | 611 | ||
555 | typedef void (*perf_event__swap_op)(union perf_event *event); | 612 | typedef void (*perf_event__swap_op)(union perf_event *event, |
613 | bool sample_id_all); | ||
556 | 614 | ||
557 | static perf_event__swap_op perf_event__swap_ops[] = { | 615 | static perf_event__swap_op perf_event__swap_ops[] = { |
558 | [PERF_RECORD_MMAP] = perf_event__mmap_swap, | 616 | [PERF_RECORD_MMAP] = perf_event__mmap_swap, |
@@ -986,6 +1044,15 @@ static int perf_session__process_user_event(struct perf_session *session, union | |||
986 | } | 1044 | } |
987 | } | 1045 | } |
988 | 1046 | ||
1047 | static void event_swap(union perf_event *event, bool sample_id_all) | ||
1048 | { | ||
1049 | perf_event__swap_op swap; | ||
1050 | |||
1051 | swap = perf_event__swap_ops[event->header.type]; | ||
1052 | if (swap) | ||
1053 | swap(event, sample_id_all); | ||
1054 | } | ||
1055 | |||
989 | static int perf_session__process_event(struct perf_session *session, | 1056 | static int perf_session__process_event(struct perf_session *session, |
990 | union perf_event *event, | 1057 | union perf_event *event, |
991 | struct perf_tool *tool, | 1058 | struct perf_tool *tool, |
@@ -994,9 +1061,8 @@ static int perf_session__process_event(struct perf_session *session, | |||
994 | struct perf_sample sample; | 1061 | struct perf_sample sample; |
995 | int ret; | 1062 | int ret; |
996 | 1063 | ||
997 | if (session->header.needs_swap && | 1064 | if (session->header.needs_swap) |
998 | perf_event__swap_ops[event->header.type]) | 1065 | event_swap(event, session->sample_id_all); |
999 | perf_event__swap_ops[event->header.type](event); | ||
1000 | 1066 | ||
1001 | if (event->header.type >= PERF_RECORD_HEADER_MAX) | 1067 | if (event->header.type >= PERF_RECORD_HEADER_MAX) |
1002 | return -EINVAL; | 1068 | return -EINVAL; |
@@ -1428,7 +1494,6 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | |||
1428 | int print_sym, int print_dso, int print_symoffset) | 1494 | int print_sym, int print_dso, int print_symoffset) |
1429 | { | 1495 | { |
1430 | struct addr_location al; | 1496 | struct addr_location al; |
1431 | struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; | ||
1432 | struct callchain_cursor_node *node; | 1497 | struct callchain_cursor_node *node; |
1433 | 1498 | ||
1434 | if (perf_event__preprocess_sample(event, machine, &al, sample, | 1499 | if (perf_event__preprocess_sample(event, machine, &al, sample, |
@@ -1446,10 +1511,10 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | |||
1446 | error("Failed to resolve callchain. Skipping\n"); | 1511 | error("Failed to resolve callchain. Skipping\n"); |
1447 | return; | 1512 | return; |
1448 | } | 1513 | } |
1449 | callchain_cursor_commit(cursor); | 1514 | callchain_cursor_commit(&callchain_cursor); |
1450 | 1515 | ||
1451 | while (1) { | 1516 | while (1) { |
1452 | node = callchain_cursor_current(cursor); | 1517 | node = callchain_cursor_current(&callchain_cursor); |
1453 | if (!node) | 1518 | if (!node) |
1454 | break; | 1519 | break; |
1455 | 1520 | ||
@@ -1460,12 +1525,12 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, | |||
1460 | } | 1525 | } |
1461 | if (print_dso) { | 1526 | if (print_dso) { |
1462 | printf(" ("); | 1527 | printf(" ("); |
1463 | map__fprintf_dsoname(al.map, stdout); | 1528 | map__fprintf_dsoname(node->map, stdout); |
1464 | printf(")"); | 1529 | printf(")"); |
1465 | } | 1530 | } |
1466 | printf("\n"); | 1531 | printf("\n"); |
1467 | 1532 | ||
1468 | callchain_cursor_advance(cursor); | 1533 | callchain_cursor_advance(&callchain_cursor); |
1469 | } | 1534 | } |
1470 | 1535 | ||
1471 | } else { | 1536 | } else { |
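The print_ip hunk above drops the per-evsel cursor in favour of the single global callchain_cursor, but keeps the same commit/current/advance iteration protocol. The toy program below sketches that protocol with a simplified cursor type; none of these names are perf's.

#include <stddef.h>
#include <stdio.h>

struct cur_node {
	unsigned long long ip;
	struct cur_node *next;
};

struct cursor {
	struct cur_node *first;	/* filled while the chain is resolved */
	struct cur_node *curr;	/* iteration position after commit */
};

static void cursor_commit(struct cursor *c)		{ c->curr = c->first; }
static struct cur_node *cursor_current(struct cursor *c) { return c->curr; }
static void cursor_advance(struct cursor *c)		{ c->curr = c->curr->next; }

static struct cursor the_cursor;	/* one global cursor, as in the patch */

int main(void)
{
	struct cur_node b = { 0xbeef, NULL }, a = { 0xdead, &b };
	struct cur_node *node;

	the_cursor.first = &a;

	cursor_commit(&the_cursor);
	while (1) {
		node = cursor_current(&the_cursor);
		if (!node)
			break;
		printf("%llx\n", node->ip);
		cursor_advance(&the_cursor);
	}
	return 0;
}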
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 7a5434c00565..0c702e3f0a36 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -80,6 +80,7 @@ struct branch_info *machine__resolve_bstack(struct machine *self, | |||
80 | bool perf_session__has_traces(struct perf_session *self, const char *msg); | 80 | bool perf_session__has_traces(struct perf_session *self, const char *msg); |
81 | 81 | ||
82 | void mem_bswap_64(void *src, int byte_size); | 82 | void mem_bswap_64(void *src, int byte_size); |
83 | void mem_bswap_32(void *src, int byte_size); | ||
83 | void perf_event__attr_swap(struct perf_event_attr *attr); | 84 | void perf_event__attr_swap(struct perf_event_attr *attr); |
84 | 85 | ||
85 | int perf_session__create_kernel_maps(struct perf_session *self); | 86 | int perf_session__create_kernel_maps(struct perf_session *self); |
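session.h only gains the mem_bswap_32() declaration here; its body does not appear in this diff. Assuming it mirrors the existing mem_bswap_64(), it would look roughly like the sketch below, with uint32_t standing in for the tools' u32.

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

/* Plausible shape of mem_bswap_32(): byte-swap every 32-bit word of a
 * buffer in place.  A sketch under that assumption, not lifted from the
 * tree. */
static void mem_bswap_32(void *src, int byte_size)
{
	uint32_t *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(uint32_t);
		++m;
	}
}

int main(void)
{
	uint32_t buf[2] = { 0x11223344, 0xaabbccdd };

	mem_bswap_32(buf, sizeof(buf));
	printf("%x %x\n", buf[0], buf[1]);	/* 44332211 ddccbbaa */
	return 0;
}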
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e2ba8858f3e1..3e2e5ea0f03f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -323,6 +323,7 @@ struct dso *dso__new(const char *name) | |||
323 | dso->sorted_by_name = 0; | 323 | dso->sorted_by_name = 0; |
324 | dso->has_build_id = 0; | 324 | dso->has_build_id = 0; |
325 | dso->kernel = DSO_TYPE_USER; | 325 | dso->kernel = DSO_TYPE_USER; |
326 | dso->needs_swap = DSO_SWAP__UNSET; | ||
326 | INIT_LIST_HEAD(&dso->node); | 327 | INIT_LIST_HEAD(&dso->node); |
327 | } | 328 | } |
328 | 329 | ||
@@ -1156,6 +1157,33 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) | |||
1156 | return -1; | 1157 | return -1; |
1157 | } | 1158 | } |
1158 | 1159 | ||
1160 | static int dso__swap_init(struct dso *dso, unsigned char eidata) | ||
1161 | { | ||
1162 | static unsigned int const endian = 1; | ||
1163 | |||
1164 | dso->needs_swap = DSO_SWAP__NO; | ||
1165 | |||
1166 | switch (eidata) { | ||
1167 | case ELFDATA2LSB: | ||
1168 | /* We are big endian, DSO is little endian. */ | ||
1169 | if (*(unsigned char const *)&endian != 1) | ||
1170 | dso->needs_swap = DSO_SWAP__YES; | ||
1171 | break; | ||
1172 | |||
1173 | case ELFDATA2MSB: | ||
1174 | /* We are little endian, DSO is big endian. */ | ||
1175 | if (*(unsigned char const *)&endian != 0) | ||
1176 | dso->needs_swap = DSO_SWAP__YES; | ||
1177 | break; | ||
1178 | |||
1179 | default: | ||
1180 | pr_err("unrecognized DSO data encoding %d\n", eidata); | ||
1181 | return -EINVAL; | ||
1182 | } | ||
1183 | |||
1184 | return 0; | ||
1185 | } | ||
1186 | |||
1159 | static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | 1187 | static int dso__load_sym(struct dso *dso, struct map *map, const char *name, |
1160 | int fd, symbol_filter_t filter, int kmodule, | 1188 | int fd, symbol_filter_t filter, int kmodule, |
1161 | int want_symtab) | 1189 | int want_symtab) |
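dso__swap_init() above decides whether an object's symbol data needs swapping by comparing the ELF header's EI_DATA byte against the host byte order, which it probes by reading the first byte of an integer constant. The standalone program below isolates that probe; ELFDATA2LSB/ELFDATA2MSB come from <elf.h>, everything else is illustrative.

#include <elf.h>
#include <stdbool.h>
#include <stdio.h>

static bool host_is_little_endian(void)
{
	static const unsigned int endian = 1;

	/* On a little-endian host the low-order byte is stored first. */
	return *(const unsigned char *)&endian == 1;
}

static bool needs_swap(unsigned char ei_data)
{
	if (ei_data == ELFDATA2LSB)
		return !host_is_little_endian();	/* LE object, BE host */
	if (ei_data == ELFDATA2MSB)
		return host_is_little_endian();		/* BE object, LE host */
	return false;	/* unknown encoding; the real code errors out here */
}

int main(void)
{
	printf("host LE: %d, swap needed for MSB object: %d\n",
	       host_is_little_endian(), needs_swap(ELFDATA2MSB));
	return 0;
}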
@@ -1187,6 +1215,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | |||
1187 | goto out_elf_end; | 1215 | goto out_elf_end; |
1188 | } | 1216 | } |
1189 | 1217 | ||
1218 | if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) | ||
1219 | goto out_elf_end; | ||
1220 | |||
1190 | /* Always reject images with a mismatched build-id: */ | 1221 | /* Always reject images with a mismatched build-id: */ |
1191 | if (dso->has_build_id) { | 1222 | if (dso->has_build_id) { |
1192 | u8 build_id[BUILD_ID_SIZE]; | 1223 | u8 build_id[BUILD_ID_SIZE]; |
@@ -1272,7 +1303,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | |||
1272 | if (opdsec && sym.st_shndx == opdidx) { | 1303 | if (opdsec && sym.st_shndx == opdidx) { |
1273 | u32 offset = sym.st_value - opdshdr.sh_addr; | 1304 | u32 offset = sym.st_value - opdshdr.sh_addr; |
1274 | u64 *opd = opddata->d_buf + offset; | 1305 | u64 *opd = opddata->d_buf + offset; |
1275 | sym.st_value = *opd; | 1306 | sym.st_value = DSO__SWAP(dso, u64, *opd); |
1276 | sym.st_shndx = elf_addr_to_index(elf, sym.st_value); | 1307 | sym.st_shndx = elf_addr_to_index(elf, sym.st_value); |
1277 | } | 1308 | } |
1278 | 1309 | ||
@@ -2786,8 +2817,11 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type, | |||
2786 | 2817 | ||
2787 | struct map *dso__new_map(const char *name) | 2818 | struct map *dso__new_map(const char *name) |
2788 | { | 2819 | { |
2820 | struct map *map = NULL; | ||
2789 | struct dso *dso = dso__new(name); | 2821 | struct dso *dso = dso__new(name); |
2790 | struct map *map = map__new2(0, dso, MAP__FUNCTION); | 2822 | |
2823 | if (dso) | ||
2824 | map = map__new2(0, dso, MAP__FUNCTION); | ||
2791 | 2825 | ||
2792 | return map; | 2826 | return map; |
2793 | } | 2827 | } |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5649d63798cb..af0752b1aca1 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
10 | #include <linux/rbtree.h> | 10 | #include <linux/rbtree.h> |
11 | #include <stdio.h> | 11 | #include <stdio.h> |
12 | #include <byteswap.h> | ||
12 | 13 | ||
13 | #ifdef HAVE_CPLUS_DEMANGLE | 14 | #ifdef HAVE_CPLUS_DEMANGLE |
14 | extern char *cplus_demangle(const char *, int); | 15 | extern char *cplus_demangle(const char *, int); |
@@ -160,11 +161,18 @@ enum dso_kernel_type { | |||
160 | DSO_TYPE_GUEST_KERNEL | 161 | DSO_TYPE_GUEST_KERNEL |
161 | }; | 162 | }; |
162 | 163 | ||
164 | enum dso_swap_type { | ||
165 | DSO_SWAP__UNSET, | ||
166 | DSO_SWAP__NO, | ||
167 | DSO_SWAP__YES, | ||
168 | }; | ||
169 | |||
163 | struct dso { | 170 | struct dso { |
164 | struct list_head node; | 171 | struct list_head node; |
165 | struct rb_root symbols[MAP__NR_TYPES]; | 172 | struct rb_root symbols[MAP__NR_TYPES]; |
166 | struct rb_root symbol_names[MAP__NR_TYPES]; | 173 | struct rb_root symbol_names[MAP__NR_TYPES]; |
167 | enum dso_kernel_type kernel; | 174 | enum dso_kernel_type kernel; |
175 | enum dso_swap_type needs_swap; | ||
168 | u8 adjust_symbols:1; | 176 | u8 adjust_symbols:1; |
169 | u8 has_build_id:1; | 177 | u8 has_build_id:1; |
170 | u8 hit:1; | 178 | u8 hit:1; |
@@ -182,6 +190,28 @@ struct dso { | |||
182 | char name[0]; | 190 | char name[0]; |
183 | }; | 191 | }; |
184 | 192 | ||
193 | #define DSO__SWAP(dso, type, val) \ | ||
194 | ({ \ | ||
195 | type ____r = val; \ | ||
196 | BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ | ||
197 | if (dso->needs_swap == DSO_SWAP__YES) { \ | ||
198 | switch (sizeof(____r)) { \ | ||
199 | case 2: \ | ||
200 | ____r = bswap_16(val); \ | ||
201 | break; \ | ||
202 | case 4: \ | ||
203 | ____r = bswap_32(val); \ | ||
204 | break; \ | ||
205 | case 8: \ | ||
206 | ____r = bswap_64(val); \ | ||
207 | break; \ | ||
208 | default: \ | ||
209 | BUG_ON(1); \ | ||
210 | } \ | ||
211 | } \ | ||
212 | ____r; \ | ||
213 | }) | ||
214 | |||
185 | struct dso *dso__new(const char *name); | 215 | struct dso *dso__new(const char *name); |
186 | void dso__delete(struct dso *dso); | 216 | void dso__delete(struct dso *dso); |
187 | 217 | ||
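The DSO__SWAP() macro added above is a GNU statement expression that selects bswap_16/32/64 from the operand's size, guarded by the dso's needs_swap state. The sketch below reduces that guard to a bare flag so it compiles outside the tools tree (gcc/clang only); the .opd fix-up earlier in this diff is the kind of call site it serves.

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

/* Same sizeof-dispatch idiom, with a plain flag instead of struct dso.
 * Note that, as in the original, the value is evaluated more than once. */
#define COND_SWAP(do_swap, type, val)				\
({								\
	type ____r = (val);					\
	if (do_swap) {						\
		switch (sizeof(____r)) {			\
		case 2: ____r = bswap_16(val); break;		\
		case 4: ____r = bswap_32(val); break;		\
		case 8: ____r = bswap_64(val); break;		\
		}						\
	}							\
	____r;							\
})

int main(void)
{
	uint64_t opd = 0x1122334455667788ULL;

	/* Only swap when the object's byte order differs from the host's. */
	printf("%llx\n", (unsigned long long)COND_SWAP(1, uint64_t, opd));
	printf("%x\n", COND_SWAP(1, uint16_t, (uint16_t)0xabcd));
	return 0;
}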
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index ab2f682fd44c..16de7ad4850f 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -73,8 +73,8 @@ int backwards_count; | |||
73 | char *progname; | 73 | char *progname; |
74 | 74 | ||
75 | int num_cpus; | 75 | int num_cpus; |
76 | cpu_set_t *cpu_mask; | 76 | cpu_set_t *cpu_present_set, *cpu_mask; |
77 | size_t cpu_mask_size; | 77 | size_t cpu_present_setsize, cpu_mask_size; |
78 | 78 | ||
79 | struct counters { | 79 | struct counters { |
80 | unsigned long long tsc; /* per thread */ | 80 | unsigned long long tsc; /* per thread */ |
@@ -103,6 +103,12 @@ struct timeval tv_even; | |||
103 | struct timeval tv_odd; | 103 | struct timeval tv_odd; |
104 | struct timeval tv_delta; | 104 | struct timeval tv_delta; |
105 | 105 | ||
106 | int mark_cpu_present(int pkg, int core, int cpu) | ||
107 | { | ||
108 | CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
106 | /* | 112 | /* |
107 | * cpu_mask_init(ncpus) | 113 | * cpu_mask_init(ncpus) |
108 | * | 114 | * |
@@ -118,6 +124,18 @@ void cpu_mask_init(int ncpus) | |||
118 | } | 124 | } |
119 | cpu_mask_size = CPU_ALLOC_SIZE(ncpus); | 125 | cpu_mask_size = CPU_ALLOC_SIZE(ncpus); |
120 | CPU_ZERO_S(cpu_mask_size, cpu_mask); | 126 | CPU_ZERO_S(cpu_mask_size, cpu_mask); |
127 | |||
128 | /* | ||
129 | * Allocate and initialize cpu_present_set | ||
130 | */ | ||
131 | cpu_present_set = CPU_ALLOC(ncpus); | ||
132 | if (cpu_present_set == NULL) { | ||
133 | perror("CPU_ALLOC"); | ||
134 | exit(3); | ||
135 | } | ||
136 | cpu_present_setsize = CPU_ALLOC_SIZE(ncpus); | ||
137 | CPU_ZERO_S(cpu_present_setsize, cpu_present_set); | ||
138 | for_all_cpus(mark_cpu_present); | ||
121 | } | 139 | } |
122 | 140 | ||
123 | void cpu_mask_uninit() | 141 | void cpu_mask_uninit() |
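cpu_mask_init() above builds cpu_present_set with glibc's dynamically sized cpu_set_t API (CPU_ALLOC, CPU_ALLOC_SIZE, CPU_ZERO_S, CPU_SET_S). The short program below exercises that API on its own; the CPU count is hard-coded for illustration, whereas turbostat derives it from its topology walk.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int ncpus = 8;			/* illustrative; not probed here */
	cpu_set_t *set = CPU_ALLOC(ncpus);
	size_t setsize = CPU_ALLOC_SIZE(ncpus);

	if (set == NULL) {
		perror("CPU_ALLOC");
		exit(3);
	}
	CPU_ZERO_S(setsize, set);

	for (int cpu = 0; cpu < ncpus; cpu++)
		CPU_SET_S(cpu, setsize, set);	/* mark each CPU present */

	printf("cpus set: %d\n", CPU_COUNT_S(setsize, set));

	CPU_FREE(set);
	return 0;
}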
@@ -125,6 +143,9 @@ void cpu_mask_uninit() | |||
125 | CPU_FREE(cpu_mask); | 143 | CPU_FREE(cpu_mask); |
126 | cpu_mask = NULL; | 144 | cpu_mask = NULL; |
127 | cpu_mask_size = 0; | 145 | cpu_mask_size = 0; |
146 | CPU_FREE(cpu_present_set); | ||
147 | cpu_present_set = NULL; | ||
148 | cpu_present_setsize = 0; | ||
128 | } | 149 | } |
129 | 150 | ||
130 | int cpu_migrate(int cpu) | 151 | int cpu_migrate(int cpu) |
@@ -912,6 +933,8 @@ int is_snb(unsigned int family, unsigned int model) | |||
912 | switch (model) { | 933 | switch (model) { |
913 | case 0x2A: | 934 | case 0x2A: |
914 | case 0x2D: | 935 | case 0x2D: |
936 | case 0x3A: /* IVB */ | ||
937 | case 0x3D: /* IVB Xeon */ | ||
915 | return 1; | 938 | return 1; |
916 | } | 939 | } |
917 | return 0; | 940 | return 0; |
@@ -1047,6 +1070,9 @@ int fork_it(char **argv) | |||
1047 | int retval; | 1070 | int retval; |
1048 | pid_t child_pid; | 1071 | pid_t child_pid; |
1049 | get_counters(cnt_even); | 1072 | get_counters(cnt_even); |
1073 | |||
1074 | /* clear affinity side-effect of get_counters() */ | ||
1075 | sched_setaffinity(0, cpu_present_setsize, cpu_present_set); | ||
1050 | gettimeofday(&tv_even, (struct timezone *)NULL); | 1076 | gettimeofday(&tv_even, (struct timezone *)NULL); |
1051 | 1077 | ||
1052 | child_pid = fork(); | 1078 | child_pid = fork(); |
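The fork_it() hunk above resets the process affinity to the full present set because get_counters() pins the process to one CPU at a time while polling counters. The sketch below reproduces that pin-then-reset sequence with the classic fixed-size cpu_set_t calls; the pin to CPU 0 merely stands in for the per-CPU counter pass.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	cpu_set_t one, all;

	/* Pin to CPU 0, standing in for the per-CPU collection pass. */
	CPU_ZERO(&one);
	CPU_SET(0, &one);
	sched_setaffinity(0, sizeof(one), &one);

	/* Clear the side effect before fork(): re-allow every online CPU so
	 * the measured child is not confined to the last CPU polled. */
	CPU_ZERO(&all);
	for (long cpu = 0; cpu < ncpus; cpu++)
		CPU_SET(cpu, &all);
	if (sched_setaffinity(0, sizeof(all), &all) == -1)
		perror("sched_setaffinity");

	return 0;
}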
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c index 01f572c10c71..b1e091ae2f37 100644 --- a/virt/kvm/assigned-dev.c +++ b/virt/kvm/assigned-dev.c | |||
@@ -635,7 +635,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, | |||
635 | int r = 0, idx; | 635 | int r = 0, idx; |
636 | struct kvm_assigned_dev_kernel *match; | 636 | struct kvm_assigned_dev_kernel *match; |
637 | struct pci_dev *dev; | 637 | struct pci_dev *dev; |
638 | u8 header_type; | ||
639 | 638 | ||
640 | if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)) | 639 | if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)) |
641 | return -EINVAL; | 640 | return -EINVAL; |
@@ -668,8 +667,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, | |||
668 | } | 667 | } |
669 | 668 | ||
670 | /* Don't allow bridges to be assigned */ | 669 | /* Don't allow bridges to be assigned */ |
671 | pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); | 670 | if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) { |
672 | if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) { | ||
673 | r = -EPERM; | 671 | r = -EPERM; |
674 | goto out_put; | 672 | goto out_put; |
675 | } | 673 | } |
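The assigned-dev change above drops a config-space read because struct pci_dev already caches the header type, masked to its low 7 bits, in dev->hdr_type. The userspace sketch below shows what the removed read was fetching: byte 0x0e of PCI config space, with bit 7 flagging a multi-function device. The sysfs device address is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char hdr;
	int fd = open("/sys/bus/pci/devices/0000:00:00.0/config", O_RDONLY);

	if (fd < 0 || pread(fd, &hdr, 1, 0x0e) != 1) {
		perror("pci config");
		return 1;
	}
	/* Mask off the multi-function bit before classifying, as the kernel
	 * does when it fills in dev->hdr_type. */
	printf("header type %#x (%s)\n", hdr & 0x7f,
	       (hdr & 0x7f) == 0 ? "normal" : "bridge/cardbus");
	close(fd);
	return 0;
}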
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index a6a0365475ed..5afb43114020 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c | |||
@@ -332,6 +332,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, | |||
332 | */ | 332 | */ |
333 | hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) | 333 | hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) |
334 | if (ei->type == KVM_IRQ_ROUTING_MSI || | 334 | if (ei->type == KVM_IRQ_ROUTING_MSI || |
335 | ue->type == KVM_IRQ_ROUTING_MSI || | ||
335 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) | 336 | ue->u.irqchip.irqchip == ei->irqchip.irqchip) |
336 | return r; | 337 | return r; |
337 | 338 | ||