author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-06-20 19:24:02 -0400
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-06-20 19:24:02 -0400
commit | ff446f2001cf9b5ed97c6256c4ee3549d3b7abed (patch)
tree | ec5fd60a8b9c21ae85eaa99003f26e6c342e2234
parent | 78091dc2f6f04b03131218df590c877cadcd9379 (diff)
parent | 485802a6c524e62b5924849dd727ddbb1497cc71 (diff)
Merge 3.5-rc3 into usb-next
This lets us pick up the USB fixes that went into 3.5-rc3 in this branch,
as we want them here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
292 files changed, 4218 insertions, 1754 deletions
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt index ab1e8d7004c5..5cb9a1972460 100644 --- a/Documentation/networking/stmmac.txt +++ b/Documentation/networking/stmmac.txt | |||
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC | |||
10 | (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 | 10 | (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 |
11 | FF1152AMT0221 D1215994A VIRTEX FPGA board. | 11 | FF1152AMT0221 D1215994A VIRTEX FPGA board. |
12 | 12 | ||
13 | DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100 | 13 | DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether |
14 | Universal version 4.0 have been used for developing this driver. | 14 | MAC 10/100 Universal version 4.0 have been used for developing this driver. |
15 | 15 | ||
16 | This driver supports both the platform bus and PCI. | 16 | This driver supports both the platform bus and PCI. |
17 | 17 | ||
@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature. | |||
54 | When one or more packets are received, an interrupt happens. The interrupts | 54 | When one or more packets are received, an interrupt happens. The interrupts |
55 | are not queued so the driver has to scan all the descriptors in the ring during | 55 | are not queued so the driver has to scan all the descriptors in the ring during |
56 | the receive process. | 56 | the receive process. |
57 | This is based on NAPI so the interrupt handler signals only if there is work to be | 57 | This is based on NAPI so the interrupt handler signals only if there is work |
58 | done, and it exits. | 58 | to be done, and it exits. |
59 | Then the poll method will be scheduled at some future point. | 59 | Then the poll method will be scheduled at some future point. |
60 | The incoming packets are stored, by the DMA, in a list of pre-allocated socket | 60 | The incoming packets are stored, by the DMA, in a list of pre-allocated socket |
61 | buffers in order to avoid the memcpy (Zero-copy). | 61 | buffers in order to avoid the memcpy (Zero-copy). |
62 | 62 | ||
63 | 4.3) Timer-Driver Interrupt | 63 | 4.3) Timer-Driver Interrupt |
64 | Instead of having the device that asynchronously notifies the frame receptions, the | 64 | Instead of having the device that asynchronously notifies the frame receptions, |
65 | driver configures a timer to generate an interrupt at regular intervals. | 65 | the driver configures a timer to generate an interrupt at regular intervals. |
66 | Based on the granularity of the timer, the frames that are received by the device | 66 | Based on the granularity of the timer, the frames that are received by the |
67 | will experience different levels of latency. Some NICs have dedicated timer | 67 | device will experience different levels of latency. Some NICs have dedicated |
68 | device to perform this task. STMMAC can use either the RTC device or the TMU | 68 | timer device to perform this task. STMMAC can use either the RTC device or the |
69 | channel 2 on STLinux platforms. | 69 | TMU channel 2 on STLinux platforms. |
70 | The timer's frequency can be passed to the driver as a parameter; when changing it, | 70 | The timer's frequency can be passed to the driver as a parameter; when changing it, |
71 | take care of both the hardware capability and the network stability/performance impact. | 71 | take care of both the hardware capability and the network stability/performance impact. |
72 | Several performance tests on STM platforms showed this optimisation allows to spare | 72 | Several performance tests on STM platforms showed this optimisation allows to |
73 | the CPU while having the maximum throughput. | 73 | spare the CPU while having the maximum throughput. |
74 | 74 | ||
75 | 4.4) WOL | 75 | 4.4) WOL |
76 | Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC | 76 | Wake up on Lan feature through Magic and Unicast frames are supported for the |
77 | core. | 77 | GMAC core. |
78 | 78 | ||
79 | 4.5) DMA descriptors | 79 | 4.5) DMA descriptors |
80 | Driver handles both normal and enhanced descriptors. The latter has been only | 80 | Driver handles both normal and enhanced descriptors. The latter has been only |
@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform | |||
106 | These are included in the include/linux/stmmac.h header file | 106 | These are included in the include/linux/stmmac.h header file |
107 | and detailed below as well: | 107 | and detailed below as well: |
108 | 108 | ||
109 | struct plat_stmmacenet_data { | 109 | struct plat_stmmacenet_data { |
110 | char *phy_bus_name; | ||
110 | int bus_id; | 111 | int bus_id; |
111 | int phy_addr; | 112 | int phy_addr; |
112 | int interface; | 113 | int interface; |
@@ -124,19 +125,24 @@ and detailed below as well: | |||
124 | void (*bus_setup)(void __iomem *ioaddr); | 125 | void (*bus_setup)(void __iomem *ioaddr); |
125 | int (*init)(struct platform_device *pdev); | 126 | int (*init)(struct platform_device *pdev); |
126 | void (*exit)(struct platform_device *pdev); | 127 | void (*exit)(struct platform_device *pdev); |
128 | void *custom_cfg; | ||
129 | void *custom_data; | ||
127 | void *bsp_priv; | 130 | void *bsp_priv; |
128 | }; | 131 | }; |
129 | 132 | ||
130 | Where: | 133 | Where: |
134 | o phy_bus_name: phy bus name to attach to the stmmac. | ||
131 | o bus_id: bus identifier. | 135 | o bus_id: bus identifier. |
132 | o phy_addr: the physical address can be passed from the platform. | 136 | o phy_addr: the physical address can be passed from the platform. |
133 | If it is set to -1 the driver will automatically | 137 | If it is set to -1 the driver will automatically |
134 | detect it at run-time by probing all the 32 addresses. | 138 | detect it at run-time by probing all the 32 addresses. |
135 | o interface: PHY device's interface. | 139 | o interface: PHY device's interface. |
136 | o mdio_bus_data: specific platform fields for the MDIO bus. | 140 | o mdio_bus_data: specific platform fields for the MDIO bus. |
137 | o pbl: the Programmable Burst Length is maximum number of beats to | 141 | o dma_cfg: internal DMA parameters |
142 | o pbl: the Programmable Burst Length is maximum number of beats to | ||
138 | be transferred in one DMA transaction. | 143 | be transferred in one DMA transaction. |
139 | GMAC also enables the 4xPBL by default. | 144 | GMAC also enables the 4xPBL by default. |
145 | o fixed_burst/mixed_burst/burst_len | ||
140 | o clk_csr: fixed CSR Clock range selection. | 146 | o clk_csr: fixed CSR Clock range selection. |
141 | o has_gmac: uses the GMAC core. | 147 | o has_gmac: uses the GMAC core. |
142 | o enh_desc: if sets the MAC will use the enhanced descriptor structure. | 148 | o enh_desc: if sets the MAC will use the enhanced descriptor structure. |
@@ -160,8 +166,9 @@ Where: | |||
160 | this is sometimes necessary on some platforms (e.g. ST boxes) | 166 | this is sometimes necessary on some platforms (e.g. ST boxes) |
161 | where the HW needs to have set some PIO lines or system cfg | 167 | where the HW needs to have set some PIO lines or system cfg |
162 | registers. | 168 | registers. |
163 | o custom_cfg: this is a custom configuration that can be passed while | 169 | o custom_cfg/custom_data: this is a custom configuration that can be passed |
164 | initialising the resources. | 170 | while initialising the resources. |
171 | o bsp_priv: another private pointer. | ||
165 | 172 | ||
166 | For the MDIO bus we have: | 173 | For the MDIO bus we have: |
167 | 174 | ||
@@ -180,7 +187,6 @@ Where: | |||
180 | o irqs: list of IRQs, one per PHY. | 187 | o irqs: list of IRQs, one per PHY. |
181 | o probed_phy_irq: if irqs is NULL, use this for probed PHY. | 188 | o probed_phy_irq: if irqs is NULL, use this for probed PHY. |
182 | 189 | ||
183 | |||
184 | For DMA engine we have the following internal fields that should be | 190 | For DMA engine we have the following internal fields that should be |
185 | tuned according to the HW capabilities. | 191 | tuned according to the HW capabilities. |
186 | 192 | ||
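Not part of the commit, but as a reader aid for the platform-data fields documented above: a minimal, hypothetical board-file sketch in C. Every concrete value below (bus id, PHY address, interface mode, bus name, callback) is an illustrative assumption, not taken from any real board.

```c
/*
 * Hypothetical sketch only -- not from this patch. Shows roughly how a
 * board file could fill in the plat_stmmacenet_data fields documented
 * above; all concrete values here are made-up examples.
 */
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/stmmac.h>

static int example_stmmac_init(struct platform_device *pdev)
{
	/* e.g. set up PIO lines or system config registers for the MAC */
	return 0;
}

static struct plat_stmmacenet_data example_stmmac_pdata = {
	.phy_bus_name	= "stmmac",			/* MDIO bus to attach to */
	.bus_id		= 0,
	.phy_addr	= -1,				/* probe all 32 addresses */
	.interface	= PHY_INTERFACE_MODE_MII,
	.init		= example_stmmac_init,		/* board-specific setup hook */
	.bsp_priv	= NULL,				/* private pointer for init/exit */
};
```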
diff --git a/MAINTAINERS b/MAINTAINERS index 14bc7071f9df..3e30a3afe2a4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1646,11 +1646,11 @@ S: Maintained | |||
1646 | F: drivers/gpio/gpio-bt8xx.c | 1646 | F: drivers/gpio/gpio-bt8xx.c |
1647 | 1647 | ||
1648 | BTRFS FILE SYSTEM | 1648 | BTRFS FILE SYSTEM |
1649 | M: Chris Mason <chris.mason@oracle.com> | 1649 | M: Chris Mason <chris.mason@fusionio.com> |
1650 | L: linux-btrfs@vger.kernel.org | 1650 | L: linux-btrfs@vger.kernel.org |
1651 | W: http://btrfs.wiki.kernel.org/ | 1651 | W: http://btrfs.wiki.kernel.org/ |
1652 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ | 1652 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ |
1653 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git | 1653 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git |
1654 | S: Maintained | 1654 | S: Maintained |
1655 | F: Documentation/filesystems/btrfs.txt | 1655 | F: Documentation/filesystems/btrfs.txt |
1656 | F: fs/btrfs/ | 1656 | F: fs/btrfs/ |
@@ -1800,6 +1800,9 @@ F: include/linux/cfag12864b.h | |||
1800 | CFG80211 and NL80211 | 1800 | CFG80211 and NL80211 |
1801 | M: Johannes Berg <johannes@sipsolutions.net> | 1801 | M: Johannes Berg <johannes@sipsolutions.net> |
1802 | L: linux-wireless@vger.kernel.org | 1802 | L: linux-wireless@vger.kernel.org |
1803 | W: http://wireless.kernel.org/ | ||
1804 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git | ||
1805 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
1803 | S: Maintained | 1806 | S: Maintained |
1804 | F: include/linux/nl80211.h | 1807 | F: include/linux/nl80211.h |
1805 | F: include/net/cfg80211.h | 1808 | F: include/net/cfg80211.h |
@@ -4349,7 +4352,8 @@ MAC80211 | |||
4349 | M: Johannes Berg <johannes@sipsolutions.net> | 4352 | M: Johannes Berg <johannes@sipsolutions.net> |
4350 | L: linux-wireless@vger.kernel.org | 4353 | L: linux-wireless@vger.kernel.org |
4351 | W: http://linuxwireless.org/ | 4354 | W: http://linuxwireless.org/ |
4352 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git | 4355 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git |
4356 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
4353 | S: Maintained | 4357 | S: Maintained |
4354 | F: Documentation/networking/mac80211-injection.txt | 4358 | F: Documentation/networking/mac80211-injection.txt |
4355 | F: include/net/mac80211.h | 4359 | F: include/net/mac80211.h |
@@ -4360,7 +4364,8 @@ M: Stefano Brivio <stefano.brivio@polimi.it> | |||
4360 | M: Mattias Nissler <mattias.nissler@gmx.de> | 4364 | M: Mattias Nissler <mattias.nissler@gmx.de> |
4361 | L: linux-wireless@vger.kernel.org | 4365 | L: linux-wireless@vger.kernel.org |
4362 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID | 4366 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID |
4363 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git | 4367 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git |
4368 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
4364 | S: Maintained | 4369 | S: Maintained |
4365 | F: net/mac80211/rc80211_pid* | 4370 | F: net/mac80211/rc80211_pid* |
4366 | 4371 | ||
@@ -5711,6 +5716,9 @@ F: include/linux/remoteproc.h | |||
5711 | RFKILL | 5716 | RFKILL |
5712 | M: Johannes Berg <johannes@sipsolutions.net> | 5717 | M: Johannes Berg <johannes@sipsolutions.net> |
5713 | L: linux-wireless@vger.kernel.org | 5718 | L: linux-wireless@vger.kernel.org |
5719 | W: http://wireless.kernel.org/ | ||
5720 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git | ||
5721 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git | ||
5714 | S: Maintained | 5722 | S: Maintained |
5715 | F: Documentation/rfkill.txt | 5723 | F: Documentation/rfkill.txt |
5716 | F: net/rfkill/ | 5724 | F: net/rfkill/ |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 5 | 2 | PATCHLEVEL = 5 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc2 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Saber-toothed Squirrel | 5 | NAME = Saber-toothed Squirrel |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 9d7eb530f95f..aa07f5938f05 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
366 | struct safe_buffer *buf; | 366 | struct safe_buffer *buf; |
367 | unsigned long off; | 367 | unsigned long off; |
368 | 368 | ||
369 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 369 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
370 | __func__, addr, off, sz, dir); | 370 | __func__, addr, sz, dir); |
371 | 371 | ||
372 | buf = find_safe_buffer_dev(dev, addr, __func__); | 372 | buf = find_safe_buffer_dev(dev, addr, __func__); |
373 | if (!buf) | 373 | if (!buf) |
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, | |||
377 | 377 | ||
378 | BUG_ON(buf->direction != dir); | 378 | BUG_ON(buf->direction != dir); |
379 | 379 | ||
380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 380 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 381 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
382 | buf->safe, buf->safe_dma_addr); | 382 | buf->safe, buf->safe_dma_addr); |
383 | 383 | ||
384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 384 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
406 | struct safe_buffer *buf; | 406 | struct safe_buffer *buf; |
407 | unsigned long off; | 407 | unsigned long off; |
408 | 408 | ||
409 | dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", | 409 | dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", |
410 | __func__, addr, off, sz, dir); | 410 | __func__, addr, sz, dir); |
411 | 411 | ||
412 | buf = find_safe_buffer_dev(dev, addr, __func__); | 412 | buf = find_safe_buffer_dev(dev, addr, __func__); |
413 | if (!buf) | 413 | if (!buf) |
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, | |||
417 | 417 | ||
418 | BUG_ON(buf->direction != dir); | 418 | BUG_ON(buf->direction != dir); |
419 | 419 | ||
420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 420 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", |
421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 421 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, |
422 | buf->safe, buf->safe_dma_addr); | 422 | buf->safe, buf->safe_dma_addr); |
423 | 423 | ||
424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 424 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 54d49ddb9b81..5fb47a14f4ba 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
@@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name, | |||
271 | goto err; | 271 | goto err; |
272 | } | 272 | } |
273 | 273 | ||
274 | r = omap_device_register(pdev); | 274 | r = platform_device_add(pdev); |
275 | if (r) { | 275 | if (r) { |
276 | pr_err("Could not register omap_device for %s\n", pdev_name); | 276 | pr_err("Could not register platform_device for %s\n", pdev_name); |
277 | goto err; | 277 | goto err; |
278 | } | 278 | } |
279 | 279 | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 106c4c0ebccd..d766e4256b74 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -228,7 +228,7 @@ static pte_t **consistent_pte; | |||
228 | 228 | ||
229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M | 229 | #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M |
230 | 230 | ||
231 | unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; | 231 | static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; |
232 | 232 | ||
233 | void __init init_consistent_dma_size(unsigned long size) | 233 | void __init init_consistent_dma_size(unsigned long size) |
234 | { | 234 | { |
@@ -321,7 +321,7 @@ static struct arm_vmregion_head coherent_head = { | |||
321 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), | 321 | .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), |
322 | }; | 322 | }; |
323 | 323 | ||
324 | size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; | 324 | static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; |
325 | 325 | ||
326 | static int __init early_coherent_pool(char *p) | 326 | static int __init early_coherent_pool(char *p) |
327 | { | 327 | { |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c21d06c7dd7e..f54d59219764 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size); | |||
212 | * allocations. This must be the smallest DMA mask in the system, | 212 | * allocations. This must be the smallest DMA mask in the system, |
213 | * so a successful GFP_DMA allocation will always satisfy this. | 213 | * so a successful GFP_DMA allocation will always satisfy this. |
214 | */ | 214 | */ |
215 | u32 arm_dma_limit; | 215 | phys_addr_t arm_dma_limit; |
216 | 216 | ||
217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
218 | unsigned long dma_size) | 218 | unsigned long dma_size) |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 93dc0c17cdcb..c471436c7952 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #ifdef CONFIG_ZONE_DMA | 64 | #ifdef CONFIG_ZONE_DMA |
65 | extern u32 arm_dma_limit; | 65 | extern phys_addr_t arm_dma_limit; |
66 | #else | 66 | #else |
67 | #define arm_dma_limit ((u32)~0) | 67 | #define arm_dma_limit ((u32)~0) |
68 | #endif | 68 | #endif |
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index cac5b6be572a..147120128260 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -7,6 +7,8 @@ config M68K | |||
7 | select GENERIC_IRQ_SHOW | 7 | select GENERIC_IRQ_SHOW |
8 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS | 8 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS |
9 | select GENERIC_CPU_DEVICES | 9 | select GENERIC_CPU_DEVICES |
10 | select GENERIC_STRNCPY_FROM_USER if MMU | ||
11 | select GENERIC_STRNLEN_USER if MMU | ||
10 | select FPU if MMU | 12 | select FPU if MMU |
11 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE | 13 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE |
12 | 14 | ||
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 1a922fad76f7..eafa2539a8ee 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild | |||
@@ -1,2 +1,4 @@ | |||
1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
2 | header-y += cachectl.h | 2 | header-y += cachectl.h |
3 | |||
4 | generic-y += word-at-a-time.h | ||
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h index d63b99ff7ff7..497c31c803ff 100644 --- a/arch/m68k/include/asm/m528xsim.h +++ b/arch/m68k/include/asm/m528xsim.h | |||
@@ -86,7 +86,7 @@ | |||
86 | /* | 86 | /* |
87 | * QSPI module. | 87 | * QSPI module. |
88 | */ | 88 | */ |
89 | #define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340) | 89 | #define MCFQSPI_BASE (MCF_IPSBAR + 0x340) |
90 | #define MCFQSPI_SIZE 0x40 | 90 | #define MCFQSPI_SIZE 0x40 |
91 | 91 | ||
92 | #define MCFQSPI_CS0 147 | 92 | #define MCFQSPI_CS0 147 |
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 9c80cd515b20..472c891a4aee 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h | |||
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
379 | #define copy_from_user(to, from, n) __copy_from_user(to, from, n) | 379 | #define copy_from_user(to, from, n) __copy_from_user(to, from, n) |
380 | #define copy_to_user(to, from, n) __copy_to_user(to, from, n) | 380 | #define copy_to_user(to, from, n) __copy_to_user(to, from, n) |
381 | 381 | ||
382 | long strncpy_from_user(char *dst, const char __user *src, long count); | 382 | #define user_addr_max() \ |
383 | long strnlen_user(const char __user *src, long n); | 383 | (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) |
384 | |||
385 | extern long strncpy_from_user(char *dst, const char __user *src, long count); | ||
386 | extern __must_check long strlen_user(const char __user *str); | ||
387 | extern __must_check long strnlen_user(const char __user *str, long n); | ||
388 | |||
384 | unsigned long __clear_user(void __user *to, unsigned long n); | 389 | unsigned long __clear_user(void __user *to, unsigned long n); |
385 | 390 | ||
386 | #define clear_user __clear_user | 391 | #define clear_user __clear_user |
387 | 392 | ||
388 | #define strlen_user(str) strnlen_user(str, 32767) | ||
389 | |||
390 | #endif /* _M68K_UACCESS_H */ | 393 | #endif /* _M68K_UACCESS_H */ |
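Not part of the patch: with GENERIC_STRNCPY_FROM_USER/GENERIC_STRNLEN_USER selected, the generic lib/ routines need the user_addr_max() macro added above to bound how far they may walk into userspace. A rough sketch of that bound check follows; bounded_user_count() is an assumed name for illustration, not the actual lib/ code.

```c
/*
 * Rough sketch, not the actual lib/ code: the generic string helpers
 * clamp the caller's count against user_addr_max() before touching
 * userspace, which is why the arch must now provide that macro.
 */
#include <linux/uaccess.h>

static inline long bounded_user_count(const char __user *src, long count)
{
	unsigned long max_addr = user_addr_max();
	unsigned long src_addr = (unsigned long)src;
	unsigned long room;

	if (count <= 0 || src_addr >= max_addr)
		return 0;			/* nothing to do / not accessible */

	room = max_addr - src_addr;		/* bytes left in the user segment */
	if ((unsigned long)count > room)
		count = room;			/* clamp to the segment limit */
	return count;
}
```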
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 8b4a2222e658..1bc10e62b9af 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c | |||
@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void) | |||
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | #ifdef CONFIG_COLDFIRE | 289 | #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) |
290 | asmlinkage int syscall_trace_enter(void) | 290 | asmlinkage int syscall_trace_enter(void) |
291 | { | 291 | { |
292 | int ret = 0; | 292 | int ret = 0; |
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index d7deb7fc7eb5..707f0573ec6b 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c | |||
@@ -85,7 +85,7 @@ void __init time_init(void) | |||
85 | mach_sched_init(timer_interrupt); | 85 | mach_sched_init(timer_interrupt); |
86 | } | 86 | } |
87 | 87 | ||
88 | #ifdef CONFIG_M68KCLASSIC | 88 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
89 | 89 | ||
90 | u32 arch_gettimeoffset(void) | 90 | u32 arch_gettimeoffset(void) |
91 | { | 91 | { |
@@ -108,4 +108,4 @@ static int __init rtc_init(void) | |||
108 | 108 | ||
109 | module_init(rtc_init); | 109 | module_init(rtc_init); |
110 | 110 | ||
111 | #endif /* CONFIG_M68KCLASSIC */ | 111 | #endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ |
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c index 5664386338da..5e97f2ee7c11 100644 --- a/arch/m68k/lib/uaccess.c +++ b/arch/m68k/lib/uaccess.c | |||
@@ -104,80 +104,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, | |||
104 | EXPORT_SYMBOL(__generic_copy_to_user); | 104 | EXPORT_SYMBOL(__generic_copy_to_user); |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * Copy a null terminated string from userspace. | ||
108 | */ | ||
109 | long strncpy_from_user(char *dst, const char __user *src, long count) | ||
110 | { | ||
111 | long res; | ||
112 | char c; | ||
113 | |||
114 | if (count <= 0) | ||
115 | return count; | ||
116 | |||
117 | asm volatile ("\n" | ||
118 | "1: "MOVES".b (%2)+,%4\n" | ||
119 | " move.b %4,(%1)+\n" | ||
120 | " jeq 2f\n" | ||
121 | " subq.l #1,%3\n" | ||
122 | " jne 1b\n" | ||
123 | "2: sub.l %3,%0\n" | ||
124 | "3:\n" | ||
125 | " .section .fixup,\"ax\"\n" | ||
126 | " .even\n" | ||
127 | "10: move.l %5,%0\n" | ||
128 | " jra 3b\n" | ||
129 | " .previous\n" | ||
130 | "\n" | ||
131 | " .section __ex_table,\"a\"\n" | ||
132 | " .align 4\n" | ||
133 | " .long 1b,10b\n" | ||
134 | " .previous" | ||
135 | : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c) | ||
136 | : "i" (-EFAULT), "0" (count)); | ||
137 | |||
138 | return res; | ||
139 | } | ||
140 | EXPORT_SYMBOL(strncpy_from_user); | ||
141 | |||
142 | /* | ||
143 | * Return the size of a string (including the ending 0) | ||
144 | * | ||
145 | * Return 0 on exception, a value greater than N if too long | ||
146 | */ | ||
147 | long strnlen_user(const char __user *src, long n) | ||
148 | { | ||
149 | char c; | ||
150 | long res; | ||
151 | |||
152 | asm volatile ("\n" | ||
153 | "1: subq.l #1,%1\n" | ||
154 | " jmi 3f\n" | ||
155 | "2: "MOVES".b (%0)+,%2\n" | ||
156 | " tst.b %2\n" | ||
157 | " jne 1b\n" | ||
158 | " jra 4f\n" | ||
159 | "\n" | ||
160 | "3: addq.l #1,%0\n" | ||
161 | "4: sub.l %4,%0\n" | ||
162 | "5:\n" | ||
163 | " .section .fixup,\"ax\"\n" | ||
164 | " .even\n" | ||
165 | "20: sub.l %0,%0\n" | ||
166 | " jra 5b\n" | ||
167 | " .previous\n" | ||
168 | "\n" | ||
169 | " .section __ex_table,\"a\"\n" | ||
170 | " .align 4\n" | ||
171 | " .long 2b,20b\n" | ||
172 | " .previous\n" | ||
173 | : "=&a" (res), "+d" (n), "=&d" (c) | ||
174 | : "0" (src), "r" (src)); | ||
175 | |||
176 | return res; | ||
177 | } | ||
178 | EXPORT_SYMBOL(strnlen_user); | ||
179 | |||
180 | /* | ||
181 | * Zero Userspace | 107 | * Zero Userspace |
182 | */ | 108 | */ |
183 | 109 | ||
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c index c801c172b822..f4dc9b295609 100644 --- a/arch/m68k/platform/68328/timers.c +++ b/arch/m68k/platform/68328/timers.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | static u32 m68328_tick_cnt; | 55 | static u32 m68328_tick_cnt; |
56 | static irq_handler_t timer_interrupt; | ||
56 | 57 | ||
57 | /***************************************************************************/ | 58 | /***************************************************************************/ |
58 | 59 | ||
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) | |||
62 | TSTAT &= 0; | 63 | TSTAT &= 0; |
63 | 64 | ||
64 | m68328_tick_cnt += TICKS_PER_JIFFY; | 65 | m68328_tick_cnt += TICKS_PER_JIFFY; |
65 | return arch_timer_interrupt(irq, dummy); | 66 | return timer_interrupt(irq, dummy); |
66 | } | 67 | } |
67 | 68 | ||
68 | /***************************************************************************/ | 69 | /***************************************************************************/ |
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = { | |||
99 | 100 | ||
100 | /***************************************************************************/ | 101 | /***************************************************************************/ |
101 | 102 | ||
102 | void hw_timer_init(void) | 103 | void hw_timer_init(irq_handler_t handler) |
103 | { | 104 | { |
104 | /* disable timer 1 */ | 105 | /* disable timer 1 */ |
105 | TCTL = 0; | 106 | TCTL = 0; |
@@ -115,6 +116,7 @@ void hw_timer_init(void) | |||
115 | /* Enable timer 1 */ | 116 | /* Enable timer 1 */ |
116 | TCTL |= TCTL_TEN; | 117 | TCTL |= TCTL_TEN; |
117 | clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); | 118 | clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); |
119 | timer_interrupt = handler; | ||
118 | } | 120 | } |
119 | 121 | ||
120 | /***************************************************************************/ | 122 | /***************************************************************************/ |
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c index 255fc03913e9..9877cefad1e7 100644 --- a/arch/m68k/platform/68360/config.c +++ b/arch/m68k/platform/68360/config.c | |||
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void); | |||
35 | #define OSCILLATOR (unsigned long int)33000000 | 35 | #define OSCILLATOR (unsigned long int)33000000 |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | static irq_handler_t timer_interrupt; | ||
38 | unsigned long int system_clock; | 39 | unsigned long int system_clock; |
39 | 40 | ||
40 | extern QUICC *pquicc; | 41 | extern QUICC *pquicc; |
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) | |||
52 | 53 | ||
53 | pquicc->timer_ter1 = 0x0002; /* clear timer event */ | 54 | pquicc->timer_ter1 = 0x0002; /* clear timer event */ |
54 | 55 | ||
55 | return arch_timer_interrupt(irq, dummy); | 56 | return timer_interrupt(irq, dummy); |
56 | } | 57 | } |
57 | 58 | ||
58 | static struct irqaction m68360_timer_irq = { | 59 | static struct irqaction m68360_timer_irq = { |
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = { | |||
61 | .handler = hw_tick, | 62 | .handler = hw_tick, |
62 | }; | 63 | }; |
63 | 64 | ||
64 | void hw_timer_init(void) | 65 | void hw_timer_init(irq_handler_t handler) |
65 | { | 66 | { |
66 | unsigned char prescaler; | 67 | unsigned char prescaler; |
67 | unsigned short tgcr_save; | 68 | unsigned short tgcr_save; |
@@ -94,6 +95,8 @@ void hw_timer_init(void) | |||
94 | 95 | ||
95 | pquicc->timer_ter1 = 0x0003; /* clear timer events */ | 96 | pquicc->timer_ter1 = 0x0003; /* clear timer events */ |
96 | 97 | ||
98 | timer_interrupt = handler; | ||
99 | |||
97 | /* enable timer 1 interrupt in CIMR */ | 100 | /* enable timer 1 interrupt in CIMR */ |
98 | setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); | 101 | setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); |
99 | 102 | ||
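Not part of the patch: both m68knommu timer platforms above now take the tick handler as an argument instead of calling a fixed arch_timer_interrupt(). A sketch of the calling side, with illustrative names only:

```c
/*
 * Sketch only; names are illustrative. The common timer setup code now
 * hands its tick handler down to the platform, and the platform's
 * hw_tick() calls it back on every timer interrupt.
 */
#include <linux/init.h>
#include <linux/interrupt.h>

extern void hw_timer_init(irq_handler_t handler);

static irqreturn_t example_tick(int irq, void *dev_id)
{
	/* account the tick, run the periodic timer machinery, ... */
	return IRQ_HANDLED;
}

void __init example_sched_init(void)
{
	hw_timer_init(example_tick);	/* register the handler with the platform timer */
}
```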
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index c9aac24b02e2..32b394f3b854 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
@@ -100,6 +100,9 @@ static inline void hard_irq_disable(void) | |||
100 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; | 100 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; |
101 | } | 101 | } |
102 | 102 | ||
103 | /* include/linux/interrupt.h needs hard_irq_disable to be a macro */ | ||
104 | #define hard_irq_disable hard_irq_disable | ||
105 | |||
103 | /* | 106 | /* |
104 | * This is called by asynchronous interrupts to conditionally | 107 | * This is called by asynchronous interrupts to conditionally |
105 | * re-enable hard interrupts when soft-disabled after having | 108 | * re-enable hard interrupts when soft-disabled after having |
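Not part of the patch: the added `#define hard_irq_disable hard_irq_disable` uses a common kernel idiom so that a generic header can detect, with the preprocessor, that the architecture supplies its own implementation. A simplified illustration of the generic side follows; the fallback body is a placeholder, not the actual include/linux/interrupt.h text.

```c
/*
 * Simplified illustration of the idiom, not the real interrupt.h text.
 * Because powerpc defines a macro with the same name as its inline
 * function, the #ifndef below is false there and the generic fallback
 * is only used on architectures without an override.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)	/* placeholder fallback */
#endif
```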
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 99bcd0ee838d..31d9db7913e4 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -32,6 +32,8 @@ config SUPERH | |||
32 | select GENERIC_SMP_IDLE_THREAD | 32 | select GENERIC_SMP_IDLE_THREAD |
33 | select GENERIC_CLOCKEVENTS | 33 | select GENERIC_CLOCKEVENTS |
34 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST | 34 | select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST |
35 | select GENERIC_STRNCPY_FROM_USER | ||
36 | select GENERIC_STRNLEN_USER | ||
35 | help | 37 | help |
36 | The SuperH is a RISC processor targeted for use in embedded systems | 38 | The SuperH is a RISC processor targeted for use in embedded systems |
37 | and consumer electronics; it was also used in the Sega Dreamcast | 39 | and consumer electronics; it was also used in the Sega Dreamcast |
diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 46edf070da1c..aed701c7b11b 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile | |||
@@ -9,6 +9,12 @@ | |||
9 | # License. See the file "COPYING" in the main directory of this archive | 9 | # License. See the file "COPYING" in the main directory of this archive |
10 | # for more details. | 10 | # for more details. |
11 | # | 11 | # |
12 | ifneq ($(SUBARCH),$(ARCH)) | ||
13 | ifeq ($(CROSS_COMPILE),) | ||
14 | CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) | ||
15 | endif | ||
16 | endif | ||
17 | |||
12 | isa-y := any | 18 | isa-y := any |
13 | isa-$(CONFIG_SH_DSP) := sh | 19 | isa-$(CONFIG_SH_DSP) := sh |
14 | isa-$(CONFIG_CPU_SH2) := sh2 | 20 | isa-$(CONFIG_CPU_SH2) := sh2 |
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \ | |||
106 | KBUILD_DEFCONFIG := cayman_defconfig | 112 | KBUILD_DEFCONFIG := cayman_defconfig |
107 | endif | 113 | endif |
108 | 114 | ||
109 | ifneq ($(SUBARCH),$(ARCH)) | ||
110 | ifeq ($(CROSS_COMPILE),) | ||
111 | CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) | ||
112 | endif | ||
113 | endif | ||
114 | |||
115 | ifdef CONFIG_CPU_LITTLE_ENDIAN | 115 | ifdef CONFIG_CPU_LITTLE_ENDIAN |
116 | ld-bfd := elf32-$(UTS_MACHINE)-linux | 116 | ld-bfd := elf32-$(UTS_MACHINE)-linux |
117 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd) | 117 | LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd) |
118 | LDFLAGS += -EL | 118 | LDFLAGS += -EL |
119 | else | 119 | else |
120 | ld-bfd := elf32-$(UTS_MACHINE)big-linux | 120 | ld-bfd := elf32-$(UTS_MACHINE)big-linux |
121 | LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd) | 121 | LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd) |
122 | LDFLAGS += -EB | 122 | LDFLAGS += -EB |
123 | endif | 123 | endif |
124 | 124 | ||
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 7beb42322f60..7b673ddcd555 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild | |||
@@ -1,5 +1,39 @@ | |||
1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | generic-y += bitsperlong.h | ||
4 | generic-y += cputime.h | ||
5 | generic-y += current.h | ||
6 | generic-y += delay.h | ||
7 | generic-y += div64.h | ||
8 | generic-y += emergency-restart.h | ||
9 | generic-y += errno.h | ||
10 | generic-y += fcntl.h | ||
11 | generic-y += ioctl.h | ||
12 | generic-y += ipcbuf.h | ||
13 | generic-y += irq_regs.h | ||
14 | generic-y += kvm_para.h | ||
15 | generic-y += local.h | ||
16 | generic-y += local64.h | ||
17 | generic-y += param.h | ||
18 | generic-y += parport.h | ||
19 | generic-y += percpu.h | ||
20 | generic-y += poll.h | ||
21 | generic-y += mman.h | ||
22 | generic-y += msgbuf.h | ||
23 | generic-y += resource.h | ||
24 | generic-y += scatterlist.h | ||
25 | generic-y += sembuf.h | ||
26 | generic-y += serial.h | ||
27 | generic-y += shmbuf.h | ||
28 | generic-y += siginfo.h | ||
29 | generic-y += sizes.h | ||
30 | generic-y += socket.h | ||
31 | generic-y += statfs.h | ||
32 | generic-y += termbits.h | ||
33 | generic-y += termios.h | ||
34 | generic-y += ucontext.h | ||
35 | generic-y += xor.h | ||
36 | |||
3 | header-y += cachectl.h | 37 | header-y += cachectl.h |
4 | header-y += cpu-features.h | 38 | header-y += cpu-features.h |
5 | header-y += hw_breakpoint.h | 39 | header-y += hw_breakpoint.h |
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b2..000000000000 --- a/arch/sh/include/asm/bitsperlong.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/bitsperlong.h> | ||
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h deleted file mode 100644 index 6ca395d1393e..000000000000 --- a/arch/sh/include/asm/cputime.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __SH_CPUTIME_H | ||
2 | #define __SH_CPUTIME_H | ||
3 | |||
4 | #include <asm-generic/cputime.h> | ||
5 | |||
6 | #endif /* __SH_CPUTIME_H */ | ||
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h deleted file mode 100644 index 4c51401b5537..000000000000 --- a/arch/sh/include/asm/current.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/current.h> | ||
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h deleted file mode 100644 index 9670e127b7b2..000000000000 --- a/arch/sh/include/asm/delay.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/delay.h> | ||
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h deleted file mode 100644 index 6cd978cefb28..000000000000 --- a/arch/sh/include/asm/div64.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/div64.h> | ||
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/sh/include/asm/emergency-restart.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef _ASM_EMERGENCY_RESTART_H | ||
2 | #define _ASM_EMERGENCY_RESTART_H | ||
3 | |||
4 | #include <asm-generic/emergency-restart.h> | ||
5 | |||
6 | #endif /* _ASM_EMERGENCY_RESTART_H */ | ||
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h deleted file mode 100644 index 51cf6f9cebb8..000000000000 --- a/arch/sh/include/asm/errno.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_ERRNO_H | ||
2 | #define __ASM_SH_ERRNO_H | ||
3 | |||
4 | #include <asm-generic/errno.h> | ||
5 | |||
6 | #endif /* __ASM_SH_ERRNO_H */ | ||
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h deleted file mode 100644 index 46ab12db5739..000000000000 --- a/arch/sh/include/asm/fcntl.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/fcntl.h> | ||
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/arch/sh/include/asm/ioctl.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/ioctl.h> | ||
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/arch/sh/include/asm/ipcbuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/ipcbuf.h> | ||
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/arch/sh/include/asm/irq_regs.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/irq_regs.h> | ||
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/arch/sh/include/asm/kvm_para.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/kvm_para.h> | ||
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h deleted file mode 100644 index 9ed9b9cb459a..000000000000 --- a/arch/sh/include/asm/local.h +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | #ifndef __ASM_SH_LOCAL_H | ||
2 | #define __ASM_SH_LOCAL_H | ||
3 | |||
4 | #include <asm-generic/local.h> | ||
5 | |||
6 | #endif /* __ASM_SH_LOCAL_H */ | ||
7 | |||
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/arch/sh/include/asm/local64.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/local64.h> | ||
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab1..000000000000 --- a/arch/sh/include/asm/mman.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/mman.h> | ||
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h deleted file mode 100644 index 809134c644a6..000000000000 --- a/arch/sh/include/asm/msgbuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/msgbuf.h> | ||
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h deleted file mode 100644 index 965d45427975..000000000000 --- a/arch/sh/include/asm/param.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/param.h> | ||
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h deleted file mode 100644 index cf252af64590..000000000000 --- a/arch/sh/include/asm/parport.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/parport.h> | ||
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h deleted file mode 100644 index 4db4b39a4399..000000000000 --- a/arch/sh/include/asm/percpu.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ARCH_SH_PERCPU | ||
2 | #define __ARCH_SH_PERCPU | ||
3 | |||
4 | #include <asm-generic/percpu.h> | ||
5 | |||
6 | #endif /* __ARCH_SH_PERCPU */ | ||
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h deleted file mode 100644 index c98509d3149e..000000000000 --- a/arch/sh/include/asm/poll.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/poll.h> | ||
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h deleted file mode 100644 index 9c2499a86ec0..000000000000 --- a/arch/sh/include/asm/resource.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_RESOURCE_H | ||
2 | #define __ASM_SH_RESOURCE_H | ||
3 | |||
4 | #include <asm-generic/resource.h> | ||
5 | |||
6 | #endif /* __ASM_SH_RESOURCE_H */ | ||
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h deleted file mode 100644 index 98dfc3510f10..000000000000 --- a/arch/sh/include/asm/scatterlist.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_SCATTERLIST_H | ||
2 | #define __ASM_SH_SCATTERLIST_H | ||
3 | |||
4 | #include <asm-generic/scatterlist.h> | ||
5 | |||
6 | #endif /* __ASM_SH_SCATTERLIST_H */ | ||
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h deleted file mode 100644 index 7673b83cfef7..000000000000 --- a/arch/sh/include/asm/sembuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/sembuf.h> | ||
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h deleted file mode 100644 index a0cb0caff152..000000000000 --- a/arch/sh/include/asm/serial.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/serial.h> | ||
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h deleted file mode 100644 index 83c05fc2de38..000000000000 --- a/arch/sh/include/asm/shmbuf.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/shmbuf.h> | ||
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h deleted file mode 100644 index 813040ed68a9..000000000000 --- a/arch/sh/include/asm/siginfo.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_SIGINFO_H | ||
2 | #define __ASM_SH_SIGINFO_H | ||
3 | |||
4 | #include <asm-generic/siginfo.h> | ||
5 | |||
6 | #endif /* __ASM_SH_SIGINFO_H */ | ||
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h deleted file mode 100644 index dd248c2e1085..000000000000 --- a/arch/sh/include/asm/sizes.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/sizes.h> | ||
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h deleted file mode 100644 index 6b71384b9d8b..000000000000 --- a/arch/sh/include/asm/socket.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/socket.h> | ||
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h deleted file mode 100644 index 9202a023328f..000000000000 --- a/arch/sh/include/asm/statfs.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_SH_STATFS_H | ||
2 | #define __ASM_SH_STATFS_H | ||
3 | |||
4 | #include <asm-generic/statfs.h> | ||
5 | |||
6 | #endif /* __ASM_SH_STATFS_H */ | ||
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h deleted file mode 100644 index 3935b106de79..000000000000 --- a/arch/sh/include/asm/termbits.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/termbits.h> | ||
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h deleted file mode 100644 index 280d78a9d966..000000000000 --- a/arch/sh/include/asm/termios.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/termios.h> | ||
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 050f221fa898..8698a80ed00c 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h | |||
@@ -25,6 +25,8 @@ | |||
25 | (__chk_user_ptr(addr), \ | 25 | (__chk_user_ptr(addr), \ |
26 | __access_ok((unsigned long __force)(addr), (size))) | 26 | __access_ok((unsigned long __force)(addr), (size))) |
27 | 27 | ||
28 | #define user_addr_max() (current_thread_info()->addr_limit.seg) | ||
29 | |||
28 | /* | 30 | /* |
29 | * Uh, these should become the main single-value transfer routines ... | 31 | * Uh, these should become the main single-value transfer routines ... |
30 | * They automatically use the right size if we just have the right | 32 | * They automatically use the right size if we just have the right |
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; }; | |||
100 | # include "uaccess_64.h" | 102 | # include "uaccess_64.h" |
101 | #endif | 103 | #endif |
102 | 104 | ||
105 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | ||
106 | |||
107 | extern __must_check long strlen_user(const char __user *str); | ||
108 | extern __must_check long strnlen_user(const char __user *str, long n); | ||
109 | |||
103 | /* Generic arbitrary sized copy. */ | 110 | /* Generic arbitrary sized copy. */ |
104 | /* Return the number of bytes NOT copied */ | 111 | /* Return the number of bytes NOT copied */ |
105 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); | 112 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); |
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size); | |||
137 | __cl_size; \ | 144 | __cl_size; \ |
138 | }) | 145 | }) |
139 | 146 | ||
140 | /** | ||
141 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
142 | * @dst: Destination address, in kernel space. This buffer must be at | ||
143 | * least @count bytes long. | ||
144 | * @src: Source address, in user space. | ||
145 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
146 | * | ||
147 | * Copies a NUL-terminated string from userspace to kernel space. | ||
148 | * | ||
149 | * On success, returns the length of the string (not including the trailing | ||
150 | * NUL). | ||
151 | * | ||
152 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
153 | * copied). | ||
154 | * | ||
155 | * If @count is smaller than the length of the string, copies @count bytes | ||
156 | * and returns @count. | ||
157 | */ | ||
158 | #define strncpy_from_user(dest,src,count) \ | ||
159 | ({ \ | ||
160 | unsigned long __sfu_src = (unsigned long)(src); \ | ||
161 | int __sfu_count = (int)(count); \ | ||
162 | long __sfu_res = -EFAULT; \ | ||
163 | \ | ||
164 | if (__access_ok(__sfu_src, __sfu_count)) \ | ||
165 | __sfu_res = __strncpy_from_user((unsigned long)(dest), \ | ||
166 | __sfu_src, __sfu_count); \ | ||
167 | \ | ||
168 | __sfu_res; \ | ||
169 | }) | ||
170 | |||
171 | static inline unsigned long | 147 | static inline unsigned long |
172 | copy_from_user(void *to, const void __user *from, unsigned long n) | 148 | copy_from_user(void *to, const void __user *from, unsigned long n) |
173 | { | 149 | { |
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) | |||
192 | return __copy_size; | 168 | return __copy_size; |
193 | } | 169 | } |
194 | 170 | ||
195 | /** | ||
196 | * strnlen_user: - Get the size of a string in user space. | ||
197 | * @s: The string to measure. | ||
198 | * @n: The maximum valid length | ||
199 | * | ||
200 | * Context: User context only. This function may sleep. | ||
201 | * | ||
202 | * Get the size of a NUL-terminated string in user space. | ||
203 | * | ||
204 | * Returns the size of the string INCLUDING the terminating NUL. | ||
205 | * On exception, returns 0. | ||
206 | * If the string is too long, returns a value greater than @n. | ||
207 | */ | ||
208 | static inline long strnlen_user(const char __user *s, long n) | ||
209 | { | ||
210 | if (!__addr_ok(s)) | ||
211 | return 0; | ||
212 | else | ||
213 | return __strnlen_user(s, n); | ||
214 | } | ||
215 | |||
216 | /** | ||
217 | * strlen_user: - Get the size of a string in user space. | ||
218 | * @str: The string to measure. | ||
219 | * | ||
220 | * Context: User context only. This function may sleep. | ||
221 | * | ||
222 | * Get the size of a NUL-terminated string in user space. | ||
223 | * | ||
224 | * Returns the size of the string INCLUDING the terminating NUL. | ||
225 | * On exception, returns 0. | ||
226 | * | ||
227 | * If there is a limit on the length of a valid string, you may wish to | ||
228 | * consider using strnlen_user() instead. | ||
229 | */ | ||
230 | #define strlen_user(str) strnlen_user(str, ~0UL >> 1) | ||
231 | |||
232 | /* | 171 | /* |
233 | * The exception table consists of pairs of addresses: the first is the | 172 | * The exception table consists of pairs of addresses: the first is the |
234 | * address of an instruction that is allowed to fault, and the second is | 173 | * address of an instruction that is allowed to fault, and the second is |
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h index ae0d24f6653f..c0de7ee35ab7 100644 --- a/arch/sh/include/asm/uaccess_32.h +++ b/arch/sh/include/asm/uaccess_32.h | |||
@@ -170,79 +170,4 @@ __asm__ __volatile__( \ | |||
170 | 170 | ||
171 | extern void __put_user_unknown(void); | 171 | extern void __put_user_unknown(void); |
172 | 172 | ||
173 | static inline int | ||
174 | __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count) | ||
175 | { | ||
176 | __kernel_size_t res; | ||
177 | unsigned long __dummy, _d, _s, _c; | ||
178 | |||
179 | __asm__ __volatile__( | ||
180 | "9:\n" | ||
181 | "mov.b @%2+, %1\n\t" | ||
182 | "cmp/eq #0, %1\n\t" | ||
183 | "bt/s 2f\n" | ||
184 | "1:\n" | ||
185 | "mov.b %1, @%3\n\t" | ||
186 | "dt %4\n\t" | ||
187 | "bf/s 9b\n\t" | ||
188 | " add #1, %3\n\t" | ||
189 | "2:\n\t" | ||
190 | "sub %4, %0\n" | ||
191 | "3:\n" | ||
192 | ".section .fixup,\"ax\"\n" | ||
193 | "4:\n\t" | ||
194 | "mov.l 5f, %1\n\t" | ||
195 | "jmp @%1\n\t" | ||
196 | " mov %9, %0\n\t" | ||
197 | ".balign 4\n" | ||
198 | "5: .long 3b\n" | ||
199 | ".previous\n" | ||
200 | ".section __ex_table,\"a\"\n" | ||
201 | " .balign 4\n" | ||
202 | " .long 9b,4b\n" | ||
203 | ".previous" | ||
204 | : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c) | ||
205 | : "0" (__count), "2" (__src), "3" (__dest), "4" (__count), | ||
206 | "i" (-EFAULT) | ||
207 | : "memory", "t"); | ||
208 | |||
209 | return res; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Return the size of a string (including the ending 0 even when we have | ||
214 | * exceeded the maximum string length). | ||
215 | */ | ||
216 | static inline long __strnlen_user(const char __user *__s, long __n) | ||
217 | { | ||
218 | unsigned long res; | ||
219 | unsigned long __dummy; | ||
220 | |||
221 | __asm__ __volatile__( | ||
222 | "1:\t" | ||
223 | "mov.b @(%0,%3), %1\n\t" | ||
224 | "cmp/eq %4, %0\n\t" | ||
225 | "bt/s 2f\n\t" | ||
226 | " add #1, %0\n\t" | ||
227 | "tst %1, %1\n\t" | ||
228 | "bf 1b\n\t" | ||
229 | "2:\n" | ||
230 | ".section .fixup,\"ax\"\n" | ||
231 | "3:\n\t" | ||
232 | "mov.l 4f, %1\n\t" | ||
233 | "jmp @%1\n\t" | ||
234 | " mov #0, %0\n" | ||
235 | ".balign 4\n" | ||
236 | "4: .long 2b\n" | ||
237 | ".previous\n" | ||
238 | ".section __ex_table,\"a\"\n" | ||
239 | " .balign 4\n" | ||
240 | " .long 1b,3b\n" | ||
241 | ".previous" | ||
242 | : "=z" (res), "=&r" (__dummy) | ||
243 | : "0" (0), "r" (__s), "r" (__n) | ||
244 | : "t"); | ||
245 | return res; | ||
246 | } | ||
247 | |||
248 | #endif /* __ASM_SH_UACCESS_32_H */ | 173 | #endif /* __ASM_SH_UACCESS_32_H */ |
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 56fd20b8cdcc..2e07e0f40c6a 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h | |||
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long); | |||
84 | extern long __put_user_asm_q(void *, long); | 84 | extern long __put_user_asm_q(void *, long); |
85 | extern void __put_user_unknown(void); | 85 | extern void __put_user_unknown(void); |
86 | 86 | ||
87 | extern long __strnlen_user(const char *__s, long __n); | ||
88 | extern int __strncpy_from_user(unsigned long __dest, | ||
89 | unsigned long __user __src, int __count); | ||
90 | |||
91 | #endif /* __ASM_SH_UACCESS_64_H */ | 87 | #endif /* __ASM_SH_UACCESS_64_H */ |
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h deleted file mode 100644 index 9bc07b9f30fb..000000000000 --- a/arch/sh/include/asm/ucontext.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/ucontext.h> | ||
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..6e38953ff7fd --- /dev/null +++ b/arch/sh/include/asm/word-at-a-time.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef __ASM_SH_WORD_AT_A_TIME_H | ||
2 | #define __ASM_SH_WORD_AT_A_TIME_H | ||
3 | |||
4 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
5 | # include <asm-generic/word-at-a-time.h> | ||
6 | #else | ||
7 | /* | ||
8 | * Little-endian version cribbed from x86. | ||
9 | */ | ||
10 | struct word_at_a_time { | ||
11 | const unsigned long one_bits, high_bits; | ||
12 | }; | ||
13 | |||
14 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | ||
15 | |||
16 | /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ | ||
17 | static inline long count_masked_bytes(long mask) | ||
18 | { | ||
19 | /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ | ||
20 | long a = (0x0ff0001+mask) >> 23; | ||
21 | /* Fix the 1 for 00 case */ | ||
22 | return a & mask; | ||
23 | } | ||
24 | |||
25 | /* Return nonzero if it has a zero */ | ||
26 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) | ||
27 | { | ||
28 | unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; | ||
29 | *bits = mask; | ||
30 | return mask; | ||
31 | } | ||
32 | |||
33 | static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) | ||
34 | { | ||
35 | return bits; | ||
36 | } | ||
37 | |||
38 | static inline unsigned long create_zero_mask(unsigned long bits) | ||
39 | { | ||
40 | bits = (bits - 1) & ~bits; | ||
41 | return bits >> 7; | ||
42 | } | ||
43 | |||
44 | /* The mask we created is directly usable as a bytemask */ | ||
45 | #define zero_bytemask(mask) (mask) | ||
46 | |||
47 | static inline unsigned long find_zero(unsigned long mask) | ||
48 | { | ||
49 | return count_masked_bytes(mask); | ||
50 | } | ||
51 | #endif | ||
52 | |||
53 | #endif | ||
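Not part of the patch: a short sketch of how the generic string routines typically combine the primitives defined above to locate the first NUL byte in a word. The helper name is made up for illustration.

```c
/*
 * Sketch (not from the patch): combines has_zero(), prep_zero_mask(),
 * create_zero_mask() and find_zero() to return the byte index of the
 * first NUL in 'word', or sizeof(word) when there is none.
 */
#include <linux/kernel.h>
#include <asm/word-at-a-time.h>

static inline unsigned long first_zero_byte(unsigned long word)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long bits, mask;

	if (!has_zero(word, &bits, &constants))
		return sizeof(word);		/* no NUL byte in this word */

	bits = prep_zero_mask(word, bits, &constants);
	mask = create_zero_mask(bits);
	return find_zero(mask);			/* byte index of the first NUL */
}
```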
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/arch/sh/include/asm/xor.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #include <asm-generic/xor.h> | ||
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h deleted file mode 100644 index 1192e1c761a7..000000000000 --- a/arch/sh/include/cpu-sh2a/cpu/ubc.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * SH-2A UBC definitions | ||
3 | * | ||
4 | * Copyright (C) 2008 Kieran Bingham | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_CPU_SH2A_UBC_H | ||
12 | #define __ASM_CPU_SH2A_UBC_H | ||
13 | |||
14 | #define UBC_BARA 0xfffc0400 | ||
15 | #define UBC_BAMRA 0xfffc0404 | ||
16 | #define UBC_BBRA 0xfffc04a0 /* 16 bit access */ | ||
17 | #define UBC_BDRA 0xfffc0408 | ||
18 | #define UBC_BDMRA 0xfffc040c | ||
19 | |||
20 | #define UBC_BARB 0xfffc0410 | ||
21 | #define UBC_BAMRB 0xfffc0414 | ||
22 | #define UBC_BBRB 0xfffc04b0 /* 16 bit access */ | ||
23 | #define UBC_BDRB 0xfffc0418 | ||
24 | #define UBC_BDMRB 0xfffc041c | ||
25 | |||
26 | #define UBC_BRCR 0xfffc04c0 | ||
27 | |||
28 | #endif /* __ASM_CPU_SH2A_UBC_H */ | ||
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index ff1f0e6e9bec..b7cf6a547f11 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S | |||
@@ -1569,86 +1569,6 @@ ___clear_user_exit: | |||
1569 | #endif /* CONFIG_MMU */ | 1569 | #endif /* CONFIG_MMU */ |
1570 | 1570 | ||
1571 | /* | 1571 | /* |
1572 | * int __strncpy_from_user(unsigned long __dest, unsigned long __src, | ||
1573 | * int __count) | ||
1574 | * | ||
1575 | * Inputs: | ||
1576 | * (r2) target address | ||
1577 | * (r3) source address | ||
1578 | * (r4) maximum size in bytes | ||
1579 | * | ||
1580 | * Outputs: | ||
1581 | * (*r2) copied data | ||
1582 | * (r2) -EFAULT (in case of faulting) | ||
1583 | * copied data (otherwise) | ||
1584 | */ | ||
1585 | .global __strncpy_from_user | ||
1586 | __strncpy_from_user: | ||
1587 | pta ___strncpy_from_user1, tr0 | ||
1588 | pta ___strncpy_from_user_done, tr1 | ||
1589 | or r4, ZERO, r5 /* r5 = original count */ | ||
1590 | beq/u r4, r63, tr1 /* early exit if r4==0 */ | ||
1591 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
1592 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
1593 | |||
1594 | ___strncpy_from_user1: | ||
1595 | ld.b r3, 0, r7 /* Fault address: only in reading */ | ||
1596 | st.b r2, 0, r7 | ||
1597 | addi r2, 1, r2 | ||
1598 | addi r3, 1, r3 | ||
1599 | beq/u ZERO, r7, tr1 | ||
1600 | addi r4, -1, r4 /* return real number of copied bytes */ | ||
1601 | bne/l ZERO, r4, tr0 | ||
1602 | |||
1603 | ___strncpy_from_user_done: | ||
1604 | sub r5, r4, r6 /* If done, return copied */ | ||
1605 | |||
1606 | ___strncpy_from_user_exit: | ||
1607 | or r6, ZERO, r2 | ||
1608 | ptabs LINK, tr0 | ||
1609 | blink tr0, ZERO | ||
1610 | |||
1611 | /* | ||
1612 | * extern long __strnlen_user(const char *__s, long __n) | ||
1613 | * | ||
1614 | * Inputs: | ||
1615 | * (r2) source address | ||
1616 | * (r3) source size in bytes | ||
1617 | * | ||
1618 | * Outputs: | ||
1619 | * (r2) -EFAULT (in case of faulting) | ||
1620 | * string length (otherwise) | ||
1621 | */ | ||
1622 | .global __strnlen_user | ||
1623 | __strnlen_user: | ||
1624 | pta ___strnlen_user_set_reply, tr0 | ||
1625 | pta ___strnlen_user1, tr1 | ||
1626 | or ZERO, ZERO, r5 /* r5 = counter */ | ||
1627 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
1628 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
1629 | beq r3, ZERO, tr0 | ||
1630 | |||
1631 | ___strnlen_user1: | ||
1632 | ldx.b r2, r5, r7 /* Fault address: only in reading */ | ||
1633 | addi r3, -1, r3 /* No real fixup */ | ||
1634 | addi r5, 1, r5 | ||
1635 | beq r3, ZERO, tr0 | ||
1636 | bne r7, ZERO, tr1 | ||
1637 | ! The line below used to be active. This led to a junk byte lying between each pair | ||
1638 | ! of entries in the argv & envp structures in memory. Whilst the program saw the right data | ||
1639 | ! via the argv and envp arguments to main, it meant the 'flat' representation visible through | ||
1640 | ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example. | ||
1641 | ! addi r5, 1, r5 /* Include '\0' */ | ||
1642 | |||
1643 | ___strnlen_user_set_reply: | ||
1644 | or r5, ZERO, r6 /* If done, return counter */ | ||
1645 | |||
1646 | ___strnlen_user_exit: | ||
1647 | or r6, ZERO, r2 | ||
1648 | ptabs LINK, tr0 | ||
1649 | blink tr0, ZERO | ||
1650 | |||
1651 | /* | ||
1652 | * extern long __get_user_asm_?(void *val, long addr) | 1572 | * extern long __get_user_asm_?(void *val, long addr) |
1653 | * | 1573 | * |
1654 | * Inputs: | 1574 | * Inputs: |
@@ -1982,8 +1902,6 @@ asm_uaccess_start: | |||
1982 | .long ___copy_user2, ___copy_user_exit | 1902 | .long ___copy_user2, ___copy_user_exit |
1983 | .long ___clear_user1, ___clear_user_exit | 1903 | .long ___clear_user1, ___clear_user_exit |
1984 | #endif | 1904 | #endif |
1985 | .long ___strncpy_from_user1, ___strncpy_from_user_exit | ||
1986 | .long ___strnlen_user1, ___strnlen_user_exit | ||
1987 | .long ___get_user_asm_b1, ___get_user_asm_b_exit | 1905 | .long ___get_user_asm_b1, ___get_user_asm_b_exit |
1988 | .long ___get_user_asm_w1, ___get_user_asm_w_exit | 1906 | .long ___get_user_asm_w1, ___get_user_asm_w_exit |
1989 | .long ___get_user_asm_l1, ___get_user_asm_l_exit | 1907 | .long ___get_user_asm_l1, ___get_user_asm_l_exit |
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index 9b7a459a4613..055d91b70305 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
6 | #include <linux/stackprotector.h> | 6 | #include <linux/stackprotector.h> |
7 | #include <asm/fpu.h> | ||
7 | 8 | ||
8 | struct kmem_cache *task_xstate_cachep = NULL; | 9 | struct kmem_cache *task_xstate_cachep = NULL; |
9 | unsigned int xstate_size; | 10 | unsigned int xstate_size; |
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 4264583eabac..602545b12a86 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/switch_to.h> | 33 | #include <asm/switch_to.h> |
34 | 34 | ||
35 | struct task_struct *last_task_used_math = NULL; | 35 | struct task_struct *last_task_used_math = NULL; |
36 | struct pt_regs fake_swapper_regs = { 0, }; | ||
36 | 37 | ||
37 | void show_regs(struct pt_regs *regs) | 38 | void show_regs(struct pt_regs *regs) |
38 | { | 39 | { |
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index 45afa5c51f67..26a0774f5272 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c | |||
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b); | |||
32 | EXPORT_SYMBOL(__get_user_asm_w); | 32 | EXPORT_SYMBOL(__get_user_asm_w); |
33 | EXPORT_SYMBOL(__get_user_asm_l); | 33 | EXPORT_SYMBOL(__get_user_asm_l); |
34 | EXPORT_SYMBOL(__get_user_asm_q); | 34 | EXPORT_SYMBOL(__get_user_asm_q); |
35 | EXPORT_SYMBOL(__strnlen_user); | ||
36 | EXPORT_SYMBOL(__strncpy_from_user); | ||
37 | EXPORT_SYMBOL(__clear_user); | 35 | EXPORT_SYMBOL(__clear_user); |
38 | EXPORT_SYMBOL(copy_page); | 36 | EXPORT_SYMBOL(copy_page); |
39 | EXPORT_SYMBOL(__copy_user); | 37 | EXPORT_SYMBOL(__copy_user); |
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h deleted file mode 100644 index 870db5928577..000000000000 --- a/arch/sparc/include/asm/cmt.h +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | #ifndef _SPARC64_CMT_H | ||
2 | #define _SPARC64_CMT_H | ||
3 | |||
4 | /* cmt.h: Chip Multi-Threading register definitions | ||
5 | * | ||
6 | * Copyright (C) 2004 David S. Miller (davem@redhat.com) | ||
7 | */ | ||
8 | |||
9 | /* ASI_CORE_ID - private */ | ||
10 | #define LP_ID 0x0000000000000010UL | ||
11 | #define LP_ID_MAX 0x00000000003f0000UL | ||
12 | #define LP_ID_ID 0x000000000000003fUL | ||
13 | |||
14 | /* ASI_INTR_ID - private */ | ||
15 | #define LP_INTR_ID 0x0000000000000000UL | ||
16 | #define LP_INTR_ID_ID 0x00000000000003ffUL | ||
17 | |||
18 | /* ASI_CESR_ID - private */ | ||
19 | #define CESR_ID 0x0000000000000040UL | ||
20 | #define CESR_ID_ID 0x00000000000000ffUL | ||
21 | |||
22 | /* ASI_CORE_AVAILABLE - shared */ | ||
23 | #define LP_AVAIL 0x0000000000000000UL | ||
24 | #define LP_AVAIL_1 0x0000000000000002UL | ||
25 | #define LP_AVAIL_0 0x0000000000000001UL | ||
26 | |||
27 | /* ASI_CORE_ENABLE_STATUS - shared */ | ||
28 | #define LP_ENAB_STAT 0x0000000000000010UL | ||
29 | #define LP_ENAB_STAT_1 0x0000000000000002UL | ||
30 | #define LP_ENAB_STAT_0 0x0000000000000001UL | ||
31 | |||
32 | /* ASI_CORE_ENABLE - shared */ | ||
33 | #define LP_ENAB 0x0000000000000020UL | ||
34 | #define LP_ENAB_1 0x0000000000000002UL | ||
35 | #define LP_ENAB_0 0x0000000000000001UL | ||
36 | |||
37 | /* ASI_CORE_RUNNING - shared */ | ||
38 | #define LP_RUNNING_RW 0x0000000000000050UL | ||
39 | #define LP_RUNNING_W1S 0x0000000000000060UL | ||
40 | #define LP_RUNNING_W1C 0x0000000000000068UL | ||
41 | #define LP_RUNNING_1 0x0000000000000002UL | ||
42 | #define LP_RUNNING_0 0x0000000000000001UL | ||
43 | |||
44 | /* ASI_CORE_RUNNING_STAT - shared */ | ||
45 | #define LP_RUN_STAT 0x0000000000000058UL | ||
46 | #define LP_RUN_STAT_1 0x0000000000000002UL | ||
47 | #define LP_RUN_STAT_0 0x0000000000000001UL | ||
48 | |||
49 | /* ASI_XIR_STEERING - shared */ | ||
50 | #define LP_XIR_STEER 0x0000000000000030UL | ||
51 | #define LP_XIR_STEER_1 0x0000000000000002UL | ||
52 | #define LP_XIR_STEER_0 0x0000000000000001UL | ||
53 | |||
54 | /* ASI_CMT_ERROR_STEERING - shared */ | ||
55 | #define CMT_ER_STEER 0x0000000000000040UL | ||
56 | #define CMT_ER_STEER_1 0x0000000000000002UL | ||
57 | #define CMT_ER_STEER_0 0x0000000000000001UL | ||
58 | |||
59 | #endif /* _SPARC64_CMT_H */ | ||
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h deleted file mode 100644 index f8423039b242..000000000000 --- a/arch/sparc/include/asm/mpmbox.h +++ /dev/null | |||
@@ -1,67 +0,0 @@ | |||
1 | /* | ||
2 | * mpmbox.h: Interface and defines for the OpenProm mailbox | ||
3 | * facilities for MP machines under Linux. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _SPARC_MPMBOX_H | ||
9 | #define _SPARC_MPMBOX_H | ||
10 | |||
11 | /* The prom allocates, for each CPU on the machine, an unsigned | ||
12 | * byte in physical ram. You probe the device tree prom nodes | ||
13 | * for these values. The purpose of this byte is to be able to | ||
14 | * pass messages from one cpu to another. | ||
15 | */ | ||
16 | |||
17 | /* These are the main message types we have to look for in our | ||
18 | * Cpu mailboxes, based upon these values we decide what course | ||
19 | * of action to take. | ||
20 | */ | ||
21 | |||
22 | /* The CPU is executing code in the kernel. */ | ||
23 | #define MAILBOX_ISRUNNING 0xf0 | ||
24 | |||
25 | /* Another CPU called romvec->pv_exit(), you should call | ||
26 | * prom_stopcpu() when you see this in your mailbox. | ||
27 | */ | ||
28 | #define MAILBOX_EXIT 0xfb | ||
29 | |||
30 | /* Another CPU called romvec->pv_enter(), you should call | ||
31 | * prom_cpuidle() when this is seen. | ||
32 | */ | ||
33 | #define MAILBOX_GOSPIN 0xfc | ||
34 | |||
35 | /* Another CPU has hit a breakpoint either into kadb or the prom | ||
36 | * itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle() | ||
37 | * at this point. | ||
38 | */ | ||
39 | #define MAILBOX_BPT_SPIN 0xfd | ||
40 | |||
41 | /* Oh geese, some other nitwit got a damn watchdog reset. The party's | ||
42 | * over so go call prom_stopcpu(). | ||
43 | */ | ||
44 | #define MAILBOX_WDOG_STOP 0xfe | ||
45 | |||
46 | #ifndef __ASSEMBLY__ | ||
47 | |||
48 | /* Handy macro's to determine a cpu's state. */ | ||
49 | |||
50 | /* Is the cpu still in Power On Self Test? */ | ||
51 | #define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f) | ||
52 | |||
53 | /* Is the cpu at the 'ok' prompt of the PROM? */ | ||
54 | #define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f) | ||
55 | |||
56 | /* Is the cpu spinning in the PROM? */ | ||
57 | #define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef) | ||
58 | |||
59 | /* Sanity check... This is junk mail, throw it out. */ | ||
60 | #define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa) | ||
61 | |||
62 | /* Is the cpu actively running an application/kernel-code? */ | ||
63 | #define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING) | ||
64 | |||
65 | #endif /* !(__ASSEMBLY__) */ | ||
66 | |||
67 | #endif /* !(_SPARC_MPMBOX_H) */ | ||
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index c3dd275f25e2..9ab078a4605d 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h | |||
@@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs); | |||
146 | #ifdef __tilegx__ | 146 | #ifdef __tilegx__ |
147 | #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) | 147 | #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) |
148 | #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) | 148 | #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) |
149 | #define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret) | 149 | #define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret) |
150 | #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) | 150 | #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) |
151 | #else | 151 | #else |
152 | #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) | 152 | #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) |
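The one-character tile change above (ld4u to ld4s) makes the 32-bit __get_user case sign-extend rather than zero-extend when the loaded value lands in a 64-bit register, which is what C expects when the user object is a signed int. A small sketch of the difference at the C level, not tile assembly; the casts simply stand in for the two load flavours.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t user_val = -1;				/* e.g. an int fetched from userspace */
	int64_t zero_extended = (uint32_t)user_val;	/* ld4u-style load */
	int64_t sign_extended = (int32_t)user_val;	/* ld4s-style load */

	printf("zero-extended: %lld\n", (long long)zero_extended);	/* 4294967295 */
	printf("sign-extended: %lld\n", (long long)sign_extended);	/* -1 */
	return 0;
}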
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index be6d9e365a80..3470624d7835 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec) | |||
2460 | pxor IN3, STATE4 | 2460 | pxor IN3, STATE4 |
2461 | movaps IN4, IV | 2461 | movaps IN4, IV |
2462 | #else | 2462 | #else |
2463 | pxor (INP), STATE2 | ||
2464 | pxor 0x10(INP), STATE3 | ||
2465 | pxor IN1, STATE4 | 2463 | pxor IN1, STATE4 |
2466 | movaps IN2, IV | 2464 | movaps IN2, IV |
2465 | movups (INP), IN1 | ||
2466 | pxor IN1, STATE2 | ||
2467 | movups 0x10(INP), IN2 | ||
2468 | pxor IN2, STATE3 | ||
2467 | #endif | 2469 | #endif |
2468 | movups STATE1, (OUTP) | 2470 | movups STATE1, (OUTP) |
2469 | movups STATE2, 0x10(OUTP) | 2471 | movups STATE2, 0x10(OUTP) |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 086eb58c6e80..f1b42b3a186c 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void) | |||
120 | bool ret = false; | 120 | bool ret = false; |
121 | struct pvclock_vcpu_time_info *src; | 121 | struct pvclock_vcpu_time_info *src; |
122 | 122 | ||
123 | /* | ||
124 | * per_cpu() is safe here because this function is only called from | ||
125 | * timer functions where preemption is already disabled. | ||
126 | */ | ||
127 | WARN_ON(!in_atomic()); | ||
128 | src = &__get_cpu_var(hv_clock); | 123 | src = &__get_cpu_var(hv_clock); |
129 | if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { | 124 | if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { |
130 | __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); | 125 | __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 62c9457ccd2f..c0f420f76cd3 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
100 | struct dma_attrs *attrs) | 100 | struct dma_attrs *attrs) |
101 | { | 101 | { |
102 | unsigned long dma_mask; | 102 | unsigned long dma_mask; |
103 | struct page *page = NULL; | 103 | struct page *page; |
104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
105 | dma_addr_t addr; | 105 | dma_addr_t addr; |
106 | 106 | ||
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |||
108 | 108 | ||
109 | flag |= __GFP_ZERO; | 109 | flag |= __GFP_ZERO; |
110 | again: | 110 | again: |
111 | page = NULL; | ||
111 | if (!(flag & GFP_ATOMIC)) | 112 | if (!(flag & GFP_ATOMIC)) |
112 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); | 113 | page = dma_alloc_from_contiguous(dev, count, get_order(size)); |
113 | if (!page) | 114 | if (!page) |
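The pci-dma fix matters because the function can jump back to the again: label after freeing a page the device cannot address; without resetting page, a retry pass that skips both allocators (for example the GFP_ATOMIC case) would hand back the stale pointer. Below is a minimal sketch of the retry pattern with malloc-based stand-in allocators; the helper names are hypothetical and this is not the kernel API.

#include <stdbool.h>
#include <stdlib.h>

/* Stand-ins for the kernel allocators; purely illustrative. */
static void *try_contiguous_alloc(size_t size) { return malloc(size); }
static void *try_regular_alloc(size_t size)    { return malloc(size); }

/* Pretend the first allocation lands outside the device's DMA mask. */
static int attempts;
static bool addr_ok_for_device(void *p)
{
	(void)p;
	return ++attempts > 1;
}

static void *alloc_with_retry(size_t size, bool can_block)
{
	void *page;
again:
	page = NULL;			/* the fix: reset on every pass */
	if (can_block)			/* conditional, so page may stay NULL */
		page = try_contiguous_alloc(size);
	if (!page)
		page = try_regular_alloc(size);
	if (!page)
		return NULL;

	if (!addr_ok_for_device(page)) {
		free(page);		/* previous attempt is released... */
		can_block = false;	/* ...and we retry more conservatively */
		goto again;		/* without the reset above, a pass that
					 * skips both allocators would reuse
					 * the freed pointer */
	}
	return page;
}

int main(void)
{
	free(alloc_with_retry(4096, true));
	return 0;
}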
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 3fab55bea29b..7bd8a0823654 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
349 | 349 | ||
350 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 350 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
351 | { | 351 | { |
352 | if (c->phys_proc_id == o->phys_proc_id) | 352 | if (c->phys_proc_id == o->phys_proc_id) { |
353 | return topology_sane(c, o, "mc"); | 353 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) |
354 | return true; | ||
354 | 355 | ||
356 | return topology_sane(c, o, "mc"); | ||
357 | } | ||
355 | return false; | 358 | return false; |
356 | } | 359 | } |
357 | 360 | ||
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index 677b1ed184c9..4f74d94c8d97 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
@@ -22,7 +22,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
22 | void *map; | 22 | void *map; |
23 | int ret; | 23 | int ret; |
24 | 24 | ||
25 | if (__range_not_ok(from, n, TASK_SIZE) == 0) | 25 | if (__range_not_ok(from, n, TASK_SIZE)) |
26 | return len; | 26 | return len; |
27 | 27 | ||
28 | do { | 28 | do { |
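The usercopy fix corrects an inverted test on a negative-sense predicate: __range_not_ok() returns nonzero when the range is bad, so comparing it against 0 made the old code bail out exactly when the range was valid. A toy illustration with a simplified stand-in check; range_not_ok() here is hypothetical, not the kernel macro.

#include <stdbool.h>
#include <stdio.h>

/* Negative-sense predicate, as in __range_not_ok(): true means "reject". */
static bool range_not_ok(unsigned long addr, unsigned long len,
			 unsigned long limit)
{
	return addr + len > limit || addr + len < addr;
}

int main(void)
{
	unsigned long limit = 0x1000;

	/* Buggy test: bails out precisely when the range IS valid. */
	if (range_not_ok(0x100, 0x10, limit) == 0)
		printf("old check: rejected a valid range\n");

	/* Fixed test: bails out only when the range is invalid. */
	if (range_not_ok(0x2000, 0x10, limit))
		printf("new check: rejected an invalid range\n");

	return 0;
}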
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index be1ef574ce9a..78fe3f1ac49f 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -180,7 +180,7 @@ err_free_memtype: | |||
180 | 180 | ||
181 | /** | 181 | /** |
182 | * ioremap_nocache - map bus memory into CPU space | 182 | * ioremap_nocache - map bus memory into CPU space |
183 | * @offset: bus address of the memory | 183 | * @phys_addr: bus address of the memory |
184 | * @size: size of the resource to map | 184 | * @size: size of the resource to map |
185 | * | 185 | * |
186 | * ioremap_nocache performs a platform specific sequence of operations to | 186 | * ioremap_nocache performs a platform specific sequence of operations to |
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache); | |||
217 | 217 | ||
218 | /** | 218 | /** |
219 | * ioremap_wc - map memory into CPU space write combined | 219 | * ioremap_wc - map memory into CPU space write combined |
220 | * @offset: bus address of the memory | 220 | * @phys_addr: bus address of the memory |
221 | * @size: size of the resource to map | 221 | * @size: size of the resource to map |
222 | * | 222 | * |
223 | * This version of ioremap ensures that the memory is marked write combining. | 223 | * This version of ioremap ensures that the memory is marked write combining. |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e1ebde315210..a718e0d23503 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end) | |||
122 | 122 | ||
123 | /** | 123 | /** |
124 | * clflush_cache_range - flush a cache range with clflush | 124 | * clflush_cache_range - flush a cache range with clflush |
125 | * @addr: virtual start address | 125 | * @vaddr: virtual start address |
126 | * @size: number of bytes to flush | 126 | * @size: number of bytes to flush |
127 | * | 127 | * |
128 | * clflush is an unordered instruction which needs fencing with mfence | 128 | * clflush is an unordered instruction which needs fencing with mfence |
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c index 416bd40c0eba..68d1dc91b37b 100644 --- a/arch/x86/um/sys_call_table_32.c +++ b/arch/x86/um/sys_call_table_32.c | |||
@@ -39,9 +39,9 @@ | |||
39 | #undef __SYSCALL_I386 | 39 | #undef __SYSCALL_I386 |
40 | #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, | 40 | #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, |
41 | 41 | ||
42 | typedef void (*sys_call_ptr_t)(void); | 42 | typedef asmlinkage void (*sys_call_ptr_t)(void); |
43 | 43 | ||
44 | extern void sys_ni_syscall(void); | 44 | extern asmlinkage void sys_ni_syscall(void); |
45 | 45 | ||
46 | const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { | 46 | const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { |
47 | /* | 47 | /* |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index e74df9548a02..ff962d4b821e 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -209,6 +209,9 @@ static void __init xen_banner(void) | |||
209 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); | 209 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); |
210 | } | 210 | } |
211 | 211 | ||
212 | #define CPUID_THERM_POWER_LEAF 6 | ||
213 | #define APERFMPERF_PRESENT 0 | ||
214 | |||
212 | static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; | 215 | static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; |
213 | static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; | 216 | static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; |
214 | 217 | ||
@@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
242 | *dx = cpuid_leaf5_edx_val; | 245 | *dx = cpuid_leaf5_edx_val; |
243 | return; | 246 | return; |
244 | 247 | ||
248 | case CPUID_THERM_POWER_LEAF: | ||
249 | /* Disabling APERFMPERF for kernel usage */ | ||
250 | maskecx = ~(1 << APERFMPERF_PRESENT); | ||
251 | break; | ||
252 | |||
245 | case 0xb: | 253 | case 0xb: |
246 | /* Suppress extended topology stuff */ | 254 | /* Suppress extended topology stuff */ |
247 | maskebx = 0; | 255 | maskebx = 0; |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index ffd08c414e91..64effdc6da94 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page, | |||
706 | unsigned long uninitialized_var(address); | 706 | unsigned long uninitialized_var(address); |
707 | unsigned level; | 707 | unsigned level; |
708 | pte_t *ptep = NULL; | 708 | pte_t *ptep = NULL; |
709 | int ret = 0; | ||
709 | 710 | ||
710 | pfn = page_to_pfn(page); | 711 | pfn = page_to_pfn(page); |
711 | if (!PageHighMem(page)) { | 712 | if (!PageHighMem(page)) { |
@@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page, | |||
741 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); | 742 | list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); |
742 | spin_unlock_irqrestore(&m2p_override_lock, flags); | 743 | spin_unlock_irqrestore(&m2p_override_lock, flags); |
743 | 744 | ||
745 | /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in | ||
746 | * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other | ||
747 | * pfn so that the following mfn_to_pfn(mfn) calls will return the | ||
748 | * pfn from the m2p_override (the backend pfn) instead. | ||
749 | * We need to do this because the pages shared by the frontend | ||
750 | * (xen-blkfront) can be already locked (lock_page, called by | ||
751 | * do_read_cache_page); when the userspace backend tries to use them | ||
752 | * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so | ||
753 | * do_blockdev_direct_IO is going to try to lock the same pages | ||
754 | * again resulting in a deadlock. | ||
755 | * As a side effect get_user_pages_fast might not be safe on the | ||
756 | * frontend pages while they are being shared with the backend, | ||
757 | * because mfn_to_pfn (that ends up being called by GUPF) will | ||
758 | * return the backend pfn rather than the frontend pfn. */ | ||
759 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); | ||
760 | if (ret == 0 && get_phys_to_machine(pfn) == mfn) | ||
761 | set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); | ||
762 | |||
744 | return 0; | 763 | return 0; |
745 | } | 764 | } |
746 | EXPORT_SYMBOL_GPL(m2p_add_override); | 765 | EXPORT_SYMBOL_GPL(m2p_add_override); |
@@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte) | |||
752 | unsigned long uninitialized_var(address); | 771 | unsigned long uninitialized_var(address); |
753 | unsigned level; | 772 | unsigned level; |
754 | pte_t *ptep = NULL; | 773 | pte_t *ptep = NULL; |
774 | int ret = 0; | ||
755 | 775 | ||
756 | pfn = page_to_pfn(page); | 776 | pfn = page_to_pfn(page); |
757 | mfn = get_phys_to_machine(pfn); | 777 | mfn = get_phys_to_machine(pfn); |
@@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte) | |||
821 | } else | 841 | } else |
822 | set_phys_to_machine(pfn, page->index); | 842 | set_phys_to_machine(pfn, page->index); |
823 | 843 | ||
844 | /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present | ||
845 | * somewhere in this domain, even before being added to the | ||
846 | * m2p_override (see comment above in m2p_add_override). | ||
847 | * If there are no other entries in the m2p_override corresponding | ||
848 | * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for | ||
849 | * the original pfn (the one shared by the frontend): the backend | ||
850 | * cannot do any IO on this page anymore because it has been | ||
851 | * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of | ||
852 | * the original pfn causes mfn_to_pfn(mfn) to return the frontend | ||
853 | * pfn again. */ | ||
854 | mfn &= ~FOREIGN_FRAME_BIT; | ||
855 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); | ||
856 | if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && | ||
857 | m2p_find_override(mfn) == NULL) | ||
858 | set_phys_to_machine(pfn, mfn); | ||
859 | |||
824 | return 0; | 860 | return 0; |
825 | } | 861 | } |
826 | EXPORT_SYMBOL_GPL(m2p_remove_override); | 862 | EXPORT_SYMBOL_GPL(m2p_remove_override); |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 3ebba0753d38..a4790bf22c59 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -371,7 +371,8 @@ char * __init xen_memory_setup(void) | |||
371 | populated = xen_populate_chunk(map, memmap.nr_entries, | 371 | populated = xen_populate_chunk(map, memmap.nr_entries, |
372 | max_pfn, &last_pfn, xen_released_pages); | 372 | max_pfn, &last_pfn, xen_released_pages); |
373 | 373 | ||
374 | extra_pages += (xen_released_pages - populated); | 374 | xen_released_pages -= populated; |
375 | extra_pages += xen_released_pages; | ||
375 | 376 | ||
376 | if (last_pfn > max_pfn) { | 377 | if (last_pfn > max_pfn) { |
377 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); | 378 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0bcda488f11c..c89aa01fb1de 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev, | |||
246 | map->lock = regmap_lock_mutex; | 246 | map->lock = regmap_lock_mutex; |
247 | map->unlock = regmap_unlock_mutex; | 247 | map->unlock = regmap_unlock_mutex; |
248 | } | 248 | } |
249 | map->format.buf_size = (config->reg_bits + config->val_bits) / 8; | ||
250 | map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); | 249 | map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); |
251 | map->format.pad_bytes = config->pad_bits / 8; | 250 | map->format.pad_bytes = config->pad_bits / 8; |
252 | map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); | 251 | map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); |
253 | map->format.buf_size += map->format.pad_bytes; | 252 | map->format.buf_size = DIV_ROUND_UP(config->reg_bits + |
253 | config->val_bits + config->pad_bits, 8); | ||
254 | map->reg_shift = config->pad_bits % 8; | 254 | map->reg_shift = config->pad_bits % 8; |
255 | if (config->reg_stride) | 255 | if (config->reg_stride) |
256 | map->reg_stride = config->reg_stride; | 256 | map->reg_stride = config->reg_stride; |
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev, | |||
368 | 368 | ||
369 | ret = regcache_init(map, config); | 369 | ret = regcache_init(map, config); |
370 | if (ret < 0) | 370 | if (ret < 0) |
371 | goto err_free_workbuf; | 371 | goto err_debugfs; |
372 | 372 | ||
373 | /* Add a devres resource for dev_get_regmap() */ | 373 | /* Add a devres resource for dev_get_regmap() */ |
374 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); | 374 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); |
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev, | |||
383 | 383 | ||
384 | err_cache: | 384 | err_cache: |
385 | regcache_exit(map); | 385 | regcache_exit(map); |
386 | err_free_workbuf: | 386 | err_debugfs: |
387 | regmap_debugfs_exit(map); | ||
387 | kfree(map->work_buf); | 388 | kfree(map->work_buf); |
388 | err_map: | 389 | err_map: |
389 | kfree(map); | 390 | kfree(map); |
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) | |||
471 | 472 | ||
472 | return ret; | 473 | return ret; |
473 | } | 474 | } |
475 | EXPORT_SYMBOL_GPL(regmap_reinit_cache); | ||
474 | 476 | ||
475 | /** | 477 | /** |
476 | * regmap_exit(): Free a previously allocated register map | 478 | * regmap_exit(): Free a previously allocated register map |
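The regmap change replaces two truncating divisions with a single rounded-up one, so buf_size no longer comes up short when the register or value width is not a whole number of bytes. A quick arithmetic check of the old and new formulas; the 12/8/0 bit split is a made-up device, not one from the patch.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical device: 12-bit registers, 8-bit values, no padding. */
	int reg_bits = 12, val_bits = 8, pad_bits = 0;

	int old_size = (reg_bits + val_bits) / 8 + pad_bits / 8;	/* 2: truncates */
	int new_size = DIV_ROUND_UP(reg_bits + val_bits + pad_bits, 8);	/* 3 */

	printf("old buf_size = %d, new buf_size = %d\n", old_size, new_size);
	return 0;
}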
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index a058842f14fd..61ce4054b3c3 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c | |||
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc) | |||
139 | bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); | 139 | bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); |
140 | break; | 140 | break; |
141 | case 0x4331: | 141 | case 0x4331: |
142 | /* BCM4331 workaround is SPROM-related, we put it in sprom.c */ | 142 | case 43431: |
143 | /* Ext PA lines must be enabled for tx on BCM4331 */ | ||
144 | bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true); | ||
143 | break; | 145 | break; |
144 | case 43224: | 146 | case 43224: |
145 | if (bus->chipinfo.rev == 0) { | 147 | if (bus->chipinfo.rev == 0) { |
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index 9a96f14c8f47..c32ebd537abe 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c | |||
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc) | |||
232 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, | 232 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, |
233 | bool enable) | 233 | bool enable) |
234 | { | 234 | { |
235 | struct pci_dev *pdev = pc->core->bus->host_pci; | 235 | struct pci_dev *pdev; |
236 | u32 coremask, tmp; | 236 | u32 coremask, tmp; |
237 | int err = 0; | 237 | int err = 0; |
238 | 238 | ||
239 | if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) { | 239 | if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) { |
240 | /* This bcma device is not on a PCI host-bus. So the IRQs are | 240 | /* This bcma device is not on a PCI host-bus. So the IRQs are |
241 | * not routed through the PCI core. | 241 | * not routed through the PCI core. |
242 | * So we must not enable routing through the PCI core. */ | 242 | * So we must not enable routing through the PCI core. */ |
243 | goto out; | 243 | goto out; |
244 | } | 244 | } |
245 | 245 | ||
246 | pdev = pc->core->bus->host_pci; | ||
247 | |||
246 | err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); | 248 | err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); |
247 | if (err) | 249 | if (err) |
248 | goto out; | 250 | goto out; |
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index c7f93359acb0..f16f42d36071 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c | |||
@@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus) | |||
579 | if (!sprom) | 579 | if (!sprom) |
580 | return -ENOMEM; | 580 | return -ENOMEM; |
581 | 581 | ||
582 | if (bus->chipinfo.id == 0x4331) | 582 | if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) |
583 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); | 583 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); |
584 | 584 | ||
585 | pr_debug("SPROM offset 0x%x\n", offset); | 585 | pr_debug("SPROM offset 0x%x\n", offset); |
586 | bcma_sprom_read(bus, offset, sprom); | 586 | bcma_sprom_read(bus, offset, sprom); |
587 | 587 | ||
588 | if (bus->chipinfo.id == 0x4331) | 588 | if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) |
589 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); | 589 | bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); |
590 | 590 | ||
591 | err = bcma_sprom_valid(sprom); | 591 | err = bcma_sprom_valid(sprom); |
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index f518b99f53f5..6289f0eee24c 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c | |||
@@ -36,6 +36,13 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, | |||
36 | /* data ready? */ | 36 | /* data ready? */ |
37 | if (readl(trng->base + TRNG_ODATA) & 1) { | 37 | if (readl(trng->base + TRNG_ODATA) & 1) { |
38 | *data = readl(trng->base + TRNG_ODATA); | 38 | *data = readl(trng->base + TRNG_ODATA); |
39 | /* | ||
40 | * Ensure data ready is only set again AFTER the next data | ||
41 | * word is ready, in case it got set between checking ISR | ||
42 | * and reading ODATA, so we don't risk re-reading the | ||
43 | * same word. | ||
44 | */ | ||
45 | readl(trng->base + TRNG_ISR); | ||
39 | return 4; | 46 | return 4; |
40 | } else | 47 | } else |
41 | return 0; | 48 | return 0; |
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 32fe9ef5cc5c..98b06baafcc6 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -48,13 +48,13 @@ struct sh_cmt_priv { | |||
48 | unsigned long next_match_value; | 48 | unsigned long next_match_value; |
49 | unsigned long max_match_value; | 49 | unsigned long max_match_value; |
50 | unsigned long rate; | 50 | unsigned long rate; |
51 | spinlock_t lock; | 51 | raw_spinlock_t lock; |
52 | struct clock_event_device ced; | 52 | struct clock_event_device ced; |
53 | struct clocksource cs; | 53 | struct clocksource cs; |
54 | unsigned long total_cycles; | 54 | unsigned long total_cycles; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | static DEFINE_SPINLOCK(sh_cmt_lock); | 57 | static DEFINE_RAW_SPINLOCK(sh_cmt_lock); |
58 | 58 | ||
59 | #define CMSTR -1 /* shared register */ | 59 | #define CMSTR -1 /* shared register */ |
60 | #define CMCSR 0 /* channel register */ | 60 | #define CMCSR 0 /* channel register */ |
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | |||
139 | unsigned long flags, value; | 139 | unsigned long flags, value; |
140 | 140 | ||
141 | /* start stop register shared by multiple timer channels */ | 141 | /* start stop register shared by multiple timer channels */ |
142 | spin_lock_irqsave(&sh_cmt_lock, flags); | 142 | raw_spin_lock_irqsave(&sh_cmt_lock, flags); |
143 | value = sh_cmt_read(p, CMSTR); | 143 | value = sh_cmt_read(p, CMSTR); |
144 | 144 | ||
145 | if (start) | 145 | if (start) |
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) | |||
148 | value &= ~(1 << cfg->timer_bit); | 148 | value &= ~(1 << cfg->timer_bit); |
149 | 149 | ||
150 | sh_cmt_write(p, CMSTR, value); | 150 | sh_cmt_write(p, CMSTR, value); |
151 | spin_unlock_irqrestore(&sh_cmt_lock, flags); | 151 | raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); |
152 | } | 152 | } |
153 | 153 | ||
154 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) | 154 | static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) |
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) | |||
328 | { | 328 | { |
329 | unsigned long flags; | 329 | unsigned long flags; |
330 | 330 | ||
331 | spin_lock_irqsave(&p->lock, flags); | 331 | raw_spin_lock_irqsave(&p->lock, flags); |
332 | __sh_cmt_set_next(p, delta); | 332 | __sh_cmt_set_next(p, delta); |
333 | spin_unlock_irqrestore(&p->lock, flags); | 333 | raw_spin_unlock_irqrestore(&p->lock, flags); |
334 | } | 334 | } |
335 | 335 | ||
336 | static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) | 336 | static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) |
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) | |||
385 | int ret = 0; | 385 | int ret = 0; |
386 | unsigned long flags; | 386 | unsigned long flags; |
387 | 387 | ||
388 | spin_lock_irqsave(&p->lock, flags); | 388 | raw_spin_lock_irqsave(&p->lock, flags); |
389 | 389 | ||
390 | if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) | 390 | if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) |
391 | ret = sh_cmt_enable(p, &p->rate); | 391 | ret = sh_cmt_enable(p, &p->rate); |
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) | |||
398 | if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) | 398 | if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) |
399 | __sh_cmt_set_next(p, p->max_match_value); | 399 | __sh_cmt_set_next(p, p->max_match_value); |
400 | out: | 400 | out: |
401 | spin_unlock_irqrestore(&p->lock, flags); | 401 | raw_spin_unlock_irqrestore(&p->lock, flags); |
402 | 402 | ||
403 | return ret; | 403 | return ret; |
404 | } | 404 | } |
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) | |||
408 | unsigned long flags; | 408 | unsigned long flags; |
409 | unsigned long f; | 409 | unsigned long f; |
410 | 410 | ||
411 | spin_lock_irqsave(&p->lock, flags); | 411 | raw_spin_lock_irqsave(&p->lock, flags); |
412 | 412 | ||
413 | f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); | 413 | f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); |
414 | p->flags &= ~flag; | 414 | p->flags &= ~flag; |
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) | |||
420 | if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) | 420 | if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) |
421 | __sh_cmt_set_next(p, p->max_match_value); | 421 | __sh_cmt_set_next(p, p->max_match_value); |
422 | 422 | ||
423 | spin_unlock_irqrestore(&p->lock, flags); | 423 | raw_spin_unlock_irqrestore(&p->lock, flags); |
424 | } | 424 | } |
425 | 425 | ||
426 | static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) | 426 | static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) |
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) | |||
435 | unsigned long value; | 435 | unsigned long value; |
436 | int has_wrapped; | 436 | int has_wrapped; |
437 | 437 | ||
438 | spin_lock_irqsave(&p->lock, flags); | 438 | raw_spin_lock_irqsave(&p->lock, flags); |
439 | value = p->total_cycles; | 439 | value = p->total_cycles; |
440 | raw = sh_cmt_get_counter(p, &has_wrapped); | 440 | raw = sh_cmt_get_counter(p, &has_wrapped); |
441 | 441 | ||
442 | if (unlikely(has_wrapped)) | 442 | if (unlikely(has_wrapped)) |
443 | raw += p->match_value + 1; | 443 | raw += p->match_value + 1; |
444 | spin_unlock_irqrestore(&p->lock, flags); | 444 | raw_spin_unlock_irqrestore(&p->lock, flags); |
445 | 445 | ||
446 | return value + raw; | 446 | return value + raw; |
447 | } | 447 | } |
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name, | |||
591 | p->max_match_value = (1 << p->width) - 1; | 591 | p->max_match_value = (1 << p->width) - 1; |
592 | 592 | ||
593 | p->match_value = p->max_match_value; | 593 | p->match_value = p->max_match_value; |
594 | spin_lock_init(&p->lock); | 594 | raw_spin_lock_init(&p->lock); |
595 | 595 | ||
596 | if (clockevent_rating) | 596 | if (clockevent_rating) |
597 | sh_cmt_register_clockevent(p, name, clockevent_rating); | 597 | sh_cmt_register_clockevent(p, name, clockevent_rating); |
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index a2172f690418..d9b76ca64a61 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c | |||
@@ -43,7 +43,7 @@ struct sh_mtu2_priv { | |||
43 | struct clock_event_device ced; | 43 | struct clock_event_device ced; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static DEFINE_SPINLOCK(sh_mtu2_lock); | 46 | static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); |
47 | 47 | ||
48 | #define TSTR -1 /* shared register */ | 48 | #define TSTR -1 /* shared register */ |
49 | #define TCR 0 /* channel register */ | 49 | #define TCR 0 /* channel register */ |
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) | |||
107 | unsigned long flags, value; | 107 | unsigned long flags, value; |
108 | 108 | ||
109 | /* start stop register shared by multiple timer channels */ | 109 | /* start stop register shared by multiple timer channels */ |
110 | spin_lock_irqsave(&sh_mtu2_lock, flags); | 110 | raw_spin_lock_irqsave(&sh_mtu2_lock, flags); |
111 | value = sh_mtu2_read(p, TSTR); | 111 | value = sh_mtu2_read(p, TSTR); |
112 | 112 | ||
113 | if (start) | 113 | if (start) |
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) | |||
116 | value &= ~(1 << cfg->timer_bit); | 116 | value &= ~(1 << cfg->timer_bit); |
117 | 117 | ||
118 | sh_mtu2_write(p, TSTR, value); | 118 | sh_mtu2_write(p, TSTR, value); |
119 | spin_unlock_irqrestore(&sh_mtu2_lock, flags); | 119 | raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); |
120 | } | 120 | } |
121 | 121 | ||
122 | static int sh_mtu2_enable(struct sh_mtu2_priv *p) | 122 | static int sh_mtu2_enable(struct sh_mtu2_priv *p) |
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 97f54b634be4..c1b51d49d106 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
@@ -45,7 +45,7 @@ struct sh_tmu_priv { | |||
45 | struct clocksource cs; | 45 | struct clocksource cs; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static DEFINE_SPINLOCK(sh_tmu_lock); | 48 | static DEFINE_RAW_SPINLOCK(sh_tmu_lock); |
49 | 49 | ||
50 | #define TSTR -1 /* shared register */ | 50 | #define TSTR -1 /* shared register */ |
51 | #define TCOR 0 /* channel register */ | 51 | #define TCOR 0 /* channel register */ |
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) | |||
95 | unsigned long flags, value; | 95 | unsigned long flags, value; |
96 | 96 | ||
97 | /* start stop register shared by multiple timer channels */ | 97 | /* start stop register shared by multiple timer channels */ |
98 | spin_lock_irqsave(&sh_tmu_lock, flags); | 98 | raw_spin_lock_irqsave(&sh_tmu_lock, flags); |
99 | value = sh_tmu_read(p, TSTR); | 99 | value = sh_tmu_read(p, TSTR); |
100 | 100 | ||
101 | if (start) | 101 | if (start) |
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) | |||
104 | value &= ~(1 << cfg->timer_bit); | 104 | value &= ~(1 << cfg->timer_bit); |
105 | 105 | ||
106 | sh_tmu_write(p, TSTR, value); | 106 | sh_tmu_write(p, TSTR, value); |
107 | spin_unlock_irqrestore(&sh_tmu_lock, flags); | 107 | raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); |
108 | } | 108 | } |
109 | 109 | ||
110 | static int sh_tmu_enable(struct sh_tmu_priv *p) | 110 | static int sh_tmu_enable(struct sh_tmu_priv *p) |
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) | |||
245 | 245 | ||
246 | sh_tmu_enable(p); | 246 | sh_tmu_enable(p); |
247 | 247 | ||
248 | /* TODO: calculate good shift from rate and counter bit width */ | 248 | clockevents_config(ced, p->rate); |
249 | |||
250 | ced->shift = 32; | ||
251 | ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); | ||
252 | ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced); | ||
253 | ced->min_delta_ns = 5000; | ||
254 | 249 | ||
255 | if (periodic) { | 250 | if (periodic) { |
256 | p->periodic = (p->rate + HZ/2) / HZ; | 251 | p->periodic = (p->rate + HZ/2) / HZ; |
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, | |||
323 | ced->set_mode = sh_tmu_clock_event_mode; | 318 | ced->set_mode = sh_tmu_clock_event_mode; |
324 | 319 | ||
325 | dev_info(&p->pdev->dev, "used for clock events\n"); | 320 | dev_info(&p->pdev->dev, "used for clock events\n"); |
326 | clockevents_register_device(ced); | 321 | |
322 | clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); | ||
327 | 323 | ||
328 | ret = setup_irq(p->irqaction.irq, &p->irqaction); | 324 | ret = setup_irq(p->irqaction.irq, &p->irqaction); |
329 | if (ret) { | 325 | if (ret) { |
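The sh_tmu hunks drop the hand-rolled mult/shift setup in favour of clockevents_config() and clockevents_config_and_register(); the removed lines were the usual fixed-point conversion, where div_sc() computes roughly (rate << shift) / NSEC_PER_SEC and a delta in nanoseconds then becomes (delta_ns * mult) >> shift ticks. A standalone sketch of that arithmetic, using a made-up timer rate; the real rate comes from the clock framework.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Hypothetical TMU input clock (Hz); illustrative only. */
	uint64_t rate = 33333333;
	unsigned int shift = 32;	/* the value the removed code hard-coded */

	/* mult ~ (rate << shift) / NSEC_PER_SEC */
	uint64_t mult = (rate << shift) / NSEC_PER_SEC;

	/* Program a 1 ms event: nanoseconds -> timer ticks. */
	uint64_t delta_ns = 1000000;
	uint64_t ticks = (delta_ns * mult) >> shift;

	printf("mult=%llu, 1 ms = %llu ticks (~rate/1000)\n",
	       (unsigned long long)mult, (unsigned long long)ticks);
	return 0;
}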
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 4e7dd2b4843d..c16554122ccd 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -52,6 +52,7 @@ struct evergreen_cs_track { | |||
52 | u32 cb_color_view[12]; | 52 | u32 cb_color_view[12]; |
53 | u32 cb_color_pitch[12]; | 53 | u32 cb_color_pitch[12]; |
54 | u32 cb_color_slice[12]; | 54 | u32 cb_color_slice[12]; |
55 | u32 cb_color_slice_idx[12]; | ||
55 | u32 cb_color_attrib[12]; | 56 | u32 cb_color_attrib[12]; |
56 | u32 cb_color_cmask_slice[8];/* unused */ | 57 | u32 cb_color_cmask_slice[8];/* unused */ |
57 | u32 cb_color_fmask_slice[8];/* unused */ | 58 | u32 cb_color_fmask_slice[8];/* unused */ |
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track) | |||
127 | track->cb_color_info[i] = 0; | 128 | track->cb_color_info[i] = 0; |
128 | track->cb_color_view[i] = 0xFFFFFFFF; | 129 | track->cb_color_view[i] = 0xFFFFFFFF; |
129 | track->cb_color_pitch[i] = 0; | 130 | track->cb_color_pitch[i] = 0; |
130 | track->cb_color_slice[i] = 0; | 131 | track->cb_color_slice[i] = 0xfffffff; |
132 | track->cb_color_slice_idx[i] = 0; | ||
131 | } | 133 | } |
132 | track->cb_target_mask = 0xFFFFFFFF; | 134 | track->cb_target_mask = 0xFFFFFFFF; |
133 | track->cb_shader_mask = 0xFFFFFFFF; | 135 | track->cb_shader_mask = 0xFFFFFFFF; |
134 | track->cb_dirty = true; | 136 | track->cb_dirty = true; |
135 | 137 | ||
138 | track->db_depth_slice = 0xffffffff; | ||
136 | track->db_depth_view = 0xFFFFC000; | 139 | track->db_depth_view = 0xFFFFC000; |
137 | track->db_depth_size = 0xFFFFFFFF; | 140 | track->db_depth_size = 0xFFFFFFFF; |
138 | track->db_depth_control = 0xFFFFFFFF; | 141 | track->db_depth_control = 0xFFFFFFFF; |
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, | |||
250 | { | 253 | { |
251 | struct evergreen_cs_track *track = p->track; | 254 | struct evergreen_cs_track *track = p->track; |
252 | unsigned palign, halign, tileb, slice_pt; | 255 | unsigned palign, halign, tileb, slice_pt; |
256 | unsigned mtile_pr, mtile_ps, mtileb; | ||
253 | 257 | ||
254 | tileb = 64 * surf->bpe * surf->nsamples; | 258 | tileb = 64 * surf->bpe * surf->nsamples; |
255 | palign = track->group_size / (8 * surf->bpe * surf->nsamples); | ||
256 | palign = MAX(8, palign); | ||
257 | slice_pt = 1; | 259 | slice_pt = 1; |
258 | if (tileb > surf->tsplit) { | 260 | if (tileb > surf->tsplit) { |
259 | slice_pt = tileb / surf->tsplit; | 261 | slice_pt = tileb / surf->tsplit; |
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, | |||
262 | /* macro tile width & height */ | 264 | /* macro tile width & height */ |
263 | palign = (8 * surf->bankw * track->npipes) * surf->mtilea; | 265 | palign = (8 * surf->bankw * track->npipes) * surf->mtilea; |
264 | halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; | 266 | halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; |
265 | surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; | 267 | mtileb = (palign / 8) * (halign / 8) * tileb; |
268 | mtile_pr = surf->nbx / palign; | ||
269 | mtile_ps = (mtile_pr * surf->nby) / halign; | ||
270 | surf->layer_size = mtile_ps * mtileb * slice_pt; | ||
266 | surf->base_align = (palign / 8) * (halign / 8) * tileb; | 271 | surf->base_align = (palign / 8) * (halign / 8) * tileb; |
267 | surf->palign = palign; | 272 | surf->palign = palign; |
268 | surf->halign = halign; | 273 | surf->halign = halign; |
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i | |||
434 | 439 | ||
435 | offset += surf.layer_size * mslice; | 440 | offset += surf.layer_size * mslice; |
436 | if (offset > radeon_bo_size(track->cb_color_bo[id])) { | 441 | if (offset > radeon_bo_size(track->cb_color_bo[id])) { |
442 | /* old ddx are broken: they allocate the bo with w*h*bpp but | ||
443 | * program slice with ALIGN(h, 8); catch this and patch the | ||
444 | * command stream. | ||
445 | */ | ||
446 | if (!surf.mode) { | ||
447 | volatile u32 *ib = p->ib.ptr; | ||
448 | unsigned long tmp, nby, bsize, size, min = 0; | ||
449 | |||
450 | /* find the height the ddx wants */ | ||
451 | if (surf.nby > 8) { | ||
452 | min = surf.nby - 8; | ||
453 | } | ||
454 | bsize = radeon_bo_size(track->cb_color_bo[id]); | ||
455 | tmp = track->cb_color_bo_offset[id] << 8; | ||
456 | for (nby = surf.nby; nby > min; nby--) { | ||
457 | size = nby * surf.nbx * surf.bpe * surf.nsamples; | ||
458 | if ((tmp + size * mslice) <= bsize) { | ||
459 | break; | ||
460 | } | ||
461 | } | ||
462 | if (nby > min) { | ||
463 | surf.nby = nby; | ||
464 | slice = ((nby * surf.nbx) / 64) - 1; | ||
465 | if (!evergreen_surface_check(p, &surf, "cb")) { | ||
466 | /* check if this one works */ | ||
467 | tmp += surf.layer_size * mslice; | ||
468 | if (tmp <= bsize) { | ||
469 | ib[track->cb_color_slice_idx[id]] = slice; | ||
470 | goto old_ddx_ok; | ||
471 | } | ||
472 | } | ||
473 | } | ||
474 | } | ||
437 | dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " | 475 | dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " |
438 | "offset %d, max layer %d, bo size %ld, slice %d)\n", | 476 | "offset %d, max layer %d, bo size %ld, slice %d)\n", |
439 | __func__, __LINE__, id, surf.layer_size, | 477 | __func__, __LINE__, id, surf.layer_size, |
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i | |||
446 | surf.tsplit, surf.mtilea); | 484 | surf.tsplit, surf.mtilea); |
447 | return -EINVAL; | 485 | return -EINVAL; |
448 | } | 486 | } |
487 | old_ddx_ok: | ||
449 | 488 | ||
450 | return 0; | 489 | return 0; |
451 | } | 490 | } |
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1532 | case CB_COLOR7_SLICE: | 1571 | case CB_COLOR7_SLICE: |
1533 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; | 1572 | tmp = (reg - CB_COLOR0_SLICE) / 0x3c; |
1534 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1573 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
1574 | track->cb_color_slice_idx[tmp] = idx; | ||
1535 | track->cb_dirty = true; | 1575 | track->cb_dirty = true; |
1536 | break; | 1576 | break; |
1537 | case CB_COLOR8_SLICE: | 1577 | case CB_COLOR8_SLICE: |
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1540 | case CB_COLOR11_SLICE: | 1580 | case CB_COLOR11_SLICE: |
1541 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; | 1581 | tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; |
1542 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); | 1582 | track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); |
1583 | track->cb_color_slice_idx[tmp] = idx; | ||
1543 | track->cb_dirty = true; | 1584 | track->cb_dirty = true; |
1544 | break; | 1585 | break; |
1545 | case CB_COLOR0_ATTRIB: | 1586 | case CB_COLOR0_ATTRIB: |
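The evergreen workaround above walks downward from the programmed height to the largest nby whose linear slice still fits in the buffer object, then patches the SLICE dword in the command stream. Below is a standalone sketch of just that search, with made-up surface numbers that mimic the w*h*bpp vs ALIGN(h, 8) mismatch the comment describes; it is the arithmetic only, not the CS parser.

#include <stdio.h>

int main(void)
{
	/* Hypothetical linear (mode 0) colour buffer, mimicking the bug:
	 * the ddx allocated 1024 x 771 x 4 bytes but programmed the slice
	 * register from ALIGN(771, 8) = 776 rows. */
	unsigned long nbx = 1024, nby = 776;
	unsigned long bpe = 4, nsamples = 1, mslice = 1;
	unsigned long offset = 0;
	unsigned long bo_size = 1024UL * 771 * 4;

	unsigned long min = nby > 8 ? nby - 8 : 0;
	unsigned long fit;

	for (fit = nby; fit > min; fit--) {
		unsigned long size = fit * nbx * bpe * nsamples;
		if (offset + size * mslice <= bo_size)
			break;
	}
	if (fit > min)
		printf("patched nby from %lu to %lu, slice dword = %lu\n",
		       nby, fit, (fit * nbx) / 64 - 1);
	else
		printf("buffer really is too small\n");
	return 0;
}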
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f0bb2b543b13..03e5f5df40f1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -57,9 +57,10 @@ | |||
57 | * 2.13.0 - virtual memory support, streamout | 57 | * 2.13.0 - virtual memory support, streamout |
58 | * 2.14.0 - add evergreen tiling informations | 58 | * 2.14.0 - add evergreen tiling informations |
59 | * 2.15.0 - add max_pipes query | 59 | * 2.15.0 - add max_pipes query |
60 | * 2.16.0 - fix evergreen 2D tiled surface calculation | ||
60 | */ | 61 | */ |
61 | #define KMS_DRIVER_MAJOR 2 | 62 | #define KMS_DRIVER_MAJOR 2 |
62 | #define KMS_DRIVER_MINOR 15 | 63 | #define KMS_DRIVER_MINOR 16 |
63 | #define KMS_DRIVER_PATCHLEVEL 0 | 64 | #define KMS_DRIVER_PATCHLEVEL 0 |
64 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 65 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
65 | int radeon_driver_unload_kms(struct drm_device *dev); | 66 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b67cfcaa661f..36f4b28c1b90 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1204 | (*destroy)(bo); | 1204 | (*destroy)(bo); |
1205 | else | 1205 | else |
1206 | kfree(bo); | 1206 | kfree(bo); |
1207 | ttm_mem_global_free(mem_glob, acc_size); | ||
1207 | return -EINVAL; | 1208 | return -EINVAL; |
1208 | } | 1209 | } |
1209 | bo->destroy = destroy; | 1210 | bo->destroy = destroy; |
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev, | |||
1307 | struct ttm_buffer_object **p_bo) | 1308 | struct ttm_buffer_object **p_bo) |
1308 | { | 1309 | { |
1309 | struct ttm_buffer_object *bo; | 1310 | struct ttm_buffer_object *bo; |
1310 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; | ||
1311 | size_t acc_size; | 1311 | size_t acc_size; |
1312 | int ret; | 1312 | int ret; |
1313 | 1313 | ||
1314 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); | ||
1315 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); | ||
1316 | if (unlikely(ret != 0)) | ||
1317 | return ret; | ||
1318 | |||
1319 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); | 1314 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); |
1320 | 1315 | if (unlikely(bo == NULL)) | |
1321 | if (unlikely(bo == NULL)) { | ||
1322 | ttm_mem_global_free(mem_glob, acc_size); | ||
1323 | return -ENOMEM; | 1316 | return -ENOMEM; |
1324 | } | ||
1325 | 1317 | ||
1318 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); | ||
1326 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, | 1319 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, |
1327 | buffer_start, interruptible, | 1320 | buffer_start, interruptible, |
1328 | persistent_swap_storage, acc_size, NULL, NULL); | 1321 | persistent_swap_storage, acc_size, NULL, NULL); |
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 38f9534ac513..5b3c7d135dc9 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c | |||
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head) | |||
190 | return NULL; | 190 | return NULL; |
191 | } | 191 | } |
192 | 192 | ||
193 | int vga_switcheroo_get_client_state(struct pci_dev *pdev) | ||
194 | { | ||
195 | struct vga_switcheroo_client *client; | ||
196 | |||
197 | client = find_client_from_pci(&vgasr_priv.clients, pdev); | ||
198 | if (!client) | ||
199 | return VGA_SWITCHEROO_NOT_FOUND; | ||
200 | if (!vgasr_priv.active) | ||
201 | return VGA_SWITCHEROO_INIT; | ||
202 | return client->pwr_state; | ||
203 | } | ||
204 | EXPORT_SYMBOL(vga_switcheroo_get_client_state); | ||
205 | |||
193 | void vga_switcheroo_unregister_client(struct pci_dev *pdev) | 206 | void vga_switcheroo_unregister_client(struct pci_dev *pdev) |
194 | { | 207 | { |
195 | struct vga_switcheroo_client *client; | 208 | struct vga_switcheroo_client *client; |
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) | |||
291 | vga_switchon(new_client); | 304 | vga_switchon(new_client); |
292 | 305 | ||
293 | vga_set_default_device(new_client->pdev); | 306 | vga_set_default_device(new_client->pdev); |
294 | set_audio_state(new_client->id, VGA_SWITCHEROO_ON); | ||
295 | |||
296 | return 0; | 307 | return 0; |
297 | } | 308 | } |
298 | 309 | ||
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) | |||
308 | 319 | ||
309 | active->active = false; | 320 | active->active = false; |
310 | 321 | ||
322 | set_audio_state(active->id, VGA_SWITCHEROO_OFF); | ||
323 | |||
311 | if (new_client->fb_info) { | 324 | if (new_client->fb_info) { |
312 | struct fb_event event; | 325 | struct fb_event event; |
313 | event.info = new_client->fb_info; | 326 | event.info = new_client->fb_info; |
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) | |||
321 | if (new_client->ops->reprobe) | 334 | if (new_client->ops->reprobe) |
322 | new_client->ops->reprobe(new_client->pdev); | 335 | new_client->ops->reprobe(new_client->pdev); |
323 | 336 | ||
324 | set_audio_state(active->id, VGA_SWITCHEROO_OFF); | ||
325 | |||
326 | if (active->pwr_state == VGA_SWITCHEROO_ON) | 337 | if (active->pwr_state == VGA_SWITCHEROO_ON) |
327 | vga_switchoff(active); | 338 | vga_switchoff(active); |
328 | 339 | ||
340 | set_audio_state(new_client->id, VGA_SWITCHEROO_ON); | ||
341 | |||
329 | new_client->active = true; | 342 | new_client->active = true; |
330 | return 0; | 343 | return 0; |
331 | } | 344 | } |
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
371 | /* pwr off the device not in use */ | 384 | /* pwr off the device not in use */ |
372 | if (strncmp(usercmd, "OFF", 3) == 0) { | 385 | if (strncmp(usercmd, "OFF", 3) == 0) { |
373 | list_for_each_entry(client, &vgasr_priv.clients, list) { | 386 | list_for_each_entry(client, &vgasr_priv.clients, list) { |
374 | if (client->active) | 387 | if (client->active || client_is_audio(client)) |
375 | continue; | 388 | continue; |
389 | set_audio_state(client->id, VGA_SWITCHEROO_OFF); | ||
376 | if (client->pwr_state == VGA_SWITCHEROO_ON) | 390 | if (client->pwr_state == VGA_SWITCHEROO_ON) |
377 | vga_switchoff(client); | 391 | vga_switchoff(client); |
378 | } | 392 | } |
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, | |||
381 | /* pwr on the device not in use */ | 395 | /* pwr on the device not in use */ |
382 | if (strncmp(usercmd, "ON", 2) == 0) { | 396 | if (strncmp(usercmd, "ON", 2) == 0) { |
383 | list_for_each_entry(client, &vgasr_priv.clients, list) { | 397 | list_for_each_entry(client, &vgasr_priv.clients, list) { |
384 | if (client->active) | 398 | if (client->active || client_is_audio(client)) |
385 | continue; | 399 | continue; |
386 | if (client->pwr_state == VGA_SWITCHEROO_OFF) | 400 | if (client->pwr_state == VGA_SWITCHEROO_OFF) |
387 | vga_switchon(client); | 401 | vga_switchon(client); |
402 | set_audio_state(client->id, VGA_SWITCHEROO_ON); | ||
388 | } | 403 | } |
389 | goto out; | 404 | goto out; |
390 | } | 405 | } |
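The new vga_switcheroo_get_client_state() export above lets a GPU driver ask, at probe time, whether switcheroo currently has it powered down. A minimal usage sketch, assuming only the return values visible in the hunk (VGA_SWITCHEROO_NOT_FOUND, VGA_SWITCHEROO_INIT, or the client's saved pwr_state); the helper name is illustrative and not part of the patch:

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* Illustrative helper: treat "not managed by switcheroo" and "no switch
 * has happened yet" as powered on, and report off only when switcheroo
 * actually says the client is off. */
static bool demo_gpu_is_switched_off(struct pci_dev *pdev)
{
	int state = vga_switcheroo_get_client_state(pdev);

	if (state == VGA_SWITCHEROO_NOT_FOUND || state == VGA_SWITCHEROO_INIT)
		return false;

	return state == VGA_SWITCHEROO_OFF;
}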
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 8716066a2f2b..bcb507b0cfd4 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c | |||
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = { | |||
236 | */ | 236 | */ |
237 | static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) | 237 | static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) |
238 | { | 238 | { |
239 | unsigned long cycle_time; | 239 | unsigned long cycle_time = 0; |
240 | int use_dma_info = 0; | 240 | int use_dma_info = 0; |
241 | const u8 xfer_mode = drive->dma_mode; | 241 | const u8 xfer_mode = drive->dma_mode; |
242 | 242 | ||
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) | |||
271 | 271 | ||
272 | ide_set_drivedata(drive, (void *)cycle_time); | 272 | ide_set_drivedata(drive, (void *)cycle_time); |
273 | 273 | ||
274 | printk("%s: %s selected (peak %dMB/s)\n", drive->name, | 274 | printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n", |
275 | ide_xfer_verbose(xfer_mode), | 275 | drive->name, ide_xfer_verbose(xfer_mode), |
276 | 2000 / (unsigned long)ide_get_drivedata(drive)); | 276 | 2000 / (cycle_time ? cycle_time : (unsigned long) -1)); |
277 | } | 277 | } |
278 | 278 | ||
279 | static const struct ide_port_ops icside_v6_port_ops = { | 279 | static const struct ide_port_ops icside_v6_port_ops = { |
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = { | |||
375 | .dma_test_irq = icside_dma_test_irq, | 375 | .dma_test_irq = icside_dma_test_irq, |
376 | .dma_lost_irq = ide_dma_lost_irq, | 376 | .dma_lost_irq = ide_dma_lost_irq, |
377 | }; | 377 | }; |
378 | #else | ||
379 | #define icside_v6_dma_ops NULL | ||
380 | #endif | 378 | #endif |
381 | 379 | ||
382 | static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) | 380 | static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) |
@@ -456,7 +454,6 @@ err_free: | |||
456 | static const struct ide_port_info icside_v6_port_info __initdata = { | 454 | static const struct ide_port_info icside_v6_port_info __initdata = { |
457 | .init_dma = icside_dma_off_init, | 455 | .init_dma = icside_dma_off_init, |
458 | .port_ops = &icside_v6_no_dma_port_ops, | 456 | .port_ops = &icside_v6_no_dma_port_ops, |
459 | .dma_ops = &icside_v6_dma_ops, | ||
460 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, | 457 | .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, |
461 | .mwdma_mask = ATA_MWDMA2, | 458 | .mwdma_mask = ATA_MWDMA2, |
462 | .swdma_mask = ATA_SWDMA2, | 459 | .swdma_mask = ATA_SWDMA2, |
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) | |||
518 | 515 | ||
519 | ecard_set_drvdata(ec, state); | 516 | ecard_set_drvdata(ec, state); |
520 | 517 | ||
518 | #ifdef CONFIG_BLK_DEV_IDEDMA_ICS | ||
521 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { | 519 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { |
522 | d.init_dma = icside_dma_init; | 520 | d.init_dma = icside_dma_init; |
523 | d.port_ops = &icside_v6_port_ops; | 521 | d.port_ops = &icside_v6_port_ops; |
524 | } else | 522 | d.dma_ops = &icside_v6_dma_ops; |
525 | d.dma_ops = NULL; | 523 | } |
524 | #endif | ||
526 | 525 | ||
527 | ret = ide_host_register(host, &d, hws); | 526 | ret = ide_host_register(host, &d, hws); |
528 | if (ret) | 527 | if (ret) |
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 28e344ea514c..f1e922e2479a 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c | |||
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) | |||
167 | { | 167 | { |
168 | int *is_kme = priv_data; | 168 | int *is_kme = priv_data; |
169 | 169 | ||
170 | if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { | 170 | if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH) |
171 | != IO_DATA_PATH_WIDTH_8) { | ||
171 | pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; | 172 | pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; |
172 | pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; | 173 | pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; |
173 | } | 174 | } |
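The ide-cs hunk above replaces a single-bit test with a masked comparison because IO_DATA_PATH_WIDTH is a multi-bit field: an encoding such as "auto" can share a bit with the 8-bit encoding, so testing one flag bit misclassifies it. A small illustration with made-up values (the real constants live in the PCMCIA headers):

#include <stdbool.h>

/* Hypothetical encodings, for illustration only: AUTO shares the 8-bit
 * flag's bit, so a plain bit test cannot tell the two apart. */
#define DEMO_WIDTH_MASK	0x18
#define DEMO_WIDTH_8	0x08
#define DEMO_WIDTH_16	0x10
#define DEMO_WIDTH_AUTO	0x18

static bool demo_is_8bit(unsigned long flags)
{
	/* Wrong: (DEMO_WIDTH_AUTO & DEMO_WIDTH_8) is non-zero, so AUTO
	 * would look like an 8-bit setting. */
	/* return flags & DEMO_WIDTH_8; */

	/* Right: mask the whole field, then compare the encoding. */
	return (flags & DEMO_WIDTH_MASK) == DEMO_WIDTH_8;
}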
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 04cb8c88d74b..12b2b55c519e 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -379,7 +379,7 @@ config LEDS_NETXBIG | |||
379 | 379 | ||
380 | config LEDS_ASIC3 | 380 | config LEDS_ASIC3 |
381 | bool "LED support for the HTC ASIC3" | 381 | bool "LED support for the HTC ASIC3" |
382 | depends on LEDS_CLASS | 382 | depends on LEDS_CLASS=y |
383 | depends on MFD_ASIC3 | 383 | depends on MFD_ASIC3 |
384 | default y | 384 | default y |
385 | help | 385 | help |
@@ -390,7 +390,7 @@ config LEDS_ASIC3 | |||
390 | 390 | ||
391 | config LEDS_RENESAS_TPU | 391 | config LEDS_RENESAS_TPU |
392 | bool "LED support for Renesas TPU" | 392 | bool "LED support for Renesas TPU" |
393 | depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO | 393 | depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO |
394 | help | 394 | help |
395 | This option enables build of the LED TPU platform driver, | 395 | This option enables build of the LED TPU platform driver, |
396 | suitable to drive any TPU channel on newer Renesas SoCs. | 396 | suitable to drive any TPU channel on newer Renesas SoCs. |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 8ee92c81aec2..e663e6f413e9 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev) | |||
29 | led_cdev->brightness = led_cdev->brightness_get(led_cdev); | 29 | led_cdev->brightness = led_cdev->brightness_get(led_cdev); |
30 | } | 30 | } |
31 | 31 | ||
32 | static ssize_t led_brightness_show(struct device *dev, | 32 | static ssize_t led_brightness_show(struct device *dev, |
33 | struct device_attribute *attr, char *buf) | 33 | struct device_attribute *attr, char *buf) |
34 | { | 34 | { |
35 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | 35 | struct led_classdev *led_cdev = dev_get_drvdata(dev); |
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index d6860043f6f9..d65353d8d3fc 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c | |||
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev, | |||
44 | if (!led_cdev->blink_brightness) | 44 | if (!led_cdev->blink_brightness) |
45 | led_cdev->blink_brightness = led_cdev->max_brightness; | 45 | led_cdev->blink_brightness = led_cdev->max_brightness; |
46 | 46 | ||
47 | if (led_get_trigger_data(led_cdev) && | ||
48 | delay_on == led_cdev->blink_delay_on && | ||
49 | delay_off == led_cdev->blink_delay_off) | ||
50 | return; | ||
51 | |||
52 | led_stop_software_blink(led_cdev); | ||
53 | |||
54 | led_cdev->blink_delay_on = delay_on; | 47 | led_cdev->blink_delay_on = delay_on; |
55 | led_cdev->blink_delay_off = delay_off; | 48 | led_cdev->blink_delay_off = delay_off; |
56 | 49 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2ee8cf9e8a3b..b9c2ae62166d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -76,6 +76,7 @@ | |||
76 | #include <net/route.h> | 76 | #include <net/route.h> |
77 | #include <net/net_namespace.h> | 77 | #include <net/net_namespace.h> |
78 | #include <net/netns/generic.h> | 78 | #include <net/netns/generic.h> |
79 | #include <net/pkt_sched.h> | ||
79 | #include "bonding.h" | 80 | #include "bonding.h" |
80 | #include "bond_3ad.h" | 81 | #include "bond_3ad.h" |
81 | #include "bond_alb.h" | 82 | #include "bond_alb.h" |
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) | |||
381 | return next; | 382 | return next; |
382 | } | 383 | } |
383 | 384 | ||
384 | #define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) | ||
385 | |||
386 | /** | 385 | /** |
387 | * bond_dev_queue_xmit - Prepare skb for xmit. | 386 | * bond_dev_queue_xmit - Prepare skb for xmit. |
388 | * | 387 | * |
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, | |||
395 | { | 394 | { |
396 | skb->dev = slave_dev; | 395 | skb->dev = slave_dev; |
397 | 396 | ||
398 | skb->queue_mapping = bond_queue_mapping(skb); | 397 | BUILD_BUG_ON(sizeof(skb->queue_mapping) != |
398 | sizeof(qdisc_skb_cb(skb)->bond_queue_mapping)); | ||
399 | skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping; | ||
399 | 400 | ||
400 | if (unlikely(netpoll_tx_running(slave_dev))) | 401 | if (unlikely(netpoll_tx_running(slave_dev))) |
401 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); | 402 | bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); |
@@ -4171,7 +4172,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
4171 | /* | 4172 | /* |
4172 | * Save the original txq to restore before passing to the driver | 4173 | * Save the original txq to restore before passing to the driver |
4173 | */ | 4174 | */ |
4174 | bond_queue_mapping(skb) = skb->queue_mapping; | 4175 | qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping; |
4175 | 4176 | ||
4176 | if (unlikely(txq >= dev->real_num_tx_queues)) { | 4177 | if (unlikely(txq >= dev->real_num_tx_queues)) { |
4177 | do { | 4178 | do { |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index aef42f045320..485bedb8278c 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d, | |||
1082 | } | 1082 | } |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | pr_info("%s: Unable to set %.*s as primary slave.\n", | 1085 | strncpy(bond->params.primary, ifname, IFNAMSIZ); |
1086 | bond->dev->name, (int)strlen(buf) - 1, buf); | 1086 | bond->params.primary[IFNAMSIZ - 1] = 0; |
1087 | |||
1088 | pr_info("%s: Recording %s as primary, " | ||
1089 | "but it has not been enslaved to %s yet.\n", | ||
1090 | bond->dev->name, ifname, bond->dev->name); | ||
1087 | out: | 1091 | out: |
1088 | write_unlock_bh(&bond->curr_slave_lock); | 1092 | write_unlock_bh(&bond->curr_slave_lock); |
1089 | read_unlock(&bond->lock); | 1093 | read_unlock(&bond->lock); |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 536bda072a16..8dc84d66eea1 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev, | |||
686 | * | 686 | * |
687 | * We iterate from priv->tx_echo to priv->tx_next and check if the | 687 | * We iterate from priv->tx_echo to priv->tx_next and check if the |
688 | * packet has been transmitted, echo it back to the CAN framework. | 688 | * packet has been transmitted, echo it back to the CAN framework. |
689 | * If we discover a not yet transmitted package, stop looking for more. | 689 | * If we discover a not yet transmitted packet, stop looking for more. |
690 | */ | 690 | */ |
691 | static void c_can_do_tx(struct net_device *dev) | 691 | static void c_can_do_tx(struct net_device *dev) |
692 | { | 692 | { |
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev) | |||
698 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { | 698 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { |
699 | msg_obj_no = get_tx_echo_msg_obj(priv); | 699 | msg_obj_no = get_tx_echo_msg_obj(priv); |
700 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); | 700 | val = c_can_read_reg32(priv, &priv->regs->txrqst1); |
701 | if (!(val & (1 << msg_obj_no))) { | 701 | if (!(val & (1 << (msg_obj_no - 1)))) { |
702 | can_get_echo_skb(dev, | 702 | can_get_echo_skb(dev, |
703 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); | 703 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); |
704 | stats->tx_bytes += priv->read_reg(priv, | 704 | stats->tx_bytes += priv->read_reg(priv, |
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev) | |||
706 | & IF_MCONT_DLC_MASK; | 706 | & IF_MCONT_DLC_MASK; |
707 | stats->tx_packets++; | 707 | stats->tx_packets++; |
708 | c_can_inval_msg_object(dev, 0, msg_obj_no); | 708 | c_can_inval_msg_object(dev, 0, msg_obj_no); |
709 | } else { | ||
710 | break; | ||
709 | } | 711 | } |
710 | } | 712 | } |
711 | 713 | ||
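The key change in the c_can_do_tx() hunk above is the shifted mask: message objects are numbered from 1, while the TXRQST register bits are numbered from 0, so object N corresponds to bit N-1, and the loop now also stops at the first object that is still pending. A one-line sketch of that bit mapping (the helper name is illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

/* Object numbers are 1-based, TXRQST bits are 0-based. */
static inline bool demo_tx_still_pending(u32 txrqst, unsigned int msg_obj_no)
{
	return txrqst & BIT(msg_obj_no - 1);
}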
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota) | |||
950 | struct net_device *dev = napi->dev; | 952 | struct net_device *dev = napi->dev; |
951 | struct c_can_priv *priv = netdev_priv(dev); | 953 | struct c_can_priv *priv = netdev_priv(dev); |
952 | 954 | ||
953 | irqstatus = priv->read_reg(priv, &priv->regs->interrupt); | 955 | irqstatus = priv->irqstatus; |
954 | if (!irqstatus) | 956 | if (!irqstatus) |
955 | goto end; | 957 | goto end; |
956 | 958 | ||
@@ -1028,12 +1030,11 @@ end: | |||
1028 | 1030 | ||
1029 | static irqreturn_t c_can_isr(int irq, void *dev_id) | 1031 | static irqreturn_t c_can_isr(int irq, void *dev_id) |
1030 | { | 1032 | { |
1031 | u16 irqstatus; | ||
1032 | struct net_device *dev = (struct net_device *)dev_id; | 1033 | struct net_device *dev = (struct net_device *)dev_id; |
1033 | struct c_can_priv *priv = netdev_priv(dev); | 1034 | struct c_can_priv *priv = netdev_priv(dev); |
1034 | 1035 | ||
1035 | irqstatus = priv->read_reg(priv, &priv->regs->interrupt); | 1036 | priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt); |
1036 | if (!irqstatus) | 1037 | if (!priv->irqstatus) |
1037 | return IRQ_NONE; | 1038 | return IRQ_NONE; |
1038 | 1039 | ||
1039 | /* disable all interrupts and schedule the NAPI */ | 1040 | /* disable all interrupts and schedule the NAPI */ |
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev) | |||
1063 | goto exit_irq_fail; | 1064 | goto exit_irq_fail; |
1064 | } | 1065 | } |
1065 | 1066 | ||
1067 | napi_enable(&priv->napi); | ||
1068 | |||
1066 | /* start the c_can controller */ | 1069 | /* start the c_can controller */ |
1067 | c_can_start(dev); | 1070 | c_can_start(dev); |
1068 | 1071 | ||
1069 | napi_enable(&priv->napi); | ||
1070 | netif_start_queue(dev); | 1072 | netif_start_queue(dev); |
1071 | 1073 | ||
1072 | return 0; | 1074 | return 0; |
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 9b7fbef3d09a..5f32d34af507 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h | |||
@@ -76,6 +76,7 @@ struct c_can_priv { | |||
76 | unsigned int tx_next; | 76 | unsigned int tx_next; |
77 | unsigned int tx_echo; | 77 | unsigned int tx_echo; |
78 | void *priv; /* for board-specific data */ | 78 | void *priv; /* for board-specific data */ |
79 | u16 irqstatus; | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | struct net_device *alloc_c_can_dev(void); | 82 | struct net_device *alloc_c_can_dev(void); |
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 53115eee8075..688371cda37a 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c | |||
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev, | |||
154 | struct cc770_platform_data *pdata = pdev->dev.platform_data; | 154 | struct cc770_platform_data *pdata = pdev->dev.platform_data; |
155 | 155 | ||
156 | priv->can.clock.freq = pdata->osc_freq; | 156 | priv->can.clock.freq = pdata->osc_freq; |
157 | if (priv->cpu_interface | CPUIF_DSC) | 157 | if (priv->cpu_interface & CPUIF_DSC) |
158 | priv->can.clock.freq /= 2; | 158 | priv->can.clock.freq /= 2; |
159 | priv->clkout = pdata->cor; | 159 | priv->clkout = pdata->cor; |
160 | priv->bus_config = pdata->bcr; | 160 | priv->bus_config = pdata->bcr; |
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 442d91a2747b..bab0158f1cc3 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c | |||
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void) | |||
187 | rtnl_lock(); | 187 | rtnl_lock(); |
188 | err = __rtnl_link_register(&dummy_link_ops); | 188 | err = __rtnl_link_register(&dummy_link_ops); |
189 | 189 | ||
190 | for (i = 0; i < numdummies && !err; i++) | 190 | for (i = 0; i < numdummies && !err; i++) { |
191 | err = dummy_init_one(); | 191 | err = dummy_init_one(); |
192 | cond_resched(); | ||
193 | } | ||
192 | if (err < 0) | 194 | if (err < 0) |
193 | __rtnl_link_unregister(&dummy_link_ops); | 195 | __rtnl_link_unregister(&dummy_link_ops); |
194 | rtnl_unlock(); | 196 | rtnl_unlock(); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e30e2a2f354c..7de824184979 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -747,21 +747,6 @@ struct bnx2x_fastpath { | |||
747 | 747 | ||
748 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | 748 | #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
749 | 749 | ||
750 | #define BNX2X_IP_CSUM_ERR(cqe) \ | ||
751 | (!((cqe)->fast_path_cqe.status_flags & \ | ||
752 | ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ | ||
753 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
754 | ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) | ||
755 | |||
756 | #define BNX2X_L4_CSUM_ERR(cqe) \ | ||
757 | (!((cqe)->fast_path_cqe.status_flags & \ | ||
758 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ | ||
759 | ((cqe)->fast_path_cqe.type_error_flags & \ | ||
760 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) | ||
761 | |||
762 | #define BNX2X_RX_CSUM_OK(cqe) \ | ||
763 | (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) | ||
764 | |||
765 | #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ | 750 | #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ |
766 | (((le16_to_cpu(flags) & \ | 751 | (((le16_to_cpu(flags) & \ |
767 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ | 752 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ad0743bf4bde..cbc56f274e0c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp, | |||
617 | return 0; | 617 | return 0; |
618 | } | 618 | } |
619 | 619 | ||
620 | static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, | ||
621 | struct bnx2x_fastpath *fp) | ||
622 | { | ||
623 | /* Do nothing if no IP/L4 csum validation was done */ | ||
624 | |||
625 | if (cqe->fast_path_cqe.status_flags & | ||
626 | (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | | ||
627 | ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) | ||
628 | return; | ||
629 | |||
630 | /* If both IP/L4 validation were done, check if an error was found. */ | ||
631 | |||
632 | if (cqe->fast_path_cqe.type_error_flags & | ||
633 | (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | | ||
634 | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) | ||
635 | fp->eth_q_stats.hw_csum_err++; | ||
636 | else | ||
637 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
638 | } | ||
620 | 639 | ||
621 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | 640 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) |
622 | { | 641 | { |
@@ -806,13 +825,9 @@ reuse_rx: | |||
806 | 825 | ||
807 | skb_checksum_none_assert(skb); | 826 | skb_checksum_none_assert(skb); |
808 | 827 | ||
809 | if (bp->dev->features & NETIF_F_RXCSUM) { | 828 | if (bp->dev->features & NETIF_F_RXCSUM) |
829 | bnx2x_csum_validate(skb, cqe, fp); | ||
810 | 830 | ||
811 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | ||
812 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
813 | else | ||
814 | fp->eth_q_stats.hw_csum_err++; | ||
815 | } | ||
816 | 831 | ||
817 | skb_record_rx_queue(skb, fp->rx_queue); | 832 | skb_record_rx_queue(skb, fp->rx_queue); |
818 | 833 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index edeeb516807a..e47ff8be1d7b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
14275 | } | 14275 | } |
14276 | } | 14276 | } |
14277 | 14277 | ||
14278 | if (tg3_flag(tp, 5755_PLUS)) | 14278 | if (tg3_flag(tp, 5755_PLUS) || |
14279 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | ||
14279 | tg3_flag_set(tp, SHORT_DMA_BUG); | 14280 | tg3_flag_set(tp, SHORT_DMA_BUG); |
14280 | 14281 | ||
14281 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) | 14282 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08efd308d78a..fdb50cec6b51 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, | |||
736 | 736 | ||
737 | copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); | 737 | copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); |
738 | if (copied) { | 738 | if (copied) { |
739 | int gso_segs = skb_shinfo(skb)->gso_segs; | ||
740 | |||
739 | /* record the sent skb in the sent_skb table */ | 741 | /* record the sent skb in the sent_skb table */ |
740 | BUG_ON(txo->sent_skb_list[start]); | 742 | BUG_ON(txo->sent_skb_list[start]); |
741 | txo->sent_skb_list[start] = skb; | 743 | txo->sent_skb_list[start] = skb; |
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, | |||
753 | 755 | ||
754 | be_txq_notify(adapter, txq->id, wrb_cnt); | 756 | be_txq_notify(adapter, txq->id, wrb_cnt); |
755 | 757 | ||
756 | be_tx_stats_update(txo, wrb_cnt, copied, | 758 | be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped); |
757 | skb_shinfo(skb)->gso_segs, stopped); | ||
758 | } else { | 759 | } else { |
759 | txq->head = start; | 760 | txq->head = start; |
760 | dev_kfree_skb_any(skb); | 761 | dev_kfree_skb_any(skb); |
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index d863075df7a4..905e2147d918 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c | |||
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev, | |||
258 | * When SoL/IDER sessions are active, autoneg/speed/duplex | 258 | * When SoL/IDER sessions are active, autoneg/speed/duplex |
259 | * cannot be changed | 259 | * cannot be changed |
260 | */ | 260 | */ |
261 | if (hw->phy.ops.check_reset_block(hw)) { | 261 | if (hw->phy.ops.check_reset_block && |
262 | hw->phy.ops.check_reset_block(hw)) { | ||
262 | e_err("Cannot change link characteristics when SoL/IDER is active.\n"); | 263 | e_err("Cannot change link characteristics when SoL/IDER is active.\n"); |
263 | return -EINVAL; | 264 | return -EINVAL; |
264 | } | 265 | } |
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | |||
1615 | * PHY loopback cannot be performed if SoL/IDER | 1616 | * PHY loopback cannot be performed if SoL/IDER |
1616 | * sessions are active | 1617 | * sessions are active |
1617 | */ | 1618 | */ |
1618 | if (hw->phy.ops.check_reset_block(hw)) { | 1619 | if (hw->phy.ops.check_reset_block && |
1620 | hw->phy.ops.check_reset_block(hw)) { | ||
1619 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); | 1621 | e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); |
1620 | *data = 0; | 1622 | *data = 0; |
1621 | goto out; | 1623 | goto out; |
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 026e8b3ab52e..a13439928488 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c | |||
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw) | |||
709 | * In the case of the phy reset being blocked, we already have a link. | 709 | * In the case of the phy reset being blocked, we already have a link. |
710 | * We do not need to set it up again. | 710 | * We do not need to set it up again. |
711 | */ | 711 | */ |
712 | if (hw->phy.ops.check_reset_block(hw)) | 712 | if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) |
713 | return 0; | 713 | return 0; |
714 | 714 | ||
715 | /* | 715 | /* |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a4b0435b00dc..31d37a2b5ba8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -6237,7 +6237,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
6237 | adapter->hw.phy.ms_type = e1000_ms_hw_default; | 6237 | adapter->hw.phy.ms_type = e1000_ms_hw_default; |
6238 | } | 6238 | } |
6239 | 6239 | ||
6240 | if (hw->phy.ops.check_reset_block(hw)) | 6240 | if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) |
6241 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); | 6241 | e_info("PHY reset is blocked due to SOL/IDER session.\n"); |
6242 | 6242 | ||
6243 | /* Set initial default active device features */ | 6243 | /* Set initial default active device features */ |
@@ -6404,7 +6404,7 @@ err_register: | |||
6404 | if (!(adapter->flags & FLAG_HAS_AMT)) | 6404 | if (!(adapter->flags & FLAG_HAS_AMT)) |
6405 | e1000e_release_hw_control(adapter); | 6405 | e1000e_release_hw_control(adapter); |
6406 | err_eeprom: | 6406 | err_eeprom: |
6407 | if (!hw->phy.ops.check_reset_block(hw)) | 6407 | if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) |
6408 | e1000_phy_hw_reset(&adapter->hw); | 6408 | e1000_phy_hw_reset(&adapter->hw); |
6409 | err_hw_init: | 6409 | err_hw_init: |
6410 | kfree(adapter->tx_ring); | 6410 | kfree(adapter->tx_ring); |
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 0334d013bc3c..b860d4f7ea2a 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c | |||
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | |||
2155 | s32 ret_val; | 2155 | s32 ret_val; |
2156 | u32 ctrl; | 2156 | u32 ctrl; |
2157 | 2157 | ||
2158 | ret_val = phy->ops.check_reset_block(hw); | 2158 | if (phy->ops.check_reset_block) { |
2159 | if (ret_val) | 2159 | ret_val = phy->ops.check_reset_block(hw); |
2160 | return 0; | 2160 | if (ret_val) |
2161 | return 0; | ||
2162 | } | ||
2161 | 2163 | ||
2162 | ret_val = phy->ops.acquire(hw); | 2164 | ret_val = phy->ops.acquire(hw); |
2163 | if (ret_val) | 2165 | if (ret_val) |
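All of the e1000e call sites above apply the same guard: check_reset_block is now an optional op, so the pointer must be tested before it is called. If the pattern recurs, it could be wrapped once; a sketch under that assumption (the wrapper is hypothetical, not part of this patch):

/* Hypothetical wrapper: returns false when the op is not implemented,
 * otherwise reports whether PHY resets are currently blocked. */
static bool demo_reset_blocked(struct e1000_hw *hw)
{
	return hw->phy.ops.check_reset_block &&
	       hw->phy.ops.check_reset_block(hw);
}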
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bf20457ea23a..17ad6a3c1be1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, | |||
1390 | union ixgbe_adv_rx_desc *rx_desc, | 1390 | union ixgbe_adv_rx_desc *rx_desc, |
1391 | struct sk_buff *skb) | 1391 | struct sk_buff *skb) |
1392 | { | 1392 | { |
1393 | struct net_device *dev = rx_ring->netdev; | ||
1394 | |||
1393 | ixgbe_update_rsc_stats(rx_ring, skb); | 1395 | ixgbe_update_rsc_stats(rx_ring, skb); |
1394 | 1396 | ||
1395 | ixgbe_rx_hash(rx_ring, rx_desc, skb); | 1397 | ixgbe_rx_hash(rx_ring, rx_desc, skb); |
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, | |||
1401 | ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); | 1403 | ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); |
1402 | #endif | 1404 | #endif |
1403 | 1405 | ||
1404 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { | 1406 | if ((dev->features & NETIF_F_HW_VLAN_RX) && |
1407 | ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { | ||
1405 | u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); | 1408 | u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); |
1406 | __vlan_hwaccel_put_tag(skb, vid); | 1409 | __vlan_hwaccel_put_tag(skb, vid); |
1407 | } | 1410 | } |
1408 | 1411 | ||
1409 | skb_record_rx_queue(skb, rx_ring->queue_index); | 1412 | skb_record_rx_queue(skb, rx_ring->queue_index); |
1410 | 1413 | ||
1411 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); | 1414 | skb->protocol = eth_type_trans(skb, dev); |
1412 | } | 1415 | } |
1413 | 1416 | ||
1414 | static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, | 1417 | static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, |
@@ -3607,10 +3610,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
3607 | if (hw->mac.type == ixgbe_mac_82598EB) | 3610 | if (hw->mac.type == ixgbe_mac_82598EB) |
3608 | netif_set_gso_max_size(adapter->netdev, 32768); | 3611 | netif_set_gso_max_size(adapter->netdev, 32768); |
3609 | 3612 | ||
3610 | |||
3611 | /* Enable VLAN tag insert/strip */ | ||
3612 | adapter->netdev->features |= NETIF_F_HW_VLAN_RX; | ||
3613 | |||
3614 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); | 3613 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); |
3615 | 3614 | ||
3616 | #ifdef IXGBE_FCOE | 3615 | #ifdef IXGBE_FCOE |
@@ -6701,11 +6700,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, | |||
6701 | { | 6700 | { |
6702 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6701 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6703 | 6702 | ||
6704 | #ifdef CONFIG_DCB | ||
6705 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | ||
6706 | features &= ~NETIF_F_HW_VLAN_RX; | ||
6707 | #endif | ||
6708 | |||
6709 | /* return error if RXHASH is being enabled when RSS is not supported */ | 6703 | /* return error if RXHASH is being enabled when RSS is not supported */ |
6710 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | 6704 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) |
6711 | features &= ~NETIF_F_RXHASH; | 6705 | features &= ~NETIF_F_RXHASH; |
@@ -6718,7 +6712,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, | |||
6718 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) | 6712 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) |
6719 | features &= ~NETIF_F_LRO; | 6713 | features &= ~NETIF_F_LRO; |
6720 | 6714 | ||
6721 | |||
6722 | return features; | 6715 | return features; |
6723 | } | 6716 | } |
6724 | 6717 | ||
@@ -6766,6 +6759,11 @@ static int ixgbe_set_features(struct net_device *netdev, | |||
6766 | need_reset = true; | 6759 | need_reset = true; |
6767 | } | 6760 | } |
6768 | 6761 | ||
6762 | if (features & NETIF_F_HW_VLAN_RX) | ||
6763 | ixgbe_vlan_strip_enable(adapter); | ||
6764 | else | ||
6765 | ixgbe_vlan_strip_disable(adapter); | ||
6766 | |||
6769 | if (changed & NETIF_F_RXALL) | 6767 | if (changed & NETIF_F_RXALL) |
6770 | need_reset = true; | 6768 | need_reset = true; |
6771 | 6769 | ||
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 04d901d0ff63..f0f06b2bc28b 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -436,7 +436,9 @@ struct mv643xx_eth_private { | |||
436 | /* | 436 | /* |
437 | * Hardware-specific parameters. | 437 | * Hardware-specific parameters. |
438 | */ | 438 | */ |
439 | #if defined(CONFIG_HAVE_CLK) | ||
439 | struct clk *clk; | 440 | struct clk *clk; |
441 | #endif | ||
440 | unsigned int t_clk; | 442 | unsigned int t_clk; |
441 | }; | 443 | }; |
442 | 444 | ||
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
2895 | mp->dev = dev; | 2897 | mp->dev = dev; |
2896 | 2898 | ||
2897 | /* | 2899 | /* |
2898 | * Get the clk rate, if there is one, otherwise use the default. | 2900 | * Start with a default rate, and if there is a clock, allow |
2901 | * it to override the default. | ||
2899 | */ | 2902 | */ |
2903 | mp->t_clk = 133000000; | ||
2904 | #if defined(CONFIG_HAVE_CLK) | ||
2900 | mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); | 2905 | mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); |
2901 | if (!IS_ERR(mp->clk)) { | 2906 | if (!IS_ERR(mp->clk)) { |
2902 | clk_prepare_enable(mp->clk); | 2907 | clk_prepare_enable(mp->clk); |
2903 | mp->t_clk = clk_get_rate(mp->clk); | 2908 | mp->t_clk = clk_get_rate(mp->clk); |
2904 | } else { | ||
2905 | mp->t_clk = 133000000; | ||
2906 | printk(KERN_WARNING "Unable to get clock"); | ||
2907 | } | 2909 | } |
2908 | 2910 | #endif | |
2909 | set_params(mp, pd); | 2911 | set_params(mp, pd); |
2910 | netif_set_real_num_tx_queues(dev, mp->txq_count); | 2912 | netif_set_real_num_tx_queues(dev, mp->txq_count); |
2911 | netif_set_real_num_rx_queues(dev, mp->rxq_count); | 2913 | netif_set_real_num_rx_queues(dev, mp->rxq_count); |
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev) | |||
2995 | phy_detach(mp->phy); | 2997 | phy_detach(mp->phy); |
2996 | cancel_work_sync(&mp->tx_timeout_task); | 2998 | cancel_work_sync(&mp->tx_timeout_task); |
2997 | 2999 | ||
3000 | #if defined(CONFIG_HAVE_CLK) | ||
2998 | if (!IS_ERR(mp->clk)) { | 3001 | if (!IS_ERR(mp->clk)) { |
2999 | clk_disable_unprepare(mp->clk); | 3002 | clk_disable_unprepare(mp->clk); |
3000 | clk_put(mp->clk); | 3003 | clk_put(mp->clk); |
3001 | } | 3004 | } |
3005 | #endif | ||
3006 | |||
3002 | free_netdev(mp->dev); | 3007 | free_netdev(mp->dev); |
3003 | 3008 | ||
3004 | platform_set_drvdata(pdev, NULL); | 3009 | platform_set_drvdata(pdev, NULL); |
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index cace36f2ab92..28a54451a3e5 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features) | |||
4381 | struct sky2_port *sky2 = netdev_priv(dev); | 4381 | struct sky2_port *sky2 = netdev_priv(dev); |
4382 | netdev_features_t changed = dev->features ^ features; | 4382 | netdev_features_t changed = dev->features ^ features; |
4383 | 4383 | ||
4384 | if (changed & NETIF_F_RXCSUM) { | 4384 | if ((changed & NETIF_F_RXCSUM) && |
4385 | bool on = features & NETIF_F_RXCSUM; | 4385 | !(sky2->hw->flags & SKY2_HW_NEW_LE)) { |
4386 | sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), | 4386 | sky2_write32(sky2->hw, |
4387 | on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | 4387 | Q_ADDR(rxqaddr[sky2->port], Q_CSR), |
4388 | (features & NETIF_F_RXCSUM) | ||
4389 | ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); | ||
4388 | } | 4390 | } |
4389 | 4391 | ||
4390 | if (changed & NETIF_F_RXHASH) | 4392 | if (changed & NETIF_F_RXHASH) |
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8d2666fcffd7..083d6715335c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev) | |||
946 | /* Update stats */ | 946 | /* Update stats */ |
947 | ndev->stats.tx_packets++; | 947 | ndev->stats.tx_packets++; |
948 | ndev->stats.tx_bytes += skb->len; | 948 | ndev->stats.tx_bytes += skb->len; |
949 | |||
950 | /* Free buffer */ | ||
951 | dev_kfree_skb_irq(skb); | ||
952 | } | 949 | } |
950 | dev_kfree_skb_irq(skb); | ||
953 | 951 | ||
954 | txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); | 952 | txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); |
955 | } | 953 | } |
956 | 954 | ||
957 | if (netif_queue_stopped(ndev)) | 955 | if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) { |
958 | netif_wake_queue(ndev); | 956 | if (netif_queue_stopped(ndev)) |
957 | netif_wake_queue(ndev); | ||
958 | } | ||
959 | } | 959 | } |
960 | 960 | ||
961 | static int __lpc_handle_recv(struct net_device *ndev, int budget) | 961 | static int __lpc_handle_recv(struct net_device *ndev, int budget) |
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = { | |||
1320 | .ndo_set_rx_mode = lpc_eth_set_multicast_list, | 1320 | .ndo_set_rx_mode = lpc_eth_set_multicast_list, |
1321 | .ndo_do_ioctl = lpc_eth_ioctl, | 1321 | .ndo_do_ioctl = lpc_eth_ioctl, |
1322 | .ndo_set_mac_address = lpc_set_mac_address, | 1322 | .ndo_set_mac_address = lpc_set_mac_address, |
1323 | .ndo_change_mtu = eth_change_mtu, | ||
1323 | }; | 1324 | }; |
1324 | 1325 | ||
1325 | static int lpc_eth_drv_probe(struct platform_device *pdev) | 1326 | static int lpc_eth_drv_probe(struct platform_device *pdev) |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9757ce3543a0..7260aa79466a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -5889,11 +5889,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) | |||
5889 | if (status & LinkChg) | 5889 | if (status & LinkChg) |
5890 | __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); | 5890 | __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); |
5891 | 5891 | ||
5892 | napi_disable(&tp->napi); | 5892 | rtl_irq_enable_all(tp); |
5893 | rtl_irq_disable(tp); | ||
5894 | |||
5895 | napi_enable(&tp->napi); | ||
5896 | napi_schedule(&tp->napi); | ||
5897 | } | 5893 | } |
5898 | 5894 | ||
5899 | static void rtl_task(struct work_struct *work) | 5895 | static void rtl_task(struct work_struct *work) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 036428348faa..9f448279e12a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
@@ -13,7 +13,7 @@ config STMMAC_ETH | |||
13 | if STMMAC_ETH | 13 | if STMMAC_ETH |
14 | 14 | ||
15 | config STMMAC_PLATFORM | 15 | config STMMAC_PLATFORM |
16 | tristate "STMMAC platform bus support" | 16 | bool "STMMAC Platform bus support" |
17 | depends on STMMAC_ETH | 17 | depends on STMMAC_ETH |
18 | default y | 18 | default y |
19 | ---help--- | 19 | ---help--- |
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM | |||
26 | If unsure, say N. | 26 | If unsure, say N. |
27 | 27 | ||
28 | config STMMAC_PCI | 28 | config STMMAC_PCI |
29 | tristate "STMMAC support on PCI bus (EXPERIMENTAL)" | 29 | bool "STMMAC PCI bus support (EXPERIMENTAL)" |
30 | depends on STMMAC_ETH && PCI && EXPERIMENTAL | 30 | depends on STMMAC_ETH && PCI && EXPERIMENTAL |
31 | ---help--- | 31 | ---help--- |
32 | This is to select the Synopsys DWMAC available on PCI devices, | 32 | This is to select the Synopsys DWMAC available on PCI devices, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 6b5d060ee9de..dc20c56efc9d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
27 | #include <linux/stmmac.h> | 27 | #include <linux/stmmac.h> |
28 | #include <linux/phy.h> | 28 | #include <linux/phy.h> |
29 | #include <linux/pci.h> | ||
29 | #include "common.h" | 30 | #include "common.h" |
30 | #ifdef CONFIG_STMMAC_TIMER | 31 | #ifdef CONFIG_STMMAC_TIMER |
31 | #include "stmmac_timer.h" | 32 | #include "stmmac_timer.h" |
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev); | |||
95 | extern void stmmac_set_ethtool_ops(struct net_device *netdev); | 96 | extern void stmmac_set_ethtool_ops(struct net_device *netdev); |
96 | extern const struct stmmac_desc_ops enh_desc_ops; | 97 | extern const struct stmmac_desc_ops enh_desc_ops; |
97 | extern const struct stmmac_desc_ops ndesc_ops; | 98 | extern const struct stmmac_desc_ops ndesc_ops; |
98 | |||
99 | int stmmac_freeze(struct net_device *ndev); | 99 | int stmmac_freeze(struct net_device *ndev); |
100 | int stmmac_restore(struct net_device *ndev); | 100 | int stmmac_restore(struct net_device *ndev); |
101 | int stmmac_resume(struct net_device *ndev); | 101 | int stmmac_resume(struct net_device *ndev); |
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
109 | static inline int stmmac_clk_enable(struct stmmac_priv *priv) | 109 | static inline int stmmac_clk_enable(struct stmmac_priv *priv) |
110 | { | 110 | { |
111 | if (!IS_ERR(priv->stmmac_clk)) | 111 | if (!IS_ERR(priv->stmmac_clk)) |
112 | return clk_enable(priv->stmmac_clk); | 112 | return clk_prepare_enable(priv->stmmac_clk); |
113 | 113 | ||
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv) | |||
119 | if (IS_ERR(priv->stmmac_clk)) | 119 | if (IS_ERR(priv->stmmac_clk)) |
120 | return; | 120 | return; |
121 | 121 | ||
122 | clk_disable(priv->stmmac_clk); | 122 | clk_disable_unprepare(priv->stmmac_clk); |
123 | } | 123 | } |
124 | static inline int stmmac_clk_get(struct stmmac_priv *priv) | 124 | static inline int stmmac_clk_get(struct stmmac_priv *priv) |
125 | { | 125 | { |
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv) | |||
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | #endif /* CONFIG_HAVE_CLK */ | 145 | #endif /* CONFIG_HAVE_CLK */ |
146 | |||
147 | |||
148 | #ifdef CONFIG_STMMAC_PLATFORM | ||
149 | extern struct platform_driver stmmac_pltfr_driver; | ||
150 | static inline int stmmac_register_platform(void) | ||
151 | { | ||
152 | int err; | ||
153 | |||
154 | err = platform_driver_register(&stmmac_pltfr_driver); | ||
155 | if (err) | ||
156 | pr_err("stmmac: failed to register the platform driver\n"); | ||
157 | |||
158 | return err; | ||
159 | } | ||
160 | static inline void stmmac_unregister_platform(void) | ||
161 | { | ||
162 | platform_driver_unregister(&stmmac_pltfr_driver); | ||
163 | } | ||
164 | #else | ||
165 | static inline int stmmac_register_platform(void) | ||
166 | { | ||
167 | pr_debug("stmmac: do not register the platform driver\n"); | ||
168 | |||
169 | return -EINVAL; | ||
170 | } | ||
171 | static inline void stmmac_unregister_platform(void) | ||
172 | { | ||
173 | } | ||
174 | #endif /* CONFIG_STMMAC_PLATFORM */ | ||
175 | |||
176 | #ifdef CONFIG_STMMAC_PCI | ||
177 | extern struct pci_driver stmmac_pci_driver; | ||
178 | static inline int stmmac_register_pci(void) | ||
179 | { | ||
180 | int err; | ||
181 | |||
182 | err = pci_register_driver(&stmmac_pci_driver); | ||
183 | if (err) | ||
184 | pr_err("stmmac: failed to register the PCI driver\n"); | ||
185 | |||
186 | return err; | ||
187 | } | ||
188 | static inline void stmmac_unregister_pci(void) | ||
189 | { | ||
190 | pci_unregister_driver(&stmmac_pci_driver); | ||
191 | } | ||
192 | #else | ||
193 | static inline int stmmac_register_pci(void) | ||
194 | { | ||
195 | pr_debug("stmmac: do not register the PCI driver\n"); | ||
196 | |||
197 | return -EINVAL; | ||
198 | } | ||
199 | static inline void stmmac_unregister_pci(void) | ||
200 | { | ||
201 | } | ||
202 | #endif /* CONFIG_STMMAC_PCI */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 70966330f44e..51b3b68528ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) | |||
833 | 833 | ||
834 | /** | 834 | /** |
835 | * stmmac_selec_desc_mode | 835 | * stmmac_selec_desc_mode |
836 | * @dev : device pointer | 836 | * @priv : private structure |
837 | * Description: select the Enhanced/Alternate or Normal descriptors */ | 837 | * Description: select the Enhanced/Alternate or Normal descriptors |
838 | */ | ||
838 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) | 839 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) |
839 | { | 840 | { |
840 | if (priv->plat->enh_desc) { | 841 | if (priv->plat->enh_desc) { |
@@ -1861,6 +1862,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
1861 | /** | 1862 | /** |
1862 | * stmmac_dvr_probe | 1863 | * stmmac_dvr_probe |
1863 | * @device: device pointer | 1864 | * @device: device pointer |
1865 | * @plat_dat: platform data pointer | ||
1866 | * @addr: iobase memory address | ||
1864 | * Description: this is the main probe function used to | 1867 | * Description: this is the main probe function used to |
1865 | * call the alloc_etherdev, allocate the priv structure. | 1868 | * call the alloc_etherdev, allocate the priv structure. |
1866 | */ | 1869 | */ |
@@ -2090,6 +2093,34 @@ int stmmac_restore(struct net_device *ndev) | |||
2090 | } | 2093 | } |
2091 | #endif /* CONFIG_PM */ | 2094 | #endif /* CONFIG_PM */ |
2092 | 2095 | ||
2096 | /* The driver can be built with the PCI back end, the platform back | ||
2097 | * end, or both, depending on the configuration selected. | ||
2098 | */ | ||
2099 | static int __init stmmac_init(void) | ||
2100 | { | ||
2101 | int err_plt = 0; | ||
2102 | int err_pci = 0; | ||
2103 | |||
2104 | err_plt = stmmac_register_platform(); | ||
2105 | err_pci = stmmac_register_pci(); | ||
2106 | |||
2107 | if (err_pci && err_plt) { | ||
2108 | pr_err("stmmac: driver registration failed\n"); | ||
2109 | return -EINVAL; | ||
2110 | } | ||
2111 | |||
2112 | return 0; | ||
2113 | } | ||
2114 | |||
2115 | static void __exit stmmac_exit(void) | ||
2116 | { | ||
2117 | stmmac_unregister_platform(); | ||
2118 | stmmac_unregister_pci(); | ||
2119 | } | ||
2120 | |||
2121 | module_init(stmmac_init); | ||
2122 | module_exit(stmmac_exit); | ||
2123 | |||
2093 | #ifndef MODULE | 2124 | #ifndef MODULE |
2094 | static int __init stmmac_cmdline_opt(char *str) | 2125 | static int __init stmmac_cmdline_opt(char *str) |
2095 | { | 2126 | { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 58fab5303e9c..cf826e6b6aa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { | |||
179 | 179 | ||
180 | MODULE_DEVICE_TABLE(pci, stmmac_id_table); | 180 | MODULE_DEVICE_TABLE(pci, stmmac_id_table); |
181 | 181 | ||
182 | static struct pci_driver stmmac_driver = { | 182 | struct pci_driver stmmac_pci_driver = { |
183 | .name = STMMAC_RESOURCE_NAME, | 183 | .name = STMMAC_RESOURCE_NAME, |
184 | .id_table = stmmac_id_table, | 184 | .id_table = stmmac_id_table, |
185 | .probe = stmmac_pci_probe, | 185 | .probe = stmmac_pci_probe, |
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = { | |||
190 | #endif | 190 | #endif |
191 | }; | 191 | }; |
192 | 192 | ||
193 | /** | ||
194 | * stmmac_init_module - Entry point for the driver | ||
195 | * Description: This function is the entry point for the driver. | ||
196 | */ | ||
197 | static int __init stmmac_init_module(void) | ||
198 | { | ||
199 | int ret; | ||
200 | |||
201 | ret = pci_register_driver(&stmmac_driver); | ||
202 | if (ret < 0) | ||
203 | pr_err("%s: ERROR: driver registration failed\n", __func__); | ||
204 | |||
205 | return ret; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * stmmac_cleanup_module - Cleanup routine for the driver | ||
210 | * Description: This function is the cleanup routine for the driver. | ||
211 | */ | ||
212 | static void __exit stmmac_cleanup_module(void) | ||
213 | { | ||
214 | pci_unregister_driver(&stmmac_driver); | ||
215 | } | ||
216 | |||
217 | module_init(stmmac_init_module); | ||
218 | module_exit(stmmac_cleanup_module); | ||
219 | |||
220 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); | 193 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); |
221 | MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); | 194 | MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); |
222 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 195 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3dd8f0803808..680d2b8dfe27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
255 | }; | 255 | }; |
256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); | 256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); |
257 | 257 | ||
258 | static struct platform_driver stmmac_driver = { | 258 | struct platform_driver stmmac_pltfr_driver = { |
259 | .probe = stmmac_pltfr_probe, | 259 | .probe = stmmac_pltfr_probe, |
260 | .remove = stmmac_pltfr_remove, | 260 | .remove = stmmac_pltfr_remove, |
261 | .driver = { | 261 | .driver = { |
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = { | |||
266 | }, | 266 | }, |
267 | }; | 267 | }; |
268 | 268 | ||
269 | module_platform_driver(stmmac_driver); | ||
270 | |||
271 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); | 269 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); |
272 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 270 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
273 | MODULE_LICENSE("GPL"); | 271 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 703c8cce2a2c..8c726b7004d3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) | |||
3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | 3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) |
3599 | { | 3599 | { |
3600 | struct netdev_queue *txq; | 3600 | struct netdev_queue *txq; |
3601 | unsigned int tx_bytes; | ||
3602 | u16 pkt_cnt, tmp; | 3601 | u16 pkt_cnt, tmp; |
3603 | int cons, index; | 3602 | int cons, index; |
3604 | u64 cs; | 3603 | u64 cs; |
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | |||
3621 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, | 3620 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, |
3622 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); | 3621 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); |
3623 | 3622 | ||
3624 | tx_bytes = 0; | 3623 | while (pkt_cnt--) |
3625 | tmp = pkt_cnt; | ||
3626 | while (tmp--) { | ||
3627 | tx_bytes += rp->tx_buffs[cons].skb->len; | ||
3628 | cons = release_tx_packet(np, rp, cons); | 3624 | cons = release_tx_packet(np, rp, cons); |
3629 | } | ||
3630 | 3625 | ||
3631 | rp->cons = cons; | 3626 | rp->cons = cons; |
3632 | smp_mb(); | 3627 | smp_mb(); |
3633 | 3628 | ||
3634 | netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes); | ||
3635 | |||
3636 | out: | 3629 | out: |
3637 | if (unlikely(netif_tx_queue_stopped(txq) && | 3630 | if (unlikely(netif_tx_queue_stopped(txq) && |
3638 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { | 3631 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { |
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np) | |||
4333 | struct tx_ring_info *rp = &np->tx_rings[i]; | 4326 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4334 | 4327 | ||
4335 | niu_free_tx_ring_info(np, rp); | 4328 | niu_free_tx_ring_info(np, rp); |
4336 | netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i)); | ||
4337 | } | 4329 | } |
4338 | kfree(np->tx_rings); | 4330 | kfree(np->tx_rings); |
4339 | np->tx_rings = NULL; | 4331 | np->tx_rings = NULL; |
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, | |||
6739 | prod = NEXT_TX(rp, prod); | 6731 | prod = NEXT_TX(rp, prod); |
6740 | } | 6732 | } |
6741 | 6733 | ||
6742 | netdev_tx_sent_queue(txq, skb->len); | ||
6743 | |||
6744 | if (prod < rp->prod) | 6734 | if (prod < rp->prod) |
6745 | rp->wrap_bit ^= TX_RING_KICK_WRAP; | 6735 | rp->wrap_bit ^= TX_RING_KICK_WRAP; |
6746 | rp->prod = prod; | 6736 | rp->prod = prod; |
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig index 2d9218f86bca..098b1c42b393 100644 --- a/drivers/net/ethernet/tile/Kconfig +++ b/drivers/net/ethernet/tile/Kconfig | |||
@@ -7,6 +7,8 @@ config TILE_NET | |||
7 | depends on TILE | 7 | depends on TILE |
8 | default y | 8 | default y |
9 | select CRC32 | 9 | select CRC32 |
10 | select TILE_GXIO_MPIPE if TILEGX | ||
11 | select HIGH_RES_TIMERS if TILEGX | ||
10 | ---help--- | 12 | ---help--- |
11 | This is a standard Linux network device driver for the | 13 | This is a standard Linux network device driver for the |
12 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. | 14 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. |
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile index f634f142cab4..0ef9eefd3211 100644 --- a/drivers/net/ethernet/tile/Makefile +++ b/drivers/net/ethernet/tile/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_TILE_NET) += tile_net.o | 5 | obj-$(CONFIG_TILE_NET) += tile_net.o |
6 | ifdef CONFIG_TILEGX | 6 | ifdef CONFIG_TILEGX |
7 | tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o | 7 | tile_net-y := tilegx.o |
8 | else | 8 | else |
9 | tile_net-objs := tilepro.o | 9 | tile_net-y := tilepro.o |
10 | endif | 10 | endif |
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c new file mode 100644 index 000000000000..83b4b388ad49 --- /dev/null +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -0,0 +1,1898 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> /* printk() */ | ||
20 | #include <linux/slab.h> /* kmalloc() */ | ||
21 | #include <linux/errno.h> /* error codes */ | ||
22 | #include <linux/types.h> /* size_t */ | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/in.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/netdevice.h> /* struct device, and other headers */ | ||
27 | #include <linux/etherdevice.h> /* eth_type_trans */ | ||
28 | #include <linux/skbuff.h> | ||
29 | #include <linux/ioctl.h> | ||
30 | #include <linux/cdev.h> | ||
31 | #include <linux/hugetlb.h> | ||
32 | #include <linux/in6.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/hrtimer.h> | ||
35 | #include <linux/ktime.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/ctype.h> | ||
38 | #include <linux/ip.h> | ||
39 | #include <linux/tcp.h> | ||
40 | |||
41 | #include <asm/checksum.h> | ||
42 | #include <asm/homecache.h> | ||
43 | #include <gxio/mpipe.h> | ||
44 | #include <arch/sim.h> | ||
45 | |||
46 | /* Default transmit lockup timeout period, in jiffies. */ | ||
47 | #define TILE_NET_TIMEOUT (5 * HZ) | ||
48 | |||
49 | /* The maximum number of distinct channels (idesc.channel is 5 bits). */ | ||
50 | #define TILE_NET_CHANNELS 32 | ||
51 | |||
52 | /* Maximum number of idescs to handle per "poll". */ | ||
53 | #define TILE_NET_BATCH 128 | ||
54 | |||
55 | /* Maximum number of packets to handle per "poll". */ | ||
56 | #define TILE_NET_WEIGHT 64 | ||
57 | |||
58 | /* Number of entries in each iqueue. */ | ||
59 | #define IQUEUE_ENTRIES 512 | ||
60 | |||
61 | /* Number of entries in each equeue. */ | ||
62 | #define EQUEUE_ENTRIES 2048 | ||
63 | |||
64 | /* Total header bytes per equeue slot. Must be big enough for 2 bytes | ||
65 | * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to | ||
66 | * 60 bytes of actual TCP header. We round up to align to cache lines. | ||
67 | */ | ||
68 | #define HEADER_BYTES 128 | ||
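The figure of 128 follows from the worst case sketched in the comment: 2 bytes of NET_IP_ALIGN padding + 14 bytes of Ethernet header + up to 60 bytes of TCP header (the maximum, with options) = 76 bytes, which rounded up to a cache-line multiple (assuming the 64-byte lines of TILE-Gx) gives two lines, i.e. 128 bytes per equeue slot.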
69 | |||
70 | /* Maximum completions per cpu per device (must be a power of two). | ||
71 | * ISSUE: What is the right number here? If this is too small, then | ||
72 | * egress might block waiting for free space in a completions array. | ||
73 | * ISSUE: At the least, allocate these only for initialized echannels. | ||
74 | */ | ||
75 | #define TILE_NET_MAX_COMPS 64 | ||
76 | |||
77 | #define MAX_FRAGS (MAX_SKB_FRAGS + 1) | ||
78 | |||
79 | /* Size of completions data to allocate. | ||
80 | * ISSUE: Probably more than needed since we don't use all the channels. | ||
81 | */ | ||
82 | #define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) | ||
83 | |||
84 | /* Size of NotifRing data to allocate. */ | ||
85 | #define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) | ||
86 | |||
87 | /* Timeout to wake the per-device TX timer after we stop the queue. | ||
88 | * We don't want the timeout too short (adds overhead, and might end | ||
89 | * up causing stop/wake/stop/wake cycles) or too long (affects performance). | ||
90 | * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets. | ||
91 | */ | ||
92 | #define TX_TIMER_DELAY_USEC 30 | ||
93 | |||
94 | /* Timeout to wake the per-cpu egress timer to free completions. */ | ||
95 | #define EGRESS_TIMER_DELAY_USEC 1000 | ||
96 | |||
97 | MODULE_AUTHOR("Tilera Corporation"); | ||
98 | MODULE_LICENSE("GPL"); | ||
99 | |||
100 | /* A "packet fragment" (a chunk of memory). */ | ||
101 | struct frag { | ||
102 | void *buf; | ||
103 | size_t length; | ||
104 | }; | ||
105 | |||
106 | /* A single completion. */ | ||
107 | struct tile_net_comp { | ||
108 | /* The "complete_count" when the completion will be complete. */ | ||
109 | s64 when; | ||
110 | /* The buffer to be freed when the completion is complete. */ | ||
111 | struct sk_buff *skb; | ||
112 | }; | ||
113 | |||
114 | /* The completions for a given cpu and echannel. */ | ||
115 | struct tile_net_comps { | ||
116 | /* The completions. */ | ||
117 | struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; | ||
118 | /* The number of completions used. */ | ||
119 | unsigned long comp_next; | ||
120 | /* The number of completions freed. */ | ||
121 | unsigned long comp_last; | ||
122 | }; | ||
123 | |||
124 | /* The transmit wake timer for a given cpu and echannel. */ | ||
125 | struct tile_net_tx_wake { | ||
126 | struct hrtimer timer; | ||
127 | struct net_device *dev; | ||
128 | }; | ||
129 | |||
130 | /* Info for a specific cpu. */ | ||
131 | struct tile_net_info { | ||
132 | /* The NAPI struct. */ | ||
133 | struct napi_struct napi; | ||
134 | /* Packet queue. */ | ||
135 | gxio_mpipe_iqueue_t iqueue; | ||
136 | /* Our cpu. */ | ||
137 | int my_cpu; | ||
138 | /* True if iqueue is valid. */ | ||
139 | bool has_iqueue; | ||
140 | /* NAPI flags. */ | ||
141 | bool napi_added; | ||
142 | bool napi_enabled; | ||
143 | /* Number of small sk_buffs which must still be provided. */ | ||
144 | unsigned int num_needed_small_buffers; | ||
145 | /* Number of large sk_buffs which must still be provided. */ | ||
146 | unsigned int num_needed_large_buffers; | ||
147 | /* A timer for handling egress completions. */ | ||
148 | struct hrtimer egress_timer; | ||
149 | /* True if "egress_timer" is scheduled. */ | ||
150 | bool egress_timer_scheduled; | ||
151 | /* Comps for each egress channel. */ | ||
152 | struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; | ||
153 | /* Transmit wake timer for each egress channel. */ | ||
154 | struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; | ||
155 | }; | ||
156 | |||
157 | /* Info for egress on a particular egress channel. */ | ||
158 | struct tile_net_egress { | ||
159 | /* The "equeue". */ | ||
160 | gxio_mpipe_equeue_t *equeue; | ||
161 | /* The headers for TSO. */ | ||
162 | unsigned char *headers; | ||
163 | }; | ||
164 | |||
165 | /* Info for a specific device. */ | ||
166 | struct tile_net_priv { | ||
167 | /* Our network device. */ | ||
168 | struct net_device *dev; | ||
169 | /* The primary link. */ | ||
170 | gxio_mpipe_link_t link; | ||
171 | /* The primary channel, if open, else -1. */ | ||
172 | int channel; | ||
173 | /* The "loopify" egress link, if needed. */ | ||
174 | gxio_mpipe_link_t loopify_link; | ||
175 | /* The "loopify" egress channel, if open, else -1. */ | ||
176 | int loopify_channel; | ||
177 | /* The egress channel (channel or loopify_channel). */ | ||
178 | int echannel; | ||
179 | /* Total stats. */ | ||
180 | struct net_device_stats stats; | ||
181 | }; | ||
182 | |||
183 | /* Egress info, indexed by "priv->echannel" (lazily created as needed). */ | ||
184 | static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; | ||
185 | |||
186 | /* Devices currently associated with each channel. | ||
187 | * NOTE: The array entry can become NULL after ifconfig down, but | ||
188 | * we do not free the underlying net_device structures, so it is | ||
189 | * safe to use a pointer after reading it from this array. | ||
190 | */ | ||
191 | static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; | ||
192 | |||
193 | /* A mutex for "tile_net_devs_for_channel". */ | ||
194 | static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); | ||
195 | |||
196 | /* The per-cpu info. */ | ||
197 | static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); | ||
198 | |||
199 | /* The "context" for all devices. */ | ||
200 | static gxio_mpipe_context_t context; | ||
201 | |||
202 | /* Buffer sizes and mpipe enum codes for buffer stacks. | ||
203 | * See arch/tile/include/gxio/mpipe.h for the set of possible values. | ||
204 | */ | ||
205 | #define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 | ||
206 | #define BUFFER_SIZE_SMALL 128 | ||
207 | #define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 | ||
208 | #define BUFFER_SIZE_LARGE 1664 | ||
209 | |||
210 | /* The small/large "buffer stacks". */ | ||
211 | static int small_buffer_stack = -1; | ||
212 | static int large_buffer_stack = -1; | ||
213 | |||
214 | /* Amount of memory allocated for each buffer stack. */ | ||
215 | static size_t buffer_stack_size; | ||
216 | |||
217 | /* The actual memory allocated for the buffer stacks. */ | ||
218 | static void *small_buffer_stack_va; | ||
219 | static void *large_buffer_stack_va; | ||
220 | |||
221 | /* The buckets. */ | ||
222 | static int first_bucket = -1; | ||
223 | static int num_buckets = 1; | ||
224 | |||
225 | /* The ingress irq. */ | ||
226 | static int ingress_irq = -1; | ||
227 | |||
228 | /* Text value of tile_net.cpus if passed as a module parameter. */ | ||
229 | static char *network_cpus_string; | ||
230 | |||
231 | /* The actual cpus in "network_cpus". */ | ||
232 | static struct cpumask network_cpus_map; | ||
233 | |||
234 | /* If "loopify=LINK" was specified, this is "LINK". */ | ||
235 | static char *loopify_link_name; | ||
236 | |||
237 | /* If "tile_net.custom" was specified, this is non-NULL. */ | ||
238 | static char *custom_str; | ||
239 | |||
240 | /* The "tile_net.cpus" argument specifies the cpus that are dedicated | ||
241 | * to handle ingress packets. | ||
242 | * | ||
243 | * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where | ||
244 | * m, n, x, y are cpu numbers; the cpus listed here may be neither | ||
245 | * "dedicated" cpus nor "dataplane" cpus. | ||
246 | */ | ||
247 | static bool network_cpus_init(void) | ||
248 | { | ||
249 | char buf[1024]; | ||
250 | int rc; | ||
251 | |||
252 | if (network_cpus_string == NULL) | ||
253 | return false; | ||
254 | |||
255 | rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); | ||
256 | if (rc != 0) { | ||
257 | pr_warn("tile_net.cpus=%s: malformed cpu list\n", | ||
258 | network_cpus_string); | ||
259 | return false; | ||
260 | } | ||
261 | |||
262 | /* Remove dedicated cpus. */ | ||
263 | cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); | ||
264 | |||
265 | if (cpumask_empty(&network_cpus_map)) { | ||
266 | pr_warn("Ignoring empty tile_net.cpus='%s'.\n", | ||
267 | network_cpus_string); | ||
268 | return false; | ||
269 | } | ||
270 | |||
271 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | ||
272 | pr_info("Linux network CPUs: %s\n", buf); | ||
273 | return true; | ||
274 | } | ||
275 | |||
276 | module_param_named(cpus, network_cpus_string, charp, 0444); | ||
277 | MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); | ||
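The cpus argument is an ordinary Linux cpulist. With purely illustrative cpu numbers, the ingress cpus would be chosen on the boot command line (the driver defaults to built-in, per the Kconfig hunk above), for example:

	tile_net.cpus=1-3,6

or, if built as a module, "modprobe tile_net cpus=1-3,6". cpulist_parse_crop(), as called above, turns the string into network_cpus_map, which network_cpus_init() then intersects with cpu_possible_mask and reports via pr_info().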
278 | |||
279 | /* The "tile_net.loopify=LINK" argument causes the named device to | ||
280 | * actually use "loop0" for ingress, and "loop1" for egress. This | ||
281 | * allows an app to sit between the actual link and linux, passing | ||
282 | * (some) packets along to linux, and forwarding (some) packets sent | ||
283 | * out by linux. | ||
284 | */ | ||
285 | module_param_named(loopify, loopify_link_name, charp, 0444); | ||
286 | MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); | ||
287 | |||
288 | /* The "tile_net.custom" argument causes us to ignore the "conventional" | ||
289 | * classifier metadata, in particular, the "l2_offset". | ||
290 | */ | ||
291 | module_param_named(custom, custom_str, charp, 0444); | ||
292 | MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); | ||
293 | |||
294 | /* Atomically update a statistics field. | ||
295 | * Note that on TILE-Gx, this operation is fire-and-forget on the | ||
296 | * issuing core (single-cycle dispatch) and takes only a few cycles | ||
297 | * longer than a regular store when the request reaches the home cache. | ||
298 | * No expensive bus management overhead is required. | ||
299 | */ | ||
300 | static void tile_net_stats_add(unsigned long value, unsigned long *field) | ||
301 | { | ||
302 | BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); | ||
303 | atomic_long_add(value, (atomic_long_t *)field); | ||
304 | } | ||
305 | |||
306 | /* Allocate and push a buffer. */ | ||
307 | static bool tile_net_provide_buffer(bool small) | ||
308 | { | ||
309 | int stack = small ? small_buffer_stack : large_buffer_stack; | ||
310 | const unsigned long buffer_alignment = 128; | ||
311 | struct sk_buff *skb; | ||
312 | int len; | ||
313 | |||
314 | len = sizeof(struct sk_buff **) + buffer_alignment; | ||
315 | len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); | ||
316 | skb = dev_alloc_skb(len); | ||
317 | if (skb == NULL) | ||
318 | return false; | ||
319 | |||
320 | /* Make room for a back-pointer to 'skb' and guarantee alignment. */ | ||
321 | skb_reserve(skb, sizeof(struct sk_buff **)); | ||
322 | skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); | ||
323 | |||
324 | /* Save a back-pointer to 'skb'. */ | ||
325 | *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; | ||
326 | |||
327 | /* Make sure "skb" and the back-pointer have been flushed. */ | ||
328 | wmb(); | ||
329 | |||
330 | gxio_mpipe_push_buffer(&context, stack, | ||
331 | (void *)va_to_tile_io_addr(skb->data)); | ||
332 | |||
333 | return true; | ||
334 | } | ||
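The buffer layout built above (room reserved for a back-pointer, skb->data rounded up to a 128-byte boundary, and the sk_buff pointer stashed just below the data) can be illustrated with a small standalone sketch. This is only a userspace illustration of the pointer arithmetic, using malloc() and made-up names, not the driver's API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BUFFER_ALIGNMENT 128
#define BUFFER_SIZE 1664

struct fake_skb {
	unsigned char *head;
	unsigned char *data;
};

int main(void)
{
	struct fake_skb skb, **back;

	skb.head = malloc(sizeof(struct fake_skb *) + BUFFER_ALIGNMENT + BUFFER_SIZE);
	if (skb.head == NULL)
		return 1;
	skb.data = skb.head;

	/* Make room for the back-pointer, then round "data" up to a 128-byte
	 * boundary, as the two skb_reserve() calls above do. */
	skb.data += sizeof(struct fake_skb *);
	skb.data += -(uintptr_t)skb.data & (BUFFER_ALIGNMENT - 1);

	/* Save the back-pointer just below the aligned data. */
	*(struct fake_skb **)(skb.data - sizeof(struct fake_skb *)) = &skb;

	/* Later, recover the skb from the raw buffer address, as
	 * mpipe_buf_to_skb() does when the hardware hands the buffer back. */
	back = (struct fake_skb **)(skb.data - sizeof(struct fake_skb *));
	printf("aligned: %d, recovered: %d\n",
	       (uintptr_t)skb.data % BUFFER_ALIGNMENT == 0, *back == &skb);

	free(skb.head);
	return 0;
}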
335 | |||
336 | /* Convert a raw mpipe buffer to its matching skb pointer. */ | ||
337 | static struct sk_buff *mpipe_buf_to_skb(void *va) | ||
338 | { | ||
339 | /* Acquire the associated "skb". */ | ||
340 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
341 | struct sk_buff *skb = *skb_ptr; | ||
342 | |||
343 | /* Paranoia. */ | ||
344 | if (skb->data != va) { | ||
345 | /* Panic here since there's a reasonable chance | ||
346 | * that corrupt buffers means generic memory | ||
347 | * corruption, with unpredictable system effects. | ||
348 | */ | ||
349 | panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", | ||
350 | va, skb, skb->data); | ||
351 | } | ||
352 | |||
353 | return skb; | ||
354 | } | ||
355 | |||
356 | static void tile_net_pop_all_buffers(int stack) | ||
357 | { | ||
358 | for (;;) { | ||
359 | tile_io_addr_t addr = | ||
360 | (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); | ||
361 | if (addr == 0) | ||
362 | break; | ||
363 | dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | /* Provide linux buffers to mPIPE. */ | ||
368 | static void tile_net_provide_needed_buffers(void) | ||
369 | { | ||
370 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
371 | |||
372 | while (info->num_needed_small_buffers != 0) { | ||
373 | if (!tile_net_provide_buffer(true)) | ||
374 | goto oops; | ||
375 | info->num_needed_small_buffers--; | ||
376 | } | ||
377 | |||
378 | while (info->num_needed_large_buffers != 0) { | ||
379 | if (!tile_net_provide_buffer(false)) | ||
380 | goto oops; | ||
381 | info->num_needed_large_buffers--; | ||
382 | } | ||
383 | |||
384 | return; | ||
385 | |||
386 | oops: | ||
387 | /* Add a description to the page allocation failure dump. */ | ||
388 | pr_notice("Tile %d still needs some buffers\n", info->my_cpu); | ||
389 | } | ||
390 | |||
391 | static inline bool filter_packet(struct net_device *dev, void *buf) | ||
392 | { | ||
393 | /* Filter packets received before we're up. */ | ||
394 | if (dev == NULL || !(dev->flags & IFF_UP)) | ||
395 | return true; | ||
396 | |||
397 | /* Filter out packets that aren't for us. */ | ||
398 | if (!(dev->flags & IFF_PROMISC) && | ||
399 | !is_multicast_ether_addr(buf) && | ||
400 | compare_ether_addr(dev->dev_addr, buf) != 0) | ||
401 | return true; | ||
402 | |||
403 | return false; | ||
404 | } | ||
405 | |||
406 | static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, | ||
407 | gxio_mpipe_idesc_t *idesc, unsigned long len) | ||
408 | { | ||
409 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
410 | struct tile_net_priv *priv = netdev_priv(dev); | ||
411 | |||
412 | /* Encode the actual packet length. */ | ||
413 | skb_put(skb, len); | ||
414 | |||
415 | skb->protocol = eth_type_trans(skb, dev); | ||
416 | |||
417 | /* Acknowledge "good" hardware checksums. */ | ||
418 | if (idesc->cs && idesc->csum_seed_val == 0xFFFF) | ||
419 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
420 | |||
421 | netif_receive_skb(skb); | ||
422 | |||
423 | /* Update stats. */ | ||
424 | tile_net_stats_add(1, &priv->stats.rx_packets); | ||
425 | tile_net_stats_add(len, &priv->stats.rx_bytes); | ||
426 | |||
427 | /* Need a new buffer. */ | ||
428 | if (idesc->size == BUFFER_SIZE_SMALL_ENUM) | ||
429 | info->num_needed_small_buffers++; | ||
430 | else | ||
431 | info->num_needed_large_buffers++; | ||
432 | } | ||
433 | |||
434 | /* Handle a packet. Return true if "processed", false if "filtered". */ | ||
435 | static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) | ||
436 | { | ||
437 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
438 | struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; | ||
439 | uint8_t l2_offset; | ||
440 | void *va; | ||
441 | void *buf; | ||
442 | unsigned long len; | ||
443 | bool filter; | ||
444 | |||
445 | /* Drop packets for which no buffer was available. | ||
446 | * NOTE: This happens under heavy load. | ||
447 | */ | ||
448 | if (idesc->be) { | ||
449 | struct tile_net_priv *priv = netdev_priv(dev); | ||
450 | tile_net_stats_add(1, &priv->stats.rx_dropped); | ||
451 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
452 | if (net_ratelimit()) | ||
453 | pr_info("Dropping packet (insufficient buffers).\n"); | ||
454 | return false; | ||
455 | } | ||
456 | |||
457 | /* Get the "l2_offset", if allowed. */ | ||
458 | l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); | ||
459 | |||
460 | /* Get the raw buffer VA (includes "headroom"). */ | ||
461 | va = tile_io_addr_to_va((unsigned long)(long)idesc->va); | ||
462 | |||
463 | /* Get the actual packet start/length. */ | ||
464 | buf = va + l2_offset; | ||
465 | len = idesc->l2_size - l2_offset; | ||
466 | |||
467 | /* Point "va" at the raw buffer. */ | ||
468 | va -= NET_IP_ALIGN; | ||
469 | |||
470 | filter = filter_packet(dev, buf); | ||
471 | if (filter) { | ||
472 | gxio_mpipe_iqueue_drop(&info->iqueue, idesc); | ||
473 | } else { | ||
474 | struct sk_buff *skb = mpipe_buf_to_skb(va); | ||
475 | |||
476 | /* Skip headroom, and any custom header. */ | ||
477 | skb_reserve(skb, NET_IP_ALIGN + l2_offset); | ||
478 | |||
479 | tile_net_receive_skb(dev, skb, idesc, len); | ||
480 | } | ||
481 | |||
482 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
483 | return !filter; | ||
484 | } | ||
485 | |||
486 | /* Handle some packets for the current CPU. | ||
487 | * | ||
488 | * This function handles up to TILE_NET_BATCH idescs per call. | ||
489 | * | ||
490 | * ISSUE: Since we do not provide new buffers until this function is | ||
491 | * complete, we must initially provide enough buffers for each network | ||
492 | * cpu to fill its iqueue and also its batched idescs. | ||
493 | * | ||
494 | * ISSUE: The "rotting packet" race condition occurs if a packet | ||
495 | * arrives after the queue appears to be empty, and before the | ||
496 | * hypervisor interrupt is re-enabled. | ||
497 | */ | ||
498 | static int tile_net_poll(struct napi_struct *napi, int budget) | ||
499 | { | ||
500 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
501 | unsigned int work = 0; | ||
502 | gxio_mpipe_idesc_t *idesc; | ||
503 | int i, n; | ||
504 | |||
505 | /* Process packets. */ | ||
506 | while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { | ||
507 | for (i = 0; i < n; i++) { | ||
508 | if (i == TILE_NET_BATCH) | ||
509 | goto done; | ||
510 | if (tile_net_handle_packet(idesc + i)) { | ||
511 | if (++work >= budget) | ||
512 | goto done; | ||
513 | } | ||
514 | } | ||
515 | } | ||
516 | |||
517 | /* There are no packets left. */ | ||
518 | napi_complete(&info->napi); | ||
519 | |||
520 | /* Re-enable hypervisor interrupts. */ | ||
521 | gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); | ||
522 | |||
523 | /* HACK: Avoid the "rotting packet" problem. */ | ||
524 | if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) | ||
525 | napi_schedule(&info->napi); | ||
526 | |||
527 | /* ISSUE: Handle completions? */ | ||
528 | |||
529 | done: | ||
530 | tile_net_provide_needed_buffers(); | ||
531 | |||
532 | return work; | ||
533 | } | ||
534 | |||
535 | /* Handle an ingress interrupt on the current cpu. */ | ||
536 | static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) | ||
537 | { | ||
538 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
539 | napi_schedule(&info->napi); | ||
540 | return IRQ_HANDLED; | ||
541 | } | ||
542 | |||
543 | /* Free some completions. This must be called with interrupts blocked. */ | ||
544 | static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, | ||
545 | struct tile_net_comps *comps, | ||
546 | int limit, bool force_update) | ||
547 | { | ||
548 | int n = 0; | ||
549 | while (comps->comp_last < comps->comp_next) { | ||
550 | unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; | ||
551 | struct tile_net_comp *comp = &comps->comp_queue[cid]; | ||
552 | if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, | ||
553 | force_update || n == 0)) | ||
554 | break; | ||
555 | dev_kfree_skb_irq(comp->skb); | ||
556 | comps->comp_last++; | ||
557 | if (++n == limit) | ||
558 | break; | ||
559 | } | ||
560 | return n; | ||
561 | } | ||
562 | |||
563 | /* Add a completion. This must be called with interrupts blocked. | ||
564 | * tile_net_equeue_try_reserve() will have ensured a free completion entry. | ||
565 | */ | ||
566 | static void add_comp(gxio_mpipe_equeue_t *equeue, | ||
567 | struct tile_net_comps *comps, | ||
568 | uint64_t when, struct sk_buff *skb) | ||
569 | { | ||
570 | int cid = comps->comp_next % TILE_NET_MAX_COMPS; | ||
571 | comps->comp_queue[cid].when = when; | ||
572 | comps->comp_queue[cid].skb = skb; | ||
573 | comps->comp_next++; | ||
574 | } | ||
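The comp_next/comp_last pair above is a familiar ring idiom: both are free-running counters, the slot actually touched is the counter modulo TILE_NET_MAX_COMPS, the ring is empty when they are equal, and full when they differ by TILE_NET_MAX_COMPS (which is why tile_net_equeue_try_reserve(), further down, only takes a new entry while fewer than TILE_NET_MAX_COMPS - 1 are outstanding). A minimal standalone sketch of the idiom, with invented names:

#include <stdio.h>

#define MAX_COMPS 64	/* mirrors TILE_NET_MAX_COMPS; a power of two */

struct ring {
	int slots[MAX_COMPS];
	unsigned long next;	/* entries ever added (like comp_next) */
	unsigned long last;	/* entries ever retired (like comp_last) */
};

static int ring_add(struct ring *r, int value)
{
	if (r->next - r->last >= MAX_COMPS)
		return -1;			/* full */
	r->slots[r->next % MAX_COMPS] = value;	/* index wraps like comp_queue[cid] */
	r->next++;
	return 0;
}

static int ring_retire(struct ring *r, int *value)
{
	if (r->last >= r->next)
		return -1;			/* empty */
	*value = r->slots[r->last % MAX_COMPS];
	r->last++;
	return 0;
}

int main(void)
{
	struct ring r = { .next = 0, .last = 0 };
	int i, v;

	for (i = 0; i < 100; i++) {
		if (ring_add(&r, i) != 0) {
			/* Full: retire one first, as the driver frees comps. */
			ring_retire(&r, &v);
			ring_add(&r, i);
		}
	}
	printf("in flight: %lu\n", r.next - r.last);	/* prints 64 */
	return 0;
}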
575 | |||
576 | static void tile_net_schedule_tx_wake_timer(struct net_device *dev) | ||
577 | { | ||
578 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
579 | struct tile_net_priv *priv = netdev_priv(dev); | ||
580 | |||
581 | hrtimer_start(&info->tx_wake[priv->echannel].timer, | ||
582 | ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), | ||
583 | HRTIMER_MODE_REL_PINNED); | ||
584 | } | ||
585 | |||
586 | static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) | ||
587 | { | ||
588 | struct tile_net_tx_wake *tx_wake = | ||
589 | container_of(t, struct tile_net_tx_wake, timer); | ||
590 | netif_wake_subqueue(tx_wake->dev, smp_processor_id()); | ||
591 | return HRTIMER_NORESTART; | ||
592 | } | ||
593 | |||
594 | /* Make sure the egress timer is scheduled. */ | ||
595 | static void tile_net_schedule_egress_timer(void) | ||
596 | { | ||
597 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
598 | |||
599 | if (!info->egress_timer_scheduled) { | ||
600 | hrtimer_start(&info->egress_timer, | ||
601 | ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), | ||
602 | HRTIMER_MODE_REL_PINNED); | ||
603 | info->egress_timer_scheduled = true; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | /* The "function" for "info->egress_timer". | ||
608 | * | ||
609 | * This timer will reschedule itself as long as there are any pending | ||
610 | * completions expected for this tile. | ||
611 | */ | ||
612 | static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) | ||
613 | { | ||
614 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
615 | unsigned long irqflags; | ||
616 | bool pending = false; | ||
617 | int i; | ||
618 | |||
619 | local_irq_save(irqflags); | ||
620 | |||
621 | /* The timer is no longer scheduled. */ | ||
622 | info->egress_timer_scheduled = false; | ||
623 | |||
624 | /* Free all possible comps for this tile. */ | ||
625 | for (i = 0; i < TILE_NET_CHANNELS; i++) { | ||
626 | struct tile_net_egress *egress = &egress_for_echannel[i]; | ||
627 | struct tile_net_comps *comps = info->comps_for_echannel[i]; | ||
628 | if (comps->comp_last >= comps->comp_next) | ||
629 | continue; | ||
630 | tile_net_free_comps(egress->equeue, comps, -1, true); | ||
631 | pending = pending || (comps->comp_last < comps->comp_next); | ||
632 | } | ||
633 | |||
634 | /* Reschedule timer if needed. */ | ||
635 | if (pending) | ||
636 | tile_net_schedule_egress_timer(); | ||
637 | |||
638 | local_irq_restore(irqflags); | ||
639 | |||
640 | return HRTIMER_NORESTART; | ||
641 | } | ||
642 | |||
643 | /* Helper function for "tile_net_update()". | ||
644 | * "dev" (i.e. arg) is the device being brought up or down, | ||
645 | * or NULL if all devices are now down. | ||
646 | */ | ||
647 | static void tile_net_update_cpu(void *arg) | ||
648 | { | ||
649 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
650 | struct net_device *dev = arg; | ||
651 | |||
652 | if (!info->has_iqueue) | ||
653 | return; | ||
654 | |||
655 | if (dev != NULL) { | ||
656 | if (!info->napi_added) { | ||
657 | netif_napi_add(dev, &info->napi, | ||
658 | tile_net_poll, TILE_NET_WEIGHT); | ||
659 | info->napi_added = true; | ||
660 | } | ||
661 | if (!info->napi_enabled) { | ||
662 | napi_enable(&info->napi); | ||
663 | info->napi_enabled = true; | ||
664 | } | ||
665 | enable_percpu_irq(ingress_irq, 0); | ||
666 | } else { | ||
667 | disable_percpu_irq(ingress_irq); | ||
668 | if (info->napi_enabled) { | ||
669 | napi_disable(&info->napi); | ||
670 | info->napi_enabled = false; | ||
671 | } | ||
672 | /* FIXME: Drain the iqueue. */ | ||
673 | } | ||
674 | } | ||
675 | |||
676 | /* Helper function for tile_net_open() and tile_net_stop(). | ||
677 | * Always called under tile_net_devs_for_channel_mutex. | ||
678 | */ | ||
679 | static int tile_net_update(struct net_device *dev) | ||
680 | { | ||
681 | static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ | ||
682 | bool saw_channel = false; | ||
683 | int channel; | ||
684 | int rc; | ||
685 | int cpu; | ||
686 | |||
687 | gxio_mpipe_rules_init(&rules, &context); | ||
688 | |||
689 | for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { | ||
690 | if (tile_net_devs_for_channel[channel] == NULL) | ||
691 | continue; | ||
692 | if (!saw_channel) { | ||
693 | saw_channel = true; | ||
694 | gxio_mpipe_rules_begin(&rules, first_bucket, | ||
695 | num_buckets, NULL); | ||
696 | gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); | ||
697 | } | ||
698 | gxio_mpipe_rules_add_channel(&rules, channel); | ||
699 | } | ||
700 | |||
701 | /* NOTE: This can fail if there is no classifier. | ||
702 | * ISSUE: Can anything else cause it to fail? | ||
703 | */ | ||
704 | rc = gxio_mpipe_rules_commit(&rules); | ||
705 | if (rc != 0) { | ||
706 | netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); | ||
707 | return -EIO; | ||
708 | } | ||
709 | |||
710 | /* Update all cpus, sequentially (to protect "netif_napi_add()"). */ | ||
711 | for_each_online_cpu(cpu) | ||
712 | smp_call_function_single(cpu, tile_net_update_cpu, | ||
713 | (saw_channel ? dev : NULL), 1); | ||
714 | |||
715 | /* HACK: Allow packets to flow in the simulator. */ | ||
716 | if (saw_channel) | ||
717 | sim_enable_mpipe_links(0, -1); | ||
718 | |||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | /* Allocate and initialize mpipe buffer stacks, and register them in | ||
723 | * the mPIPE TLBs, for both small and large packet sizes. | ||
724 | * This routine supports tile_net_init_mpipe(), below. | ||
725 | */ | ||
726 | static int init_buffer_stacks(struct net_device *dev, int num_buffers) | ||
727 | { | ||
728 | pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); | ||
729 | int rc; | ||
730 | |||
731 | /* Compute stack bytes; we round up to 64KB and then use | ||
732 | * alloc_pages() so we get the required 64KB alignment as well. | ||
733 | */ | ||
734 | buffer_stack_size = | ||
735 | ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), | ||
736 | 64 * 1024); | ||
737 | |||
738 | /* Allocate two buffer stack indices. */ | ||
739 | rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); | ||
740 | if (rc < 0) { | ||
741 | netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", | ||
742 | rc); | ||
743 | return rc; | ||
744 | } | ||
745 | small_buffer_stack = rc; | ||
746 | large_buffer_stack = rc + 1; | ||
747 | |||
748 | /* Allocate the small memory stack. */ | ||
749 | small_buffer_stack_va = | ||
750 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
751 | if (small_buffer_stack_va == NULL) { | ||
752 | netdev_err(dev, | ||
753 | "Could not alloc %zd bytes for buffer stacks\n", | ||
754 | buffer_stack_size); | ||
755 | return -ENOMEM; | ||
756 | } | ||
757 | rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, | ||
758 | BUFFER_SIZE_SMALL_ENUM, | ||
759 | small_buffer_stack_va, | ||
760 | buffer_stack_size, 0); | ||
761 | if (rc != 0) { | ||
762 | netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); | ||
763 | return rc; | ||
764 | } | ||
765 | rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, | ||
766 | hash_pte, 0); | ||
767 | if (rc != 0) { | ||
768 | netdev_err(dev, | ||
769 | "gxio_mpipe_register_buffer_memory failed: %d\n", | ||
770 | rc); | ||
771 | return rc; | ||
772 | } | ||
773 | |||
774 | /* Allocate the large buffer stack. */ | ||
775 | large_buffer_stack_va = | ||
776 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
777 | if (large_buffer_stack_va == NULL) { | ||
778 | netdev_err(dev, | ||
779 | "Could not alloc %zd bytes for buffer stacks\n", | ||
780 | buffer_stack_size); | ||
781 | return -ENOMEM; | ||
782 | } | ||
783 | rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, | ||
784 | BUFFER_SIZE_LARGE_ENUM, | ||
785 | large_buffer_stack_va, | ||
786 | buffer_stack_size, 0); | ||
787 | if (rc != 0) { | ||
788 | netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", | ||
789 | rc); | ||
790 | return rc; | ||
791 | } | ||
792 | rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, | ||
793 | hash_pte, 0); | ||
794 | if (rc != 0) { | ||
795 | netdev_err(dev, | ||
796 | "gxio_mpipe_register_buffer_memory failed: %d\n", | ||
797 | rc); | ||
798 | return rc; | ||
799 | } | ||
800 | |||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | /* Allocate per-cpu resources (memory for completions and idescs). | ||
805 | * This routine supports tile_net_init_mpipe(), below. | ||
806 | */ | ||
807 | static int alloc_percpu_mpipe_resources(struct net_device *dev, | ||
808 | int cpu, int ring) | ||
809 | { | ||
810 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
811 | int order, i, rc; | ||
812 | struct page *page; | ||
813 | void *addr; | ||
814 | |||
815 | /* Allocate the "comps". */ | ||
816 | order = get_order(COMPS_SIZE); | ||
817 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | ||
818 | if (page == NULL) { | ||
819 | netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", | ||
820 | COMPS_SIZE); | ||
821 | return -ENOMEM; | ||
822 | } | ||
823 | addr = pfn_to_kaddr(page_to_pfn(page)); | ||
824 | memset(addr, 0, COMPS_SIZE); | ||
825 | for (i = 0; i < TILE_NET_CHANNELS; i++) | ||
826 | info->comps_for_echannel[i] = | ||
827 | addr + i * sizeof(struct tile_net_comps); | ||
828 | |||
829 | /* If this is a network cpu, create an iqueue. */ | ||
830 | if (cpu_isset(cpu, network_cpus_map)) { | ||
831 | order = get_order(NOTIF_RING_SIZE); | ||
832 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | ||
833 | if (page == NULL) { | ||
834 | netdev_err(dev, | ||
835 | "Failed to alloc %zd bytes iqueue memory\n", | ||
836 | NOTIF_RING_SIZE); | ||
837 | return -ENOMEM; | ||
838 | } | ||
839 | addr = pfn_to_kaddr(page_to_pfn(page)); | ||
840 | rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, | ||
841 | addr, NOTIF_RING_SIZE, 0); | ||
842 | if (rc < 0) { | ||
843 | netdev_err(dev, | ||
844 | "gxio_mpipe_iqueue_init failed: %d\n", rc); | ||
845 | return rc; | ||
846 | } | ||
847 | info->has_iqueue = true; | ||
848 | } | ||
849 | |||
850 | return ring; | ||
851 | } | ||
852 | |||
853 | /* Initialize NotifGroup and buckets. | ||
854 | * This routine supports tile_net_init_mpipe(), below. | ||
855 | */ | ||
856 | static int init_notif_group_and_buckets(struct net_device *dev, | ||
857 | int ring, int network_cpus_count) | ||
858 | { | ||
859 | int group, rc; | ||
860 | |||
861 | /* Allocate one NotifGroup. */ | ||
862 | rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); | ||
863 | if (rc < 0) { | ||
864 | netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", | ||
865 | rc); | ||
866 | return rc; | ||
867 | } | ||
868 | group = rc; | ||
869 | |||
870 | /* Initialize global num_buckets value. */ | ||
871 | if (network_cpus_count > 4) | ||
872 | num_buckets = 256; | ||
873 | else if (network_cpus_count > 1) | ||
874 | num_buckets = 16; | ||
875 | |||
876 | /* Allocate some buckets, and set global first_bucket value. */ | ||
877 | rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); | ||
878 | if (rc < 0) { | ||
879 | netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); | ||
880 | return rc; | ||
881 | } | ||
882 | first_bucket = rc; | ||
883 | |||
884 | /* Init group and buckets. */ | ||
885 | rc = gxio_mpipe_init_notif_group_and_buckets( | ||
886 | &context, group, ring, network_cpus_count, | ||
887 | first_bucket, num_buckets, | ||
888 | GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); | ||
889 | if (rc != 0) { | ||
890 | netdev_err( | ||
891 | dev, | ||
892 | "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", | ||
893 | rc); | ||
894 | return rc; | ||
895 | } | ||
896 | |||
897 | return 0; | ||
898 | } | ||
899 | |||
900 | /* Create an irq and register it, then activate the irq and request | ||
901 | * interrupts on all cores. Note that "ingress_irq" being initialized | ||
902 | * is how we know not to call tile_net_init_mpipe() again. | ||
903 | * This routine supports tile_net_init_mpipe(), below. | ||
904 | */ | ||
905 | static int tile_net_setup_interrupts(struct net_device *dev) | ||
906 | { | ||
907 | int cpu, rc; | ||
908 | |||
909 | rc = create_irq(); | ||
910 | if (rc < 0) { | ||
911 | netdev_err(dev, "create_irq failed: %d\n", rc); | ||
912 | return rc; | ||
913 | } | ||
914 | ingress_irq = rc; | ||
915 | tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); | ||
916 | rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, | ||
917 | 0, NULL, NULL); | ||
918 | if (rc != 0) { | ||
919 | netdev_err(dev, "request_irq failed: %d\n", rc); | ||
920 | destroy_irq(ingress_irq); | ||
921 | ingress_irq = -1; | ||
922 | return rc; | ||
923 | } | ||
924 | |||
925 | for_each_online_cpu(cpu) { | ||
926 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
927 | if (info->has_iqueue) { | ||
928 | gxio_mpipe_request_notif_ring_interrupt( | ||
929 | &context, cpu_x(cpu), cpu_y(cpu), | ||
930 | 1, ingress_irq, info->iqueue.ring); | ||
931 | } | ||
932 | } | ||
933 | |||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ | ||
938 | static void tile_net_init_mpipe_fail(void) | ||
939 | { | ||
940 | int cpu; | ||
941 | |||
942 | /* Do cleanups that require the mpipe context first. */ | ||
943 | if (small_buffer_stack >= 0) | ||
944 | tile_net_pop_all_buffers(small_buffer_stack); | ||
945 | if (large_buffer_stack >= 0) | ||
946 | tile_net_pop_all_buffers(large_buffer_stack); | ||
947 | |||
948 | /* Destroy mpipe context so the hardware no longer owns any memory. */ | ||
949 | gxio_mpipe_destroy(&context); | ||
950 | |||
951 | for_each_online_cpu(cpu) { | ||
952 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
953 | free_pages((unsigned long)(info->comps_for_echannel[0]), | ||
954 | get_order(COMPS_SIZE)); | ||
955 | info->comps_for_echannel[0] = NULL; | ||
956 | free_pages((unsigned long)(info->iqueue.idescs), | ||
957 | get_order(NOTIF_RING_SIZE)); | ||
958 | info->iqueue.idescs = NULL; | ||
959 | } | ||
960 | |||
961 | if (small_buffer_stack_va) | ||
962 | free_pages_exact(small_buffer_stack_va, buffer_stack_size); | ||
963 | if (large_buffer_stack_va) | ||
964 | free_pages_exact(large_buffer_stack_va, buffer_stack_size); | ||
965 | |||
966 | small_buffer_stack_va = NULL; | ||
967 | large_buffer_stack_va = NULL; | ||
968 | large_buffer_stack = -1; | ||
969 | small_buffer_stack = -1; | ||
970 | first_bucket = -1; | ||
971 | } | ||
972 | |||
973 | /* The first time any tilegx network device is opened, we initialize | ||
974 | * the global mpipe state. If this step fails, we fail to open the | ||
975 | * device, but if it succeeds, we never need to do it again, and since | ||
976 | * tile_net can't be unloaded, we never undo it. | ||
977 | * | ||
978 | * Note that some resources in this path (buffer stack indices, | ||
979 | * bindings from init_buffer_stack, etc.) are hypervisor resources | ||
980 | * that are freed implicitly by gxio_mpipe_destroy(). | ||
981 | */ | ||
982 | static int tile_net_init_mpipe(struct net_device *dev) | ||
983 | { | ||
984 | int i, num_buffers, rc; | ||
985 | int cpu; | ||
986 | int first_ring, ring; | ||
987 | int network_cpus_count = cpus_weight(network_cpus_map); | ||
988 | |||
989 | if (!hash_default) { | ||
990 | netdev_err(dev, "Networking requires hash_default!\n"); | ||
991 | return -EIO; | ||
992 | } | ||
993 | |||
994 | rc = gxio_mpipe_init(&context, 0); | ||
995 | if (rc != 0) { | ||
996 | netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); | ||
997 | return -EIO; | ||
998 | } | ||
999 | |||
1000 | /* Set up the buffer stacks. */ | ||
1001 | num_buffers = | ||
1002 | network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); | ||
1003 | rc = init_buffer_stacks(dev, num_buffers); | ||
1004 | if (rc != 0) | ||
1005 | goto fail; | ||
1006 | |||
1007 | /* Provide initial buffers. */ | ||
1008 | rc = -ENOMEM; | ||
1009 | for (i = 0; i < num_buffers; i++) { | ||
1010 | if (!tile_net_provide_buffer(true)) { | ||
1011 | netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); | ||
1012 | goto fail; | ||
1013 | } | ||
1014 | } | ||
1015 | for (i = 0; i < num_buffers; i++) { | ||
1016 | if (!tile_net_provide_buffer(false)) { | ||
1017 | netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); | ||
1018 | goto fail; | ||
1019 | } | ||
1020 | } | ||
1021 | |||
1022 | /* Allocate one NotifRing for each network cpu. */ | ||
1023 | rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); | ||
1024 | if (rc < 0) { | ||
1025 | netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", | ||
1026 | rc); | ||
1027 | goto fail; | ||
1028 | } | ||
1029 | |||
1030 | /* Init NotifRings per-cpu. */ | ||
1031 | first_ring = rc; | ||
1032 | ring = first_ring; | ||
1033 | for_each_online_cpu(cpu) { | ||
1034 | rc = alloc_percpu_mpipe_resources(dev, cpu, ring); | ||
1035 | if (rc < 0) | ||
1036 | goto fail; | ||
1037 | ring = rc; | ||
1038 | } | ||
1039 | |||
1040 | /* Initialize NotifGroup and buckets. */ | ||
1041 | rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); | ||
1042 | if (rc != 0) | ||
1043 | goto fail; | ||
1044 | |||
1045 | /* Create and enable interrupts. */ | ||
1046 | rc = tile_net_setup_interrupts(dev); | ||
1047 | if (rc != 0) | ||
1048 | goto fail; | ||
1049 | |||
1050 | return 0; | ||
1051 | |||
1052 | fail: | ||
1053 | tile_net_init_mpipe_fail(); | ||
1054 | return rc; | ||
1055 | } | ||
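As the ISSUE comment in tile_net_poll() notes, buffers are only replenished after a poll pass completes, so each network cpu must be able to fill its 512-entry NotifRing plus one 128-idesc batch from the initial pool. With, say, four network cpus (a purely illustrative count), num_buffers = 4 * (512 + 128) = 2560, and the loops above push 2560 small and 2560 large buffers before the NotifRings and interrupts are set up.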
1056 | |||
1057 | /* Create persistent egress info for a given egress channel. | ||
1058 | * Note that this may be shared between, say, "gbe0" and "xgbe0". | ||
1059 | * ISSUE: Defer header allocation until TSO is actually needed? | ||
1060 | */ | ||
1061 | static int tile_net_init_egress(struct net_device *dev, int echannel) | ||
1062 | { | ||
1063 | struct page *headers_page, *edescs_page, *equeue_page; | ||
1064 | gxio_mpipe_edesc_t *edescs; | ||
1065 | gxio_mpipe_equeue_t *equeue; | ||
1066 | unsigned char *headers; | ||
1067 | int headers_order, edescs_order, equeue_order; | ||
1068 | size_t edescs_size; | ||
1069 | int edma; | ||
1070 | int rc = -ENOMEM; | ||
1071 | |||
1072 | /* Only initialize once. */ | ||
1073 | if (egress_for_echannel[echannel].equeue != NULL) | ||
1074 | return 0; | ||
1075 | |||
1076 | /* Allocate memory for the "headers". */ | ||
1077 | headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); | ||
1078 | headers_page = alloc_pages(GFP_KERNEL, headers_order); | ||
1079 | if (headers_page == NULL) { | ||
1080 | netdev_warn(dev, | ||
1081 | "Could not alloc %zd bytes for TSO headers.\n", | ||
1082 | PAGE_SIZE << headers_order); | ||
1083 | goto fail; | ||
1084 | } | ||
1085 | headers = pfn_to_kaddr(page_to_pfn(headers_page)); | ||
1086 | |||
1087 | /* Allocate memory for the "edescs". */ | ||
1088 | edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); | ||
1089 | edescs_order = get_order(edescs_size); | ||
1090 | edescs_page = alloc_pages(GFP_KERNEL, edescs_order); | ||
1091 | if (edescs_page == NULL) { | ||
1092 | netdev_warn(dev, | ||
1093 | "Could not alloc %zd bytes for eDMA ring.\n", | ||
1094 | edescs_size); | ||
1095 | goto fail_headers; | ||
1096 | } | ||
1097 | edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); | ||
1098 | |||
1099 | /* Allocate memory for the "equeue". */ | ||
1100 | equeue_order = get_order(sizeof(*equeue)); | ||
1101 | equeue_page = alloc_pages(GFP_KERNEL, equeue_order); | ||
1102 | if (equeue_page == NULL) { | ||
1103 | netdev_warn(dev, | ||
1104 | "Could not alloc %zd bytes for equeue info.\n", | ||
1105 | PAGE_SIZE << equeue_order); | ||
1106 | goto fail_edescs; | ||
1107 | } | ||
1108 | equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); | ||
1109 | |||
1110 | /* Allocate an edma ring. Note that in practice this can't | ||
1111 | * fail, which is good, because we will leak an edma ring if so. | ||
1112 | */ | ||
1113 | rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); | ||
1114 | if (rc < 0) { | ||
1115 | netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", | ||
1116 | rc); | ||
1117 | goto fail_equeue; | ||
1118 | } | ||
1119 | edma = rc; | ||
1120 | |||
1121 | /* Initialize the equeue. */ | ||
1122 | rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, | ||
1123 | edescs, edescs_size, 0); | ||
1124 | if (rc != 0) { | ||
1125 | netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); | ||
1126 | goto fail_equeue; | ||
1127 | } | ||
1128 | |||
1129 | /* Done. */ | ||
1130 | egress_for_echannel[echannel].equeue = equeue; | ||
1131 | egress_for_echannel[echannel].headers = headers; | ||
1132 | return 0; | ||
1133 | |||
1134 | fail_equeue: | ||
1135 | __free_pages(equeue_page, equeue_order); | ||
1136 | |||
1137 | fail_edescs: | ||
1138 | __free_pages(edescs_page, edescs_order); | ||
1139 | |||
1140 | fail_headers: | ||
1141 | __free_pages(headers_page, headers_order); | ||
1142 | |||
1143 | fail: | ||
1144 | return rc; | ||
1145 | } | ||
1146 | |||
1147 | /* Return channel number for a newly-opened link. */ | ||
1148 | static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, | ||
1149 | const char *link_name) | ||
1150 | { | ||
1151 | int rc = gxio_mpipe_link_open(link, &context, link_name, 0); | ||
1152 | if (rc < 0) { | ||
1153 | netdev_err(dev, "Failed to open '%s'\n", link_name); | ||
1154 | return rc; | ||
1155 | } | ||
1156 | rc = gxio_mpipe_link_channel(link); | ||
1157 | if (rc < 0 || rc >= TILE_NET_CHANNELS) { | ||
1158 | netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); | ||
1159 | gxio_mpipe_link_close(link); | ||
1160 | return -EINVAL; | ||
1161 | } | ||
1162 | return rc; | ||
1163 | } | ||
1164 | |||
1165 | /* Help the kernel activate the given network interface. */ | ||
1166 | static int tile_net_open(struct net_device *dev) | ||
1167 | { | ||
1168 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1169 | int cpu, rc; | ||
1170 | |||
1171 | mutex_lock(&tile_net_devs_for_channel_mutex); | ||
1172 | |||
1173 | /* Do one-time initialization the first time any device is opened. */ | ||
1174 | if (ingress_irq < 0) { | ||
1175 | rc = tile_net_init_mpipe(dev); | ||
1176 | if (rc != 0) | ||
1177 | goto fail; | ||
1178 | } | ||
1179 | |||
1180 | /* Determine if this is the "loopify" device. */ | ||
1181 | if (unlikely((loopify_link_name != NULL) && | ||
1182 | !strcmp(dev->name, loopify_link_name))) { | ||
1183 | rc = tile_net_link_open(dev, &priv->link, "loop0"); | ||
1184 | if (rc < 0) | ||
1185 | goto fail; | ||
1186 | priv->channel = rc; | ||
1187 | rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); | ||
1188 | if (rc < 0) | ||
1189 | goto fail; | ||
1190 | priv->loopify_channel = rc; | ||
1191 | priv->echannel = rc; | ||
1192 | } else { | ||
1193 | rc = tile_net_link_open(dev, &priv->link, dev->name); | ||
1194 | if (rc < 0) | ||
1195 | goto fail; | ||
1196 | priv->channel = rc; | ||
1197 | priv->echannel = rc; | ||
1198 | } | ||
1199 | |||
1200 | /* Initialize egress info (if needed). Once ever, per echannel. */ | ||
1201 | rc = tile_net_init_egress(dev, priv->echannel); | ||
1202 | if (rc != 0) | ||
1203 | goto fail; | ||
1204 | |||
1205 | tile_net_devs_for_channel[priv->channel] = dev; | ||
1206 | |||
1207 | rc = tile_net_update(dev); | ||
1208 | if (rc != 0) | ||
1209 | goto fail; | ||
1210 | |||
1211 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1212 | |||
1213 | /* Initialize the transmit wake timer for this device for each cpu. */ | ||
1214 | for_each_online_cpu(cpu) { | ||
1215 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
1216 | struct tile_net_tx_wake *tx_wake = | ||
1217 | &info->tx_wake[priv->echannel]; | ||
1218 | |||
1219 | hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, | ||
1220 | HRTIMER_MODE_REL); | ||
1221 | tx_wake->timer.function = tile_net_handle_tx_wake_timer; | ||
1222 | tx_wake->dev = dev; | ||
1223 | } | ||
1224 | |||
1225 | for_each_online_cpu(cpu) | ||
1226 | netif_start_subqueue(dev, cpu); | ||
1227 | netif_carrier_on(dev); | ||
1228 | return 0; | ||
1229 | |||
1230 | fail: | ||
1231 | if (priv->loopify_channel >= 0) { | ||
1232 | if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | ||
1233 | netdev_warn(dev, "Failed to close loopify link!\n"); | ||
1234 | priv->loopify_channel = -1; | ||
1235 | } | ||
1236 | if (priv->channel >= 0) { | ||
1237 | if (gxio_mpipe_link_close(&priv->link) != 0) | ||
1238 | netdev_warn(dev, "Failed to close link!\n"); | ||
1239 | tile_net_devs_for_channel[priv->channel] = NULL; | ||
1240 | priv->channel = -1; | ||
1241 | } | ||
1242 | priv->echannel = -1; | ||
1243 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1244 | |||
1245 | /* Don't return raw gxio error codes to generic Linux. */ | ||
1246 | return (rc > -512) ? rc : -EIO; | ||
1247 | } | ||
1248 | |||
1249 | /* Help the kernel deactivate the given network interface. */ | ||
1250 | static int tile_net_stop(struct net_device *dev) | ||
1251 | { | ||
1252 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1253 | int cpu; | ||
1254 | |||
1255 | for_each_online_cpu(cpu) { | ||
1256 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
1257 | struct tile_net_tx_wake *tx_wake = | ||
1258 | &info->tx_wake[priv->echannel]; | ||
1259 | |||
1260 | hrtimer_cancel(&tx_wake->timer); | ||
1261 | netif_stop_subqueue(dev, cpu); | ||
1262 | } | ||
1263 | |||
1264 | mutex_lock(&tile_net_devs_for_channel_mutex); | ||
1265 | tile_net_devs_for_channel[priv->channel] = NULL; | ||
1266 | (void)tile_net_update(dev); | ||
1267 | if (priv->loopify_channel >= 0) { | ||
1268 | if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | ||
1269 | netdev_warn(dev, "Failed to close loopify link!\n"); | ||
1270 | priv->loopify_channel = -1; | ||
1271 | } | ||
1272 | if (priv->channel >= 0) { | ||
1273 | if (gxio_mpipe_link_close(&priv->link) != 0) | ||
1274 | netdev_warn(dev, "Failed to close link!\n"); | ||
1275 | priv->channel = -1; | ||
1276 | } | ||
1277 | priv->echannel = -1; | ||
1278 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1279 | |||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | /* Determine the VA for a fragment. */ | ||
1284 | static inline void *tile_net_frag_buf(skb_frag_t *f) | ||
1285 | { | ||
1286 | unsigned long pfn = page_to_pfn(skb_frag_page(f)); | ||
1287 | return pfn_to_kaddr(pfn) + f->page_offset; | ||
1288 | } | ||
1289 | |||
1290 | /* Acquire a completion entry and an egress slot, or if we can't, | ||
1291 | * stop the queue and schedule the tx_wake timer. | ||
1292 | */ | ||
1293 | static s64 tile_net_equeue_try_reserve(struct net_device *dev, | ||
1294 | struct tile_net_comps *comps, | ||
1295 | gxio_mpipe_equeue_t *equeue, | ||
1296 | int num_edescs) | ||
1297 | { | ||
1298 | /* Try to acquire a completion entry. */ | ||
1299 | if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || | ||
1300 | tile_net_free_comps(equeue, comps, 32, false) != 0) { | ||
1301 | |||
1302 | /* Try to acquire an egress slot. */ | ||
1303 | s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | ||
1304 | if (slot >= 0) | ||
1305 | return slot; | ||
1306 | |||
1307 | /* Freeing some completions gives the equeue time to drain. */ | ||
1308 | tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); | ||
1309 | |||
1310 | slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | ||
1311 | if (slot >= 0) | ||
1312 | return slot; | ||
1313 | } | ||
1314 | |||
1315 | /* Still nothing; give up and stop the queue for a short while. */ | ||
1316 | netif_stop_subqueue(dev, smp_processor_id()); | ||
1317 | tile_net_schedule_tx_wake_timer(dev); | ||
1318 | return -1; | ||
1319 | } | ||
1320 | |||
1321 | /* Determine how many edesc's are needed for TSO. | ||
1322 | * | ||
1323 | * Sometimes, if "sendfile()" requires copying, we will be called with | ||
1324 | * "data" containing the header and payload, with "frags" being empty. | ||
1325 | * Sometimes, for example when using NFS over TCP, a single segment can | ||
1326 | * span 3 fragments. This requires special care. | ||
1327 | */ | ||
1328 | static int tso_count_edescs(struct sk_buff *skb) | ||
1329 | { | ||
1330 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1331 | unsigned int data_len = skb->data_len; | ||
1332 | unsigned int p_len = sh->gso_size; | ||
1333 | long f_id = -1; /* id of the current fragment */ | ||
1334 | long f_size = -1; /* size of the current fragment */ | ||
1335 | long f_used = -1; /* bytes used from the current fragment */ | ||
1336 | long n; /* size of the current piece of payload */ | ||
1337 | int num_edescs = 0; | ||
1338 | int segment; | ||
1339 | |||
1340 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1341 | |||
1342 | unsigned int p_used = 0; | ||
1343 | |||
1344 | /* One edesc for header and for each piece of the payload. */ | ||
1345 | for (num_edescs++; p_used < p_len; num_edescs++) { | ||
1346 | |||
1347 | /* Advance as needed. */ | ||
1348 | while (f_used >= f_size) { | ||
1349 | f_id++; | ||
1350 | f_size = sh->frags[f_id].size; | ||
1351 | f_used = 0; | ||
1352 | } | ||
1353 | |||
1354 | /* Use bytes from the current fragment. */ | ||
1355 | n = p_len - p_used; | ||
1356 | if (n > f_size - f_used) | ||
1357 | n = f_size - f_used; | ||
1358 | f_used += n; | ||
1359 | p_used += n; | ||
1360 | } | ||
1361 | |||
1362 | /* The last segment may be less than gso_size. */ | ||
1363 | data_len -= p_len; | ||
1364 | if (data_len < p_len) | ||
1365 | p_len = data_len; | ||
1366 | } | ||
1367 | |||
1368 | return num_edescs; | ||
1369 | } | ||
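A worked example of the counting, with purely hypothetical numbers: for an skb with a gso_size of 1448, three segments, and 4344 payload bytes spread over frags of 2048, 1000 and 1296 bytes, the loop yields 8 edescs: one header edesc per segment plus 1, 2 and 2 payload pieces respectively, since the second and third segments each straddle a fragment boundary. The same logic, lifted into a standalone sketch that works on plain arrays instead of an sk_buff (names are illustrative):

#include <stdio.h>

static int count_edescs(unsigned int gso_size, int gso_segs,
			unsigned int data_len,
			const unsigned int *frag_sizes)
{
	unsigned int p_len = gso_size;
	long f_id = -1, f_size = -1, f_used = -1, n;
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for the header, plus one per payload piece. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance to the fragment that still has bytes left. */
			while (f_used >= f_size) {
				f_id++;
				f_size = frag_sizes[f_id];
				f_used = 0;
			}
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}
	return num_edescs;
}

int main(void)
{
	const unsigned int frags[] = { 2048, 1000, 1296 };

	printf("%d\n", count_edescs(1448, 3, 4344, frags));	/* prints 8 */
	return 0;
}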
1370 | |||
1371 | /* Prepare modified copies of the skbuff headers. | ||
1372 | * FIXME: add support for IPv6. | ||
1373 | */ | ||
1374 | static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, | ||
1375 | s64 slot) | ||
1376 | { | ||
1377 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1378 | struct iphdr *ih; | ||
1379 | struct tcphdr *th; | ||
1380 | unsigned int data_len = skb->data_len; | ||
1381 | unsigned char *data = skb->data; | ||
1382 | unsigned int ih_off, th_off, sh_len, p_len; | ||
1383 | unsigned int isum_seed, tsum_seed, id, seq; | ||
1384 | long f_id = -1; /* id of the current fragment */ | ||
1385 | long f_size = -1; /* size of the current fragment */ | ||
1386 | long f_used = -1; /* bytes used from the current fragment */ | ||
1387 | long n; /* size of the current piece of payload */ | ||
1388 | int segment; | ||
1389 | |||
1390 | /* Locate original headers and compute various lengths. */ | ||
1391 | ih = ip_hdr(skb); | ||
1392 | th = tcp_hdr(skb); | ||
1393 | ih_off = skb_network_offset(skb); | ||
1394 | th_off = skb_transport_offset(skb); | ||
1395 | sh_len = th_off + tcp_hdrlen(skb); | ||
1396 | p_len = sh->gso_size; | ||
1397 | |||
1398 | /* Set up seed values for IP and TCP csum and initialize id and seq. */ | ||
1399 | isum_seed = ((0xFFFF - ih->check) + | ||
1400 | (0xFFFF - ih->tot_len) + | ||
1401 | (0xFFFF - ih->id)); | ||
1402 | tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); | ||
1403 | id = ntohs(ih->id); | ||
1404 | seq = ntohl(th->seq); | ||
1405 | |||
1406 | /* Prepare all the headers. */ | ||
1407 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1408 | unsigned char *buf; | ||
1409 | unsigned int p_used = 0; | ||
1410 | |||
1411 | /* Copy to the header memory for this segment. */ | ||
1412 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
1413 | NET_IP_ALIGN; | ||
1414 | memcpy(buf, data, sh_len); | ||
1415 | |||
1416 | /* Update copied ip header. */ | ||
1417 | ih = (struct iphdr *)(buf + ih_off); | ||
1418 | ih->tot_len = htons(sh_len + p_len - ih_off); | ||
1419 | ih->id = htons(id); | ||
1420 | ih->check = csum_long(isum_seed + ih->tot_len + | ||
1421 | ih->id) ^ 0xffff; | ||
1422 | |||
1423 | /* Update copied tcp header. */ | ||
1424 | th = (struct tcphdr *)(buf + th_off); | ||
1425 | th->seq = htonl(seq); | ||
1426 | th->check = csum_long(tsum_seed + htons(sh_len + p_len)); | ||
1427 | if (segment != sh->gso_segs - 1) { | ||
1428 | th->fin = 0; | ||
1429 | th->psh = 0; | ||
1430 | } | ||
1431 | |||
1432 | /* Skip past the header. */ | ||
1433 | slot++; | ||
1434 | |||
1435 | /* Skip past the payload. */ | ||
1436 | while (p_used < p_len) { | ||
1437 | |||
1438 | /* Advance as needed. */ | ||
1439 | while (f_used >= f_size) { | ||
1440 | f_id++; | ||
1441 | f_size = sh->frags[f_id].size; | ||
1442 | f_used = 0; | ||
1443 | } | ||
1444 | |||
1445 | /* Use bytes from the current fragment. */ | ||
1446 | n = p_len - p_used; | ||
1447 | if (n > f_size - f_used) | ||
1448 | n = f_size - f_used; | ||
1449 | f_used += n; | ||
1450 | p_used += n; | ||
1451 | |||
1452 | slot++; | ||
1453 | } | ||
1454 | |||
1455 | id++; | ||
1456 | seq += p_len; | ||
1457 | |||
1458 | /* The last segment may be less than gso_size. */ | ||
1459 | data_len -= p_len; | ||
1460 | if (data_len < p_len) | ||
1461 | p_len = data_len; | ||
1462 | } | ||
1463 | |||
1464 | /* Flush the headers so they are ready for hardware DMA. */ | ||
1465 | wmb(); | ||
1466 | } | ||
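The seed trick above relies on the IP and TCP checksums being ones'-complement sums: (0xFFFF - x) removes a 16-bit field from the existing checksum, the per-segment replacement values are added back, and the result is folded to 16 bits and complemented (csum_long() in the driver is assumed here to do that folding). A standalone sketch over a fake four-word header, comparing the incremental update against a full recomputation; all names and values are illustrative, and byte order is ignored for simplicity:

#include <stdint.h>
#include <stdio.h>

/* Fold a wide sum into 16 bits, ones'-complement style (roughly what the
 * driver's csum_long() is assumed to do). */
static uint16_t fold16(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Ones'-complement checksum over 16-bit words. */
static uint16_t csum16(const uint16_t *w, int n)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += w[i];
	return fold16(sum) ^ 0xffff;
}

int main(void)
{
	/* A fake four-word "header": tot_len, id, some other field, checksum. */
	uint16_t hdr[4] = { 4396, 0x1234, 0xbeef, 0 };
	uint16_t hdr2[4];
	uint16_t new_tot_len = 1500, new_id = 0x1235, incremental;
	uint32_t seed;

	hdr[3] = csum16(hdr, 4);

	/* Seed as in tso_headers_prepare(): strip check, tot_len and id. */
	seed = (0xFFFF - hdr[3]) + (0xFFFF - hdr[0]) + (0xFFFF - hdr[1]);

	/* Add the per-segment values back, fold, and complement. */
	incremental = fold16((uint64_t)seed + new_tot_len + new_id) ^ 0xffff;

	/* Recompute from scratch and compare; both print the same value. */
	hdr2[0] = new_tot_len;
	hdr2[1] = new_id;
	hdr2[2] = 0xbeef;
	hdr2[3] = 0;
	hdr2[3] = csum16(hdr2, 4);

	printf("incremental=0x%04x full=0x%04x\n", incremental, hdr2[3]);
	return 0;
}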
1467 | |||
1468 | /* Pass all the data to mpipe for egress. */ | ||
1469 | static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | ||
1470 | struct sk_buff *skb, unsigned char *headers, s64 slot) | ||
1471 | { | ||
1472 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1473 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1474 | unsigned int data_len = skb->data_len; | ||
1475 | unsigned int p_len = sh->gso_size; | ||
1476 | gxio_mpipe_edesc_t edesc_head = { { 0 } }; | ||
1477 | gxio_mpipe_edesc_t edesc_body = { { 0 } }; | ||
1478 | long f_id = -1; /* id of the current fragment */ | ||
1479 | long f_size = -1; /* size of the current fragment */ | ||
1480 | long f_used = -1; /* bytes used from the current fragment */ | ||
1481 | long n; /* size of the current piece of payload */ | ||
1482 | unsigned long tx_packets = 0, tx_bytes = 0; | ||
1483 | unsigned int csum_start, sh_len; | ||
1484 | int segment; | ||
1485 | |||
1486 | /* Prepare to egress the headers: set up header edesc. */ | ||
1487 | csum_start = skb_checksum_start_offset(skb); | ||
1488 | sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
1489 | edesc_head.csum = 1; | ||
1490 | edesc_head.csum_start = csum_start; | ||
1491 | edesc_head.csum_dest = csum_start + skb->csum_offset; | ||
1492 | edesc_head.xfer_size = sh_len; | ||
1493 | |||
1494 | /* This is only used to specify the TLB. */ | ||
1495 | edesc_head.stack_idx = large_buffer_stack; | ||
1496 | edesc_body.stack_idx = large_buffer_stack; | ||
1497 | |||
1498 | /* Egress all the edescs. */ | ||
1499 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1500 | void *va; | ||
1501 | unsigned char *buf; | ||
1502 | unsigned int p_used = 0; | ||
1503 | |||
1504 | /* Egress the header. */ | ||
1505 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
1506 | NET_IP_ALIGN; | ||
1507 | edesc_head.va = va_to_tile_io_addr(buf); | ||
1508 | gxio_mpipe_equeue_put_at(equeue, edesc_head, slot); | ||
1509 | slot++; | ||
1510 | |||
1511 | /* Egress the payload. */ | ||
1512 | while (p_used < p_len) { | ||
1513 | |||
1514 | /* Advance as needed. */ | ||
1515 | while (f_used >= f_size) { | ||
1516 | f_id++; | ||
1517 | f_size = sh->frags[f_id].size; | ||
1518 | f_used = 0; | ||
1519 | } | ||
1520 | |||
1521 | va = tile_net_frag_buf(&sh->frags[f_id]) + f_used; | ||
1522 | |||
1523 | /* Use bytes from the current fragment. */ | ||
1524 | n = p_len - p_used; | ||
1525 | if (n > f_size - f_used) | ||
1526 | n = f_size - f_used; | ||
1527 | f_used += n; | ||
1528 | p_used += n; | ||
1529 | |||
1530 | /* Egress a piece of the payload. */ | ||
1531 | edesc_body.va = va_to_tile_io_addr(va); | ||
1532 | edesc_body.xfer_size = n; | ||
1533 | edesc_body.bound = !(p_used < p_len); | ||
1534 | gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); | ||
1535 | slot++; | ||
1536 | } | ||
1537 | |||
1538 | tx_packets++; | ||
1539 | tx_bytes += sh_len + p_len; | ||
1540 | |||
1541 | /* The last segment may be less than gso_size. */ | ||
1542 | data_len -= p_len; | ||
1543 | if (data_len < p_len) | ||
1544 | p_len = data_len; | ||
1545 | } | ||
1546 | |||
1547 | /* Update stats. */ | ||
1548 | tile_net_stats_add(tx_packets, &priv->stats.tx_packets); | ||
1549 | tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); | ||
1550 | } | ||
1551 | |||
1552 | /* Do "TSO" handling for egress. | ||
1553 | * | ||
1554 | * Normally drivers set NETIF_F_TSO only to support hardware TSO; | ||
1555 | * otherwise the stack uses scatter-gather to implement GSO in software. | ||
1556 | * In our testing, enabling GSO support (via NETIF_F_SG) drops network | ||
1557 | * performance to around 7.5 Gbps on the 10G interfaces, although it | ||
1558 | * also drops cpu utilization substantially, to under 8%. But | ||
1559 | * implementing "TSO" in the driver brings performance back up to line | ||
1560 | * rate, while dropping cpu usage even further, to less than 4%. In | ||
1561 | * practice, profiling of GSO shows that skb_segment() is what causes | ||
1562 | * the performance overheads; we benefit in the driver from using | ||
1563 | * preallocated memory to duplicate the TCP/IP headers. | ||
1564 | */ | ||
1565 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | ||
1566 | { | ||
1567 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1568 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1569 | int channel = priv->echannel; | ||
1570 | struct tile_net_egress *egress = &egress_for_echannel[channel]; | ||
1571 | struct tile_net_comps *comps = info->comps_for_echannel[channel]; | ||
1572 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
1573 | unsigned long irqflags; | ||
1574 | int num_edescs; | ||
1575 | s64 slot; | ||
1576 | |||
1577 | /* Determine how many mpipe edesc's are needed. */ | ||
1578 | num_edescs = tso_count_edescs(skb); | ||
1579 | |||
1580 | local_irq_save(irqflags); | ||
1581 | |||
1582 | /* Try to acquire a completion entry and an egress slot. */ | ||
1583 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
1584 | if (slot < 0) { | ||
1585 | local_irq_restore(irqflags); | ||
1586 | return NETDEV_TX_BUSY; | ||
1587 | } | ||
1588 | |||
1589 | /* Set up copies of header data properly. */ | ||
1590 | tso_headers_prepare(skb, egress->headers, slot); | ||
1591 | |||
1592 | /* Actually pass the data to the network hardware. */ | ||
1593 | tso_egress(dev, equeue, skb, egress->headers, slot); | ||
1594 | |||
1595 | /* Add a completion record. */ | ||
1596 | add_comp(equeue, comps, slot + num_edescs - 1, skb); | ||
1597 | |||
1598 | local_irq_restore(irqflags); | ||
1599 | |||
1600 | /* Make sure the egress timer is scheduled. */ | ||
1601 | tile_net_schedule_egress_timer(); | ||
1602 | |||
1603 | return NETDEV_TX_OK; | ||
1604 | } | ||
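As a reference for where num_edescs in tile_net_tx_tso() comes from: each GSO segment needs one descriptor for its copied headers plus one per fragment piece that supplies payload for that segment. The sketch below mirrors the payload walk in tso_egress() above; it illustrates the counting idea only, is not the exact tso_count_edescs() helper, and assumes all payload lives in the skb fragments as it does in this driver.

	/* Illustrative: count header + payload edescs for a GSO skb. */
	static int example_count_edescs(struct sk_buff *skb)
	{
		struct skb_shared_info *sh = skb_shinfo(skb);
		unsigned int data_len = skb->data_len;	/* payload held in frags */
		unsigned int p_len = sh->gso_size;	/* payload per segment */
		long f_id = -1, f_size = -1, f_used = -1;
		int num_edescs = 0;
		int segment;

		for (segment = 0; segment < sh->gso_segs; segment++) {
			unsigned int p_used = 0;

			num_edescs++;			/* the header edesc */

			while (p_used < p_len) {
				long n;

				/* Advance to the fragment supplying bytes. */
				while (f_used >= f_size) {
					f_id++;
					f_size = skb_frag_size(&sh->frags[f_id]);
					f_used = 0;
				}

				n = p_len - p_used;
				if (n > f_size - f_used)
					n = f_size - f_used;
				f_used += n;
				p_used += n;

				num_edescs++;		/* one payload edesc */
			}

			/* The last segment may be less than gso_size. */
			data_len -= p_len;
			if (data_len < p_len)
				p_len = data_len;
		}

		return num_edescs;
	}
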
1605 | |||
1606 | /* Analyze the body and frags for a transmit request. */ | ||
1607 | static unsigned int tile_net_tx_frags(struct frag *frags, | ||
1608 | struct sk_buff *skb, | ||
1609 | void *b_data, unsigned int b_len) | ||
1610 | { | ||
1611 | unsigned int i, n = 0; | ||
1612 | |||
1613 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1614 | |||
1615 | if (b_len != 0) { | ||
1616 | frags[n].buf = b_data; | ||
1617 | frags[n++].length = b_len; | ||
1618 | } | ||
1619 | |||
1620 | for (i = 0; i < sh->nr_frags; i++) { | ||
1621 | skb_frag_t *f = &sh->frags[i]; | ||
1622 | frags[n].buf = tile_net_frag_buf(f); | ||
1623 | frags[n++].length = skb_frag_size(f); | ||
1624 | } | ||
1625 | |||
1626 | return n; | ||
1627 | } | ||
1628 | |||
1629 | /* Help the kernel transmit a packet. */ | ||
1630 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | ||
1631 | { | ||
1632 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1633 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1634 | struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; | ||
1635 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
1636 | struct tile_net_comps *comps = | ||
1637 | info->comps_for_echannel[priv->echannel]; | ||
1638 | unsigned int len = skb->len; | ||
1639 | unsigned char *data = skb->data; | ||
1640 | unsigned int num_edescs; | ||
1641 | struct frag frags[MAX_FRAGS]; | ||
1642 | gxio_mpipe_edesc_t edescs[MAX_FRAGS]; | ||
1643 | unsigned long irqflags; | ||
1644 | gxio_mpipe_edesc_t edesc = { { 0 } }; | ||
1645 | unsigned int i; | ||
1646 | s64 slot; | ||
1647 | |||
1648 | if (skb_is_gso(skb)) | ||
1649 | return tile_net_tx_tso(skb, dev); | ||
1650 | |||
1651 | num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | ||
1652 | |||
1653 | /* This is only used to specify the TLB. */ | ||
1654 | edesc.stack_idx = large_buffer_stack; | ||
1655 | |||
1656 | /* Prepare the edescs. */ | ||
1657 | for (i = 0; i < num_edescs; i++) { | ||
1658 | edesc.xfer_size = frags[i].length; | ||
1659 | edesc.va = va_to_tile_io_addr(frags[i].buf); | ||
1660 | edescs[i] = edesc; | ||
1661 | } | ||
1662 | |||
1663 | /* Mark the final edesc. */ | ||
1664 | edescs[num_edescs - 1].bound = 1; | ||
1665 | |||
1666 | /* Add checksum info to the initial edesc, if needed. */ | ||
1667 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1668 | unsigned int csum_start = skb_checksum_start_offset(skb); | ||
1669 | edescs[0].csum = 1; | ||
1670 | edescs[0].csum_start = csum_start; | ||
1671 | edescs[0].csum_dest = csum_start + skb->csum_offset; | ||
1672 | } | ||
1673 | |||
1674 | local_irq_save(irqflags); | ||
1675 | |||
1676 | /* Try to acquire a completion entry and an egress slot. */ | ||
1677 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
1678 | if (slot < 0) { | ||
1679 | local_irq_restore(irqflags); | ||
1680 | return NETDEV_TX_BUSY; | ||
1681 | } | ||
1682 | |||
1683 | for (i = 0; i < num_edescs; i++) | ||
1684 | gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); | ||
1685 | |||
1686 | /* Add a completion record. */ | ||
1687 | add_comp(equeue, comps, slot - 1, skb); | ||
1688 | |||
1689 | /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ | ||
1690 | tile_net_stats_add(1, &priv->stats.tx_packets); | ||
1691 | tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), | ||
1692 | &priv->stats.tx_bytes); | ||
1693 | |||
1694 | local_irq_restore(irqflags); | ||
1695 | |||
1696 | /* Make sure the egress timer is scheduled. */ | ||
1697 | tile_net_schedule_egress_timer(); | ||
1698 | |||
1699 | return NETDEV_TX_OK; | ||
1700 | } | ||
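A short worked note on the CHECKSUM_PARTIAL block above: skb_checksum_start_offset() gives the offset from skb->data at which the hardware should start summing, and the 16-bit result must be stored csum_offset bytes further on; for TCP that lands exactly on the 'check' field of the TCP header (offsetof(struct tcphdr, check) == 16). A hedged sketch of how the two edesc fields relate:

	/* Illustrative only; needs <linux/skbuff.h> and <linux/tcp.h>. */
	static void example_csum_offsets(struct sk_buff *skb)
	{
		unsigned int start = skb_checksum_start_offset(skb);	/* from skb->data */
		unsigned int dest = start + skb->csum_offset;		/* where HW writes */

		/* For TCP, dest == start + offsetof(struct tcphdr, check). */
		pr_debug("checksum from offset %u, stored at offset %u\n",
			 start, dest);
	}
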
1701 | |||
1702 | /* Return subqueue id on this core (one per core). */ | ||
1703 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
1704 | { | ||
1705 | return smp_processor_id(); | ||
1706 | } | ||
1707 | |||
1708 | /* Deal with a transmit timeout. */ | ||
1709 | static void tile_net_tx_timeout(struct net_device *dev) | ||
1710 | { | ||
1711 | int cpu; | ||
1712 | |||
1713 | for_each_online_cpu(cpu) | ||
1714 | netif_wake_subqueue(dev, cpu); | ||
1715 | } | ||
1716 | |||
1717 | /* Ioctl commands. */ | ||
1718 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1719 | { | ||
1720 | return -EOPNOTSUPP; | ||
1721 | } | ||
1722 | |||
1723 | /* Get system network statistics for device. */ | ||
1724 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | ||
1725 | { | ||
1726 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1727 | return &priv->stats; | ||
1728 | } | ||
1729 | |||
1730 | /* Change the MTU. */ | ||
1731 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | ||
1732 | { | ||
1733 | if ((new_mtu < 68) || (new_mtu > 1500)) | ||
1734 | return -EINVAL; | ||
1735 | dev->mtu = new_mtu; | ||
1736 | return 0; | ||
1737 | } | ||
1738 | |||
1739 | /* Change the Ethernet address of the NIC. | ||
1740 | * | ||
1741 | * The hypervisor driver does not support changing MAC address. However, | ||
1742 | * the hardware does not do anything with the MAC address, so the address | ||
1743 | * which gets used on outgoing packets, and which is accepted on incoming | ||
1744 | * packets, is completely up to us. | ||
1745 | * | ||
1746 | * Returns 0 on success, negative on failure. | ||
1747 | */ | ||
1748 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | ||
1749 | { | ||
1750 | struct sockaddr *addr = p; | ||
1751 | |||
1752 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1753 | return -EINVAL; | ||
1754 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
1755 | return 0; | ||
1756 | } | ||
1757 | |||
1758 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1759 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
1760 | * without having to re-enable interrupts. It's not called while | ||
1761 | * the interrupt routine is executing. | ||
1762 | */ | ||
1763 | static void tile_net_netpoll(struct net_device *dev) | ||
1764 | { | ||
1765 | disable_percpu_irq(ingress_irq); | ||
1766 | tile_net_handle_ingress_irq(ingress_irq, NULL); | ||
1767 | enable_percpu_irq(ingress_irq, 0); | ||
1768 | } | ||
1769 | #endif | ||
1770 | |||
1771 | static const struct net_device_ops tile_net_ops = { | ||
1772 | .ndo_open = tile_net_open, | ||
1773 | .ndo_stop = tile_net_stop, | ||
1774 | .ndo_start_xmit = tile_net_tx, | ||
1775 | .ndo_select_queue = tile_net_select_queue, | ||
1776 | .ndo_do_ioctl = tile_net_ioctl, | ||
1777 | .ndo_get_stats = tile_net_get_stats, | ||
1778 | .ndo_change_mtu = tile_net_change_mtu, | ||
1779 | .ndo_tx_timeout = tile_net_tx_timeout, | ||
1780 | .ndo_set_mac_address = tile_net_set_mac_address, | ||
1781 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1782 | .ndo_poll_controller = tile_net_netpoll, | ||
1783 | #endif | ||
1784 | }; | ||
1785 | |||
1786 | /* The setup function. | ||
1787 | * | ||
1788 | * This uses ether_setup() to assign various fields in dev, including | ||
1789 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | ||
1790 | */ | ||
1791 | static void tile_net_setup(struct net_device *dev) | ||
1792 | { | ||
1793 | ether_setup(dev); | ||
1794 | dev->netdev_ops = &tile_net_ops; | ||
1795 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | ||
1796 | dev->features |= NETIF_F_LLTX; | ||
1797 | dev->features |= NETIF_F_HW_CSUM; | ||
1798 | dev->features |= NETIF_F_SG; | ||
1799 | dev->features |= NETIF_F_TSO; | ||
1800 | dev->mtu = 1500; | ||
1801 | } | ||
1802 | |||
1803 | /* Allocate the device structure, register the device, and obtain the | ||
1804 | * MAC address from the hypervisor. | ||
1805 | */ | ||
1806 | static void tile_net_dev_init(const char *name, const uint8_t *mac) | ||
1807 | { | ||
1808 | int ret; | ||
1809 | int i; | ||
1810 | int nz_addr = 0; | ||
1811 | struct net_device *dev; | ||
1812 | struct tile_net_priv *priv; | ||
1813 | |||
1814 | /* HACK: Ignore "loop" links. */ | ||
1815 | if (strncmp(name, "loop", 4) == 0) | ||
1816 | return; | ||
1817 | |||
1818 | /* Allocate the device structure. Normally, "name" is a | ||
1819 | * template, instantiated by register_netdev(), but not for us. | ||
1820 | */ | ||
1821 | dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup, | ||
1822 | NR_CPUS, 1); | ||
1823 | if (!dev) { | ||
1824 | pr_err("alloc_netdev_mqs(%s) failed\n", name); | ||
1825 | return; | ||
1826 | } | ||
1827 | |||
1828 | /* Initialize "priv". */ | ||
1829 | priv = netdev_priv(dev); | ||
1830 | memset(priv, 0, sizeof(*priv)); | ||
1831 | priv->dev = dev; | ||
1832 | priv->channel = -1; | ||
1833 | priv->loopify_channel = -1; | ||
1834 | priv->echannel = -1; | ||
1835 | |||
1836 | /* Get the MAC address and set it in the device struct; this must | ||
1837 | * be done before the device is opened. If the MAC is all zeroes, | ||
1838 | * we use a random address, since we're probably on the simulator. | ||
1839 | */ | ||
1840 | for (i = 0; i < 6; i++) | ||
1841 | nz_addr |= mac[i]; | ||
1842 | |||
1843 | if (nz_addr) { | ||
1844 | memcpy(dev->dev_addr, mac, 6); | ||
1845 | dev->addr_len = 6; | ||
1846 | } else { | ||
1847 | random_ether_addr(dev->dev_addr); | ||
1848 | } | ||
1849 | |||
1850 | /* Register the network device. */ | ||
1851 | ret = register_netdev(dev); | ||
1852 | if (ret) { | ||
1853 | netdev_err(dev, "register_netdev failed %d\n", ret); | ||
1854 | free_netdev(dev); | ||
1855 | return; | ||
1856 | } | ||
1857 | } | ||
1858 | |||
1859 | /* Per-cpu module initialization. */ | ||
1860 | static void tile_net_init_module_percpu(void *unused) | ||
1861 | { | ||
1862 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1863 | int my_cpu = smp_processor_id(); | ||
1864 | |||
1865 | info->has_iqueue = false; | ||
1866 | |||
1867 | info->my_cpu = my_cpu; | ||
1868 | |||
1869 | /* Initialize the egress timer. */ | ||
1870 | hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
1871 | info->egress_timer.function = tile_net_handle_egress_timer; | ||
1872 | } | ||
1873 | |||
1874 | /* Module initialization. */ | ||
1875 | static int __init tile_net_init_module(void) | ||
1876 | { | ||
1877 | int i; | ||
1878 | char name[GXIO_MPIPE_LINK_NAME_LEN]; | ||
1879 | uint8_t mac[6]; | ||
1880 | |||
1881 | pr_info("Tilera Network Driver\n"); | ||
1882 | |||
1883 | mutex_init(&tile_net_devs_for_channel_mutex); | ||
1884 | |||
1885 | /* Initialize each CPU. */ | ||
1886 | on_each_cpu(tile_net_init_module_percpu, NULL, 1); | ||
1887 | |||
1888 | /* Find out what devices we have, and initialize them. */ | ||
1889 | for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) | ||
1890 | tile_net_dev_init(name, mac); | ||
1891 | |||
1892 | if (!network_cpus_init()) | ||
1893 | network_cpus_map = *cpu_online_mask; | ||
1894 | |||
1895 | return 0; | ||
1896 | } | ||
1897 | |||
1898 | module_init(tile_net_init_module); | ||
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4ffcd57b011b..2857ab078aac 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -478,6 +478,7 @@ struct netvsc_device { | |||
478 | u32 nvsp_version; | 478 | u32 nvsp_version; |
479 | 479 | ||
480 | atomic_t num_outstanding_sends; | 480 | atomic_t num_outstanding_sends; |
481 | wait_queue_head_t wait_drain; | ||
481 | bool start_remove; | 482 | bool start_remove; |
482 | bool destroy; | 483 | bool destroy; |
483 | /* | 484 | /* |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 8b919471472f..0c569831db5a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) | |||
42 | if (!net_device) | 42 | if (!net_device) |
43 | return NULL; | 43 | return NULL; |
44 | 44 | ||
45 | init_waitqueue_head(&net_device->wait_drain); | ||
45 | net_device->start_remove = false; | 46 | net_device->start_remove = false; |
46 | net_device->destroy = false; | 47 | net_device->destroy = false; |
47 | net_device->dev = device; | 48 | net_device->dev = device; |
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device) | |||
387 | spin_unlock_irqrestore(&device->channel->inbound_lock, flags); | 388 | spin_unlock_irqrestore(&device->channel->inbound_lock, flags); |
388 | 389 | ||
389 | /* Wait for all send completions */ | 390 | /* Wait for all send completions */ |
390 | while (atomic_read(&net_device->num_outstanding_sends)) { | 391 | wait_event(net_device->wait_drain, |
391 | dev_info(&device->device, | 392 | atomic_read(&net_device->num_outstanding_sends) == 0); |
392 | "waiting for %d requests to complete...\n", | ||
393 | atomic_read(&net_device->num_outstanding_sends)); | ||
394 | udelay(100); | ||
395 | } | ||
396 | 393 | ||
397 | netvsc_disconnect_vsp(net_device); | 394 | netvsc_disconnect_vsp(net_device); |
398 | 395 | ||
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device, | |||
486 | num_outstanding_sends = | 483 | num_outstanding_sends = |
487 | atomic_dec_return(&net_device->num_outstanding_sends); | 484 | atomic_dec_return(&net_device->num_outstanding_sends); |
488 | 485 | ||
486 | if (net_device->destroy && num_outstanding_sends == 0) | ||
487 | wake_up(&net_device->wait_drain); | ||
488 | |||
489 | if (netif_queue_stopped(ndev) && !net_device->start_remove && | 489 | if (netif_queue_stopped(ndev) && !net_device->start_remove && |
490 | (hv_ringbuf_avail_percent(&device->channel->outbound) | 490 | (hv_ringbuf_avail_percent(&device->channel->outbound) |
491 | > RING_AVAIL_PERCENT_HIWATER || | 491 | > RING_AVAIL_PERCENT_HIWATER || |
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 5ac46f5226f3..47f8e8939266 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c | |||
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL"); | |||
41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ | 41 | #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ |
42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ | 42 | #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ |
43 | #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ | 43 | #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ |
44 | #define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */ | ||
45 | #define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED | ||
44 | 46 | ||
45 | static int ip175c_config_init(struct phy_device *phydev) | 47 | static int ip175c_config_init(struct phy_device *phydev) |
46 | { | 48 | { |
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev) | |||
136 | if (c < 0) | 138 | if (c < 0) |
137 | return c; | 139 | return c; |
138 | 140 | ||
141 | /* INTR pin used: speed/link/duplex will cause an interrupt */ | ||
142 | c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); | ||
143 | if (c < 0) | ||
144 | return c; | ||
145 | |||
139 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { | 146 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { |
140 | /* Additional delay (2ns) used to adjust RX clock phase | 147 | /* Additional delay (2ns) used to adjust RX clock phase |
141 | * at RGMII interface */ | 148 | * at RGMII interface */ |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 683ef1ce5519..5061608f408c 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np) | |||
96 | } | 96 | } |
97 | /** | 97 | /** |
98 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. | 98 | * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. |
99 | * @mdio_np: Pointer to the mii_bus. | 99 | * @mdio_bus_np: Pointer to the mii_bus. |
100 | * | 100 | * |
101 | * Returns a pointer to the mii_bus, or NULL if none found. | 101 | * Returns a pointer to the mii_bus, or NULL if none found. |
102 | * | 102 | * |
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 3faef5670d1f..d75d1f56becf 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c | |||
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
946 | } | 946 | } |
947 | 947 | ||
948 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; | 948 | static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; |
949 | static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | 949 | static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { |
950 | .rx_urb_size = 8 * 1024, | 950 | .rx_urb_size = 8 * 1024, |
951 | .whitelist = { | 951 | .whitelist = { |
952 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), | 952 | .infolen = ARRAY_SIZE(sierra_net_ifnum_list), |
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = { | |||
954 | } | 954 | } |
955 | }; | 955 | }; |
956 | 956 | ||
957 | static const struct driver_info sierra_net_info_68A3 = { | 957 | static const struct driver_info sierra_net_info_direct_ip = { |
958 | .description = "Sierra Wireless USB-to-WWAN Modem", | 958 | .description = "Sierra Wireless USB-to-WWAN Modem", |
959 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, | 959 | .flags = FLAG_WWAN | FLAG_SEND_ZLP, |
960 | .bind = sierra_net_bind, | 960 | .bind = sierra_net_bind, |
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = { | |||
962 | .status = sierra_net_status, | 962 | .status = sierra_net_status, |
963 | .rx_fixup = sierra_net_rx_fixup, | 963 | .rx_fixup = sierra_net_rx_fixup, |
964 | .tx_fixup = sierra_net_tx_fixup, | 964 | .tx_fixup = sierra_net_tx_fixup, |
965 | .data = (unsigned long)&sierra_net_info_data_68A3, | 965 | .data = (unsigned long)&sierra_net_info_data_direct_ip, |
966 | }; | 966 | }; |
967 | 967 | ||
968 | static const struct usb_device_id products[] = { | 968 | static const struct usb_device_id products[] = { |
969 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ | 969 | {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ |
970 | .driver_info = (unsigned long) &sierra_net_info_68A3}, | 970 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, |
971 | {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ | ||
972 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
973 | {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ | ||
974 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
975 | {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ | ||
976 | .driver_info = (unsigned long) &sierra_net_info_direct_ip}, | ||
971 | 977 | ||
972 | {}, /* last item */ | 978 | {}, /* last item */ |
973 | }; | 979 | }; |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5214b1eceb95..f18149ae2588 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444); | |||
42 | #define VIRTNET_DRIVER_VERSION "1.0.0" | 42 | #define VIRTNET_DRIVER_VERSION "1.0.0" |
43 | 43 | ||
44 | struct virtnet_stats { | 44 | struct virtnet_stats { |
45 | struct u64_stats_sync syncp; | 45 | struct u64_stats_sync tx_syncp; |
46 | struct u64_stats_sync rx_syncp; | ||
46 | u64 tx_bytes; | 47 | u64 tx_bytes; |
47 | u64 tx_packets; | 48 | u64 tx_packets; |
48 | 49 | ||
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len) | |||
300 | 301 | ||
301 | hdr = skb_vnet_hdr(skb); | 302 | hdr = skb_vnet_hdr(skb); |
302 | 303 | ||
303 | u64_stats_update_begin(&stats->syncp); | 304 | u64_stats_update_begin(&stats->rx_syncp); |
304 | stats->rx_bytes += skb->len; | 305 | stats->rx_bytes += skb->len; |
305 | stats->rx_packets++; | 306 | stats->rx_packets++; |
306 | u64_stats_update_end(&stats->syncp); | 307 | u64_stats_update_end(&stats->rx_syncp); |
307 | 308 | ||
308 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 309 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
309 | pr_debug("Needs csum!\n"); | 310 | pr_debug("Needs csum!\n"); |
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) | |||
565 | while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { | 566 | while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { |
566 | pr_debug("Sent skb %p\n", skb); | 567 | pr_debug("Sent skb %p\n", skb); |
567 | 568 | ||
568 | u64_stats_update_begin(&stats->syncp); | 569 | u64_stats_update_begin(&stats->tx_syncp); |
569 | stats->tx_bytes += skb->len; | 570 | stats->tx_bytes += skb->len; |
570 | stats->tx_packets++; | 571 | stats->tx_packets++; |
571 | u64_stats_update_end(&stats->syncp); | 572 | u64_stats_update_end(&stats->tx_syncp); |
572 | 573 | ||
573 | tot_sgs += skb_vnet_hdr(skb)->num_sg; | 574 | tot_sgs += skb_vnet_hdr(skb)->num_sg; |
574 | dev_kfree_skb_any(skb); | 575 | dev_kfree_skb_any(skb); |
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, | |||
703 | u64 tpackets, tbytes, rpackets, rbytes; | 704 | u64 tpackets, tbytes, rpackets, rbytes; |
704 | 705 | ||
705 | do { | 706 | do { |
706 | start = u64_stats_fetch_begin(&stats->syncp); | 707 | start = u64_stats_fetch_begin(&stats->tx_syncp); |
707 | tpackets = stats->tx_packets; | 708 | tpackets = stats->tx_packets; |
708 | tbytes = stats->tx_bytes; | 709 | tbytes = stats->tx_bytes; |
710 | } while (u64_stats_fetch_retry(&stats->tx_syncp, start)); | ||
711 | |||
712 | do { | ||
713 | start = u64_stats_fetch_begin(&stats->rx_syncp); | ||
709 | rpackets = stats->rx_packets; | 714 | rpackets = stats->rx_packets; |
710 | rbytes = stats->rx_bytes; | 715 | rbytes = stats->rx_bytes; |
711 | } while (u64_stats_fetch_retry(&stats->syncp, start)); | 716 | } while (u64_stats_fetch_retry(&stats->rx_syncp, start)); |
712 | 717 | ||
713 | tot->rx_packets += rpackets; | 718 | tot->rx_packets += rpackets; |
714 | tot->tx_packets += tpackets; | 719 | tot->tx_packets += tpackets; |
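The virtio_net hunks above split one u64_stats_sync into tx_syncp and rx_syncp. The motivation, as far as the diff shows, is that transmit and receive counters are bumped from different contexts, and a u64_stats_sync writer section must only ever be entered by one path at a time; giving each direction its own sequence counter keeps 64-bit reads consistent on 32-bit machines without extra locking. A rough sketch of the writer/reader pairing for one direction (illustrative names; needs <linux/u64_stats_sync.h>):

	struct example_stats {
		struct u64_stats_sync rx_syncp;
		u64 rx_packets;
		u64 rx_bytes;
	};

	/* Writer side: entered from a single context at a time (e.g. NAPI poll). */
	static void example_rx_account(struct example_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->rx_syncp);
		s->rx_packets++;
		s->rx_bytes += len;
		u64_stats_update_end(&s->rx_syncp);
	}

	/* Reader side: retry the snapshot if a writer was active meanwhile. */
	static void example_rx_read(struct example_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->rx_syncp);
			*pkts = s->rx_packets;
			*bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry(&s->rx_syncp, start));
	}
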
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 67c13af6f206..c06b6cb5c91e 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -877,6 +877,10 @@ struct b43_wl { | |||
877 | * from the mac80211 subsystem. */ | 877 | * from the mac80211 subsystem. */ |
878 | u16 mac80211_initially_registered_queues; | 878 | u16 mac80211_initially_registered_queues; |
879 | 879 | ||
880 | /* Set when we call ieee80211_register_hw(); checked before we call | ||
881 | * ieee80211_unregister_hw(). */ | ||
882 | bool hw_registred; | ||
883 | |||
880 | /* We can only have one operating interface (802.11 core) | 884 | /* We can only have one operating interface (802.11 core) |
881 | * at a time. General information about this interface follows. | 885 | * at a time. General information about this interface follows. |
882 | */ | 886 | */ |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 5a39b226b2e3..acd03a4f9730 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -2437,6 +2437,7 @@ start_ieee80211: | |||
2437 | err = ieee80211_register_hw(wl->hw); | 2437 | err = ieee80211_register_hw(wl->hw); |
2438 | if (err) | 2438 | if (err) |
2439 | goto err_one_core_detach; | 2439 | goto err_one_core_detach; |
2440 | wl->hw_registred = true; | ||
2440 | b43_leds_register(wl->current_dev); | 2441 | b43_leds_register(wl->current_dev); |
2441 | goto out; | 2442 | goto out; |
2442 | 2443 | ||
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) | |||
5299 | 5300 | ||
5300 | hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; | 5301 | hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; |
5301 | wl->mac80211_initially_registered_queues = hw->queues; | 5302 | wl->mac80211_initially_registered_queues = hw->queues; |
5303 | wl->hw_registred = false; | ||
5302 | hw->max_rates = 2; | 5304 | hw->max_rates = 2; |
5303 | SET_IEEE80211_DEV(hw, dev->dev); | 5305 | SET_IEEE80211_DEV(hw, dev->dev); |
5304 | if (is_valid_ether_addr(sprom->et1mac)) | 5306 | if (is_valid_ether_addr(sprom->et1mac)) |
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core) | |||
5370 | * as the ieee80211 unreg will destroy the workqueue. */ | 5372 | * as the ieee80211 unreg will destroy the workqueue. */ |
5371 | cancel_work_sync(&wldev->restart_work); | 5373 | cancel_work_sync(&wldev->restart_work); |
5372 | 5374 | ||
5373 | /* Restore the queues count before unregistering, because firmware detect | 5375 | B43_WARN_ON(!wl); |
5374 | * might have modified it. Restoring is important, so the networking | 5376 | if (wl->current_dev == wldev && wl->hw_registred) { |
5375 | * stack can properly free resources. */ | 5377 | /* Restore the queues count before unregistering, because firmware detect |
5376 | wl->hw->queues = wl->mac80211_initially_registered_queues; | 5378 | * might have modified it. Restoring is important, so the networking |
5377 | b43_leds_stop(wldev); | 5379 | * stack can properly free resources. */ |
5378 | ieee80211_unregister_hw(wl->hw); | 5380 | wl->hw->queues = wl->mac80211_initially_registered_queues; |
5381 | b43_leds_stop(wldev); | ||
5382 | ieee80211_unregister_hw(wl->hw); | ||
5383 | } | ||
5379 | 5384 | ||
5380 | b43_one_core_detach(wldev->dev); | 5385 | b43_one_core_detach(wldev->dev); |
5381 | 5386 | ||
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev) | |||
5446 | cancel_work_sync(&wldev->restart_work); | 5451 | cancel_work_sync(&wldev->restart_work); |
5447 | 5452 | ||
5448 | B43_WARN_ON(!wl); | 5453 | B43_WARN_ON(!wl); |
5449 | if (wl->current_dev == wldev) { | 5454 | if (wl->current_dev == wldev && wl->hw_registred) { |
5450 | /* Restore the queues count before unregistering, because firmware detect | 5455 | /* Restore the queues count before unregistering, because firmware detect |
5451 | * might have modified it. Restoring is important, so the networking | 5456 | * might have modified it. Restoring is important, so the networking |
5452 | * stack can properly free resources. */ | 5457 | * stack can properly free resources. */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index e2480d196276..8e7e6928c936 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | |||
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev) | |||
89 | data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; | 89 | data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; |
90 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); | 90 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); |
91 | 91 | ||
92 | /* redirect, configure ane enable io for interrupt signal */ | 92 | /* redirect, configure and enable io for interrupt signal */ |
93 | data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; | 93 | data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; |
94 | if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) | 94 | if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH) |
95 | data |= SDIO_SEPINT_ACT_HI; | 95 | data |= SDIO_SEPINT_ACT_HI; |
96 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); | 96 | brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); |
97 | 97 | ||
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 9cfae0c08707..95aa8e1683ec 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1903 | netif_stop_queue(priv->net_dev); | 1903 | netif_stop_queue(priv->net_dev); |
1904 | } | 1904 | } |
1905 | 1905 | ||
1906 | /* Called by register_netdev() */ | ||
1907 | static int ipw2100_net_init(struct net_device *dev) | ||
1908 | { | ||
1909 | struct ipw2100_priv *priv = libipw_priv(dev); | ||
1910 | |||
1911 | return ipw2100_up(priv, 1); | ||
1912 | } | ||
1913 | |||
1914 | static int ipw2100_wdev_init(struct net_device *dev) | 1906 | static int ipw2100_wdev_init(struct net_device *dev) |
1915 | { | 1907 | { |
1916 | struct ipw2100_priv *priv = libipw_priv(dev); | 1908 | struct ipw2100_priv *priv = libipw_priv(dev); |
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = { | |||
6087 | .ndo_stop = ipw2100_close, | 6079 | .ndo_stop = ipw2100_close, |
6088 | .ndo_start_xmit = libipw_xmit, | 6080 | .ndo_start_xmit = libipw_xmit, |
6089 | .ndo_change_mtu = libipw_change_mtu, | 6081 | .ndo_change_mtu = libipw_change_mtu, |
6090 | .ndo_init = ipw2100_net_init, | ||
6091 | .ndo_tx_timeout = ipw2100_tx_timeout, | 6082 | .ndo_tx_timeout = ipw2100_tx_timeout, |
6092 | .ndo_set_mac_address = ipw2100_set_address, | 6083 | .ndo_set_mac_address = ipw2100_set_address, |
6093 | .ndo_validate_addr = eth_validate_addr, | 6084 | .ndo_validate_addr = eth_validate_addr, |
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6329 | printk(KERN_INFO DRV_NAME | 6320 | printk(KERN_INFO DRV_NAME |
6330 | ": Detected Intel PRO/Wireless 2100 Network Connection\n"); | 6321 | ": Detected Intel PRO/Wireless 2100 Network Connection\n"); |
6331 | 6322 | ||
6323 | err = ipw2100_up(priv, 1); | ||
6324 | if (err) | ||
6325 | goto fail; | ||
6326 | |||
6332 | err = ipw2100_wdev_init(dev); | 6327 | err = ipw2100_wdev_init(dev); |
6333 | if (err) | 6328 | if (err) |
6334 | goto fail; | 6329 | goto fail; |
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6338 | * network device we would call ipw2100_up. This introduced a race | 6333 | * network device we would call ipw2100_up. This introduced a race |
6339 | * condition with newer hotplug configurations (network was coming | 6334 | * condition with newer hotplug configurations (network was coming |
6340 | * up and making calls before the device was initialized). | 6335 | * up and making calls before the device was initialized). |
6341 | * | 6336 | */ |
6342 | * If we called ipw2100_up before we registered the device, then the | ||
6343 | * device name wasn't registered. So, we instead use the net_dev->init | ||
6344 | * member to call a function that then just turns and calls ipw2100_up. | ||
6345 | * net_dev->init is called after name allocation but before the | ||
6346 | * notifier chain is called */ | ||
6347 | err = register_netdev(dev); | 6337 | err = register_netdev(dev); |
6348 | if (err) { | 6338 | if (err) { |
6349 | printk(KERN_WARNING DRV_NAME | 6339 | printk(KERN_WARNING DRV_NAME |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 19f7ee84ae89..e5e8ada4aaf6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -35,17 +35,20 @@ | |||
35 | #define IWL6000_UCODE_API_MAX 6 | 35 | #define IWL6000_UCODE_API_MAX 6 |
36 | #define IWL6050_UCODE_API_MAX 5 | 36 | #define IWL6050_UCODE_API_MAX 5 |
37 | #define IWL6000G2_UCODE_API_MAX 6 | 37 | #define IWL6000G2_UCODE_API_MAX 6 |
38 | #define IWL6035_UCODE_API_MAX 6 | ||
38 | 39 | ||
39 | /* Oldest version we won't warn about */ | 40 | /* Oldest version we won't warn about */ |
40 | #define IWL6000_UCODE_API_OK 4 | 41 | #define IWL6000_UCODE_API_OK 4 |
41 | #define IWL6000G2_UCODE_API_OK 5 | 42 | #define IWL6000G2_UCODE_API_OK 5 |
42 | #define IWL6050_UCODE_API_OK 5 | 43 | #define IWL6050_UCODE_API_OK 5 |
43 | #define IWL6000G2B_UCODE_API_OK 6 | 44 | #define IWL6000G2B_UCODE_API_OK 6 |
45 | #define IWL6035_UCODE_API_OK 6 | ||
44 | 46 | ||
45 | /* Lowest firmware API version supported */ | 47 | /* Lowest firmware API version supported */ |
46 | #define IWL6000_UCODE_API_MIN 4 | 48 | #define IWL6000_UCODE_API_MIN 4 |
47 | #define IWL6050_UCODE_API_MIN 4 | 49 | #define IWL6050_UCODE_API_MIN 4 |
48 | #define IWL6000G2_UCODE_API_MIN 4 | 50 | #define IWL6000G2_UCODE_API_MIN 5 |
51 | #define IWL6035_UCODE_API_MIN 6 | ||
49 | 52 | ||
50 | /* EEPROM versions */ | 53 | /* EEPROM versions */ |
51 | #define EEPROM_6000_TX_POWER_VERSION (4) | 54 | #define EEPROM_6000_TX_POWER_VERSION (4) |
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = { | |||
227 | IWL_DEVICE_6030, | 230 | IWL_DEVICE_6030, |
228 | }; | 231 | }; |
229 | 232 | ||
233 | #define IWL_DEVICE_6035 \ | ||
234 | .fw_name_pre = IWL6030_FW_PRE, \ | ||
235 | .ucode_api_max = IWL6035_UCODE_API_MAX, \ | ||
236 | .ucode_api_ok = IWL6035_UCODE_API_OK, \ | ||
237 | .ucode_api_min = IWL6035_UCODE_API_MIN, \ | ||
238 | .device_family = IWL_DEVICE_FAMILY_6030, \ | ||
239 | .max_inst_size = IWL60_RTC_INST_SIZE, \ | ||
240 | .max_data_size = IWL60_RTC_DATA_SIZE, \ | ||
241 | .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ | ||
242 | .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ | ||
243 | .base_params = &iwl6000_g2_base_params, \ | ||
244 | .bt_params = &iwl6000_bt_params, \ | ||
245 | .need_temp_offset_calib = true, \ | ||
246 | .led_mode = IWL_LED_RF_STATE, \ | ||
247 | .adv_pm = true | ||
248 | |||
230 | const struct iwl_cfg iwl6035_2agn_cfg = { | 249 | const struct iwl_cfg iwl6035_2agn_cfg = { |
231 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", | 250 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", |
232 | IWL_DEVICE_6030, | 251 | IWL_DEVICE_6035, |
233 | .ht_params = &iwl6000_ht_params, | 252 | .ht_params = &iwl6000_ht_params, |
234 | }; | 253 | }; |
235 | 254 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index aea07aab3c9e..eb6a8eaf42fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c | |||
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, | |||
1267 | key_flags |= STA_KEY_MULTICAST_MSK; | 1267 | key_flags |= STA_KEY_MULTICAST_MSK; |
1268 | 1268 | ||
1269 | sta_cmd.key.key_flags = key_flags; | 1269 | sta_cmd.key.key_flags = key_flags; |
1270 | sta_cmd.key.key_offset = WEP_INVALID_OFFSET; | 1270 | sta_cmd.key.key_offset = keyconf->hw_key_idx; |
1271 | sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; | 1271 | sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; |
1272 | sta_cmd.mode = STA_CONTROL_MODIFY_MSK; | 1272 | sta_cmd.mode = STA_CONTROL_MODIFY_MSK; |
1273 | 1273 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index d742900969ea..fac67a526a30 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) | |||
861 | 861 | ||
862 | /* We have our copies now, allow OS release its copies */ | 862 | /* We have our copies now, allow OS release its copies */ |
863 | release_firmware(ucode_raw); | 863 | release_firmware(ucode_raw); |
864 | complete(&drv->request_firmware_complete); | ||
865 | 864 | ||
866 | drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); | 865 | drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); |
867 | 866 | ||
868 | if (!drv->op_mode) | 867 | if (!drv->op_mode) |
869 | goto out_free_fw; | 868 | goto out_unbind; |
870 | 869 | ||
870 | /* | ||
871 | * Complete the firmware request last so that | ||
872 | * a driver unbind (stop) doesn't run while we | ||
873 | * are doing the start() above. | ||
874 | */ | ||
875 | complete(&drv->request_firmware_complete); | ||
871 | return; | 876 | return; |
872 | 877 | ||
873 | try_again: | 878 | try_again: |
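The iwl-drv.c hunk above is purely an ordering fix: the firmware-load callback now signals request_firmware_complete only after op_mode start() has finished, because (as the new comment says) a concurrent unbind waits on that completion before stopping the driver. A reduced sketch of the ordering, with hypothetical names standing in for the parts of the driver not shown in this hunk:

	struct example_drv {
		struct completion request_done;
		void *op_mode;
	};

	static void example_fw_callback(struct example_drv *drv)
	{
		drv->op_mode = example_start(drv);	/* hypothetical long setup */

		/* Signalling last keeps unbind from racing with start(). */
		complete(&drv->request_done);
	}

	static void example_unbind(struct example_drv *drv)
	{
		/* Blocks until the callback has either finished or bailed out. */
		wait_for_completion(&drv->request_done);

		if (drv->op_mode)
			example_stop(drv->op_mode);	/* hypothetical teardown */
	}
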
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 50c58911e718..b8e2b223ac36 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | |||
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans, | |||
568 | * iwl_get_max_txpower_avg - get the highest tx power from all chains. | 568 | * iwl_get_max_txpower_avg - get the highest tx power from all chains. |
569 | * find the highest tx power from all chains for the channel | 569 | * find the highest tx power from all chains for the channel |
570 | */ | 570 | */ |
571 | static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg, | 571 | static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, |
572 | struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, | 572 | struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, |
573 | int element, s8 *max_txpower_in_half_dbm) | 573 | int element, s8 *max_txpower_in_half_dbm) |
574 | { | 574 | { |
575 | s8 max_txpower_avg = 0; /* (dBm) */ | 575 | s8 max_txpower_avg = 0; /* (dBm) */ |
576 | 576 | ||
577 | /* Take the highest tx power from any valid chains */ | 577 | /* Take the highest tx power from any valid chains */ |
578 | if ((cfg->valid_tx_ant & ANT_A) && | 578 | if ((priv->hw_params.valid_tx_ant & ANT_A) && |
579 | (enhanced_txpower[element].chain_a_max > max_txpower_avg)) | 579 | (enhanced_txpower[element].chain_a_max > max_txpower_avg)) |
580 | max_txpower_avg = enhanced_txpower[element].chain_a_max; | 580 | max_txpower_avg = enhanced_txpower[element].chain_a_max; |
581 | if ((cfg->valid_tx_ant & ANT_B) && | 581 | if ((priv->hw_params.valid_tx_ant & ANT_B) && |
582 | (enhanced_txpower[element].chain_b_max > max_txpower_avg)) | 582 | (enhanced_txpower[element].chain_b_max > max_txpower_avg)) |
583 | max_txpower_avg = enhanced_txpower[element].chain_b_max; | 583 | max_txpower_avg = enhanced_txpower[element].chain_b_max; |
584 | if ((cfg->valid_tx_ant & ANT_C) && | 584 | if ((priv->hw_params.valid_tx_ant & ANT_C) && |
585 | (enhanced_txpower[element].chain_c_max > max_txpower_avg)) | 585 | (enhanced_txpower[element].chain_c_max > max_txpower_avg)) |
586 | max_txpower_avg = enhanced_txpower[element].chain_c_max; | 586 | max_txpower_avg = enhanced_txpower[element].chain_c_max; |
587 | if (((cfg->valid_tx_ant == ANT_AB) | | 587 | if (((priv->hw_params.valid_tx_ant == ANT_AB) | |
588 | (cfg->valid_tx_ant == ANT_BC) | | 588 | (priv->hw_params.valid_tx_ant == ANT_BC) | |
589 | (cfg->valid_tx_ant == ANT_AC)) && | 589 | (priv->hw_params.valid_tx_ant == ANT_AC)) && |
590 | (enhanced_txpower[element].mimo2_max > max_txpower_avg)) | 590 | (enhanced_txpower[element].mimo2_max > max_txpower_avg)) |
591 | max_txpower_avg = enhanced_txpower[element].mimo2_max; | 591 | max_txpower_avg = enhanced_txpower[element].mimo2_max; |
592 | if ((cfg->valid_tx_ant == ANT_ABC) && | 592 | if ((priv->hw_params.valid_tx_ant == ANT_ABC) && |
593 | (enhanced_txpower[element].mimo3_max > max_txpower_avg)) | 593 | (enhanced_txpower[element].mimo3_max > max_txpower_avg)) |
594 | max_txpower_avg = enhanced_txpower[element].mimo3_max; | 594 | max_txpower_avg = enhanced_txpower[element].mimo3_max; |
595 | 595 | ||
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) | |||
691 | ((txp->delta_20_in_40 & 0xf0) >> 4), | 691 | ((txp->delta_20_in_40 & 0xf0) >> 4), |
692 | (txp->delta_20_in_40 & 0x0f)); | 692 | (txp->delta_20_in_40 & 0x0f)); |
693 | 693 | ||
694 | max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx, | 694 | max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, |
695 | &max_txp_avg_halfdbm); | 695 | &max_txp_avg_halfdbm); |
696 | 696 | ||
697 | /* | 697 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c index ab2f4d7500a4..3ee23134c02b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c +++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c | |||
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
199 | WIPHY_FLAG_DISABLE_BEACON_HINTS | | 199 | WIPHY_FLAG_DISABLE_BEACON_HINTS | |
200 | WIPHY_FLAG_IBSS_RSN; | 200 | WIPHY_FLAG_IBSS_RSN; |
201 | 201 | ||
202 | #ifdef CONFIG_PM_SLEEP | ||
202 | if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && | 203 | if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && |
203 | priv->trans->ops->wowlan_suspend && | 204 | priv->trans->ops->wowlan_suspend && |
204 | device_can_wakeup(priv->trans->dev)) { | 205 | device_can_wakeup(priv->trans->dev)) { |
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
217 | hw->wiphy->wowlan.pattern_max_len = | 218 | hw->wiphy->wowlan.pattern_max_len = |
218 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; | 219 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; |
219 | } | 220 | } |
221 | #endif | ||
220 | 222 | ||
221 | if (iwlwifi_mod_params.power_save) | 223 | if (iwlwifi_mod_params.power_save) |
222 | hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; | 224 | hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; |
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
249 | ret = ieee80211_register_hw(priv->hw); | 251 | ret = ieee80211_register_hw(priv->hw); |
250 | if (ret) { | 252 | if (ret) { |
251 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); | 253 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); |
254 | iwl_leds_exit(priv); | ||
252 | return ret; | 255 | return ret; |
253 | } | 256 | } |
254 | priv->mac80211_registered = 1; | 257 | priv->mac80211_registered = 1; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 3b1069290fa9..dfd54662e3e6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h | |||
@@ -224,6 +224,7 @@ | |||
224 | #define SCD_TXFACT (SCD_BASE + 0x10) | 224 | #define SCD_TXFACT (SCD_BASE + 0x10) |
225 | #define SCD_ACTIVE (SCD_BASE + 0x14) | 225 | #define SCD_ACTIVE (SCD_BASE + 0x14) |
226 | #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) | 226 | #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) |
227 | #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) | ||
227 | #define SCD_AGGR_SEL (SCD_BASE + 0x248) | 228 | #define SCD_AGGR_SEL (SCD_BASE + 0x248) |
228 | #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) | 229 | #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) |
229 | 230 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index ec6fb395b84d..79c6b91417f9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | |||
@@ -1058,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans) | |||
1058 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, | 1058 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, |
1059 | trans_pcie->scd_bc_tbls.dma >> 10); | 1059 | trans_pcie->scd_bc_tbls.dma >> 10); |
1060 | 1060 | ||
1061 | /* The chain extension of the SCD doesn't work well. This feature is | ||
1062 | * enabled by default by the HW, so we need to disable it manually. | ||
1063 | */ | ||
1064 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | ||
1065 | |||
1061 | /* Enable DMA channel */ | 1066 | /* Enable DMA channel */ |
1062 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) | 1067 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) |
1063 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | 1068 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index fb787df01666..a0b7cfd34685 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
1555 | hdr = (struct ieee80211_hdr *) skb->data; | 1555 | hdr = (struct ieee80211_hdr *) skb->data; |
1556 | mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); | 1556 | mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); |
1557 | } | 1557 | } |
1558 | txi->flags |= IEEE80211_TX_STAT_ACK; | ||
1558 | } | 1559 | } |
1559 | ieee80211_tx_status_irqsafe(data2->hw, skb); | 1560 | ieee80211_tx_status_irqsafe(data2->hw, skb); |
1560 | return 0; | 1561 | return 0; |
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void) | |||
1721 | "unregister family %i\n", ret); | 1722 | "unregister family %i\n", ret); |
1722 | } | 1723 | } |
1723 | 1724 | ||
1725 | static const struct ieee80211_iface_limit hwsim_if_limits[] = { | ||
1726 | { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, | ||
1727 | { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | | ||
1728 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | ||
1729 | #ifdef CONFIG_MAC80211_MESH | ||
1730 | BIT(NL80211_IFTYPE_MESH_POINT) | | ||
1731 | #endif | ||
1732 | BIT(NL80211_IFTYPE_AP) | | ||
1733 | BIT(NL80211_IFTYPE_P2P_GO) }, | ||
1734 | }; | ||
1735 | |||
1736 | static const struct ieee80211_iface_combination hwsim_if_comb = { | ||
1737 | .limits = hwsim_if_limits, | ||
1738 | .n_limits = ARRAY_SIZE(hwsim_if_limits), | ||
1739 | .max_interfaces = 2048, | ||
1740 | .num_different_channels = 1, | ||
1741 | }; | ||
1742 | |||
1724 | static int __init init_mac80211_hwsim(void) | 1743 | static int __init init_mac80211_hwsim(void) |
1725 | { | 1744 | { |
1726 | int i, err = 0; | 1745 | int i, err = 0; |
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void) | |||
1782 | hw->wiphy->n_addresses = 2; | 1801 | hw->wiphy->n_addresses = 2; |
1783 | hw->wiphy->addresses = data->addresses; | 1802 | hw->wiphy->addresses = data->addresses; |
1784 | 1803 | ||
1804 | hw->wiphy->iface_combinations = &hwsim_if_comb; | ||
1805 | hw->wiphy->n_iface_combinations = 1; | ||
1806 | |||
1785 | if (fake_hw_scan) { | 1807 | if (fake_hw_scan) { |
1786 | hw->wiphy->max_scan_ssids = 255; | 1808 | hw->wiphy->max_scan_ssids = 255; |
1787 | hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; | 1809 | hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 87671446e24b..015fec3371a0 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
@@ -948,6 +948,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, | |||
948 | bss_cfg->ssid.ssid_len = params->ssid_len; | 948 | bss_cfg->ssid.ssid_len = params->ssid_len; |
949 | } | 949 | } |
950 | 950 | ||
951 | switch (params->hidden_ssid) { | ||
952 | case NL80211_HIDDEN_SSID_NOT_IN_USE: | ||
953 | bss_cfg->bcast_ssid_ctl = 1; | ||
954 | break; | ||
955 | case NL80211_HIDDEN_SSID_ZERO_LEN: | ||
956 | bss_cfg->bcast_ssid_ctl = 0; | ||
957 | break; | ||
958 | case NL80211_HIDDEN_SSID_ZERO_CONTENTS: | ||
959 | /* firmware doesn't support this type of hidden SSID */ | ||
960 | default: | ||
961 | return -EINVAL; | ||
962 | } | ||
963 | |||
951 | if (mwifiex_set_secure_params(priv, bss_cfg, params)) { | 964 | if (mwifiex_set_secure_params(priv, bss_cfg, params)) { |
952 | kfree(bss_cfg); | 965 | kfree(bss_cfg); |
953 | wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); | 966 | wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); |
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 9f674bbebe65..561452a5c818 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h | |||
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { | |||
122 | #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) | 122 | #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) |
123 | #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) | 123 | #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) |
124 | #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) | 124 | #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) |
125 | #define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) | ||
125 | #define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) | 126 | #define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) |
126 | #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) | 127 | #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) |
127 | #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) | 128 | #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) |
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid { | |||
1209 | u8 ssid[0]; | 1210 | u8 ssid[0]; |
1210 | } __packed; | 1211 | } __packed; |
1211 | 1212 | ||
1213 | struct host_cmd_tlv_bcast_ssid { | ||
1214 | struct host_cmd_tlv tlv; | ||
1215 | u8 bcast_ctl; | ||
1216 | } __packed; | ||
1217 | |||
1212 | struct host_cmd_tlv_beacon_period { | 1218 | struct host_cmd_tlv_beacon_period { |
1213 | struct host_cmd_tlv tlv; | 1219 | struct host_cmd_tlv tlv; |
1214 | __le16 period; | 1220 | __le16 period; |
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 76dfbc42a732..8173ab66066d 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c | |||
@@ -132,6 +132,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) | |||
132 | struct host_cmd_tlv_dtim_period *dtim_period; | 132 | struct host_cmd_tlv_dtim_period *dtim_period; |
133 | struct host_cmd_tlv_beacon_period *beacon_period; | 133 | struct host_cmd_tlv_beacon_period *beacon_period; |
134 | struct host_cmd_tlv_ssid *ssid; | 134 | struct host_cmd_tlv_ssid *ssid; |
135 | struct host_cmd_tlv_bcast_ssid *bcast_ssid; | ||
135 | struct host_cmd_tlv_channel_band *chan_band; | 136 | struct host_cmd_tlv_channel_band *chan_band; |
136 | struct host_cmd_tlv_frag_threshold *frag_threshold; | 137 | struct host_cmd_tlv_frag_threshold *frag_threshold; |
137 | struct host_cmd_tlv_rts_threshold *rts_threshold; | 138 | struct host_cmd_tlv_rts_threshold *rts_threshold; |
@@ -153,6 +154,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) | |||
153 | cmd_size += sizeof(struct host_cmd_tlv) + | 154 | cmd_size += sizeof(struct host_cmd_tlv) + |
154 | bss_cfg->ssid.ssid_len; | 155 | bss_cfg->ssid.ssid_len; |
155 | tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; | 156 | tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; |
157 | |||
158 | bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv; | ||
159 | bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID); | ||
160 | bcast_ssid->tlv.len = | ||
161 | cpu_to_le16(sizeof(bcast_ssid->bcast_ctl)); | ||
162 | bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl; | ||
163 | cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid); | ||
164 | tlv += sizeof(struct host_cmd_tlv_bcast_ssid); | ||
156 | } | 165 | } |
157 | if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { | 166 | if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { |
158 | chan_band = (struct host_cmd_tlv_channel_band *)tlv; | 167 | chan_band = (struct host_cmd_tlv_channel_band *)tlv; |
@@ -416,6 +425,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel) | |||
416 | if (!bss_cfg) | 425 | if (!bss_cfg) |
417 | return -ENOMEM; | 426 | return -ENOMEM; |
418 | 427 | ||
428 | mwifiex_set_sys_config_invalid_data(bss_cfg); | ||
419 | bss_cfg->band_cfg = BAND_CONFIG_MANUAL; | 429 | bss_cfg->band_cfg = BAND_CONFIG_MANUAL; |
420 | bss_cfg->channel = channel; | 430 | bss_cfg->channel = channel; |
421 | 431 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index ca36cccaba31..8f754025b06e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h | |||
@@ -396,8 +396,7 @@ struct rt2x00_intf { | |||
396 | * for hardware which doesn't support hardware | 396 | * for hardware which doesn't support hardware |
397 | * sequence counting. | 397 | * sequence counting. |
398 | */ | 398 | */ |
399 | spinlock_t seqlock; | 399 | atomic_t seqno; |
400 | u16 seqno; | ||
401 | }; | 400 | }; |
402 | 401 | ||
403 | static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) | 402 | static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index b49773ef72f2..dd24b2663b5e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c | |||
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, | |||
277 | else | 277 | else |
278 | rt2x00dev->intf_sta_count++; | 278 | rt2x00dev->intf_sta_count++; |
279 | 279 | ||
280 | spin_lock_init(&intf->seqlock); | ||
281 | mutex_init(&intf->beacon_skb_mutex); | 280 | mutex_init(&intf->beacon_skb_mutex); |
282 | intf->beacon = entry; | 281 | intf->beacon = entry; |
283 | 282 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 4c662eccf53c..2fd830103415 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c | |||
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
207 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 207 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
208 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 208 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
209 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); | 209 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); |
210 | u16 seqno; | ||
210 | 211 | ||
211 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) | 212 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) |
212 | return; | 213 | return; |
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, | |||
238 | * sequence counting per-frame, since those will override the | 239 | * sequence counting per-frame, since those will override the |
239 | * sequence counter given by mac80211. | 240 | * sequence counter given by mac80211. |
240 | */ | 241 | */ |
241 | spin_lock(&intf->seqlock); | ||
242 | |||
243 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) | 242 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) |
244 | intf->seqno += 0x10; | 243 | seqno = atomic_add_return(0x10, &intf->seqno); |
245 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | 244 | else |
246 | hdr->seq_ctrl |= cpu_to_le16(intf->seqno); | 245 | seqno = atomic_read(&intf->seqno); |
247 | |||
248 | spin_unlock(&intf->seqlock); | ||
249 | 246 | ||
247 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
248 | hdr->seq_ctrl |= cpu_to_le16(seqno); | ||
250 | } | 249 | } |
251 | 250 | ||
252 | static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, | 251 | static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, |
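The rt2x00 hunks above replace the spinlock-protected u16 sequence counter with an atomic_t, so the TX hot path no longer takes a lock just to bump the 802.11 sequence number. A minimal userspace sketch of the same pattern follows, using C11 atomics in place of the kernel's atomic_t; the 0x10 step and the FRAG mask come from the driver, but struct fake_intf and assign_seq_ctrl() are invented for illustration.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_SCTL_FRAG 0x000F      /* low 4 bits: fragment number */

struct fake_intf {
        atomic_uint seqno;              /* stands in for the kernel's atomic_t */
};

/* The first fragment of an MSDU gets a fresh sequence number (step 0x10,
 * because the sequence field starts at bit 4); later fragments reuse it. */
static uint16_t assign_seq_ctrl(struct fake_intf *intf, uint16_t seq_ctrl,
                                int first_fragment)
{
        unsigned int seqno;

        if (first_fragment)
                seqno = atomic_fetch_add(&intf->seqno, 0x10) + 0x10;
        else
                seqno = atomic_load(&intf->seqno);

        seq_ctrl &= IEEE80211_SCTL_FRAG;        /* keep the fragment bits */
        seq_ctrl |= (uint16_t)seqno;            /* merge in the sequence bits */
        return seq_ctrl;
}

int main(void)
{
        struct fake_intf intf = { .seqno = 0 };

        printf("frag0: 0x%04x\n", assign_seq_ctrl(&intf, 0, 1));  /* 0x0010 */
        printf("frag1: 0x%04x\n", assign_seq_ctrl(&intf, 1, 0));  /* 0x0011 */
        printf("next : 0x%04x\n", assign_seq_ctrl(&intf, 0, 1));  /* 0x0020 */
        return 0;
}

Note that atomic_fetch_add() returns the old value, so adding the step back reproduces the new-value semantics of the atomic_add_return() call seen in the diff.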
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c index 2e0de2f5f0f9..c2d5b495c179 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c | |||
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev, | |||
117 | radio_on = true; | 117 | radio_on = true; |
118 | } else if (radio_on) { | 118 | } else if (radio_on) { |
119 | radio_on = false; | 119 | radio_on = false; |
120 | cancel_delayed_work_sync(&priv->led_on); | 120 | cancel_delayed_work(&priv->led_on); |
121 | ieee80211_queue_delayed_work(hw, &priv->led_off, 0); | 121 | ieee80211_queue_delayed_work(hw, &priv->led_off, 0); |
122 | } | 122 | } |
123 | } else if (radio_on) { | 123 | } else if (radio_on) { |
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index c3b331b74fa0..0cc053af70bd 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps); | |||
61 | list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ | 61 | list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ |
62 | for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ | 62 | for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ |
63 | _i_ < _maps_node_->num_maps; \ | 63 | _i_ < _maps_node_->num_maps; \ |
64 | i++, _map_ = &_maps_node_->maps[_i_]) | 64 | _i_++, _map_ = &_maps_node_->maps[_i_]) |
65 | 65 | ||
66 | /** | 66 | /** |
67 | * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support | 67 | * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support |
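The one-character pinctrl/core.c fix above is a macro-hygiene bug: for_each_maps() declared its counter as the parameter _i_ but incremented a bare i, so it only compiled when the caller happened to have an i in scope, and then advanced the wrong variable. The standalone sketch below shows the corrected shape; struct maps_node and for_each_map() are made-up stand-ins, not the pinctrl types.

#include <stdio.h>

struct maps_node {
        const char *maps[3];
        int num_maps;
};

/* Correct form: every reference inside the macro uses the parameter _i_. */
#define for_each_map(_node_, _i_, _map_)                            \
        for ((_i_) = 0, (_map_) = &(_node_)->maps[0];               \
             (_i_) < (_node_)->num_maps;                            \
             (_i_)++, (_map_) = &(_node_)->maps[(_i_)])

int main(void)
{
        struct maps_node node = { { "mux", "config", "group" }, 3 };
        const char **map;
        int idx;        /* note: no variable called plain 'i' in scope */

        /* With the pre-fix 'i++' the macro would not even compile here,
         * and with an unrelated 'i' in scope it would advance the wrong
         * variable and never terminate. */
        for_each_map(&node, idx, map)
                printf("%d: %s\n", idx, *map);

        return 0;
}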
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c index f6e7c670906c..dd6d93aa5334 100644 --- a/drivers/pinctrl/pinctrl-imx.c +++ b/drivers/pinctrl/pinctrl-imx.c | |||
@@ -27,16 +27,16 @@ | |||
27 | #include "core.h" | 27 | #include "core.h" |
28 | #include "pinctrl-imx.h" | 28 | #include "pinctrl-imx.h" |
29 | 29 | ||
30 | #define IMX_PMX_DUMP(info, p, m, c, n) \ | 30 | #define IMX_PMX_DUMP(info, p, m, c, n) \ |
31 | { \ | 31 | { \ |
32 | int i, j; \ | 32 | int i, j; \ |
33 | printk("Format: Pin Mux Config\n"); \ | 33 | printk(KERN_DEBUG "Format: Pin Mux Config\n"); \ |
34 | for (i = 0; i < n; i++) { \ | 34 | for (i = 0; i < n; i++) { \ |
35 | j = p[i]; \ | 35 | j = p[i]; \ |
36 | printk("%s %d 0x%lx\n", \ | 36 | printk(KERN_DEBUG "%s %d 0x%lx\n", \ |
37 | info->pins[j].name, \ | 37 | info->pins[j].name, \ |
38 | m[i], c[i]); \ | 38 | m[i], c[i]); \ |
39 | } \ | 39 | } \ |
40 | } | 40 | } |
41 | 41 | ||
42 | /* The bits in CONFIG cell defined in binding doc*/ | 42 | /* The bits in CONFIG cell defined in binding doc*/ |
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
173 | 173 | ||
174 | /* create mux map */ | 174 | /* create mux map */ |
175 | parent = of_get_parent(np); | 175 | parent = of_get_parent(np); |
176 | if (!parent) | 176 | if (!parent) { |
177 | kfree(new_map); | ||
177 | return -EINVAL; | 178 | return -EINVAL; |
179 | } | ||
178 | new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; | 180 | new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; |
179 | new_map[0].data.mux.function = parent->name; | 181 | new_map[0].data.mux.function = parent->name; |
180 | new_map[0].data.mux.group = np->name; | 182 | new_map[0].data.mux.group = np->name; |
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
193 | } | 195 | } |
194 | 196 | ||
195 | dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", | 197 | dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", |
196 | new_map->data.mux.function, new_map->data.mux.group, map_num); | 198 | (*map)->data.mux.function, (*map)->data.mux.group, map_num); |
197 | 199 | ||
198 | return 0; | 200 | return 0; |
199 | } | 201 | } |
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
201 | static void imx_dt_free_map(struct pinctrl_dev *pctldev, | 203 | static void imx_dt_free_map(struct pinctrl_dev *pctldev, |
202 | struct pinctrl_map *map, unsigned num_maps) | 204 | struct pinctrl_map *map, unsigned num_maps) |
203 | { | 205 | { |
204 | int i; | 206 | kfree(map); |
205 | |||
206 | for (i = 0; i < num_maps; i++) | ||
207 | kfree(map); | ||
208 | } | 207 | } |
209 | 208 | ||
210 | static struct pinctrl_ops imx_pctrl_ops = { | 209 | static struct pinctrl_ops imx_pctrl_ops = { |
@@ -475,9 +474,8 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np, | |||
475 | grp->configs[j] = config & ~IMX_PAD_SION; | 474 | grp->configs[j] = config & ~IMX_PAD_SION; |
476 | } | 475 | } |
477 | 476 | ||
478 | #ifdef DEBUG | ||
479 | IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); | 477 | IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); |
480 | #endif | 478 | |
481 | return 0; | 479 | return 0; |
482 | } | 480 | } |
483 | 481 | ||
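Two of the pinctrl-imx hunks are about who owns the new_map allocation: imx_dt_node_to_map() must free it itself when of_get_parent() fails, because the core only calls imx_dt_free_map() for maps it actually received, and imx_dt_free_map() in turn frees the array exactly once rather than once per entry. A userspace sketch of that ownership contract, with invented names throughout:

#include <stdio.h>
#include <stdlib.h>

struct map_entry { int type; const char *group; };

/* On success the caller owns *out_map and must hand it to free_map().
 * On failure nothing is handed out, so this function cleans up after itself. */
static int node_to_map(int parent_ok, struct map_entry **out_map,
                       unsigned int *out_num)
{
        unsigned int num = 4;
        struct map_entry *map = calloc(num, sizeof(*map));

        if (!map)
                return -1;

        if (!parent_ok) {       /* analogue of of_get_parent() failing */
                free(map);      /* the caller never sees this allocation */
                return -1;
        }

        *out_map = map;
        *out_num = num;
        return 0;
}

/* One allocation in, one free out - not one free per entry. */
static void free_map(struct map_entry *map, unsigned int num_maps)
{
        (void)num_maps;
        free(map);
}

int main(void)
{
        struct map_entry *map;
        unsigned int num;

        if (node_to_map(1, &map, &num) == 0)
                free_map(map, num);

        node_to_map(0, &map, &num);     /* failure path: nothing to free here */
        return 0;
}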
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c index 556e45a213eb..afb50ee64598 100644 --- a/drivers/pinctrl/pinctrl-mxs.c +++ b/drivers/pinctrl/pinctrl-mxs.c | |||
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
107 | 107 | ||
108 | /* Compose group name */ | 108 | /* Compose group name */ |
109 | group = kzalloc(length, GFP_KERNEL); | 109 | group = kzalloc(length, GFP_KERNEL); |
110 | if (!group) | 110 | if (!group) { |
111 | return -ENOMEM; | 111 | ret = -ENOMEM; |
112 | goto free; | ||
113 | } | ||
112 | snprintf(group, length, "%s.%d", np->name, reg); | 114 | snprintf(group, length, "%s.%d", np->name, reg); |
113 | new_map[i].data.mux.group = group; | 115 | new_map[i].data.mux.group = group; |
114 | i++; | 116 | i++; |
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
118 | pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); | 120 | pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); |
119 | if (!pconfig) { | 121 | if (!pconfig) { |
120 | ret = -ENOMEM; | 122 | ret = -ENOMEM; |
121 | goto free; | 123 | goto free_group; |
122 | } | 124 | } |
123 | 125 | ||
124 | new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; | 126 | new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; |
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, | |||
133 | 135 | ||
134 | return 0; | 136 | return 0; |
135 | 137 | ||
138 | free_group: | ||
139 | if (!purecfg) | ||
140 | kfree(group); | ||
136 | free: | 141 | free: |
137 | kfree(new_map); | 142 | kfree(new_map); |
138 | return ret; | 143 | return ret; |
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev, | |||
511 | return 0; | 516 | return 0; |
512 | 517 | ||
513 | err: | 518 | err: |
519 | platform_set_drvdata(pdev, NULL); | ||
514 | iounmap(d->base); | 520 | iounmap(d->base); |
515 | return ret; | 521 | return ret; |
516 | } | 522 | } |
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev) | |||
520 | { | 526 | { |
521 | struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); | 527 | struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); |
522 | 528 | ||
529 | platform_set_drvdata(pdev, NULL); | ||
523 | pinctrl_unregister(d->pctl); | 530 | pinctrl_unregister(d->pctl); |
524 | iounmap(d->base); | 531 | iounmap(d->base); |
525 | 532 | ||
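The pinctrl-mxs hunk layers its error labels so each one undoes only what already exists: free_group drops the composed group name (and only when it was separately allocated) and falls through to free, which drops new_map. A compact sketch of that staged unwind, assuming nothing about the real driver beyond what the diff shows:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int build_map(int purecfg, int fail_late,
                     int **out_map, char **out_group)
{
        char *group = NULL;
        int *new_map;
        int ret;

        new_map = calloc(4, sizeof(*new_map));
        if (!new_map)
                return -1;

        if (!purecfg) {                 /* a group name is only sometimes needed */
                group = strdup("pinctrl.0");
                if (!group) {
                        ret = -1;
                        goto free;      /* only new_map exists at this point */
                }
        }

        if (fail_late) {                /* analogue of the kmemdup() failure */
                ret = -1;
                goto free_group;        /* undo the group first, then new_map */
        }

        *out_map = new_map;
        *out_group = group;
        return 0;

free_group:
        if (!purecfg)
                free(group);
free:
        free(new_map);
        return ret;
}

int main(void)
{
        int *map;
        char *group;

        if (build_map(0, 0, &map, &group) == 0) {
                printf("group: %s\n", group);
                free(group);
                free(map);
        }
        build_map(0, 1, &map, &group);  /* late failure: both labels run */
        return 0;
}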
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index b26395d16347..e8937e7e4999 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c | |||
@@ -673,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, | |||
673 | * wakeup is anyhow controlled by the RIMSC and FIMSC registers. | 673 | * wakeup is anyhow controlled by the RIMSC and FIMSC registers. |
674 | */ | 674 | */ |
675 | if (nmk_chip->sleepmode && on) { | 675 | if (nmk_chip->sleepmode && on) { |
676 | __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base, | 676 | __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, |
677 | NMK_GPIO_SLPM_WAKEUP_ENABLE); | 677 | NMK_GPIO_SLPM_WAKEUP_ENABLE); |
678 | } | 678 | } |
679 | 679 | ||
@@ -1246,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) | |||
1246 | ret = PTR_ERR(clk); | 1246 | ret = PTR_ERR(clk); |
1247 | goto out_unmap; | 1247 | goto out_unmap; |
1248 | } | 1248 | } |
1249 | clk_prepare(clk); | ||
1249 | 1250 | ||
1250 | nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); | 1251 | nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); |
1251 | if (!nmk_chip) { | 1252 | if (!nmk_chip) { |
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index ba15b1a29e52..e9f8e7d11001 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c | |||
@@ -1184,7 +1184,7 @@ out_no_gpio_remap: | |||
1184 | return ret; | 1184 | return ret; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | static const struct of_device_id pinmux_ids[] = { | 1187 | static const struct of_device_id pinmux_ids[] __devinitconst = { |
1188 | { .compatible = "sirf,prima2-gpio-pinmux" }, | 1188 | { .compatible = "sirf,prima2-gpio-pinmux" }, |
1189 | {} | 1189 | {} |
1190 | }; | 1190 | }; |
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 3660bace123c..e82e7eaac0f1 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c | |||
@@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = { | |||
224 | .of_match_table = of_anatop_regulator_match_tbl, | 224 | .of_match_table = of_anatop_regulator_match_tbl, |
225 | }, | 225 | }, |
226 | .probe = anatop_regulator_probe, | 226 | .probe = anatop_regulator_probe, |
227 | .remove = anatop_regulator_remove, | 227 | .remove = __devexit_p(anatop_regulator_remove), |
228 | }; | 228 | }; |
229 | 229 | ||
230 | static int __init anatop_regulator_init(void) | 230 | static int __init anatop_regulator_init(void) |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7584a74eec8a..09a737c868b5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev, | |||
2050 | return -EINVAL; | 2050 | return -EINVAL; |
2051 | } | 2051 | } |
2052 | 2052 | ||
2053 | if (min_uV < rdev->desc->min_uV) | ||
2054 | min_uV = rdev->desc->min_uV; | ||
2055 | |||
2053 | ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); | 2056 | ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); |
2054 | if (ret < 0) | 2057 | if (ret < 0) |
2055 | return ret; | 2058 | return ret; |
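The regulator core hunk clamps the requested minimum to the regulator's own floor before computing a selector, so a request whose lower bound sits below that floor (say 800000-1200000 uV against an 850000 uV, 50000 uV-step regulator) resolves to selector 0 instead of producing a negative intermediate and failing. A plain-C sketch of the selector arithmetic; the constants are examples, not taken from any particular regulator:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct desc {
        int min_uV;     /* voltage at selector 0 */
        int uV_step;    /* microvolts added per selector step */
        int n_voltages; /* number of selectors */
};

/* Returns a selector, or -1 if the request cannot be satisfied. */
static int map_voltage_linear(const struct desc *d, int min_uV, int max_uV)
{
        int ret, volt;

        if (min_uV < d->min_uV)         /* the clamp added by the fix */
                min_uV = d->min_uV;

        ret = DIV_ROUND_UP(min_uV - d->min_uV, d->uV_step);
        if (ret >= d->n_voltages)
                return -1;

        volt = d->min_uV + ret * d->uV_step;    /* list_voltage() equivalent */
        if (volt < min_uV || volt > max_uV)
                return -1;

        return ret;
}

int main(void)
{
        struct desc d = { .min_uV = 850000, .uV_step = 50000, .n_voltages = 8 };

        /* Without the clamp, 800000 - 850000 would go negative up front. */
        printf("800000..1200000 -> %d\n", map_voltage_linear(&d, 800000, 1200000));
        printf("900000..950000  -> %d\n", map_voltage_linear(&d, 900000, 950000));
        printf("100000..200000  -> %d\n", map_voltage_linear(&d, 100000, 200000));
        return 0;
}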
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 9997d7aaca84..242851a4c1a6 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c | |||
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev) | |||
101 | } | 101 | } |
102 | 102 | ||
103 | static int gpio_regulator_set_value(struct regulator_dev *dev, | 103 | static int gpio_regulator_set_value(struct regulator_dev *dev, |
104 | int min, int max) | 104 | int min, int max, unsigned *selector) |
105 | { | 105 | { |
106 | struct gpio_regulator_data *data = rdev_get_drvdata(dev); | 106 | struct gpio_regulator_data *data = rdev_get_drvdata(dev); |
107 | int ptr, target, state, best_val = INT_MAX; | 107 | int ptr, target = 0, state, best_val = INT_MAX; |
108 | 108 | ||
109 | for (ptr = 0; ptr < data->nr_states; ptr++) | 109 | for (ptr = 0; ptr < data->nr_states; ptr++) |
110 | if (data->states[ptr].value < best_val && | 110 | if (data->states[ptr].value < best_val && |
111 | data->states[ptr].value >= min && | 111 | data->states[ptr].value >= min && |
112 | data->states[ptr].value <= max) | 112 | data->states[ptr].value <= max) { |
113 | target = data->states[ptr].gpios; | 113 | target = data->states[ptr].gpios; |
114 | best_val = data->states[ptr].value; | ||
115 | if (selector) | ||
116 | *selector = ptr; | ||
117 | } | ||
114 | 118 | ||
115 | if (best_val == INT_MAX) | 119 | if (best_val == INT_MAX) |
116 | return -EINVAL; | 120 | return -EINVAL; |
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev, | |||
128 | int min_uV, int max_uV, | 132 | int min_uV, int max_uV, |
129 | unsigned *selector) | 133 | unsigned *selector) |
130 | { | 134 | { |
131 | return gpio_regulator_set_value(dev, min_uV, max_uV); | 135 | return gpio_regulator_set_value(dev, min_uV, max_uV, selector); |
132 | } | 136 | } |
133 | 137 | ||
134 | static int gpio_regulator_list_voltage(struct regulator_dev *dev, | 138 | static int gpio_regulator_list_voltage(struct regulator_dev *dev, |
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev, | |||
145 | static int gpio_regulator_set_current_limit(struct regulator_dev *dev, | 149 | static int gpio_regulator_set_current_limit(struct regulator_dev *dev, |
146 | int min_uA, int max_uA) | 150 | int min_uA, int max_uA) |
147 | { | 151 | { |
148 | return gpio_regulator_set_value(dev, min_uA, max_uA); | 152 | return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); |
149 | } | 153 | } |
150 | 154 | ||
151 | static struct regulator_ops gpio_regulator_voltage_ops = { | 155 | static struct regulator_ops gpio_regulator_voltage_ops = { |
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) | |||
286 | 290 | ||
287 | cfg.dev = &pdev->dev; | 291 | cfg.dev = &pdev->dev; |
288 | cfg.init_data = config->init_data; | 292 | cfg.init_data = config->init_data; |
289 | cfg.driver_data = &drvdata; | 293 | cfg.driver_data = drvdata; |
290 | 294 | ||
291 | drvdata->dev = regulator_register(&drvdata->desc, &cfg); | 295 | drvdata->dev = regulator_register(&drvdata->desc, &cfg); |
292 | if (IS_ERR(drvdata->dev)) { | 296 | if (IS_ERR(drvdata->dev)) { |
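The gpio-regulator change makes the find-the-lowest-in-range loop actually work: the old loop never updated best_val, so the trailing best_val == INT_MAX check failed every request, and the core was never told which selector had been chosen. A standalone sketch of the corrected search; the state table and the function signature here are made up:

#include <limits.h>
#include <stdio.h>

struct state { int value; int gpios; };

/* Pick the lowest state inside [min, max]; report its index via *selector. */
static int set_value(const struct state *states, int nr_states,
                     int min, int max, unsigned int *selector, int *target)
{
        int ptr, best_val = INT_MAX;

        for (ptr = 0; ptr < nr_states; ptr++) {
                if (states[ptr].value < best_val &&
                    states[ptr].value >= min &&
                    states[ptr].value <= max) {
                        *target = states[ptr].gpios;
                        best_val = states[ptr].value;   /* the missing update */
                        if (selector)
                                *selector = (unsigned int)ptr;
                }
        }

        return best_val == INT_MAX ? -1 : 0;
}

int main(void)
{
        /* Deliberately unsorted, as a board's state table might be. */
        const struct state states[] = {
                { 3300000, 0x3 }, { 1800000, 0x1 }, { 2500000, 0x2 },
        };
        unsigned int sel;
        int gpios;

        if (set_value(states, 3, 1500000, 2600000, &sel, &gpios) == 0)
                printf("selector %u, gpio bits 0x%x\n", sel, gpios);
        return 0;
}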
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 1f4bb80457b3..9d540cd02dab 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, | |||
259 | config.dev = &client->dev; | 259 | config.dev = &client->dev; |
260 | config.init_data = pdata->regulator; | 260 | config.init_data = pdata->regulator; |
261 | config.driver_data = info; | 261 | config.driver_data = info; |
262 | config.regmap = info->regmap; | ||
262 | 263 | ||
263 | info->regulator = regulator_register(&dcdc_desc, &config); | 264 | info->regulator = regulator_register(&dcdc_desc, &config); |
264 | if (IS_ERR(info->regulator)) { | 265 | if (IS_ERR(info->regulator)) { |
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index c4435f608df7..9b7ca90057d5 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c | |||
@@ -775,9 +775,6 @@ static __devinit int palmas_probe(struct platform_device *pdev) | |||
775 | err_unregister_regulator: | 775 | err_unregister_regulator: |
776 | while (--id >= 0) | 776 | while (--id >= 0) |
777 | regulator_unregister(pmic->rdev[id]); | 777 | regulator_unregister(pmic->rdev[id]); |
778 | kfree(pmic->rdev); | ||
779 | kfree(pmic->desc); | ||
780 | kfree(pmic); | ||
781 | return ret; | 778 | return ret; |
782 | } | 779 | } |
783 | 780 | ||
@@ -788,10 +785,6 @@ static int __devexit palmas_remove(struct platform_device *pdev) | |||
788 | 785 | ||
789 | for (id = 0; id < PALMAS_NUM_REGS; id++) | 786 | for (id = 0; id < PALMAS_NUM_REGS; id++) |
790 | regulator_unregister(pmic->rdev[id]); | 787 | regulator_unregister(pmic->rdev[id]); |
791 | |||
792 | kfree(pmic->rdev); | ||
793 | kfree(pmic->desc); | ||
794 | kfree(pmic); | ||
795 | return 0; | 788 | return 0; |
796 | } | 789 | } |
797 | 790 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 6102ef2cb2d8..9d46fcbe7755 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr, | |||
1792 | static inline u8 | 1792 | static inline u8 |
1793 | _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) | 1793 | _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) |
1794 | { | 1794 | { |
1795 | return ioc->cpu_msix_table[smp_processor_id()]; | 1795 | return ioc->cpu_msix_table[raw_smp_processor_id()]; |
1796 | } | 1796 | } |
1797 | 1797 | ||
1798 | /** | 1798 | /** |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 04f80ebf09eb..6986552b47e6 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/version.h> | ||
30 | #include <linux/blkdev.h> | 29 | #include <linux/blkdev.h> |
31 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
32 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
@@ -2477,11 +2476,9 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
2477 | } | 2476 | } |
2478 | 2477 | ||
2479 | cmd = qlt_ctio_to_cmd(vha, handle, ctio); | 2478 | cmd = qlt_ctio_to_cmd(vha, handle, ctio); |
2480 | if (cmd == NULL) { | 2479 | if (cmd == NULL) |
2481 | if (status != CTIO_SUCCESS) | ||
2482 | qlt_term_ctio_exchange(vha, ctio, NULL, status); | ||
2483 | return; | 2480 | return; |
2484 | } | 2481 | |
2485 | se_cmd = &cmd->se_cmd; | 2482 | se_cmd = &cmd->se_cmd; |
2486 | tfo = se_cmd->se_tfo; | 2483 | tfo = se_cmd->se_tfo; |
2487 | 2484 | ||
@@ -2727,10 +2724,12 @@ static void qlt_do_work(struct work_struct *work) | |||
2727 | out_term: | 2724 | out_term: |
2728 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); | 2725 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); |
2729 | /* | 2726 | /* |
2730 | * cmd has not sent to target yet, so pass NULL as the second argument | 2727 | * cmd has not sent to target yet, so pass NULL as the second |
2728 | * argument to qlt_send_term_exchange() and free the memory here. | ||
2731 | */ | 2729 | */ |
2732 | spin_lock_irqsave(&ha->hardware_lock, flags); | 2730 | spin_lock_irqsave(&ha->hardware_lock, flags); |
2733 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); | 2731 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); |
2732 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); | ||
2734 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2733 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2735 | if (sess) | 2734 | if (sess) |
2736 | ha->tgt.tgt_ops->put_sess(sess); | 2735 | ha->tgt.tgt_ops->put_sess(sess); |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 9ec19bc2f0fe..9f9ef1644fd9 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
@@ -919,7 +919,6 @@ struct qla_tgt_srr_ctio { | |||
919 | #define QLA_TGT_XMIT_STATUS 2 | 919 | #define QLA_TGT_XMIT_STATUS 2 |
920 | #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) | 920 | #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) |
921 | 921 | ||
922 | #include <linux/version.h> | ||
923 | 922 | ||
924 | extern struct qla_tgt_data qla_target; | 923 | extern struct qla_tgt_data qla_target; |
925 | /* | 924 | /* |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 436598f57404..6e64314dbbb3 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -137,13 +137,15 @@ static char *tcm_qla2xxx_get_fabric_name(void) | |||
137 | */ | 137 | */ |
138 | static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) | 138 | static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) |
139 | { | 139 | { |
140 | unsigned int i, j, value; | 140 | unsigned int i, j; |
141 | u8 wwn[8]; | 141 | u8 wwn[8]; |
142 | 142 | ||
143 | memset(wwn, 0, sizeof(wwn)); | 143 | memset(wwn, 0, sizeof(wwn)); |
144 | 144 | ||
145 | /* Validate and store the new name */ | 145 | /* Validate and store the new name */ |
146 | for (i = 0, j = 0; i < 16; i++) { | 146 | for (i = 0, j = 0; i < 16; i++) { |
147 | int value; | ||
148 | |||
147 | value = hex_to_bin(*ns++); | 149 | value = hex_to_bin(*ns++); |
148 | if (value >= 0) | 150 | if (value >= 0) |
149 | j = (j << 4) | value; | 151 | j = (j << 4) | value; |
@@ -652,8 +654,8 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) | |||
652 | /* | 654 | /* |
653 | * Called from qla_target.c:qlt_issue_task_mgmt() | 655 | * Called from qla_target.c:qlt_issue_task_mgmt() |
654 | */ | 656 | */ |
655 | int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, | 657 | static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, |
656 | uint8_t tmr_func, uint32_t tag) | 658 | uint8_t tmr_func, uint32_t tag) |
657 | { | 659 | { |
658 | struct qla_tgt_sess *sess = mcmd->sess; | 660 | struct qla_tgt_sess *sess = mcmd->sess; |
659 | struct se_cmd *se_cmd = &mcmd->se_cmd; | 661 | struct se_cmd *se_cmd = &mcmd->se_cmd; |
@@ -762,65 +764,8 @@ static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd, | |||
762 | struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; | 764 | struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; |
763 | struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; | 765 | struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; |
764 | 766 | ||
765 | static int tcm_qla2xxx_setup_nacl_from_rport( | 767 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, |
766 | struct se_portal_group *se_tpg, | 768 | struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); |
767 | struct se_node_acl *se_nacl, | ||
768 | struct tcm_qla2xxx_lport *lport, | ||
769 | struct tcm_qla2xxx_nacl *nacl, | ||
770 | u64 rport_wwnn) | ||
771 | { | ||
772 | struct scsi_qla_host *vha = lport->qla_vha; | ||
773 | struct Scsi_Host *sh = vha->host; | ||
774 | struct fc_host_attrs *fc_host = shost_to_fc_host(sh); | ||
775 | struct fc_rport *rport; | ||
776 | unsigned long flags; | ||
777 | void *node; | ||
778 | int rc; | ||
779 | |||
780 | /* | ||
781 | * Scan the existing rports, and create a session for the | ||
782 | * explict NodeACL is an matching rport->node_name already | ||
783 | * exists. | ||
784 | */ | ||
785 | spin_lock_irqsave(sh->host_lock, flags); | ||
786 | list_for_each_entry(rport, &fc_host->rports, peers) { | ||
787 | if (rport_wwnn != rport->node_name) | ||
788 | continue; | ||
789 | |||
790 | pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n", | ||
791 | rport->node_name, rport->port_id); | ||
792 | nacl->nport_id = rport->port_id; | ||
793 | |||
794 | spin_unlock_irqrestore(sh->host_lock, flags); | ||
795 | |||
796 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); | ||
797 | node = btree_lookup32(&lport->lport_fcport_map, rport->port_id); | ||
798 | if (node) { | ||
799 | rc = btree_update32(&lport->lport_fcport_map, | ||
800 | rport->port_id, se_nacl); | ||
801 | } else { | ||
802 | rc = btree_insert32(&lport->lport_fcport_map, | ||
803 | rport->port_id, se_nacl, | ||
804 | GFP_ATOMIC); | ||
805 | } | ||
806 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | ||
807 | |||
808 | if (rc) { | ||
809 | pr_err("Unable to insert se_nacl into fcport_map"); | ||
810 | WARN_ON(rc > 0); | ||
811 | return rc; | ||
812 | } | ||
813 | |||
814 | pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n", | ||
815 | se_nacl, rport_wwnn, nacl->nport_id); | ||
816 | |||
817 | return 1; | ||
818 | } | ||
819 | spin_unlock_irqrestore(sh->host_lock, flags); | ||
820 | |||
821 | return 0; | ||
822 | } | ||
823 | |||
824 | /* | 769 | /* |
825 | * Expected to be called with struct qla_hw_data->hardware_lock held | 770 | * Expected to be called with struct qla_hw_data->hardware_lock held |
826 | */ | 771 | */ |
@@ -842,11 +787,40 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) | |||
842 | 787 | ||
843 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", | 788 | pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", |
844 | se_nacl, nacl->nport_wwnn, nacl->nport_id); | 789 | se_nacl, nacl->nport_wwnn, nacl->nport_id); |
790 | /* | ||
791 | * Now clear the se_nacl and session pointers from our HW lport lookup | ||
792 | * table mapping for this initiator's fabric S_ID and LOOP_ID entries. | ||
793 | * | ||
794 | * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> | ||
795 | * target_wait_for_sess_cmds() before the session waits for outstanding | ||
796 | * I/O to complete, to avoid a race between session shutdown execution | ||
797 | * and incoming ATIOs or TMRs picking up a stale se_node_act reference. | ||
798 | */ | ||
799 | tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); | ||
800 | } | ||
801 | |||
802 | static void tcm_qla2xxx_release_session(struct kref *kref) | ||
803 | { | ||
804 | struct se_session *se_sess = container_of(kref, | ||
805 | struct se_session, sess_kref); | ||
806 | |||
807 | qlt_unreg_sess(se_sess->fabric_sess_ptr); | ||
808 | } | ||
809 | |||
810 | static void tcm_qla2xxx_put_session(struct se_session *se_sess) | ||
811 | { | ||
812 | struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; | ||
813 | struct qla_hw_data *ha = sess->vha->hw; | ||
814 | unsigned long flags; | ||
815 | |||
816 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
817 | kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); | ||
818 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
845 | } | 819 | } |
846 | 820 | ||
847 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) | 821 | static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) |
848 | { | 822 | { |
849 | target_put_session(sess->se_sess); | 823 | tcm_qla2xxx_put_session(sess->se_sess); |
850 | } | 824 | } |
851 | 825 | ||
852 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) | 826 | static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) |
@@ -859,14 +833,10 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | |||
859 | struct config_group *group, | 833 | struct config_group *group, |
860 | const char *name) | 834 | const char *name) |
861 | { | 835 | { |
862 | struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; | ||
863 | struct tcm_qla2xxx_lport *lport = container_of(se_wwn, | ||
864 | struct tcm_qla2xxx_lport, lport_wwn); | ||
865 | struct se_node_acl *se_nacl, *se_nacl_new; | 836 | struct se_node_acl *se_nacl, *se_nacl_new; |
866 | struct tcm_qla2xxx_nacl *nacl; | 837 | struct tcm_qla2xxx_nacl *nacl; |
867 | u64 wwnn; | 838 | u64 wwnn; |
868 | u32 qla2xxx_nexus_depth; | 839 | u32 qla2xxx_nexus_depth; |
869 | int rc; | ||
870 | 840 | ||
871 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) | 841 | if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) |
872 | return ERR_PTR(-EINVAL); | 842 | return ERR_PTR(-EINVAL); |
@@ -893,16 +863,6 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl( | |||
893 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); | 863 | nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); |
894 | nacl->nport_wwnn = wwnn; | 864 | nacl->nport_wwnn = wwnn; |
895 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); | 865 | tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); |
896 | /* | ||
897 | * Setup a se_nacl handle based on an a matching struct fc_rport setup | ||
898 | * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() | ||
899 | */ | ||
900 | rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport, | ||
901 | nacl, wwnn); | ||
902 | if (rc < 0) { | ||
903 | tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); | ||
904 | return ERR_PTR(rc); | ||
905 | } | ||
906 | 866 | ||
907 | return se_nacl; | 867 | return se_nacl; |
908 | } | 868 | } |
@@ -1390,6 +1350,25 @@ static void tcm_qla2xxx_set_sess_by_loop_id( | |||
1390 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); | 1350 | nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); |
1391 | } | 1351 | } |
1392 | 1352 | ||
1353 | /* | ||
1354 | * Should always be called with qla_hw_data->hardware_lock held. | ||
1355 | */ | ||
1356 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, | ||
1357 | struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) | ||
1358 | { | ||
1359 | struct se_session *se_sess = sess->se_sess; | ||
1360 | unsigned char be_sid[3]; | ||
1361 | |||
1362 | be_sid[0] = sess->s_id.b.domain; | ||
1363 | be_sid[1] = sess->s_id.b.area; | ||
1364 | be_sid[2] = sess->s_id.b.al_pa; | ||
1365 | |||
1366 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, | ||
1367 | sess, be_sid); | ||
1368 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, | ||
1369 | sess, sess->loop_id); | ||
1370 | } | ||
1371 | |||
1393 | static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | 1372 | static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) |
1394 | { | 1373 | { |
1395 | struct qla_tgt *tgt = sess->tgt; | 1374 | struct qla_tgt *tgt = sess->tgt; |
@@ -1398,8 +1377,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | |||
1398 | struct se_node_acl *se_nacl; | 1377 | struct se_node_acl *se_nacl; |
1399 | struct tcm_qla2xxx_lport *lport; | 1378 | struct tcm_qla2xxx_lport *lport; |
1400 | struct tcm_qla2xxx_nacl *nacl; | 1379 | struct tcm_qla2xxx_nacl *nacl; |
1401 | unsigned char be_sid[3]; | ||
1402 | unsigned long flags; | ||
1403 | 1380 | ||
1404 | BUG_ON(in_interrupt()); | 1381 | BUG_ON(in_interrupt()); |
1405 | 1382 | ||
@@ -1419,21 +1396,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | |||
1419 | return; | 1396 | return; |
1420 | } | 1397 | } |
1421 | target_wait_for_sess_cmds(se_sess, 0); | 1398 | target_wait_for_sess_cmds(se_sess, 0); |
1422 | /* | ||
1423 | * And now clear the se_nacl and session pointers from our HW lport | ||
1424 | * mappings for fabric S_ID and LOOP_ID. | ||
1425 | */ | ||
1426 | memset(&be_sid, 0, 3); | ||
1427 | be_sid[0] = sess->s_id.b.domain; | ||
1428 | be_sid[1] = sess->s_id.b.area; | ||
1429 | be_sid[2] = sess->s_id.b.al_pa; | ||
1430 | |||
1431 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1432 | tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, | ||
1433 | sess, be_sid); | ||
1434 | tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, | ||
1435 | sess, sess->loop_id); | ||
1436 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1437 | 1399 | ||
1438 | transport_deregister_session_configfs(sess->se_sess); | 1400 | transport_deregister_session_configfs(sess->se_sess); |
1439 | transport_deregister_session(sess->se_sess); | 1401 | transport_deregister_session(sess->se_sess); |
@@ -1731,6 +1693,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { | |||
1731 | .new_cmd_map = NULL, | 1693 | .new_cmd_map = NULL, |
1732 | .check_stop_free = tcm_qla2xxx_check_stop_free, | 1694 | .check_stop_free = tcm_qla2xxx_check_stop_free, |
1733 | .release_cmd = tcm_qla2xxx_release_cmd, | 1695 | .release_cmd = tcm_qla2xxx_release_cmd, |
1696 | .put_session = tcm_qla2xxx_put_session, | ||
1734 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1697 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
1735 | .close_session = tcm_qla2xxx_close_session, | 1698 | .close_session = tcm_qla2xxx_close_session, |
1736 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1699 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
@@ -1779,6 +1742,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { | |||
1779 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, | 1742 | .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, |
1780 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, | 1743 | .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, |
1781 | .release_cmd = tcm_qla2xxx_release_cmd, | 1744 | .release_cmd = tcm_qla2xxx_release_cmd, |
1745 | .put_session = tcm_qla2xxx_put_session, | ||
1782 | .shutdown_session = tcm_qla2xxx_shutdown_session, | 1746 | .shutdown_session = tcm_qla2xxx_shutdown_session, |
1783 | .close_session = tcm_qla2xxx_close_session, | 1747 | .close_session = tcm_qla2xxx_close_session, |
1784 | .sess_get_index = tcm_qla2xxx_sess_get_index, | 1748 | .sess_get_index = tcm_qla2xxx_sess_get_index, |
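The tcm_qla2xxx changes above tighten session teardown in two ways: the lookup entries for the initiator's S_ID and loop_id are cleared before the session waits for outstanding commands, and the final session reference is dropped through a driver-supplied put_session callback so the kref_put runs with hardware_lock held rather than racing against lookups made under that lock. The pthread sketch below shows only the generic "final put under the lookup lock" shape; the session table, refcount field and names are all illustrative, not the qla2xxx structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
        int refcount;           /* protected by table_lock in this sketch */
        int id;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *lookup_table[4];         /* lookups take table_lock */

static void release_session(struct session *s)
{
        lookup_table[s->id] = NULL;     /* unpublish while the lock is held */
        free(s);
}

static void put_session(struct session *s)
{
        pthread_mutex_lock(&table_lock);
        if (--s->refcount == 0)
                release_session(s);     /* runs before anyone can re-find it */
        pthread_mutex_unlock(&table_lock);
}

static struct session *get_session(int id)
{
        struct session *s;

        pthread_mutex_lock(&table_lock);
        s = lookup_table[id];
        if (s)
                s->refcount++;          /* safe: the last put holds the lock too */
        pthread_mutex_unlock(&table_lock);
        return s;
}

int main(void)
{
        struct session *s = calloc(1, sizeof(*s));
        struct session *found;

        if (!s)
                return 1;
        s->refcount = 1;
        s->id = 2;
        lookup_table[2] = s;

        found = get_session(2);        /* refcount 2 */
        put_session(found);            /* back to 1 */
        put_session(s);                /* final put: unpublish and free */
        printf("slot 2 now %p\n", (void *)lookup_table[2]);
        return 0;
}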
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 61c82a345f82..bbbc9c918d4c 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level; | |||
90 | EXPORT_SYMBOL(scsi_logging_level); | 90 | EXPORT_SYMBOL(scsi_logging_level); |
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD) | 93 | /* sd, scsi core and power management need to coordinate flushing async actions */ |
94 | /* sd and scsi_pm need to coordinate flushing async actions */ | ||
95 | LIST_HEAD(scsi_sd_probe_domain); | 94 | LIST_HEAD(scsi_sd_probe_domain); |
96 | EXPORT_SYMBOL(scsi_sd_probe_domain); | 95 | EXPORT_SYMBOL(scsi_sd_probe_domain); |
97 | #endif | ||
98 | 96 | ||
99 | /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. | 97 | /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. |
100 | * You may not alter any existing entry (although adding new ones is | 98 | * You may not alter any existing entry (although adding new ones is |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index e624b836469c..91799973081a 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
374 | 374 | ||
375 | out: | 375 | out: |
376 | transport_kunmap_data_sg(cmd); | 376 | transport_kunmap_data_sg(cmd); |
377 | target_complete_cmd(cmd, GOOD); | 377 | if (!rc) |
378 | return 0; | 378 | target_complete_cmd(cmd, GOOD); |
379 | return rc; | ||
379 | } | 380 | } |
380 | 381 | ||
381 | static inline int core_alua_state_nonoptimized( | 382 | static inline int core_alua_state_nonoptimized( |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b05fdc0c05d3..634d0f31a28c 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -315,7 +315,7 @@ void transport_register_session( | |||
315 | } | 315 | } |
316 | EXPORT_SYMBOL(transport_register_session); | 316 | EXPORT_SYMBOL(transport_register_session); |
317 | 317 | ||
318 | static void target_release_session(struct kref *kref) | 318 | void target_release_session(struct kref *kref) |
319 | { | 319 | { |
320 | struct se_session *se_sess = container_of(kref, | 320 | struct se_session *se_sess = container_of(kref, |
321 | struct se_session, sess_kref); | 321 | struct se_session, sess_kref); |
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session); | |||
332 | 332 | ||
333 | void target_put_session(struct se_session *se_sess) | 333 | void target_put_session(struct se_session *se_sess) |
334 | { | 334 | { |
335 | struct se_portal_group *tpg = se_sess->se_tpg; | ||
336 | |||
337 | if (tpg->se_tpg_tfo->put_session != NULL) { | ||
338 | tpg->se_tpg_tfo->put_session(se_sess); | ||
339 | return; | ||
340 | } | ||
335 | kref_put(&se_sess->sess_kref, target_release_session); | 341 | kref_put(&se_sess->sess_kref, target_release_session); |
336 | } | 342 | } |
337 | EXPORT_SYMBOL(target_put_session); | 343 | EXPORT_SYMBOL(target_put_session); |
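On the target-core side this works because target_put_session() now prefers an optional fabric hook and only falls back to the plain kref_put when the fabric does not provide one. That is a straightforward "optional callback with a default" dispatch, sketched below with invented types:

#include <stdio.h>

struct session;

struct fabric_ops {
        void (*put_session)(struct session *s); /* NULL => use the default */
};

struct session {
        const struct fabric_ops *ops;
        int refcount;
};

static void default_put(struct session *s)
{
        if (--s->refcount == 0)
                printf("core released the session\n");
}

static void put_session(struct session *s)
{
        if (s->ops && s->ops->put_session) {
                s->ops->put_session(s); /* fabric controls locking and ordering */
                return;
        }
        default_put(s);
}

static void fabric_put(struct session *s)
{
        printf("fabric hook ran first\n");
        default_put(s);
}

int main(void)
{
        const struct fabric_ops qla_ops = { .put_session = fabric_put };
        struct session plain = { .ops = NULL,     .refcount = 1 };
        struct session qla   = { .ops = &qla_ops, .refcount = 1 };

        put_session(&plain);
        put_session(&qla);
        return 0;
}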
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index d3d91dae065c..944eaeb8e0cf 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void) | |||
214 | /* already configured */ | 214 | /* already configured */ |
215 | if (info->intf != NULL) | 215 | if (info->intf != NULL) |
216 | return 0; | 216 | return 0; |
217 | 217 | /* | |
218 | * If the toolstack (or the hypervisor) hasn't set these values, the | ||
219 | * default value is 0. Even though mfn = 0 and evtchn = 0 are | ||
220 | * theoretically correct values, in practice they never are and they | ||
221 | * mean that a legacy toolstack hasn't initialized the pv console correctly. | ||
222 | */ | ||
218 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); | 223 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); |
219 | if (r < 0) { | 224 | if (r < 0 || v == 0) |
220 | kfree(info); | 225 | goto err; |
221 | return -ENODEV; | ||
222 | } | ||
223 | info->evtchn = v; | 226 | info->evtchn = v; |
224 | hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); | 227 | v = 0; |
225 | if (r < 0) { | 228 | r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); |
226 | kfree(info); | 229 | if (r < 0 || v == 0) |
227 | return -ENODEV; | 230 | goto err; |
228 | } | ||
229 | mfn = v; | 231 | mfn = v; |
230 | info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); | 232 | info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); |
231 | if (info->intf == NULL) { | 233 | if (info->intf == NULL) |
232 | kfree(info); | 234 | goto err; |
233 | return -ENODEV; | ||
234 | } | ||
235 | info->vtermno = HVC_COOKIE; | 235 | info->vtermno = HVC_COOKIE; |
236 | 236 | ||
237 | spin_lock(&xencons_lock); | 237 | spin_lock(&xencons_lock); |
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void) | |||
239 | spin_unlock(&xencons_lock); | 239 | spin_unlock(&xencons_lock); |
240 | 240 | ||
241 | return 0; | 241 | return 0; |
242 | err: | ||
243 | kfree(info); | ||
244 | return -ENODEV; | ||
242 | } | 245 | } |
243 | 246 | ||
244 | static int xen_pv_console_init(void) | 247 | static int xen_pv_console_init(void) |
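The hvc_xen rework treats an event channel or PFN of 0 as "the toolstack never set this up" rather than as a usable value, checks the return of the second hvm_get_parameter() call (the old code tested a stale r), and folds the three duplicated kfree(info); return -ENODEV; exits into one err: label. A small sketch of that validate-then-bail-to-one-label shape; get_parameter() and the field names are placeholders, not the Xen API:

#include <stdio.h>
#include <stdlib.h>

struct console_info { unsigned long evtchn, pfn; };

/* Stand-in for hvm_get_parameter(): returns 0 but leaves *v == 0 when the
 * "toolstack" never configured the value. */
static int get_parameter(int which, unsigned long *v)
{
        *v = (which == 0) ? 3 : 0;      /* second parameter deliberately unset */
        return 0;
}

static int console_init(struct console_info **out)
{
        struct console_info *info = calloc(1, sizeof(*info));
        unsigned long v;

        if (!info)
                return -1;

        if (get_parameter(0, &v) < 0 || v == 0)
                goto err;
        info->evtchn = v;

        if (get_parameter(1, &v) < 0 || v == 0) /* 0 => legacy toolstack */
                goto err;
        info->pfn = v;

        *out = info;
        return 0;
err:
        free(info);                     /* single cleanup exit */
        return -1;
}

int main(void)
{
        struct console_info *info;

        printf("init: %d\n", console_init(&info));
        return 0;
}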
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 4604153b7954..1bd9163bc118 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev, | |||
2179 | return 0; | 2179 | return 0; |
2180 | } | 2180 | } |
2181 | 2181 | ||
2182 | static void sci_cleanup_single(struct sci_port *port) | ||
2183 | { | ||
2184 | sci_free_gpios(port); | ||
2185 | |||
2186 | clk_put(port->iclk); | ||
2187 | clk_put(port->fclk); | ||
2188 | |||
2189 | pm_runtime_disable(port->port.dev); | ||
2190 | } | ||
2191 | |||
2182 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 2192 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
2183 | static void serial_console_putchar(struct uart_port *port, int ch) | 2193 | static void serial_console_putchar(struct uart_port *port, int ch) |
2184 | { | 2194 | { |
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev) | |||
2360 | cpufreq_unregister_notifier(&port->freq_transition, | 2370 | cpufreq_unregister_notifier(&port->freq_transition, |
2361 | CPUFREQ_TRANSITION_NOTIFIER); | 2371 | CPUFREQ_TRANSITION_NOTIFIER); |
2362 | 2372 | ||
2363 | sci_free_gpios(port); | ||
2364 | |||
2365 | uart_remove_one_port(&sci_uart_driver, &port->port); | 2373 | uart_remove_one_port(&sci_uart_driver, &port->port); |
2366 | 2374 | ||
2367 | clk_put(port->iclk); | 2375 | sci_cleanup_single(port); |
2368 | clk_put(port->fclk); | ||
2369 | 2376 | ||
2370 | pm_runtime_disable(&dev->dev); | ||
2371 | return 0; | 2377 | return 0; |
2372 | } | 2378 | } |
2373 | 2379 | ||
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev, | |||
2385 | index+1, SCI_NPORTS); | 2391 | index+1, SCI_NPORTS); |
2386 | dev_notice(&dev->dev, "Consider bumping " | 2392 | dev_notice(&dev->dev, "Consider bumping " |
2387 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); | 2393 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); |
2388 | return 0; | 2394 | return -EINVAL; |
2389 | } | 2395 | } |
2390 | 2396 | ||
2391 | ret = sci_init_single(dev, sciport, index, p); | 2397 | ret = sci_init_single(dev, sciport, index, p); |
2392 | if (ret) | 2398 | if (ret) |
2393 | return ret; | 2399 | return ret; |
2394 | 2400 | ||
2395 | return uart_add_one_port(&sci_uart_driver, &sciport->port); | 2401 | ret = uart_add_one_port(&sci_uart_driver, &sciport->port); |
2402 | if (ret) { | ||
2403 | sci_cleanup_single(sciport); | ||
2404 | return ret; | ||
2405 | } | ||
2406 | |||
2407 | return 0; | ||
2396 | } | 2408 | } |
2397 | 2409 | ||
2398 | static int __devinit sci_probe(struct platform_device *dev) | 2410 | static int __devinit sci_probe(struct platform_device *dev) |
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev) | |||
2413 | 2425 | ||
2414 | ret = sci_probe_single(dev, dev->id, p, sp); | 2426 | ret = sci_probe_single(dev, dev->id, p, sp); |
2415 | if (ret) | 2427 | if (ret) |
2416 | goto err_unreg; | 2428 | return ret; |
2417 | 2429 | ||
2418 | sp->freq_transition.notifier_call = sci_notifier; | 2430 | sp->freq_transition.notifier_call = sci_notifier; |
2419 | 2431 | ||
2420 | ret = cpufreq_register_notifier(&sp->freq_transition, | 2432 | ret = cpufreq_register_notifier(&sp->freq_transition, |
2421 | CPUFREQ_TRANSITION_NOTIFIER); | 2433 | CPUFREQ_TRANSITION_NOTIFIER); |
2422 | if (unlikely(ret < 0)) | 2434 | if (unlikely(ret < 0)) { |
2423 | goto err_unreg; | 2435 | sci_cleanup_single(sp); |
2436 | return ret; | ||
2437 | } | ||
2424 | 2438 | ||
2425 | #ifdef CONFIG_SH_STANDARD_BIOS | 2439 | #ifdef CONFIG_SH_STANDARD_BIOS |
2426 | sh_bios_gdb_detach(); | 2440 | sh_bios_gdb_detach(); |
2427 | #endif | 2441 | #endif |
2428 | 2442 | ||
2429 | return 0; | 2443 | return 0; |
2430 | |||
2431 | err_unreg: | ||
2432 | sci_remove(dev); | ||
2433 | return ret; | ||
2434 | } | 2444 | } |
2435 | 2445 | ||
2436 | static int sci_suspend(struct device *dev) | 2446 | static int sci_suspend(struct device *dev) |
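The sh-sci rework stops the probe error path from running the full sci_remove() on a device that was never completely set up; instead both a failed uart_add_one_port() and the real remove go through a shared sci_cleanup_single() that mirrors sci_init_single(), and an out-of-range port index now fails with -EINVAL instead of quietly returning success. A sketch of that init/cleanup symmetry with invented resources:

#include <stdio.h>
#include <stdlib.h>

struct port { char *clk_a, *clk_b; int registered; };

static int init_single(struct port *p)
{
        p->clk_a = malloc(8);
        if (!p->clk_a)
                return -1;
        p->clk_b = malloc(8);
        if (!p->clk_b) {
                free(p->clk_a);
                return -1;
        }
        return 0;
}

static void cleanup_single(struct port *p)      /* exact mirror of init_single() */
{
        free(p->clk_a);
        free(p->clk_b);
}

static int add_port(struct port *p, int fail)   /* uart_add_one_port() stand-in */
{
        if (fail)
                return -1;
        p->registered = 1;
        return 0;
}

static int probe(struct port *p, int fail_add)
{
        int ret = init_single(p);

        if (ret)
                return ret;

        ret = add_port(p, fail_add);
        if (ret) {
                cleanup_single(p);      /* undo init only; nothing registered */
                return ret;
        }
        return 0;
}

static void remove_port(struct port *p)
{
        p->registered = 0;              /* uart_remove_one_port() analogue */
        cleanup_single(p);
}

int main(void)
{
        struct port a = { 0 }, b = { 0 };

        if (probe(&a, 0) == 0)
                remove_port(&a);
        printf("failed probe: %d\n", probe(&b, 1));     /* unwinds cleanly */
        return 0;
}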
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f2a120eea9d4..36a2a0b7b82c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) | |||
567 | 567 | ||
568 | usb_autopm_put_interface(acm->control); | 568 | usb_autopm_put_interface(acm->control); |
569 | 569 | ||
570 | /* | ||
571 | * Unthrottle device in case the TTY was closed while throttled. | ||
572 | */ | ||
573 | spin_lock_irq(&acm->read_lock); | ||
574 | acm->throttled = 0; | ||
575 | acm->throttle_req = 0; | ||
576 | spin_unlock_irq(&acm->read_lock); | ||
577 | |||
570 | if (acm_submit_read_urbs(acm, GFP_KERNEL)) | 578 | if (acm_submit_read_urbs(acm, GFP_KERNEL)) |
571 | goto error_submit_read_urbs; | 579 | goto error_submit_read_urbs; |
572 | 580 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 04fb834c3fa1..25a7422ee657 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -3379,7 +3379,7 @@ int usb_disable_lpm(struct usb_device *udev) | |||
3379 | return 0; | 3379 | return 0; |
3380 | 3380 | ||
3381 | udev->lpm_disable_count++; | 3381 | udev->lpm_disable_count++; |
3382 | if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0)) | 3382 | if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) |
3383 | return 0; | 3383 | return 0; |
3384 | 3384 | ||
3385 | /* If LPM is enabled, attempt to disable it. */ | 3385 | /* If LPM is enabled, attempt to disable it. */ |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index ca7fc392fd9e..8b9d669e3784 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1839,7 +1839,6 @@ free_interfaces: | |||
1839 | intfc = cp->intf_cache[i]; | 1839 | intfc = cp->intf_cache[i]; |
1840 | intf->altsetting = intfc->altsetting; | 1840 | intf->altsetting = intfc->altsetting; |
1841 | intf->num_altsetting = intfc->num_altsetting; | 1841 | intf->num_altsetting = intfc->num_altsetting; |
1842 | intf->intf_assoc = find_iad(dev, cp, i); | ||
1843 | kref_get(&intfc->ref); | 1842 | kref_get(&intfc->ref); |
1844 | 1843 | ||
1845 | alt = usb_altnum_to_altsetting(intf, 0); | 1844 | alt = usb_altnum_to_altsetting(intf, 0); |
@@ -1852,6 +1851,8 @@ free_interfaces: | |||
1852 | if (!alt) | 1851 | if (!alt) |
1853 | alt = &intf->altsetting[0]; | 1852 | alt = &intf->altsetting[0]; |
1854 | 1853 | ||
1854 | intf->intf_assoc = | ||
1855 | find_iad(dev, cp, alt->desc.bInterfaceNumber); | ||
1855 | intf->cur_altsetting = alt; | 1856 | intf->cur_altsetting = alt; |
1856 | usb_enable_interface(dev, intf, true); | 1857 | usb_enable_interface(dev, intf, true); |
1857 | intf->dev.parent = &dev->dev; | 1858 | intf->dev.parent = &dev->dev; |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b100f5f9f4b6..800be38c78b4 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd) | |||
671 | hw = ehci->async->hw; | 671 | hw = ehci->async->hw; |
672 | hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); | 672 | hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); |
673 | hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); | 673 | hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); |
674 | #if defined(CONFIG_PPC_PS3) | ||
674 | hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ | 675 | hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ |
676 | #endif | ||
675 | hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); | 677 | hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); |
676 | hw->hw_qtd_next = EHCI_LIST_END(ehci); | 678 | hw->hw_qtd_next = EHCI_LIST_END(ehci); |
677 | ehci->async->qh_state = QH_STATE_LINKED; | 679 | ehci->async->qh_state = QH_STATE_LINKED; |
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index a44294d13494..17cfb8a1131c 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/regulator/consumer.h> | 43 | #include <linux/regulator/consumer.h> |
44 | #include <linux/pm_runtime.h> | 44 | #include <linux/pm_runtime.h> |
45 | #include <linux/gpio.h> | 45 | #include <linux/gpio.h> |
46 | #include <linux/clk.h> | ||
46 | 47 | ||
47 | /* EHCI Register Set */ | 48 | /* EHCI Register Set */ |
48 | #define EHCI_INSNREG04 (0xA0) | 49 | #define EHCI_INSNREG04 (0xA0) |
@@ -55,6 +56,15 @@ | |||
55 | #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 | 56 | #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 |
56 | #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 | 57 | #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 |
57 | 58 | ||
59 | /* Errata i693 */ | ||
60 | static struct clk *utmi_p1_fck; | ||
61 | static struct clk *utmi_p2_fck; | ||
62 | static struct clk *xclk60mhsp1_ck; | ||
63 | static struct clk *xclk60mhsp2_ck; | ||
64 | static struct clk *usbhost_p1_fck; | ||
65 | static struct clk *usbhost_p2_fck; | ||
66 | static struct clk *init_60m_fclk; | ||
67 | |||
58 | /*-------------------------------------------------------------------------*/ | 68 | /*-------------------------------------------------------------------------*/ |
59 | 69 | ||
60 | static const struct hc_driver ehci_omap_hc_driver; | 70 | static const struct hc_driver ehci_omap_hc_driver; |
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg) | |||
70 | return __raw_readl(base + reg); | 80 | return __raw_readl(base + reg); |
71 | } | 81 | } |
72 | 82 | ||
83 | /* Erratum i693 workaround sequence */ | ||
84 | static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) | ||
85 | { | ||
86 | int ret = 0; | ||
87 | |||
88 | /* Switch to the internal 60 MHz clock */ | ||
89 | ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); | ||
90 | if (ret != 0) | ||
91 | ehci_err(ehci, "init_60m_fclk set parent" | ||
92 | "failed error:%d\n", ret); | ||
93 | |||
94 | ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); | ||
95 | if (ret != 0) | ||
96 | ehci_err(ehci, "init_60m_fclk set parent" | ||
97 | "failed error:%d\n", ret); | ||
98 | |||
99 | clk_enable(usbhost_p1_fck); | ||
100 | clk_enable(usbhost_p2_fck); | ||
101 | |||
102 | /* Wait 1ms and switch back to the external clock */ | ||
103 | mdelay(1); | ||
104 | ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); | ||
105 | if (ret != 0) | ||
106 | ehci_err(ehci, "xclk60mhsp1_ck set parent" | ||
107 | "failed error:%d\n", ret); | ||
108 | |||
109 | ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); | ||
110 | if (ret != 0) | ||
111 | ehci_err(ehci, "xclk60mhsp2_ck set parent" | ||
112 | "failed error:%d\n", ret); | ||
113 | |||
114 | clk_disable(usbhost_p1_fck); | ||
115 | clk_disable(usbhost_p2_fck); | ||
116 | } | ||
117 | |||
73 | static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) | 118 | static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) |
74 | { | 119 | { |
75 | struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); | 120 | struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); |
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) | |||
100 | } | 145 | } |
101 | } | 146 | } |
102 | 147 | ||
148 | static int omap_ehci_hub_control( | ||
149 | struct usb_hcd *hcd, | ||
150 | u16 typeReq, | ||
151 | u16 wValue, | ||
152 | u16 wIndex, | ||
153 | char *buf, | ||
154 | u16 wLength | ||
155 | ) | ||
156 | { | ||
157 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
158 | u32 __iomem *status_reg = &ehci->regs->port_status[ | ||
159 | (wIndex & 0xff) - 1]; | ||
160 | u32 temp; | ||
161 | unsigned long flags; | ||
162 | int retval = 0; | ||
163 | |||
164 | spin_lock_irqsave(&ehci->lock, flags); | ||
165 | |||
166 | if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { | ||
167 | temp = ehci_readl(ehci, status_reg); | ||
168 | if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { | ||
169 | retval = -EPIPE; | ||
170 | goto done; | ||
171 | } | ||
172 | |||
173 | temp &= ~PORT_WKCONN_E; | ||
174 | temp |= PORT_WKDISC_E | PORT_WKOC_E; | ||
175 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | ||
176 | |||
177 | omap_ehci_erratum_i693(ehci); | ||
178 | |||
179 | set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); | ||
180 | goto done; | ||
181 | } | ||
182 | |||
183 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
184 | |||
185 | /* Handle the hub control events here */ | ||
186 | return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); | ||
187 | done: | ||
188 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
189 | return retval; | ||
190 | } | ||
191 | |||
103 | static void disable_put_regulator( | 192 | static void disable_put_regulator( |
104 | struct ehci_hcd_omap_platform_data *pdata) | 193 | struct ehci_hcd_omap_platform_data *pdata) |
105 | { | 194 | { |
@@ -264,8 +353,76 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) | |||
264 | /* root ports should always stay powered */ | 353 | /* root ports should always stay powered */ |
265 | ehci_port_power(omap_ehci, 1); | 354 | ehci_port_power(omap_ehci, 1); |
266 | 355 | ||
356 | /* get clocks */ | ||
357 | utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); | ||
358 | if (IS_ERR(utmi_p1_fck)) { | ||
359 | ret = PTR_ERR(utmi_p1_fck); | ||
360 | dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); | ||
361 | goto err_add_hcd; | ||
362 | } | ||
363 | |||
364 | xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); | ||
365 | if (IS_ERR(xclk60mhsp1_ck)) { | ||
366 | ret = PTR_ERR(xclk60mhsp1_ck); | ||
367 | dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); | ||
368 | goto err_utmi_p1_fck; | ||
369 | } | ||
370 | |||
371 | utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); | ||
372 | if (IS_ERR(utmi_p2_fck)) { | ||
373 | ret = PTR_ERR(utmi_p2_fck); | ||
374 | dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); | ||
375 | goto err_xclk60mhsp1_ck; | ||
376 | } | ||
377 | |||
378 | xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); | ||
379 | if (IS_ERR(xclk60mhsp2_ck)) { | ||
380 | ret = PTR_ERR(xclk60mhsp2_ck); | ||
381 | dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); | ||
382 | goto err_utmi_p2_fck; | ||
383 | } | ||
384 | |||
385 | usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); | ||
386 | if (IS_ERR(usbhost_p1_fck)) { | ||
387 | ret = PTR_ERR(usbhost_p1_fck); | ||
388 | dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); | ||
389 | goto err_xclk60mhsp2_ck; | ||
390 | } | ||
391 | |||
392 | usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); | ||
393 | if (IS_ERR(usbhost_p2_fck)) { | ||
394 | ret = PTR_ERR(usbhost_p2_fck); | ||
395 | dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); | ||
396 | goto err_usbhost_p1_fck; | ||
397 | } | ||
398 | |||
399 | init_60m_fclk = clk_get(dev, "init_60m_fclk"); | ||
400 | if (IS_ERR(init_60m_fclk)) { | ||
401 | ret = PTR_ERR(init_60m_fclk); | ||
402 | dev_err(dev, "init_60m_fclk failed error:%d\n", ret); | ||
403 | goto err_usbhost_p2_fck; | ||
404 | } | ||
405 | |||
267 | return 0; | 406 | return 0; |
268 | 407 | ||
408 | err_usbhost_p2_fck: | ||
409 | clk_put(usbhost_p2_fck); | ||
410 | |||
411 | err_usbhost_p1_fck: | ||
412 | clk_put(usbhost_p1_fck); | ||
413 | |||
414 | err_xclk60mhsp2_ck: | ||
415 | clk_put(xclk60mhsp2_ck); | ||
416 | |||
417 | err_utmi_p2_fck: | ||
418 | clk_put(utmi_p2_fck); | ||
419 | |||
420 | err_xclk60mhsp1_ck: | ||
421 | clk_put(xclk60mhsp1_ck); | ||
422 | |||
423 | err_utmi_p1_fck: | ||
424 | clk_put(utmi_p1_fck); | ||
425 | |||
269 | err_add_hcd: | 426 | err_add_hcd: |
270 | disable_put_regulator(pdata); | 427 | disable_put_regulator(pdata); |
271 | pm_runtime_put_sync(dev); | 428 | pm_runtime_put_sync(dev); |
@@ -294,6 +451,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) | |||
294 | disable_put_regulator(dev->platform_data); | 451 | disable_put_regulator(dev->platform_data); |
295 | iounmap(hcd->regs); | 452 | iounmap(hcd->regs); |
296 | usb_put_hcd(hcd); | 453 | usb_put_hcd(hcd); |
454 | |||
455 | clk_put(utmi_p1_fck); | ||
456 | clk_put(utmi_p2_fck); | ||
457 | clk_put(xclk60mhsp1_ck); | ||
458 | clk_put(xclk60mhsp2_ck); | ||
459 | clk_put(usbhost_p1_fck); | ||
460 | clk_put(usbhost_p2_fck); | ||
461 | clk_put(init_60m_fclk); | ||
462 | |||
297 | pm_runtime_put_sync(dev); | 463 | pm_runtime_put_sync(dev); |
298 | pm_runtime_disable(dev); | 464 | pm_runtime_disable(dev); |
299 | 465 | ||
@@ -364,7 +530,7 @@ static const struct hc_driver ehci_omap_hc_driver = { | |||
364 | * root hub support | 530 | * root hub support |
365 | */ | 531 | */ |
366 | .hub_status_data = ehci_hub_status_data, | 532 | .hub_status_data = ehci_hub_status_data, |
367 | .hub_control = ehci_hub_control, | 533 | .hub_control = omap_ehci_hub_control, |
368 | .bus_suspend = ehci_bus_suspend, | 534 | .bus_suspend = ehci_bus_suspend, |
369 | .bus_resume = ehci_bus_resume, | 535 | .bus_resume = ehci_bus_resume, |
370 | 536 | ||
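The ehci-omap additions implement the erratum i693 workaround: on port suspend the driver reparents the UTMI port clocks to the internal 60 MHz source, waits about a millisecond (mdelay(1) in the diff), and switches back to the external 60 MHz clocks before dropping the extra clock enables. Reduced to its skeleton, with every clk_* call stubbed out so the sketch compiles on its own (these stubs are not the real clock API), the sequence is:

#include <stdio.h>
#include <unistd.h>

struct clk { const char *name; const char *parent; };

static int clk_set_parent(struct clk *c, struct clk *parent)
{
        c->parent = parent->name;
        printf("%s now fed by %s\n", c->name, parent->name);
        return 0;
}

static void clk_enable(struct clk *c)  { printf("enable %s\n", c->name); }
static void clk_disable(struct clk *c) { printf("disable %s\n", c->name); }

int main(void)
{
        struct clk init_60m   = { "init_60m_fclk", NULL };
        struct clk xclk60_p1  = { "xclk60mhsp1_ck", NULL };
        struct clk utmi_p1    = { "utmi_p1_gfclk", NULL };
        struct clk usbhost_p1 = { "usb_host_hs_utmi_p1_clk", NULL };

        /* 1. Feed the UTMI port clock from the internal 60 MHz source. */
        clk_set_parent(&utmi_p1, &init_60m);
        clk_enable(&usbhost_p1);

        /* 2. Give the port time to settle; the driver uses mdelay(1). */
        usleep(1000);

        /* 3. Switch back to the external clock and drop the extra enable. */
        clk_set_parent(&utmi_p1, &xclk60_p1);
        clk_disable(&usbhost_p1);

        return 0;
}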
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index ca819cdd0c5e..e7cb3925abf8 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c | |||
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) | |||
126 | goto fail_create_hcd; | 126 | goto fail_create_hcd; |
127 | } | 127 | } |
128 | 128 | ||
129 | if (pdev->dev.platform_data != NULL) | 129 | pdata = pdev->dev.platform_data; |
130 | pdata = pdev->dev.platform_data; | ||
131 | 130 | ||
132 | /* initialize hcd */ | 131 | /* initialize hcd */ |
133 | hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, | 132 | hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, |
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index 9c2cc4633894..e9713d589e30 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c | |||
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op) | |||
270 | * | 270 | * |
271 | * Properly shutdown the hcd, call driver's shutdown routine. | 271 | * Properly shutdown the hcd, call driver's shutdown routine. |
272 | */ | 272 | */ |
273 | static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op) | 273 | static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op) |
274 | { | 274 | { |
275 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); | 275 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); |
276 | 276 | ||
277 | if (hcd->driver->shutdown) | 277 | if (hcd->driver->shutdown) |
278 | hcd->driver->shutdown(hcd); | 278 | hcd->driver->shutdown(hcd); |
279 | |||
280 | return 0; | ||
281 | } | 279 | } |
282 | 280 | ||
283 | 281 | ||
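
The xilinx-of change drops the int return because the .shutdown hook in struct platform_driver takes no return value. A small sketch of the expected shape, with a placeholder driver name:

#include <linux/module.h>
#include <linux/platform_device.h>

static void example_shutdown(struct platform_device *pdev)
{
        /* quiesce the hardware; there is nothing to return */
}

static struct platform_driver example_driver = {
        .shutdown = example_shutdown,
        .driver = {
                .name  = "example",             /* placeholder name */
                .owner = THIS_MODULE,
        },
};
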
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 836772dfabd3..2f3619eefefa 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd) | |||
317 | } | 317 | } |
318 | 318 | ||
319 | /* Carry out the final steps of resuming the controller device */ | 319 | /* Carry out the final steps of resuming the controller device */ |
320 | static void ohci_finish_controller_resume(struct usb_hcd *hcd) | 320 | static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd) |
321 | { | 321 | { |
322 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | 322 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
323 | int port; | 323 | int port; |
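
Tagging ohci_finish_controller_resume() as __maybe_unused silences the compiler's "defined but not used" warning in configurations where no caller is compiled in (presumably when the resume paths are configured out), without wrapping the function body in #ifdefs. Illustrative only:

#include <linux/compiler.h>

/* The attribute suppresses the unused warning when every caller of this
 * helper happens to be compiled away in a given configuration.
 */
static void __maybe_unused demo_resume_helper(void)
{
}
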
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ec4338eec826..77689bd64cac 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci, | |||
793 | struct xhci_virt_device *virt_dev, | 793 | struct xhci_virt_device *virt_dev, |
794 | int slot_id) | 794 | int slot_id) |
795 | { | 795 | { |
796 | struct list_head *tt; | ||
797 | struct list_head *tt_list_head; | 796 | struct list_head *tt_list_head; |
798 | struct list_head *tt_next; | 797 | struct xhci_tt_bw_info *tt_info, *next; |
799 | struct xhci_tt_bw_info *tt_info; | 798 | bool slot_found = false; |
800 | 799 | ||
801 | /* If the device never made it past the Set Address stage, | 800 | /* If the device never made it past the Set Address stage, |
802 | * it may not have the real_port set correctly. | 801 | * it may not have the real_port set correctly. |
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci, | |||
808 | } | 807 | } |
809 | 808 | ||
810 | tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); | 809 | tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); |
811 | if (list_empty(tt_list_head)) | 810 | list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { |
812 | return; | 811 | /* Multi-TT hubs will have more than one entry */ |
813 | 812 | if (tt_info->slot_id == slot_id) { | |
814 | list_for_each(tt, tt_list_head) { | 813 | slot_found = true; |
815 | tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); | 814 | list_del(&tt_info->tt_list); |
816 | if (tt_info->slot_id == slot_id) | 815 | kfree(tt_info); |
816 | } else if (slot_found) { | ||
817 | break; | 817 | break; |
818 | } | ||
818 | } | 819 | } |
819 | /* Cautionary measure in case the hub was disconnected before we | ||
820 | * stored the TT information. | ||
821 | */ | ||
822 | if (tt_info->slot_id != slot_id) | ||
823 | return; | ||
824 | |||
825 | tt_next = tt->next; | ||
826 | tt_info = list_entry(tt, struct xhci_tt_bw_info, | ||
827 | tt_list); | ||
828 | /* Multi-TT hubs will have more than one entry */ | ||
829 | do { | ||
830 | list_del(tt); | ||
831 | kfree(tt_info); | ||
832 | tt = tt_next; | ||
833 | if (list_empty(tt_list_head)) | ||
834 | break; | ||
835 | tt_next = tt->next; | ||
836 | tt_info = list_entry(tt, struct xhci_tt_bw_info, | ||
837 | tt_list); | ||
838 | } while (tt_info->slot_id == slot_id); | ||
839 | } | 820 | } |
840 | 821 | ||
841 | int xhci_alloc_tt_info(struct xhci_hcd *xhci, | 822 | int xhci_alloc_tt_info(struct xhci_hcd *xhci, |
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1791 | { | 1772 | { |
1792 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 1773 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
1793 | struct dev_info *dev_info, *next; | 1774 | struct dev_info *dev_info, *next; |
1794 | struct list_head *tt_list_head; | ||
1795 | struct list_head *tt; | ||
1796 | struct list_head *endpoints; | ||
1797 | struct list_head *ep, *q; | ||
1798 | struct xhci_tt_bw_info *tt_info; | ||
1799 | struct xhci_interval_bw_table *bwt; | ||
1800 | struct xhci_virt_ep *virt_ep; | ||
1801 | |||
1802 | unsigned long flags; | 1775 | unsigned long flags; |
1803 | int size; | 1776 | int size; |
1804 | int i; | 1777 | int i, j, num_ports; |
1805 | 1778 | ||
1806 | /* Free the Event Ring Segment Table and the actual Event Ring */ | 1779 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
1807 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | 1780 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1860 | } | 1833 | } |
1861 | spin_unlock_irqrestore(&xhci->lock, flags); | 1834 | spin_unlock_irqrestore(&xhci->lock, flags); |
1862 | 1835 | ||
1863 | bwt = &xhci->rh_bw->bw_table; | 1836 | num_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
1864 | for (i = 0; i < XHCI_MAX_INTERVAL; i++) { | 1837 | for (i = 0; i < num_ports; i++) { |
1865 | endpoints = &bwt->interval_bw[i].endpoints; | 1838 | struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; |
1866 | list_for_each_safe(ep, q, endpoints) { | 1839 | for (j = 0; j < XHCI_MAX_INTERVAL; j++) { |
1867 | virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list); | 1840 | struct list_head *ep = &bwt->interval_bw[j].endpoints; |
1868 | list_del(&virt_ep->bw_endpoint_list); | 1841 | while (!list_empty(ep)) |
1869 | kfree(virt_ep); | 1842 | list_del_init(ep->next); |
1870 | } | 1843 | } |
1871 | } | 1844 | } |
1872 | 1845 | ||
1873 | tt_list_head = &xhci->rh_bw->tts; | 1846 | for (i = 0; i < num_ports; i++) { |
1874 | list_for_each_safe(tt, q, tt_list_head) { | 1847 | struct xhci_tt_bw_info *tt, *n; |
1875 | tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); | 1848 | list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { |
1876 | list_del(tt); | 1849 | list_del(&tt->tt_list); |
1877 | kfree(tt_info); | 1850 | kfree(tt); |
1851 | } | ||
1878 | } | 1852 | } |
1879 | 1853 | ||
1880 | xhci->num_usb2_ports = 0; | 1854 | xhci->num_usb2_ports = 0; |
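
Both xHCI cleanups above replace open-coded list walks with list_for_each_entry_safe(), which keeps a second cursor so entries can be unlinked and freed while the iteration continues. A self-contained sketch of the idiom with an illustrative struct:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_entry {
        int id;
        struct list_head node;
};

static void demo_free_matching(struct list_head *head, int id)
{
        struct demo_entry *e, *next;

        list_for_each_entry_safe(e, next, head, node) {
                if (e->id == id) {
                        list_del(&e->node);     /* safe: 'next' already saved */
                        kfree(e);
                }
        }
}
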
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index afdc73ee84a6..a979cd0dbe0f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci) | |||
795 | command = xhci_readl(xhci, &xhci->op_regs->command); | 795 | command = xhci_readl(xhci, &xhci->op_regs->command); |
796 | command |= CMD_CSS; | 796 | command |= CMD_CSS; |
797 | xhci_writel(xhci, command, &xhci->op_regs->command); | 797 | xhci_writel(xhci, command, &xhci->op_regs->command); |
798 | if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) { | 798 | if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) { |
799 | xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n"); | 799 | xhci_warn(xhci, "WARN: xHC save state timeout\n"); |
800 | spin_unlock_irq(&xhci->lock); | 800 | spin_unlock_irq(&xhci->lock); |
801 | return -ETIMEDOUT; | 801 | return -ETIMEDOUT; |
802 | } | 802 | } |
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
848 | command |= CMD_CRS; | 848 | command |= CMD_CRS; |
849 | xhci_writel(xhci, command, &xhci->op_regs->command); | 849 | xhci_writel(xhci, command, &xhci->op_regs->command); |
850 | if (handshake(xhci, &xhci->op_regs->status, | 850 | if (handshake(xhci, &xhci->op_regs->status, |
851 | STS_RESTORE, 0, 10*100)) { | 851 | STS_RESTORE, 0, 10 * 1000)) { |
852 | xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n"); | 852 | xhci_warn(xhci, "WARN: xHC restore state timeout\n"); |
853 | spin_unlock_irq(&xhci->lock); | 853 | spin_unlock_irq(&xhci->lock); |
854 | return -ETIMEDOUT; | 854 | return -ETIMEDOUT; |
855 | } | 855 | } |
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, | |||
3906 | default: | 3906 | default: |
3907 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", | 3907 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", |
3908 | __func__); | 3908 | __func__); |
3909 | return -EINVAL; | 3909 | return USB3_LPM_DISABLED; |
3910 | } | 3910 | } |
3911 | 3911 | ||
3912 | if (sel <= max_sel_pel && pel <= max_sel_pel) | 3912 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
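
The last xHCI hunk matters because the hunk header shows the function returns u16: a negative errno such as -EINVAL gets truncated into a large positive timeout rather than signalling an error, so USB3_LPM_DISABLED is the safer sentinel. A tiny user-space illustration of the truncation (not kernel code):

#include <stdio.h>

static unsigned short bad_timeout(void)
{
        return -22;                     /* -EINVAL truncated to 65514 */
}

int main(void)
{
        printf("%u\n", bad_timeout());  /* prints 65514, not an error */
        return 0;
}
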
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 6e8c527e07c9..27483f91a4a3 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -757,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
757 | 757 | ||
758 | if (retval) { | 758 | if (retval) { |
759 | dbg("sub driver rejected device"); | 759 | dbg("sub driver rejected device"); |
760 | kfree(serial); | 760 | usb_serial_put(serial); |
761 | module_put(type->driver.owner); | 761 | module_put(type->driver.owner); |
762 | return retval; | 762 | return retval; |
763 | } | 763 | } |
@@ -829,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
829 | */ | 829 | */ |
830 | if (num_bulk_in == 0 || num_bulk_out == 0) { | 830 | if (num_bulk_in == 0 || num_bulk_out == 0) { |
831 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); | 831 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); |
832 | kfree(serial); | 832 | usb_serial_put(serial); |
833 | module_put(type->driver.owner); | 833 | module_put(type->driver.owner); |
834 | return -ENODEV; | 834 | return -ENODEV; |
835 | } | 835 | } |
@@ -843,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
843 | if (num_ports == 0) { | 843 | if (num_ports == 0) { |
844 | dev_err(&interface->dev, | 844 | dev_err(&interface->dev, |
845 | "Generic device with no bulk out, not allowed.\n"); | 845 | "Generic device with no bulk out, not allowed.\n"); |
846 | kfree(serial); | 846 | usb_serial_put(serial); |
847 | module_put(type->driver.owner); | 847 | module_put(type->driver.owner); |
848 | return -EIO; | 848 | return -EIO; |
849 | } | 849 | } |
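
The usb-serial fixes swap kfree() for usb_serial_put() because the serial structure is reference counted; freeing it directly would bypass the release path. A generic sketch of the kref pattern involved (types and names are illustrative):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
        struct kref kref;
        /* ... payload ... */
};

static void demo_release(struct kref *kref)
{
        struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

        kfree(obj);                     /* only the release callback frees */
}

static void demo_put(struct demo_obj *obj)
{
        kref_put(&obj->kref, demo_release);     /* never kfree() directly */
}
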
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1719886bb9be..caf22bf5f822 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -1107,6 +1107,13 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, | |||
1107 | USB_SC_RBC, USB_PR_BULK, NULL, | 1107 | USB_SC_RBC, USB_PR_BULK, NULL, |
1108 | 0 ), | 1108 | 0 ), |
1109 | 1109 | ||
1110 | /* Feiya QDI U2 DISK, reported by Hans de Goede <hdegoede@redhat.com> */ | ||
1111 | UNUSUAL_DEV( 0x090c, 0x1000, 0x0000, 0xffff, | ||
1112 | "Feiya", | ||
1113 | "QDI U2 DISK", | ||
1114 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1115 | US_FL_NO_READ_CAPACITY_16 ), | ||
1116 | |||
1110 | /* aeb */ | 1117 | /* aeb */ |
1111 | UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, | 1118 | UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, |
1112 | "Feiya", | 1119 | "Feiya", |
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index fa2b03750316..2979292650d6 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig | |||
@@ -88,7 +88,7 @@ config LCD_PLATFORM | |||
88 | 88 | ||
89 | config LCD_TOSA | 89 | config LCD_TOSA |
90 | tristate "Sharp SL-6000 LCD Driver" | 90 | tristate "Sharp SL-6000 LCD Driver" |
91 | depends on SPI && MACH_TOSA | 91 | depends on I2C && SPI && MACH_TOSA |
92 | help | 92 | help |
93 | If you have an Sharp SL-6000 Zaurus say Y to enable a driver | 93 | If you have an Sharp SL-6000 Zaurus say Y to enable a driver |
94 | for its LCD. | 94 | for its LCD. |
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c index 6c9399341bcf..9327cd1b3143 100644 --- a/drivers/video/backlight/ili9320.c +++ b/drivers/video/backlight/ili9320.c | |||
@@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi, | |||
263 | 263 | ||
264 | EXPORT_SYMBOL_GPL(ili9320_probe_spi); | 264 | EXPORT_SYMBOL_GPL(ili9320_probe_spi); |
265 | 265 | ||
266 | int __devexit ili9320_remove(struct ili9320 *ili) | 266 | int ili9320_remove(struct ili9320 *ili) |
267 | { | 267 | { |
268 | ili9320_power(ili, FB_BLANK_POWERDOWN); | 268 | ili9320_power(ili, FB_BLANK_POWERDOWN); |
269 | 269 | ||
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c index 33ea874c87d2..9bdd4b0c18c8 100644 --- a/drivers/video/bfin_adv7393fb.c +++ b/drivers/video/bfin_adv7393fb.c | |||
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off, | |||
353 | 353 | ||
354 | static int | 354 | static int |
355 | adv7393_write_proc(struct file *file, const char __user * buffer, | 355 | adv7393_write_proc(struct file *file, const char __user * buffer, |
356 | unsigned long count, void *data) | 356 | size_t count, void *data) |
357 | { | 357 | { |
358 | struct adv7393fb_device *fbdev = data; | 358 | struct adv7393fb_device *fbdev = data; |
359 | char line[8]; | ||
360 | unsigned int val; | 359 | unsigned int val; |
361 | int ret; | 360 | int ret; |
362 | 361 | ||
363 | ret = copy_from_user(line, buffer, count); | 362 | ret = kstrtouint_from_user(buffer, count, 0, &val); |
364 | if (ret) | 363 | if (ret) |
365 | return -EFAULT; | 364 | return -EFAULT; |
366 | 365 | ||
367 | val = simple_strtoul(line, NULL, 0); | ||
368 | adv7393_write(fbdev->client, val >> 8, val & 0xff); | 366 | adv7393_write(fbdev->client, val >> 8, val & 0xff); |
369 | 367 | ||
370 | return count; | 368 | return count; |
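
The bfin_adv7393fb hunk folds copy_from_user() plus simple_strtoul() into kstrtouint_from_user(), which copies and parses the user buffer in one bounded step. A hedged sketch of a write handler built on that helper (handler name and prototype are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t demo_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned int val;
        int ret;

        ret = kstrtouint_from_user(buf, count, 0, &val);
        if (ret)
                return ret;             /* -EFAULT/-EINVAL from the helper */

        /* ... program the hardware with val ... */
        return count;
}
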
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c index 377dde3d5bfc..c95b417d0d41 100644 --- a/drivers/video/broadsheetfb.c +++ b/drivers/video/broadsheetfb.c | |||
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev) | |||
1211 | 1211 | ||
1212 | static struct platform_driver broadsheetfb_driver = { | 1212 | static struct platform_driver broadsheetfb_driver = { |
1213 | .probe = broadsheetfb_probe, | 1213 | .probe = broadsheetfb_probe, |
1214 | .remove = broadsheetfb_remove, | 1214 | .remove = __devexit_p(broadsheetfb_remove), |
1215 | .driver = { | 1215 | .driver = { |
1216 | .owner = THIS_MODULE, | 1216 | .owner = THIS_MODULE, |
1217 | .name = "broadsheetfb", | 1217 | .name = "broadsheetfb", |
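
broadsheetfb (and mbxfb further down) wrap the remove callback in __devexit_p(), so the pointer compiles to NULL when the __devexit section is discarded instead of leaving a dangling reference. Sketch of the pattern on a placeholder driver:

#include <linux/init.h>
#include <linux/platform_device.h>

static int __devinit demo_probe(struct platform_device *pdev)
{
        return 0;
}

static int __devexit demo_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .remove = __devexit_p(demo_remove),     /* NULL if section dropped */
        .driver = {
                .name = "demo",
        },
};
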
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index c2d11fef114b..e2c96d01d8f5 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig | |||
@@ -224,5 +224,19 @@ config FONT_10x18 | |||
224 | big letters. It fits between the sun 12x22 and the normal 8x16 font. | 224 | big letters. It fits between the sun 12x22 and the normal 8x16 font. |
225 | If other fonts are too big or too small for you, say Y, otherwise say N. | 225 | If other fonts are too big or too small for you, say Y, otherwise say N. |
226 | 226 | ||
227 | config FONT_AUTOSELECT | ||
228 | def_bool y | ||
229 | depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON | ||
230 | depends on !FONT_8x8 | ||
231 | depends on !FONT_6x11 | ||
232 | depends on !FONT_7x14 | ||
233 | depends on !FONT_PEARL_8x8 | ||
234 | depends on !FONT_ACORN_8x8 | ||
235 | depends on !FONT_MINI_4x6 | ||
236 | depends on !FONT_SUN8x16 | ||
237 | depends on !FONT_SUN12x22 | ||
238 | depends on !FONT_10x18 | ||
239 | select FONT_8x16 | ||
240 | |||
227 | endmenu | 241 | endmenu |
228 | 242 | ||
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c index ab0a8e527333..85e4f44bfa61 100644 --- a/drivers/video/mbx/mbxfb.c +++ b/drivers/video/mbx/mbxfb.c | |||
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev) | |||
1045 | 1045 | ||
1046 | static struct platform_driver mbxfb_driver = { | 1046 | static struct platform_driver mbxfb_driver = { |
1047 | .probe = mbxfb_probe, | 1047 | .probe = mbxfb_probe, |
1048 | .remove = mbxfb_remove, | 1048 | .remove = __devexit_p(mbxfb_remove), |
1049 | .suspend = mbxfb_suspend, | 1049 | .suspend = mbxfb_suspend, |
1050 | .resume = mbxfb_resume, | 1050 | .resume = mbxfb_resume, |
1051 | .driver = { | 1051 | .driver = { |
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index 2ce9992f403b..901576eb5a84 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c | |||
@@ -526,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev, | |||
526 | { | 526 | { |
527 | struct omap_dss_device *dssdev = to_dss_device(dev); | 527 | struct omap_dss_device *dssdev = to_dss_device(dev); |
528 | struct taal_data *td = dev_get_drvdata(&dssdev->dev); | 528 | struct taal_data *td = dev_get_drvdata(&dssdev->dev); |
529 | u8 errors; | 529 | u8 errors = 0; |
530 | int r; | 530 | int r; |
531 | 531 | ||
532 | mutex_lock(&td->lock); | 532 | mutex_lock(&td->lock); |
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 72ded9cd2cb0..5066eee10ccf 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c | |||
@@ -194,8 +194,7 @@ static inline int dss_initialize_debugfs(void) | |||
194 | static inline void dss_uninitialize_debugfs(void) | 194 | static inline void dss_uninitialize_debugfs(void) |
195 | { | 195 | { |
196 | } | 196 | } |
197 | static inline int dss_debugfs_create_file(const char *name, | 197 | int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) |
198 | void (*write)(struct seq_file *)) | ||
199 | { | 198 | { |
200 | return 0; | 199 | return 0; |
201 | } | 200 | } |
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index ec363d8390ed..ca8382d346e9 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c | |||
@@ -3724,7 +3724,7 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs, | |||
3724 | /* CLKIN4DDR = 16 * TXBYTECLKHS */ | 3724 | /* CLKIN4DDR = 16 * TXBYTECLKHS */ |
3725 | tlp_avail = thsbyte_clk * (blank - trans_lp); | 3725 | tlp_avail = thsbyte_clk * (blank - trans_lp); |
3726 | 3726 | ||
3727 | ttxclkesc = tdsi_fclk / lp_clk_div; | 3727 | ttxclkesc = tdsi_fclk * lp_clk_div; |
3728 | 3728 | ||
3729 | lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - | 3729 | lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - |
3730 | 26) / 16; | 3730 | 26) / 16; |
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 6ea1ff149f6f..770632359a17 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c | |||
@@ -731,7 +731,7 @@ static void dss_runtime_put(void) | |||
731 | DSSDBG("dss_runtime_put\n"); | 731 | DSSDBG("dss_runtime_put\n"); |
732 | 732 | ||
733 | r = pm_runtime_put_sync(&dss.pdev->dev); | 733 | r = pm_runtime_put_sync(&dss.pdev->dev); |
734 | WARN_ON(r < 0); | 734 | WARN_ON(r < 0 && r != -EBUSY); |
735 | } | 735 | } |
736 | 736 | ||
737 | /* DEBUGFS */ | 737 | /* DEBUGFS */ |
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index 5f9d8e69029e..ea7b661e7229 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c | |||
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) | |||
361 | result = (unsigned int)tmp / 1000; | 361 | result = (unsigned int)tmp / 1000; |
362 | 362 | ||
363 | dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", | 363 | dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", |
364 | pixclk, clk, result, clk / result); | 364 | pixclk, clk, result, result ? clk / result : clk); |
365 | 365 | ||
366 | return result; | 366 | return result; |
367 | } | 367 | } |
@@ -1348,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win) | |||
1348 | writel(0, regs + VIDOSD_A(win, sfb->variant)); | 1348 | writel(0, regs + VIDOSD_A(win, sfb->variant)); |
1349 | writel(0, regs + VIDOSD_B(win, sfb->variant)); | 1349 | writel(0, regs + VIDOSD_B(win, sfb->variant)); |
1350 | writel(0, regs + VIDOSD_C(win, sfb->variant)); | 1350 | writel(0, regs + VIDOSD_C(win, sfb->variant)); |
1351 | reg = readl(regs + SHADOWCON); | 1351 | |
1352 | writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON); | 1352 | if (sfb->variant.has_shadowcon) { |
1353 | reg = readl(sfb->regs + SHADOWCON); | ||
1354 | reg &= ~(SHADOWCON_WINx_PROTECT(win) | | ||
1355 | SHADOWCON_CHx_ENABLE(win) | | ||
1356 | SHADOWCON_CHx_LOCAL_ENABLE(win)); | ||
1357 | writel(reg, sfb->regs + SHADOWCON); | ||
1358 | } | ||
1353 | } | 1359 | } |
1354 | 1360 | ||
1355 | static int __devinit s3c_fb_probe(struct platform_device *pdev) | 1361 | static int __devinit s3c_fb_probe(struct platform_device *pdev) |
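
The s3c-fb window-clear now touches SHADOWCON only when the variant actually has that register, and clears the per-window channel-enable bits as well. A generic shape of a capability-gated read-modify-write, with a placeholder offset and mask:

#include <linux/io.h>

struct demo_variant {
        unsigned int has_shadow:1;
};

static void demo_clear_window(void __iomem *regs,
                              const struct demo_variant *variant, u32 mask)
{
        u32 reg;

        if (!variant->has_shadow)
                return;                 /* register absent on this variant */

        reg = readl(regs + 0x34);       /* hypothetical register offset */
        reg &= ~mask;
        writel(reg, regs + 0x34);
}
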
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c index cee7803a0a1c..f3d3b9ce4751 100644 --- a/drivers/video/savage/savagefb_driver.c +++ b/drivers/video/savage/savagefb_driver.c | |||
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r | |||
1351 | /* following part not present in X11 driver */ | 1351 | /* following part not present in X11 driver */ |
1352 | cr67 = vga_in8(0x3d5, par) & 0xf; | 1352 | cr67 = vga_in8(0x3d5, par) & 0xf; |
1353 | vga_out8(0x3d5, 0x50 | cr67, par); | 1353 | vga_out8(0x3d5, 0x50 | cr67, par); |
1354 | udelay(10000); | 1354 | mdelay(10); |
1355 | vga_out8(0x3d4, 0x67, par); | 1355 | vga_out8(0x3d4, 0x67, par); |
1356 | /* end of part */ | 1356 | /* end of part */ |
1357 | vga_out8(0x3d5, reg->CR67 & ~0x0c, par); | 1357 | vga_out8(0x3d5, reg->CR67 & ~0x0c, par); |
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
1904 | vga_out8(0x3d4, 0x66, par); | 1904 | vga_out8(0x3d4, 0x66, par); |
1905 | cr66 = vga_in8(0x3d5, par); | 1905 | cr66 = vga_in8(0x3d5, par); |
1906 | vga_out8(0x3d5, cr66 | 0x02, par); | 1906 | vga_out8(0x3d5, cr66 | 0x02, par); |
1907 | udelay(10000); | 1907 | mdelay(10); |
1908 | 1908 | ||
1909 | vga_out8(0x3d4, 0x66, par); | 1909 | vga_out8(0x3d4, 0x66, par); |
1910 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ | 1910 | vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ |
1911 | udelay(10000); | 1911 | mdelay(10); |
1912 | 1912 | ||
1913 | 1913 | ||
1914 | /* | 1914 | /* |
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par) | |||
1918 | vga_out8(0x3d4, 0x3f, par); | 1918 | vga_out8(0x3d4, 0x3f, par); |
1919 | cr3f = vga_in8(0x3d5, par); | 1919 | cr3f = vga_in8(0x3d5, par); |
1920 | vga_out8(0x3d5, cr3f | 0x08, par); | 1920 | vga_out8(0x3d5, cr3f | 0x08, par); |
1921 | udelay(10000); | 1921 | mdelay(10); |
1922 | 1922 | ||
1923 | vga_out8(0x3d4, 0x3f, par); | 1923 | vga_out8(0x3d4, 0x3f, par); |
1924 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ | 1924 | vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ |
1925 | udelay(10000); | 1925 | mdelay(10); |
1926 | 1926 | ||
1927 | /* Savage ramdac speeds */ | 1927 | /* Savage ramdac speeds */ |
1928 | par->numClocks = 4; | 1928 | par->numClocks = 4; |
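
savagefb replaces udelay(10000) with mdelay(10): udelay() is intended for short busy-waits, and very large arguments risk overflowing the delay-loop calculation on some architectures, so millisecond-scale waits go through mdelay() (or msleep() where sleeping is permitted). Illustrative:

#include <linux/delay.h>

static void demo_reset_settle(void)
{
        mdelay(10);     /* 10 ms busy-wait; usable in atomic context */
        /*
         * msleep(10) would be preferred if every caller were allowed
         * to sleep at this point.
         */
}
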
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 6908e4ce2a0d..7595581d032c 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
827 | handle_edge_irq, "event"); | 827 | handle_edge_irq, "event"); |
828 | 828 | ||
829 | xen_irq_info_evtchn_init(irq, evtchn); | 829 | xen_irq_info_evtchn_init(irq, evtchn); |
830 | } else { | ||
831 | struct irq_info *info = info_for_irq(irq); | ||
832 | WARN_ON(info == NULL || info->type != IRQT_EVTCHN); | ||
830 | } | 833 | } |
831 | 834 | ||
832 | out: | 835 | out: |
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
862 | xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); | 865 | xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); |
863 | 866 | ||
864 | bind_evtchn_to_cpu(evtchn, cpu); | 867 | bind_evtchn_to_cpu(evtchn, cpu); |
868 | } else { | ||
869 | struct irq_info *info = info_for_irq(irq); | ||
870 | WARN_ON(info == NULL || info->type != IRQT_IPI); | ||
865 | } | 871 | } |
866 | 872 | ||
867 | out: | 873 | out: |
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
939 | xen_irq_info_virq_init(cpu, irq, evtchn, virq); | 945 | xen_irq_info_virq_init(cpu, irq, evtchn, virq); |
940 | 946 | ||
941 | bind_evtchn_to_cpu(evtchn, cpu); | 947 | bind_evtchn_to_cpu(evtchn, cpu); |
948 | } else { | ||
949 | struct irq_info *info = info_for_irq(irq); | ||
950 | WARN_ON(info == NULL || info->type != IRQT_VIRQ); | ||
942 | } | 951 | } |
943 | 952 | ||
944 | out: | 953 | out: |
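
Each of the Xen binding paths now checks, on the "irq already exists" branch, that the stored irq_info matches the expected type, so a mismatched rebind is flagged loudly rather than silently reused. Generic shape of that defensive check (enum and struct are illustrative):

#include <linux/bug.h>

enum demo_type { DEMO_NONE, DEMO_EVTCHN, DEMO_IPI, DEMO_VIRQ };

struct demo_info {
        enum demo_type type;
};

static void demo_check_existing(const struct demo_info *info,
                                enum demo_type expected)
{
        /* non-fatal but noisy, matching the WARN_ON() added above */
        WARN_ON(info == NULL || info->type != expected);
}
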
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index b84bf0b6cc34..18fff88254eb 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c | |||
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev) | |||
59 | 59 | ||
60 | #ifdef CONFIG_ACPI | 60 | #ifdef CONFIG_ACPI |
61 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); | 61 | handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); |
62 | if (!handle) | 62 | if (!handle && pci_dev->bus->bridge) |
63 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); | 63 | handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); |
64 | #ifdef CONFIG_PCI_IOV | 64 | #ifdef CONFIG_PCI_IOV |
65 | if (!handle && pci_dev->is_virtfn) | 65 | if (!handle && pci_dev->is_virtfn) |
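
The xen/pci change adds a NULL test before dereferencing pci_dev->bus->bridge, since a bus may have no bridge device behind it. Minimal form of that guard:

#include <linux/pci.h>

static struct device *demo_bridge_of(struct pci_dev *pdev)
{
        if (!pdev->bus->bridge)
                return NULL;            /* e.g. a root bus without a bridge */
        return pdev->bus->bridge;
}
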
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 3f75895c919b..8f7d1237b7a0 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -179,7 +179,8 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id, | |||
179 | 179 | ||
180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | 180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, |
181 | struct ulist *parents, int level, | 181 | struct ulist *parents, int level, |
182 | struct btrfs_key *key, u64 wanted_disk_byte, | 182 | struct btrfs_key *key, u64 time_seq, |
183 | u64 wanted_disk_byte, | ||
183 | const u64 *extent_item_pos) | 184 | const u64 *extent_item_pos) |
184 | { | 185 | { |
185 | int ret; | 186 | int ret; |
@@ -212,7 +213,7 @@ add_parent: | |||
212 | */ | 213 | */ |
213 | while (1) { | 214 | while (1) { |
214 | eie = NULL; | 215 | eie = NULL; |
215 | ret = btrfs_next_leaf(root, path); | 216 | ret = btrfs_next_old_leaf(root, path, time_seq); |
216 | if (ret < 0) | 217 | if (ret < 0) |
217 | return ret; | 218 | return ret; |
218 | if (ret) | 219 | if (ret) |
@@ -294,18 +295,10 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
294 | goto out; | 295 | goto out; |
295 | } | 296 | } |
296 | 297 | ||
297 | if (level == 0) { | 298 | if (level == 0) |
298 | if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) { | ||
299 | ret = btrfs_next_leaf(root, path); | ||
300 | if (ret) | ||
301 | goto out; | ||
302 | eb = path->nodes[0]; | ||
303 | } | ||
304 | |||
305 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); | 299 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); |
306 | } | ||
307 | 300 | ||
308 | ret = add_all_parents(root, path, parents, level, &key, | 301 | ret = add_all_parents(root, path, parents, level, &key, time_seq, |
309 | ref->wanted_disk_byte, extent_item_pos); | 302 | ref->wanted_disk_byte, extent_item_pos); |
310 | out: | 303 | out: |
311 | btrfs_free_path(path); | 304 | btrfs_free_path(path); |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index e616f8872e69..12394a90d60f 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #define BTRFS_INODE_IN_DEFRAG 3 | 37 | #define BTRFS_INODE_IN_DEFRAG 3 |
38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 | 38 | #define BTRFS_INODE_DELALLOC_META_RESERVED 4 |
39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 | 39 | #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 |
40 | #define BTRFS_INODE_HAS_ASYNC_EXTENT 6 | ||
40 | 41 | ||
41 | /* in memory btrfs inode */ | 42 | /* in memory btrfs inode */ |
42 | struct btrfs_inode { | 43 | struct btrfs_inode { |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 9cebb1fd6a3c..da6e9364a5e3 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
@@ -93,6 +93,7 @@ | |||
93 | #include "print-tree.h" | 93 | #include "print-tree.h" |
94 | #include "locking.h" | 94 | #include "locking.h" |
95 | #include "check-integrity.h" | 95 | #include "check-integrity.h" |
96 | #include "rcu-string.h" | ||
96 | 97 | ||
97 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 | 98 | #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 |
98 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 | 99 | #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 |
@@ -843,13 +844,14 @@ static int btrfsic_process_superblock_dev_mirror( | |||
843 | superblock_tmp->never_written = 0; | 844 | superblock_tmp->never_written = 0; |
844 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; | 845 | superblock_tmp->mirror_num = 1 + superblock_mirror_num; |
845 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) | 846 | if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) |
846 | printk(KERN_INFO "New initial S-block (bdev %p, %s)" | 847 | printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)" |
847 | " @%llu (%s/%llu/%d)\n", | 848 | " @%llu (%s/%llu/%d)\n", |
848 | superblock_bdev, device->name, | 849 | superblock_bdev, |
849 | (unsigned long long)dev_bytenr, | 850 | rcu_str_deref(device->name), |
850 | dev_state->name, | 851 | (unsigned long long)dev_bytenr, |
851 | (unsigned long long)dev_bytenr, | 852 | dev_state->name, |
852 | superblock_mirror_num); | 853 | (unsigned long long)dev_bytenr, |
854 | superblock_mirror_num); | ||
853 | list_add(&superblock_tmp->all_blocks_node, | 855 | list_add(&superblock_tmp->all_blocks_node, |
854 | &state->all_blocks_list); | 856 | &state->all_blocks_list); |
855 | btrfsic_block_hashtable_add(superblock_tmp, | 857 | btrfsic_block_hashtable_add(superblock_tmp, |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d7a96cfdc50a..15cbc2bf4ff0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -467,6 +467,15 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info, | |||
467 | return 0; | 467 | return 0; |
468 | } | 468 | } |
469 | 469 | ||
470 | /* | ||
471 | * This allocates memory and gets a tree modification sequence number when | ||
472 | * needed. | ||
473 | * | ||
474 | * Returns 0 when no sequence number is needed, < 0 on error. | ||
475 | * Returns 1 when a sequence number was added. In this case, | ||
476 | * fs_info->tree_mod_seq_lock was acquired and must be released by the caller | ||
477 | * after inserting into the rb tree. | ||
478 | */ | ||
470 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | 479 | static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, |
471 | struct tree_mod_elem **tm_ret) | 480 | struct tree_mod_elem **tm_ret) |
472 | { | 481 | { |
@@ -491,11 +500,11 @@ static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, | |||
491 | */ | 500 | */ |
492 | kfree(tm); | 501 | kfree(tm); |
493 | seq = 0; | 502 | seq = 0; |
503 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
494 | } else { | 504 | } else { |
495 | __get_tree_mod_seq(fs_info, &tm->elem); | 505 | __get_tree_mod_seq(fs_info, &tm->elem); |
496 | seq = tm->elem.seq; | 506 | seq = tm->elem.seq; |
497 | } | 507 | } |
498 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
499 | 508 | ||
500 | return seq; | 509 | return seq; |
501 | } | 510 | } |
@@ -521,7 +530,9 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info, | |||
521 | tm->slot = slot; | 530 | tm->slot = slot; |
522 | tm->generation = btrfs_node_ptr_generation(eb, slot); | 531 | tm->generation = btrfs_node_ptr_generation(eb, slot); |
523 | 532 | ||
524 | return __tree_mod_log_insert(fs_info, tm); | 533 | ret = __tree_mod_log_insert(fs_info, tm); |
534 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
535 | return ret; | ||
525 | } | 536 | } |
526 | 537 | ||
527 | static noinline int | 538 | static noinline int |
@@ -559,7 +570,9 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info, | |||
559 | tm->move.nr_items = nr_items; | 570 | tm->move.nr_items = nr_items; |
560 | tm->op = MOD_LOG_MOVE_KEYS; | 571 | tm->op = MOD_LOG_MOVE_KEYS; |
561 | 572 | ||
562 | return __tree_mod_log_insert(fs_info, tm); | 573 | ret = __tree_mod_log_insert(fs_info, tm); |
574 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
575 | return ret; | ||
563 | } | 576 | } |
564 | 577 | ||
565 | static noinline int | 578 | static noinline int |
@@ -580,7 +593,9 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, | |||
580 | tm->generation = btrfs_header_generation(old_root); | 593 | tm->generation = btrfs_header_generation(old_root); |
581 | tm->op = MOD_LOG_ROOT_REPLACE; | 594 | tm->op = MOD_LOG_ROOT_REPLACE; |
582 | 595 | ||
583 | return __tree_mod_log_insert(fs_info, tm); | 596 | ret = __tree_mod_log_insert(fs_info, tm); |
597 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
598 | return ret; | ||
584 | } | 599 | } |
585 | 600 | ||
586 | static struct tree_mod_elem * | 601 | static struct tree_mod_elem * |
@@ -1023,6 +1038,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info, | |||
1023 | looped = 1; | 1038 | looped = 1; |
1024 | } | 1039 | } |
1025 | 1040 | ||
1041 | /* if there's no old root to return, return what we found instead */ | ||
1042 | if (!found) | ||
1043 | found = tm; | ||
1044 | |||
1026 | return found; | 1045 | return found; |
1027 | } | 1046 | } |
1028 | 1047 | ||
@@ -1143,22 +1162,36 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, | |||
1143 | return eb_rewin; | 1162 | return eb_rewin; |
1144 | } | 1163 | } |
1145 | 1164 | ||
1165 | /* | ||
1166 | * get_old_root() rewinds the state of @root's root node to the given @time_seq | ||
1167 | * value. If there are no changes, the current root->root_node is returned. If | ||
1168 | * anything changed in between, there's a fresh buffer allocated on which the | ||
1169 | * rewind operations are done. In any case, the returned buffer is read locked. | ||
1170 | * Returns NULL on error (with no locks held). | ||
1171 | */ | ||
1146 | static inline struct extent_buffer * | 1172 | static inline struct extent_buffer * |
1147 | get_old_root(struct btrfs_root *root, u64 time_seq) | 1173 | get_old_root(struct btrfs_root *root, u64 time_seq) |
1148 | { | 1174 | { |
1149 | struct tree_mod_elem *tm; | 1175 | struct tree_mod_elem *tm; |
1150 | struct extent_buffer *eb; | 1176 | struct extent_buffer *eb; |
1151 | struct tree_mod_root *old_root; | 1177 | struct tree_mod_root *old_root = NULL; |
1152 | u64 old_generation; | 1178 | u64 old_generation = 0; |
1179 | u64 logical; | ||
1153 | 1180 | ||
1181 | eb = btrfs_read_lock_root_node(root); | ||
1154 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); | 1182 | tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); |
1155 | if (!tm) | 1183 | if (!tm) |
1156 | return root->node; | 1184 | return root->node; |
1157 | 1185 | ||
1158 | old_root = &tm->old_root; | 1186 | if (tm->op == MOD_LOG_ROOT_REPLACE) { |
1159 | old_generation = tm->generation; | 1187 | old_root = &tm->old_root; |
1188 | old_generation = tm->generation; | ||
1189 | logical = old_root->logical; | ||
1190 | } else { | ||
1191 | logical = root->node->start; | ||
1192 | } | ||
1160 | 1193 | ||
1161 | tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq); | 1194 | tm = tree_mod_log_search(root->fs_info, logical, time_seq); |
1162 | /* | 1195 | /* |
1163 | * there was an item in the log when __tree_mod_log_oldest_root | 1196 | * there was an item in the log when __tree_mod_log_oldest_root |
1164 | * returned. this one must not go away, because the time_seq passed to | 1197 | * returned. this one must not go away, because the time_seq passed to |
@@ -1166,22 +1199,25 @@ get_old_root(struct btrfs_root *root, u64 time_seq) | |||
1166 | */ | 1199 | */ |
1167 | BUG_ON(!tm); | 1200 | BUG_ON(!tm); |
1168 | 1201 | ||
1169 | if (old_root->logical == root->node->start) { | 1202 | if (old_root) |
1170 | /* there are logged operations for the current root */ | ||
1171 | eb = btrfs_clone_extent_buffer(root->node); | ||
1172 | } else { | ||
1173 | /* there's a root replace operation for the current root */ | ||
1174 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, | 1203 | eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, |
1175 | root->nodesize); | 1204 | root->nodesize); |
1205 | else | ||
1206 | eb = btrfs_clone_extent_buffer(root->node); | ||
1207 | btrfs_tree_read_unlock(root->node); | ||
1208 | free_extent_buffer(root->node); | ||
1209 | if (!eb) | ||
1210 | return NULL; | ||
1211 | btrfs_tree_read_lock(eb); | ||
1212 | if (old_root) { | ||
1176 | btrfs_set_header_bytenr(eb, eb->start); | 1213 | btrfs_set_header_bytenr(eb, eb->start); |
1177 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); | 1214 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); |
1178 | btrfs_set_header_owner(eb, root->root_key.objectid); | 1215 | btrfs_set_header_owner(eb, root->root_key.objectid); |
1216 | btrfs_set_header_level(eb, old_root->level); | ||
1217 | btrfs_set_header_generation(eb, old_generation); | ||
1179 | } | 1218 | } |
1180 | if (!eb) | ||
1181 | return NULL; | ||
1182 | btrfs_set_header_level(eb, old_root->level); | ||
1183 | btrfs_set_header_generation(eb, old_generation); | ||
1184 | __tree_mod_log_rewind(eb, time_seq, tm); | 1219 | __tree_mod_log_rewind(eb, time_seq, tm); |
1220 | extent_buffer_get(eb); | ||
1185 | 1221 | ||
1186 | return eb; | 1222 | return eb; |
1187 | } | 1223 | } |
@@ -1650,8 +1686,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1650 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) | 1686 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) |
1651 | return 0; | 1687 | return 0; |
1652 | 1688 | ||
1653 | btrfs_header_nritems(mid); | ||
1654 | |||
1655 | left = read_node_slot(root, parent, pslot - 1); | 1689 | left = read_node_slot(root, parent, pslot - 1); |
1656 | if (left) { | 1690 | if (left) { |
1657 | btrfs_tree_lock(left); | 1691 | btrfs_tree_lock(left); |
@@ -1681,7 +1715,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1681 | wret = push_node_left(trans, root, left, mid, 1); | 1715 | wret = push_node_left(trans, root, left, mid, 1); |
1682 | if (wret < 0) | 1716 | if (wret < 0) |
1683 | ret = wret; | 1717 | ret = wret; |
1684 | btrfs_header_nritems(mid); | ||
1685 | } | 1718 | } |
1686 | 1719 | ||
1687 | /* | 1720 | /* |
@@ -2615,9 +2648,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, | |||
2615 | 2648 | ||
2616 | again: | 2649 | again: |
2617 | b = get_old_root(root, time_seq); | 2650 | b = get_old_root(root, time_seq); |
2618 | extent_buffer_get(b); | ||
2619 | level = btrfs_header_level(b); | 2651 | level = btrfs_header_level(b); |
2620 | btrfs_tree_read_lock(b); | ||
2621 | p->locks[level] = BTRFS_READ_LOCK; | 2652 | p->locks[level] = BTRFS_READ_LOCK; |
2622 | 2653 | ||
2623 | while (b) { | 2654 | while (b) { |
@@ -5001,6 +5032,12 @@ next: | |||
5001 | */ | 5032 | */ |
5002 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) | 5033 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) |
5003 | { | 5034 | { |
5035 | return btrfs_next_old_leaf(root, path, 0); | ||
5036 | } | ||
5037 | |||
5038 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
5039 | u64 time_seq) | ||
5040 | { | ||
5004 | int slot; | 5041 | int slot; |
5005 | int level; | 5042 | int level; |
5006 | struct extent_buffer *c; | 5043 | struct extent_buffer *c; |
@@ -5025,7 +5062,10 @@ again: | |||
5025 | path->keep_locks = 1; | 5062 | path->keep_locks = 1; |
5026 | path->leave_spinning = 1; | 5063 | path->leave_spinning = 1; |
5027 | 5064 | ||
5028 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 5065 | if (time_seq) |
5066 | ret = btrfs_search_old_slot(root, &key, path, time_seq); | ||
5067 | else | ||
5068 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
5029 | path->keep_locks = 0; | 5069 | path->keep_locks = 0; |
5030 | 5070 | ||
5031 | if (ret < 0) | 5071 | if (ret < 0) |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0236d03c6732..8b73b2d4deb7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -2753,6 +2753,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
2753 | } | 2753 | } |
2754 | 2754 | ||
2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2756 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | ||
2757 | u64 time_seq); | ||
2756 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) | 2758 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) |
2757 | { | 2759 | { |
2758 | ++p->slots[0]; | 2760 | ++p->slots[0]; |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index c18d0442ae6d..2399f4086915 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
@@ -1879,3 +1879,21 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) | |||
1879 | } | 1879 | } |
1880 | } | 1880 | } |
1881 | } | 1881 | } |
1882 | |||
1883 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root) | ||
1884 | { | ||
1885 | struct btrfs_delayed_root *delayed_root; | ||
1886 | struct btrfs_delayed_node *curr_node, *prev_node; | ||
1887 | |||
1888 | delayed_root = btrfs_get_delayed_root(root); | ||
1889 | |||
1890 | curr_node = btrfs_first_delayed_node(delayed_root); | ||
1891 | while (curr_node) { | ||
1892 | __btrfs_kill_delayed_node(curr_node); | ||
1893 | |||
1894 | prev_node = curr_node; | ||
1895 | curr_node = btrfs_next_delayed_node(curr_node); | ||
1896 | btrfs_release_delayed_node(prev_node); | ||
1897 | } | ||
1898 | } | ||
1899 | |||
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 7083d08b2a21..f5aa4023d3e1 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h | |||
@@ -124,6 +124,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev); | |||
124 | /* Used for drop dead root */ | 124 | /* Used for drop dead root */ |
125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); | 125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); |
126 | 126 | ||
127 | /* Used for clean the transaction */ | ||
128 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root); | ||
129 | |||
127 | /* Used for readdir() */ | 130 | /* Used for readdir() */ |
128 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 131 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, |
129 | struct list_head *del_list); | 132 | struct list_head *del_list); |
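
The new btrfs_destroy_delayed_inodes() walks the delayed-node list by fetching the next node before releasing the current one, so the reference being dropped never holds up the traversal. A generic sketch of that prev/curr walk with placeholder types:

struct demo_node {
        struct demo_node *next;
};

static void demo_kill_all(struct demo_node *first,
                          void (*kill)(struct demo_node *),
                          void (*release)(struct demo_node *))
{
        struct demo_node *curr = first, *prev;

        while (curr) {
                kill(curr);
                prev = curr;
                curr = curr->next;      /* grab next before dropping prev */
                release(prev);
        }
}
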
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7ae51decf6d3..e1890b1d3075 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include "free-space-cache.h" | 44 | #include "free-space-cache.h" |
45 | #include "inode-map.h" | 45 | #include "inode-map.h" |
46 | #include "check-integrity.h" | 46 | #include "check-integrity.h" |
47 | #include "rcu-string.h" | ||
47 | 48 | ||
48 | static struct extent_io_ops btree_extent_io_ops; | 49 | static struct extent_io_ops btree_extent_io_ops; |
49 | static void end_workqueue_fn(struct btrfs_work *work); | 50 | static void end_workqueue_fn(struct btrfs_work *work); |
@@ -2118,7 +2119,7 @@ int open_ctree(struct super_block *sb, | |||
2118 | 2119 | ||
2119 | features = btrfs_super_incompat_flags(disk_super); | 2120 | features = btrfs_super_incompat_flags(disk_super); |
2120 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; | 2121 | features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; |
2121 | if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) | 2122 | if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) |
2122 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; | 2123 | features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; |
2123 | 2124 | ||
2124 | /* | 2125 | /* |
@@ -2575,8 +2576,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | |||
2575 | struct btrfs_device *device = (struct btrfs_device *) | 2576 | struct btrfs_device *device = (struct btrfs_device *) |
2576 | bh->b_private; | 2577 | bh->b_private; |
2577 | 2578 | ||
2578 | printk_ratelimited(KERN_WARNING "lost page write due to " | 2579 | printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to " |
2579 | "I/O error on %s\n", device->name); | 2580 | "I/O error on %s\n", |
2581 | rcu_str_deref(device->name)); | ||
2580 | /* note, we dont' set_buffer_write_io_error because we have | 2582 | /* note, we dont' set_buffer_write_io_error because we have |
2581 | * our own ways of dealing with the IO errors | 2583 | * our own ways of dealing with the IO errors |
2582 | */ | 2584 | */ |
@@ -2749,8 +2751,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait) | |||
2749 | wait_for_completion(&device->flush_wait); | 2751 | wait_for_completion(&device->flush_wait); |
2750 | 2752 | ||
2751 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { | 2753 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { |
2752 | printk("btrfs: disabling barriers on dev %s\n", | 2754 | printk_in_rcu("btrfs: disabling barriers on dev %s\n", |
2753 | device->name); | 2755 | rcu_str_deref(device->name)); |
2754 | device->nobarriers = 1; | 2756 | device->nobarriers = 1; |
2755 | } | 2757 | } |
2756 | if (!bio_flagged(bio, BIO_UPTODATE)) { | 2758 | if (!bio_flagged(bio, BIO_UPTODATE)) { |
@@ -3400,7 +3402,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
3400 | 3402 | ||
3401 | delayed_refs = &trans->delayed_refs; | 3403 | delayed_refs = &trans->delayed_refs; |
3402 | 3404 | ||
3403 | again: | ||
3404 | spin_lock(&delayed_refs->lock); | 3405 | spin_lock(&delayed_refs->lock); |
3405 | if (delayed_refs->num_entries == 0) { | 3406 | if (delayed_refs->num_entries == 0) { |
3406 | spin_unlock(&delayed_refs->lock); | 3407 | spin_unlock(&delayed_refs->lock); |
@@ -3408,31 +3409,36 @@ again: | |||
3408 | return ret; | 3409 | return ret; |
3409 | } | 3410 | } |
3410 | 3411 | ||
3411 | node = rb_first(&delayed_refs->root); | 3412 | while ((node = rb_first(&delayed_refs->root)) != NULL) { |
3412 | while (node) { | ||
3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | 3413 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); |
3414 | node = rb_next(node); | ||
3415 | |||
3416 | ref->in_tree = 0; | ||
3417 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
3418 | delayed_refs->num_entries--; | ||
3419 | 3414 | ||
3420 | atomic_set(&ref->refs, 1); | 3415 | atomic_set(&ref->refs, 1); |
3421 | if (btrfs_delayed_ref_is_head(ref)) { | 3416 | if (btrfs_delayed_ref_is_head(ref)) { |
3422 | struct btrfs_delayed_ref_head *head; | 3417 | struct btrfs_delayed_ref_head *head; |
3423 | 3418 | ||
3424 | head = btrfs_delayed_node_to_head(ref); | 3419 | head = btrfs_delayed_node_to_head(ref); |
3425 | spin_unlock(&delayed_refs->lock); | 3420 | if (!mutex_trylock(&head->mutex)) { |
3426 | mutex_lock(&head->mutex); | 3421 | atomic_inc(&ref->refs); |
3422 | spin_unlock(&delayed_refs->lock); | ||
3423 | |||
3424 | /* Need to wait for the delayed ref to run */ | ||
3425 | mutex_lock(&head->mutex); | ||
3426 | mutex_unlock(&head->mutex); | ||
3427 | btrfs_put_delayed_ref(ref); | ||
3428 | |||
3429 | continue; | ||
3430 | } | ||
3431 | |||
3427 | kfree(head->extent_op); | 3432 | kfree(head->extent_op); |
3428 | delayed_refs->num_heads--; | 3433 | delayed_refs->num_heads--; |
3429 | if (list_empty(&head->cluster)) | 3434 | if (list_empty(&head->cluster)) |
3430 | delayed_refs->num_heads_ready--; | 3435 | delayed_refs->num_heads_ready--; |
3431 | list_del_init(&head->cluster); | 3436 | list_del_init(&head->cluster); |
3432 | mutex_unlock(&head->mutex); | ||
3433 | btrfs_put_delayed_ref(ref); | ||
3434 | goto again; | ||
3435 | } | 3437 | } |
3438 | ref->in_tree = 0; | ||
3439 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
3440 | delayed_refs->num_entries--; | ||
3441 | |||
3436 | spin_unlock(&delayed_refs->lock); | 3442 | spin_unlock(&delayed_refs->lock); |
3437 | btrfs_put_delayed_ref(ref); | 3443 | btrfs_put_delayed_ref(ref); |
3438 | 3444 | ||
@@ -3520,11 +3526,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
3520 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, | 3526 | &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, |
3521 | offset >> PAGE_CACHE_SHIFT); | 3527 | offset >> PAGE_CACHE_SHIFT); |
3522 | spin_unlock(&dirty_pages->buffer_lock); | 3528 | spin_unlock(&dirty_pages->buffer_lock); |
3523 | if (eb) { | 3529 | if (eb) |
3524 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, | 3530 | ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, |
3525 | &eb->bflags); | 3531 | &eb->bflags); |
3526 | atomic_set(&eb->refs, 1); | ||
3527 | } | ||
3528 | if (PageWriteback(page)) | 3532 | if (PageWriteback(page)) |
3529 | end_page_writeback(page); | 3533 | end_page_writeback(page); |
3530 | 3534 | ||
@@ -3538,8 +3542,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root, | |||
3538 | spin_unlock_irq(&page->mapping->tree_lock); | 3542 | spin_unlock_irq(&page->mapping->tree_lock); |
3539 | } | 3543 | } |
3540 | 3544 | ||
3541 | page->mapping->a_ops->invalidatepage(page, 0); | ||
3542 | unlock_page(page); | 3545 | unlock_page(page); |
3546 | page_cache_release(page); | ||
3543 | } | 3547 | } |
3544 | } | 3548 | } |
3545 | 3549 | ||
@@ -3553,8 +3557,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
3553 | u64 start; | 3557 | u64 start; |
3554 | u64 end; | 3558 | u64 end; |
3555 | int ret; | 3559 | int ret; |
3560 | bool loop = true; | ||
3556 | 3561 | ||
3557 | unpin = pinned_extents; | 3562 | unpin = pinned_extents; |
3563 | again: | ||
3558 | while (1) { | 3564 | while (1) { |
3559 | ret = find_first_extent_bit(unpin, 0, &start, &end, | 3565 | ret = find_first_extent_bit(unpin, 0, &start, &end, |
3560 | EXTENT_DIRTY); | 3566 | EXTENT_DIRTY); |
@@ -3572,6 +3578,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root, | |||
3572 | cond_resched(); | 3578 | cond_resched(); |
3573 | } | 3579 | } |
3574 | 3580 | ||
3581 | if (loop) { | ||
3582 | if (unpin == &root->fs_info->freed_extents[0]) | ||
3583 | unpin = &root->fs_info->freed_extents[1]; | ||
3584 | else | ||
3585 | unpin = &root->fs_info->freed_extents[0]; | ||
3586 | loop = false; | ||
3587 | goto again; | ||
3588 | } | ||
3589 | |||
3575 | return 0; | 3590 | return 0; |
3576 | } | 3591 | } |
3577 | 3592 | ||
@@ -3585,21 +3600,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, | |||
3585 | /* FIXME: cleanup wait for commit */ | 3600 | /* FIXME: cleanup wait for commit */ |
3586 | cur_trans->in_commit = 1; | 3601 | cur_trans->in_commit = 1; |
3587 | cur_trans->blocked = 1; | 3602 | cur_trans->blocked = 1; |
3588 | if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) | 3603 | wake_up(&root->fs_info->transaction_blocked_wait); |
3589 | wake_up(&root->fs_info->transaction_blocked_wait); | ||
3590 | 3604 | ||
3591 | cur_trans->blocked = 0; | 3605 | cur_trans->blocked = 0; |
3592 | if (waitqueue_active(&root->fs_info->transaction_wait)) | 3606 | wake_up(&root->fs_info->transaction_wait); |
3593 | wake_up(&root->fs_info->transaction_wait); | ||
3594 | 3607 | ||
3595 | cur_trans->commit_done = 1; | 3608 | cur_trans->commit_done = 1; |
3596 | if (waitqueue_active(&cur_trans->commit_wait)) | 3609 | wake_up(&cur_trans->commit_wait); |
3597 | wake_up(&cur_trans->commit_wait); | 3610 | |
3611 | btrfs_destroy_delayed_inodes(root); | ||
3612 | btrfs_assert_delayed_root_empty(root); | ||
3598 | 3613 | ||
3599 | btrfs_destroy_pending_snapshots(cur_trans); | 3614 | btrfs_destroy_pending_snapshots(cur_trans); |
3600 | 3615 | ||
3601 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, | 3616 | btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, |
3602 | EXTENT_DIRTY); | 3617 | EXTENT_DIRTY); |
3618 | btrfs_destroy_pinned_extent(root, | ||
3619 | root->fs_info->pinned_extents); | ||
3603 | 3620 | ||
3604 | /* | 3621 | /* |
3605 | memset(cur_trans, 0, sizeof(*cur_trans)); | 3622 | memset(cur_trans, 0, sizeof(*cur_trans)); |
@@ -3648,6 +3665,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
3648 | if (waitqueue_active(&t->commit_wait)) | 3665 | if (waitqueue_active(&t->commit_wait)) |
3649 | wake_up(&t->commit_wait); | 3666 | wake_up(&t->commit_wait); |
3650 | 3667 | ||
3668 | btrfs_destroy_delayed_inodes(root); | ||
3669 | btrfs_assert_delayed_root_empty(root); | ||
3670 | |||
3651 | btrfs_destroy_pending_snapshots(t); | 3671 | btrfs_destroy_pending_snapshots(t); |
3652 | 3672 | ||
3653 | btrfs_destroy_delalloc_inodes(root); | 3673 | btrfs_destroy_delalloc_inodes(root); |
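
btrfs_destroy_delayed_refs() now uses mutex_trylock(): when a ref head is busy it takes an extra reference, waits for the holder, and continues the loop instead of restarting the whole tree walk. A generic sketch of that trylock-or-wait idiom:

#include <linux/mutex.h>

static void demo_drain_one(struct mutex *lock)
{
        if (!mutex_trylock(lock)) {
                /* entry is being processed; wait for the holder, then
                 * let the caller re-examine the list from the top */
                mutex_lock(lock);
                mutex_unlock(lock);
                return;
        }

        /* ... tear the entry down while holding the lock ... */
        mutex_unlock(lock);
}
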
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2c8f7b204617..aaa12c1eb348 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "volumes.h" | 20 | #include "volumes.h" |
21 | #include "check-integrity.h" | 21 | #include "check-integrity.h" |
22 | #include "locking.h" | 22 | #include "locking.h" |
23 | #include "rcu-string.h" | ||
23 | 24 | ||
24 | static struct kmem_cache *extent_state_cache; | 25 | static struct kmem_cache *extent_state_cache; |
25 | static struct kmem_cache *extent_buffer_cache; | 26 | static struct kmem_cache *extent_buffer_cache; |
@@ -1917,9 +1918,9 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start, | |||
1917 | return -EIO; | 1918 | return -EIO; |
1918 | } | 1919 | } |
1919 | 1920 | ||
1920 | printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s " | 1921 | printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu " |
1921 | "sector %llu)\n", page->mapping->host->i_ino, start, | 1922 | "(dev %s sector %llu)\n", page->mapping->host->i_ino, |
1922 | dev->name, sector); | 1923 | start, rcu_str_deref(dev->name), sector); |
1923 | 1924 | ||
1924 | bio_put(bio); | 1925 | bio_put(bio); |
1925 | return 0; | 1926 | return 0; |
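
Several btrfs hunks in this merge route device-name printks through rcu_str_deref() and the new rcu-string.h helpers: the name pointer is RCU-protected, so it must be dereferenced inside an RCU read-side section. A generic sketch using plain RCU primitives rather than the btrfs helpers:

#include <linux/printk.h>
#include <linux/rcupdate.h>

struct demo_dev {
        const char __rcu *name;
};

static void demo_print_name(struct demo_dev *dev)
{
        rcu_read_lock();
        pr_info("device %s\n", rcu_dereference(dev->name));
        rcu_read_unlock();
}
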
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f6ab6f5e635a..a4f02501da40 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -830,7 +830,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
830 | if (IS_ERR(trans)) { | 830 | if (IS_ERR(trans)) { |
831 | extent_clear_unlock_delalloc(inode, | 831 | extent_clear_unlock_delalloc(inode, |
832 | &BTRFS_I(inode)->io_tree, | 832 | &BTRFS_I(inode)->io_tree, |
833 | start, end, NULL, | 833 | start, end, locked_page, |
834 | EXTENT_CLEAR_UNLOCK_PAGE | | 834 | EXTENT_CLEAR_UNLOCK_PAGE | |
835 | EXTENT_CLEAR_UNLOCK | | 835 | EXTENT_CLEAR_UNLOCK | |
836 | EXTENT_CLEAR_DELALLOC | | 836 | EXTENT_CLEAR_DELALLOC | |
@@ -963,7 +963,7 @@ out: | |||
963 | out_unlock: | 963 | out_unlock: |
964 | extent_clear_unlock_delalloc(inode, | 964 | extent_clear_unlock_delalloc(inode, |
965 | &BTRFS_I(inode)->io_tree, | 965 | &BTRFS_I(inode)->io_tree, |
966 | start, end, NULL, | 966 | start, end, locked_page, |
967 | EXTENT_CLEAR_UNLOCK_PAGE | | 967 | EXTENT_CLEAR_UNLOCK_PAGE | |
968 | EXTENT_CLEAR_UNLOCK | | 968 | EXTENT_CLEAR_UNLOCK | |
969 | EXTENT_CLEAR_DELALLOC | | 969 | EXTENT_CLEAR_DELALLOC | |
@@ -986,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work) | |||
986 | compress_file_range(async_cow->inode, async_cow->locked_page, | 986 | compress_file_range(async_cow->inode, async_cow->locked_page, |
987 | async_cow->start, async_cow->end, async_cow, | 987 | async_cow->start, async_cow->end, async_cow, |
988 | &num_added); | 988 | &num_added); |
989 | if (num_added == 0) | 989 | if (num_added == 0) { |
990 | iput(async_cow->inode); | ||
990 | async_cow->inode = NULL; | 991 | async_cow->inode = NULL; |
992 | } | ||
991 | } | 993 | } |
992 | 994 | ||
993 | /* | 995 | /* |
@@ -1020,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work) | |||
1020 | { | 1022 | { |
1021 | struct async_cow *async_cow; | 1023 | struct async_cow *async_cow; |
1022 | async_cow = container_of(work, struct async_cow, work); | 1024 | async_cow = container_of(work, struct async_cow, work); |
1025 | if (async_cow->inode) | ||
1026 | iput(async_cow->inode); | ||
1023 | kfree(async_cow); | 1027 | kfree(async_cow); |
1024 | } | 1028 | } |
1025 | 1029 | ||
@@ -1038,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, | |||
1038 | while (start < end) { | 1042 | while (start < end) { |
1039 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); | 1043 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); |
1040 | BUG_ON(!async_cow); /* -ENOMEM */ | 1044 | BUG_ON(!async_cow); /* -ENOMEM */ |
1041 | async_cow->inode = inode; | 1045 | async_cow->inode = igrab(inode); |
1042 | async_cow->root = root; | 1046 | async_cow->root = root; |
1043 | async_cow->locked_page = locked_page; | 1047 | async_cow->locked_page = locked_page; |
1044 | async_cow->start = start; | 1048 | async_cow->start = start; |
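
The three fs/btrfs/inode.c hunks above change who holds a reference on the inode while compression work is queued: cow_file_range_async() now pins it with igrab(), async_cow_start() drops the reference early when compress_file_range() queued nothing, and async_cow_free() drops whatever is still held. A minimal sketch of that ownership contract, with the structure trimmed to the fields involved and the queueing machinery omitted:

        /* Sketch of the reference lifecycle introduced above; submit side does
         * async_cow->inode = igrab(inode) when the work item is created. */
        struct async_cow {
                struct inode *inode;            /* pinned with igrab() at submit time */
                struct btrfs_work work;
                /* root, locked_page, start, end omitted */
        };

        static void async_cow_start(struct btrfs_work *work)
        {
                struct async_cow *async_cow = container_of(work, struct async_cow, work);
                int num_added = 0;

                /* compress_file_range(...) runs here and reports through num_added
                 * how many async extents it queued */
                if (num_added == 0) {
                        iput(async_cow->inode);         /* nothing queued: done with the inode */
                        async_cow->inode = NULL;        /* keep the free handler from a double iput */
                }
        }

        static void async_cow_free(struct btrfs_work *work)
        {
                struct async_cow *async_cow = container_of(work, struct async_cow, work);

                if (async_cow->inode)
                        iput(async_cow->inode);         /* common case: drop the igrab() reference */
                kfree(async_cow);
        }

The NULL assignment in the start handler is what keeps the free handler from dropping the reference twice; igrab() may also return NULL if the inode is already being evicted, which iput() tolerates.
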
@@ -1136,8 +1140,18 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1136 | u64 ino = btrfs_ino(inode); | 1140 | u64 ino = btrfs_ino(inode); |
1137 | 1141 | ||
1138 | path = btrfs_alloc_path(); | 1142 | path = btrfs_alloc_path(); |
1139 | if (!path) | 1143 | if (!path) { |
1144 | extent_clear_unlock_delalloc(inode, | ||
1145 | &BTRFS_I(inode)->io_tree, | ||
1146 | start, end, locked_page, | ||
1147 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1148 | EXTENT_CLEAR_UNLOCK | | ||
1149 | EXTENT_CLEAR_DELALLOC | | ||
1150 | EXTENT_CLEAR_DIRTY | | ||
1151 | EXTENT_SET_WRITEBACK | | ||
1152 | EXTENT_END_WRITEBACK); | ||
1140 | return -ENOMEM; | 1153 | return -ENOMEM; |
1154 | } | ||
1141 | 1155 | ||
1142 | nolock = btrfs_is_free_space_inode(root, inode); | 1156 | nolock = btrfs_is_free_space_inode(root, inode); |
1143 | 1157 | ||
@@ -1147,6 +1161,15 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1147 | trans = btrfs_join_transaction(root); | 1161 | trans = btrfs_join_transaction(root); |
1148 | 1162 | ||
1149 | if (IS_ERR(trans)) { | 1163 | if (IS_ERR(trans)) { |
1164 | extent_clear_unlock_delalloc(inode, | ||
1165 | &BTRFS_I(inode)->io_tree, | ||
1166 | start, end, locked_page, | ||
1167 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1168 | EXTENT_CLEAR_UNLOCK | | ||
1169 | EXTENT_CLEAR_DELALLOC | | ||
1170 | EXTENT_CLEAR_DIRTY | | ||
1171 | EXTENT_SET_WRITEBACK | | ||
1172 | EXTENT_END_WRITEBACK); | ||
1150 | btrfs_free_path(path); | 1173 | btrfs_free_path(path); |
1151 | return PTR_ERR(trans); | 1174 | return PTR_ERR(trans); |
1152 | } | 1175 | } |
@@ -1327,8 +1350,11 @@ out_check: | |||
1327 | } | 1350 | } |
1328 | btrfs_release_path(path); | 1351 | btrfs_release_path(path); |
1329 | 1352 | ||
1330 | if (cur_offset <= end && cow_start == (u64)-1) | 1353 | if (cur_offset <= end && cow_start == (u64)-1) { |
1331 | cow_start = cur_offset; | 1354 | cow_start = cur_offset; |
1355 | cur_offset = end; | ||
1356 | } | ||
1357 | |||
1332 | if (cow_start != (u64)-1) { | 1358 | if (cow_start != (u64)-1) { |
1333 | ret = cow_file_range(inode, locked_page, cow_start, end, | 1359 | ret = cow_file_range(inode, locked_page, cow_start, end, |
1334 | page_started, nr_written, 1); | 1360 | page_started, nr_written, 1); |
@@ -1347,6 +1373,17 @@ error: | |||
1347 | if (!ret) | 1373 | if (!ret) |
1348 | ret = err; | 1374 | ret = err; |
1349 | 1375 | ||
1376 | if (ret && cur_offset < end) | ||
1377 | extent_clear_unlock_delalloc(inode, | ||
1378 | &BTRFS_I(inode)->io_tree, | ||
1379 | cur_offset, end, locked_page, | ||
1380 | EXTENT_CLEAR_UNLOCK_PAGE | | ||
1381 | EXTENT_CLEAR_UNLOCK | | ||
1382 | EXTENT_CLEAR_DELALLOC | | ||
1383 | EXTENT_CLEAR_DIRTY | | ||
1384 | EXTENT_SET_WRITEBACK | | ||
1385 | EXTENT_END_WRITEBACK); | ||
1386 | |||
1350 | btrfs_free_path(path); | 1387 | btrfs_free_path(path); |
1351 | return ret; | 1388 | return ret; |
1352 | } | 1389 | } |
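
run_delalloc_nocow() used to return from its early failure paths with the delalloc range still locked; the hunks above now release it with extent_clear_unlock_delalloc() at every exit (failed path allocation, failed transaction join, and an error while cur_offset < end), always with the same six flags. A hedged sketch of a wrapper that would name that cleanup once; the helper name is hypothetical, the call and flags are copied from the diff:

        /* Hypothetical helper (not in the patch): release a delalloc range on an
         * error exit so no page in [start, end] is left locked, dirty, or waiting
         * for writeback.  The call and flag set match the three sites above. */
        static void btrfs_cleanup_delalloc_range(struct inode *inode,
                                                 struct page *locked_page,
                                                 u64 start, u64 end)
        {
                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                             start, end, locked_page,
                                             EXTENT_CLEAR_UNLOCK_PAGE |
                                             EXTENT_CLEAR_UNLOCK |
                                             EXTENT_CLEAR_DELALLOC |
                                             EXTENT_CLEAR_DIRTY |
                                             EXTENT_SET_WRITEBACK |
                                             EXTENT_END_WRITEBACK);
        }

Each error site would then reduce to one call followed by the return, for example the -ENOMEM path after a failed btrfs_alloc_path().
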
@@ -1361,20 +1398,23 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, | |||
1361 | int ret; | 1398 | int ret; |
1362 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1399 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1363 | 1400 | ||
1364 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) | 1401 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) { |
1365 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1402 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
1366 | page_started, 1, nr_written); | 1403 | page_started, 1, nr_written); |
1367 | else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) | 1404 | } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) { |
1368 | ret = run_delalloc_nocow(inode, locked_page, start, end, | 1405 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
1369 | page_started, 0, nr_written); | 1406 | page_started, 0, nr_written); |
1370 | else if (!btrfs_test_opt(root, COMPRESS) && | 1407 | } else if (!btrfs_test_opt(root, COMPRESS) && |
1371 | !(BTRFS_I(inode)->force_compress) && | 1408 | !(BTRFS_I(inode)->force_compress) && |
1372 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) | 1409 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) { |
1373 | ret = cow_file_range(inode, locked_page, start, end, | 1410 | ret = cow_file_range(inode, locked_page, start, end, |
1374 | page_started, nr_written, 1); | 1411 | page_started, nr_written, 1); |
1375 | else | 1412 | } else { |
1413 | set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
1414 | &BTRFS_I(inode)->runtime_flags); | ||
1376 | ret = cow_file_range_async(inode, locked_page, start, end, | 1415 | ret = cow_file_range_async(inode, locked_page, start, end, |
1377 | page_started, nr_written); | 1416 | page_started, nr_written); |
1417 | } | ||
1378 | return ret; | 1418 | return ret; |
1379 | } | 1419 | } |
1380 | 1420 | ||
@@ -7054,10 +7094,13 @@ static void fixup_inode_flags(struct inode *dir, struct inode *inode) | |||
7054 | else | 7094 | else |
7055 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; | 7095 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; |
7056 | 7096 | ||
7057 | if (b_dir->flags & BTRFS_INODE_COMPRESS) | 7097 | if (b_dir->flags & BTRFS_INODE_COMPRESS) { |
7058 | b_inode->flags |= BTRFS_INODE_COMPRESS; | 7098 | b_inode->flags |= BTRFS_INODE_COMPRESS; |
7059 | else | 7099 | b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS; |
7060 | b_inode->flags &= ~BTRFS_INODE_COMPRESS; | 7100 | } else { |
7101 | b_inode->flags &= ~(BTRFS_INODE_COMPRESS | | ||
7102 | BTRFS_INODE_NOCOMPRESS); | ||
7103 | } | ||
7061 | } | 7104 | } |
7062 | 7105 | ||
7063 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | 7106 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 24b776c08d99..0e92e5763005 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include "locking.h" | 52 | #include "locking.h" |
53 | #include "inode-map.h" | 53 | #include "inode-map.h" |
54 | #include "backref.h" | 54 | #include "backref.h" |
55 | #include "rcu-string.h" | ||
55 | 56 | ||
56 | /* Mask out flags that are inappropriate for the given type of inode. */ | 57 | /* Mask out flags that are inappropriate for the given type of inode. */ |
57 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) | 58 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
@@ -785,39 +786,57 @@ none: | |||
785 | return -ENOENT; | 786 | return -ENOENT; |
786 | } | 787 | } |
787 | 788 | ||
788 | /* | 789 | static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) |
789 | * Validity check of prev em and next em: | ||
790 | * 1) no prev/next em | ||
791 | * 2) prev/next em is a hole/inline extent | ||
792 | */ | ||
793 | static int check_adjacent_extents(struct inode *inode, struct extent_map *em) | ||
794 | { | 790 | { |
795 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 791 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
796 | struct extent_map *prev = NULL, *next = NULL; | 792 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
797 | int ret = 0; | 793 | struct extent_map *em; |
794 | u64 len = PAGE_CACHE_SIZE; | ||
798 | 795 | ||
796 | /* | ||
797 | * hopefully we have this extent in the tree already, try without | ||
798 | * the full extent lock | ||
799 | */ | ||
799 | read_lock(&em_tree->lock); | 800 | read_lock(&em_tree->lock); |
800 | prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1); | 801 | em = lookup_extent_mapping(em_tree, start, len); |
801 | next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1); | ||
802 | read_unlock(&em_tree->lock); | 802 | read_unlock(&em_tree->lock); |
803 | 803 | ||
804 | if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) && | 804 | if (!em) { |
805 | (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)) | 805 | /* get the big lock and read metadata off disk */ |
806 | ret = 1; | 806 | lock_extent(io_tree, start, start + len - 1); |
807 | free_extent_map(prev); | 807 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); |
808 | free_extent_map(next); | 808 | unlock_extent(io_tree, start, start + len - 1); |
809 | |||
810 | if (IS_ERR(em)) | ||
811 | return NULL; | ||
812 | } | ||
813 | |||
814 | return em; | ||
815 | } | ||
816 | |||
817 | static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) | ||
818 | { | ||
819 | struct extent_map *next; | ||
820 | bool ret = true; | ||
809 | 821 | ||
822 | /* this is the last extent */ | ||
823 | if (em->start + em->len >= i_size_read(inode)) | ||
824 | return false; | ||
825 | |||
826 | next = defrag_lookup_extent(inode, em->start + em->len); | ||
827 | if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) | ||
828 | ret = false; | ||
829 | |||
830 | free_extent_map(next); | ||
810 | return ret; | 831 | return ret; |
811 | } | 832 | } |
812 | 833 | ||
813 | static int should_defrag_range(struct inode *inode, u64 start, u64 len, | 834 | static int should_defrag_range(struct inode *inode, u64 start, int thresh, |
814 | int thresh, u64 *last_len, u64 *skip, | 835 | u64 *last_len, u64 *skip, u64 *defrag_end) |
815 | u64 *defrag_end) | ||
816 | { | 836 | { |
817 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 837 | struct extent_map *em; |
818 | struct extent_map *em = NULL; | ||
819 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
820 | int ret = 1; | 838 | int ret = 1; |
839 | bool next_mergeable = true; | ||
821 | 840 | ||
822 | /* | 841 | /* |
823 | * make sure that once we start defragging an extent, we keep on | 842 | * make sure that once we start defragging an extent, we keep on |
@@ -828,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
828 | 847 | ||
829 | *skip = 0; | 848 | *skip = 0; |
830 | 849 | ||
831 | /* | 850 | em = defrag_lookup_extent(inode, start); |
832 | * hopefully we have this extent in the tree already, try without | 851 | if (!em) |
833 | * the full extent lock | 852 | return 0; |
834 | */ | ||
835 | read_lock(&em_tree->lock); | ||
836 | em = lookup_extent_mapping(em_tree, start, len); | ||
837 | read_unlock(&em_tree->lock); | ||
838 | |||
839 | if (!em) { | ||
840 | /* get the big lock and read metadata off disk */ | ||
841 | lock_extent(io_tree, start, start + len - 1); | ||
842 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | ||
843 | unlock_extent(io_tree, start, start + len - 1); | ||
844 | |||
845 | if (IS_ERR(em)) | ||
846 | return 0; | ||
847 | } | ||
848 | 853 | ||
849 | /* this will cover holes, and inline extents */ | 854 | /* this will cover holes, and inline extents */ |
850 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { | 855 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { |
@@ -852,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
852 | goto out; | 857 | goto out; |
853 | } | 858 | } |
854 | 859 | ||
855 | /* If we have nothing to merge with us, just skip. */ | 860 | next_mergeable = defrag_check_next_extent(inode, em); |
856 | if (check_adjacent_extents(inode, em)) { | ||
857 | ret = 0; | ||
858 | goto out; | ||
859 | } | ||
860 | 861 | ||
861 | /* | 862 | /* |
862 | * we hit a real extent, if it is big don't bother defragging it again | 863 | * we hit a real extent, if it is big or the next extent is not a |
864 | * real extent, don't bother defragging it | ||
863 | */ | 865 | */ |
864 | if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) | 866 | if ((*last_len == 0 || *last_len >= thresh) && |
867 | (em->len >= thresh || !next_mergeable)) | ||
865 | ret = 0; | 868 | ret = 0; |
866 | |||
867 | out: | 869 | out: |
868 | /* | 870 | /* |
869 | * last_len ends up being a counter of how many bytes we've defragged. | 871 | * last_len ends up being a counter of how many bytes we've defragged. |
@@ -1142,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, | |||
1142 | break; | 1144 | break; |
1143 | 1145 | ||
1144 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, | 1146 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, |
1145 | PAGE_CACHE_SIZE, extent_thresh, | 1147 | extent_thresh, &last_len, &skip, |
1146 | &last_len, &skip, &defrag_end)) { | 1148 | &defrag_end)) { |
1147 | unsigned long next; | 1149 | unsigned long next; |
1148 | /* | 1150 | /* |
1149 | * the should_defrag function tells us how much to skip | 1151 | * the should_defrag function tells us how much to skip |
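
The defrag rework above splits the old check_adjacent_extents() logic into defrag_lookup_extent(), which finds one page-sized extent mapping (taking the extent lock only when the map is not already cached), and defrag_check_next_extent(), which reports whether the following extent is something real a rewrite could merge with. should_defrag_range() then drops its length argument, still filters out holes and inline extents first, and otherwise skips an extent either because it is already large or because nothing mergeable follows it. A compact restatement of that skip rule; the function name is hypothetical, the condition is the one in the hunk:

        /* Hypothetical condensation of the new test in should_defrag_range():
         * an extent is skipped when we are not in the middle of building up a
         * small run (last_len is zero or already past the threshold) and it is
         * either big enough on its own or has nothing mergeable after it. */
        static bool defrag_skip_extent(u64 last_len, int thresh,
                                       u64 em_len, bool next_mergeable)
        {
                return (last_len == 0 || last_len >= thresh) &&
                       (em_len >= thresh || !next_mergeable);
        }
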
@@ -1304,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1304 | ret = -EINVAL; | 1306 | ret = -EINVAL; |
1305 | goto out_free; | 1307 | goto out_free; |
1306 | } | 1308 | } |
1309 | if (device->fs_devices && device->fs_devices->seeding) { | ||
1310 | printk(KERN_INFO "btrfs: resizer unable to apply on " | ||
1311 | "seeding device %llu\n", | ||
1312 | (unsigned long long)devid); | ||
1313 | ret = -EINVAL; | ||
1314 | goto out_free; | ||
1315 | } | ||
1316 | |||
1307 | if (!strcmp(sizestr, "max")) | 1317 | if (!strcmp(sizestr, "max")) |
1308 | new_size = device->bdev->bd_inode->i_size; | 1318 | new_size = device->bdev->bd_inode->i_size; |
1309 | else { | 1319 | else { |
@@ -1345,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, | |||
1345 | do_div(new_size, root->sectorsize); | 1355 | do_div(new_size, root->sectorsize); |
1346 | new_size *= root->sectorsize; | 1356 | new_size *= root->sectorsize; |
1347 | 1357 | ||
1348 | printk(KERN_INFO "btrfs: new size for %s is %llu\n", | 1358 | printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n", |
1349 | device->name, (unsigned long long)new_size); | 1359 | rcu_str_deref(device->name), |
1360 | (unsigned long long)new_size); | ||
1350 | 1361 | ||
1351 | if (new_size > old_size) { | 1362 | if (new_size > old_size) { |
1352 | trans = btrfs_start_transaction(root, 0); | 1363 | trans = btrfs_start_transaction(root, 0); |
@@ -2264,7 +2275,12 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) | |||
2264 | di_args->total_bytes = dev->total_bytes; | 2275 | di_args->total_bytes = dev->total_bytes; |
2265 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); | 2276 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); |
2266 | if (dev->name) { | 2277 | if (dev->name) { |
2267 | strncpy(di_args->path, dev->name, sizeof(di_args->path)); | 2278 | struct rcu_string *name; |
2279 | |||
2280 | rcu_read_lock(); | ||
2281 | name = rcu_dereference(dev->name); | ||
2282 | strncpy(di_args->path, name->str, sizeof(di_args->path)); | ||
2283 | rcu_read_unlock(); | ||
2268 | di_args->path[sizeof(di_args->path) - 1] = 0; | 2284 | di_args->path[sizeof(di_args->path) - 1] = 0; |
2269 | } else { | 2285 | } else { |
2270 | di_args->path[0] = '\0'; | 2286 | di_args->path[0] = '\0'; |
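
The dev_info hunk shows the reader-side pattern the rcu-string conversion (introduced in fs/btrfs/rcu-string.h below) imposes: the name may be replaced and freed after a grace period, so it is only dereferenced and copied inside rcu_read_lock()/rcu_read_unlock(), and the fixed-size destination is NUL-terminated afterwards because strncpy() does not guarantee that. A small sketch of that copy-out as a helper; the helper name is hypothetical, the calls mirror the hunk:

        /* Hypothetical helper: copy an RCU-protected device name into a
         * caller-supplied fixed-size buffer.  The string is only valid inside
         * the RCU read-side section, so the copy happens there. */
        static void btrfs_dev_name_copy(struct btrfs_device *dev, char *buf, size_t len)
        {
                struct rcu_string *name;

                rcu_read_lock();
                name = rcu_dereference(dev->name);
                if (name)
                        strncpy(buf, name->str, len);
                else
                        buf[0] = '\0';
                rcu_read_unlock();
                buf[len - 1] = '\0';    /* strncpy may not terminate on truncation */
        }
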
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 9e138cdc36c5..643335a4fe3c 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -627,7 +627,27 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) | |||
627 | /* start IO across the range first to instantiate any delalloc | 627 | /* start IO across the range first to instantiate any delalloc |
628 | * extents | 628 | * extents |
629 | */ | 629 | */ |
630 | filemap_write_and_wait_range(inode->i_mapping, start, orig_end); | 630 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); |
631 | |||
632 | /* | ||
633 | * So with compression we will find and lock a dirty page and clear the | ||
634 | * first one as dirty, setup an async extent, and immediately return | ||
635 | * with the entire range locked but with nobody actually marked with | ||
636 | * writeback. So we can't just filemap_write_and_wait_range() and | ||
637 | * expect it to work since it will just kick off a thread to do the | ||
638 | * actual work. So we need to call filemap_fdatawrite_range _again_ | ||
639 | * since it will wait on the page lock, which won't be unlocked until | ||
640 | * after the pages have been marked as writeback and so we're good to go | ||
641 | * from there. We have to do this otherwise we'll miss the ordered | ||
642 | * extents and that results in badness. Please Josef, do not think you | ||
643 | * know better and pull this out at some point in the future, it is | ||
644 | * right and you are wrong. | ||
645 | */ | ||
646 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | ||
647 | &BTRFS_I(inode)->runtime_flags)) | ||
648 | filemap_fdatawrite_range(inode->i_mapping, start, orig_end); | ||
649 | |||
650 | filemap_fdatawait_range(inode->i_mapping, start, orig_end); | ||
631 | 651 | ||
632 | end = orig_end; | 652 | end = orig_end; |
633 | found = 0; | 653 | found = 0; |
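
This hunk works together with the run_delalloc_range() hunk in fs/btrfs/inode.c above, which sets BTRFS_INODE_HAS_ASYNC_EXTENT whenever the async/compressed path is used. As the in-code comment explains, the first write-out pass of a compressed range can return with pages locked but not yet marked writeback, so a plain filemap_write_and_wait_range() may wait on nothing and miss ordered extents; the second filemap_fdatawrite_range() blocks on those page locks and only returns once writeback has really been set up, after which the explicit wait is safe. Condensed, the sequence is:

        /* Condensed from the hunk above (comments shortened). */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags))
                /* compressed writeback: kick the range again; this blocks on the
                 * page locks until writeback is actually set on the pages */
                filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        filemap_fdatawait_range(inode->i_mapping, start, orig_end);
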
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h new file mode 100644 index 000000000000..9e111e4576d4 --- /dev/null +++ b/fs/btrfs/rcu-string.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Red Hat. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public | ||
6 | * License v2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public | ||
14 | * License along with this program; if not, write to the | ||
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
16 | * Boston, MA 021110-1307, USA. | ||
17 | */ | ||
18 | |||
19 | struct rcu_string { | ||
20 | struct rcu_head rcu; | ||
21 | char str[0]; | ||
22 | }; | ||
23 | |||
24 | static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) | ||
25 | { | ||
26 | size_t len = strlen(src) + 1; | ||
27 | struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) + | ||
28 | (len * sizeof(char)), mask); | ||
29 | if (!ret) | ||
30 | return ret; | ||
31 | strncpy(ret->str, src, len); | ||
32 | return ret; | ||
33 | } | ||
34 | |||
35 | static inline void rcu_string_free(struct rcu_string *str) | ||
36 | { | ||
37 | if (str) | ||
38 | kfree_rcu(str, rcu); | ||
39 | } | ||
40 | |||
41 | #define printk_in_rcu(fmt, ...) do { \ | ||
42 | rcu_read_lock(); \ | ||
43 | printk(fmt, __VA_ARGS__); \ | ||
44 | rcu_read_unlock(); \ | ||
45 | } while (0) | ||
46 | |||
47 | #define printk_ratelimited_in_rcu(fmt, ...) do { \ | ||
48 | rcu_read_lock(); \ | ||
49 | printk_ratelimited(fmt, __VA_ARGS__); \ | ||
50 | rcu_read_unlock(); \ | ||
51 | } while (0) | ||
52 | |||
53 | #define rcu_str_deref(rcu_str) ({ \ | ||
54 | struct rcu_string *__str = rcu_dereference(rcu_str); \ | ||
55 | __str->str; \ | ||
56 | }) | ||
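
The new rcu-string.h is the core of this series' device-name rework: a device name becomes an RCU-managed allocation, so frequent readers (log messages, ioctls, /proc/mounts) can look at it without taking device_list_mutex, while a rename publishes a new string and lets the old one go after a grace period. A minimal usage sketch under those assumptions; my_dev and its functions are invented for illustration, the rcu_string helpers are the ones defined above, and the writer is assumed to be serialized by whatever lock the caller already holds:

        struct my_dev {
                struct rcu_string *name;        /* same shape as the btrfs_device field below */
        };

        /* writer side: publish the new name, free the old one after a grace period */
        static int my_dev_rename(struct my_dev *dev, const char *path)
        {
                struct rcu_string *name, *old;

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
                old = dev->name;                        /* safe: writers are serialized */
                rcu_assign_pointer(dev->name, name);    /* readers now see the new string */
                rcu_string_free(old);                   /* kfree_rcu(): waits out readers */
                return 0;
        }

        /* reader side: the string is only dereferenced inside the RCU section
         * that printk_in_rcu() opens around the printk */
        static void my_dev_report(struct my_dev *dev)
        {
                printk_in_rcu(KERN_INFO "dev name: %s\n", rcu_str_deref(dev->name));
        }

The old string must go through rcu_string_free()/kfree_rcu() rather than a plain kfree(), which is exactly what the volumes.c hunks below switch every kfree(device->name) to.
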
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index a38cfa4f251e..b223620cd5a6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "backref.h" | 26 | #include "backref.h" |
27 | #include "extent_io.h" | 27 | #include "extent_io.h" |
28 | #include "check-integrity.h" | 28 | #include "check-integrity.h" |
29 | #include "rcu-string.h" | ||
29 | 30 | ||
30 | /* | 31 | /* |
31 | * This is only the first step towards a full-features scrub. It reads all | 32 | * This is only the first step towards a full-features scrub. It reads all |
@@ -320,10 +321,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
320 | * hold all of the paths here | 321 | * hold all of the paths here |
321 | */ | 322 | */ |
322 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) | 323 | for (i = 0; i < ipath->fspath->elem_cnt; ++i) |
323 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 324 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
324 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " | 325 | "%s, sector %llu, root %llu, inode %llu, offset %llu, " |
325 | "length %llu, links %u (path: %s)\n", swarn->errstr, | 326 | "length %llu, links %u (path: %s)\n", swarn->errstr, |
326 | swarn->logical, swarn->dev->name, | 327 | swarn->logical, rcu_str_deref(swarn->dev->name), |
327 | (unsigned long long)swarn->sector, root, inum, offset, | 328 | (unsigned long long)swarn->sector, root, inum, offset, |
328 | min(isize - offset, (u64)PAGE_SIZE), nlink, | 329 | min(isize - offset, (u64)PAGE_SIZE), nlink, |
329 | (char *)(unsigned long)ipath->fspath->val[i]); | 330 | (char *)(unsigned long)ipath->fspath->val[i]); |
@@ -332,10 +333,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx) | |||
332 | return 0; | 333 | return 0; |
333 | 334 | ||
334 | err: | 335 | err: |
335 | printk(KERN_WARNING "btrfs: %s at logical %llu on dev " | 336 | printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " |
336 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " | 337 | "%s, sector %llu, root %llu, inode %llu, offset %llu: path " |
337 | "resolving failed with ret=%d\n", swarn->errstr, | 338 | "resolving failed with ret=%d\n", swarn->errstr, |
338 | swarn->logical, swarn->dev->name, | 339 | swarn->logical, rcu_str_deref(swarn->dev->name), |
339 | (unsigned long long)swarn->sector, root, inum, offset, ret); | 340 | (unsigned long long)swarn->sector, root, inum, offset, ret); |
340 | 341 | ||
341 | free_ipath(ipath); | 342 | free_ipath(ipath); |
@@ -390,10 +391,11 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
390 | do { | 391 | do { |
391 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, | 392 | ret = tree_backref_for_extent(&ptr, eb, ei, item_size, |
392 | &ref_root, &ref_level); | 393 | &ref_root, &ref_level); |
393 | printk(KERN_WARNING | 394 | printk_in_rcu(KERN_WARNING |
394 | "btrfs: %s at logical %llu on dev %s, " | 395 | "btrfs: %s at logical %llu on dev %s, " |
395 | "sector %llu: metadata %s (level %d) in tree " | 396 | "sector %llu: metadata %s (level %d) in tree " |
396 | "%llu\n", errstr, swarn.logical, dev->name, | 397 | "%llu\n", errstr, swarn.logical, |
398 | rcu_str_deref(dev->name), | ||
397 | (unsigned long long)swarn.sector, | 399 | (unsigned long long)swarn.sector, |
398 | ref_level ? "node" : "leaf", | 400 | ref_level ? "node" : "leaf", |
399 | ret < 0 ? -1 : ref_level, | 401 | ret < 0 ? -1 : ref_level, |
@@ -580,9 +582,11 @@ out: | |||
580 | spin_lock(&sdev->stat_lock); | 582 | spin_lock(&sdev->stat_lock); |
581 | ++sdev->stat.uncorrectable_errors; | 583 | ++sdev->stat.uncorrectable_errors; |
582 | spin_unlock(&sdev->stat_lock); | 584 | spin_unlock(&sdev->stat_lock); |
583 | printk_ratelimited(KERN_ERR | 585 | |
586 | printk_ratelimited_in_rcu(KERN_ERR | ||
584 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", | 587 | "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", |
585 | (unsigned long long)fixup->logical, sdev->dev->name); | 588 | (unsigned long long)fixup->logical, |
589 | rcu_str_deref(sdev->dev->name)); | ||
586 | } | 590 | } |
587 | 591 | ||
588 | btrfs_free_path(path); | 592 | btrfs_free_path(path); |
@@ -936,18 +940,20 @@ corrected_error: | |||
936 | spin_lock(&sdev->stat_lock); | 940 | spin_lock(&sdev->stat_lock); |
937 | sdev->stat.corrected_errors++; | 941 | sdev->stat.corrected_errors++; |
938 | spin_unlock(&sdev->stat_lock); | 942 | spin_unlock(&sdev->stat_lock); |
939 | printk_ratelimited(KERN_ERR | 943 | printk_ratelimited_in_rcu(KERN_ERR |
940 | "btrfs: fixed up error at logical %llu on dev %s\n", | 944 | "btrfs: fixed up error at logical %llu on dev %s\n", |
941 | (unsigned long long)logical, sdev->dev->name); | 945 | (unsigned long long)logical, |
946 | rcu_str_deref(sdev->dev->name)); | ||
942 | } | 947 | } |
943 | } else { | 948 | } else { |
944 | did_not_correct_error: | 949 | did_not_correct_error: |
945 | spin_lock(&sdev->stat_lock); | 950 | spin_lock(&sdev->stat_lock); |
946 | sdev->stat.uncorrectable_errors++; | 951 | sdev->stat.uncorrectable_errors++; |
947 | spin_unlock(&sdev->stat_lock); | 952 | spin_unlock(&sdev->stat_lock); |
948 | printk_ratelimited(KERN_ERR | 953 | printk_ratelimited_in_rcu(KERN_ERR |
949 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", | 954 | "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", |
950 | (unsigned long long)logical, sdev->dev->name); | 955 | (unsigned long long)logical, |
956 | rcu_str_deref(sdev->dev->name)); | ||
951 | } | 957 | } |
952 | 958 | ||
953 | out: | 959 | out: |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 96eb9fef7bd2..0eb9a4da069e 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include "version.h" | 54 | #include "version.h" |
55 | #include "export.h" | 55 | #include "export.h" |
56 | #include "compression.h" | 56 | #include "compression.h" |
57 | #include "rcu-string.h" | ||
57 | 58 | ||
58 | #define CREATE_TRACE_POINTS | 59 | #define CREATE_TRACE_POINTS |
59 | #include <trace/events/btrfs.h> | 60 | #include <trace/events/btrfs.h> |
@@ -1482,12 +1483,44 @@ static void btrfs_fs_dirty_inode(struct inode *inode, int flags) | |||
1482 | "error %d\n", btrfs_ino(inode), ret); | 1483 | "error %d\n", btrfs_ino(inode), ret); |
1483 | } | 1484 | } |
1484 | 1485 | ||
1486 | static int btrfs_show_devname(struct seq_file *m, struct dentry *root) | ||
1487 | { | ||
1488 | struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); | ||
1489 | struct btrfs_fs_devices *cur_devices; | ||
1490 | struct btrfs_device *dev, *first_dev = NULL; | ||
1491 | struct list_head *head; | ||
1492 | struct rcu_string *name; | ||
1493 | |||
1494 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | ||
1495 | cur_devices = fs_info->fs_devices; | ||
1496 | while (cur_devices) { | ||
1497 | head = &cur_devices->devices; | ||
1498 | list_for_each_entry(dev, head, dev_list) { | ||
1499 | if (!first_dev || dev->devid < first_dev->devid) | ||
1500 | first_dev = dev; | ||
1501 | } | ||
1502 | cur_devices = cur_devices->seed; | ||
1503 | } | ||
1504 | |||
1505 | if (first_dev) { | ||
1506 | rcu_read_lock(); | ||
1507 | name = rcu_dereference(first_dev->name); | ||
1508 | seq_escape(m, name->str, " \t\n\\"); | ||
1509 | rcu_read_unlock(); | ||
1510 | } else { | ||
1511 | WARN_ON(1); | ||
1512 | } | ||
1513 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
1514 | return 0; | ||
1515 | } | ||
1516 | |||
1485 | static const struct super_operations btrfs_super_ops = { | 1517 | static const struct super_operations btrfs_super_ops = { |
1486 | .drop_inode = btrfs_drop_inode, | 1518 | .drop_inode = btrfs_drop_inode, |
1487 | .evict_inode = btrfs_evict_inode, | 1519 | .evict_inode = btrfs_evict_inode, |
1488 | .put_super = btrfs_put_super, | 1520 | .put_super = btrfs_put_super, |
1489 | .sync_fs = btrfs_sync_fs, | 1521 | .sync_fs = btrfs_sync_fs, |
1490 | .show_options = btrfs_show_options, | 1522 | .show_options = btrfs_show_options, |
1523 | .show_devname = btrfs_show_devname, | ||
1491 | .write_inode = btrfs_write_inode, | 1524 | .write_inode = btrfs_write_inode, |
1492 | .dirty_inode = btrfs_fs_dirty_inode, | 1525 | .dirty_inode = btrfs_fs_dirty_inode, |
1493 | .alloc_inode = btrfs_alloc_inode, | 1526 | .alloc_inode = btrfs_alloc_inode, |
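
btrfs_show_devname() is what makes /proc/mounts report a device path now that .show_devname is wired into btrfs_super_ops: under device_list_mutex it walks the main device list and every seed list, picks the device with the lowest devid (a deterministic choice), and prints the RCU-protected name through seq_escape() so whitespace or backslashes in the path cannot corrupt the mount table format. The selection loop, trimmed from the hunk above:

        /* Trimmed sketch; locking setup and the WARN_ON for the no-device case
         * follow the hunk above. */
        struct btrfs_device *dev, *first_dev = NULL;

        while (cur_devices) {
                list_for_each_entry(dev, &cur_devices->devices, dev_list)
                        if (!first_dev || dev->devid < first_dev->devid)
                                first_dev = dev;
                cur_devices = cur_devices->seed;        /* follow the seed-device chain */
        }

        if (first_dev) {
                rcu_read_lock();
                seq_escape(m, rcu_dereference(first_dev->name)->str, " \t\n\\");
                rcu_read_unlock();
        }
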
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1791c6e3d834..b72b068183ec 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -100,6 +100,10 @@ loop: | |||
100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | 100 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); |
101 | cur_trans = fs_info->running_transaction; | 101 | cur_trans = fs_info->running_transaction; |
102 | goto loop; | 102 | goto loop; |
103 | } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | ||
104 | spin_unlock(&root->fs_info->trans_lock); | ||
105 | kmem_cache_free(btrfs_transaction_cachep, cur_trans); | ||
106 | return -EROFS; | ||
103 | } | 107 | } |
104 | 108 | ||
105 | atomic_set(&cur_trans->num_writers, 1); | 109 | atomic_set(&cur_trans->num_writers, 1); |
@@ -1213,14 +1217,20 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
1213 | 1217 | ||
1214 | 1218 | ||
1215 | static void cleanup_transaction(struct btrfs_trans_handle *trans, | 1219 | static void cleanup_transaction(struct btrfs_trans_handle *trans, |
1216 | struct btrfs_root *root) | 1220 | struct btrfs_root *root, int err) |
1217 | { | 1221 | { |
1218 | struct btrfs_transaction *cur_trans = trans->transaction; | 1222 | struct btrfs_transaction *cur_trans = trans->transaction; |
1219 | 1223 | ||
1220 | WARN_ON(trans->use_count > 1); | 1224 | WARN_ON(trans->use_count > 1); |
1221 | 1225 | ||
1226 | btrfs_abort_transaction(trans, root, err); | ||
1227 | |||
1222 | spin_lock(&root->fs_info->trans_lock); | 1228 | spin_lock(&root->fs_info->trans_lock); |
1223 | list_del_init(&cur_trans->list); | 1229 | list_del_init(&cur_trans->list); |
1230 | if (cur_trans == root->fs_info->running_transaction) { | ||
1231 | root->fs_info->running_transaction = NULL; | ||
1232 | root->fs_info->trans_no_join = 0; | ||
1233 | } | ||
1224 | spin_unlock(&root->fs_info->trans_lock); | 1234 | spin_unlock(&root->fs_info->trans_lock); |
1225 | 1235 | ||
1226 | btrfs_cleanup_one_transaction(trans->transaction, root); | 1236 | btrfs_cleanup_one_transaction(trans->transaction, root); |
@@ -1526,7 +1536,7 @@ cleanup_transaction: | |||
1526 | // WARN_ON(1); | 1536 | // WARN_ON(1); |
1527 | if (current->journal_info == trans) | 1537 | if (current->journal_info == trans) |
1528 | current->journal_info = NULL; | 1538 | current->journal_info = NULL; |
1529 | cleanup_transaction(trans, root); | 1539 | cleanup_transaction(trans, root, ret); |
1530 | 1540 | ||
1531 | return ret; | 1541 | return ret; |
1532 | } | 1542 | } |
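
The transaction.c changes harden the abort path in two ways: join_transaction() now refuses to open a transaction once the filesystem is flagged with an error state, returning -EROFS instead of letting a writer join a doomed transaction, and cleanup_transaction() receives the error that broke the commit so it can abort the transaction with the real cause and, when the transaction being torn down is still the running one, detach it and clear trans_no_join, presumably so later join attempts fail cleanly instead of waiting on a commit that will never finish. Condensed from the hunks above:

        static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, int err)
        {
                struct btrfs_transaction *cur_trans = trans->transaction;

                btrfs_abort_transaction(trans, root, err);      /* record the real cause */

                spin_lock(&root->fs_info->trans_lock);
                list_del_init(&cur_trans->list);
                if (cur_trans == root->fs_info->running_transaction) {
                        root->fs_info->running_transaction = NULL;
                        root->fs_info->trans_no_join = 0;
                }
                spin_unlock(&root->fs_info->trans_lock);

                /* btrfs_cleanup_one_transaction() and the rest follow unchanged */
        }
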
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7782020996fe..8a3d2594b807 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "volumes.h" | 35 | #include "volumes.h" |
36 | #include "async-thread.h" | 36 | #include "async-thread.h" |
37 | #include "check-integrity.h" | 37 | #include "check-integrity.h" |
38 | #include "rcu-string.h" | ||
38 | 39 | ||
39 | static int init_first_rw_device(struct btrfs_trans_handle *trans, | 40 | static int init_first_rw_device(struct btrfs_trans_handle *trans, |
40 | struct btrfs_root *root, | 41 | struct btrfs_root *root, |
@@ -64,7 +65,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices) | |||
64 | device = list_entry(fs_devices->devices.next, | 65 | device = list_entry(fs_devices->devices.next, |
65 | struct btrfs_device, dev_list); | 66 | struct btrfs_device, dev_list); |
66 | list_del(&device->dev_list); | 67 | list_del(&device->dev_list); |
67 | kfree(device->name); | 68 | rcu_string_free(device->name); |
68 | kfree(device); | 69 | kfree(device); |
69 | } | 70 | } |
70 | kfree(fs_devices); | 71 | kfree(fs_devices); |
@@ -334,8 +335,8 @@ static noinline int device_list_add(const char *path, | |||
334 | { | 335 | { |
335 | struct btrfs_device *device; | 336 | struct btrfs_device *device; |
336 | struct btrfs_fs_devices *fs_devices; | 337 | struct btrfs_fs_devices *fs_devices; |
338 | struct rcu_string *name; | ||
337 | u64 found_transid = btrfs_super_generation(disk_super); | 339 | u64 found_transid = btrfs_super_generation(disk_super); |
338 | char *name; | ||
339 | 340 | ||
340 | fs_devices = find_fsid(disk_super->fsid); | 341 | fs_devices = find_fsid(disk_super->fsid); |
341 | if (!fs_devices) { | 342 | if (!fs_devices) { |
@@ -369,11 +370,13 @@ static noinline int device_list_add(const char *path, | |||
369 | memcpy(device->uuid, disk_super->dev_item.uuid, | 370 | memcpy(device->uuid, disk_super->dev_item.uuid, |
370 | BTRFS_UUID_SIZE); | 371 | BTRFS_UUID_SIZE); |
371 | spin_lock_init(&device->io_lock); | 372 | spin_lock_init(&device->io_lock); |
372 | device->name = kstrdup(path, GFP_NOFS); | 373 | |
373 | if (!device->name) { | 374 | name = rcu_string_strdup(path, GFP_NOFS); |
375 | if (!name) { | ||
374 | kfree(device); | 376 | kfree(device); |
375 | return -ENOMEM; | 377 | return -ENOMEM; |
376 | } | 378 | } |
379 | rcu_assign_pointer(device->name, name); | ||
377 | INIT_LIST_HEAD(&device->dev_alloc_list); | 380 | INIT_LIST_HEAD(&device->dev_alloc_list); |
378 | 381 | ||
379 | /* init readahead state */ | 382 | /* init readahead state */ |
@@ -390,12 +393,12 @@ static noinline int device_list_add(const char *path, | |||
390 | 393 | ||
391 | device->fs_devices = fs_devices; | 394 | device->fs_devices = fs_devices; |
392 | fs_devices->num_devices++; | 395 | fs_devices->num_devices++; |
393 | } else if (!device->name || strcmp(device->name, path)) { | 396 | } else if (!device->name || strcmp(device->name->str, path)) { |
394 | name = kstrdup(path, GFP_NOFS); | 397 | name = rcu_string_strdup(path, GFP_NOFS); |
395 | if (!name) | 398 | if (!name) |
396 | return -ENOMEM; | 399 | return -ENOMEM; |
397 | kfree(device->name); | 400 | rcu_string_free(device->name); |
398 | device->name = name; | 401 | rcu_assign_pointer(device->name, name); |
399 | if (device->missing) { | 402 | if (device->missing) { |
400 | fs_devices->missing_devices--; | 403 | fs_devices->missing_devices--; |
401 | device->missing = 0; | 404 | device->missing = 0; |
@@ -430,15 +433,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) | |||
430 | 433 | ||
431 | /* We have held the volume lock, it is safe to get the devices. */ | 434 | /* We have held the volume lock, it is safe to get the devices. */ |
432 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { | 435 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { |
436 | struct rcu_string *name; | ||
437 | |||
433 | device = kzalloc(sizeof(*device), GFP_NOFS); | 438 | device = kzalloc(sizeof(*device), GFP_NOFS); |
434 | if (!device) | 439 | if (!device) |
435 | goto error; | 440 | goto error; |
436 | 441 | ||
437 | device->name = kstrdup(orig_dev->name, GFP_NOFS); | 442 | /* |
438 | if (!device->name) { | 443 | * This is ok to do without rcu read locked because we hold the |
444 | * uuid mutex so nothing we touch in here is going to disappear. | ||
445 | */ | ||
446 | name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS); | ||
447 | if (!name) { | ||
439 | kfree(device); | 448 | kfree(device); |
440 | goto error; | 449 | goto error; |
441 | } | 450 | } |
451 | rcu_assign_pointer(device->name, name); | ||
442 | 452 | ||
443 | device->devid = orig_dev->devid; | 453 | device->devid = orig_dev->devid; |
444 | device->work.func = pending_bios_fn; | 454 | device->work.func = pending_bios_fn; |
@@ -491,7 +501,7 @@ again: | |||
491 | } | 501 | } |
492 | list_del_init(&device->dev_list); | 502 | list_del_init(&device->dev_list); |
493 | fs_devices->num_devices--; | 503 | fs_devices->num_devices--; |
494 | kfree(device->name); | 504 | rcu_string_free(device->name); |
495 | kfree(device); | 505 | kfree(device); |
496 | } | 506 | } |
497 | 507 | ||
@@ -516,7 +526,7 @@ static void __free_device(struct work_struct *work) | |||
516 | if (device->bdev) | 526 | if (device->bdev) |
517 | blkdev_put(device->bdev, device->mode); | 527 | blkdev_put(device->bdev, device->mode); |
518 | 528 | ||
519 | kfree(device->name); | 529 | rcu_string_free(device->name); |
520 | kfree(device); | 530 | kfree(device); |
521 | } | 531 | } |
522 | 532 | ||
@@ -540,6 +550,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
540 | mutex_lock(&fs_devices->device_list_mutex); | 550 | mutex_lock(&fs_devices->device_list_mutex); |
541 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 551 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
542 | struct btrfs_device *new_device; | 552 | struct btrfs_device *new_device; |
553 | struct rcu_string *name; | ||
543 | 554 | ||
544 | if (device->bdev) | 555 | if (device->bdev) |
545 | fs_devices->open_devices--; | 556 | fs_devices->open_devices--; |
@@ -555,8 +566,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
555 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); | 566 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); |
556 | BUG_ON(!new_device); /* -ENOMEM */ | 567 | BUG_ON(!new_device); /* -ENOMEM */ |
557 | memcpy(new_device, device, sizeof(*new_device)); | 568 | memcpy(new_device, device, sizeof(*new_device)); |
558 | new_device->name = kstrdup(device->name, GFP_NOFS); | 569 | |
559 | BUG_ON(device->name && !new_device->name); /* -ENOMEM */ | 570 | /* Safe because we are under uuid_mutex */ |
571 | name = rcu_string_strdup(device->name->str, GFP_NOFS); | ||
572 | BUG_ON(device->name && !name); /* -ENOMEM */ | ||
573 | rcu_assign_pointer(new_device->name, name); | ||
560 | new_device->bdev = NULL; | 574 | new_device->bdev = NULL; |
561 | new_device->writeable = 0; | 575 | new_device->writeable = 0; |
562 | new_device->in_fs_metadata = 0; | 576 | new_device->in_fs_metadata = 0; |
@@ -621,9 +635,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
621 | if (!device->name) | 635 | if (!device->name) |
622 | continue; | 636 | continue; |
623 | 637 | ||
624 | bdev = blkdev_get_by_path(device->name, flags, holder); | 638 | bdev = blkdev_get_by_path(device->name->str, flags, holder); |
625 | if (IS_ERR(bdev)) { | 639 | if (IS_ERR(bdev)) { |
626 | printk(KERN_INFO "open %s failed\n", device->name); | 640 | printk(KERN_INFO "open %s failed\n", device->name->str); |
627 | goto error; | 641 | goto error; |
628 | } | 642 | } |
629 | filemap_write_and_wait(bdev->bd_inode->i_mapping); | 643 | filemap_write_and_wait(bdev->bd_inode->i_mapping); |
@@ -1632,6 +1646,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1632 | struct block_device *bdev; | 1646 | struct block_device *bdev; |
1633 | struct list_head *devices; | 1647 | struct list_head *devices; |
1634 | struct super_block *sb = root->fs_info->sb; | 1648 | struct super_block *sb = root->fs_info->sb; |
1649 | struct rcu_string *name; | ||
1635 | u64 total_bytes; | 1650 | u64 total_bytes; |
1636 | int seeding_dev = 0; | 1651 | int seeding_dev = 0; |
1637 | int ret = 0; | 1652 | int ret = 0; |
@@ -1671,23 +1686,24 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1671 | goto error; | 1686 | goto error; |
1672 | } | 1687 | } |
1673 | 1688 | ||
1674 | device->name = kstrdup(device_path, GFP_NOFS); | 1689 | name = rcu_string_strdup(device_path, GFP_NOFS); |
1675 | if (!device->name) { | 1690 | if (!name) { |
1676 | kfree(device); | 1691 | kfree(device); |
1677 | ret = -ENOMEM; | 1692 | ret = -ENOMEM; |
1678 | goto error; | 1693 | goto error; |
1679 | } | 1694 | } |
1695 | rcu_assign_pointer(device->name, name); | ||
1680 | 1696 | ||
1681 | ret = find_next_devid(root, &device->devid); | 1697 | ret = find_next_devid(root, &device->devid); |
1682 | if (ret) { | 1698 | if (ret) { |
1683 | kfree(device->name); | 1699 | rcu_string_free(device->name); |
1684 | kfree(device); | 1700 | kfree(device); |
1685 | goto error; | 1701 | goto error; |
1686 | } | 1702 | } |
1687 | 1703 | ||
1688 | trans = btrfs_start_transaction(root, 0); | 1704 | trans = btrfs_start_transaction(root, 0); |
1689 | if (IS_ERR(trans)) { | 1705 | if (IS_ERR(trans)) { |
1690 | kfree(device->name); | 1706 | rcu_string_free(device->name); |
1691 | kfree(device); | 1707 | kfree(device); |
1692 | ret = PTR_ERR(trans); | 1708 | ret = PTR_ERR(trans); |
1693 | goto error; | 1709 | goto error; |
@@ -1796,7 +1812,7 @@ error_trans: | |||
1796 | unlock_chunks(root); | 1812 | unlock_chunks(root); |
1797 | btrfs_abort_transaction(trans, root, ret); | 1813 | btrfs_abort_transaction(trans, root, ret); |
1798 | btrfs_end_transaction(trans, root); | 1814 | btrfs_end_transaction(trans, root); |
1799 | kfree(device->name); | 1815 | rcu_string_free(device->name); |
1800 | kfree(device); | 1816 | kfree(device); |
1801 | error: | 1817 | error: |
1802 | blkdev_put(bdev, FMODE_EXCL); | 1818 | blkdev_put(bdev, FMODE_EXCL); |
@@ -4204,10 +4220,17 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | |||
4204 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; | 4220 | bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; |
4205 | dev = bbio->stripes[dev_nr].dev; | 4221 | dev = bbio->stripes[dev_nr].dev; |
4206 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { | 4222 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { |
4223 | #ifdef DEBUG | ||
4224 | struct rcu_string *name; | ||
4225 | |||
4226 | rcu_read_lock(); | ||
4227 | name = rcu_dereference(dev->name); | ||
4207 | pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu " | 4228 | pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu " |

4208 | "(%s id %llu), size=%u\n", rw, | 4229 | "(%s id %llu), size=%u\n", rw, |
4209 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, | 4230 | (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, |
4210 | dev->name, dev->devid, bio->bi_size); | 4231 | name->str, dev->devid, bio->bi_size); |
4232 | rcu_read_unlock(); | ||
4233 | #endif | ||
4211 | bio->bi_bdev = dev->bdev; | 4234 | bio->bi_bdev = dev->bdev; |
4212 | if (async_submit) | 4235 | if (async_submit) |
4213 | schedule_bio(root, dev, rw, bio); | 4236 | schedule_bio(root, dev, rw, bio); |
@@ -4694,8 +4717,9 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) | |||
4694 | key.offset = device->devid; | 4717 | key.offset = device->devid; |
4695 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); | 4718 | ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); |
4696 | if (ret) { | 4719 | if (ret) { |
4697 | printk(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", | 4720 | printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", |
4698 | device->name, (unsigned long long)device->devid); | 4721 | rcu_str_deref(device->name), |
4722 | (unsigned long long)device->devid); | ||
4699 | __btrfs_reset_dev_stats(device); | 4723 | __btrfs_reset_dev_stats(device); |
4700 | device->dev_stats_valid = 1; | 4724 | device->dev_stats_valid = 1; |
4701 | btrfs_release_path(path); | 4725 | btrfs_release_path(path); |
@@ -4747,8 +4771,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4747 | BUG_ON(!path); | 4771 | BUG_ON(!path); |
4748 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); | 4772 | ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); |
4749 | if (ret < 0) { | 4773 | if (ret < 0) { |
4750 | printk(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", | 4774 | printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", |
4751 | ret, device->name); | 4775 | ret, rcu_str_deref(device->name)); |
4752 | goto out; | 4776 | goto out; |
4753 | } | 4777 | } |
4754 | 4778 | ||
@@ -4757,8 +4781,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4757 | /* need to delete old one and insert a new one */ | 4781 | /* need to delete old one and insert a new one */ |
4758 | ret = btrfs_del_item(trans, dev_root, path); | 4782 | ret = btrfs_del_item(trans, dev_root, path); |
4759 | if (ret != 0) { | 4783 | if (ret != 0) { |
4760 | printk(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", | 4784 | printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", |
4761 | device->name, ret); | 4785 | rcu_str_deref(device->name), ret); |
4762 | goto out; | 4786 | goto out; |
4763 | } | 4787 | } |
4764 | ret = 1; | 4788 | ret = 1; |
@@ -4770,8 +4794,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, | |||
4770 | ret = btrfs_insert_empty_item(trans, dev_root, path, | 4794 | ret = btrfs_insert_empty_item(trans, dev_root, path, |
4771 | &key, sizeof(*ptr)); | 4795 | &key, sizeof(*ptr)); |
4772 | if (ret < 0) { | 4796 | if (ret < 0) { |
4773 | printk(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", | 4797 | printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", |
4774 | device->name, ret); | 4798 | rcu_str_deref(device->name), ret); |
4775 | goto out; | 4799 | goto out; |
4776 | } | 4800 | } |
4777 | } | 4801 | } |
@@ -4823,9 +4847,9 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
4823 | { | 4847 | { |
4824 | if (!dev->dev_stats_valid) | 4848 | if (!dev->dev_stats_valid) |
4825 | return; | 4849 | return; |
4826 | printk_ratelimited(KERN_ERR | 4850 | printk_ratelimited_in_rcu(KERN_ERR |
4827 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4851 | "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
4828 | dev->name, | 4852 | rcu_str_deref(dev->name), |
4829 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4853 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
4830 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4854 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
4831 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4855 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
@@ -4837,8 +4861,8 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) | |||
4837 | 4861 | ||
4838 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) | 4862 | static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) |
4839 | { | 4863 | { |
4840 | printk(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", | 4864 | printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", |
4841 | dev->name, | 4865 | rcu_str_deref(dev->name), |
4842 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), | 4866 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), |
4843 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), | 4867 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), |
4844 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), | 4868 | btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 3406a88ca83e..74366f27a76b 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -58,7 +58,7 @@ struct btrfs_device { | |||
58 | /* the mode sent to blkdev_get */ | 58 | /* the mode sent to blkdev_get */ |
59 | fmode_t mode; | 59 | fmode_t mode; |
60 | 60 | ||
61 | char *name; | 61 | struct rcu_string *name; |
62 | 62 | ||
63 | /* the internal btrfs device id */ | 63 | /* the internal btrfs device id */ |
64 | u64 devid; | 64 | u64 devid; |
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c index e32bc919e4e3..5a7b691e748b 100644 --- a/fs/exofs/sys.c +++ b/fs/exofs/sys.c | |||
@@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = { | |||
109 | static struct kobj_type uuid_ktype = { | 109 | static struct kobj_type uuid_ktype = { |
110 | }; | 110 | }; |
111 | 111 | ||
112 | void exofs_sysfs_dbg_print() | 112 | void exofs_sysfs_dbg_print(void) |
113 | { | 113 | { |
114 | #ifdef CONFIG_EXOFS_DEBUG | 114 | #ifdef CONFIG_EXOFS_DEBUG |
115 | struct kobject *k_name, *k_tmp; | 115 | struct kobject *k_name, *k_tmp; |
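
The exofs change is a one-token C correctness fix: an empty parameter list in a declaration means "unspecified arguments" in C (unlike C++), so callers get no type checking, whereas (void) declares a function that takes none, which is what checks such as -Wstrict-prototypes expect. Side by side:

        /* Not equivalent in C: */
        void exofs_sysfs_dbg_print();           /* old style: parameter types unspecified */
        void exofs_sysfs_dbg_print(void);       /* proper prototype: takes no arguments   */
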
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8d2fb8c88cf3..41a3ccff18d8 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb, | |||
664 | /* Wait for I_SYNC. This function drops i_lock... */ | 664 | /* Wait for I_SYNC. This function drops i_lock... */ |
665 | inode_sleep_on_writeback(inode); | 665 | inode_sleep_on_writeback(inode); |
666 | /* Inode may be gone, start again */ | 666 | /* Inode may be gone, start again */ |
667 | spin_lock(&wb->list_lock); | ||
667 | continue; | 668 | continue; |
668 | } | 669 | } |
669 | inode->i_state |= I_SYNC; | 670 | inode->i_state |= I_SYNC; |
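
The writeback fix is a lock-discipline one-liner: writeback_sb_inodes() walks a list protected by wb->list_lock, and inode_sleep_on_writeback() can only run after that lock has been dropped, so the continue path must re-acquire it before the next loop iteration touches the list again. A generic illustration of the pattern; the structures and names here are invented, not the real writeback ones:

        #include <linux/list.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>
        #include <linux/wait.h>

        struct my_item {
                struct list_head link;
                bool busy;              /* cleared (and wq woken) by some other thread */
        };

        struct my_queue {
                spinlock_t lock;
                struct list_head items;
                wait_queue_head_t wq;
        };

        static void drain_queue(struct my_queue *q)
        {
                spin_lock(&q->lock);
                while (!list_empty(&q->items)) {
                        struct my_item *item =
                                list_first_entry(&q->items, struct my_item, link);

                        if (item->busy) {
                                spin_unlock(&q->lock);          /* cannot sleep holding a spinlock */
                                wait_event(q->wq, !item->busy); /* may sleep; producer side omitted */
                                spin_lock(&q->lock);            /* the line the fix above adds */
                                continue;                       /* re-scan from the head under the lock */
                        }
                        list_del_init(&item->link);
                }
                spin_unlock(&q->lock);
        }
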
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 970659daa323..23ff18fe080a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
18 | #include <linux/sunrpc/svcauth_gss.h> | 18 | #include <linux/sunrpc/svcauth_gss.h> |
19 | #include <linux/sunrpc/bc_xprt.h> | 19 | #include <linux/sunrpc/bc_xprt.h> |
20 | #include <linux/nsproxy.h> | ||
21 | 20 | ||
22 | #include <net/inet_sock.h> | 21 | #include <net/inet_sock.h> |
23 | 22 | ||
@@ -107,7 +106,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
107 | { | 106 | { |
108 | int ret; | 107 | int ret; |
109 | 108 | ||
110 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET, | 109 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET, |
111 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 110 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
112 | if (ret <= 0) | 111 | if (ret <= 0) |
113 | goto out_err; | 112 | goto out_err; |
@@ -115,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
115 | dprintk("NFS: Callback listener port = %u (af %u)\n", | 114 | dprintk("NFS: Callback listener port = %u (af %u)\n", |
116 | nfs_callback_tcpport, PF_INET); | 115 | nfs_callback_tcpport, PF_INET); |
117 | 116 | ||
118 | ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6, | 117 | ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6, |
119 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); | 118 | nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); |
120 | if (ret > 0) { | 119 | if (ret > 0) { |
121 | nfs_callback_tcpport6 = ret; | 120 | nfs_callback_tcpport6 = ret; |
@@ -184,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) | |||
184 | * fore channel connection. | 183 | * fore channel connection. |
185 | * Returns the input port (0) and sets the svc_serv bc_xprt on success | 184 | * Returns the input port (0) and sets the svc_serv bc_xprt on success |
186 | */ | 185 | */ |
187 | ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0, | 186 | ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0, |
188 | SVC_SOCK_ANONYMOUS); | 187 | SVC_SOCK_ANONYMOUS); |
189 | if (ret < 0) { | 188 | if (ret < 0) { |
190 | rqstp = ERR_PTR(ret); | 189 | rqstp = ERR_PTR(ret); |
@@ -254,7 +253,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt) | |||
254 | char svc_name[12]; | 253 | char svc_name[12]; |
255 | int ret = 0; | 254 | int ret = 0; |
256 | int minorversion_setup; | 255 | int minorversion_setup; |
257 | struct net *net = current->nsproxy->net_ns; | 256 | struct net *net = &init_net; |
258 | 257 | ||
259 | mutex_lock(&nfs_callback_mutex); | 258 | mutex_lock(&nfs_callback_mutex); |
260 | if (cb_info->users++ || cb_info->task != NULL) { | 259 | if (cb_info->users++ || cb_info->task != NULL) { |
@@ -330,7 +329,7 @@ void nfs_callback_down(int minorversion) | |||
330 | cb_info->users--; | 329 | cb_info->users--; |
331 | if (cb_info->users == 0 && cb_info->task != NULL) { | 330 | if (cb_info->users == 0 && cb_info->task != NULL) { |
332 | kthread_stop(cb_info->task); | 331 | kthread_stop(cb_info->task); |
333 | svc_shutdown_net(cb_info->serv, current->nsproxy->net_ns); | 332 | svc_shutdown_net(cb_info->serv, &init_net); |
334 | svc_exit_thread(cb_info->rqst); | 333 | svc_exit_thread(cb_info->rqst); |
335 | cb_info->serv = NULL; | 334 | cb_info->serv = NULL; |
336 | cb_info->rqst = NULL; | 335 | cb_info->rqst = NULL; |
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 95bfc243992c..e64b01d2a338 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c | |||
@@ -455,9 +455,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, | |||
455 | args->csa_nrclists = ntohl(*p++); | 455 | args->csa_nrclists = ntohl(*p++); |
456 | args->csa_rclists = NULL; | 456 | args->csa_rclists = NULL; |
457 | if (args->csa_nrclists) { | 457 | if (args->csa_nrclists) { |
458 | args->csa_rclists = kmalloc(args->csa_nrclists * | 458 | args->csa_rclists = kmalloc_array(args->csa_nrclists, |
459 | sizeof(*args->csa_rclists), | 459 | sizeof(*args->csa_rclists), |
460 | GFP_KERNEL); | 460 | GFP_KERNEL); |
461 | if (unlikely(args->csa_rclists == NULL)) | 461 | if (unlikely(args->csa_rclists == NULL)) |
462 | goto out; | 462 | goto out; |
463 | 463 | ||
@@ -696,7 +696,7 @@ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp, | |||
696 | const struct cb_sequenceres *res) | 696 | const struct cb_sequenceres *res) |
697 | { | 697 | { |
698 | __be32 *p; | 698 | __be32 *p; |
699 | unsigned status = res->csr_status; | 699 | __be32 status = res->csr_status; |
700 | 700 | ||
701 | if (unlikely(status != 0)) | 701 | if (unlikely(status != 0)) |
702 | goto out; | 702 | goto out; |
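
The callback_xdr.c hunk carries two small correctness fixes. csa_nrclists comes straight off the wire, so the open-coded kmalloc(n * size) multiplication can overflow size_t and quietly allocate a much smaller buffer than the count implies, while kmalloc_array() checks the multiplication and returns NULL instead. Separately, csr_status is a big-endian on-the-wire value, so typing the local as __be32 rather than unsigned lets sparse flag any use of it as a host-endian integer. A hedged illustration of the overflow case, with hypothetical numbers and a 32-bit size_t assumed:

        u32 n = 0x20000000;                                      /* wire-controlled count */
        void *bad = kmalloc(n * sizeof(u64), GFP_KERNEL);        /* 0x20000000 * 8 wraps to 0:
                                                                  * "succeeds" with a useless buffer */
        void *ok  = kmalloc_array(n, sizeof(u64), GFP_KERNEL);   /* overflow detected: returns NULL */
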
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 7d108753af81..17ba6b995659 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -544,8 +544,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init, | |||
544 | 544 | ||
545 | smp_rmb(); | 545 | smp_rmb(); |
546 | 546 | ||
547 | BUG_ON(clp->cl_cons_state != NFS_CS_READY); | ||
548 | |||
549 | dprintk("<-- %s found nfs_client %p for %s\n", | 547 | dprintk("<-- %s found nfs_client %p for %s\n", |
550 | __func__, clp, cl_init->hostname ?: ""); | 548 | __func__, clp, cl_init->hostname ?: ""); |
551 | return clp; | 549 | return clp; |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index ad2775d3e219..3168f6e3d4d4 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -523,9 +523,9 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) | |||
523 | nfs_list_remove_request(req); | 523 | nfs_list_remove_request(req); |
524 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { | 524 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { |
525 | /* Note the rewrite will go through mds */ | 525 | /* Note the rewrite will go through mds */ |
526 | kref_get(&req->wb_kref); | ||
527 | nfs_mark_request_commit(req, NULL, &cinfo); | 526 | nfs_mark_request_commit(req, NULL, &cinfo); |
528 | } | 527 | } else |
528 | nfs_release_request(req); | ||
529 | nfs_unlock_and_release_request(req); | 529 | nfs_unlock_and_release_request(req); |
530 | } | 530 | } |
531 | 531 | ||
@@ -716,12 +716,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) | |||
716 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) | 716 | if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) |
717 | bit = NFS_IOHDR_NEED_RESCHED; | 717 | bit = NFS_IOHDR_NEED_RESCHED; |
718 | else if (dreq->flags == 0) { | 718 | else if (dreq->flags == 0) { |
719 | memcpy(&dreq->verf, &req->wb_verf, | 719 | memcpy(&dreq->verf, hdr->verf, |
720 | sizeof(dreq->verf)); | 720 | sizeof(dreq->verf)); |
721 | bit = NFS_IOHDR_NEED_COMMIT; | 721 | bit = NFS_IOHDR_NEED_COMMIT; |
722 | dreq->flags = NFS_ODIRECT_DO_COMMIT; | 722 | dreq->flags = NFS_ODIRECT_DO_COMMIT; |
723 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { | 723 | } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { |
724 | if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) { | 724 | if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) { |
725 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; | 725 | dreq->flags = NFS_ODIRECT_RESCHED_WRITES; |
726 | bit = NFS_IOHDR_NEED_RESCHED; | 726 | bit = NFS_IOHDR_NEED_RESCHED; |
727 | } else | 727 | } else |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index c6827f93ab57..cc5900ac61b5 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -295,7 +295,7 @@ is_ds_client(struct nfs_client *clp) | |||
295 | 295 | ||
296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; | 296 | extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; |
297 | 297 | ||
298 | extern const u32 nfs4_fattr_bitmap[2]; | 298 | extern const u32 nfs4_fattr_bitmap[3]; |
299 | extern const u32 nfs4_statfs_bitmap[2]; | 299 | extern const u32 nfs4_statfs_bitmap[2]; |
300 | extern const u32 nfs4_pathconf_bitmap[2]; | 300 | extern const u32 nfs4_pathconf_bitmap[2]; |
301 | extern const u32 nfs4_fsinfo_bitmap[3]; | 301 | extern const u32 nfs4_fsinfo_bitmap[3]; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d48dbefa0e71..15fc7e4664ed 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -105,6 +105,8 @@ static int nfs4_map_errors(int err) | |||
105 | return -EINVAL; | 105 | return -EINVAL; |
106 | case -NFS4ERR_SHARE_DENIED: | 106 | case -NFS4ERR_SHARE_DENIED: |
107 | return -EACCES; | 107 | return -EACCES; |
108 | case -NFS4ERR_MINOR_VERS_MISMATCH: | ||
109 | return -EPROTONOSUPPORT; | ||
108 | default: | 110 | default: |
109 | dprintk("%s could not handle NFSv4 error %d\n", | 111 | dprintk("%s could not handle NFSv4 error %d\n", |
110 | __func__, -err); | 112 | __func__, -err); |
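The new case above maps the protocol-level NFS4ERR_MINOR_VERS_MISMATCH to -EPROTONOSUPPORT instead of letting it fall through to the generic -EIO default, so mounting against a server that rejects the requested minor version fails with a meaningful errno. A stripped-down sketch of the same translation step (constants spelled out locally for illustration):

#include <errno.h>
#include <stdio.h>

#define NFS4ERR_SHARE_DENIED		10015
#define NFS4ERR_MINOR_VERS_MISMATCH	10021

static int map_errors(int err)
{
	if (err >= -1000)
		return err;		/* already a local errno */
	switch (err) {
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	default:
		fprintf(stderr, "could not handle NFSv4 error %d\n", -err);
		return -EIO;
	}
}

int main(void)
{
	/* prints the negative EPROTONOSUPPORT value instead of -EIO */
	printf("%d\n", map_errors(-NFS4ERR_MINOR_VERS_MISMATCH));
	return 0;
}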
@@ -116,7 +118,7 @@ static int nfs4_map_errors(int err) | |||
116 | /* | 118 | /* |
117 | * This is our standard bitmap for GETATTR requests. | 119 | * This is our standard bitmap for GETATTR requests. |
118 | */ | 120 | */ |
119 | const u32 nfs4_fattr_bitmap[2] = { | 121 | const u32 nfs4_fattr_bitmap[3] = { |
120 | FATTR4_WORD0_TYPE | 122 | FATTR4_WORD0_TYPE |
121 | | FATTR4_WORD0_CHANGE | 123 | | FATTR4_WORD0_CHANGE |
122 | | FATTR4_WORD0_SIZE | 124 | | FATTR4_WORD0_SIZE |
@@ -133,6 +135,24 @@ const u32 nfs4_fattr_bitmap[2] = { | |||
133 | | FATTR4_WORD1_TIME_MODIFY | 135 | | FATTR4_WORD1_TIME_MODIFY |
134 | }; | 136 | }; |
135 | 137 | ||
138 | static const u32 nfs4_pnfs_open_bitmap[3] = { | ||
139 | FATTR4_WORD0_TYPE | ||
140 | | FATTR4_WORD0_CHANGE | ||
141 | | FATTR4_WORD0_SIZE | ||
142 | | FATTR4_WORD0_FSID | ||
143 | | FATTR4_WORD0_FILEID, | ||
144 | FATTR4_WORD1_MODE | ||
145 | | FATTR4_WORD1_NUMLINKS | ||
146 | | FATTR4_WORD1_OWNER | ||
147 | | FATTR4_WORD1_OWNER_GROUP | ||
148 | | FATTR4_WORD1_RAWDEV | ||
149 | | FATTR4_WORD1_SPACE_USED | ||
150 | | FATTR4_WORD1_TIME_ACCESS | ||
151 | | FATTR4_WORD1_TIME_METADATA | ||
152 | | FATTR4_WORD1_TIME_MODIFY, | ||
153 | FATTR4_WORD2_MDSTHRESHOLD | ||
154 | }; | ||
155 | |||
136 | const u32 nfs4_statfs_bitmap[2] = { | 156 | const u32 nfs4_statfs_bitmap[2] = { |
137 | FATTR4_WORD0_FILES_AVAIL | 157 | FATTR4_WORD0_FILES_AVAIL |
138 | | FATTR4_WORD0_FILES_FREE | 158 | | FATTR4_WORD0_FILES_FREE |
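nfs4_fattr_bitmap grows to three u32 words and a separate nfs4_pnfs_open_bitmap is introduced whose word 2 sets FATTR4_WORD2_MDSTHRESHOLD, so a pNFS-backed open can ask the MDS for layout I/O thresholds while ordinary opens keep the old attribute set. The encoder later ANDs each requested word against the server-advertised mask (see the encode_getfattr_open() hunk in fs/nfs/nfs4xdr.c below); the word-wise masking looks like this in isolation, with made-up bit names:

#include <stdio.h>

/* made-up attribute bits spread over three 32-bit mask words */
#define W0_TYPE		(1u << 0)
#define W0_SIZE		(1u << 2)
#define W1_MODE		(1u << 1)
#define W2_MDSTHRESHOLD	(1u << 4)

/* what the client wants for a pNFS open */
static const unsigned int open_bitmap[3] = {
	W0_TYPE | W0_SIZE,
	W1_MODE,
	W2_MDSTHRESHOLD,
};

/* what this server advertises (no mdsthreshold support in word 2) */
static const unsigned int server_bitmask[3] = {
	W0_TYPE | W0_SIZE,
	W1_MODE,
	0,
};

int main(void)
{
	unsigned int wire[3];
	int i;

	/* request only the attributes both sides understand */
	for (i = 0; i < 3; i++)
		wire[i] = server_bitmask[i] & open_bitmap[i];
	printf("encoded bitmap words: %#x %#x %#x\n", wire[0], wire[1], wire[2]);
	return 0;
}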
@@ -844,6 +864,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, | |||
844 | p->o_arg.name = &dentry->d_name; | 864 | p->o_arg.name = &dentry->d_name; |
845 | p->o_arg.server = server; | 865 | p->o_arg.server = server; |
846 | p->o_arg.bitmask = server->attr_bitmask; | 866 | p->o_arg.bitmask = server->attr_bitmask; |
867 | p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; | ||
847 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; | 868 | p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; |
848 | if (attrs != NULL && attrs->ia_valid != 0) { | 869 | if (attrs != NULL && attrs->ia_valid != 0) { |
849 | __be32 verf[2]; | 870 | __be32 verf[2]; |
@@ -1820,6 +1841,7 @@ static int _nfs4_do_open(struct inode *dir, | |||
1820 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); | 1841 | opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); |
1821 | if (!opendata->f_attr.mdsthreshold) | 1842 | if (!opendata->f_attr.mdsthreshold) |
1822 | goto err_opendata_put; | 1843 | goto err_opendata_put; |
1844 | opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; | ||
1823 | } | 1845 | } |
1824 | if (dentry->d_inode != NULL) | 1846 | if (dentry->d_inode != NULL) |
1825 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); | 1847 | opendata->state = nfs4_get_open_state(dentry->d_inode, sp); |
@@ -1880,6 +1902,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, | |||
1880 | struct nfs4_state *res; | 1902 | struct nfs4_state *res; |
1881 | int status; | 1903 | int status; |
1882 | 1904 | ||
1905 | fmode &= FMODE_READ|FMODE_WRITE; | ||
1883 | do { | 1906 | do { |
1884 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, | 1907 | status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, |
1885 | &res, ctx_th); | 1908 | &res, ctx_th); |
@@ -2526,6 +2549,14 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
2526 | 2549 | ||
2527 | nfs_fattr_init(fattr); | 2550 | nfs_fattr_init(fattr); |
2528 | 2551 | ||
2552 | /* Deal with open(O_TRUNC) */ | ||
2553 | if (sattr->ia_valid & ATTR_OPEN) | ||
2554 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
2555 | |||
2556 | /* Optimization: if the end result is no change, don't RPC */ | ||
2557 | if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) | ||
2558 | return 0; | ||
2559 | |||
2529 | /* Search for an existing open(O_WRITE) file */ | 2560 | /* Search for an existing open(O_WRITE) file */ |
2530 | if (sattr->ia_valid & ATTR_FILE) { | 2561 | if (sattr->ia_valid & ATTR_FILE) { |
2531 | struct nfs_open_context *ctx; | 2562 | struct nfs_open_context *ctx; |
@@ -2537,10 +2568,6 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
2537 | } | 2568 | } |
2538 | } | 2569 | } |
2539 | 2570 | ||
2540 | /* Deal with open(O_TRUNC) */ | ||
2541 | if (sattr->ia_valid & ATTR_OPEN) | ||
2542 | sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); | ||
2543 | |||
2544 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); | 2571 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state); |
2545 | if (status == 0) | 2572 | if (status == 0) |
2546 | nfs_setattr_update_inode(inode, sattr); | 2573 | nfs_setattr_update_inode(inode, sattr); |
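Together the two hunks above reorder nfs4_proc_setattr(): the O_TRUNC special case (dropping ATTR_MTIME, ATTR_CTIME and ATTR_OPEN) now runs before the open-context lookup, and a new early return skips the SETATTR RPC entirely when nothing but the ATTR_FILE reference remains afterwards. The check in isolation, with placeholder ATTR_* values (the kernel's iattr flags differ):

#include <stdio.h>

#define ATTR_MODE	(1 << 0)
#define ATTR_MTIME	(1 << 5)
#define ATTR_CTIME	(1 << 6)
#define ATTR_FILE	(1 << 13)
#define ATTR_OPEN	(1 << 15)

/* returns 1 if a SETATTR RPC is needed, 0 if the request is a no-op */
static int setattr_needs_rpc(unsigned int ia_valid)
{
	/* open(O_TRUNC): the server sets the timestamps itself */
	if (ia_valid & ATTR_OPEN)
		ia_valid &= ~(ATTR_MTIME | ATTR_CTIME | ATTR_OPEN);

	/* nothing left except the open-file reference: don't go on the wire */
	if ((ia_valid & ~ATTR_FILE) == 0)
		return 0;
	return 1;
}

int main(void)
{
	printf("O_TRUNC bookkeeping only: %d\n",
	       setattr_needs_rpc(ATTR_OPEN | ATTR_MTIME | ATTR_CTIME | ATTR_FILE));
	printf("chmod:                    %d\n",
	       setattr_needs_rpc(ATTR_MODE | ATTR_CTIME));
	return 0;
}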
@@ -5275,7 +5302,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, | |||
5275 | 5302 | ||
5276 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5303 | status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
5277 | if (status) | 5304 | if (status) |
5278 | pr_warn("NFS: Got error %d from the server %s on " | 5305 | dprintk("NFS: Got error %d from the server %s on " |
5279 | "DESTROY_CLIENTID.", status, clp->cl_hostname); | 5306 | "DESTROY_CLIENTID.", status, clp->cl_hostname); |
5280 | return status; | 5307 | return status; |
5281 | } | 5308 | } |
@@ -5746,8 +5773,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session, | |||
5746 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 5773 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
5747 | 5774 | ||
5748 | if (status) | 5775 | if (status) |
5749 | printk(KERN_WARNING | 5776 | dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " |
5750 | "NFS: Got error %d from the server on DESTROY_SESSION. " | ||
5751 | "Session has been destroyed regardless...\n", status); | 5777 | "Session has been destroyed regardless...\n", status); |
5752 | 5778 | ||
5753 | dprintk("<-- nfs4_proc_destroy_session\n"); | 5779 | dprintk("<-- nfs4_proc_destroy_session\n"); |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c679b9ecef63..f38300e9f171 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -244,6 +244,16 @@ static int nfs4_begin_drain_session(struct nfs_client *clp) | |||
244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); | 244 | return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); |
245 | } | 245 | } |
246 | 246 | ||
247 | static void nfs41_finish_session_reset(struct nfs_client *clp) | ||
248 | { | ||
249 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | ||
250 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | ||
251 | /* create_session negotiated new slot table */ | ||
252 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
253 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
254 | nfs41_setup_state_renewal(clp); | ||
255 | } | ||
256 | |||
247 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) | 257 | int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) |
248 | { | 258 | { |
249 | int status; | 259 | int status; |
@@ -259,8 +269,7 @@ do_confirm: | |||
259 | status = nfs4_proc_create_session(clp, cred); | 269 | status = nfs4_proc_create_session(clp, cred); |
260 | if (status != 0) | 270 | if (status != 0) |
261 | goto out; | 271 | goto out; |
262 | clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | 272 | nfs41_finish_session_reset(clp); |
263 | nfs41_setup_state_renewal(clp); | ||
264 | nfs_mark_client_ready(clp, NFS_CS_READY); | 273 | nfs_mark_client_ready(clp, NFS_CS_READY); |
265 | out: | 274 | out: |
266 | return status; | 275 | return status; |
@@ -1772,16 +1781,9 @@ static int nfs4_reset_session(struct nfs_client *clp) | |||
1772 | status = nfs4_handle_reclaim_lease_error(clp, status); | 1781 | status = nfs4_handle_reclaim_lease_error(clp, status); |
1773 | goto out; | 1782 | goto out; |
1774 | } | 1783 | } |
1775 | clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); | 1784 | nfs41_finish_session_reset(clp); |
1776 | /* create_session negotiated new slot table */ | ||
1777 | clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | ||
1778 | clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); | ||
1779 | dprintk("%s: session reset was successful for server %s!\n", | 1785 | dprintk("%s: session reset was successful for server %s!\n", |
1780 | __func__, clp->cl_hostname); | 1786 | __func__, clp->cl_hostname); |
1781 | |||
1782 | /* Let the state manager reestablish state */ | ||
1783 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) | ||
1784 | nfs41_setup_state_renewal(clp); | ||
1785 | out: | 1787 | out: |
1786 | if (cred) | 1788 | if (cred) |
1787 | put_rpccred(cred); | 1789 | put_rpccred(cred); |
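The repeated clear_bit() sequence that used to follow every successful CREATE_SESSION is now centralized in nfs41_finish_session_reset(), so the first-contact path in nfs41_init_clientid() and the recovery path in nfs4_reset_session() cannot drift apart, and both re-arm state renewal the same way. The shape of the refactor, reduced to a toy with placeholder flag names:

#include <stdio.h>

#define CL_LEASE_CONFIRM	(1u << 0)
#define CL_SESSION_RESET	(1u << 1)
#define CL_RECALL_SLOT		(1u << 2)
#define CL_BIND_CONN		(1u << 3)

struct client_state {
	unsigned int flags;
	int renewal_armed;
};

/* the one place that knows what "session usable again" means */
static void finish_session_reset(struct client_state *clp)
{
	clp->flags &= ~(CL_LEASE_CONFIRM | CL_SESSION_RESET |
			CL_RECALL_SLOT | CL_BIND_CONN);
	clp->renewal_armed = 1;		/* schedule lease renewal */
}

int main(void)
{
	struct client_state clp = { .flags = CL_SESSION_RESET | CL_RECALL_SLOT };

	/* both the initial CREATE_SESSION and a later reset land here */
	finish_session_reset(&clp);
	printf("flags=%#x renewal_armed=%d\n", clp.flags, clp.renewal_armed);
	return 0;
}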
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index ee4a74db95d0..18fae29b0301 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -1198,12 +1198,13 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c | |||
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, | 1200 | static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, |
1201 | const u32 *open_bitmap, | ||
1201 | struct compound_hdr *hdr) | 1202 | struct compound_hdr *hdr) |
1202 | { | 1203 | { |
1203 | encode_getattr_three(xdr, | 1204 | encode_getattr_three(xdr, |
1204 | bitmask[0] & nfs4_fattr_bitmap[0], | 1205 | bitmask[0] & open_bitmap[0], |
1205 | bitmask[1] & nfs4_fattr_bitmap[1], | 1206 | bitmask[1] & open_bitmap[1], |
1206 | bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD, | 1207 | bitmask[2] & open_bitmap[2], |
1207 | hdr); | 1208 | hdr); |
1208 | } | 1209 | } |
1209 | 1210 | ||
@@ -2221,7 +2222,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, | |||
2221 | encode_putfh(xdr, args->fh, &hdr); | 2222 | encode_putfh(xdr, args->fh, &hdr); |
2222 | encode_open(xdr, args, &hdr); | 2223 | encode_open(xdr, args, &hdr); |
2223 | encode_getfh(xdr, &hdr); | 2224 | encode_getfh(xdr, &hdr); |
2224 | encode_getfattr_open(xdr, args->bitmask, &hdr); | 2225 | encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr); |
2225 | encode_nops(&hdr); | 2226 | encode_nops(&hdr); |
2226 | } | 2227 | } |
2227 | 2228 | ||
@@ -4359,7 +4360,10 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
4359 | 4360 | ||
4360 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) | 4361 | if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) |
4361 | return -EIO; | 4362 | return -EIO; |
4362 | if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) { | 4363 | if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) { |
4364 | /* Did the server return an unrequested attribute? */ | ||
4365 | if (unlikely(res == NULL)) | ||
4366 | return -EREMOTEIO; | ||
4363 | p = xdr_inline_decode(xdr, 4); | 4367 | p = xdr_inline_decode(xdr, 4); |
4364 | if (unlikely(!p)) | 4368 | if (unlikely(!p)) |
4365 | goto out_overflow; | 4369 | goto out_overflow; |
@@ -4372,6 +4376,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr, | |||
4372 | __func__); | 4376 | __func__); |
4373 | 4377 | ||
4374 | status = decode_first_threshold_item4(xdr, res); | 4378 | status = decode_first_threshold_item4(xdr, res); |
4379 | bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD; | ||
4375 | } | 4380 | } |
4376 | return status; | 4381 | return status; |
4377 | out_overflow: | 4382 | out_overflow: |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 29fd23c0efdc..64f90d845f6a 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -365,7 +365,7 @@ static inline bool | |||
365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, | 365 | pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, |
366 | struct nfs_server *nfss) | 366 | struct nfs_server *nfss) |
367 | { | 367 | { |
368 | return (dst && src && src->bm != 0 && | 368 | return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld && |
369 | nfss->pnfs_curr_ld->id == src->l_type); | 369 | nfss->pnfs_curr_ld->id == src->l_type); |
370 | } | 370 | } |
371 | 371 | ||
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index a706b6bcc286..617c7419a08e 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -651,7 +651,7 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) | |||
651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 | 651 | /* Emulate the eof flag, which isn't normally needed in NFSv2 |
652 | * as it is guaranteed to always return the file attributes | 652 | * as it is guaranteed to always return the file attributes |
653 | */ | 653 | */ |
654 | if (data->args.offset + data->args.count >= data->res.fattr->size) | 654 | if (data->args.offset + data->res.count >= data->res.fattr->size) |
655 | data->res.eof = 1; | 655 | data->res.eof = 1; |
656 | } | 656 | } |
657 | return 0; | 657 | return 0; |
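The one-line fs/nfs/proc.c change bases the emulated NFSv2 eof flag on res.count, the bytes the server actually returned, rather than args.count, the bytes requested. That matters for short reads: with a 100-byte file, offset 0 and a 100-byte request answered by only 50 bytes, the old test (0 + 100 >= 100) reported EOF and the rest of the file was never fetched, while the new test (0 + 50 >= 100) leaves eof clear so the read is continued. The arithmetic restated as a runnable check, with stand-in fields:

#include <stdio.h>

struct read_result {
	unsigned long long offset;	/* where the READ started */
	unsigned int requested;		/* args.count */
	unsigned int returned;		/* res.count  */
	unsigned long long file_size;	/* fattr->size */
};

/* NFSv2 has no eof bit on the wire, so derive it from what came back */
static int emulated_eof(const struct read_result *r)
{
	return r->offset + r->returned >= r->file_size;
}

int main(void)
{
	/* a short read in the middle of a 100-byte file */
	struct read_result r = { .offset = 0, .requested = 100,
				 .returned = 50, .file_size = 100 };

	printf("old test (requested bytes): %d\n",
	       (int)(r.offset + r.requested >= r.file_size));	/* 1: bogus EOF */
	printf("new test (returned bytes):  %d\n", emulated_eof(&r));	/* 0 */
	return 0;
}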
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ff656c022684..906f09c7d842 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options, | |||
1867 | if (data == NULL) | 1867 | if (data == NULL) |
1868 | goto out_no_data; | 1868 | goto out_no_data; |
1869 | 1869 | ||
1870 | args->version = NFS_DEFAULT_VERSION; | ||
1870 | switch (data->version) { | 1871 | switch (data->version) { |
1871 | case 1: | 1872 | case 1: |
1872 | data->namlen = 0; | 1873 | data->namlen = 0; |
@@ -2637,6 +2638,8 @@ static int nfs4_validate_mount_data(void *options, | |||
2637 | if (data == NULL) | 2638 | if (data == NULL) |
2638 | goto out_no_data; | 2639 | goto out_no_data; |
2639 | 2640 | ||
2641 | args->version = 4; | ||
2642 | |||
2640 | switch (data->version) { | 2643 | switch (data->version) { |
2641 | case 1: | 2644 | case 1: |
2642 | if (data->host_addrlen > sizeof(args->nfs_server.address)) | 2645 | if (data->host_addrlen > sizeof(args->nfs_server.address)) |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e6fe3d69d14c..4d6861c0dc14 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -80,6 +80,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void) | |||
80 | INIT_LIST_HEAD(&hdr->rpc_list); | 80 | INIT_LIST_HEAD(&hdr->rpc_list); |
81 | spin_lock_init(&hdr->lock); | 81 | spin_lock_init(&hdr->lock); |
82 | atomic_set(&hdr->refcnt, 0); | 82 | atomic_set(&hdr->refcnt, 0); |
83 | hdr->verf = &p->verf; | ||
83 | } | 84 | } |
84 | return p; | 85 | return p; |
85 | } | 86 | } |
@@ -619,6 +620,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
619 | goto next; | 620 | goto next; |
620 | } | 621 | } |
621 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { | 622 | if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { |
623 | memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf)); | ||
622 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); | 624 | nfs_mark_request_commit(req, hdr->lseg, &cinfo); |
623 | goto next; | 625 | goto next; |
624 | } | 626 | } |
@@ -1255,15 +1257,14 @@ static void nfs_writeback_release_common(void *calldata) | |||
1255 | struct nfs_write_data *data = calldata; | 1257 | struct nfs_write_data *data = calldata; |
1256 | struct nfs_pgio_header *hdr = data->header; | 1258 | struct nfs_pgio_header *hdr = data->header; |
1257 | int status = data->task.tk_status; | 1259 | int status = data->task.tk_status; |
1258 | struct nfs_page *req = hdr->req; | ||
1259 | 1260 | ||
1260 | if ((status >= 0) && nfs_write_need_commit(data)) { | 1261 | if ((status >= 0) && nfs_write_need_commit(data)) { |
1261 | spin_lock(&hdr->lock); | 1262 | spin_lock(&hdr->lock); |
1262 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) | 1263 | if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) |
1263 | ; /* Do nothing */ | 1264 | ; /* Do nothing */ |
1264 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) | 1265 | else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) |
1265 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1266 | memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf)); |
1266 | else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) | 1267 | else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf))) |
1267 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); | 1268 | set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); |
1268 | spin_unlock(&hdr->lock); | 1269 | spin_unlock(&hdr->lock); |
1269 | } | 1270 | } |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 8fdc9ec5c5d3..94effd5bc4a1 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -900,7 +900,7 @@ static void free_session(struct kref *kref) | |||
900 | struct nfsd4_session *ses; | 900 | struct nfsd4_session *ses; |
901 | int mem; | 901 | int mem; |
902 | 902 | ||
903 | BUG_ON(!spin_is_locked(&client_lock)); | 903 | lockdep_assert_held(&client_lock); |
904 | ses = container_of(kref, struct nfsd4_session, se_ref); | 904 | ses = container_of(kref, struct nfsd4_session, se_ref); |
905 | nfsd4_del_conns(ses); | 905 | nfsd4_del_conns(ses); |
906 | spin_lock(&nfsd_drc_lock); | 906 | spin_lock(&nfsd_drc_lock); |
@@ -1080,7 +1080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) | |||
1080 | static inline void | 1080 | static inline void |
1081 | free_client(struct nfs4_client *clp) | 1081 | free_client(struct nfs4_client *clp) |
1082 | { | 1082 | { |
1083 | BUG_ON(!spin_is_locked(&client_lock)); | 1083 | lockdep_assert_held(&client_lock); |
1084 | while (!list_empty(&clp->cl_sessions)) { | 1084 | while (!list_empty(&clp->cl_sessions)) { |
1085 | struct nfsd4_session *ses; | 1085 | struct nfsd4_session *ses; |
1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | 1086 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, |
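Both assertions above switch from BUG_ON(!spin_is_locked(&client_lock)) to lockdep_assert_held(&client_lock). On uniprocessor builds without spinlock debugging the spinlock body compiles away and spin_is_locked() simply returns 0, so the old BUG_ON could fire on perfectly correct code; lockdep_assert_held() costs nothing when lockdep is off and verifies that the current task actually owns the lock when it is on. A userspace analogue of an ownership assertion that disappears unless a debug switch is set (the names here are inventions, not kernel API):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t client_lock_owner;	/* tracked only for the assertion */

#ifdef DEBUG_LOCKS
#define assert_client_lock_held() \
	assert(pthread_equal(client_lock_owner, pthread_self()))
#else
#define assert_client_lock_held() ((void)0)	/* free when debugging is off */
#endif

static void free_client(void)
{
	assert_client_lock_held();	/* caller must already hold client_lock */
	puts("tearing down client state under client_lock");
}

int main(void)
{
	pthread_mutex_lock(&client_lock);
	client_lock_owner = pthread_self();
	free_client();
	pthread_mutex_unlock(&client_lock);
	return 0;
}

Build with -DDEBUG_LOCKS -pthread to keep the check, or without the define for a zero-cost build.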
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 2520a6e241dc..9f02005f217a 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_GENERIC_BUG_H | 2 | #define _ASM_GENERIC_BUG_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <linux/kernel.h> | ||
5 | 6 | ||
6 | #ifdef CONFIG_BUG | 7 | #ifdef CONFIG_BUG |
7 | 8 | ||
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 73e45600f95d..bac55c215113 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -54,7 +54,7 @@ struct drm_mode_object { | |||
54 | struct drm_object_properties *properties; | 54 | struct drm_object_properties *properties; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | #define DRM_OBJECT_MAX_PROPERTY 16 | 57 | #define DRM_OBJECT_MAX_PROPERTY 24 |
58 | struct drm_object_properties { | 58 | struct drm_object_properties { |
59 | int count; | 59 | int count; |
60 | uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; | 60 | uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; |
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h index abb1650940d2..826fc5807577 100644 --- a/include/linux/netfilter/xt_HMARK.h +++ b/include/linux/netfilter/xt_HMARK.h | |||
@@ -27,7 +27,12 @@ union hmark_ports { | |||
27 | __u16 src; | 27 | __u16 src; |
28 | __u16 dst; | 28 | __u16 dst; |
29 | } p16; | 29 | } p16; |
30 | struct { | ||
31 | __be16 src; | ||
32 | __be16 dst; | ||
33 | } b16; | ||
30 | __u32 v32; | 34 | __u32 v32; |
35 | __be32 b32; | ||
31 | }; | 36 | }; |
32 | 37 | ||
33 | struct xt_hmark_info { | 38 | struct xt_hmark_info { |
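The extra b16/b32 members give hmark_ports a network-byte-order view next to the host-order one, which lets the HMARK target work on the raw __be16 port pair (or the combined __be32 word) without casts that sparse's endianness annotations would flag. The underlying layout trick, one 32-bit word overlaid on a src/dst pair, in plain C with uint types standing in for __u16/__be16:

#include <stdint.h>
#include <stdio.h>

union hash_ports {
	struct {
		uint16_t src;
		uint16_t dst;
	} p16;			/* host-order view, e.g. after ntohs()       */
	struct {
		uint16_t src;
		uint16_t dst;
	} b16;			/* network-order view, straight off the wire */
	uint32_t v32;		/* both ports as one word, handy for hashing */
};

int main(void)
{
	union hash_ports p;

	/* values as they would appear on a little-endian host:
	 * htons(22) == 0x1600, htons(1234) == 0xd204 */
	p.b16.src = 0x1600;
	p.b16.dst = 0xd204;
	printf("hash input covering both ports: %#x\n", (unsigned)p.v32);
	return 0;
}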
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index d1a7bf51c326..8aadd90b808a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -348,6 +348,7 @@ struct nfs_openargs { | |||
348 | const struct qstr * name; | 348 | const struct qstr * name; |
349 | const struct nfs_server *server; /* Needed for ID mapping */ | 349 | const struct nfs_server *server; /* Needed for ID mapping */ |
350 | const u32 * bitmask; | 350 | const u32 * bitmask; |
351 | const u32 * open_bitmap; | ||
351 | __u32 claim; | 352 | __u32 claim; |
352 | struct nfs4_sequence_args seq_args; | 353 | struct nfs4_sequence_args seq_args; |
353 | }; | 354 | }; |
@@ -1236,6 +1237,7 @@ struct nfs_pgio_header { | |||
1236 | struct list_head rpc_list; | 1237 | struct list_head rpc_list; |
1237 | atomic_t refcnt; | 1238 | atomic_t refcnt; |
1238 | struct nfs_page *req; | 1239 | struct nfs_page *req; |
1240 | struct nfs_writeverf *verf; | ||
1239 | struct pnfs_layout_segment *lseg; | 1241 | struct pnfs_layout_segment *lseg; |
1240 | loff_t io_start; | 1242 | loff_t io_start; |
1241 | const struct rpc_call_ops *mds_ops; | 1243 | const struct rpc_call_ops *mds_ops; |
@@ -1273,6 +1275,7 @@ struct nfs_write_data { | |||
1273 | struct nfs_write_header { | 1275 | struct nfs_write_header { |
1274 | struct nfs_pgio_header header; | 1276 | struct nfs_pgio_header header; |
1275 | struct nfs_write_data rpc_data; | 1277 | struct nfs_write_data rpc_data; |
1278 | struct nfs_writeverf verf; | ||
1276 | }; | 1279 | }; |
1277 | 1280 | ||
1278 | struct nfs_mds_commit_info { | 1281 | struct nfs_mds_commit_info { |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index adb5e5a38cae..854dc4c5c271 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -87,8 +87,9 @@ static inline void kfree_call_rcu(struct rcu_head *head, | |||
87 | 87 | ||
88 | #ifdef CONFIG_TINY_RCU | 88 | #ifdef CONFIG_TINY_RCU |
89 | 89 | ||
90 | static inline int rcu_needs_cpu(int cpu) | 90 | static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
91 | { | 91 | { |
92 | *delta_jiffies = ULONG_MAX; | ||
92 | return 0; | 93 | return 0; |
93 | } | 94 | } |
94 | 95 | ||
@@ -96,8 +97,9 @@ static inline int rcu_needs_cpu(int cpu) | |||
96 | 97 | ||
97 | int rcu_preempt_needs_cpu(void); | 98 | int rcu_preempt_needs_cpu(void); |
98 | 99 | ||
99 | static inline int rcu_needs_cpu(int cpu) | 100 | static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
100 | { | 101 | { |
102 | *delta_jiffies = ULONG_MAX; | ||
101 | return rcu_preempt_needs_cpu(); | 103 | return rcu_preempt_needs_cpu(); |
102 | } | 104 | } |
103 | 105 | ||
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3c6083cde4fc..952b79339304 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | extern void rcu_init(void); | 33 | extern void rcu_init(void); |
34 | extern void rcu_note_context_switch(int cpu); | 34 | extern void rcu_note_context_switch(int cpu); |
35 | extern int rcu_needs_cpu(int cpu); | 35 | extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); |
36 | extern void rcu_cpu_stall_reset(void); | 36 | extern void rcu_cpu_stall_reset(void); |
37 | 37 | ||
38 | /* | 38 | /* |
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 792d16d9cbc7..47ead515c811 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
@@ -9,13 +9,15 @@ | |||
9 | * get good packing density in that tree, so the index should be dense in | 9 | * get good packing density in that tree, so the index should be dense in |
10 | * the low-order bits. | 10 | * the low-order bits. |
11 | * | 11 | * |
12 | * We arrange the `type' and `offset' fields so that `type' is at the five | 12 | * We arrange the `type' and `offset' fields so that `type' is at the seven |
13 | * high-order bits of the swp_entry_t and `offset' is right-aligned in the | 13 | * high-order bits of the swp_entry_t and `offset' is right-aligned in the |
14 | * remaining bits. | 14 | * remaining bits. Although `type' itself needs only five bits, we allow for |
15 | * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry(). | ||
15 | * | 16 | * |
16 | * swp_entry_t's are *never* stored anywhere in their arch-dependent format. | 17 | * swp_entry_t's are *never* stored anywhere in their arch-dependent format. |
17 | */ | 18 | */ |
18 | #define SWP_TYPE_SHIFT(e) (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT) | 19 | #define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ |
20 | (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) | ||
19 | #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) | 21 | #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) |
20 | 22 | ||
21 | /* | 23 | /* |
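With the extra RADIX_TREE_EXCEPTIONAL_SHIFT term the swap type now occupies the top seven bits of a swp_entry_t instead of five: on a 64-bit kernel SWP_TYPE_SHIFT evaluates to 64 - (5 + 2) = 57, leaving a 57-bit offset and two spare type bits so shmem can shift the whole entry up for its radix-tree encoding without truncation. The arithmetic spelled out as a runnable check (values match a 64-bit build):

#include <stdio.h>

#define MAX_SWAPFILES_SHIFT		5
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2

int main(void)
{
	unsigned long entry_bits = sizeof(unsigned long) * 8;	/* 64 on 64-bit */
	unsigned long type_shift =
		entry_bits - (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT);
	unsigned long offset_mask = (1UL << type_shift) - 1;

	printf("type shift      : %lu\n", type_shift);		/* 57 */
	printf("offset mask     : %#lx\n", offset_mask);
	printf("bits kept spare : %lu of %lu type bits\n",
	       (entry_bits - type_shift) - MAX_SWAPFILES_SHIFT,
	       entry_bits - type_shift);
	return 0;
}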
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4c5b63283377..5f359dbfcdce 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -69,16 +69,16 @@ union tcp_word_hdr { | |||
69 | #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) | 69 | #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) |
70 | 70 | ||
71 | enum { | 71 | enum { |
72 | TCP_FLAG_CWR = __cpu_to_be32(0x00800000), | 72 | TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000), |
73 | TCP_FLAG_ECE = __cpu_to_be32(0x00400000), | 73 | TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000), |
74 | TCP_FLAG_URG = __cpu_to_be32(0x00200000), | 74 | TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000), |
75 | TCP_FLAG_ACK = __cpu_to_be32(0x00100000), | 75 | TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000), |
76 | TCP_FLAG_PSH = __cpu_to_be32(0x00080000), | 76 | TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000), |
77 | TCP_FLAG_RST = __cpu_to_be32(0x00040000), | 77 | TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000), |
78 | TCP_FLAG_SYN = __cpu_to_be32(0x00020000), | 78 | TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000), |
79 | TCP_FLAG_FIN = __cpu_to_be32(0x00010000), | 79 | TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000), |
80 | TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), | 80 | TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000), |
81 | TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) | 81 | TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000) |
82 | }; | 82 | }; |
83 | 83 | ||
84 | /* | 84 | /* |
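Reverting to __constant_cpu_to_be32() here is most likely about where these definitions live: linux/tcp.h is exported to userspace and the flags are enumerators, which must be integer constant expressions, while the plain __cpu_to_be32() is not guaranteed to fold to a constant in every build of the exported header. A self-contained illustration of a constant-foldable 32-bit swap that is legal inside an enum (the macro name is made up):

#include <stdint.h>
#include <stdio.h>

/* constant-folding byte swap, usable in enum and static initializers,
 * similar in spirit to __constant_cpu_to_be32() on a little-endian build */
#define CONST_SWAB32(x) ((uint32_t)(			\
	(((uint32_t)(x) & 0x000000ffU) << 24) |		\
	(((uint32_t)(x) & 0x0000ff00U) <<  8) |		\
	(((uint32_t)(x) & 0x00ff0000U) >>  8) |		\
	(((uint32_t)(x) & 0xff000000U) >> 24)))

enum {
	FLAG_SYN = CONST_SWAB32(0x00020000),	/* legal: a constant expression */
	FLAG_FIN = CONST_SWAB32(0x00010000),
};

int main(void)
{
	printf("FLAG_SYN as stored on the wire: 0x%08x\n", (unsigned)FLAG_SYN);
	printf("FLAG_FIN as stored on the wire: 0x%08x\n", (unsigned)FLAG_FIN);
	return 0;
}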
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b455c7c212eb..60da41fe9dc2 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h | |||
@@ -12,6 +12,9 @@ | |||
12 | enum vga_switcheroo_state { | 12 | enum vga_switcheroo_state { |
13 | VGA_SWITCHEROO_OFF, | 13 | VGA_SWITCHEROO_OFF, |
14 | VGA_SWITCHEROO_ON, | 14 | VGA_SWITCHEROO_ON, |
15 | /* below are referred only from vga_switcheroo_get_client_state() */ | ||
16 | VGA_SWITCHEROO_INIT, | ||
17 | VGA_SWITCHEROO_NOT_FOUND, | ||
15 | }; | 18 | }; |
16 | 19 | ||
17 | enum vga_switcheroo_client_id { | 20 | enum vga_switcheroo_client_id { |
@@ -50,6 +53,8 @@ void vga_switcheroo_unregister_handler(void); | |||
50 | 53 | ||
51 | int vga_switcheroo_process_delayed_switch(void); | 54 | int vga_switcheroo_process_delayed_switch(void); |
52 | 55 | ||
56 | int vga_switcheroo_get_client_state(struct pci_dev *dev); | ||
57 | |||
53 | #else | 58 | #else |
54 | 59 | ||
55 | static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} | 60 | static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} |
@@ -62,5 +67,7 @@ static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, | |||
62 | int id, bool active) { return 0; } | 67 | int id, bool active) { return 0; } |
63 | static inline void vga_switcheroo_unregister_handler(void) {} | 68 | static inline void vga_switcheroo_unregister_handler(void) {} |
64 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } | 69 | static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } |
70 | static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } | ||
71 | |||
65 | 72 | ||
66 | #endif | 73 | #endif |
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index b94765e38e80..2040bff945d4 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
@@ -40,7 +40,10 @@ struct inet_peer { | |||
40 | u32 pmtu_orig; | 40 | u32 pmtu_orig; |
41 | u32 pmtu_learned; | 41 | u32 pmtu_learned; |
42 | struct inetpeer_addr_base redirect_learned; | 42 | struct inetpeer_addr_base redirect_learned; |
43 | struct list_head gc_list; | 43 | union { |
44 | struct list_head gc_list; | ||
45 | struct rcu_head gc_rcu; | ||
46 | }; | ||
44 | /* | 47 | /* |
45 | * Once inet_peer is queued for deletion (refcnt == -1), following fields | 48 | * Once inet_peer is queued for deletion (refcnt == -1), following fields |
46 | * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp | 49 | * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp |
diff --git a/include/net/route.h b/include/net/route.h index ed2b78e2375d..98705468ac03 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -130,9 +130,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, | |||
130 | { | 130 | { |
131 | struct flowi4 fl4 = { | 131 | struct flowi4 fl4 = { |
132 | .flowi4_oif = oif, | 132 | .flowi4_oif = oif, |
133 | .flowi4_tos = tos, | ||
133 | .daddr = daddr, | 134 | .daddr = daddr, |
134 | .saddr = saddr, | 135 | .saddr = saddr, |
135 | .flowi4_tos = tos, | ||
136 | }; | 136 | }; |
137 | return ip_route_output_key(net, &fl4); | 137 | return ip_route_output_key(net, &fl4); |
138 | } | 138 | } |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 55ce96b53b09..9d7d54a00e63 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -220,13 +220,16 @@ struct tcf_proto { | |||
220 | 220 | ||
221 | struct qdisc_skb_cb { | 221 | struct qdisc_skb_cb { |
222 | unsigned int pkt_len; | 222 | unsigned int pkt_len; |
223 | unsigned char data[24]; | 223 | u16 bond_queue_mapping; |
224 | u16 _pad; | ||
225 | unsigned char data[20]; | ||
224 | }; | 226 | }; |
225 | 227 | ||
226 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) | 228 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
227 | { | 229 | { |
228 | struct qdisc_skb_cb *qcb; | 230 | struct qdisc_skb_cb *qcb; |
229 | BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz); | 231 | |
232 | BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); | ||
230 | BUILD_BUG_ON(sizeof(qcb->data) < sz); | 233 | BUILD_BUG_ON(sizeof(qcb->data) < sz); |
231 | } | 234 | } |
232 | 235 | ||
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 116959933f46..c78a23333c4f 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -47,6 +47,7 @@ struct target_core_fabric_ops { | |||
47 | */ | 47 | */ |
48 | int (*check_stop_free)(struct se_cmd *); | 48 | int (*check_stop_free)(struct se_cmd *); |
49 | void (*release_cmd)(struct se_cmd *); | 49 | void (*release_cmd)(struct se_cmd *); |
50 | void (*put_session)(struct se_session *); | ||
50 | /* | 51 | /* |
51 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. | 52 | * Called with spin_lock_bh(struct se_portal_group->session_lock held. |
52 | */ | 53 | */ |
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 1480900c511c..d274734b2aa4 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h | |||
@@ -289,6 +289,7 @@ TRACE_EVENT(rcu_dyntick, | |||
289 | * "In holdoff": Nothing to do, holding off after unsuccessful attempt. | 289 | * "In holdoff": Nothing to do, holding off after unsuccessful attempt. |
290 | * "Begin holdoff": Attempt failed, don't retry until next jiffy. | 290 | * "Begin holdoff": Attempt failed, don't retry until next jiffy. |
291 | * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. | 291 | * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. |
292 | * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks. | ||
292 | * "More callbacks": Still more callbacks, try again to clear them out. | 293 | * "More callbacks": Still more callbacks, try again to clear them out. |
293 | * "Callbacks drained": All callbacks processed, off to dyntick idle! | 294 | * "Callbacks drained": All callbacks processed, off to dyntick idle! |
294 | * "Timer": Timer fired to cause CPU to continue processing callbacks. | 295 | * "Timer": Timer fired to cause CPU to continue processing callbacks. |
diff --git a/kernel/panic.c b/kernel/panic.c index 8ed89a175d79..d2a5f4ecc6dd 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #define PANIC_TIMER_STEP 100 | 27 | #define PANIC_TIMER_STEP 100 |
28 | #define PANIC_BLINK_SPD 18 | 28 | #define PANIC_BLINK_SPD 18 |
29 | 29 | ||
30 | int panic_on_oops; | 30 | int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE; |
31 | static unsigned long tainted_mask; | 31 | static unsigned long tainted_mask; |
32 | static int pause_on_oops; | 32 | static int pause_on_oops; |
33 | static int pause_on_oops_flag; | 33 | static int pause_on_oops_flag; |
@@ -108,8 +108,6 @@ void panic(const char *fmt, ...) | |||
108 | */ | 108 | */ |
109 | crash_kexec(NULL); | 109 | crash_kexec(NULL); |
110 | 110 | ||
111 | kmsg_dump(KMSG_DUMP_PANIC); | ||
112 | |||
113 | /* | 111 | /* |
114 | * Note smp_send_stop is the usual smp shutdown function, which | 112 | * Note smp_send_stop is the usual smp shutdown function, which |
115 | * unfortunately means it may not be hardened to work in a panic | 113 | * unfortunately means it may not be hardened to work in a panic |
@@ -117,6 +115,8 @@ void panic(const char *fmt, ...) | |||
117 | */ | 115 | */ |
118 | smp_send_stop(); | 116 | smp_send_stop(); |
119 | 117 | ||
118 | kmsg_dump(KMSG_DUMP_PANIC); | ||
119 | |||
120 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); | 120 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); |
121 | 121 | ||
122 | bust_spinlocks(0); | 122 | bust_spinlocks(0); |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 0da7b88d92d0..3b0f1337f75b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -1397,6 +1397,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
1397 | rdp->qlen_lazy += rsp->qlen_lazy; | 1397 | rdp->qlen_lazy += rsp->qlen_lazy; |
1398 | rdp->qlen += rsp->qlen; | 1398 | rdp->qlen += rsp->qlen; |
1399 | rdp->n_cbs_adopted += rsp->qlen; | 1399 | rdp->n_cbs_adopted += rsp->qlen; |
1400 | if (rsp->qlen_lazy != rsp->qlen) | ||
1401 | rcu_idle_count_callbacks_posted(); | ||
1400 | rsp->qlen_lazy = 0; | 1402 | rsp->qlen_lazy = 0; |
1401 | rsp->qlen = 0; | 1403 | rsp->qlen = 0; |
1402 | 1404 | ||
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 7f5d138dedf5..ea056495783e 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -84,6 +84,20 @@ struct rcu_dynticks { | |||
84 | /* Process level is worth LLONG_MAX/2. */ | 84 | /* Process level is worth LLONG_MAX/2. */ |
85 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ | 85 | int dynticks_nmi_nesting; /* Track NMI nesting level. */ |
86 | atomic_t dynticks; /* Even value for idle, else odd. */ | 86 | atomic_t dynticks; /* Even value for idle, else odd. */ |
87 | #ifdef CONFIG_RCU_FAST_NO_HZ | ||
88 | int dyntick_drain; /* Prepare-for-idle state variable. */ | ||
89 | unsigned long dyntick_holdoff; | ||
90 | /* No retries for the jiffy of failure. */ | ||
91 | struct timer_list idle_gp_timer; | ||
92 | /* Wake up CPU sleeping with callbacks. */ | ||
93 | unsigned long idle_gp_timer_expires; | ||
94 | /* When to wake up CPU (for repost). */ | ||
95 | bool idle_first_pass; /* First pass of attempt to go idle? */ | ||
96 | unsigned long nonlazy_posted; | ||
97 | /* # times non-lazy CBs posted to CPU. */ | ||
98 | unsigned long nonlazy_posted_snap; | ||
99 | /* idle-period nonlazy_posted snapshot. */ | ||
100 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | ||
87 | }; | 101 | }; |
88 | 102 | ||
89 | /* RCU's kthread states for tracing. */ | 103 | /* RCU's kthread states for tracing. */ |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 2411000d9869..5271a020887e 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -1886,8 +1886,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu) | |||
1886 | * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs | 1886 | * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs |
1887 | * any flavor of RCU. | 1887 | * any flavor of RCU. |
1888 | */ | 1888 | */ |
1889 | int rcu_needs_cpu(int cpu) | 1889 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) |
1890 | { | 1890 | { |
1891 | *delta_jiffies = ULONG_MAX; | ||
1891 | return rcu_cpu_has_callbacks(cpu); | 1892 | return rcu_cpu_has_callbacks(cpu); |
1892 | } | 1893 | } |
1893 | 1894 | ||
@@ -1962,41 +1963,6 @@ static void rcu_idle_count_callbacks_posted(void) | |||
1962 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ | 1963 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ |
1963 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ | 1964 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ |
1964 | 1965 | ||
1965 | /* Loop counter for rcu_prepare_for_idle(). */ | ||
1966 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); | ||
1967 | /* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */ | ||
1968 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | ||
1969 | /* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */ | ||
1970 | static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer); | ||
1971 | /* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */ | ||
1972 | static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires); | ||
1973 | /* Enable special processing on first attempt to enter dyntick-idle mode. */ | ||
1974 | static DEFINE_PER_CPU(bool, rcu_idle_first_pass); | ||
1975 | /* Running count of non-lazy callbacks posted, never decremented. */ | ||
1976 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted); | ||
1977 | /* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */ | ||
1978 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap); | ||
1979 | |||
1980 | /* | ||
1981 | * Allow the CPU to enter dyntick-idle mode if either: (1) There are no | ||
1982 | * callbacks on this CPU, (2) this CPU has not yet attempted to enter | ||
1983 | * dyntick-idle mode, or (3) this CPU is in the process of attempting to | ||
1984 | * enter dyntick-idle mode. Otherwise, if we have recently tried and failed | ||
1985 | * to enter dyntick-idle mode, we refuse to try to enter it. After all, | ||
1986 | * it is better to incur scheduling-clock interrupts than to spin | ||
1987 | * continuously for the same time duration! | ||
1988 | */ | ||
1989 | int rcu_needs_cpu(int cpu) | ||
1990 | { | ||
1991 | /* Flag a new idle sojourn to the idle-entry state machine. */ | ||
1992 | per_cpu(rcu_idle_first_pass, cpu) = 1; | ||
1993 | /* If no callbacks, RCU doesn't need the CPU. */ | ||
1994 | if (!rcu_cpu_has_callbacks(cpu)) | ||
1995 | return 0; | ||
1996 | /* Otherwise, RCU needs the CPU only if it recently tried and failed. */ | ||
1997 | return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies; | ||
1998 | } | ||
1999 | |||
2000 | /* | 1966 | /* |
2001 | * Does the specified flavor of RCU have non-lazy callbacks pending on | 1967 | * Does the specified flavor of RCU have non-lazy callbacks pending on |
2002 | * the specified CPU? Both RCU flavor and CPU are specified by the | 1968 | * the specified CPU? Both RCU flavor and CPU are specified by the |
@@ -2040,6 +2006,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu) | |||
2040 | } | 2006 | } |
2041 | 2007 | ||
2042 | /* | 2008 | /* |
2009 | * Allow the CPU to enter dyntick-idle mode if either: (1) There are no | ||
2010 | * callbacks on this CPU, (2) this CPU has not yet attempted to enter | ||
2011 | * dyntick-idle mode, or (3) this CPU is in the process of attempting to | ||
2012 | * enter dyntick-idle mode. Otherwise, if we have recently tried and failed | ||
2013 | * to enter dyntick-idle mode, we refuse to try to enter it. After all, | ||
2014 | * it is better to incur scheduling-clock interrupts than to spin | ||
2015 | * continuously for the same time duration! | ||
2016 | * | ||
2017 | * The delta_jiffies argument is used to store the time when RCU is | ||
2018 | * going to need the CPU again if it still has callbacks. The reason | ||
2019 | * for this is that rcu_prepare_for_idle() might need to post a timer, | ||
2020 | * but if so, it will do so after tick_nohz_stop_sched_tick() has set | ||
2021 | * the wakeup time for this CPU. This means that RCU's timer can be | ||
2022 | * delayed until the wakeup time, which defeats the purpose of posting | ||
2023 | * a timer. | ||
2024 | */ | ||
2025 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) | ||
2026 | { | ||
2027 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
2028 | |||
2029 | /* Flag a new idle sojourn to the idle-entry state machine. */ | ||
2030 | rdtp->idle_first_pass = 1; | ||
2031 | /* If no callbacks, RCU doesn't need the CPU. */ | ||
2032 | if (!rcu_cpu_has_callbacks(cpu)) { | ||
2033 | *delta_jiffies = ULONG_MAX; | ||
2034 | return 0; | ||
2035 | } | ||
2036 | if (rdtp->dyntick_holdoff == jiffies) { | ||
2037 | /* RCU recently tried and failed, so don't try again. */ | ||
2038 | *delta_jiffies = 1; | ||
2039 | return 1; | ||
2040 | } | ||
2041 | /* Set up for the possibility that RCU will post a timer. */ | ||
2042 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) | ||
2043 | *delta_jiffies = RCU_IDLE_GP_DELAY; | ||
2044 | else | ||
2045 | *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY; | ||
2046 | return 0; | ||
2047 | } | ||
2048 | |||
2049 | /* | ||
2043 | * Handler for smp_call_function_single(). The only point of this | 2050 | * Handler for smp_call_function_single(). The only point of this |
2044 | * handler is to wake the CPU up, so the handler does only tracing. | 2051 | * handler is to wake the CPU up, so the handler does only tracing. |
2045 | */ | 2052 | */ |
@@ -2075,21 +2082,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in) | |||
2075 | */ | 2082 | */ |
2076 | static void rcu_prepare_for_idle_init(int cpu) | 2083 | static void rcu_prepare_for_idle_init(int cpu) |
2077 | { | 2084 | { |
2078 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 2085 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2079 | setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), | 2086 | |
2080 | rcu_idle_gp_timer_func, cpu); | 2087 | rdtp->dyntick_holdoff = jiffies - 1; |
2081 | per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1; | 2088 | setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu); |
2082 | per_cpu(rcu_idle_first_pass, cpu) = 1; | 2089 | rdtp->idle_gp_timer_expires = jiffies - 1; |
2090 | rdtp->idle_first_pass = 1; | ||
2083 | } | 2091 | } |
2084 | 2092 | ||
2085 | /* | 2093 | /* |
2086 | * Clean up for exit from idle. Because we are exiting from idle, there | 2094 | * Clean up for exit from idle. Because we are exiting from idle, there |
2087 | * is no longer any point to rcu_idle_gp_timer, so cancel it. This will | 2095 | * is no longer any point to ->idle_gp_timer, so cancel it. This will |
2088 | * do nothing if this timer is not active, so just cancel it unconditionally. | 2096 | * do nothing if this timer is not active, so just cancel it unconditionally. |
2089 | */ | 2097 | */ |
2090 | static void rcu_cleanup_after_idle(int cpu) | 2098 | static void rcu_cleanup_after_idle(int cpu) |
2091 | { | 2099 | { |
2092 | del_timer(&per_cpu(rcu_idle_gp_timer, cpu)); | 2100 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2101 | |||
2102 | del_timer(&rdtp->idle_gp_timer); | ||
2093 | trace_rcu_prep_idle("Cleanup after idle"); | 2103 | trace_rcu_prep_idle("Cleanup after idle"); |
2094 | } | 2104 | } |
2095 | 2105 | ||
@@ -2108,42 +2118,41 @@ static void rcu_cleanup_after_idle(int cpu) | |||
2108 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | 2118 | * Because it is not legal to invoke rcu_process_callbacks() with irqs |
2109 | * disabled, we do one pass of force_quiescent_state(), then do a | 2119 | * disabled, we do one pass of force_quiescent_state(), then do a |
2110 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked | 2120 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked |
2111 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. | 2121 | * later. The ->dyntick_drain field controls the sequencing. |
2112 | * | 2122 | * |
2113 | * The caller must have disabled interrupts. | 2123 | * The caller must have disabled interrupts. |
2114 | */ | 2124 | */ |
2115 | static void rcu_prepare_for_idle(int cpu) | 2125 | static void rcu_prepare_for_idle(int cpu) |
2116 | { | 2126 | { |
2117 | struct timer_list *tp; | 2127 | struct timer_list *tp; |
2128 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | ||
2118 | 2129 | ||
2119 | /* | 2130 | /* |
2120 | * If this is an idle re-entry, for example, due to use of | 2131 | * If this is an idle re-entry, for example, due to use of |
2121 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle | 2132 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle |
2122 | * loop, then don't take any state-machine actions, unless the | 2133 | * loop, then don't take any state-machine actions, unless the |
2123 | * momentary exit from idle queued additional non-lazy callbacks. | 2134 | * momentary exit from idle queued additional non-lazy callbacks. |
2124 | * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks | 2135 | * Instead, repost the ->idle_gp_timer if this CPU has callbacks |
2125 | * pending. | 2136 | * pending. |
2126 | */ | 2137 | */ |
2127 | if (!per_cpu(rcu_idle_first_pass, cpu) && | 2138 | if (!rdtp->idle_first_pass && |
2128 | (per_cpu(rcu_nonlazy_posted, cpu) == | 2139 | (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) { |
2129 | per_cpu(rcu_nonlazy_posted_snap, cpu))) { | ||
2130 | if (rcu_cpu_has_callbacks(cpu)) { | 2140 | if (rcu_cpu_has_callbacks(cpu)) { |
2131 | tp = &per_cpu(rcu_idle_gp_timer, cpu); | 2141 | tp = &rdtp->idle_gp_timer; |
2132 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2142 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); |
2133 | } | 2143 | } |
2134 | return; | 2144 | return; |
2135 | } | 2145 | } |
2136 | per_cpu(rcu_idle_first_pass, cpu) = 0; | 2146 | rdtp->idle_first_pass = 0; |
2137 | per_cpu(rcu_nonlazy_posted_snap, cpu) = | 2147 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1; |
2138 | per_cpu(rcu_nonlazy_posted, cpu) - 1; | ||
2139 | 2148 | ||
2140 | /* | 2149 | /* |
2141 | * If there are no callbacks on this CPU, enter dyntick-idle mode. | 2150 | * If there are no callbacks on this CPU, enter dyntick-idle mode. |
2142 | * Also reset state to avoid prejudicing later attempts. | 2151 | * Also reset state to avoid prejudicing later attempts. |
2143 | */ | 2152 | */ |
2144 | if (!rcu_cpu_has_callbacks(cpu)) { | 2153 | if (!rcu_cpu_has_callbacks(cpu)) { |
2145 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 2154 | rdtp->dyntick_holdoff = jiffies - 1; |
2146 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 2155 | rdtp->dyntick_drain = 0; |
2147 | trace_rcu_prep_idle("No callbacks"); | 2156 | trace_rcu_prep_idle("No callbacks"); |
2148 | return; | 2157 | return; |
2149 | } | 2158 | } |
@@ -2152,36 +2161,37 @@ static void rcu_prepare_for_idle(int cpu) | |||
2152 | * If in holdoff mode, just return. We will presumably have | 2161 | * If in holdoff mode, just return. We will presumably have |
2153 | * refrained from disabling the scheduling-clock tick. | 2162 | * refrained from disabling the scheduling-clock tick. |
2154 | */ | 2163 | */ |
2155 | if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { | 2164 | if (rdtp->dyntick_holdoff == jiffies) { |
2156 | trace_rcu_prep_idle("In holdoff"); | 2165 | trace_rcu_prep_idle("In holdoff"); |
2157 | return; | 2166 | return; |
2158 | } | 2167 | } |
2159 | 2168 | ||
2160 | /* Check and update the rcu_dyntick_drain sequencing. */ | 2169 | /* Check and update the ->dyntick_drain sequencing. */ |
2161 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 2170 | if (rdtp->dyntick_drain <= 0) { |
2162 | /* First time through, initialize the counter. */ | 2171 | /* First time through, initialize the counter. */ |
2163 | per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; | 2172 | rdtp->dyntick_drain = RCU_IDLE_FLUSHES; |
2164 | } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && | 2173 | } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES && |
2165 | !rcu_pending(cpu) && | 2174 | !rcu_pending(cpu) && |
2166 | !local_softirq_pending()) { | 2175 | !local_softirq_pending()) { |
2167 | /* Can we go dyntick-idle despite still having callbacks? */ | 2176 | /* Can we go dyntick-idle despite still having callbacks? */ |
2168 | trace_rcu_prep_idle("Dyntick with callbacks"); | 2177 | rdtp->dyntick_drain = 0; |
2169 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 2178 | rdtp->dyntick_holdoff = jiffies; |
2170 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; | 2179 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) { |
2171 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) | 2180 | trace_rcu_prep_idle("Dyntick with callbacks"); |
2172 | per_cpu(rcu_idle_gp_timer_expires, cpu) = | 2181 | rdtp->idle_gp_timer_expires = |
2173 | jiffies + RCU_IDLE_GP_DELAY; | 2182 | jiffies + RCU_IDLE_GP_DELAY; |
2174 | else | 2183 | } else { |
2175 | per_cpu(rcu_idle_gp_timer_expires, cpu) = | 2184 | rdtp->idle_gp_timer_expires = |
2176 | jiffies + RCU_IDLE_LAZY_GP_DELAY; | 2185 | jiffies + RCU_IDLE_LAZY_GP_DELAY; |
2177 | tp = &per_cpu(rcu_idle_gp_timer, cpu); | 2186 | trace_rcu_prep_idle("Dyntick with lazy callbacks"); |
2178 | mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); | 2187 | } |
2179 | per_cpu(rcu_nonlazy_posted_snap, cpu) = | 2188 | tp = &rdtp->idle_gp_timer; |
2180 | per_cpu(rcu_nonlazy_posted, cpu); | 2189 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); |
2190 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; | ||
2181 | return; /* Nothing more to do immediately. */ | 2191 | return; /* Nothing more to do immediately. */ |
2182 | } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 2192 | } else if (--(rdtp->dyntick_drain) <= 0) { |
2183 | /* We have hit the limit, so time to give up. */ | 2193 | /* We have hit the limit, so time to give up. */ |
2184 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; | 2194 | rdtp->dyntick_holdoff = jiffies; |
2185 | trace_rcu_prep_idle("Begin holdoff"); | 2195 | trace_rcu_prep_idle("Begin holdoff"); |
2186 | invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ | 2196 | invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ |
2187 | return; | 2197 | return; |
@@ -2227,7 +2237,7 @@ static void rcu_prepare_for_idle(int cpu) | |||
2227 | */ | 2237 | */ |
2228 | static void rcu_idle_count_callbacks_posted(void) | 2238 | static void rcu_idle_count_callbacks_posted(void) |
2229 | { | 2239 | { |
2230 | __this_cpu_add(rcu_nonlazy_posted, 1); | 2240 | __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); |
2231 | } | 2241 | } |
2232 | 2242 | ||
2233 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ | 2243 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
@@ -2238,11 +2248,12 @@ static void rcu_idle_count_callbacks_posted(void) | |||
2238 | 2248 | ||
2239 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) | 2249 | static void print_cpu_stall_fast_no_hz(char *cp, int cpu) |
2240 | { | 2250 | { |
2241 | struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu); | 2251 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2252 | struct timer_list *tltp = &rdtp->idle_gp_timer; | ||
2242 | 2253 | ||
2243 | sprintf(cp, "drain=%d %c timer=%lu", | 2254 | sprintf(cp, "drain=%d %c timer=%lu", |
2244 | per_cpu(rcu_dyntick_drain, cpu), | 2255 | rdtp->dyntick_drain, |
2245 | per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.', | 2256 | rdtp->dyntick_holdoff == jiffies ? 'H' : '.', |
2246 | timer_pending(tltp) ? tltp->expires - jiffies : -1); | 2257 | timer_pending(tltp) ? tltp->expires - jiffies : -1); |
2247 | } | 2258 | } |
2248 | 2259 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index da70c6db496c..869997833928 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -274,6 +274,7 @@ EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); | |||
274 | static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | 274 | static void tick_nohz_stop_sched_tick(struct tick_sched *ts) |
275 | { | 275 | { |
276 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; | 276 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; |
277 | unsigned long rcu_delta_jiffies; | ||
277 | ktime_t last_update, expires, now; | 278 | ktime_t last_update, expires, now; |
278 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | 279 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; |
279 | u64 time_delta; | 280 | u64 time_delta; |
@@ -322,7 +323,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | |||
322 | time_delta = timekeeping_max_deferment(); | 323 | time_delta = timekeeping_max_deferment(); |
323 | } while (read_seqretry(&xtime_lock, seq)); | 324 | } while (read_seqretry(&xtime_lock, seq)); |
324 | 325 | ||
325 | if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || | 326 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || |
326 | arch_needs_cpu(cpu)) { | 327 | arch_needs_cpu(cpu)) { |
327 | next_jiffies = last_jiffies + 1; | 328 | next_jiffies = last_jiffies + 1; |
328 | delta_jiffies = 1; | 329 | delta_jiffies = 1; |
@@ -330,6 +331,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts) | |||
330 | /* Get the next timer wheel timer */ | 331 | /* Get the next timer wheel timer */ |
331 | next_jiffies = get_next_timer_interrupt(last_jiffies); | 332 | next_jiffies = get_next_timer_interrupt(last_jiffies); |
332 | delta_jiffies = next_jiffies - last_jiffies; | 333 | delta_jiffies = next_jiffies - last_jiffies; |
334 | if (rcu_delta_jiffies < delta_jiffies) { | ||
335 | next_jiffies = last_jiffies + rcu_delta_jiffies; | ||
336 | delta_jiffies = rcu_delta_jiffies; | ||
337 | } | ||
333 | } | 338 | } |
334 | /* | 339 | /* |
335 | * Do not stop the tick, if we are only one off | 340 | * Do not stop the tick, if we are only one off |
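This hunk is the consumer of the new rcu_needs_cpu(cpu, &rcu_delta_jiffies) signature introduced in the RCU changes above: even when RCU agrees the tick may stop, it reports how far ahead it is willing to sleep, and the nohz code clamps the planned sleep so the lazy-callback timer that rcu_prepare_for_idle() may post later is not scheduled past its intended expiry. The clamping step in isolation, with a stubbed rcu_needs_cpu() and made-up jiffy values:

#include <stdio.h>

#define RCU_IDLE_GP_DELAY	6		/* roughly one grace period */
#define RCU_IDLE_LAZY_GP_DELAY	(6 * 100)	/* about six seconds at HZ=100 */

/* stub: pretend this CPU has only lazy callbacks queued */
static int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	(void)cpu;
	*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
	return 0;				/* 0: the tick may be stopped */
}

int main(void)
{
	unsigned long last_jiffies = 1000;
	unsigned long next_jiffies, delta_jiffies, rcu_delta_jiffies;

	if (rcu_needs_cpu(0, &rcu_delta_jiffies)) {
		/* RCU needs this CPU soon: keep the tick, sleep one jiffy */
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* next timer-wheel event, pretend it is 5000 jiffies away */
		next_jiffies = last_jiffies + 5000;
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			/* do not sleep past the point where RCU wants a timer */
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}
	printf("stop the tick for %lu jiffies, wake at jiffy %lu\n",
	       delta_jiffies, next_jiffies);
	return 0;
}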
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 68032c6177db..49249c28690d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(tracing_on); | |||
371 | void tracing_off(void) | 371 | void tracing_off(void) |
372 | { | 372 | { |
373 | if (global_trace.buffer) | 373 | if (global_trace.buffer) |
374 | ring_buffer_record_on(global_trace.buffer); | 374 | ring_buffer_record_off(global_trace.buffer); |
375 | /* | 375 | /* |
376 | * This flag is only looked at when buffers haven't been | 376 | * This flag is only looked at when buffers haven't been |
377 | * allocated yet. We don't really care about the race | 377 | * allocated yet. We don't really care about the race |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index e5e1d85b8c7c..4b1dfba70f7c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -372,6 +372,13 @@ static int watchdog(void *unused) | |||
372 | 372 | ||
373 | 373 | ||
374 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 374 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
375 | /* | ||
376 | * People like the simple clean cpu node info on boot. | ||
377 | * Reduce the watchdog noise by only printing messages | ||
378 | * that are different from what cpu0 displayed. | ||
379 | */ | ||
380 | static unsigned long cpu0_err; | ||
381 | |||
375 | static int watchdog_nmi_enable(int cpu) | 382 | static int watchdog_nmi_enable(int cpu) |
376 | { | 383 | { |
377 | struct perf_event_attr *wd_attr; | 384 | struct perf_event_attr *wd_attr; |
@@ -390,11 +397,21 @@ static int watchdog_nmi_enable(int cpu) | |||
390 | 397 | ||
391 | /* Try to register using hardware perf events */ | 398 | /* Try to register using hardware perf events */ |
392 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | 399 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); |
400 | |||
401 | /* save cpu0 error for future comparison */ ||
402 | if (cpu == 0 && IS_ERR(event)) | ||
403 | cpu0_err = PTR_ERR(event); | ||
404 | |||
393 | if (!IS_ERR(event)) { | 405 | if (!IS_ERR(event)) { |
394 | pr_info("enabled, takes one hw-pmu counter.\n"); | 406 | /* only print for cpu0 or different than cpu0 */ |
407 | if (cpu == 0 || cpu0_err) | ||
408 | pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); | ||
395 | goto out_save; | 409 | goto out_save; |
396 | } | 410 | } |
397 | 411 | ||
412 | /* skip displaying the same error again */ | ||
413 | if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) | ||
414 | return PTR_ERR(event); | ||
398 | 415 | ||
399 | /* vary the KERN level based on the returned errno */ | 416 | /* vary the KERN level based on the returned errno */ |
400 | if (PTR_ERR(event) == -EOPNOTSUPP) | 417 | if (PTR_ERR(event) == -EOPNOTSUPP) |
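The watchdog change above remembers the error CPU 0 hit and suppresses identical messages from the remaining CPUs, so a missing PMU produces one line on boot instead of one per CPU. A user-space sketch of that deduplication; enable_counter() is a made-up stand-in for perf_event_create_kernel_counter().

/*
 * Sketch of the dedup pattern: save the error CPU 0 reported and stay
 * silent when later CPUs fail the same way.
 */
#include <stdio.h>
#include <errno.h>

static long cpu0_err;

static long enable_counter(int cpu)
{
        return -EOPNOTSUPP;     /* pretend no hardware PMU is present */
}

static long nmi_enable(int cpu)
{
        long err = enable_counter(cpu);

        if (cpu == 0)
                cpu0_err = err;         /* save for later comparison */

        if (!err) {
                /* only print for cpu0, or if cpu0 itself had failed */
                if (cpu == 0 || cpu0_err)
                        printf("enabled on all CPUs\n");
                return 0;
        }

        /* skip displaying the same error again */
        if (cpu > 0 && err == cpu0_err)
                return err;

        printf("cpu%d: enable failed: %ld\n", cpu, err);
        return err;
}

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                nmi_enable(cpu);        /* only cpu0 prints the failure */
        return 0;
}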
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a42d3ae39648..ff5bdee4716d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -241,6 +241,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | |||
241 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | 241 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC |
242 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | 242 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC |
243 | 243 | ||
244 | config PANIC_ON_OOPS | ||
245 | bool "Panic on Oops" if EXPERT | ||
246 | default n | ||
247 | help | ||
248 | Say Y here to enable the kernel to panic when it oopses. This | ||
249 | has the same effect as setting oops=panic on the kernel command | ||
250 | line. | ||
251 | |||
252 | This feature is useful to ensure that the kernel does not do | ||
253 | anything erroneous after an oops which could result in data | ||
254 | corruption or other issues. | ||
255 | |||
256 | Say N if unsure. | ||
257 | |||
258 | config PANIC_ON_OOPS_VALUE | ||
259 | int | ||
260 | range 0 1 | ||
261 | default 0 if !PANIC_ON_OOPS | ||
262 | default 1 if PANIC_ON_OOPS | ||
263 | |||
244 | config DETECT_HUNG_TASK | 264 | config DETECT_HUNG_TASK |
245 | bool "Detect Hung Tasks" | 265 | bool "Detect Hung Tasks" |
246 | depends on DEBUG_KERNEL | 266 | depends on DEBUG_KERNEL |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index d0ec4f3d1593..e91fbc23fff1 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
@@ -118,7 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock) | |||
118 | /* lockup suspected: */ | 118 | /* lockup suspected: */ |
119 | if (print_once) { | 119 | if (print_once) { |
120 | print_once = 0; | 120 | print_once = 0; |
121 | spin_dump(lock, "lockup"); | 121 | spin_dump(lock, "lockup suspected"); |
122 | #ifdef CONFIG_SMP | 122 | #ifdef CONFIG_SMP |
123 | trigger_all_cpu_backtrace(); | 123 | trigger_all_cpu_backtrace(); |
124 | #endif | 124 | #endif |
diff --git a/mm/memblock.c b/mm/memblock.c index 952123eba433..32a0a5e4d79d 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -867,6 +867,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr) | |||
867 | return memblock_search(&memblock.memory, addr) != -1; | 867 | return memblock_search(&memblock.memory, addr) != -1; |
868 | } | 868 | } |
869 | 869 | ||
870 | /** | ||
871 | * memblock_is_region_memory - check if a region is a subset of memory | ||
872 | * @base: base of region to check | ||
873 | * @size: size of region to check | ||
874 | * | ||
875 | * Check if the region [@base, @base+@size) is a subset of a memory block. | ||
876 | * | ||
877 | * RETURNS: | ||
878 | * 0 if false, non-zero if true | ||
879 | */ | ||
870 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) | 880 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
871 | { | 881 | { |
872 | int idx = memblock_search(&memblock.memory, base); | 882 | int idx = memblock_search(&memblock.memory, base); |
@@ -879,6 +889,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size | |||
879 | memblock.memory.regions[idx].size) >= end; | 889 | memblock.memory.regions[idx].size) >= end; |
880 | } | 890 | } |
881 | 891 | ||
892 | /** | ||
893 | * memblock_is_region_reserved - check if a region intersects reserved memory | ||
894 | * @base: base of region to check | ||
895 | * @size: size of region to check | ||
896 | * | ||
897 | * Check if the region [@base, @base+@size) intersects a reserved memory block. | ||
898 | * | ||
899 | * RETURNS: | ||
900 | * 0 if false, non-zero if true | ||
901 | */ | ||
882 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) | 902 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
883 | { | 903 | { |
884 | memblock_cap_size(base, &size); | 904 | memblock_cap_size(base, &size); |
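The two kernel-doc blocks above pin down the semantics: memblock_is_region_memory() wants the whole range to sit inside a memory block, while memblock_is_region_reserved() only asks whether the range touches a reservation. A hedged usage sketch follows; the surrounding function, addresses, and error handling are invented, only the two memblock calls come from the hunk.

/*
 * Hypothetical early-boot check that a firmware-described range is
 * plain, unreserved memory before it is used.
 */
#include <linux/memblock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init check_fw_region(phys_addr_t base, phys_addr_t size)
{
        if (!memblock_is_region_memory(base, size)) {
                pr_warn("region %pa+%pa is not covered by memory\n",
                        &base, &size);
                return -EINVAL;
        }

        if (memblock_is_region_reserved(base, size)) {
                pr_warn("region %pa+%pa overlaps a reservation\n",
                        &base, &size);
                return -EBUSY;
        }

        return 0;       /* usable: ordinary memory, nothing reserved in it */
}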
diff --git a/mm/swapfile.c b/mm/swapfile.c index de5bc51c4a66..71373d03fcee 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -1916,24 +1916,20 @@ static unsigned long read_swap_header(struct swap_info_struct *p, | |||
1916 | 1916 | ||
1917 | /* | 1917 | /* |
1918 | * Find out how many pages are allowed for a single swap | 1918 | * Find out how many pages are allowed for a single swap |
1919 | * device. There are three limiting factors: 1) the number | 1919 | * device. There are two limiting factors: 1) the number |
1920 | * of bits for the swap offset in the swp_entry_t type, and | 1920 | * of bits for the swap offset in the swp_entry_t type, and |
1921 | * 2) the number of bits in the swap pte as defined by the | 1921 | * 2) the number of bits in the swap pte as defined by the |
1922 | * the different architectures, and 3) the number of free bits | 1922 | * different architectures. In order to find the |
1923 | * in an exceptional radix_tree entry. In order to find the | ||
1924 | * largest possible bit mask, a swap entry with swap type 0 | 1923 | * largest possible bit mask, a swap entry with swap type 0 |
1925 | * and swap offset ~0UL is created, encoded to a swap pte, | 1924 | * and swap offset ~0UL is created, encoded to a swap pte, |
1926 | * decoded to a swp_entry_t again, and finally the swap | 1925 | * decoded to a swp_entry_t again, and finally the swap |
1927 | * offset is extracted. This will mask all the bits from | 1926 | * offset is extracted. This will mask all the bits from |
1928 | * the initial ~0UL mask that can't be encoded in either | 1927 | * the initial ~0UL mask that can't be encoded in either |
1929 | * the swp_entry_t or the architecture definition of a | 1928 | * the swp_entry_t or the architecture definition of a |
1930 | * swap pte. Then the same is done for a radix_tree entry. | 1929 | * swap pte. |
1931 | */ | 1930 | */ |
1932 | maxpages = swp_offset(pte_to_swp_entry( | 1931 | maxpages = swp_offset(pte_to_swp_entry( |
1933 | swp_entry_to_pte(swp_entry(0, ~0UL)))); | 1932 | swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; |
1934 | maxpages = swp_offset(radix_to_swp_entry( | ||
1935 | swp_to_radix_entry(swp_entry(0, maxpages)))) + 1; | ||
1936 | |||
1937 | if (maxpages > swap_header->info.last_page) { | 1933 | if (maxpages > swap_header->info.last_page) { |
1938 | maxpages = swap_header->info.last_page + 1; | 1934 | maxpages = swap_header->info.last_page + 1; |
1939 | /* p->max is an unsigned int: don't overflow it */ | 1935 | /* p->max is an unsigned int: don't overflow it */ |
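The comment block above explains the trick: encode a maximal swap offset, decode it again, and whatever survives the round trip is the largest offset the representation can hold; the radix-tree step is dropped because it no longer constrains the result. A stand-alone analogue of the same round-trip masking, with a made-up 27-bit offset field standing in for the architecture's swap pte layout.

/*
 * Round-trip an all-ones offset through a narrower encoding; the bits
 * that cannot be represented fall away, leaving the usable maximum.
 */
#include <stdio.h>

#define OFFSET_BITS 27UL        /* invented width for the example */

static unsigned long encode(unsigned long type, unsigned long offset)
{
        return (type << OFFSET_BITS) | (offset & ((1UL << OFFSET_BITS) - 1));
}

static unsigned long decode_offset(unsigned long entry)
{
        return entry & ((1UL << OFFSET_BITS) - 1);
}

int main(void)
{
        /* swap type 0, offset ~0UL, exactly as in read_swap_header() */
        unsigned long maxpages = decode_offset(encode(0, ~0UL)) + 1;

        printf("max pages per swap device: %lu\n", maxpages);
        return 0;
}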
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 0301b328cf0f..86852963b7f7 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1208 | if (addr->sat_addr.s_node == ATADDR_BCAST && | 1208 | if (addr->sat_addr.s_node == ATADDR_BCAST && |
1209 | !sock_flag(sk, SOCK_BROADCAST)) { | 1209 | !sock_flag(sk, SOCK_BROADCAST)) { |
1210 | #if 1 | 1210 | #if 1 |
1211 | printk(KERN_WARNING "%s is broken and did not set " | 1211 | pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n", |
1212 | "SO_BROADCAST. It will break when 2.2 is " | ||
1213 | "released.\n", | ||
1214 | current->comm); | 1212 | current->comm); |
1215 | #else | 1213 | #else |
1216 | return -EACCES; | 1214 | return -EACCES; |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 46e7f86acfc9..3e18af4dadc4 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) | |||
210 | } | 210 | } |
211 | 211 | ||
212 | if (sk->sk_state == BT_CONNECTED || !newsock || | 212 | if (sk->sk_state == BT_CONNECTED || !newsock || |
213 | test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) { | 213 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { |
214 | bt_accept_unlink(sk); | 214 | bt_accept_unlink(sk); |
215 | if (newsock) | 215 | if (newsock) |
216 | sock_graft(sk, newsock); | 216 | sock_graft(sk, newsock); |
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index ea5fb9fcc3f5..d23b6682f4e9 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -36,9 +36,6 @@ | |||
36 | #define TRACE_ON 1 | 36 | #define TRACE_ON 1 |
37 | #define TRACE_OFF 0 | 37 | #define TRACE_OFF 0 |
38 | 38 | ||
39 | static void send_dm_alert(struct work_struct *unused); | ||
40 | |||
41 | |||
42 | /* | 39 | /* |
43 | * Globals, our netlink socket pointer | 40 | * Globals, our netlink socket pointer |
44 | * and the work handle that will send up | 41 | * and the work handle that will send up |
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF; | |||
48 | static DEFINE_MUTEX(trace_state_mutex); | 45 | static DEFINE_MUTEX(trace_state_mutex); |
49 | 46 | ||
50 | struct per_cpu_dm_data { | 47 | struct per_cpu_dm_data { |
51 | struct work_struct dm_alert_work; | 48 | spinlock_t lock; |
52 | struct sk_buff __rcu *skb; | 49 | struct sk_buff *skb; |
53 | atomic_t dm_hit_count; | 50 | struct work_struct dm_alert_work; |
54 | struct timer_list send_timer; | 51 | struct timer_list send_timer; |
55 | int cpu; | ||
56 | }; | 52 | }; |
57 | 53 | ||
58 | struct dm_hw_stat_delta { | 54 | struct dm_hw_stat_delta { |
@@ -78,13 +74,13 @@ static int dm_delay = 1; | |||
78 | static unsigned long dm_hw_check_delta = 2*HZ; | 74 | static unsigned long dm_hw_check_delta = 2*HZ; |
79 | static LIST_HEAD(hw_stats_list); | 75 | static LIST_HEAD(hw_stats_list); |
80 | 76 | ||
81 | static void reset_per_cpu_data(struct per_cpu_dm_data *data) | 77 | static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) |
82 | { | 78 | { |
83 | size_t al; | 79 | size_t al; |
84 | struct net_dm_alert_msg *msg; | 80 | struct net_dm_alert_msg *msg; |
85 | struct nlattr *nla; | 81 | struct nlattr *nla; |
86 | struct sk_buff *skb; | 82 | struct sk_buff *skb; |
87 | struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1); | 83 | unsigned long flags; |
88 | 84 | ||
89 | al = sizeof(struct net_dm_alert_msg); | 85 | al = sizeof(struct net_dm_alert_msg); |
90 | al += dm_hit_limit * sizeof(struct net_dm_drop_point); | 86 | al += dm_hit_limit * sizeof(struct net_dm_drop_point); |
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data) | |||
99 | sizeof(struct net_dm_alert_msg)); | 95 | sizeof(struct net_dm_alert_msg)); |
100 | msg = nla_data(nla); | 96 | msg = nla_data(nla); |
101 | memset(msg, 0, al); | 97 | memset(msg, 0, al); |
102 | } else | 98 | } else { |
103 | schedule_work_on(data->cpu, &data->dm_alert_work); | 99 | mod_timer(&data->send_timer, jiffies + HZ / 10); |
104 | |||
105 | /* | ||
106 | * Don't need to lock this, since we are guaranteed to only | ||
107 | * run this on a single cpu at a time. | ||
108 | * Note also that we only update data->skb if the old and new skb | ||
109 | * pointers don't match. This ensures that we don't continually call | ||
110 | * synchornize_rcu if we repeatedly fail to alloc a new netlink message. | ||
111 | */ | ||
112 | if (skb != oskb) { | ||
113 | rcu_assign_pointer(data->skb, skb); | ||
114 | |||
115 | synchronize_rcu(); | ||
116 | |||
117 | atomic_set(&data->dm_hit_count, dm_hit_limit); | ||
118 | } | 100 | } |
119 | 101 | ||
102 | spin_lock_irqsave(&data->lock, flags); | ||
103 | swap(data->skb, skb); | ||
104 | spin_unlock_irqrestore(&data->lock, flags); | ||
105 | |||
106 | return skb; | ||
120 | } | 107 | } |
121 | 108 | ||
122 | static void send_dm_alert(struct work_struct *unused) | 109 | static void send_dm_alert(struct work_struct *work) |
123 | { | 110 | { |
124 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
125 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 112 | struct per_cpu_dm_data *data; |
126 | 113 | ||
127 | WARN_ON_ONCE(data->cpu != smp_processor_id()); | 114 | data = container_of(work, struct per_cpu_dm_data, dm_alert_work); |
128 | 115 | ||
129 | /* | 116 | skb = reset_per_cpu_data(data); |
130 | * Grab the skb we're about to send | ||
131 | */ | ||
132 | skb = rcu_dereference_protected(data->skb, 1); | ||
133 | |||
134 | /* | ||
135 | * Replace it with a new one | ||
136 | */ | ||
137 | reset_per_cpu_data(data); | ||
138 | 117 | ||
139 | /* | ||
140 | * Ship it! | ||
141 | */ | ||
142 | if (skb) | 118 | if (skb) |
143 | genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); | 119 | genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); |
144 | |||
145 | put_cpu_var(dm_cpu_data); | ||
146 | } | 120 | } |
147 | 121 | ||
148 | /* | 122 | /* |
149 | * This is the timer function to delay the sending of an alert | 123 | * This is the timer function to delay the sending of an alert |
150 | * in the event that more drops will arrive during the | 124 | * in the event that more drops will arrive during the |
151 | * hysteresis period. Note that it operates under the timer interrupt | 125 | * hysteresis period. |
152 | * so we don't need to disable preemption here | ||
153 | */ | 126 | */ |
154 | static void sched_send_work(unsigned long unused) | 127 | static void sched_send_work(unsigned long _data) |
155 | { | 128 | { |
156 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 129 | struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data; |
157 | |||
158 | schedule_work_on(smp_processor_id(), &data->dm_alert_work); | ||
159 | 130 | ||
160 | put_cpu_var(dm_cpu_data); | 131 | schedule_work(&data->dm_alert_work); |
161 | } | 132 | } |
162 | 133 | ||
163 | static void trace_drop_common(struct sk_buff *skb, void *location) | 134 | static void trace_drop_common(struct sk_buff *skb, void *location) |
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location) | |||
167 | struct nlattr *nla; | 138 | struct nlattr *nla; |
168 | int i; | 139 | int i; |
169 | struct sk_buff *dskb; | 140 | struct sk_buff *dskb; |
170 | struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); | 141 | struct per_cpu_dm_data *data; |
171 | 142 | unsigned long flags; | |
172 | 143 | ||
173 | rcu_read_lock(); | 144 | local_irq_save(flags); |
174 | dskb = rcu_dereference(data->skb); | 145 | data = &__get_cpu_var(dm_cpu_data); |
146 | spin_lock(&data->lock); | ||
147 | dskb = data->skb; | ||
175 | 148 | ||
176 | if (!dskb) | 149 | if (!dskb) |
177 | goto out; | 150 | goto out; |
178 | 151 | ||
179 | if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { | ||
180 | /* | ||
181 | * we're already at zero, discard this hit | ||
182 | */ | ||
183 | goto out; | ||
184 | } | ||
185 | |||
186 | nlh = (struct nlmsghdr *)dskb->data; | 152 | nlh = (struct nlmsghdr *)dskb->data; |
187 | nla = genlmsg_data(nlmsg_data(nlh)); | 153 | nla = genlmsg_data(nlmsg_data(nlh)); |
188 | msg = nla_data(nla); | 154 | msg = nla_data(nla); |
189 | for (i = 0; i < msg->entries; i++) { | 155 | for (i = 0; i < msg->entries; i++) { |
190 | if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { | 156 | if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { |
191 | msg->points[i].count++; | 157 | msg->points[i].count++; |
192 | atomic_inc(&data->dm_hit_count); | ||
193 | goto out; | 158 | goto out; |
194 | } | 159 | } |
195 | } | 160 | } |
196 | 161 | if (msg->entries == dm_hit_limit) | |
162 | goto out; | ||
197 | /* | 163 | /* |
198 | * We need to create a new entry | 164 | * We need to create a new entry |
199 | */ | 165 | */ |
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location) | |||
205 | 171 | ||
206 | if (!timer_pending(&data->send_timer)) { | 172 | if (!timer_pending(&data->send_timer)) { |
207 | data->send_timer.expires = jiffies + dm_delay * HZ; | 173 | data->send_timer.expires = jiffies + dm_delay * HZ; |
208 | add_timer_on(&data->send_timer, smp_processor_id()); | 174 | add_timer(&data->send_timer); |
209 | } | 175 | } |
210 | 176 | ||
211 | out: | 177 | out: |
212 | rcu_read_unlock(); | 178 | spin_unlock_irqrestore(&data->lock, flags); |
213 | put_cpu_var(dm_cpu_data); | ||
214 | return; | ||
215 | } | 179 | } |
216 | 180 | ||
217 | static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) | 181 | static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) |
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void) | |||
418 | 382 | ||
419 | for_each_possible_cpu(cpu) { | 383 | for_each_possible_cpu(cpu) { |
420 | data = &per_cpu(dm_cpu_data, cpu); | 384 | data = &per_cpu(dm_cpu_data, cpu); |
421 | data->cpu = cpu; | ||
422 | INIT_WORK(&data->dm_alert_work, send_dm_alert); | 385 | INIT_WORK(&data->dm_alert_work, send_dm_alert); |
423 | init_timer(&data->send_timer); | 386 | init_timer(&data->send_timer); |
424 | data->send_timer.data = cpu; | 387 | data->send_timer.data = (unsigned long)data; |
425 | data->send_timer.function = sched_send_work; | 388 | data->send_timer.function = sched_send_work; |
389 | spin_lock_init(&data->lock); | ||
426 | reset_per_cpu_data(data); | 390 | reset_per_cpu_data(data); |
427 | } | 391 | } |
428 | 392 | ||
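The drop_monitor rework above replaces the RCU/atomic scheme with a per-CPU spinlock: producers update the current alert buffer under the lock, and the worker swaps in a fresh buffer under the same lock before sending the old one outside it. A user-space sketch of that swap-under-lock pattern; struct alert_buf and the send step are invented for the example.

/*
 * Producers touch the shared buffer only under the lock; the consumer
 * exchanges it for a fresh one under the lock and works on the old
 * buffer with the lock dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct alert_buf {
        int entries;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct alert_buf *cur;

static void record_drop(void)
{
        pthread_mutex_lock(&lock);
        if (cur)
                cur->entries++;         /* cheap work only, lock is held */
        pthread_mutex_unlock(&lock);
}

static struct alert_buf *swap_in_fresh(void)
{
        struct alert_buf *fresh = calloc(1, sizeof(*fresh));
        struct alert_buf *old;

        pthread_mutex_lock(&lock);
        old = cur;
        cur = fresh;                    /* may be NULL if calloc failed */
        pthread_mutex_unlock(&lock);

        return old;                     /* caller sends and frees this one */
}

int main(void)
{
        cur = calloc(1, sizeof(*cur));
        record_drop();
        record_drop();

        struct alert_buf *old = swap_in_fresh();
        printf("sending alert with %d entries\n", old ? old->entries : 0);
        free(old);
        free(cur);                      /* single-threaded teardown */
        return 0;
}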
diff --git a/net/core/filter.c b/net/core/filter.c index a3eddb515d1b..d4ce2dc712e3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp) | |||
616 | /** | 616 | /** |
617 | * sk_unattached_filter_create - create an unattached filter | 617 | * sk_unattached_filter_create - create an unattached filter |
618 | * @fprog: the filter program | 618 | * @fprog: the filter program |
619 | * @sk: the socket to use | 619 | * @pfp: the unattached filter that is created |
620 | * | 620 | * |
621 | * Create a filter independent ofr any socket. We first run some | 621 | * Create a filter independent of any socket. We first run some |
622 | * sanity checks on it to make sure it does not explode on us later. | 622 | * sanity checks on it to make sure it does not explode on us later. |
623 | * If an error occurs or there is insufficient memory for the filter | 623 | * If an error occurs or there is insufficient memory for the filter |
624 | * a negative errno code is returned. On success the return is zero. | 624 | * a negative errno code is returned. On success the return is zero. |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index eb09f8bbbf07..d81d026138f0 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
2219 | rcu_read_lock_bh(); | 2219 | rcu_read_lock_bh(); |
2220 | nht = rcu_dereference_bh(tbl->nht); | 2220 | nht = rcu_dereference_bh(tbl->nht); |
2221 | 2221 | ||
2222 | for (h = 0; h < (1 << nht->hash_shift); h++) { | 2222 | for (h = s_h; h < (1 << nht->hash_shift); h++) { |
2223 | if (h < s_h) | ||
2224 | continue; | ||
2225 | if (h > s_h) | 2223 | if (h > s_h) |
2226 | s_idx = 0; | 2224 | s_idx = 0; |
2227 | for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; | 2225 | for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; |
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
2260 | 2258 | ||
2261 | read_lock_bh(&tbl->lock); | 2259 | read_lock_bh(&tbl->lock); |
2262 | 2260 | ||
2263 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { | 2261 | for (h = s_h; h <= PNEIGH_HASHMASK; h++) { |
2264 | if (h < s_h) | ||
2265 | continue; | ||
2266 | if (h > s_h) | 2262 | if (h > s_h) |
2267 | s_idx = 0; | 2263 | s_idx = 0; |
2268 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { | 2264 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { |
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
2297 | struct neigh_table *tbl; | 2293 | struct neigh_table *tbl; |
2298 | int t, family, s_t; | 2294 | int t, family, s_t; |
2299 | int proxy = 0; | 2295 | int proxy = 0; |
2300 | int err = 0; | 2296 | int err; |
2301 | 2297 | ||
2302 | read_lock(&neigh_tbl_lock); | 2298 | read_lock(&neigh_tbl_lock); |
2303 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; | 2299 | family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; |
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
2311 | 2307 | ||
2312 | s_t = cb->args[0]; | 2308 | s_t = cb->args[0]; |
2313 | 2309 | ||
2314 | for (tbl = neigh_tables, t = 0; tbl && (err >= 0); | 2310 | for (tbl = neigh_tables, t = 0; tbl; |
2315 | tbl = tbl->next, t++) { | 2311 | tbl = tbl->next, t++) { |
2316 | if (t < s_t || (family && tbl->family != family)) | 2312 | if (t < s_t || (family && tbl->family != family)) |
2317 | continue; | 2313 | continue; |
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) | |||
2322 | err = pneigh_dump_table(tbl, skb, cb); | 2318 | err = pneigh_dump_table(tbl, skb, cb); |
2323 | else | 2319 | else |
2324 | err = neigh_dump_table(tbl, skb, cb); | 2320 | err = neigh_dump_table(tbl, skb, cb); |
2321 | if (err < 0) | ||
2322 | break; | ||
2325 | } | 2323 | } |
2326 | read_unlock(&neigh_tbl_lock); | 2324 | read_unlock(&neigh_tbl_lock); |
2327 | 2325 | ||
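Both dump loops above now resume directly at the saved bucket instead of walking from zero and skipping, and the table loop stops at the first error so the partial position can be reported back to the caller. A small user-space sketch of that resumable-dump shape; dump_table() and its buffer-full condition are invented.

/*
 * Resume from the saved table index and bail out on the first error,
 * so the next call continues where this one stopped.
 */
#include <stdio.h>

#define NTABLES 4

static int dump_table(int t, int *resume_idx)
{
        static int full_once = 1;

        /* pretend the buffer fills up the first time we hit table 2 */
        if (t == 2 && full_once) {
                full_once = 0;
                *resume_idx = 10;
                return -1;
        }
        *resume_idx = 0;
        return 0;
}

static int dump_all(int *s_t, int *s_idx)
{
        int t, err = 0;

        for (t = *s_t; t < NTABLES; t++) {      /* resume, don't skip */
                err = dump_table(t, s_idx);
                if (err < 0)
                        break;                  /* come back here later */
        }
        *s_t = t;
        return err;
}

int main(void)
{
        int s_t = 0, s_idx = 0;

        while (dump_all(&s_t, &s_idx) < 0)
                printf("partial dump, resuming at table %d idx %d\n",
                       s_t, s_idx);
        printf("dump complete\n");
        return 0;
}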
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 3d84fb9d8873..f9f40b932e4b 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev); | |||
362 | 362 | ||
363 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | 363 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) |
364 | { | 364 | { |
365 | int total_len, eth_len, ip_len, udp_len; | 365 | int total_len, ip_len, udp_len; |
366 | struct sk_buff *skb; | 366 | struct sk_buff *skb; |
367 | struct udphdr *udph; | 367 | struct udphdr *udph; |
368 | struct iphdr *iph; | 368 | struct iphdr *iph; |
369 | struct ethhdr *eth; | 369 | struct ethhdr *eth; |
370 | 370 | ||
371 | udp_len = len + sizeof(*udph); | 371 | udp_len = len + sizeof(*udph); |
372 | ip_len = eth_len = udp_len + sizeof(*iph); | 372 | ip_len = udp_len + sizeof(*iph); |
373 | total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; | 373 | total_len = ip_len + LL_RESERVED_SPACE(np->dev); |
374 | 374 | ||
375 | skb = find_skb(np, total_len, total_len - len); | 375 | skb = find_skb(np, total_len + np->dev->needed_tailroom, |
376 | total_len - len); | ||
376 | if (!skb) | 377 | if (!skb) |
377 | return; | 378 | return; |
378 | 379 | ||
379 | skb_copy_to_linear_data(skb, msg, len); | 380 | skb_copy_to_linear_data(skb, msg, len); |
380 | skb->len += len; | 381 | skb_put(skb, len); |
381 | 382 | ||
382 | skb_push(skb, sizeof(*udph)); | 383 | skb_push(skb, sizeof(*udph)); |
383 | skb_reset_transport_header(skb); | 384 | skb_reset_transport_header(skb); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 016694d62484..d78671e9d545 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial); | |||
3361 | * @to: prior buffer | 3361 | * @to: prior buffer |
3362 | * @from: buffer to add | 3362 | * @from: buffer to add |
3363 | * @fragstolen: pointer to boolean | 3363 | * @fragstolen: pointer to boolean |
3364 | * | 3364 | * @delta_truesize: how much more was allocated than was requested |
3365 | */ | 3365 | */ |
3366 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, | 3366 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
3367 | bool *fragstolen, int *delta_truesize) | 3367 | bool *fragstolen, int *delta_truesize) |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index d4d61b694fab..dfba343b2509 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) | |||
560 | } | 560 | } |
561 | EXPORT_SYMBOL(inet_peer_xrlim_allow); | 561 | EXPORT_SYMBOL(inet_peer_xrlim_allow); |
562 | 562 | ||
563 | static void inetpeer_inval_rcu(struct rcu_head *head) | ||
564 | { | ||
565 | struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu); | ||
566 | |||
567 | spin_lock_bh(&gc_lock); | ||
568 | list_add_tail(&p->gc_list, &gc_list); | ||
569 | spin_unlock_bh(&gc_lock); | ||
570 | |||
571 | schedule_delayed_work(&gc_work, gc_delay); | ||
572 | } | ||
573 | |||
563 | void inetpeer_invalidate_tree(int family) | 574 | void inetpeer_invalidate_tree(int family) |
564 | { | 575 | { |
565 | struct inet_peer *old, *new, *prev; | 576 | struct inet_peer *old, *new, *prev; |
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family) | |||
576 | prev = cmpxchg(&base->root, old, new); | 587 | prev = cmpxchg(&base->root, old, new); |
577 | if (prev == old) { | 588 | if (prev == old) { |
578 | base->total = 0; | 589 | base->total = 0; |
579 | spin_lock(&gc_lock); | 590 | call_rcu(&prev->gc_rcu, inetpeer_inval_rcu); |
580 | list_add_tail(&prev->gc_list, &gc_list); | ||
581 | spin_unlock(&gc_lock); | ||
582 | schedule_delayed_work(&gc_work, gc_delay); | ||
583 | } | 591 | } |
584 | 592 | ||
585 | out: | 593 | out: |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index e5c44fc586ab..ab09b126423c 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
44 | struct ip_options *opt = &(IPCB(skb)->opt); | 44 | struct ip_options *opt = &(IPCB(skb)->opt); |
45 | 45 | ||
46 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 46 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
47 | IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
47 | 48 | ||
48 | if (unlikely(opt->optlen)) | 49 | if (unlikely(opt->optlen)) |
49 | ip_forward_options(skb); | 50 | ip_forward_options(skb); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index a9e519ad6db5..c94bbc6f2ba3 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1574 | struct ip_options *opt = &(IPCB(skb)->opt); | 1574 | struct ip_options *opt = &(IPCB(skb)->opt); |
1575 | 1575 | ||
1576 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 1576 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
1577 | IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
1577 | 1578 | ||
1578 | if (unlikely(opt->optlen)) | 1579 | if (unlikely(opt->optlen)) |
1579 | ip_forward_options(skb); | 1580 | ip_forward_options(skb); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 0c220a416626..74c21b924a79 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg) | |||
1561 | neigh_flags = neigh->flags; | 1561 | neigh_flags = neigh->flags; |
1562 | neigh_release(neigh); | 1562 | neigh_release(neigh); |
1563 | } | 1563 | } |
1564 | if (neigh_flags & NTF_ROUTER) { | 1564 | if (!(neigh_flags & NTF_ROUTER)) { |
1565 | RT6_TRACE("purging route %p via non-router but gateway\n", | 1565 | RT6_TRACE("purging route %p via non-router but gateway\n", |
1566 | rt); | 1566 | rt); |
1567 | return -1; | 1567 | return -1; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 17b8c67998bb..decc21d19c53 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb) | |||
526 | hdr->hop_limit--; | 526 | hdr->hop_limit--; |
527 | 527 | ||
528 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); | 528 | IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
529 | IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); | ||
529 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, | 530 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, |
530 | ip6_forward_finish); | 531 | ip6_forward_finish); |
531 | 532 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index b15dc08643a4..461e47c8e956 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb) | |||
1886 | { | 1886 | { |
1887 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | 1887 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), |
1888 | IPSTATS_MIB_OUTFORWDATAGRAMS); | 1888 | IPSTATS_MIB_OUTFORWDATAGRAMS); |
1889 | IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), | ||
1890 | IPSTATS_MIB_OUTOCTETS, skb->len); | ||
1889 | return dst_output(skb); | 1891 | return dst_output(skb); |
1890 | } | 1892 | } |
1891 | 1893 | ||
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 443591d629ca..185f12f4a5fa 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
@@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session) | |||
162 | if (dev) { | 162 | if (dev) { |
163 | unregister_netdev(dev); | 163 | unregister_netdev(dev); |
164 | spriv->dev = NULL; | 164 | spriv->dev = NULL; |
165 | module_put(THIS_MODULE); | ||
165 | } | 166 | } |
166 | } | 167 | } |
167 | } | 168 | } |
@@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p | |||
249 | if (rc < 0) | 250 | if (rc < 0) |
250 | goto out_del_dev; | 251 | goto out_del_dev; |
251 | 252 | ||
253 | __module_get(THIS_MODULE); | ||
252 | /* Must be done after register_netdev() */ | 254 | /* Must be done after register_netdev() */ |
253 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | 255 | strlcpy(session->ifname, dev->name, IFNAMSIZ); |
254 | 256 | ||
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 70614e7affab..61d8b75d2686 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
464 | sk->sk_bound_dev_if); | 464 | sk->sk_bound_dev_if); |
465 | if (IS_ERR(rt)) | 465 | if (IS_ERR(rt)) |
466 | goto no_route; | 466 | goto no_route; |
467 | if (connected) | 467 | if (connected) { |
468 | sk_setup_caps(sk, &rt->dst); | 468 | sk_setup_caps(sk, &rt->dst); |
469 | else | 469 | } else { |
470 | dst_release(&rt->dst); /* safe since we hold rcu_read_lock */ | 470 | skb_dst_set(skb, &rt->dst); |
471 | goto xmit; | ||
472 | } | ||
471 | } | 473 | } |
472 | 474 | ||
473 | /* We dont need to clone dst here, it is guaranteed to not disappear. | 475 | /* We dont need to clone dst here, it is guaranteed to not disappear. |
@@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
475 | */ | 477 | */ |
476 | skb_dst_set_noref(skb, &rt->dst); | 478 | skb_dst_set_noref(skb, &rt->dst); |
477 | 479 | ||
480 | xmit: | ||
478 | /* Queue the packet to IP for output */ | 481 | /* Queue the packet to IP for output */ |
479 | rc = ip_queue_xmit(skb, &inet->cork.fl); | 482 | rc = ip_queue_xmit(skb, &inet->cork.fl); |
480 | rcu_read_unlock(); | 483 | rcu_read_unlock(); |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 26ddb699d693..c649188314cc 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) | |||
145 | struct tid_ampdu_rx *tid_rx; | 145 | struct tid_ampdu_rx *tid_rx; |
146 | unsigned long timeout; | 146 | unsigned long timeout; |
147 | 147 | ||
148 | rcu_read_lock(); | ||
148 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); | 149 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); |
149 | if (!tid_rx) | 150 | if (!tid_rx) { |
151 | rcu_read_unlock(); | ||
150 | return; | 152 | return; |
153 | } | ||
151 | 154 | ||
152 | timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); | 155 | timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); |
153 | if (time_is_after_jiffies(timeout)) { | 156 | if (time_is_after_jiffies(timeout)) { |
154 | mod_timer(&tid_rx->session_timer, timeout); | 157 | mod_timer(&tid_rx->session_timer, timeout); |
158 | rcu_read_unlock(); | ||
155 | return; | 159 | return; |
156 | } | 160 | } |
161 | rcu_read_unlock(); | ||
157 | 162 | ||
158 | #ifdef CONFIG_MAC80211_HT_DEBUG | 163 | #ifdef CONFIG_MAC80211_HT_DEBUG |
159 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 164 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 495831ee48f1..e9cecca5c44d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, | |||
533 | sinfo.filled = 0; | 533 | sinfo.filled = 0; |
534 | sta_set_sinfo(sta, &sinfo); | 534 | sta_set_sinfo(sta, &sinfo); |
535 | 535 | ||
536 | if (sinfo.filled | STATION_INFO_TX_BITRATE) | 536 | if (sinfo.filled & STATION_INFO_TX_BITRATE) |
537 | data[i] = 100000 * | 537 | data[i] = 100000 * |
538 | cfg80211_calculate_bitrate(&sinfo.txrate); | 538 | cfg80211_calculate_bitrate(&sinfo.txrate); |
539 | i++; | 539 | i++; |
540 | if (sinfo.filled | STATION_INFO_RX_BITRATE) | 540 | if (sinfo.filled & STATION_INFO_RX_BITRATE) |
541 | data[i] = 100000 * | 541 | data[i] = 100000 * |
542 | cfg80211_calculate_bitrate(&sinfo.rxrate); | 542 | cfg80211_calculate_bitrate(&sinfo.rxrate); |
543 | i++; | 543 | i++; |
544 | 544 | ||
545 | if (sinfo.filled | STATION_INFO_SIGNAL_AVG) | 545 | if (sinfo.filled & STATION_INFO_SIGNAL_AVG) |
546 | data[i] = (u8)sinfo.signal_avg; | 546 | data[i] = (u8)sinfo.signal_avg; |
547 | i++; | 547 | i++; |
548 | } else { | 548 | } else { |
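The three tests above used '|' where '&' was intended, so every statistic appeared to be filled regardless of what sta_set_sinfo() reported. A stand-alone illustration of the difference; the flag values here are made up for the example.

/*
 * '|' with a non-zero flag is always non-zero; '&' actually tests the bit.
 */
#include <stdio.h>

#define STATION_INFO_TX_BITRATE  0x0001         /* illustrative values */
#define STATION_INFO_RX_BITRATE  0x0002

int main(void)
{
        unsigned int filled = STATION_INFO_RX_BITRATE;  /* only RX set */

        /* buggy form: non-zero for any flag value, so always "set" */
        printf("| test: %d\n", (filled | STATION_INFO_TX_BITRATE) != 0);

        /* fixed form: true only when the bit is actually present */
        printf("& test: %d\n", (filled & STATION_INFO_TX_BITRATE) != 0);
        return 0;
}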
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d4c19a7773db..8664111d0566 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
637 | ieee80211_configure_filter(local); | 637 | ieee80211_configure_filter(local); |
638 | break; | 638 | break; |
639 | default: | 639 | default: |
640 | mutex_lock(&local->mtx); | ||
641 | if (local->hw_roc_dev == sdata->dev && | ||
642 | local->hw_roc_channel) { | ||
643 | /* ignore return value since this is racy */ | ||
644 | drv_cancel_remain_on_channel(local); | ||
645 | ieee80211_queue_work(&local->hw, &local->hw_roc_done); | ||
646 | } | ||
647 | mutex_unlock(&local->mtx); | ||
648 | |||
649 | flush_work(&local->hw_roc_start); | ||
650 | flush_work(&local->hw_roc_done); | ||
651 | |||
640 | flush_work(&sdata->work); | 652 | flush_work(&sdata->work); |
641 | /* | 653 | /* |
642 | * When we get here, the interface is marked down. | 654 | * When we get here, the interface is marked down. |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 04c306308987..91d84cc77bbf 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |||
1220 | sdata->vif.bss_conf.qos = true; | 1220 | sdata->vif.bss_conf.qos = true; |
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) | ||
1224 | { | ||
1225 | lockdep_assert_held(&sdata->local->mtx); | ||
1226 | |||
1227 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | ||
1228 | IEEE80211_STA_BEACON_POLL); | ||
1229 | ieee80211_run_deferred_scan(sdata->local); | ||
1230 | } | ||
1231 | |||
1232 | static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) | ||
1233 | { | ||
1234 | mutex_lock(&sdata->local->mtx); | ||
1235 | __ieee80211_stop_poll(sdata); | ||
1236 | mutex_unlock(&sdata->local->mtx); | ||
1237 | } | ||
1238 | |||
1223 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | 1239 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, |
1224 | u16 capab, bool erp_valid, u8 erp) | 1240 | u16 capab, bool erp_valid, u8 erp) |
1225 | { | 1241 | { |
@@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
1285 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; | 1301 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; |
1286 | 1302 | ||
1287 | /* just to be sure */ | 1303 | /* just to be sure */ |
1288 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 1304 | ieee80211_stop_poll(sdata); |
1289 | IEEE80211_STA_BEACON_POLL); | ||
1290 | 1305 | ||
1291 | ieee80211_led_assoc(local, 1); | 1306 | ieee80211_led_assoc(local, 1); |
1292 | 1307 | ||
@@ -1456,8 +1471,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) | |||
1456 | return; | 1471 | return; |
1457 | } | 1472 | } |
1458 | 1473 | ||
1459 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 1474 | __ieee80211_stop_poll(sdata); |
1460 | IEEE80211_STA_BEACON_POLL); | ||
1461 | 1475 | ||
1462 | mutex_lock(&local->iflist_mtx); | 1476 | mutex_lock(&local->iflist_mtx); |
1463 | ieee80211_recalc_ps(local, -1); | 1477 | ieee80211_recalc_ps(local, -1); |
@@ -1477,7 +1491,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) | |||
1477 | round_jiffies_up(jiffies + | 1491 | round_jiffies_up(jiffies + |
1478 | IEEE80211_CONNECTION_IDLE_TIME)); | 1492 | IEEE80211_CONNECTION_IDLE_TIME)); |
1479 | out: | 1493 | out: |
1480 | ieee80211_run_deferred_scan(local); | ||
1481 | mutex_unlock(&local->mtx); | 1494 | mutex_unlock(&local->mtx); |
1482 | } | 1495 | } |
1483 | 1496 | ||
@@ -2408,7 +2421,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2408 | net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", | 2421 | net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", |
2409 | sdata->name); | 2422 | sdata->name); |
2410 | #endif | 2423 | #endif |
2424 | mutex_lock(&local->mtx); | ||
2411 | ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; | 2425 | ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; |
2426 | ieee80211_run_deferred_scan(local); | ||
2427 | mutex_unlock(&local->mtx); | ||
2428 | |||
2412 | mutex_lock(&local->iflist_mtx); | 2429 | mutex_lock(&local->iflist_mtx); |
2413 | ieee80211_recalc_ps(local, -1); | 2430 | ieee80211_recalc_ps(local, -1); |
2414 | mutex_unlock(&local->iflist_mtx); | 2431 | mutex_unlock(&local->iflist_mtx); |
@@ -2595,8 +2612,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, | |||
2595 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2612 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2596 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; | 2613 | u8 frame_buf[DEAUTH_DISASSOC_LEN]; |
2597 | 2614 | ||
2598 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 2615 | ieee80211_stop_poll(sdata); |
2599 | IEEE80211_STA_BEACON_POLL); | ||
2600 | 2616 | ||
2601 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, | 2617 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, |
2602 | false, frame_buf); | 2618 | false, frame_buf); |
@@ -2874,8 +2890,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | |||
2874 | u32 flags; | 2890 | u32 flags; |
2875 | 2891 | ||
2876 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 2892 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
2877 | sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | | 2893 | __ieee80211_stop_poll(sdata); |
2878 | IEEE80211_STA_CONNECTION_POLL); | ||
2879 | 2894 | ||
2880 | /* let's probe the connection once */ | 2895 | /* let's probe the connection once */ |
2881 | flags = sdata->local->hw.flags; | 2896 | flags = sdata->local->hw.flags; |
@@ -2944,7 +2959,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) | |||
2944 | if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) | 2959 | if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) |
2945 | add_timer(&ifmgd->chswitch_timer); | 2960 | add_timer(&ifmgd->chswitch_timer); |
2946 | ieee80211_sta_reset_beacon_monitor(sdata); | 2961 | ieee80211_sta_reset_beacon_monitor(sdata); |
2962 | |||
2963 | mutex_lock(&sdata->local->mtx); | ||
2947 | ieee80211_restart_sta_timer(sdata); | 2964 | ieee80211_restart_sta_timer(sdata); |
2965 | mutex_unlock(&sdata->local->mtx); | ||
2948 | } | 2966 | } |
2949 | #endif | 2967 | #endif |
2950 | 2968 | ||
@@ -3106,7 +3124,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
3106 | } | 3124 | } |
3107 | 3125 | ||
3108 | local->oper_channel = cbss->channel; | 3126 | local->oper_channel = cbss->channel; |
3109 | ieee80211_hw_config(local, 0); | 3127 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); |
3110 | 3128 | ||
3111 | if (!have_sta) { | 3129 | if (!have_sta) { |
3112 | u32 rates = 0, basic_rates = 0; | 3130 | u32 rates = 0, basic_rates = 0; |
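The new helpers above follow the usual locked/unlocked naming convention: __ieee80211_stop_poll() asserts that the caller already holds local->mtx, and ieee80211_stop_poll() is the self-locking wrapper for callers that do not. A reduced sketch of that convention with a pthread mutex; the body of the poll-stop work is invented.

/*
 * Double-underscore variant assumes the lock is held; the plain
 * variant takes and releases it around the same body.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned int poll_flags = 0x3;

static void __stop_poll(void)
{
        /* caller must hold mtx, mirroring lockdep_assert_held() */
        printf("stopping poll, flags were %#x\n", poll_flags);
        poll_flags = 0;
}

static void stop_poll(void)
{
        pthread_mutex_lock(&mtx);
        __stop_poll();
        pthread_mutex_unlock(&mtx);
}

int main(void)
{
        stop_poll();                    /* caller without the lock */

        pthread_mutex_lock(&mtx);       /* caller already inside mtx */
        __stop_poll();
        pthread_mutex_unlock(&mtx);
        return 0;
}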
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index f054e94901a2..935aa4b6deee 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work) | |||
234 | return; | 234 | return; |
235 | } | 235 | } |
236 | 236 | ||
237 | /* was never transmitted */ | ||
238 | if (local->hw_roc_skb) { | ||
239 | u64 cookie; | ||
240 | |||
241 | cookie = local->hw_roc_cookie ^ 2; | ||
242 | |||
243 | cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie, | ||
244 | local->hw_roc_skb->data, | ||
245 | local->hw_roc_skb->len, false, | ||
246 | GFP_KERNEL); | ||
247 | |||
248 | kfree_skb(local->hw_roc_skb); | ||
249 | local->hw_roc_skb = NULL; | ||
250 | local->hw_roc_skb_for_status = NULL; | ||
251 | } | ||
252 | |||
237 | if (!local->hw_roc_for_tx) | 253 | if (!local->hw_roc_for_tx) |
238 | cfg80211_remain_on_channel_expired(local->hw_roc_dev, | 254 | cfg80211_remain_on_channel_expired(local->hw_roc_dev, |
239 | local->hw_roc_cookie, | 255 | local->hw_roc_cookie, |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index f5b1638fbf80..de455f8bbb91 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) | |||
378 | /* make the station visible */ | 378 | /* make the station visible */ |
379 | sta_info_hash_add(local, sta); | 379 | sta_info_hash_add(local, sta); |
380 | 380 | ||
381 | list_add(&sta->list, &local->sta_list); | 381 | list_add_rcu(&sta->list, &local->sta_list); |
382 | 382 | ||
383 | set_sta_flag(sta, WLAN_STA_INSERTED); | 383 | set_sta_flag(sta, WLAN_STA_INSERTED); |
384 | 384 | ||
@@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
688 | if (ret) | 688 | if (ret) |
689 | return ret; | 689 | return ret; |
690 | 690 | ||
691 | list_del(&sta->list); | 691 | list_del_rcu(&sta->list); |
692 | 692 | ||
693 | mutex_lock(&local->key_mtx); | 693 | mutex_lock(&local->key_mtx); |
694 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) | 694 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 847215bb2a6f..e453212fa17f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1737 | __le16 fc; | 1737 | __le16 fc; |
1738 | struct ieee80211_hdr hdr; | 1738 | struct ieee80211_hdr hdr; |
1739 | struct ieee80211s_hdr mesh_hdr __maybe_unused; | 1739 | struct ieee80211s_hdr mesh_hdr __maybe_unused; |
1740 | struct mesh_path __maybe_unused *mppath = NULL; | 1740 | struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; |
1741 | const u8 *encaps_data; | 1741 | const u8 *encaps_data; |
1742 | int encaps_len, skip_header_bytes; | 1742 | int encaps_len, skip_header_bytes; |
1743 | int nh_pos, h_pos; | 1743 | int nh_pos, h_pos; |
@@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1803 | goto fail; | 1803 | goto fail; |
1804 | } | 1804 | } |
1805 | rcu_read_lock(); | 1805 | rcu_read_lock(); |
1806 | if (!is_multicast_ether_addr(skb->data)) | 1806 | if (!is_multicast_ether_addr(skb->data)) { |
1807 | mppath = mpp_path_lookup(skb->data, sdata); | 1807 | mpath = mesh_path_lookup(skb->data, sdata); |
1808 | if (!mpath) | ||
1809 | mppath = mpp_path_lookup(skb->data, sdata); | ||
1810 | } | ||
1808 | 1811 | ||
1809 | /* | 1812 | /* |
1810 | * Use address extension if it is a packet from | 1813 | * Use address extension if it is a packet from |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a44c6807df01..8dd4712620ff 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1271 | enum ieee80211_sta_state state; | 1271 | enum ieee80211_sta_state state; |
1272 | 1272 | ||
1273 | for (state = IEEE80211_STA_NOTEXIST; | 1273 | for (state = IEEE80211_STA_NOTEXIST; |
1274 | state < sta->sta_state - 1; state++) | 1274 | state < sta->sta_state; state++) |
1275 | WARN_ON(drv_sta_state(local, sta->sdata, sta, | 1275 | WARN_ON(drv_sta_state(local, sta->sdata, sta, |
1276 | state, state + 1)); | 1276 | state, state + 1)); |
1277 | } | 1277 | } |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 46d69d7f1bb4..31f50bc3a312 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, | |||
270 | return 0; | 270 | return 0; |
271 | 271 | ||
272 | /* RTP port is even */ | 272 | /* RTP port is even */ |
273 | port &= htons(~1); | 273 | rtp_port = port & ~htons(1); |
274 | rtp_port = port; | 274 | rtcp_port = port | htons(1); |
275 | rtcp_port = htons(ntohs(port) + 1); | ||
276 | 275 | ||
277 | /* Create expect for RTP */ | 276 | /* Create expect for RTP */ |
278 | if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) | 277 | if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) |
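The replacement above pairs the RTP and RTCP ports by clearing and setting the low bit directly on the network-order value, instead of converting to host order, adding one, and converting back. A stand-alone illustration with an arbitrary odd port.

/*
 * Forcing the low bit even/odd works the same in either byte order, as
 * long as the mask itself is byte-swapped with htons().
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint16_t port = htons(30001);           /* odd port, network order */

        uint16_t rtp_port  = port & ~htons(1);  /* RTP port is even */
        uint16_t rtcp_port = port | htons(1);   /* RTCP is its odd sibling */

        printf("rtp %u rtcp %u\n", ntohs(rtp_port), ntohs(rtcp_port));
        return 0;
}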
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 0a96a43108ed..1686ca1b53a1 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c | |||
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK"); | |||
32 | MODULE_ALIAS("ip6t_HMARK"); | 32 | MODULE_ALIAS("ip6t_HMARK"); |
33 | 33 | ||
34 | struct hmark_tuple { | 34 | struct hmark_tuple { |
35 | u32 src; | 35 | __be32 src; |
36 | u32 dst; | 36 | __be32 dst; |
37 | union hmark_ports uports; | 37 | union hmark_ports uports; |
38 | uint8_t proto; | 38 | u8 proto; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) | 41 | static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask) |
42 | { | 42 | { |
43 | return (addr32[0] & mask[0]) ^ | 43 | return (addr32[0] & mask[0]) ^ |
44 | (addr32[1] & mask[1]) ^ | 44 | (addr32[1] & mask[1]) ^ |
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) | |||
46 | (addr32[3] & mask[3]); | 46 | (addr32[3] & mask[3]); |
47 | } | 47 | } |
48 | 48 | ||
49 | static inline u32 | 49 | static inline __be32 |
50 | hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) | 50 | hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask) |
51 | { | 51 | { |
52 | switch (l3num) { | 52 | switch (l3num) { |
53 | case AF_INET: | 53 | case AF_INET: |
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) | |||
58 | return 0; | 58 | return 0; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline void hmark_swap_ports(union hmark_ports *uports, | ||
62 | const struct xt_hmark_info *info) | ||
63 | { | ||
64 | union hmark_ports hp; | ||
65 | u16 src, dst; | ||
66 | |||
67 | hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32; | ||
68 | src = ntohs(hp.b16.src); | ||
69 | dst = ntohs(hp.b16.dst); | ||
70 | |||
71 | if (dst > src) | ||
72 | uports->v32 = (dst << 16) | src; | ||
73 | else | ||
74 | uports->v32 = (src << 16) | dst; | ||
75 | } | ||
76 | |||
61 | static int | 77 | static int |
62 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | 78 | hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, |
63 | const struct xt_hmark_info *info) | 79 | const struct xt_hmark_info *info) |
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | |||
74 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | 90 | otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; |
75 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 91 | rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
76 | 92 | ||
77 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, | 93 | t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6, |
78 | info->src_mask.all); | 94 | info->src_mask.ip6); |
79 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, | 95 | t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6, |
80 | info->dst_mask.all); | 96 | info->dst_mask.ip6); |
81 | 97 | ||
82 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 98 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
83 | return 0; | 99 | return 0; |
84 | 100 | ||
85 | t->proto = nf_ct_protonum(ct); | 101 | t->proto = nf_ct_protonum(ct); |
86 | if (t->proto != IPPROTO_ICMP) { | 102 | if (t->proto != IPPROTO_ICMP) { |
87 | t->uports.p16.src = otuple->src.u.all; | 103 | t->uports.b16.src = otuple->src.u.all; |
88 | t->uports.p16.dst = rtuple->src.u.all; | 104 | t->uports.b16.dst = rtuple->src.u.all; |
89 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 105 | hmark_swap_ports(&t->uports, info); |
90 | info->port_set.v32; | ||
91 | if (t->uports.p16.dst < t->uports.p16.src) | ||
92 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
93 | } | 106 | } |
94 | 107 | ||
95 | return 0; | 108 | return 0; |
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, | |||
98 | #endif | 111 | #endif |
99 | } | 112 | } |
100 | 113 | ||
114 | /* This hash function is endian independent, to ensure consistent hashing if | ||
115 | * the cluster is composed of big and little endian systems. */ | ||
101 | static inline u32 | 116 | static inline u32 |
102 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) | 117 | hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) |
103 | { | 118 | { |
104 | u32 hash; | 119 | u32 hash; |
120 | u32 src = ntohl(t->src); | ||
121 | u32 dst = ntohl(t->dst); | ||
105 | 122 | ||
106 | if (t->dst < t->src) | 123 | if (dst < src) |
107 | swap(t->src, t->dst); | 124 | swap(src, dst); |
108 | 125 | ||
109 | hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); | 126 | hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); |
110 | hash = hash ^ (t->proto & info->proto_mask); | 127 | hash = hash ^ (t->proto & info->proto_mask); |
111 | 128 | ||
112 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; | 129 | return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; |
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, | |||
126 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) | 143 | if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) |
127 | return; | 144 | return; |
128 | 145 | ||
129 | t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | | 146 | hmark_swap_ports(&t->uports, info); |
130 | info->port_set.v32; | ||
131 | |||
132 | if (t->uports.p16.dst < t->uports.p16.src) | ||
133 | swap(t->uports.p16.dst, t->uports.p16.src); | ||
134 | } | 147 | } |
135 | 148 | ||
136 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) | 149 | #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) |
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, | |||
178 | return -1; | 191 | return -1; |
179 | } | 192 | } |
180 | noicmp: | 193 | noicmp: |
181 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); | 194 | t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6); |
182 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); | 195 | t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6); |
183 | 196 | ||
184 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 197 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
185 | return 0; | 198 | return 0; |
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, | |||
255 | } | 268 | } |
256 | } | 269 | } |
257 | 270 | ||
258 | t->src = (__force u32) ip->saddr; | 271 | t->src = ip->saddr & info->src_mask.ip; |
259 | t->dst = (__force u32) ip->daddr; | 272 | t->dst = ip->daddr & info->dst_mask.ip; |
260 | |||
261 | t->src &= info->src_mask.ip; | ||
262 | t->dst &= info->dst_mask.ip; | ||
263 | 273 | ||
264 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) | 274 | if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) |
265 | return 0; | 275 | return 0; |
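As the new comment in the hunk notes, the mark must hash to the same value on big- and little-endian members of a cluster, so addresses are converted to host byte order before they are ordered and fed to the hash. A user-space sketch of that idea; mix3() is only a placeholder for jhash_3words(), which is not reproduced here, and the sample addresses and seed are arbitrary.

/*
 * Convert big-endian addresses to host order, order the pair
 * consistently, then hash; the numeric inputs are now identical on any
 * endianness.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
        /* placeholder mixer; the kernel uses jhash_3words() */
        return (a * 2654435761u) ^ (b * 40503u) ^ (c + seed);
}

static uint32_t flow_hash(uint32_t be_src, uint32_t be_dst,
                          uint32_t ports, uint32_t seed)
{
        uint32_t src = ntohl(be_src);   /* host order => endian neutral */
        uint32_t dst = ntohl(be_dst);

        if (dst < src) {                /* same ordering on every host */
                uint32_t tmp = src;
                src = dst;
                dst = tmp;
        }
        return mix3(src, dst, ports, seed);
}

int main(void)
{
        uint32_t h = flow_hash(htonl(0xc0a80001), htonl(0x08080808),
                               0x1f90005b, 0x12345678);
        printf("mark input hash: %08x\n", h);
        return 0;
}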
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 3f339b19d140..17a707db40eb 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, | |||
292 | 292 | ||
293 | pr_debug("%p\n", sk); | 293 | pr_debug("%p\n", sk); |
294 | 294 | ||
295 | if (llcp_sock == NULL) | ||
296 | return -EBADFD; | ||
297 | |||
295 | addr->sa_family = AF_NFC; | 298 | addr->sa_family = AF_NFC; |
296 | *len = sizeof(struct sockaddr_nfc_llcp); | 299 | *len = sizeof(struct sockaddr_nfc_llcp); |
297 | 300 | ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 04040476082e..21fde99e5c56 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -71,7 +71,9 @@ static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head, | |||
71 | msg->errno = err; | 71 | msg->errno = err; |
72 | destroy_msg(msg); | 72 | destroy_msg(msg); |
73 | } while (!list_empty(head)); | 73 | } while (!list_empty(head)); |
74 | wake_up(waitq); | 74 | |
75 | if (waitq) | ||
76 | wake_up(waitq); | ||
75 | } | 77 | } |
76 | 78 | ||
77 | static void | 79 | static void |
@@ -91,11 +93,9 @@ rpc_timeout_upcall_queue(struct work_struct *work) | |||
91 | } | 93 | } |
92 | dentry = dget(pipe->dentry); | 94 | dentry = dget(pipe->dentry); |
93 | spin_unlock(&pipe->lock); | 95 | spin_unlock(&pipe->lock); |
94 | if (dentry) { | 96 | rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL, |
95 | rpc_purge_list(&RPC_I(dentry->d_inode)->waitq, | 97 | &free_list, destroy_msg, -ETIMEDOUT); |
96 | &free_list, destroy_msg, -ETIMEDOUT); | 98 | dput(dentry); |
97 | dput(dentry); | ||
98 | } | ||
99 | } | 99 | } |
100 | 100 | ||
101 | ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, | 101 | ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 7e9baaa1e543..3ee7461926d8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -1374,7 +1374,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, | |||
1374 | sizeof(req->rq_snd_buf)); | 1374 | sizeof(req->rq_snd_buf)); |
1375 | return bc_send(req); | 1375 | return bc_send(req); |
1376 | } else { | 1376 | } else { |
1377 | /* Nothing to do to drop request */ | 1377 | /* drop request */ |
1378 | xprt_free_bc_request(req); | ||
1378 | return 0; | 1379 | return 0; |
1379 | } | 1380 | } |
1380 | } | 1381 | } |
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index d2a19b0ff71f..89baa3328411 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
42 | cfg80211_hold_bss(bss_from_pub(bss)); | 42 | cfg80211_hold_bss(bss_from_pub(bss)); |
43 | wdev->current_bss = bss_from_pub(bss); | 43 | wdev->current_bss = bss_from_pub(bss); |
44 | 44 | ||
45 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
45 | cfg80211_upload_connect_keys(wdev); | 46 | cfg80211_upload_connect_keys(wdev); |
46 | 47 | ||
47 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, | 48 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, |
@@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | |||
60 | struct cfg80211_event *ev; | 61 | struct cfg80211_event *ev; |
61 | unsigned long flags; | 62 | unsigned long flags; |
62 | 63 | ||
63 | CFG80211_DEV_WARN_ON(!wdev->ssid_len); | 64 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); |
64 | 65 | ||
65 | ev = kzalloc(sizeof(*ev), gfp); | 66 | ev = kzalloc(sizeof(*ev), gfp); |
66 | if (!ev) | 67 | if (!ev) |
@@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
115 | #ifdef CONFIG_CFG80211_WEXT | 116 | #ifdef CONFIG_CFG80211_WEXT |
116 | wdev->wext.ibss.channel = params->channel; | 117 | wdev->wext.ibss.channel = params->channel; |
117 | #endif | 118 | #endif |
119 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
118 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | 120 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); |
119 | if (err) { | 121 | if (err) { |
120 | wdev->connect_keys = NULL; | 122 | wdev->connect_keys = NULL; |
123 | wdev->sme_state = CFG80211_SME_IDLE; | ||
121 | return err; | 124 | return err; |
122 | } | 125 | } |
123 | 126 | ||
@@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | |||
169 | } | 172 | } |
170 | 173 | ||
171 | wdev->current_bss = NULL; | 174 | wdev->current_bss = NULL; |
175 | wdev->sme_state = CFG80211_SME_IDLE; | ||
172 | wdev->ssid_len = 0; | 176 | wdev->ssid_len = 0; |
173 | #ifdef CONFIG_CFG80211_WEXT | 177 | #ifdef CONFIG_CFG80211_WEXT |
174 | if (!nowext) | 178 | if (!nowext) |
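The ibss.c hunks above add explicit SME state bookkeeping: CONNECTING when a join is issued, CONNECTED when the join event arrives, and back to IDLE on failure or teardown. A hedged sketch of that small state machine in standalone C (enum values and function names are illustrative, not the cfg80211 symbols):

#include <stdio.h>

enum sme_state { SME_IDLE, SME_CONNECTING, SME_CONNECTED };

struct wdev { enum sme_state sme_state; };

static int join_ibss(struct wdev *w, int driver_err)
{
	w->sme_state = SME_CONNECTING;
	if (driver_err) {
		w->sme_state = SME_IDLE;	/* roll back on failure */
		return driver_err;
	}
	return 0;
}

static void ibss_joined(struct wdev *w)
{
	/* warn if the event arrives in an unexpected state */
	if (w->sme_state != SME_CONNECTING)
		fprintf(stderr, "unexpected join event\n");
	w->sme_state = SME_CONNECTED;
}

static void clear_ibss(struct wdev *w)
{
	w->sme_state = SME_IDLE;
}

int main(void)
{
	struct wdev w = { SME_IDLE };

	if (!join_ibss(&w, 0))
		ibss_joined(&w);
	clear_ibss(&w);
	printf("final state: %d\n", w.sme_state);
	return 0;
}

The warning in the joined handler mirrors how the hunk swaps the old ssid_len check for a state check, which catches out-of-order events rather than merely empty SSIDs.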
diff --git a/net/wireless/util.c b/net/wireless/util.c index 55d99466babb..8f2d68fc3a44 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
935 | enum nl80211_iftype iftype) | 935 | enum nl80211_iftype iftype) |
936 | { | 936 | { |
937 | struct wireless_dev *wdev_iter; | 937 | struct wireless_dev *wdev_iter; |
938 | u32 used_iftypes = BIT(iftype); | ||
938 | int num[NUM_NL80211_IFTYPES]; | 939 | int num[NUM_NL80211_IFTYPES]; |
939 | int total = 1; | 940 | int total = 1; |
940 | int i, j; | 941 | int i, j; |
@@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
961 | 962 | ||
962 | num[wdev_iter->iftype]++; | 963 | num[wdev_iter->iftype]++; |
963 | total++; | 964 | total++; |
965 | used_iftypes |= BIT(wdev_iter->iftype); | ||
964 | } | 966 | } |
965 | mutex_unlock(&rdev->devlist_mtx); | 967 | mutex_unlock(&rdev->devlist_mtx); |
966 | 968 | ||
@@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
970 | for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { | 972 | for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { |
971 | const struct ieee80211_iface_combination *c; | 973 | const struct ieee80211_iface_combination *c; |
972 | struct ieee80211_iface_limit *limits; | 974 | struct ieee80211_iface_limit *limits; |
975 | u32 all_iftypes = 0; | ||
973 | 976 | ||
974 | c = &rdev->wiphy.iface_combinations[i]; | 977 | c = &rdev->wiphy.iface_combinations[i]; |
975 | 978 | ||
@@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
984 | if (rdev->wiphy.software_iftypes & BIT(iftype)) | 987 | if (rdev->wiphy.software_iftypes & BIT(iftype)) |
985 | continue; | 988 | continue; |
986 | for (j = 0; j < c->n_limits; j++) { | 989 | for (j = 0; j < c->n_limits; j++) { |
990 | all_iftypes |= limits[j].types; | ||
987 | if (!(limits[j].types & BIT(iftype))) | 991 | if (!(limits[j].types & BIT(iftype))) |
988 | continue; | 992 | continue; |
989 | if (limits[j].max < num[iftype]) | 993 | if (limits[j].max < num[iftype]) |
@@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, | |||
991 | limits[j].max -= num[iftype]; | 995 | limits[j].max -= num[iftype]; |
992 | } | 996 | } |
993 | } | 997 | } |
994 | /* yay, it fits */ | 998 | |
999 | /* | ||
1000 | * Finally check that all iftypes that we're currently | ||
1001 | * using are actually part of this combination. If they | ||
1002 | * aren't then we can't use this combination and have | ||
1003 | * to continue to the next. | ||
1004 | */ | ||
1005 | if ((all_iftypes & used_iftypes) != used_iftypes) | ||
1006 | goto cont; | ||
1007 | |||
1008 | /* | ||
1009 | * This combination covered all interface types and | ||
1010 | * supported the requested numbers, so we're good. | ||
1011 | */ | ||
995 | kfree(limits); | 1012 | kfree(limits); |
996 | return 0; | 1013 | return 0; |
997 | cont: | 1014 | cont: |
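The util.c hunk above builds a bitmask of interface types currently in use and rejects any interface combination whose limits do not cover every one of them. A minimal standalone sketch of that coverage check, with illustrative constants (the real code uses the nl80211 iftype enums and per-limit structures):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum iftype { IFTYPE_STATION, IFTYPE_AP, IFTYPE_P2P_GO, NUM_IFTYPES };

static int combination_covers(uint32_t used_iftypes,
			      const uint32_t *limit_types, int n_limits)
{
	uint32_t all_iftypes = 0;
	int j;

	for (j = 0; j < n_limits; j++)
		all_iftypes |= limit_types[j];

	/* every bit set in used_iftypes must also be set in all_iftypes */
	return (all_iftypes & used_iftypes) == used_iftypes;
}

int main(void)
{
	uint32_t used = BIT(IFTYPE_STATION) | BIT(IFTYPE_AP);
	uint32_t combo_a[] = { BIT(IFTYPE_STATION) | BIT(IFTYPE_AP) };
	uint32_t combo_b[] = { BIT(IFTYPE_STATION), BIT(IFTYPE_P2P_GO) };

	printf("combo_a fits: %d\n", combination_covers(used, combo_a, 1));
	printf("combo_b fits: %d\n", combination_covers(used, combo_b, 2));
	return 0;
}

Without this subset test, a combination could satisfy the per-type count limits while silently omitting a type that is already up, which is exactly the gap the added check closes.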
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index a68aed7fce02..ec2118d0e27a 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
@@ -502,10 +502,8 @@ static int snd_compr_pause(struct snd_compr_stream *stream) | |||
502 | if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) | 502 | if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) |
503 | return -EPERM; | 503 | return -EPERM; |
504 | retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); | 504 | retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); |
505 | if (!retval) { | 505 | if (!retval) |
506 | stream->runtime->state = SNDRV_PCM_STATE_PAUSED; | 506 | stream->runtime->state = SNDRV_PCM_STATE_PAUSED; |
507 | wake_up(&stream->runtime->sleep); | ||
508 | } | ||
509 | return retval; | 507 | return retval; |
510 | } | 508 | } |
511 | 509 | ||
@@ -544,6 +542,10 @@ static int snd_compr_stop(struct snd_compr_stream *stream) | |||
544 | if (!retval) { | 542 | if (!retval) { |
545 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; | 543 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; |
546 | wake_up(&stream->runtime->sleep); | 544 | wake_up(&stream->runtime->sleep); |
545 | stream->runtime->hw_pointer = 0; | ||
546 | stream->runtime->app_pointer = 0; | ||
547 | stream->runtime->total_bytes_available = 0; | ||
548 | stream->runtime->total_bytes_transferred = 0; | ||
547 | } | 549 | } |
548 | return retval; | 550 | return retval; |
549 | } | 551 | } |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2b6392be451c..02763827dde0 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2484,9 +2484,9 @@ static void azx_notifier_unregister(struct azx *chip) | |||
2484 | static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); | 2484 | static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); |
2485 | static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); | 2485 | static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); |
2486 | 2486 | ||
2487 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
2487 | static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); | 2488 | static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); |
2488 | 2489 | ||
2489 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
2490 | static void azx_vs_set_state(struct pci_dev *pci, | 2490 | static void azx_vs_set_state(struct pci_dev *pci, |
2491 | enum vga_switcheroo_state state) | 2491 | enum vga_switcheroo_state state) |
2492 | { | 2492 | { |
@@ -2578,6 +2578,7 @@ static int __devinit register_vga_switcheroo(struct azx *chip) | |||
2578 | #else | 2578 | #else |
2579 | #define init_vga_switcheroo(chip) /* NOP */ | 2579 | #define init_vga_switcheroo(chip) /* NOP */ |
2580 | #define register_vga_switcheroo(chip) 0 | 2580 | #define register_vga_switcheroo(chip) 0 |
2581 | #define check_hdmi_disabled(pci) false | ||
2581 | #endif /* SUPPORT_VGA_SWITCHER */ | 2582 | #endif /* SUPPORT_VGA_SWITCHER */ |
2582 | 2583 | ||
2583 | /* | 2584 | /* |
@@ -2638,6 +2639,7 @@ static int azx_dev_free(struct snd_device *device) | |||
2638 | return azx_free(device->device_data); | 2639 | return azx_free(device->device_data); |
2639 | } | 2640 | } |
2640 | 2641 | ||
2642 | #ifdef SUPPORT_VGA_SWITCHEROO | ||
2641 | /* | 2643 | /* |
2642 | * Check of disabled HDMI controller by vga-switcheroo | 2644 | * Check of disabled HDMI controller by vga-switcheroo |
2643 | */ | 2645 | */ |
@@ -2670,12 +2672,13 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci) | |||
2670 | struct pci_dev *p = get_bound_vga(pci); | 2672 | struct pci_dev *p = get_bound_vga(pci); |
2671 | 2673 | ||
2672 | if (p) { | 2674 | if (p) { |
2673 | if (vga_default_device() && p != vga_default_device()) | 2675 | if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF) |
2674 | vga_inactive = true; | 2676 | vga_inactive = true; |
2675 | pci_dev_put(p); | 2677 | pci_dev_put(p); |
2676 | } | 2678 | } |
2677 | return vga_inactive; | 2679 | return vga_inactive; |
2678 | } | 2680 | } |
2681 | #endif /* SUPPORT_VGA_SWITCHEROO */ | ||
2679 | 2682 | ||
2680 | /* | 2683 | /* |
2681 | * white/black-listing for position_fix | 2684 | * white/black-listing for position_fix |
@@ -3351,6 +3354,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
3351 | { PCI_DEVICE(0x6549, 0x1200), | 3354 | { PCI_DEVICE(0x6549, 0x1200), |
3352 | .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, | 3355 | .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, |
3353 | /* Creative X-Fi (CA0110-IBG) */ | 3356 | /* Creative X-Fi (CA0110-IBG) */ |
3357 | /* CTHDA chips */ | ||
3358 | { PCI_DEVICE(0x1102, 0x0010), | ||
3359 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3360 | { PCI_DEVICE(0x1102, 0x0012), | ||
3361 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3354 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) | 3362 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) |
3355 | /* the following entry conflicts with snd-ctxfi driver, | 3363 | /* the following entry conflicts with snd-ctxfi driver, |
3356 | * as ctxfi driver mutates from HD-audio to native mode with | 3364 | * as ctxfi driver mutates from HD-audio to native mode with |
@@ -3367,11 +3375,6 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
3367 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | | 3375 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | |
3368 | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, | 3376 | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, |
3369 | #endif | 3377 | #endif |
3370 | /* CTHDA chips */ | ||
3371 | { PCI_DEVICE(0x1102, 0x0010), | ||
3372 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3373 | { PCI_DEVICE(0x1102, 0x0012), | ||
3374 | .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, | ||
3375 | /* Vortex86MX */ | 3378 | /* Vortex86MX */ |
3376 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, | 3379 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, |
3377 | /* VMware HDAudio */ | 3380 | /* VMware HDAudio */ |
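Part of the hda_intel hunk is a compile-out pattern: when SUPPORT_VGA_SWITCHEROO is not built in, check_hdmi_disabled() is replaced by a stub macro so callers need no #ifdefs of their own. A hedged standalone illustration of that idiom (macro and function names here are only stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* flip this to 0 to see the compiled-out variant */
#define SUPPORT_SWITCHEROO 1

#if SUPPORT_SWITCHEROO
static bool check_hdmi_disabled(int pci_id)
{
	/* the real code queries the switcheroo client state here */
	return pci_id == 0x1234;
}
#else
#define check_hdmi_disabled(pci_id) false	/* feature compiled out */
#endif

int main(void)
{
	printf("hdmi disabled: %d\n", check_hdmi_disabled(0x1234));
	printf("hdmi disabled: %d\n", check_hdmi_disabled(0x5678));
	return 0;
}

Providing the stub alongside the #ifdef'd implementation keeps the call site in azx_probe identical in both configurations, which is why the hunk also moves the get_bound_vga() declaration inside the same guard.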
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 3acb5824ad39..172370b3793b 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -4061,7 +4061,7 @@ static void cx_auto_init_digital(struct hda_codec *codec) | |||
4061 | static int cx_auto_init(struct hda_codec *codec) | 4061 | static int cx_auto_init(struct hda_codec *codec) |
4062 | { | 4062 | { |
4063 | struct conexant_spec *spec = codec->spec; | 4063 | struct conexant_spec *spec = codec->spec; |
4064 | /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/ | 4064 | snd_hda_gen_apply_verbs(codec); |
4065 | cx_auto_init_output(codec); | 4065 | cx_auto_init_output(codec); |
4066 | cx_auto_init_input(codec); | 4066 | cx_auto_init_input(codec); |
4067 | cx_auto_init_digital(codec); | 4067 | cx_auto_init_digital(codec); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 224410e8e9e7..f8f4906e498d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1896,6 +1896,7 @@ static int alc_init(struct hda_codec *codec) | |||
1896 | alc_fix_pll(codec); | 1896 | alc_fix_pll(codec); |
1897 | alc_auto_init_amp(codec, spec->init_amp); | 1897 | alc_auto_init_amp(codec, spec->init_amp); |
1898 | 1898 | ||
1899 | snd_hda_gen_apply_verbs(codec); | ||
1899 | alc_init_special_input_src(codec); | 1900 | alc_init_special_input_src(codec); |
1900 | alc_auto_init_std(codec); | 1901 | alc_auto_init_std(codec); |
1901 | 1902 | ||
@@ -6439,6 +6440,7 @@ enum { | |||
6439 | ALC662_FIXUP_ASUS_MODE7, | 6440 | ALC662_FIXUP_ASUS_MODE7, |
6440 | ALC662_FIXUP_ASUS_MODE8, | 6441 | ALC662_FIXUP_ASUS_MODE8, |
6441 | ALC662_FIXUP_NO_JACK_DETECT, | 6442 | ALC662_FIXUP_NO_JACK_DETECT, |
6443 | ALC662_FIXUP_ZOTAC_Z68, | ||
6442 | }; | 6444 | }; |
6443 | 6445 | ||
6444 | static const struct alc_fixup alc662_fixups[] = { | 6446 | static const struct alc_fixup alc662_fixups[] = { |
@@ -6588,6 +6590,13 @@ static const struct alc_fixup alc662_fixups[] = { | |||
6588 | .type = ALC_FIXUP_FUNC, | 6590 | .type = ALC_FIXUP_FUNC, |
6589 | .v.func = alc_fixup_no_jack_detect, | 6591 | .v.func = alc_fixup_no_jack_detect, |
6590 | }, | 6592 | }, |
6593 | [ALC662_FIXUP_ZOTAC_Z68] = { | ||
6594 | .type = ALC_FIXUP_PINS, | ||
6595 | .v.pins = (const struct alc_pincfg[]) { | ||
6596 | { 0x1b, 0x02214020 }, /* Front HP */ | ||
6597 | { } | ||
6598 | } | ||
6599 | }, | ||
6591 | }; | 6600 | }; |
6592 | 6601 | ||
6593 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { | 6602 | static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
@@ -6601,6 +6610,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { | |||
6601 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), | 6610 | SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), |
6602 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), | 6611 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), |
6603 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), | 6612 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), |
6613 | SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), | ||
6604 | SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), | 6614 | SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), |
6605 | 6615 | ||
6606 | #if 0 | 6616 | #if 0 |
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a75c3766aede..0418fa11e6bd 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c | |||
@@ -99,8 +99,9 @@ static void wm2000_reset(struct wm2000_priv *wm2000) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static int wm2000_poll_bit(struct i2c_client *i2c, | 101 | static int wm2000_poll_bit(struct i2c_client *i2c, |
102 | unsigned int reg, u8 mask, int timeout) | 102 | unsigned int reg, u8 mask) |
103 | { | 103 | { |
104 | int timeout = 4000; | ||
104 | int val; | 105 | int val; |
105 | 106 | ||
106 | val = wm2000_read(i2c, reg); | 107 | val = wm2000_read(i2c, reg); |
@@ -119,7 +120,7 @@ static int wm2000_poll_bit(struct i2c_client *i2c, | |||
119 | static int wm2000_power_up(struct i2c_client *i2c, int analogue) | 120 | static int wm2000_power_up(struct i2c_client *i2c, int analogue) |
120 | { | 121 | { |
121 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 122 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
122 | int ret, timeout; | 123 | int ret; |
123 | 124 | ||
124 | BUG_ON(wm2000->anc_mode != ANC_OFF); | 125 | BUG_ON(wm2000->anc_mode != ANC_OFF); |
125 | 126 | ||
@@ -140,13 +141,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
140 | 141 | ||
141 | /* Wait for ANC engine to become ready */ | 142 | /* Wait for ANC engine to become ready */ |
142 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 143 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
143 | WM2000_ANC_ENG_IDLE, 1)) { | 144 | WM2000_ANC_ENG_IDLE)) { |
144 | dev_err(&i2c->dev, "ANC engine failed to reset\n"); | 145 | dev_err(&i2c->dev, "ANC engine failed to reset\n"); |
145 | return -ETIMEDOUT; | 146 | return -ETIMEDOUT; |
146 | } | 147 | } |
147 | 148 | ||
148 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 149 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
149 | WM2000_STATUS_BOOT_COMPLETE, 1)) { | 150 | WM2000_STATUS_BOOT_COMPLETE)) { |
150 | dev_err(&i2c->dev, "ANC engine failed to initialise\n"); | 151 | dev_err(&i2c->dev, "ANC engine failed to initialise\n"); |
151 | return -ETIMEDOUT; | 152 | return -ETIMEDOUT; |
152 | } | 153 | } |
@@ -173,16 +174,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
173 | dev_dbg(&i2c->dev, "Download complete\n"); | 174 | dev_dbg(&i2c->dev, "Download complete\n"); |
174 | 175 | ||
175 | if (analogue) { | 176 | if (analogue) { |
176 | timeout = 248; | 177 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); |
177 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); | ||
178 | 178 | ||
179 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 179 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
180 | WM2000_MODE_ANA_SEQ_INCLUDE | | 180 | WM2000_MODE_ANA_SEQ_INCLUDE | |
181 | WM2000_MODE_MOUSE_ENABLE | | 181 | WM2000_MODE_MOUSE_ENABLE | |
182 | WM2000_MODE_THERMAL_ENABLE); | 182 | WM2000_MODE_THERMAL_ENABLE); |
183 | } else { | 183 | } else { |
184 | timeout = 10; | ||
185 | |||
186 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 184 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
187 | WM2000_MODE_MOUSE_ENABLE | | 185 | WM2000_MODE_MOUSE_ENABLE | |
188 | WM2000_MODE_THERMAL_ENABLE); | 186 | WM2000_MODE_THERMAL_ENABLE); |
@@ -201,9 +199,8 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
201 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 199 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
202 | 200 | ||
203 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 201 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
204 | WM2000_STATUS_MOUSE_ACTIVE, timeout)) { | 202 | WM2000_STATUS_MOUSE_ACTIVE)) { |
205 | dev_err(&i2c->dev, "Timed out waiting for device after %dms\n", | 203 | dev_err(&i2c->dev, "Timed out waiting for device\n"); |
206 | timeout * 10); | ||
207 | return -ETIMEDOUT; | 204 | return -ETIMEDOUT; |
208 | } | 205 | } |
209 | 206 | ||
@@ -218,28 +215,25 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) | |||
218 | static int wm2000_power_down(struct i2c_client *i2c, int analogue) | 215 | static int wm2000_power_down(struct i2c_client *i2c, int analogue) |
219 | { | 216 | { |
220 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 217 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
221 | int timeout; | ||
222 | 218 | ||
223 | if (analogue) { | 219 | if (analogue) { |
224 | timeout = 248; | 220 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); |
225 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); | ||
226 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 221 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
227 | WM2000_MODE_ANA_SEQ_INCLUDE | | 222 | WM2000_MODE_ANA_SEQ_INCLUDE | |
228 | WM2000_MODE_POWER_DOWN); | 223 | WM2000_MODE_POWER_DOWN); |
229 | } else { | 224 | } else { |
230 | timeout = 10; | ||
231 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 225 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
232 | WM2000_MODE_POWER_DOWN); | 226 | WM2000_MODE_POWER_DOWN); |
233 | } | 227 | } |
234 | 228 | ||
235 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 229 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
236 | WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) { | 230 | WM2000_STATUS_POWER_DOWN_COMPLETE)) { |
237 | dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); | 231 | dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); |
238 | return -ETIMEDOUT; | 232 | return -ETIMEDOUT; |
239 | } | 233 | } |
240 | 234 | ||
241 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 235 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
242 | WM2000_ANC_ENG_IDLE, 1)) { | 236 | WM2000_ANC_ENG_IDLE)) { |
243 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); | 237 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); |
244 | return -ETIMEDOUT; | 238 | return -ETIMEDOUT; |
245 | } | 239 | } |
@@ -268,13 +262,13 @@ static int wm2000_enter_bypass(struct i2c_client *i2c, int analogue) | |||
268 | } | 262 | } |
269 | 263 | ||
270 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 264 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
271 | WM2000_STATUS_ANC_DISABLED, 10)) { | 265 | WM2000_STATUS_ANC_DISABLED)) { |
272 | dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); | 266 | dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); |
273 | return -ETIMEDOUT; | 267 | return -ETIMEDOUT; |
274 | } | 268 | } |
275 | 269 | ||
276 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, | 270 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, |
277 | WM2000_ANC_ENG_IDLE, 1)) { | 271 | WM2000_ANC_ENG_IDLE)) { |
278 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); | 272 | dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); |
279 | return -ETIMEDOUT; | 273 | return -ETIMEDOUT; |
280 | } | 274 | } |
@@ -311,7 +305,7 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) | |||
311 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 305 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
312 | 306 | ||
313 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 307 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
314 | WM2000_STATUS_MOUSE_ACTIVE, 10)) { | 308 | WM2000_STATUS_MOUSE_ACTIVE)) { |
315 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); | 309 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); |
316 | return -ETIMEDOUT; | 310 | return -ETIMEDOUT; |
317 | } | 311 | } |
@@ -325,38 +319,32 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) | |||
325 | static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) | 319 | static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) |
326 | { | 320 | { |
327 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 321 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
328 | int timeout; | ||
329 | 322 | ||
330 | BUG_ON(wm2000->anc_mode != ANC_ACTIVE); | 323 | BUG_ON(wm2000->anc_mode != ANC_ACTIVE); |
331 | 324 | ||
332 | if (analogue) { | 325 | if (analogue) { |
333 | timeout = 248; | 326 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); |
334 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); | ||
335 | 327 | ||
336 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 328 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
337 | WM2000_MODE_ANA_SEQ_INCLUDE | | 329 | WM2000_MODE_ANA_SEQ_INCLUDE | |
338 | WM2000_MODE_THERMAL_ENABLE | | 330 | WM2000_MODE_THERMAL_ENABLE | |
339 | WM2000_MODE_STANDBY_ENTRY); | 331 | WM2000_MODE_STANDBY_ENTRY); |
340 | } else { | 332 | } else { |
341 | timeout = 10; | ||
342 | |||
343 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 333 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
344 | WM2000_MODE_THERMAL_ENABLE | | 334 | WM2000_MODE_THERMAL_ENABLE | |
345 | WM2000_MODE_STANDBY_ENTRY); | 335 | WM2000_MODE_STANDBY_ENTRY); |
346 | } | 336 | } |
347 | 337 | ||
348 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 338 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
349 | WM2000_STATUS_ANC_DISABLED, timeout)) { | 339 | WM2000_STATUS_ANC_DISABLED)) { |
350 | dev_err(&i2c->dev, | 340 | dev_err(&i2c->dev, |
351 | "Timed out waiting for ANC disable after 1ms\n"); | 341 | "Timed out waiting for ANC disable after 1ms\n"); |
352 | return -ETIMEDOUT; | 342 | return -ETIMEDOUT; |
353 | } | 343 | } |
354 | 344 | ||
355 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE, | 345 | if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) { |
356 | 1)) { | ||
357 | dev_err(&i2c->dev, | 346 | dev_err(&i2c->dev, |
358 | "Timed out waiting for standby after %dms\n", | 347 | "Timed out waiting for standby\n"); |
359 | timeout * 10); | ||
360 | return -ETIMEDOUT; | 348 | return -ETIMEDOUT; |
361 | } | 349 | } |
362 | 350 | ||
@@ -374,23 +362,19 @@ static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) | |||
374 | static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) | 362 | static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) |
375 | { | 363 | { |
376 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); | 364 | struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); |
377 | int timeout; | ||
378 | 365 | ||
379 | BUG_ON(wm2000->anc_mode != ANC_STANDBY); | 366 | BUG_ON(wm2000->anc_mode != ANC_STANDBY); |
380 | 367 | ||
381 | wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); | 368 | wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); |
382 | 369 | ||
383 | if (analogue) { | 370 | if (analogue) { |
384 | timeout = 248; | 371 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); |
385 | wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); | ||
386 | 372 | ||
387 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 373 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
388 | WM2000_MODE_ANA_SEQ_INCLUDE | | 374 | WM2000_MODE_ANA_SEQ_INCLUDE | |
389 | WM2000_MODE_THERMAL_ENABLE | | 375 | WM2000_MODE_THERMAL_ENABLE | |
390 | WM2000_MODE_MOUSE_ENABLE); | 376 | WM2000_MODE_MOUSE_ENABLE); |
391 | } else { | 377 | } else { |
392 | timeout = 10; | ||
393 | |||
394 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, | 378 | wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, |
395 | WM2000_MODE_THERMAL_ENABLE | | 379 | WM2000_MODE_THERMAL_ENABLE | |
396 | WM2000_MODE_MOUSE_ENABLE); | 380 | WM2000_MODE_MOUSE_ENABLE); |
@@ -400,9 +384,8 @@ static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) | |||
400 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); | 384 | wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); |
401 | 385 | ||
402 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, | 386 | if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, |
403 | WM2000_STATUS_MOUSE_ACTIVE, timeout)) { | 387 | WM2000_STATUS_MOUSE_ACTIVE)) { |
404 | dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n", | 388 | dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); |
405 | timeout * 10); | ||
406 | return -ETIMEDOUT; | 389 | return -ETIMEDOUT; |
407 | } | 390 | } |
408 | 391 | ||
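The wm2000 hunks above remove the per-caller timeout argument and make wm2000_poll_bit() use one fixed internal bound. A hedged sketch of that simplified polling helper in standalone C (the register read is a stub; the real driver reads over I2C and sleeps between attempts):

#include <stdint.h>
#include <stdio.h>

static unsigned int fake_reg;		/* stand-in for a device register */

static unsigned int read_reg(unsigned int reg)
{
	(void)reg;
	return fake_reg;
}

static int poll_bit(unsigned int reg, uint8_t mask)
{
	int timeout = 4000;		/* fixed bound, as in the new helper */
	unsigned int val = read_reg(reg);

	while (!(val & mask) && --timeout) {
		/* a real driver would msleep(1) here instead of spinning */
		val = read_reg(reg);
	}
	return timeout != 0;		/* non-zero means the bit was seen */
}

int main(void)
{
	fake_reg = 0x01;
	printf("bit set: %d\n", poll_bit(0x02, 0x01));
	fake_reg = 0x00;
	printf("bit set: %d\n", poll_bit(0x02, 0x01));
	return 0;
}

With a single generous bound inside the helper, the callers lose their local timeout variables and their "after %dms" error strings, which is most of what the diff deletes.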
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 993639d694ce..aa8c98b628da 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
@@ -46,6 +46,39 @@ | |||
46 | #define WM8994_NUM_DRC 3 | 46 | #define WM8994_NUM_DRC 3 |
47 | #define WM8994_NUM_EQ 3 | 47 | #define WM8994_NUM_EQ 3 |
48 | 48 | ||
49 | static struct { | ||
50 | unsigned int reg; | ||
51 | unsigned int mask; | ||
52 | } wm8994_vu_bits[] = { | ||
53 | { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, | ||
54 | { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, | ||
55 | { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, | ||
56 | { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, | ||
57 | { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU }, | ||
58 | { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU }, | ||
59 | { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, | ||
60 | { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, | ||
61 | { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU }, | ||
62 | { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU }, | ||
63 | |||
64 | { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU }, | ||
65 | { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU }, | ||
66 | { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU }, | ||
67 | { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU }, | ||
68 | { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU }, | ||
69 | { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU }, | ||
70 | { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU }, | ||
71 | { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU }, | ||
72 | { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
73 | { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
74 | { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU }, | ||
75 | { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, | ||
76 | { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU }, | ||
77 | { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU }, | ||
78 | { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU }, | ||
79 | { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU }, | ||
80 | }; | ||
81 | |||
49 | static int wm8994_drc_base[] = { | 82 | static int wm8994_drc_base[] = { |
50 | WM8994_AIF1_DRC1_1, | 83 | WM8994_AIF1_DRC1_1, |
51 | WM8994_AIF1_DRC2_1, | 84 | WM8994_AIF1_DRC2_1, |
@@ -989,6 +1022,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, | |||
989 | struct snd_soc_codec *codec = w->codec; | 1022 | struct snd_soc_codec *codec = w->codec; |
990 | struct wm8994 *control = codec->control_data; | 1023 | struct wm8994 *control = codec->control_data; |
991 | int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; | 1024 | int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; |
1025 | int i; | ||
992 | int dac; | 1026 | int dac; |
993 | int adc; | 1027 | int adc; |
994 | int val; | 1028 | int val; |
@@ -1047,6 +1081,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, | |||
1047 | WM8994_AIF1DAC2L_ENA); | 1081 | WM8994_AIF1DAC2L_ENA); |
1048 | break; | 1082 | break; |
1049 | 1083 | ||
1084 | case SND_SOC_DAPM_POST_PMU: | ||
1085 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) | ||
1086 | snd_soc_write(codec, wm8994_vu_bits[i].reg, | ||
1087 | snd_soc_read(codec, | ||
1088 | wm8994_vu_bits[i].reg)); | ||
1089 | break; | ||
1090 | |||
1050 | case SND_SOC_DAPM_PRE_PMD: | 1091 | case SND_SOC_DAPM_PRE_PMD: |
1051 | case SND_SOC_DAPM_POST_PMD: | 1092 | case SND_SOC_DAPM_POST_PMD: |
1052 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | 1093 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, |
@@ -1072,6 +1113,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, | |||
1072 | struct snd_kcontrol *kcontrol, int event) | 1113 | struct snd_kcontrol *kcontrol, int event) |
1073 | { | 1114 | { |
1074 | struct snd_soc_codec *codec = w->codec; | 1115 | struct snd_soc_codec *codec = w->codec; |
1116 | int i; | ||
1075 | int dac; | 1117 | int dac; |
1076 | int adc; | 1118 | int adc; |
1077 | int val; | 1119 | int val; |
@@ -1122,6 +1164,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, | |||
1122 | WM8994_AIF2DACR_ENA); | 1164 | WM8994_AIF2DACR_ENA); |
1123 | break; | 1165 | break; |
1124 | 1166 | ||
1167 | case SND_SOC_DAPM_POST_PMU: | ||
1168 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) | ||
1169 | snd_soc_write(codec, wm8994_vu_bits[i].reg, | ||
1170 | snd_soc_read(codec, | ||
1171 | wm8994_vu_bits[i].reg)); | ||
1172 | break; | ||
1173 | |||
1125 | case SND_SOC_DAPM_PRE_PMD: | 1174 | case SND_SOC_DAPM_PRE_PMD: |
1126 | case SND_SOC_DAPM_POST_PMD: | 1175 | case SND_SOC_DAPM_POST_PMD: |
1127 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | 1176 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, |
@@ -1190,17 +1239,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w, | |||
1190 | switch (event) { | 1239 | switch (event) { |
1191 | case SND_SOC_DAPM_PRE_PMU: | 1240 | case SND_SOC_DAPM_PRE_PMU: |
1192 | if (wm8994->aif1clk_enable) { | 1241 | if (wm8994->aif1clk_enable) { |
1193 | aif1clk_ev(w, kcontrol, event); | 1242 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); |
1194 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1243 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
1195 | WM8994_AIF1CLK_ENA_MASK, | 1244 | WM8994_AIF1CLK_ENA_MASK, |
1196 | WM8994_AIF1CLK_ENA); | 1245 | WM8994_AIF1CLK_ENA); |
1246 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); | ||
1197 | wm8994->aif1clk_enable = 0; | 1247 | wm8994->aif1clk_enable = 0; |
1198 | } | 1248 | } |
1199 | if (wm8994->aif2clk_enable) { | 1249 | if (wm8994->aif2clk_enable) { |
1200 | aif2clk_ev(w, kcontrol, event); | 1250 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); |
1201 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1251 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
1202 | WM8994_AIF2CLK_ENA_MASK, | 1252 | WM8994_AIF2CLK_ENA_MASK, |
1203 | WM8994_AIF2CLK_ENA); | 1253 | WM8994_AIF2CLK_ENA); |
1254 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); | ||
1204 | wm8994->aif2clk_enable = 0; | 1255 | wm8994->aif2clk_enable = 0; |
1205 | } | 1256 | } |
1206 | break; | 1257 | break; |
@@ -1221,15 +1272,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w, | |||
1221 | switch (event) { | 1272 | switch (event) { |
1222 | case SND_SOC_DAPM_POST_PMD: | 1273 | case SND_SOC_DAPM_POST_PMD: |
1223 | if (wm8994->aif1clk_disable) { | 1274 | if (wm8994->aif1clk_disable) { |
1275 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); | ||
1224 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1276 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
1225 | WM8994_AIF1CLK_ENA_MASK, 0); | 1277 | WM8994_AIF1CLK_ENA_MASK, 0); |
1226 | aif1clk_ev(w, kcontrol, event); | 1278 | aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); |
1227 | wm8994->aif1clk_disable = 0; | 1279 | wm8994->aif1clk_disable = 0; |
1228 | } | 1280 | } |
1229 | if (wm8994->aif2clk_disable) { | 1281 | if (wm8994->aif2clk_disable) { |
1282 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); | ||
1230 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1283 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
1231 | WM8994_AIF2CLK_ENA_MASK, 0); | 1284 | WM8994_AIF2CLK_ENA_MASK, 0); |
1232 | aif2clk_ev(w, kcontrol, event); | 1285 | aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); |
1233 | wm8994->aif2clk_disable = 0; | 1286 | wm8994->aif2clk_disable = 0; |
1234 | } | 1287 | } |
1235 | break; | 1288 | break; |
@@ -1527,9 +1580,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) | |||
1527 | 1580 | ||
1528 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { | 1581 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { |
1529 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, | 1582 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, |
1530 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), | 1583 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | |
1584 | SND_SOC_DAPM_PRE_PMD), | ||
1531 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, | 1585 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, |
1532 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), | 1586 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | |
1587 | SND_SOC_DAPM_PRE_PMD), | ||
1533 | SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), | 1588 | SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), |
1534 | SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, | 1589 | SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, |
1535 | left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), | 1590 | left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), |
@@ -3879,39 +3934,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3879 | 3934 | ||
3880 | pm_runtime_put(codec->dev); | 3935 | pm_runtime_put(codec->dev); |
3881 | 3936 | ||
3882 | /* Latch volume updates (right only; we always do left then right). */ | 3937 | /* Latch volume update bits */ |
3883 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME, | 3938 | for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) |
3884 | WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); | 3939 | snd_soc_update_bits(codec, wm8994_vu_bits[i].reg, |
3885 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME, | 3940 | wm8994_vu_bits[i].mask, |
3886 | WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); | 3941 | wm8994_vu_bits[i].mask); |
3887 | snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME, | ||
3888 | WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); | ||
3889 | snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME, | ||
3890 | WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); | ||
3891 | snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME, | ||
3892 | WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); | ||
3893 | snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME, | ||
3894 | WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); | ||
3895 | snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME, | ||
3896 | WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); | ||
3897 | snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME, | ||
3898 | WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); | ||
3899 | snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME, | ||
3900 | WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); | ||
3901 | snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME, | ||
3902 | WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); | ||
3903 | snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME, | ||
3904 | WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); | ||
3905 | snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME, | ||
3906 | WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); | ||
3907 | snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME, | ||
3908 | WM8994_DAC1_VU, WM8994_DAC1_VU); | ||
3909 | snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME, | ||
3910 | WM8994_DAC1_VU, WM8994_DAC1_VU); | ||
3911 | snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME, | ||
3912 | WM8994_DAC2_VU, WM8994_DAC2_VU); | ||
3913 | snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME, | ||
3914 | WM8994_DAC2_VU, WM8994_DAC2_VU); | ||
3915 | 3942 | ||
3916 | /* Set the low bit of the 3D stereo depth so TLV matches */ | 3943 | /* Set the low bit of the 3D stereo depth so TLV matches */ |
3917 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2, | 3944 | snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2, |
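The wm8994 change above replaces a long run of hand-written volume-update calls with one table of (register, VU-bit) pairs that can be replayed both at probe time and after the AIF clocks come up. A hedged standalone sketch of that table-driven latching (register numbers and the update function are illustrative, not the WM8994 register map):

#include <stdio.h>

struct vu_bit {
	unsigned int reg;
	unsigned int mask;
};

static const struct vu_bit vu_bits[] = {
	{ 0x610, 0x0100 },	/* e.g. DAC left volume, VU bit */
	{ 0x611, 0x0100 },	/* e.g. DAC right volume, VU bit */
	{ 0x612, 0x0100 },
	{ 0x613, 0x0100 },
};

static void update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
	printf("reg %#x: set bits %#x to %#x\n", reg, mask, val & mask);
}

int main(void)
{
	unsigned int i;

	/* latch every VU bit with one loop instead of one call per register */
	for (i = 0; i < sizeof(vu_bits) / sizeof(vu_bits[0]); i++)
		update_bits(vu_bits[i].reg, vu_bits[i].mask, vu_bits[i].mask);
	return 0;
}

Keeping the pairs in data rather than code is what makes the new SND_SOC_DAPM_POST_PMU handling cheap: the same array is walked again to rewrite each volume register to itself so pending volume changes take effect once the clock is running.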
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c index f23700359c67..080327414c6b 100644 --- a/sound/soc/fsl/imx-audmux.c +++ b/sound/soc/fsl/imx-audmux.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/pinctrl/consumer.h> | ||
29 | 30 | ||
30 | #include "imx-audmux.h" | 31 | #include "imx-audmux.h" |
31 | 32 | ||
@@ -249,6 +250,7 @@ EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port); | |||
249 | static int __devinit imx_audmux_probe(struct platform_device *pdev) | 250 | static int __devinit imx_audmux_probe(struct platform_device *pdev) |
250 | { | 251 | { |
251 | struct resource *res; | 252 | struct resource *res; |
253 | struct pinctrl *pinctrl; | ||
252 | const struct of_device_id *of_id = | 254 | const struct of_device_id *of_id = |
253 | of_match_device(imx_audmux_dt_ids, &pdev->dev); | 255 | of_match_device(imx_audmux_dt_ids, &pdev->dev); |
254 | 256 | ||
@@ -257,6 +259,12 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev) | |||
257 | if (!audmux_base) | 259 | if (!audmux_base) |
258 | return -EADDRNOTAVAIL; | 260 | return -EADDRNOTAVAIL; |
259 | 261 | ||
262 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); | ||
263 | if (IS_ERR(pinctrl)) { | ||
264 | dev_err(&pdev->dev, "setup pinctrl failed!"); | ||
265 | return PTR_ERR(pinctrl); | ||
266 | } | ||
267 | |||
260 | audmux_clk = clk_get(&pdev->dev, "audmux"); | 268 | audmux_clk = clk_get(&pdev->dev, "audmux"); |
261 | if (IS_ERR(audmux_clk)) { | 269 | if (IS_ERR(audmux_clk)) { |
262 | dev_dbg(&pdev->dev, "cannot get clock: %ld\n", | 270 | dev_dbg(&pdev->dev, "cannot get clock: %ld\n", |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 90ee77d2409d..89eae93445cf 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -913,7 +913,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
913 | /* do we need to add this widget to the list ? */ | 913 | /* do we need to add this widget to the list ? */ |
914 | if (list) { | 914 | if (list) { |
915 | int err; | 915 | int err; |
916 | err = dapm_list_add_widget(list, path->sink); | 916 | err = dapm_list_add_widget(list, path->source); |
917 | if (err < 0) { | 917 | if (err < 0) { |
918 | dev_err(widget->dapm->dev, "could not add widget %s\n", | 918 | dev_err(widget->dapm->dev, "could not add widget %s\n", |
919 | widget->name); | 919 | widget->name); |
@@ -954,7 +954,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, | |||
954 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) | 954 | if (stream == SNDRV_PCM_STREAM_PLAYBACK) |
955 | paths = is_connected_output_ep(dai->playback_widget, list); | 955 | paths = is_connected_output_ep(dai->playback_widget, list); |
956 | else | 956 | else |
957 | paths = is_connected_input_ep(dai->playback_widget, list); | 957 | paths = is_connected_input_ep(dai->capture_widget, list); |
958 | 958 | ||
959 | trace_snd_soc_dapm_connected(paths, stream); | 959 | trace_snd_soc_dapm_connected(paths, stream); |
960 | dapm_clear_walk(&card->dapm); | 960 | dapm_clear_walk(&card->dapm); |
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index bedd1717a373..48fd15b312c1 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
@@ -794,6 +794,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, | |||
794 | for (i = 0; i < card->num_links; i++) { | 794 | for (i = 0; i < card->num_links; i++) { |
795 | be = &card->rtd[i]; | 795 | be = &card->rtd[i]; |
796 | 796 | ||
797 | if (!be->dai_link->no_pcm) | ||
798 | continue; | ||
799 | |||
797 | if (be->cpu_dai->playback_widget == widget || | 800 | if (be->cpu_dai->playback_widget == widget || |
798 | be->codec_dai->playback_widget == widget) | 801 | be->codec_dai->playback_widget == widget) |
799 | return be; | 802 | return be; |
@@ -803,6 +806,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, | |||
803 | for (i = 0; i < card->num_links; i++) { | 806 | for (i = 0; i < card->num_links; i++) { |
804 | be = &card->rtd[i]; | 807 | be = &card->rtd[i]; |
805 | 808 | ||
809 | if (!be->dai_link->no_pcm) | ||
810 | continue; | ||
811 | |||
806 | if (be->cpu_dai->capture_widget == widget || | 812 | if (be->cpu_dai->capture_widget == widget || |
807 | be->codec_dai->capture_widget == widget) | 813 | be->codec_dai->capture_widget == widget) |
808 | return be; | 814 | return be; |
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 57cd419f743e..f43edb364a18 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c | |||
@@ -629,3 +629,4 @@ MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); | |||
629 | MODULE_DESCRIPTION("Tegra30 AHUB driver"); | 629 | MODULE_DESCRIPTION("Tegra30 AHUB driver"); |
630 | MODULE_LICENSE("GPL v2"); | 630 | MODULE_LICENSE("GPL v2"); |
631 | MODULE_ALIAS("platform:" DRV_NAME); | 631 | MODULE_ALIAS("platform:" DRV_NAME); |
632 | MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match); | ||
diff --git a/sound/usb/card.h b/sound/usb/card.h index 0d37238b8457..2b9fffff23b6 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h | |||
@@ -119,6 +119,7 @@ struct snd_usb_substream { | |||
119 | unsigned long unlink_mask; /* bitmask of unlinked urbs */ | 119 | unsigned long unlink_mask; /* bitmask of unlinked urbs */ |
120 | 120 | ||
121 | /* data and sync endpoints for this stream */ | 121 | /* data and sync endpoints for this stream */ |
122 | unsigned int ep_num; /* the endpoint number */ | ||
122 | struct snd_usb_endpoint *data_endpoint; | 123 | struct snd_usb_endpoint *data_endpoint; |
123 | struct snd_usb_endpoint *sync_endpoint; | 124 | struct snd_usb_endpoint *sync_endpoint; |
124 | unsigned long flags; | 125 | unsigned long flags; |
diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 6b7d7a2b7baa..083ed81160e5 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c | |||
@@ -97,6 +97,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, | |||
97 | subs->formats |= fp->formats; | 97 | subs->formats |= fp->formats; |
98 | subs->num_formats++; | 98 | subs->num_formats++; |
99 | subs->fmt_type = fp->fmt_type; | 99 | subs->fmt_type = fp->fmt_type; |
100 | subs->ep_num = fp->endpoint; | ||
100 | } | 101 | } |
101 | 102 | ||
102 | /* | 103 | /* |
@@ -119,9 +120,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, | |||
119 | if (as->fmt_type != fp->fmt_type) | 120 | if (as->fmt_type != fp->fmt_type) |
120 | continue; | 121 | continue; |
121 | subs = &as->substream[stream]; | 122 | subs = &as->substream[stream]; |
122 | if (!subs->data_endpoint) | 123 | if (subs->ep_num == fp->endpoint) { |
123 | continue; | ||
124 | if (subs->data_endpoint->ep_num == fp->endpoint) { | ||
125 | list_add_tail(&fp->list, &subs->fmt_list); | 124 | list_add_tail(&fp->list, &subs->fmt_list); |
126 | subs->num_formats++; | 125 | subs->num_formats++; |
127 | subs->formats |= fp->formats; | 126 | subs->formats |= fp->formats; |
@@ -134,7 +133,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, | |||
134 | if (as->fmt_type != fp->fmt_type) | 133 | if (as->fmt_type != fp->fmt_type) |
135 | continue; | 134 | continue; |
136 | subs = &as->substream[stream]; | 135 | subs = &as->substream[stream]; |
137 | if (subs->data_endpoint) | 136 | if (subs->ep_num) |
138 | continue; | 137 | continue; |
139 | err = snd_pcm_new_stream(as->pcm, stream, 1); | 138 | err = snd_pcm_new_stream(as->pcm, stream, 1); |
140 | if (err < 0) | 139 | if (err < 0) |
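The snd-usb hunks above store the endpoint number directly in the substream when it is first initialised, so later format descriptors can be matched on that number instead of reaching through a data_endpoint object that may not exist yet. A hedged sketch of the matching logic in standalone C (the struct layout and names are illustrative, not the sound/usb types):

#include <stdio.h>

struct substream {
	unsigned int ep_num;	/* 0 means "not used yet" */
	int num_formats;
};

struct fmt { unsigned int endpoint; };

static int add_format(struct substream *subs, int n, const struct fmt *fp)
{
	int i;

	/* first pass: attach to an existing substream on the same endpoint */
	for (i = 0; i < n; i++) {
		if (subs[i].ep_num == fp->endpoint) {
			subs[i].num_formats++;
			return i;
		}
	}
	/* second pass: claim the first unused slot for a new stream */
	for (i = 0; i < n; i++) {
		if (!subs[i].ep_num) {
			subs[i].ep_num = fp->endpoint;
			subs[i].num_formats = 1;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	struct substream subs[2] = { { 0, 0 }, { 0, 0 } };
	struct fmt a = { 0x81 }, b = { 0x81 }, c = { 0x02 };

	printf("%d %d %d\n", add_format(subs, 2, &a),
	       add_format(subs, 2, &b), add_format(subs, 2, &c));
	return 0;
}

Matching on a plain integer removes the NULL check on data_endpoint that the old code needed, which is why the "if (!subs->data_endpoint) continue;" lines disappear in the hunk.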