-rw-r--r--  MAINTAINERS                              |   2
-rw-r--r--  arch/arm/include/asm/io.h                |   5
-rw-r--r--  arch/arm/include/asm/mach/map.h          |  14
-rw-r--r--  arch/arm/mach-omap1/mcbsp.c              |   8
-rw-r--r--  arch/arm/mach-omap2/mcbsp.c              |   4
-rw-r--r--  arch/arm/mm/mmu.c                        |  20
-rw-r--r--  arch/arm/plat-mxc/clock.c                |   1
-rw-r--r--  arch/arm/plat-omap/gpio.c                |   2
-rw-r--r--  arch/arm/plat-omap/include/mach/mcbsp.h  |   2
-rw-r--r--  arch/arm/plat-omap/mcbsp.c               |   5
-rw-r--r--  arch/sparc64/kernel/smp.c                |  14
-rw-r--r--  drivers/ata/Kconfig                      |   6
-rw-r--r--  drivers/ata/ahci.c                       |  21
-rw-r--r--  drivers/ata/libata-sff.c                 |   5
-rw-r--r--  drivers/ata/pata_marvell.c               |  51
-rw-r--r--  drivers/ata/pata_sil680.c                |   3
-rw-r--r--  drivers/ata/sata_mv.c                    |   3
-rw-r--r--  drivers/ata/sata_nv.c                    |  19
-rw-r--r--  drivers/usb/host/ohci-omap.c             |   2
-rw-r--r--  drivers/usb/serial/console.c             |   2
-rw-r--r--  fs/nfs/super.c                           |   6
-rw-r--r--  include/linux/cpuset.h                   |   2
-rw-r--r--  include/net/inet_timewait_sock.h         |   3
-rw-r--r--  kernel/cpuset.c                          | 312
-rw-r--r--  kernel/sched.c                           |  19
-rw-r--r--  net/bridge/br_ioctl.c                    |   8
-rw-r--r--  net/bridge/br_sysfs_br.c                 |  26
-rw-r--r--  net/core/dev.c                           |   7
-rw-r--r--  net/ipv4/inet_timewait_sock.c            |  35
-rw-r--r--  net/ipv4/tcp_ipv4.c                      |   1
-rw-r--r--  net/ipv6/tcp_ipv6.c                      |   1
-rw-r--r--  net/netfilter/nf_conntrack_irc.c         |  10
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c   |  14
-rw-r--r--  net/netfilter/nf_conntrack_sip.c         |   6

34 files changed, 419 insertions(+), 220 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index af279458b614..b3e92fbe336c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1593,7 +1593,7 @@ S: Supported
 EMBEDDED LINUX
 P:	Paul Gortmaker
 M:	paul.gortmaker@windriver.com
-P	David Woodhouse
+P:	David Woodhouse
 M:	dwmw2@infradead.org
 L:	linux-embedded@vger.kernel.org
 S:	Maintained
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 94a95d7fafd6..71934856fc22 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -61,8 +61,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 #define MT_DEVICE_NONSHARED	1
 #define MT_DEVICE_CACHED	2
 #define MT_DEVICE_IXP2000	3
+#define MT_DEVICE_WC		4
 /*
- * types 4 onwards can be found in asm/mach/map.h and are undefined
+ * types 5 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
  */
 
@@ -215,11 +216,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define ioremap(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE)
 #define ioremap_nocache(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE)
 #define ioremap_cached(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE_CACHED)
+#define ioremap_wc(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE_WC)
 #define iounmap(cookie)			__iounmap(cookie)
 #else
 #define ioremap(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_nocache(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_cached(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_wc(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE_WC)
 #define iounmap(cookie)			__arch_iounmap(cookie)
 #endif
 
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 06f583b13999..9eb936e49cc3 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -18,13 +18,13 @@ struct map_desc {
 	unsigned int type;
 };
 
-/* types 0-3 are defined in asm/io.h */
-#define MT_CACHECLEAN		4
-#define MT_MINICLEAN		5
-#define MT_LOW_VECTORS		6
-#define MT_HIGH_VECTORS		7
-#define MT_MEMORY		8
-#define MT_ROM			9
+/* types 0-4 are defined in asm/io.h */
+#define MT_CACHECLEAN		5
+#define MT_MINICLEAN		6
+#define MT_LOW_VECTORS		7
+#define MT_HIGH_VECTORS		8
+#define MT_MEMORY		9
+#define MT_ROM			10
 
 #define MT_NONSHARED_DEVICE	MT_DEVICE_NONSHARED
 #define MT_IXP2000_DEVICE	MT_DEVICE_IXP2000
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index 826010d5d014..2baeaeb0c900 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -159,6 +159,7 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
 #ifdef CONFIG_ARCH_OMAP730
 static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP730_MCBSP1_BASE,
 		.virt_base	= io_p2v(OMAP730_MCBSP1_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP1_TX,
@@ -167,6 +168,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
 		.ops		= &omap1_mcbsp_ops,
 	},
 	{
+		.phys_base	= OMAP730_MCBSP2_BASE,
 		.virt_base	= io_p2v(OMAP730_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP3_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP3_TX,
@@ -184,6 +186,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
 #ifdef CONFIG_ARCH_OMAP15XX
 static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP1510_MCBSP1_BASE,
 		.virt_base	= OMAP1510_MCBSP1_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP1_TX,
@@ -193,6 +196,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP1510_MCBSP2_BASE,
 		.virt_base	= io_p2v(OMAP1510_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP2_TX,
@@ -201,6 +205,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 		.ops		= &omap1_mcbsp_ops,
 	},
 	{
+		.phys_base	= OMAP1510_MCBSP3_BASE,
 		.virt_base	= OMAP1510_MCBSP3_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP3_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP3_TX,
@@ -219,6 +224,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 #ifdef CONFIG_ARCH_OMAP16XX
 static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP1610_MCBSP1_BASE,
 		.virt_base	= OMAP1610_MCBSP1_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP1_TX,
@@ -228,6 +234,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP1610_MCBSP2_BASE,
 		.virt_base	= io_p2v(OMAP1610_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP2_TX,
@@ -236,6 +243,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 		.ops		= &omap1_mcbsp_ops,
 	},
 	{
+		.phys_base	= OMAP1610_MCBSP3_BASE,
 		.virt_base	= OMAP1610_MCBSP3_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP3_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP3_TX,
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 27eb6e3ca926..b261f1f80b5e 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -134,6 +134,7 @@ static struct omap_mcbsp_ops omap2_mcbsp_ops = {
 #ifdef CONFIG_ARCH_OMAP24XX
 static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP24XX_MCBSP1_BASE,
 		.virt_base	= IO_ADDRESS(OMAP24XX_MCBSP1_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP1_TX,
@@ -143,6 +144,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP24XX_MCBSP2_BASE,
 		.virt_base	= IO_ADDRESS(OMAP24XX_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP2_TX,
@@ -161,6 +163,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
 #ifdef CONFIG_ARCH_OMAP34XX
 static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP34XX_MCBSP1_BASE,
 		.virt_base	= IO_ADDRESS(OMAP34XX_MCBSP1_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP1_TX,
@@ -170,6 +173,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP34XX_MCBSP2_BASE,
 		.virt_base	= IO_ADDRESS(OMAP34XX_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP2_TX,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25d9a11eb617..a713e40e1f1a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -211,6 +211,12 @@ static struct mem_type mem_types[] = {
 		  PMD_SECT_TEX(1),
 		.domain		= DOMAIN_IO,
 	},
+	[MT_DEVICE_WC] = {	/* ioremap_wc */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
+		.domain		= DOMAIN_IO,
+	},
 	[MT_CACHECLEAN] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
@@ -273,6 +279,20 @@ static void __init build_mem_type_table(void)
 	}
 
 	/*
+	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
+	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
+	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 */
+	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+		mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+	} else {
+		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
 	 * ARMv5 and lower, bit 4 must be set for page tables.
 	 * (was: cache "update-able on write" bit on ARM610)
 	 * However, Xscale cores require this bit to be cleared.
diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c
index 2f8627218839..0a38f0b396eb 100644
--- a/arch/arm/plat-mxc/clock.c
+++ b/arch/arm/plat-mxc/clock.c
@@ -37,7 +37,6 @@
 #include <linux/proc_fs.h>
 #include <linux/semaphore.h>
 #include <linux/string.h>
-#include <linux/version.h>
 
 #include <mach/clock.h>
 
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 3e76ee2bc731..9e1341ebc14e 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1488,7 +1488,7 @@ static int __init _omap_gpio_init(void)
 		bank->chip.set = gpio_set;
 		if (bank_is_mpuio(bank)) {
 			bank->chip.label = "mpuio";
-#ifdef CONFIG_ARCH_OMAP1
+#ifdef CONFIG_ARCH_OMAP16XX
 			bank->chip.dev = &omap_mpuio_device.dev;
 #endif
 			bank->chip.base = OMAP_MPUIO(0);
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index 6eb44a92871d..8fdb95e26fcd 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -315,6 +315,7 @@ struct omap_mcbsp_ops {
 };
 
 struct omap_mcbsp_platform_data {
+	unsigned long phys_base;
 	u32 virt_base;
 	u8 dma_rx_sync, dma_tx_sync;
 	u16 rx_irq, tx_irq;
@@ -324,6 +325,7 @@ struct omap_mcbsp_platform_data {
 
 struct omap_mcbsp {
 	struct device *dev;
+	unsigned long phys_base;
 	u32 io_base;
 	u8 id;
 	u8 free;
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index d0844050f2d2..014d26574bb6 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -651,7 +651,7 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer,
 	omap_set_dma_dest_params(mcbsp[id].dma_tx_lch,
 				 src_port,
 				 OMAP_DMA_AMODE_CONSTANT,
-				 mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1,
+				 mcbsp[id].phys_base + OMAP_MCBSP_REG_DXR1,
 				 0, 0);
 
 	omap_set_dma_src_params(mcbsp[id].dma_tx_lch,
@@ -712,7 +712,7 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer,
 	omap_set_dma_src_params(mcbsp[id].dma_rx_lch,
 				src_port,
 				OMAP_DMA_AMODE_CONSTANT,
-				mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1,
+				mcbsp[id].phys_base + OMAP_MCBSP_REG_DRR1,
 				0, 0);
 
 	omap_set_dma_dest_params(mcbsp[id].dma_rx_lch,
@@ -830,6 +830,7 @@ static int __init omap_mcbsp_probe(struct platform_device *pdev)
 	mcbsp[id].dma_tx_lch = -1;
 	mcbsp[id].dma_rx_lch = -1;
 
+	mcbsp[id].phys_base = pdata->phys_base;
 	mcbsp[id].io_base = pdata->virt_base;
 	/* Default I/O is IRQ based */
 	mcbsp[id].io_type = OMAP_MCBSP_IRQ_IO;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 743ccad61c60..2be166c544ca 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -80,8 +80,6 @@ void smp_bogo(struct seq_file *m)
 		   i, cpu_data(i).clock_tick);
 }
 
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
 extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
@@ -120,9 +118,9 @@ void __cpuinit smp_callin(void)
 	while (!cpu_isset(cpuid, smp_commenced_mask))
 		rmb();
 
-	spin_lock(&call_lock);
+	ipi_call_lock();
 	cpu_set(cpuid, cpu_online_map);
-	spin_unlock(&call_lock);
+	ipi_call_unlock();
 
 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();
@@ -1305,10 +1303,6 @@ int __cpu_disable(void)
 	c->core_id = 0;
 	c->proc_id = -1;
 
-	spin_lock(&call_lock);
-	cpu_clear(cpu, cpu_online_map);
-	spin_unlock(&call_lock);
-
 	smp_wmb();
 
 	/* Make sure no interrupts point to this cpu. */
@@ -1318,6 +1312,10 @@ int __cpu_disable(void)
 	mdelay(1);
 	local_irq_disable();
 
+	ipi_call_lock();
+	cpu_clear(cpu, cpu_online_map);
+	ipi_call_unlock();
+
 	return 0;
 }
 
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ae8494944c45..11c8c19f0fb7 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -448,8 +448,10 @@ config PATA_MARVELL
 	tristate "Marvell PATA support via legacy mode"
 	depends on PCI
 	help
-	  This option enables limited support for the Marvell 88SE6145 ATA
-	  controller.
+	  This option enables limited support for the Marvell 88SE61xx ATA
+	  controllers. If you wish to use only the SATA ports, select the
+	  AHCI driver alone. If you wish to use the PATA port or both SATA
+	  and PATA, include this driver.
 
 	  If unsure, say N.
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c729e6988bbb..2e1a7cb2ed5f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -420,7 +420,7 @@ static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci_mv */
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
-				 AHCI_HFLAG_MV_PATA),
+				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
 		.pio_mask	= 0x1f, /* pio0-4 */
@@ -487,7 +487,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -610,6 +612,15 @@ module_param(ahci_em_messages, int, 0444);
 MODULE_PARM_DESC(ahci_em_messages,
 	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
 
+#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
+static int marvell_enable;
+#else
+static int marvell_enable = 1;
+#endif
+module_param(marvell_enable, int, 0644);
+MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
+
+
 static inline int ahci_nr_ports(u32 cap)
 {
 	return (cap & 0x1f) + 1;
@@ -732,6 +743,8 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
 			   "MV_AHCI HACK: port_map %x -> %x\n",
 			   port_map,
 			   port_map & mv);
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
 
 		port_map &= mv;
 	}
@@ -2533,6 +2546,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
+	/* The AHCI driver can only drive the SATA ports, the PATA driver
+	   can drive them all so if both drivers are selected make sure
+	   AHCI stays out of the way */
+	if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
+		return -ENODEV;
+
 	/* acquire resources */
 	rc = pcim_enable_device(pdev);
 	if (rc)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 304fdc6f1dc2..2a4c516894f0 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1315,11 +1315,6 @@ fsm_start:
 		break;
 
 	case HSM_ST_ERR:
-		/* make sure qc->err_mask is available to
-		 * know what's wrong and recover
-		 */
-		WARN_ON(!(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)));
-
 		ap->hsm_task_state = HSM_ST_IDLE;
 
 		/* complete taskfile transaction */
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 24a011b25024..0d87eec84966 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -20,29 +20,30 @@
 #include <linux/ata.h>
 
 #define DRV_NAME	"pata_marvell"
-#define DRV_VERSION	"0.1.4"
+#define DRV_VERSION	"0.1.6"
 
 /**
- *	marvell_pre_reset	-	check for 40/80 pin
- *	@link: link
- *	@deadline: deadline jiffies for the operation
+ *	marvell_pata_active	-	check if PATA is active
+ *	@pdev: PCI device
  *
- *	Perform the PATA port setup we need.
+ *	Returns 1 if the PATA port may be active. We know how to check this
+ *	for the 6145 but not the other devices
  */
 
-static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
+static int marvell_pata_active(struct pci_dev *pdev)
 {
-	struct ata_port *ap = link->ap;
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int i;
 	u32 devices;
 	void __iomem *barp;
-	int i;
 
-	/* Check if our port is enabled */
+	/* We don't yet know how to do this for other devices */
+	if (pdev->device != 0x6145)
+		return 1;
 
 	barp = pci_iomap(pdev, 5, 0x10);
 	if (barp == NULL)
 		return -ENOMEM;
+
 	printk("BAR5:");
 	for(i = 0; i <= 0x0F; i++)
 		printk("%02X:%02X ", i, ioread8(barp + i));
@@ -51,9 +52,27 @@ static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
 	devices = ioread32(barp + 0x0C);
 	pci_iounmap(pdev, barp);
 
-	if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
-	    (!(devices & 0x10)))	/* PATA enable ? */
-		return -ENOENT;
+	if (devices & 0x10)
+		return 1;
+	return 0;
+}
+
+/**
+ *	marvell_pre_reset	-	check for 40/80 pin
+ *	@link: link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ */
+
+static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (pdev->device == 0x6145 && ap->port_no == 0 &&
+	    !marvell_pata_active(pdev))	/* PATA enable ? */
+		return -ENOENT;
 
 	return ata_sff_prereset(link, deadline);
 }
@@ -128,6 +147,12 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
 	if (pdev->device == 0x6101)
 		ppi[1] = &ata_dummy_port_info;
 
+#if defined(CONFIG_AHCI) || defined(CONFIG_AHCI_MODULE)
+	if (!marvell_pata_active(pdev)) {
+		printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
+		return -ENODEV;
+	}
+#endif
 	return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL);
 }
 
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 720b8645f58a..e970b227fbce 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -322,9 +322,6 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
 	/* Try to acquire MMIO resources and fallback to PIO if
 	 * that fails
 	 */
-	rc = pcim_enable_device(pdev);
-	if (rc)
-		return rc;
 	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
 	if (rc)
 		goto use_ioports;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 13c1d2af18ac..c815f8ecf6e6 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -667,7 +667,8 @@ static const struct pci_device_id mv_pci_tbl[] = {
 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
-	/* RocketRAID 1740/174x have different identifiers */
+	/* RocketRAID 1720/174x have different identifiers */
+	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
 	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
 	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
 
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 858f70610eda..1e1f3f3757ae 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -309,8 +309,6 @@ static void nv_nf2_freeze(struct ata_port *ap);
 static void nv_nf2_thaw(struct ata_port *ap);
 static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_hardreset(struct ata_link *link, unsigned int *class,
-			unsigned long deadline);
 static int nv_adma_slave_config(struct scsi_device *sdev);
 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
@@ -407,7 +405,7 @@ static struct scsi_host_template nv_swncq_sht = {
 
 static struct ata_port_operations nv_generic_ops = {
 	.inherits		= &ata_bmdma_port_ops,
-	.hardreset		= nv_hardreset,
+	.hardreset		= ATA_OP_NULL,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 };
@@ -1588,21 +1586,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
 	ata_sff_thaw(ap);
 }
 
-static int nv_hardreset(struct ata_link *link, unsigned int *class,
-			unsigned long deadline)
-{
-	int rc;
-
-	/* SATA hardreset fails to retrieve proper device signature on
-	 * some controllers.  Request follow up SRST.  For more info,
-	 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
-	 */
-	rc = sata_sff_hardreset(link, class, deadline);
-	if (rc)
-		return rc;
-	return -EAGAIN;
-}
-
 static void nv_adma_error_handler(struct ata_port *ap)
 {
 	struct nv_adma_port_priv *pp = ap->private_data;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 1eb64d08b60a..95b3ec89c126 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -208,7 +208,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
 	if (cpu_is_omap16xx())
 		ocpi_enable();
 
-#ifdef CONFIG_ARCH_OMAP_OTG
+#ifdef CONFIG_USB_OTG
 	if (need_transceiver) {
 		ohci->transceiver = otg_get_transceiver();
 		if (ohci->transceiver) {
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 7b74238ad1c7..e980766bb84b 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -161,7 +161,7 @@ static int usb_console_setup(struct console *co, char *options)
 	if (serial->type->set_termios) {
 		termios->c_cflag = cflag;
 		tty_termios_encode_baud_rate(termios, baud, baud);
-		serial->type->set_termios(NULL, port, &dummy);
+		serial->type->set_termios(tty, port, &dummy);
 
 		port->port.tty = NULL;
 		kfree(termios);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 9abcd2b329f7..e9b20173fef3 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1279,6 +1279,12 @@ static int nfs_parse_mount_options(char *raw,
 		}
 	}
 
+	if (errors > 0) {
+		dfprintk(MOUNT, "NFS: parsing encountered %d error%s\n",
+				errors, (errors == 1 ? "" : "s"));
+		if (!sloppy)
+			return 0;
+	}
 	return 1;
 
 out_nomem:
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e8f450c499b0..2691926fb506 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -160,7 +160,7 @@ static inline int current_cpuset_is_being_rebound(void)
 
 static inline void rebuild_sched_domains(void)
 {
-	partition_sched_domains(0, NULL, NULL);
+	partition_sched_domains(1, NULL, NULL);
 }
 
 #endif /* !CONFIG_CPUSETS */
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 95c660c9719b..91324908fccd 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -208,6 +208,9 @@ extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
 extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
 				 struct inet_timewait_death_row *twdr);
 
+extern void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
+			    struct inet_timewait_death_row *twdr, int family);
+
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d5ab79cf516d..f227bc172690 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -14,6 +14,8 @@
  *  2003-10-22 Updates by Stephen Hemminger.
  *  2004 May-July Rework by Paul Jackson.
  *  2006 Rework by Paul Menage to use generic cgroups
+ *  2008 Rework of the scheduler domains and CPU hotplug handling
+ *       by Max Krasnyansky
  *
  *  This file is subject to the terms and conditions of the GNU General Public
  *  License.  See the file COPYING in the main directory of the Linux
@@ -236,9 +238,11 @@ static struct cpuset top_cpuset = {
 
 static DEFINE_MUTEX(callback_mutex);
 
-/* This is ugly, but preserves the userspace API for existing cpuset
+/*
+ * This is ugly, but preserves the userspace API for existing cpuset
  * users. If someone tries to mount the "cpuset" filesystem, we
- * silently switch it to mount "cgroup" instead */
+ * silently switch it to mount "cgroup" instead
+ */
 static int cpuset_get_sb(struct file_system_type *fs_type,
 			 int flags, const char *unused_dev_name,
 			 void *data, struct vfsmount *mnt)
@@ -473,10 +477,9 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 }
 
 /*
- * Helper routine for rebuild_sched_domains().
+ * Helper routine for generate_sched_domains().
  * Do cpusets a, b have overlapping cpus_allowed masks?
  */
-
 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 {
 	return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
@@ -518,26 +521,15 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
 }
 
 /*
- * rebuild_sched_domains()
+ * generate_sched_domains()
  *
- * This routine will be called to rebuild the scheduler's dynamic
- * sched domains:
- * - if the flag 'sched_load_balance' of any cpuset with non-empty
- *   'cpus' changes,
- * - or if the 'cpus' allowed changes in any cpuset which has that
- *   flag enabled,
- * - or if the 'sched_relax_domain_level' of any cpuset which has
- *   that flag enabled and with non-empty 'cpus' changes,
- * - or if any cpuset with non-empty 'cpus' is removed,
- * - or if a cpu gets offlined.
- *
- * This routine builds a partial partition of the systems CPUs
- * (the set of non-overlappping cpumask_t's in the array 'part'
- * below), and passes that partial partition to the kernel/sched.c
- * partition_sched_domains() routine, which will rebuild the
- * schedulers load balancing domains (sched domains) as specified
- * by that partial partition.  A 'partial partition' is a set of
- * non-overlapping subsets whose union is a subset of that set.
+ * This function builds a partial partition of the systems CPUs
+ * A 'partial partition' is a set of non-overlapping subsets whose
+ * union is a subset of that set.
+ * The output of this function needs to be passed to kernel/sched.c
+ * partition_sched_domains() routine, which will rebuild the scheduler's
+ * load balancing domains (sched domains) as specified by that partial
+ * partition.
  *
  * See "What is sched_load_balance" in Documentation/cpusets.txt
  * for a background explanation of this.
@@ -547,13 +539,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * domains when operating in the severe memory shortage situations
  * that could cause allocation failures below.
  *
- * Call with cgroup_mutex held.  May take callback_mutex during
- * call due to the kfifo_alloc() and kmalloc() calls.  May nest
- * a call to the get_online_cpus()/put_online_cpus() pair.
- * Must not be called holding callback_mutex, because we must not
- * call get_online_cpus() while holding callback_mutex.  Elsewhere
- * the kernel nests callback_mutex inside get_online_cpus() calls.
- * So the reverse nesting would risk an ABBA deadlock.
+ * Must be called with cgroup_lock held.
  *
  * The three key local variables below are:
  *  q  - a linked-list queue of cpuset pointers, used to implement a
@@ -588,10 +574,10 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * element of the partition (one sched domain) to be passed to
  * partition_sched_domains().
  */
-
-void rebuild_sched_domains(void)
+static int generate_sched_domains(cpumask_t **domains,
+			struct sched_domain_attr **attributes)
 {
-	LIST_HEAD(q);		/* queue of cpusets to be scanned*/
+	LIST_HEAD(q);		/* queue of cpusets to be scanned */
 	struct cpuset *cp;	/* scans q */
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
@@ -601,23 +587,26 @@ void rebuild_sched_domains(void)
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
-	csa = NULL;
+	ndoms = 0;
 	doms = NULL;
 	dattr = NULL;
+	csa = NULL;
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
-		ndoms = 1;
 		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 		if (!doms)
-			goto rebuild;
+			goto done;
+
 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 		if (dattr) {
 			*dattr = SD_ATTR_INIT;
 			update_domain_attr_tree(dattr, &top_cpuset);
 		}
 		*doms = top_cpuset.cpus_allowed;
-		goto rebuild;
+
+		ndoms = 1;
+		goto done;
 	}
 
 	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
@@ -680,61 +669,141 @@ restart:
 		}
 	}
 
-	/* Convert <csn, csa> to <ndoms, doms> */
+	/*
+	 * Now we know how many domains to create.
+	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
+	 */
 	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
-	if (!doms)
-		goto rebuild;
+	if (!doms) {
+		ndoms = 0;
+		goto done;
+	}
+
+	/*
+	 * The rest of the code, including the scheduler, can deal with
+	 * dattr==NULL case. No need to abort if alloc fails.
+	 */
 	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 
 	for (nslot = 0, i = 0; i < csn; i++) {
 		struct cpuset *a = csa[i];
+		cpumask_t *dp;
 		int apn = a->pn;
 
-		if (apn >= 0) {
-			cpumask_t *dp = doms + nslot;
-
-			if (nslot == ndoms) {
-				static int warnings = 10;
-				if (warnings) {
-					printk(KERN_WARNING
-					 "rebuild_sched_domains confused:"
-					 " nslot %d, ndoms %d, csn %d, i %d,"
-					 " apn %d\n",
-					 nslot, ndoms, csn, i, apn);
-					warnings--;
-				}
-				continue;
+		if (apn < 0) {
+			/* Skip completed partitions */
+			continue;
+		}
+
+		dp = doms + nslot;
+
+		if (nslot == ndoms) {
+			static int warnings = 10;
+			if (warnings) {
+				printk(KERN_WARNING
+				 "rebuild_sched_domains confused:"
+				 " nslot %d, ndoms %d, csn %d, i %d,"
+				 " apn %d\n",
+				 nslot, ndoms, csn, i, apn);
+				warnings--;
 			}
+			continue;
+		}
 
-			cpus_clear(*dp);
-			if (dattr)
-				*(dattr + nslot) = SD_ATTR_INIT;
-			for (j = i; j < csn; j++) {
-				struct cpuset *b = csa[j];
-
-				if (apn == b->pn) {
-					cpus_or(*dp, *dp, b->cpus_allowed);
-					b->pn = -1;
-					if (dattr)
-						update_domain_attr_tree(dattr
-								   + nslot, b);
-				}
-			}
-			nslot++;
+		cpus_clear(*dp);
+		if (dattr)
+			*(dattr + nslot) = SD_ATTR_INIT;
+		for (j = i; j < csn; j++) {
+			struct cpuset *b = csa[j];
+
+			if (apn == b->pn) {
+				cpus_or(*dp, *dp, b->cpus_allowed);
+				if (dattr)
+					update_domain_attr_tree(dattr + nslot, b);
+
+				/* Done with this partition */
+				b->pn = -1;
+			}
 		}
+		nslot++;
 	}
 	BUG_ON(nslot != ndoms);
 
-rebuild:
-	/* Have scheduler rebuild sched domains */
+done:
+	kfree(csa);
+
+	*domains    = doms;
+	*attributes = dattr;
+	return ndoms;
+}
+
+/*
+ * Rebuild scheduler domains.
+ *
+ * Call with neither cgroup_mutex held nor within get_online_cpus().
+ * Takes both cgroup_mutex and get_online_cpus().
+ *
+ * Cannot be directly called from cpuset code handling changes
+ * to the cpuset pseudo-filesystem, because it cannot be called
+ * from code that already holds cgroup_mutex.
+ */
+static void do_rebuild_sched_domains(struct work_struct *unused)
+{
+	struct sched_domain_attr *attr;
+	cpumask_t *doms;
+	int ndoms;
+
 	get_online_cpus();
-	partition_sched_domains(ndoms, doms, dattr);
+
+	/* Generate domain masks and attrs */
+	cgroup_lock();
+	ndoms = generate_sched_domains(&doms, &attr);
+	cgroup_unlock();
+
+	/* Have scheduler rebuild the domains */
+	partition_sched_domains(ndoms, doms, attr);
+
 	put_online_cpus();
+}
 
-done:
-	kfree(csa);
-	/* Don't kfree(doms) -- partition_sched_domains() does that. */
-	/* Don't kfree(dattr) -- partition_sched_domains() does that. */
+static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
+
+/*
+ * Rebuild scheduler domains, asynchronously via workqueue.
+ *
+ * If the flag 'sched_load_balance' of any cpuset with non-empty
+ * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
+ * which has that flag enabled, or if any cpuset with a non-empty
+ * 'cpus' is removed, then call this routine to rebuild the
+ * scheduler's dynamic sched domains.
+ *
+ * The rebuild_sched_domains() and partition_sched_domains()
+ * routines must nest cgroup_lock() inside get_online_cpus(),
+ * but such cpuset changes as these must nest that locking the
+ * other way, holding cgroup_lock() for much of the code.
+ *
+ * So in order to avoid an ABBA deadlock, the cpuset code handling
+ * these user changes delegates the actual sched domain rebuilding
+ * to a separate workqueue thread, which ends up processing the
+ * above do_rebuild_sched_domains() function.
+ */
+static void async_rebuild_sched_domains(void)
+{
+	schedule_work(&rebuild_sched_domains_work);
+}
+
+/*
+ * Accomplishes the same scheduler domain rebuild as the above
+ * async_rebuild_sched_domains(), however it directly calls the
+ * rebuild routine synchronously rather than calling it via an
+ * asynchronous work thread.
+ *
+ * This can only be called from code that is not holding
+ * cgroup_mutex (not nested in a cgroup_lock() call.)
+ */
+void rebuild_sched_domains(void)
+{
+	do_rebuild_sched_domains(NULL);
 }
 
 /**
@@ -863,7 +932,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
 		return retval;
 
 	if (is_load_balanced)
-		rebuild_sched_domains();
+		async_rebuild_sched_domains();
 	return 0;
 }
 
@@ -1090,7 +1159,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
 		if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
-			rebuild_sched_domains();
+			async_rebuild_sched_domains();
 	}
 
 	return 0;
@@ -1131,7 +1200,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	mutex_unlock(&callback_mutex);
 
 	if (cpus_nonempty && balance_flag_changed)
-		rebuild_sched_domains();
+		async_rebuild_sched_domains();
 
 	return 0;
 }
@@ -1492,6 +1561,9 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
 	default:
 		BUG();
 	}
+
+	/* Unreachable but makes gcc happy */
+	return 0;
 }
 
 static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
@@ -1504,6 +1576,9 @@ static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
 	default:
 		BUG();
 	}
+
+	/* Unreachable but makes gcc happy */
+	return 0;
 }
 
 
@@ -1692,15 +1767,9 @@ static struct cgroup_subsys_state *cpuset_create(
 }
 
 /*
- * Locking note on the strange update_flag() call below:
- *
  * If the cpuset being removed has its flag 'sched_load_balance'
  * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains().  The get_online_cpus()
- * call in rebuild_sched_domains() must not be made while holding
- * callback_mutex.  Elsewhere the kernel nests callback_mutex inside
- * get_online_cpus() calls.  So the reverse nesting would risk an
- * ABBA deadlock.
+ * will call async_rebuild_sched_domains().
  */
 
 static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
@@ -1719,7 +1788,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 struct cgroup_subsys cpuset_subsys = {
 	.name = "cpuset",
 	.create = cpuset_create,
-	.destroy  = cpuset_destroy,
+	.destroy = cpuset_destroy,
 	.can_attach = cpuset_can_attach,
 	.attach = cpuset_attach,
 	.populate = cpuset_populate,
@@ -1811,7 +1880,7 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
 }
 
 /*
- * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
+ * If CPU and/or memory hotplug handlers, below, unplug any CPUs
  * or memory nodes, we need to walk over the cpuset hierarchy,
  * removing that CPU or node from all cpusets.  If this removes the
  * last CPU or node from a cpuset, then move the tasks in the empty
@@ -1903,35 +1972,6 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
 }
 
 /*
- * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
- * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
- * track what's online after any CPU or memory node hotplug or unplug event.
- *
- * Since there are two callers of this routine, one for CPU hotplug
- * events and one for memory node hotplug events, we could have coded
- * two separate routines here.  We code it as a single common routine
- * in order to minimize text size.
- */
-
-static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
-{
-	cgroup_lock();
-
-	top_cpuset.cpus_allowed = cpu_online_map;
-	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
-	scan_for_empty_cpusets(&top_cpuset);
-
-	/*
-	 * Scheduler destroys domains on hotplug events.
-	 * Rebuild them based on the current settings.
-	 */
-	if (rebuild_sd)
-		rebuild_sched_domains();
-
-	cgroup_unlock();
-}
-
-/*
  * The top_cpuset tracks what CPUs and Memory Nodes are online,
  * period.  This is necessary in order to make cpusets transparent
  * (of no affect) on systems that are actively using CPU hotplug
@@ -1939,40 +1979,52 @@ static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
  *
  * This routine ensures that top_cpuset.cpus_allowed tracks
  * cpu_online_map on each CPU hotplug (cpuhp) event.
+ *
+ * Called within get_online_cpus().  Needs to call cgroup_lock()
+ * before calling generate_sched_domains().
  */
-
-static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
+static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 				unsigned long phase, void *unused_cpu)
 {
+	struct sched_domain_attr *attr;
+	cpumask_t *doms;
+	int ndoms;
+
 	switch (phase) {
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		common_cpu_mem_hotplug_unplug(1);
 		break;
+
 	default:
 		return NOTIFY_DONE;
 	}
 
+	cgroup_lock();
+	top_cpuset.cpus_allowed = cpu_online_map;
+	scan_for_empty_cpusets(&top_cpuset);
+	ndoms = generate_sched_domains(&doms, &attr);
+	cgroup_unlock();
+
+	/* Have scheduler rebuild the domains */
+	partition_sched_domains(ndoms, doms, attr);
+
 	return NOTIFY_OK;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
- * Call this routine anytime after you change
- * node_states[N_HIGH_MEMORY].
- * See also the previous routine cpuset_handle_cpuhp().
+ * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
+ * See also the previous routine cpuset_track_online_cpus().
  */
-
 void cpuset_track_online_nodes(void)
 {
-	common_cpu_mem_hotplug_unplug(0);
+	cgroup_lock();
+	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+	scan_for_empty_cpusets(&top_cpuset);
+	cgroup_unlock();
 }
 #endif
 
@@ -1987,7 +2039,7 @@ void __init cpuset_init_smp(void)
 	top_cpuset.cpus_allowed = cpu_online_map;
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
-	hotcpu_notifier(cpuset_handle_cpuhp, 0);
+	hotcpu_notifier(cpuset_track_online_cpus, 0);
 }
 
 /**
diff --git a/kernel/sched.c b/kernel/sched.c
index 1a5f73c1fcdc..cc1f81b50b82 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7696,24 +7696,27 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * and partition_sched_domains() will fallback to the single partition
  * 'fallback_doms', it also forces the domains to be rebuilt.
  *
+ * If doms_new==NULL it will be replaced with cpu_online_map.
+ * ndoms_new==0 is a special case for destroying existing domains.
+ * It will not create the default domain.
+ *
  * Call with hotplug lock held
  */
 void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 			     struct sched_domain_attr *dattr_new)
 {
-	int i, j;
+	int i, j, n;
 
 	mutex_lock(&sched_domains_mutex);
 
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
 
-	if (doms_new == NULL)
-		ndoms_new = 0;
+	n = doms_new ? ndoms_new : 0;
 
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
-		for (j = 0; j < ndoms_new; j++) {
+		for (j = 0; j < n; j++) {
 			if (cpus_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
@@ -7726,7 +7729,6 @@ match1:
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		ndoms_new = 1;
 		doms_new = &fallback_doms;
 		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
 		dattr_new = NULL;
@@ -7763,8 +7765,13 @@ match2:
 int arch_reinit_sched_domains(void)
 {
 	get_online_cpus();
+
+	/* Destroy domains first to force the rebuild */
+	partition_sched_domains(0, NULL, NULL);
+
 	rebuild_sched_domains();
 	put_online_cpus();
+
 	return 0;
 }
 
@@ -7848,7 +7855,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		partition_sched_domains(0, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
 		return NOTIFY_OK;
 
 	default:
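The sched.c hunks settle the calling convention: ndoms_new == 0 now means "destroy everything, build nothing", while doms_new == NULL with a non-zero count rebuilds the single fallback partition. A sketch of both calls under that convention; the wrapper function itself is hypothetical:

/* Hypothetical wrapper; it mirrors arch_reinit_sched_domains() above. */
static void example_force_rebuild(void)
{
	get_online_cpus();
	/* 0 + NULL: tear down all current domains, create none */
	partition_sched_domains(0, NULL, NULL);
	/* 1 + NULL: rebuild the single default partition from
	 * cpu_online_map minus cpu_isolated_map */
	partition_sched_domains(1, NULL, NULL);
	put_online_cpus();
}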
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index eeee218eed80..5bbf07362172 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -188,15 +188,21 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		return 0;
 
 	case BRCTL_SET_BRIDGE_HELLO_TIME:
+	{
+		unsigned long t = clock_t_to_jiffies(args[1]);
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
+		if (t < HZ)
+			return -EINVAL;
+
 		spin_lock_bh(&br->lock);
-		br->bridge_hello_time = clock_t_to_jiffies(args[1]);
+		br->bridge_hello_time = t;
 		if (br_is_root_bridge(br))
 			br->hello_time = br->bridge_hello_time;
 		spin_unlock_bh(&br->lock);
 		return 0;
+	}
 
 	case BRCTL_SET_BRIDGE_MAX_AGE:
 		if (!capable(CAP_NET_ADMIN))
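The new guard rejects hello times shorter than one second: clock_t_to_jiffies() converts the user-supplied clock_t, and any result below HZ jiffies fails with -EINVAL before the bridge lock is taken. The same rule could be factored out; a sketch, with a hypothetical helper name:

/* Hypothetical helper restating the guard used above. */
static inline int br_hello_time_to_jiffies(unsigned long user_ct,
					   unsigned long *res)
{
	unsigned long t = clock_t_to_jiffies(user_ct);

	if (t < HZ)	/* less than one second: reject */
		return -EINVAL;
	*res = t;
	return 0;
}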
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 27d6a511c8c1..158dee8b4965 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -29,11 +29,12 @@
  */
 static ssize_t store_bridge_parm(struct device *d,
 				 const char *buf, size_t len,
-				 void (*set)(struct net_bridge *, unsigned long))
+				 int (*set)(struct net_bridge *, unsigned long))
 {
 	struct net_bridge *br = to_bridge(d);
 	char *endp;
 	unsigned long val;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -43,9 +44,9 @@ static ssize_t store_bridge_parm(struct device *d,
 		return -EINVAL;
 
 	spin_lock_bh(&br->lock);
-	(*set)(br, val);
+	err = (*set)(br, val);
 	spin_unlock_bh(&br->lock);
-	return len;
+	return err ? err : len;
 }
 
 
@@ -56,12 +57,13 @@ static ssize_t show_forward_delay(struct device *d,
 	return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
 }
 
-static void set_forward_delay(struct net_bridge *br, unsigned long val)
+static int set_forward_delay(struct net_bridge *br, unsigned long val)
 {
 	unsigned long delay = clock_t_to_jiffies(val);
 	br->forward_delay = delay;
 	if (br_is_root_bridge(br))
 		br->bridge_forward_delay = delay;
+	return 0;
 }
 
 static ssize_t store_forward_delay(struct device *d,
@@ -80,12 +82,17 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
 		       jiffies_to_clock_t(to_bridge(d)->hello_time));
 }
 
-static void set_hello_time(struct net_bridge *br, unsigned long val)
+static int set_hello_time(struct net_bridge *br, unsigned long val)
 {
 	unsigned long t = clock_t_to_jiffies(val);
+
+	if (t < HZ)
+		return -EINVAL;
+
 	br->hello_time = t;
 	if (br_is_root_bridge(br))
 		br->bridge_hello_time = t;
+	return 0;
 }
 
 static ssize_t store_hello_time(struct device *d,
@@ -104,12 +111,13 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
 		       jiffies_to_clock_t(to_bridge(d)->max_age));
 }
 
-static void set_max_age(struct net_bridge *br, unsigned long val)
+static int set_max_age(struct net_bridge *br, unsigned long val)
 {
 	unsigned long t = clock_t_to_jiffies(val);
 	br->max_age = t;
 	if (br_is_root_bridge(br))
 		br->bridge_max_age = t;
+	return 0;
 }
 
 static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
@@ -126,9 +134,10 @@ static ssize_t show_ageing_time(struct device *d,
 	return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
 }
 
-static void set_ageing_time(struct net_bridge *br, unsigned long val)
+static int set_ageing_time(struct net_bridge *br, unsigned long val)
 {
 	br->ageing_time = clock_t_to_jiffies(val);
+	return 0;
 }
 
 static ssize_t store_ageing_time(struct device *d,
@@ -180,9 +189,10 @@ static ssize_t show_priority(struct device *d, struct device_attribute *attr,
 		       (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]);
 }
 
-static void set_priority(struct net_bridge *br, unsigned long val)
+static int set_priority(struct net_bridge *br, unsigned long val)
 {
 	br_stp_set_bridge_priority(br, (u16) val);
+	return 0;
 }
 
 static ssize_t store_priority(struct device *d, struct device_attribute *attr,
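With store_bridge_parm() now taking an int-returning setter, a validation failure propagates to the sysfs writer instead of being silently ignored. A sketch of a new setter written against this convention; set_example_time, store_example_time, and the example_time field are hypothetical:

/* Hypothetical setter/store pair following the int-returning convention. */
static int set_example_time(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);

	if (t < HZ)
		return -EINVAL;	/* surfaces as the write()'s return value */
	br->example_time = t;	/* hypothetical field */
	return 0;
}

static ssize_t store_example_time(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, set_example_time);
}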
diff --git a/net/core/dev.c b/net/core/dev.c
index 60c51f765887..e719ed29310f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1991,8 +1991,13 @@ static void net_tx_action(struct softirq_action *h)
 			spin_unlock(root_lock);
 		} else {
 			if (!test_bit(__QDISC_STATE_DEACTIVATED,
-				      &q->state))
+				      &q->state)) {
 				__netif_reschedule(q);
+			} else {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
+			}
 		}
 	}
 }
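The new else branch handles a qdisc that was deactivated after being scheduled: rather than re-queue a dead qdisc, it drops __QDISC_STATE_SCHED, and smp_mb__before_clear_bit() orders the preceding accesses before the bit clears so a concurrent scheduler observes consistent state. The generic shape of that pattern, as a sketch with placeholder names:

/* Placeholder pattern, not tied to qdisc internals. */
#define FLAG_PENDING	0	/* hypothetical bit index */

static void finish_pending(unsigned long *state)
{
	/* make all prior loads/stores visible before releasing the flag */
	smp_mb__before_clear_bit();
	clear_bit(FLAG_PENDING, state);
}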
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index d985bd613d25..743f011b9a84 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -409,3 +409,38 @@ out:
 }
 
 EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
+
+void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
+		     struct inet_timewait_death_row *twdr, int family)
+{
+	struct inet_timewait_sock *tw;
+	struct sock *sk;
+	struct hlist_node *node;
+	int h;
+
+	local_bh_disable();
+	for (h = 0; h < (hashinfo->ehash_size); h++) {
+		struct inet_ehash_bucket *head =
+			inet_ehash_bucket(hashinfo, h);
+		rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
+restart:
+		write_lock(lock);
+		sk_for_each(sk, node, &head->twchain) {
+
+			tw = inet_twsk(sk);
+			if (!net_eq(twsk_net(tw), net) ||
+			    tw->tw_family != family)
+				continue;
+
+			atomic_inc(&tw->tw_refcnt);
+			write_unlock(lock);
+			inet_twsk_deschedule(tw, twdr);
+			inet_twsk_put(tw);
+
+			goto restart;
+		}
+		write_unlock(lock);
+	}
+	local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(inet_twsk_purge);
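inet_twsk_purge() cannot call inet_twsk_deschedule() while holding the per-bucket lock, so it pins the timewait socket with a reference, drops the lock, removes the socket, and restarts the chain walk from the top. The two TCP hunks that follow wire it into the IPv4 and IPv6 pernet exit paths; a sketch of such a caller, with hypothetical names, mirroring them:

/* Hypothetical pernet hook mirroring tcp_sk_exit()/tcpv6_net_exit(). */
static void __net_exit example_net_exit(struct net *net)
{
	/* drop every IPv4 timewait socket owned by this namespace */
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations example_net_ops = {
	.exit = example_net_exit,
};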
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 44c1e934824b..1b4fee20fc93 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2376,6 +2376,7 @@ static int __net_init tcp_sk_init(struct net *net)
 static void __net_exit tcp_sk_exit(struct net *net)
 {
 	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
+	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
 }
 
 static struct pernet_operations __net_initdata tcp_sk_ops = {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5b90b369ccb2..b585c850a89a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2148,6 +2148,7 @@ static int tcpv6_net_init(struct net *net)
 static void tcpv6_net_exit(struct net *net)
 {
 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
+	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
 }
 
 static struct pernet_operations tcpv6_net_ops = {
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 1b1226d6653f..20633fdf7e6b 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -68,11 +68,21 @@ static const char *const dccprotos[] = {
 static int parse_dcc(char *data, const char *data_end, u_int32_t *ip,
 		     u_int16_t *port, char **ad_beg_p, char **ad_end_p)
 {
+	char *tmp;
+
 	/* at least 12: "AAAAAAAA P\1\n" */
 	while (*data++ != ' ')
 		if (data > data_end - 12)
 			return -1;
 
+	/* Make sure we have a newline character within the packet boundaries
+	 * because simple_strtoul parses until the first invalid character. */
+	for (tmp = data; tmp <= data_end; tmp++)
+		if (*tmp == '\n')
+			break;
+	if (tmp > data_end || *tmp != '\n')
+		return -1;
+
 	*ad_beg_p = data;
 	*ip = simple_strtoul(data, &data, 10);
 
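simple_strtoul() takes no length bound; it keeps consuming digits until it hits an invalid byte. Without a terminator guaranteed inside [data, data_end], a crafted DCC message could make the parser read past the packet. The added loop refuses to parse unless a '\n' is known to lie within bounds; the rule in isolation, with a hypothetical helper name:

#include <linux/types.h>

/* Hypothetical helper restating the added bounds check. */
static bool has_newline_within(const char *data, const char *data_end)
{
	const char *tmp;

	for (tmp = data; tmp <= data_end; tmp++)
		if (*tmp == '\n')
			return true;
	return false;
}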
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 654a4f7f12c6..9bd03967fea4 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -45,12 +45,12 @@ static LIST_HEAD(gre_keymap_list);
 
 void nf_ct_gre_keymap_flush(void)
 {
-	struct list_head *pos, *n;
+	struct nf_ct_gre_keymap *km, *tmp;
 
 	write_lock_bh(&nf_ct_gre_lock);
-	list_for_each_safe(pos, n, &gre_keymap_list) {
-		list_del(pos);
-		kfree(pos);
+	list_for_each_entry_safe(km, tmp, &gre_keymap_list, list) {
+		list_del(&km->list);
+		kfree(km);
 	}
 	write_unlock_bh(&nf_ct_gre_lock);
 }
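The flush rewrite is more than style: kfree(pos) only worked because the list member happens to sit at offset zero of struct nf_ct_gre_keymap; list_for_each_entry_safe() hands back the containing object and removes that hidden dependency, and the _safe variant keeps a lookahead pointer so entries can be freed mid-walk. The idiom in general form; struct item and flush_items are placeholders:

#include <linux/list.h>
#include <linux/slab.h>

/* Placeholder type illustrating the idiom adopted above. */
struct item {
	int payload;
	struct list_head list;	/* not the first member: freeing the raw
				 * list_head pointer would be wrong here */
};

static void flush_items(struct list_head *items)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, items, list) {
		list_del(&it->list);
		kfree(it);	/* frees the containing object */
	}
}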
@@ -97,10 +97,14 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
 	kmp = &help->help.ct_pptp_info.keymap[dir];
 	if (*kmp) {
 		/* check whether it's a retransmission */
+		read_lock_bh(&nf_ct_gre_lock);
 		list_for_each_entry(km, &gre_keymap_list, list) {
-			if (gre_key_cmpfn(km, t) && km == *kmp)
+			if (gre_key_cmpfn(km, t) && km == *kmp) {
+				read_unlock_bh(&nf_ct_gre_lock);
 				return 0;
+			}
 		}
+		read_unlock_bh(&nf_ct_gre_lock);
 		pr_debug("trying to override keymap_%s for ct %p\n",
 			 dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
 		return -EEXIST;
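The retransmission scan previously walked gre_keymap_list with no protection against a concurrent flush or add. Taking nf_ct_gre_lock for reading means the early return 0 now exits from inside the critical section, so every path out of the loop must unlock first. The locked-search shape in isolation; the lock, function name, and the struct item placeholder from the earlier sketch are stand-ins:

/* Placeholder names; the shape matches the hunk above. */
static DEFINE_RWLOCK(example_lock);

static int search_locked(struct list_head *head)
{
	struct item *it;

	read_lock_bh(&example_lock);
	list_for_each_entry(it, head, list) {
		if (it->payload) {	/* stand-in for the real match test */
			read_unlock_bh(&example_lock);	/* unlock on every exit */
			return 0;
		}
	}
	read_unlock_bh(&example_lock);
	return -EEXIST;
}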
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 2f9bbc058b48..1fa306be60fb 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1193,7 +1193,6 @@ static const struct sip_handler sip_handlers[] = {
 static int process_sip_response(struct sk_buff *skb,
 				const char **dptr, unsigned int *datalen)
 {
-	static const struct sip_handler *handler;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 	unsigned int matchoff, matchlen;
@@ -1214,6 +1213,8 @@ static int process_sip_response(struct sk_buff *skb,
 	dataoff = matchoff + matchlen + 1;
 
 	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
+		const struct sip_handler *handler;
+
 		handler = &sip_handlers[i];
 		if (handler->response == NULL)
 			continue;
@@ -1228,13 +1229,14 @@ static int process_sip_request(struct sk_buff *skb,
 static int process_sip_request(struct sk_buff *skb,
 			       const char **dptr, unsigned int *datalen)
 {
-	static const struct sip_handler *handler;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 	unsigned int matchoff, matchlen;
 	unsigned int cseq, i;
 
 	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
+		const struct sip_handler *handler;
+
 		handler = &sip_handlers[i];
 		if (handler->request == NULL)
 			continue;
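A function-local 'static' means one pointer shared by every invocation on every CPU; conntrack helpers can process packets concurrently, so two calls could clobber each other's loop cursor mid-walk. An automatic variable declared in the loop body is private to each call and each iteration. The fixed shape in miniature, with a placeholder function name:

/* Placeholder showing why the removed 'static' cursor was unsafe. */
static void walk_request_handlers(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
		/* automatic: each call and each iteration gets its own
		 * copy, unlike a 'static' pointer shared across CPUs */
		const struct sip_handler *handler = &sip_handlers[i];

		if (handler->request == NULL)
			continue;
	}
}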