 113 files changed, 1439 insertions(+), 675 deletions(-)
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 680fb566b928..8362860e21a7 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -144,8 +144,8 @@ prototypes:
 	void (*kill_sb) (struct super_block *);
 locking rules:
 		may block	BKL
-get_sb		yes		yes
-kill_sb		yes		yes
+get_sb		yes		no
+kill_sb		yes		no
 
 ->get_sb() returns error or 0 with locked superblock attached to the vfsmount
 (exclusive on ->s_umount).
@@ -409,12 +409,12 @@ ioctl:			yes	(see below)
 unlocked_ioctl:		no	(see below)
 compat_ioctl:		no
 mmap:			no
-open:			maybe	(see below)
+open:			no
 flush:			no
 release:		no
 fsync:			no	(see below)
 aio_fsync:		no
-fasync:			yes	(see below)
+fasync:			no
 lock:			yes
 readv:			no
 writev:			no
@@ -431,13 +431,6 @@ For many filesystems, it is probably safe to acquire the inode
 semaphore.  Note some filesystems (i.e. remote ones) provide no
 protection for i_size so you will need to use the BKL.
 
-->open() locking is in-transit: big lock partially moved into the methods.
-The only exception is ->open() in the instances of file_operations that never
-end up in ->i_fop/->proc_fops, i.e. ones that belong to character devices
-(chrdev_open() takes lock before replacing ->f_op and calling the secondary
-method. As soon as we fix the handling of module reference counters all
-instances of ->open() will be called without the BKL.
-
 Note: ext2_release() was *the* source of contention on fs-intensive
 loads and dropping BKL on ->release() helps to get rid of that (we still
 grab BKL for cases when we close a file that had been opened r/w, but that
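
The doc hunks above record the BKL pushdown for 2.6.27: the VFS no longer wraps ->get_sb(), ->kill_sb(), ->open() and ->fasync() in the big kernel lock, so any driver that still depends on that serialization must take the lock itself. A minimal sketch of the resulting pattern (the driver and its locking need are hypothetical):

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

	/* Hypothetical legacy driver: now that the VFS calls ->open()
	 * without the BKL, a driver that still relies on BKL
	 * serialization must take the lock explicitly. */
	static int example_open(struct inode *inode, struct file *file)
	{
		int ret = 0;

		lock_kernel();
		/* ... legacy open logic that assumed BKL protection ... */
		unlock_kernel();
		return ret;
	}

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,
		.open	= example_open,
	};
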
diff --git a/MAINTAINERS b/MAINTAINERS
index af279458b614..186be3ba5069 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -750,11 +750,13 @@ P:	Ville Syrjala
 M:	syrjala@sci.fi
 S:	Maintained
 
-ATL1 ETHERNET DRIVER
+ATLX ETHERNET DRIVERS
 P:	Jay Cliburn
 M:	jcliburn@gmail.com
 P:	Chris Snook
 M:	csnook@redhat.com
+P:	Jie Yang
+M:	jie.yang@atheros.com
 L:	atl1-devel@lists.sourceforge.net
 W:	http://sourceforge.net/projects/atl1
 W:	http://atl1.sourceforge.net
@@ -1593,7 +1595,7 @@ S:	Supported
 EMBEDDED LINUX
 P:	Paul Gortmaker
 M:	paul.gortmaker@windriver.com
-P	David Woodhouse
+P:	David Woodhouse
 M:	dwmw2@infradead.org
 L:	linux-embedded@vger.kernel.org
 S:	Maintained
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 27
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Rotary Wombat
 
 # *DOCUMENTATION*
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 94a95d7fafd6..71934856fc22 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -61,8 +61,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 #define MT_DEVICE_NONSHARED	1
 #define MT_DEVICE_CACHED	2
 #define MT_DEVICE_IXP2000	3
+#define MT_DEVICE_WC		4
 /*
- * types 4 onwards can be found in asm/mach/map.h and are undefined
+ * types 5 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
  */
 
@@ -215,11 +216,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define ioremap(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE)
 #define ioremap_nocache(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE)
 #define ioremap_cached(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE_CACHED)
+#define ioremap_wc(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE_WC)
 #define iounmap(cookie)			__iounmap(cookie)
 #else
 #define ioremap(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_nocache(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_cached(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_wc(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE_WC)
 #define iounmap(cookie)			__arch_iounmap(cookie)
 #endif
 
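
With MT_DEVICE_WC defined, ARM picks up the generic ioremap_wc() interface: a mapping whose writes may be buffered and combined, which suits framebuffers and other streaming-write regions far better than the strongly-ordered MT_DEVICE mapping. A sketch of a caller, with a made-up device address:

	#include <linux/errno.h>
	#include <linux/io.h>

	#define FB_PHYS		0x20000000UL	/* assumed framebuffer address */
	#define FB_SIZE		(640 * 480 * 2)	/* 16bpp VGA, for the example */

	static void __iomem *fb;

	static int example_map_fb(void)
	{
		/* Write-combining: stores may be merged in the write buffer,
		 * which is fine for pixel data and much faster than MT_DEVICE. */
		fb = ioremap_wc(FB_PHYS, FB_SIZE);
		if (!fb)
			return -ENOMEM;
		memset_io(fb, 0, FB_SIZE);	/* clear the screen */
		return 0;
	}
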
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 06f583b13999..9eb936e49cc3 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -18,13 +18,13 @@ struct map_desc {
 	unsigned int type;
 };
 
-/* types 0-3 are defined in asm/io.h */
-#define MT_CACHECLEAN		4
-#define MT_MINICLEAN		5
-#define MT_LOW_VECTORS		6
-#define MT_HIGH_VECTORS		7
-#define MT_MEMORY		8
-#define MT_ROM			9
+/* types 0-4 are defined in asm/io.h */
+#define MT_CACHECLEAN		5
+#define MT_MINICLEAN		6
+#define MT_LOW_VECTORS		7
+#define MT_HIGH_VECTORS		8
+#define MT_MEMORY		9
+#define MT_ROM			10
 
 #define MT_NONSHARED_DEVICE	MT_DEVICE_NONSHARED
 #define MT_IXP2000_DEVICE	MT_DEVICE_IXP2000
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index 826010d5d014..2baeaeb0c900 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -159,6 +159,7 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
 #ifdef CONFIG_ARCH_OMAP730
 static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP730_MCBSP1_BASE,
 		.virt_base	= io_p2v(OMAP730_MCBSP1_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP1_TX,
@@ -167,6 +168,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
 		.ops		= &omap1_mcbsp_ops,
 	},
 	{
+		.phys_base	= OMAP730_MCBSP2_BASE,
 		.virt_base	= io_p2v(OMAP730_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP3_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP3_TX,
@@ -184,6 +186,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
 #ifdef CONFIG_ARCH_OMAP15XX
 static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP1510_MCBSP1_BASE,
 		.virt_base	= OMAP1510_MCBSP1_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP1_TX,
@@ -193,6 +196,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP1510_MCBSP2_BASE,
 		.virt_base	= io_p2v(OMAP1510_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP2_TX,
@@ -201,6 +205,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 		.ops		= &omap1_mcbsp_ops,
 	},
 	{
+		.phys_base	= OMAP1510_MCBSP3_BASE,
 		.virt_base	= OMAP1510_MCBSP3_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP3_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP3_TX,
@@ -219,6 +224,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 #ifdef CONFIG_ARCH_OMAP16XX
 static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP1610_MCBSP1_BASE,
 		.virt_base	= OMAP1610_MCBSP1_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP1_TX,
@@ -228,6 +234,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP1610_MCBSP2_BASE,
 		.virt_base	= io_p2v(OMAP1610_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP2_TX,
@@ -236,6 +243,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 		.ops		= &omap1_mcbsp_ops,
 	},
 	{
+		.phys_base	= OMAP1610_MCBSP3_BASE,
 		.virt_base	= OMAP1610_MCBSP3_BASE,
 		.dma_rx_sync	= OMAP_DMA_MCBSP3_RX,
 		.dma_tx_sync	= OMAP_DMA_MCBSP3_TX,
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 27eb6e3ca926..b261f1f80b5e 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -134,6 +134,7 @@ static struct omap_mcbsp_ops omap2_mcbsp_ops = {
 #ifdef CONFIG_ARCH_OMAP24XX
 static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP24XX_MCBSP1_BASE,
 		.virt_base	= IO_ADDRESS(OMAP24XX_MCBSP1_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP1_TX,
@@ -143,6 +144,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP24XX_MCBSP2_BASE,
 		.virt_base	= IO_ADDRESS(OMAP24XX_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP2_TX,
@@ -161,6 +163,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
 #ifdef CONFIG_ARCH_OMAP34XX
 static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 	{
+		.phys_base	= OMAP34XX_MCBSP1_BASE,
 		.virt_base	= IO_ADDRESS(OMAP34XX_MCBSP1_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP1_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP1_TX,
@@ -170,6 +173,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
 		.clk_name	= "mcbsp_clk",
 	},
 	{
+		.phys_base	= OMAP34XX_MCBSP2_BASE,
 		.virt_base	= IO_ADDRESS(OMAP34XX_MCBSP2_BASE),
 		.dma_rx_sync	= OMAP24XX_DMA_MCBSP2_RX,
 		.dma_tx_sync	= OMAP24XX_DMA_MCBSP2_TX,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25d9a11eb617..a713e40e1f1a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -211,6 +211,12 @@ static struct mem_type mem_types[] = {
 				  PMD_SECT_TEX(1),
 		.domain		= DOMAIN_IO,
 	},
+	[MT_DEVICE_WC] = {	/* ioremap_wc */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
+		.domain		= DOMAIN_IO,
+	},
 	[MT_CACHECLEAN] = {
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
@@ -273,6 +279,20 @@ static void __init build_mem_type_table(void)
 	}
 
 	/*
+	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
+	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
+	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 */
+	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+		mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+	} else {
+		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
 	 * ARMv5 and lower, bit 4 must be set for page tables.
 	 * (was: cache "update-able on write" bit on ARM610)
 	 * However, Xscale cores require this bit to be cleared.
diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c
index 2f8627218839..0a38f0b396eb 100644
--- a/arch/arm/plat-mxc/clock.c
+++ b/arch/arm/plat-mxc/clock.c
@@ -37,7 +37,6 @@
 #include <linux/proc_fs.h>
 #include <linux/semaphore.h>
 #include <linux/string.h>
-#include <linux/version.h>
 
 #include <mach/clock.h>
 
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 3e76ee2bc731..9e1341ebc14e 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1488,7 +1488,7 @@ static int __init _omap_gpio_init(void)
 		bank->chip.set = gpio_set;
 		if (bank_is_mpuio(bank)) {
 			bank->chip.label = "mpuio";
-#ifdef CONFIG_ARCH_OMAP1
+#ifdef CONFIG_ARCH_OMAP16XX
 			bank->chip.dev = &omap_mpuio_device.dev;
 #endif
 			bank->chip.base = OMAP_MPUIO(0);
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index 6eb44a92871d..8fdb95e26fcd 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -315,6 +315,7 @@ struct omap_mcbsp_ops {
 };
 
 struct omap_mcbsp_platform_data {
+	unsigned long phys_base;
 	u32 virt_base;
 	u8 dma_rx_sync, dma_tx_sync;
 	u16 rx_irq, tx_irq;
@@ -324,6 +325,7 @@ struct omap_mcbsp_platform_data {
 
 struct omap_mcbsp {
 	struct device *dev;
+	unsigned long phys_base;
 	u32 io_base;
 	u8 id;
 	u8 free;
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index d0844050f2d2..014d26574bb6 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -651,7 +651,7 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer,
 	omap_set_dma_dest_params(mcbsp[id].dma_tx_lch,
 				 src_port,
 				 OMAP_DMA_AMODE_CONSTANT,
-				 mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1,
+				 mcbsp[id].phys_base + OMAP_MCBSP_REG_DXR1,
 				 0, 0);
 
 	omap_set_dma_src_params(mcbsp[id].dma_tx_lch,
@@ -712,7 +712,7 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer,
 	omap_set_dma_src_params(mcbsp[id].dma_rx_lch,
 				src_port,
 				OMAP_DMA_AMODE_CONSTANT,
-				mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1,
+				mcbsp[id].phys_base + OMAP_MCBSP_REG_DRR1,
 				0, 0);
 
 	omap_set_dma_dest_params(mcbsp[id].dma_rx_lch,
@@ -830,6 +830,7 @@ static int __init omap_mcbsp_probe(struct platform_device *pdev)
 	mcbsp[id].dma_tx_lch = -1;
 	mcbsp[id].dma_rx_lch = -1;
 
+	mcbsp[id].phys_base = pdata->phys_base;
 	mcbsp[id].io_base = pdata->virt_base;
 	/* Default I/O is IRQ based */
 	mcbsp[id].io_type = OMAP_MCBSP_IRQ_IO;
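
The reason for threading phys_base through the McBSP platform data shows in the two hunks above: omap_set_dma_{src,dest}_params() programs the DMA controller, and a DMA engine addresses the bus with physical addresses, not kernel virtual ones, so handing it io_base (a virtual address) could only work where the two happened to coincide. A reduced sketch of the distinction; setup_dma_dest() and the register offsets are stand-ins, not real kernel API:

	#include <linux/io.h>
	#include <linux/types.h>

	extern void setup_dma_dest(unsigned long paddr);	/* hypothetical */

	/* A driver keeps both views of its register block: the CPU goes
	 * through the ioremapped virtual address, while the DMA
	 * controller must be given the physical address. */
	struct mcbsp_like {
		unsigned long phys_base;	/* programmed into the DMA engine */
		void __iomem *virt_base;	/* used for readl()/writel() */
	};

	static void example_start_tx(struct mcbsp_like *m)
	{
		u32 ctrl = readl(m->virt_base + 0x08);	/* CPU: virtual */

		writel(ctrl | 1, m->virt_base + 0x08);
		/* DMA destination: physical address of the data register */
		setup_dma_dest(m->phys_base + 0x04);
	}
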
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c
index e4796c67a831..d6a8193a1d2f 100644
--- a/arch/avr32/kernel/asm-offsets.c
+++ b/arch/avr32/kernel/asm-offsets.c
@@ -4,6 +4,8 @@
  * to extract and format the required data.
  */
 
+#include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kbuild.h>
 
@@ -17,4 +19,8 @@ void foo(void)
 	OFFSET(TI_rar_saved, thread_info, rar_saved);
 	OFFSET(TI_rsr_saved, thread_info, rsr_saved);
 	OFFSET(TI_restart_block, thread_info, restart_block);
+	BLANK();
+	OFFSET(TSK_active_mm, task_struct, active_mm);
+	BLANK();
+	OFFSET(MM_pgd, mm_struct, pgd);
 }
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 2b398cae110c..33d49377b8be 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -334,9 +334,64 @@ save_full_context_ex:
 
 /* Low-level exception handlers */
 handle_critical:
+	/*
+	 * AT32AP700x errata:
+	 *
+	 * After a Java stack overflow or underflow trap, any CPU
+	 * memory access may cause erratic behavior. This will happen
+	 * when the four least significant bits of the JOSP system
+	 * register contains any value between 9 and 15 (inclusive).
+	 *
+	 * Possible workarounds:
+	 *   - Don't use the Java Extension Module
+	 *   - Ensure that the stack overflow and underflow trap
+	 *     handlers do not do any memory access or trigger any
+	 *     exceptions before the overflow/underflow condition is
+	 *     cleared (by incrementing or decrementing the JOSP)
+	 *   - Make sure that JOSP does not contain any problematic
+	 *     value before doing any exception or interrupt
+	 *     processing.
+	 *   - Set up a critical exception handler which writes a
+	 *     known-to-be-safe value, e.g. 4, to JOSP before doing
+	 *     any further processing.
+	 *
+	 * We'll use the last workaround for now since we cannot
+	 * guarantee that user space processes don't use Java mode.
+	 * Non-well-behaving userland will be terminated with extreme
+	 * prejudice.
+	 */
+#ifdef CONFIG_CPU_AT32AP700X
+	/*
+	 * There's a chance we can't touch memory, so temporarily
+	 * borrow PTBR to save the stack pointer while we fix things
+	 * up...
+	 */
+	mtsr	SYSREG_PTBR, sp
+	mov	sp, 4
+	mtsr	SYSREG_JOSP, sp
+	mfsr	sp, SYSREG_PTBR
+	sub	pc, -2
+
+	/* Push most of pt_regs on stack. We'll do the rest later */
 	sub	sp, 4
-	stmts	--sp, r0-lr
-	rcall	save_full_context_ex
+	pushm	r0-r12
+
+	/* PTBR mirrors current_thread_info()->task->active_mm->pgd */
+	get_thread_info r0
+	ld.w	r1, r0[TI_task]
+	ld.w	r2, r1[TSK_active_mm]
+	ld.w	r3, r2[MM_pgd]
+	mtsr	SYSREG_PTBR, r3
+#else
+	sub	sp, 4
+	pushm	r0-r12
+#endif
+	sub	r0, sp, -(14 * 4)
+	mov	r1, lr
+	mfsr	r2, SYSREG_RAR_EX
+	mfsr	r3, SYSREG_RSR_EX
+	pushm	r0-r3
+
 	mfsr	r12, SYSREG_ECR
 	mov	r11, sp
 	rcall	do_critical_exception
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index 5be4de65b209..17503b0ed6c9 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -134,7 +134,7 @@ pm_standby:
 	mov	r11, SDRAMC_LPR_LPCB_SELF_RFR
 	bfins	r10, r11, 0, 2		/* LPCB <- self Refresh */
 	sync	0			/* flush write buffer */
-	st.w	r12[SDRAMC_LPR], r11	/* put SDRAM in self-refresh mode */
+	st.w	r12[SDRAMC_LPR], r10	/* put SDRAM in self-refresh mode */
 	ld.w	r11, r12[SDRAMC_LPR]
 	unmask_interrupts
 	sleep	CPU_SLEEP_FROZEN
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index 7286e4a9fe84..a7acad2bc2f0 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -21,5 +21,8 @@ extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_b
 extern char __start_unwind[], __end_unwind[];
 extern char __start_ivt_text[], __end_ivt_text[];
 
+#undef dereference_function_descriptor
+void *dereference_function_descriptor(void *);
+
 #endif /* _ASM_IA64_SECTIONS_H */
 
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 29aad349e0c4..545626f66a4c 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -31,9 +31,11 @@
 #include <linux/elf.h>
 #include <linux/moduleloader.h>
 #include <linux/string.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 
 #include <asm/patch.h>
+#include <asm/sections.h>
 #include <asm/unaligned.h>
 
 #define ARCH_MODULE_DEBUG 0
@@ -941,3 +943,13 @@ module_arch_cleanup (struct module *mod)
 	if (mod->arch.core_unw_table)
 		unw_remove_unwind_table(mod->arch.core_unw_table);
 }
+
+void *dereference_function_descriptor(void *ptr)
+{
+	struct fdesc *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->ip, p))
+		ptr = p;
+	return ptr;
+}
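
Background for this hunk and the matching parisc and powerpc ones below: on ia64, ppc64 and 64-bit parisc a C function pointer refers to a function descriptor (entry address plus global/TOC pointer), not to the code itself, so generic code that wants the text address - typically for symbol lookup when printing a function pointer - must unwrap the descriptor first; probe_kernel_address() keeps the unwrap safe against a bad pointer. A sketch of a generic caller (the callback scenario is invented):

	#include <linux/kallsyms.h>
	#include <linux/kernel.h>
	#include <asm/sections.h>	/* dereference_function_descriptor() */

	/* Resolve a function pointer to a symbol name in a way that also
	 * works on descriptor-based ABIs; on other architectures
	 * dereference_function_descriptor() is an identity operation. */
	static void example_report_callback(void *fn)
	{
		char sym[KSYM_SYMBOL_LEN];

		sprint_symbol(sym,
			(unsigned long)dereference_function_descriptor(fn));
		printk(KERN_INFO "callback is %s\n", sym);
	}
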
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4da736e25333..49896a2a1d72 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1886,6 +1886,15 @@ config STACKTRACE_SUPPORT
 
 source "init/Kconfig"
 
+config PROBE_INITRD_HEADER
+	bool "Probe initrd header created by addinitrd"
+	depends on BLK_DEV_INITRD
+	help
+	  Probe initrd header at the last page of kernel image.
+	  Say Y here if you are using arch/mips/boot/addinitrd.c to
+	  add initrd or initramfs image to the kernel image.
+	  Otherwise, say N.
+
 menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
 
 config HW_HAS_EISA
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2aae76bce293..16f8edfe5cdc 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -160,30 +160,33 @@ early_param("rd_size", rd_size_early);
 static unsigned long __init init_initrd(void)
 {
 	unsigned long end;
-	u32 *initrd_header;
 
 	/*
 	 * Board specific code or command line parser should have
 	 * already set up initrd_start and initrd_end. In these cases
 	 * perfom sanity checks and use them if all looks good.
 	 */
-	if (initrd_start && initrd_end > initrd_start)
-		goto sanitize;
+	if (!initrd_start || initrd_end <= initrd_start) {
+#ifdef CONFIG_PROBE_INITRD_HEADER
+		u32 *initrd_header;
 
-	/*
-	 * See if initrd has been added to the kernel image by
-	 * arch/mips/boot/addinitrd.c. In that case a header is
-	 * prepended to initrd and is made up by 8 bytes. The fisrt
-	 * word is a magic number and the second one is the size of
-	 * initrd.  Initrd start must be page aligned in any cases.
-	 */
-	initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
-	if (initrd_header[0] != 0x494E5244)
-		goto disable;
-	initrd_start = (unsigned long)(initrd_header + 2);
-	initrd_end = initrd_start + initrd_header[1];
+		/*
+		 * See if initrd has been added to the kernel image by
+		 * arch/mips/boot/addinitrd.c. In that case a header is
+		 * prepended to initrd and is made up by 8 bytes. The first
+		 * word is a magic number and the second one is the size of
+		 * initrd.  Initrd start must be page aligned in any cases.
+		 */
+		initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
+		if (initrd_header[0] != 0x494E5244)
+			goto disable;
+		initrd_start = (unsigned long)(initrd_header + 2);
+		initrd_end = initrd_start + initrd_header[1];
+#else
+		goto disable;
+#endif
+	}
 
-sanitize:
 	if (initrd_start & ~PAGE_MASK) {
 		pr_err("initrd start must be page aligned\n");
 		goto disable;
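
The header being probed is the 8-byte record that arch/mips/boot/addinitrd.c appends after the kernel image: a magic word followed by the payload size, with initrd_start pointing just past the pair (initrd_header + 2 in the code above). Sketched as a struct for clarity - the kernel reads it as a bare u32 array, and the struct name is illustrative only:

	#include <linux/types.h>

	struct addinitrd_header {
		u32 magic;	/* 0x494E5244, "INRD" in ASCII */
		u32 size;	/* initrd length in bytes; data follows */
	};
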
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 426cced1e9dc..6bee29097a56 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -373,8 +373,8 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
 	do_exit(SIGSEGV);
 }
 
-extern const struct exception_table_entry __start___dbe_table[];
-extern const struct exception_table_entry __stop___dbe_table[];
+extern struct exception_table_entry __start___dbe_table[];
+extern struct exception_table_entry __stop___dbe_table[];
 
 __asm__(
 "	.section	__dbe_table, \"a\"\n"
@@ -1200,7 +1200,7 @@ void *set_except_vector(int n, void *addr)
 	if (n == 0 && cpu_has_divec) {
 		*(u32 *)(ebase + 0x200) = 0x08000000 |
 					  (0x03ffffff & (handler >> 2));
-		flush_icache_range(ebase + 0x200, ebase + 0x204);
+		local_flush_icache_range(ebase + 0x200, ebase + 0x204);
 	}
 	return (void *)old_handler;
 }
@@ -1283,7 +1283,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
 		w = (u32 *)(b + ori_offset);
 		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
-		flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
+		local_flush_icache_range((unsigned long)b,
+					 (unsigned long)(b+handler_len));
 	}
 	else {
 		/*
@@ -1295,7 +1296,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 		w = (u32 *)b;
 		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
 		*w = 0;
-		flush_icache_range((unsigned long)b, (unsigned long)(b+8));
+		local_flush_icache_range((unsigned long)b,
+					 (unsigned long)(b+8));
 	}
 
 	return (void *)old_handler;
@@ -1515,7 +1517,7 @@ void __cpuinit per_cpu_trap_init(void)
 void __init set_handler(unsigned long offset, void *addr, unsigned long size)
 {
 	memcpy((void *)(ebase + offset), addr, size);
-	flush_icache_range(ebase + offset, ebase + offset + size);
+	local_flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
 static char panic_null_cerr[] __cpuinitdata =
@@ -1680,6 +1682,8 @@ void __init trap_init(void)
 	signal32_init();
 #endif
 
-	flush_icache_range(ebase, ebase + 0x400);
+	local_flush_icache_range(ebase, ebase + 0x400);
 	flush_tlb_handlers();
+
+	sort_extable(__start___dbe_table, __stop___dbe_table);
 }
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 27a5b466c85c..5500c20c79ae 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -320,6 +320,7 @@ void __cpuinit r3k_cache_init(void)
 	flush_cache_range = r3k_flush_cache_range;
 	flush_cache_page = r3k_flush_cache_page;
 	flush_icache_range = r3k_flush_icache_range;
+	local_flush_icache_range = r3k_flush_icache_range;
 
 	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
 	local_flush_data_cache_page = local_r3k_flush_data_cache_page;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 71df3390c07b..6e99665ae860 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -543,12 +543,8 @@ struct flush_icache_range_args {
 	unsigned long end;
 };
 
-static inline void local_r4k_flush_icache_range(void *args)
+static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
-	struct flush_icache_range_args *fir_args = args;
-	unsigned long start = fir_args->start;
-	unsigned long end = fir_args->end;
-
 	if (!cpu_has_ic_fills_f_dc) {
 		if (end - start >= dcache_size) {
 			r4k_blast_dcache();
@@ -564,6 +560,15 @@ static inline void local_r4k_flush_icache_range(void *args)
 		protected_blast_icache_range(start, end);
 }
 
+static inline void local_r4k_flush_icache_range_ipi(void *args)
+{
+	struct flush_icache_range_args *fir_args = args;
+	unsigned long start = fir_args->start;
+	unsigned long end = fir_args->end;
+
+	local_r4k_flush_icache_range(start, end);
+}
+
 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
 	struct flush_icache_range_args args;
@@ -571,7 +576,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
 	instruction_hazard();
 }
 
@@ -1375,6 +1380,7 @@ void __cpuinit r4k_cache_init(void)
 	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
 	flush_data_cache_page	= r4k_flush_data_cache_page;
 	flush_icache_range	= r4k_flush_icache_range;
+	local_flush_icache_range	= local_r4k_flush_icache_range;
 
 #if defined(CONFIG_DMA_NONCOHERENT)
 	if (coherentio) {
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index a9f7f1f5e9b4..f7c8f9ce39c1 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -362,6 +362,7 @@ void __cpuinit tx39_cache_init(void)
 		flush_cache_range	= (void *) tx39h_flush_icache_all;
 		flush_cache_page	= (void *) tx39h_flush_icache_all;
 		flush_icache_range	= (void *) tx39h_flush_icache_all;
+		local_flush_icache_range = (void *) tx39h_flush_icache_all;
 
 		flush_cache_sigtramp	= (void *) tx39h_flush_icache_all;
 		local_flush_data_cache_page	= (void *) tx39h_flush_icache_all;
@@ -390,6 +391,7 @@ void __cpuinit tx39_cache_init(void)
 		flush_cache_range = tx39_flush_cache_range;
 		flush_cache_page = tx39_flush_cache_page;
 		flush_icache_range = tx39_flush_icache_range;
+		local_flush_icache_range = tx39_flush_icache_range;
 
 		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
 		local_flush_data_cache_page = local_tx39_flush_data_cache_page;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 034e8506f6ea..1eb7c71e3d6a 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -29,6 +29,7 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
 	unsigned long pfn);
 void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*local_flush_icache_range)(unsigned long start, unsigned long end);
 
 void (*__flush_cache_vmap)(void);
 void (*__flush_cache_vunmap)(void);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 76da73a5ab3c..979cf9197282 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1273,10 +1273,10 @@ void __cpuinit build_tlb_refill_handler(void)
 
 void __cpuinit flush_tlb_handlers(void)
 {
-	flush_icache_range((unsigned long)handle_tlbl,
+	local_flush_icache_range((unsigned long)handle_tlbl,
 			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
-	flush_icache_range((unsigned long)handle_tlbs,
+	local_flush_icache_range((unsigned long)handle_tlbs,
 			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
-	flush_icache_range((unsigned long)handle_tlbm,
+	local_flush_icache_range((unsigned long)handle_tlbm,
 			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
 }
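
The common thread in the traps.c, c-r4k.c and tlbex.c hunks: flush_icache_range() broadcasts the flush to every CPU via r4k_on_each_cpu(), which is wasted work and, worse, can wedge when exception or TLB handlers are installed during early boot or CPU bringup, before other CPUs can answer an IPI. Those paths now use the new local_flush_icache_range() hook, which touches only the executing CPU. The shape of the split, in reduced sketch form (not the kernel's exact code):

	#include <linux/smp.h>

	/* The local variant does the real work on the current CPU;
	 * the SMP wrapper broadcasts it via an IPI callback. */
	struct flush_args { unsigned long start, end; };

	static void local_flush_range(unsigned long start, unsigned long end)
	{
		/* write back D-cache, invalidate I-cache for [start, end) */
	}

	static void flush_range_ipi(void *args)	/* on_each_cpu() callback */
	{
		struct flush_args *a = args;

		local_flush_range(a->start, a->end);
	}

	static void flush_range(unsigned long start, unsigned long end)
	{
		struct flush_args a = { .start = start, .end = end };

		on_each_cpu(flush_range_ipi, &a, 1);	/* every online CPU */
	}
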
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index 60141235ec40..52486c4d2b01 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -150,7 +150,7 @@ static int __init sgiseeq_devinit(void)
 		return res;
 
 	/* Second HPC is missing? */
-	if (!ip22_is_fullhouse() ||
+	if (ip22_is_fullhouse() ||
 	    get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1]))
 		return 0;
 
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 0afe94c48fb6..fe6bee09cece 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -53,6 +53,7 @@ txx9_reg_res_init(unsigned int pcode, unsigned long base, unsigned long size)
 		txx9_ce_res[i].name = txx9_ce_res_name[i];
 	}
 
+	txx9_pcode = pcode;
 	sprintf(txx9_pcode_str, "TX%x", pcode);
 	if (base) {
 		txx9_reg_res.start = base & 0xfffffffffULL;
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index fdacdd4341c9..44138c3e6ea7 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -47,7 +47,9 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
+#include <linux/uaccess.h>
 
+#include <asm/sections.h>
 #include <asm/unwind.h>
 
 #if 0
@@ -860,3 +862,15 @@ void module_arch_cleanup(struct module *mod)
 	deregister_unwind_table(mod);
 	module_bug_cleanup(mod);
 }
+
+#ifdef CONFIG_64BIT
+void *dereference_function_descriptor(void *ptr)
+{
+	Elf64_Fdesc *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->addr, p))
+		ptr = p;
+	return ptr;
+}
+#endif
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 14174aa24074..717a3bc1352e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -49,7 +49,7 @@ zlib       := inffast.c inflate.c inftrees.c
 zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h
 zliblinuxheader := zlib.h zconf.h zutil.h
 
-$(addprefix $(obj)/,$(zlib) gunzip_util.o main.o): \
+$(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o prpmc2800.o): \
 	$(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
 
 src-libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 916018e425c4..7710e9e6660f 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -16,6 +16,9 @@ static inline int in_kernel_text(unsigned long addr)
 		return 0;
 }
 
+#undef dereference_function_descriptor
+void *dereference_function_descriptor(void *);
+
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ee6a2982d567..ad79de272ff3 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -21,8 +21,9 @@
 #include <linux/err.h>
 #include <linux/vmalloc.h>
 #include <linux/bug.h>
+#include <linux/uaccess.h>
 #include <asm/module.h>
-#include <asm/uaccess.h>
+#include <asm/sections.h>
 #include <asm/firmware.h>
 #include <asm/code-patching.h>
 #include <linux/sort.h>
@@ -451,3 +452,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 
 	return 0;
 }
+
+void *dereference_function_descriptor(void *ptr)
+{
+	struct ppc64_opd_entry *desc = ptr;
+	void *p;
+
+	if (!probe_kernel_address(&desc->funcaddr, p))
+		ptr = p;
+	return ptr;
+}
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1c1b627ee843..67595bc380dc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -643,9 +643,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
 			    (!victim || tmp->prio > victim->prio)) {
 				victim = spu->ctx;
-				get_spu_context(victim);
 			}
 		}
+		if (victim)
+			get_spu_context(victim);
 		mutex_unlock(&cbe_spu_info[node].list_mutex);
 
 		if (victim) {
@@ -727,17 +728,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 	/* not a candidate for interruptible because it's called either
 	   from the scheduler thread or from spu_deactivate */
 	mutex_lock(&ctx->state_mutex);
-	__spu_schedule(spu, ctx);
+	if (ctx->state == SPU_STATE_SAVED)
+		__spu_schedule(spu, ctx);
 	spu_release(ctx);
 }
 
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu:	The SPU to unschedule from
+ * @ctx:	The context currently scheduled on the SPU
+ * @free_spu	Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+		int free_spu)
 {
 	int node = spu->node;
 
 	mutex_lock(&cbe_spu_info[node].list_mutex);
 	cbe_spu_info[node].nr_active--;
-	spu->alloc_state = SPU_FREE;
+	if (free_spu)
+		spu->alloc_state = SPU_FREE;
 	spu_unbind_context(spu, ctx);
 	ctx->stats.invol_ctx_switch++;
 	spu->stats.invol_ctx_switch++;
@@ -837,7 +854,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
-			spu_unschedule(spu, ctx);
+			spu_unschedule(spu, ctx, new == NULL);
 			if (new) {
 				if (new->flags & SPU_CREATE_NOSCHED)
 					wake_up(&new->stop_wq);
@@ -910,7 +927,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
-		spu_unschedule(spu, ctx);
+		spu_unschedule(spu, ctx, 0);
 		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 			spu_add_to_rq(ctx);
 	} else {
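
The find_victim() change is a reference-count fix: the search loop may consider several contexts and overwrite victim as it goes, and the old code took a reference on every intermediate candidate, leaking all but the last. Taking the reference once, on the final choice, while list_mutex is still held is the standard pattern - the lock keeps the object alive until the reference does. A generic sketch of that pattern, not the spufs code itself:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct victim { struct kref ref; int prio; struct list_head node; };

	static struct victim *pick_victim(struct list_head *list, spinlock_t *lock)
	{
		struct victim *v, *best = NULL;

		spin_lock(lock);
		list_for_each_entry(v, list, node)
			if (!best || v->prio > best->prio)
				best = v;	/* no ref yet: may be replaced */
		if (best)
			kref_get(&best->ref);	/* ref the final pick, still locked */
		spin_unlock(lock);
		return best;			/* caller owns a reference */
	}
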
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index cde81fa64f89..a2be3a978d5c 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -42,6 +42,7 @@ struct user_regs_struct32
 	u32 gprs[NUM_GPRS];
 	u32 acrs[NUM_ACRS];
 	u32 orig_gpr2;
+	/* nb: there's a 4-byte hole here */
 	s390_fp_regs fp_regs;
 	/*
 	 * These per registers are in here so that gdb can modify them
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 2815bfe348a6..c8b08289eb87 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
| @@ -170,6 +170,13 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
| 170 | */ | 170 | */ |
| 171 | tmp = (addr_t) task_pt_regs(child)->orig_gpr2; | 171 | tmp = (addr_t) task_pt_regs(child)->orig_gpr2; |
| 172 | 172 | ||
| 173 | } else if (addr < (addr_t) &dummy->regs.fp_regs) { | ||
| 174 | /* | ||
| 175 | * prevent reads of padding hole between | ||
| 176 | * orig_gpr2 and fp_regs on s390. | ||
| 177 | */ | ||
| 178 | tmp = 0; | ||
| 179 | |||
| 173 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { | 180 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { |
| 174 | /* | 181 | /* |
| 175 | * floating point regs. are stored in the thread structure | 182 | * floating point regs. are stored in the thread structure |
| @@ -270,6 +277,13 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
| 270 | */ | 277 | */ |
| 271 | task_pt_regs(child)->orig_gpr2 = data; | 278 | task_pt_regs(child)->orig_gpr2 = data; |
| 272 | 279 | ||
| 280 | } else if (addr < (addr_t) &dummy->regs.fp_regs) { | ||
| 281 | /* | ||
| 282 | * prevent writes of padding hole between | ||
| 283 | * orig_gpr2 and fp_regs on s390. | ||
| 284 | */ | ||
| 285 | return 0; | ||
| 286 | |||
| 273 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { | 287 | } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { |
| 274 | /* | 288 | /* |
| 275 | * floating point regs. are stored in the thread structure | 289 | * floating point regs. are stored in the thread structure |
| @@ -428,6 +442,13 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) | |||
| 428 | */ | 442 | */ |
| 429 | tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); | 443 | tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); |
| 430 | 444 | ||
| 445 | } else if (addr < (addr_t) &dummy32->regs.fp_regs) { | ||
| 446 | /* | ||
| 447 | * prevent reads of padding hole between | ||
| 448 | * orig_gpr2 and fp_regs on s390. | ||
| 449 | */ | ||
| 450 | tmp = 0; | ||
| 451 | |||
| 431 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { | 452 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { |
| 432 | /* | 453 | /* |
| 433 | * floating point regs. are stored in the thread structure | 454 | * floating point regs. are stored in the thread structure |
| @@ -514,6 +535,13 @@ static int __poke_user_compat(struct task_struct *child, | |||
| 514 | */ | 535 | */ |
| 515 | *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; | 536 | *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; |
| 516 | 537 | ||
| 538 | } else if (addr < (addr_t) &dummy32->regs.fp_regs) { | ||
| 539 | /* | ||
| 540 | * prevent writes of padding hole between | ||
| 541 | * orig_gpr2 and fp_regs on s390. | ||
| 542 | */ | ||
| 543 | return 0; | ||
| 544 | |||
| 517 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { | 545 | } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { |
| 518 | /* | 546 | /* |
| 519 | * floating point regs. are stored in the thread structure | 547 | * floating point regs. are stored in the thread structure |
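The reason these four new branches are needed is ordinary C structure alignment: orig_gpr2 is a 32-bit field, but s390_fp_regs begins with a member that must be 8-byte aligned, so the compiler inserts a 4-byte hole between them, exactly as the new header comment notes. A minimal userspace sketch (illustrative stand-in types, not the real s390 structures) makes the hole visible:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real s390 structures: a u32 field
 * followed by an 8-byte-aligned struct leaves a 4-byte padding hole
 * on common 64-bit ABIs. */
struct fp_regs {
        double fprs[16];
};

struct regs32 {
        uint32_t gprs[16];
        uint32_t acrs[16];
        uint32_t orig_gpr2;     /* ends at offset 132 */
        struct fp_regs fp_regs; /* starts at offset 136 */
};

int main(void)
{
        size_t end = offsetof(struct regs32, orig_gpr2) + sizeof(uint32_t);
        size_t hole = offsetof(struct regs32, fp_regs) - end;

        printf("%zu-byte hole at offset %zu\n", hole, end);
        return 0;
}

Without the new branches, a PTRACE_PEEKUSR at the hole's offset would return uninitialized padding and a poke would write into it; the patch makes reads of the hole return 0 and writes to it succeed as no-ops.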
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 9b6689d9d570..23963882bc18 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c | |||
| @@ -792,6 +792,8 @@ void fixup_irqs(void) | |||
| 792 | } | 792 | } |
| 793 | spin_unlock_irqrestore(&irq_desc[irq].lock, flags); | 793 | spin_unlock_irqrestore(&irq_desc[irq].lock, flags); |
| 794 | } | 794 | } |
| 795 | |||
| 796 | tick_ops->disable_irq(); | ||
| 795 | } | 797 | } |
| 796 | #endif | 798 | #endif |
| 797 | 799 | ||
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 743ccad61c60..2be166c544ca 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
| @@ -80,8 +80,6 @@ void smp_bogo(struct seq_file *m) | |||
| 80 | i, cpu_data(i).clock_tick); | 80 | i, cpu_data(i).clock_tick); |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); | ||
| 84 | |||
| 85 | extern void setup_sparc64_timer(void); | 83 | extern void setup_sparc64_timer(void); |
| 86 | 84 | ||
| 87 | static volatile unsigned long callin_flag = 0; | 85 | static volatile unsigned long callin_flag = 0; |
| @@ -120,9 +118,9 @@ void __cpuinit smp_callin(void) | |||
| 120 | while (!cpu_isset(cpuid, smp_commenced_mask)) | 118 | while (!cpu_isset(cpuid, smp_commenced_mask)) |
| 121 | rmb(); | 119 | rmb(); |
| 122 | 120 | ||
| 123 | spin_lock(&call_lock); | 121 | ipi_call_lock(); |
| 124 | cpu_set(cpuid, cpu_online_map); | 122 | cpu_set(cpuid, cpu_online_map); |
| 125 | spin_unlock(&call_lock); | 123 | ipi_call_unlock(); |
| 126 | 124 | ||
| 127 | /* idle thread is expected to have preempt disabled */ | 125 | /* idle thread is expected to have preempt disabled */ |
| 128 | preempt_disable(); | 126 | preempt_disable(); |
| @@ -1305,10 +1303,6 @@ int __cpu_disable(void) | |||
| 1305 | c->core_id = 0; | 1303 | c->core_id = 0; |
| 1306 | c->proc_id = -1; | 1304 | c->proc_id = -1; |
| 1307 | 1305 | ||
| 1308 | spin_lock(&call_lock); | ||
| 1309 | cpu_clear(cpu, cpu_online_map); | ||
| 1310 | spin_unlock(&call_lock); | ||
| 1311 | |||
| 1312 | smp_wmb(); | 1306 | smp_wmb(); |
| 1313 | 1307 | ||
| 1314 | /* Make sure no interrupts point to this cpu. */ | 1308 | /* Make sure no interrupts point to this cpu. */ |
| @@ -1318,6 +1312,10 @@ int __cpu_disable(void) | |||
| 1318 | mdelay(1); | 1312 | mdelay(1); |
| 1319 | local_irq_disable(); | 1313 | local_irq_disable(); |
| 1320 | 1314 | ||
| 1315 | ipi_call_lock(); | ||
| 1316 | cpu_clear(cpu, cpu_online_map); | ||
| 1317 | ipi_call_unlock(); | ||
| 1318 | |||
| 1321 | return 0; | 1319 | return 0; |
| 1322 | } | 1320 | } |
| 1323 | 1321 | ||
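Replacing the private call_lock with ipi_call_lock()/ipi_call_unlock() means cpu_online_map is now updated under the same lock the generic smp_call_function() path takes, so a cross-call can never target a CPU that is halfway through coming online or going offline. A rough userspace analogue of that pattern (pthread mutex standing in for the IPI call lock; all names hypothetical):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t online_mask;            /* stand-in for cpu_online_map */

/* Sender side: snapshot the online set and deliver under the lock. */
static void call_on_online(void (*fn)(int cpu))
{
        pthread_mutex_lock(&call_lock);
        for (int cpu = 0; cpu < 64; cpu++)
                if (online_mask & (1ULL << cpu))
                        fn(cpu);
        pthread_mutex_unlock(&call_lock);
}

/* Hotplug side: the map only ever changes under the same lock. */
static void set_cpu_online_bit(int cpu, int online)
{
        pthread_mutex_lock(&call_lock);
        if (online)
                online_mask |= 1ULL << cpu;
        else
                online_mask &= ~(1ULL << cpu);
        pthread_mutex_unlock(&call_lock);
}

static void ping(int cpu)
{
        printf("cross-call to cpu %d\n", cpu);
}

int main(void)
{
        set_cpu_online_bit(2, 1);
        call_on_online(ping);
        return 0;
}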
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 2c518fbc52ec..b225219c448c 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
| @@ -382,14 +382,17 @@ config X86_OOSTORE | |||
| 382 | # P6_NOPs are a relatively minor optimization that require a family >= | 382 | # P6_NOPs are a relatively minor optimization that require a family >= |
| 383 | # 6 processor, except that it is broken on certain VIA chips. | 383 | # 6 processor, except that it is broken on certain VIA chips. |
| 384 | # Furthermore, AMD chips prefer a totally different sequence of NOPs | 384 | # Furthermore, AMD chips prefer a totally different sequence of NOPs |
| 385 | # (which work on all CPUs). As a result, disallow these if we're | 385 | # (which work on all CPUs). In addition, it looks like Virtual PC |
| 386 | # compiling X86_GENERIC but not X86_64 (these NOPs do work on all | 386 | # does not understand them. |
| 387 | # x86-64 capable chips); the list of processors in the right-hand clause | 387 | # |
| 388 | # are the cores that benefit from this optimization. | 388 | # As a result, disallow these if we're not compiling for X86_64 (these |
| 389 | # NOPs do work on all x86-64 capable chips); the list of processors in | ||
| 390 | # the right-hand clause are the cores that benefit from this optimization. | ||
| 389 | # | 391 | # |
| 390 | config X86_P6_NOP | 392 | config X86_P6_NOP |
| 391 | def_bool y | 393 | def_bool y |
| 392 | depends on (X86_64 || !X86_GENERIC) && (M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC) | 394 | depends on X86_64 |
| 395 | depends on (MCORE2 || MPENTIUM4 || MPSC) | ||
| 393 | 396 | ||
| 394 | config X86_TSC | 397 | config X86_TSC |
| 395 | def_bool y | 398 | def_bool y |
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 4b9ae7c56748..4d3ff037201f 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c | |||
| @@ -38,12 +38,12 @@ static const u32 req_flags[NCAPINTS] = | |||
| 38 | { | 38 | { |
| 39 | REQUIRED_MASK0, | 39 | REQUIRED_MASK0, |
| 40 | REQUIRED_MASK1, | 40 | REQUIRED_MASK1, |
| 41 | REQUIRED_MASK2, | 41 | 0, /* REQUIRED_MASK2 not implemented in this file */ |
| 42 | REQUIRED_MASK3, | 42 | 0, /* REQUIRED_MASK3 not implemented in this file */ |
| 43 | REQUIRED_MASK4, | 43 | REQUIRED_MASK4, |
| 44 | REQUIRED_MASK5, | 44 | 0, /* REQUIRED_MASK5 not implemented in this file */ |
| 45 | REQUIRED_MASK6, | 45 | REQUIRED_MASK6, |
| 46 | REQUIRED_MASK7, | 46 | 0, /* REQUIRED_MASK7 not implemented in this file */ |
| 47 | }; | 47 | }; |
| 48 | 48 | ||
| 49 | #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) | 49 | #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 2763cb37b553..65a0c1b48696 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
| @@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | |||
| 145 | extern char __vsyscall_0; | 145 | extern char __vsyscall_0; |
| 146 | const unsigned char *const *find_nop_table(void) | 146 | const unsigned char *const *find_nop_table(void) |
| 147 | { | 147 | { |
| 148 | return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || | 148 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
| 149 | boot_cpu_data.x86 < 6 ? k8_nops : p6_nops; | 149 | boot_cpu_has(X86_FEATURE_NOPL)) |
| 150 | return p6_nops; | ||
| 151 | else | ||
| 152 | return k8_nops; | ||
| 150 | } | 153 | } |
| 151 | 154 | ||
| 152 | #else /* CONFIG_X86_64 */ | 155 | #else /* CONFIG_X86_64 */ |
| 153 | 156 | ||
| 154 | static const struct nop { | ||
| 155 | int cpuid; | ||
| 156 | const unsigned char *const *noptable; | ||
| 157 | } noptypes[] = { | ||
| 158 | { X86_FEATURE_K8, k8_nops }, | ||
| 159 | { X86_FEATURE_K7, k7_nops }, | ||
| 160 | { X86_FEATURE_P4, p6_nops }, | ||
| 161 | { X86_FEATURE_P3, p6_nops }, | ||
| 162 | { -1, NULL } | ||
| 163 | }; | ||
| 164 | |||
| 165 | const unsigned char *const *find_nop_table(void) | 157 | const unsigned char *const *find_nop_table(void) |
| 166 | { | 158 | { |
| 167 | const unsigned char *const *noptable = intel_nops; | 159 | if (boot_cpu_has(X86_FEATURE_K8)) |
| 168 | int i; | 160 | return k8_nops; |
| 169 | 161 | else if (boot_cpu_has(X86_FEATURE_K7)) | |
| 170 | for (i = 0; noptypes[i].cpuid >= 0; i++) { | 162 | return k7_nops; |
| 171 | if (boot_cpu_has(noptypes[i].cpuid)) { | 163 | else if (boot_cpu_has(X86_FEATURE_NOPL)) |
| 172 | noptable = noptypes[i].noptable; | 164 | return p6_nops; |
| 173 | break; | 165 | else |
| 174 | } | 166 | return intel_nops; |
| 175 | } | ||
| 176 | return noptable; | ||
| 177 | } | 167 | } |
| 178 | 168 | ||
| 179 | #endif /* CONFIG_X86_64 */ | 169 | #endif /* CONFIG_X86_64 */ |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index cae9cabc3031..18514ed26104 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |||
| 31 | if (c->x86_power & (1<<8)) | 31 | if (c->x86_power & (1<<8)) |
| 32 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 32 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
| 33 | } | 33 | } |
| 34 | |||
| 35 | /* Set MTRR capability flag if appropriate */ | ||
| 36 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
| 37 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
| 38 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
| 34 | } | 39 | } |
| 35 | 40 | ||
| 36 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 41 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) |
| @@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
| 166 | mbytes); | 171 | mbytes); |
| 167 | } | 172 | } |
| 168 | 173 | ||
| 169 | /* Set MTRR capability flag if appropriate */ | ||
| 170 | if (c->x86_model == 13 || c->x86_model == 9 || | ||
| 171 | (c->x86_model == 8 && c->x86_mask >= 8)) | ||
| 172 | set_cpu_cap(c, X86_FEATURE_K6_MTRR); | ||
| 173 | break; | 174 | break; |
| 174 | } | 175 | } |
| 175 | 176 | ||
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index e0f45edd6a55..a0534c04d38a 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
| @@ -314,6 +314,16 @@ enum { | |||
| 314 | EAMD3D = 1<<20, | 314 | EAMD3D = 1<<20, |
| 315 | }; | 315 | }; |
| 316 | 316 | ||
| 317 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | ||
| 318 | { | ||
| 319 | switch (c->x86) { | ||
| 320 | case 5: | ||
| 321 | /* Emulate MTRRs using Centaur's MCR. */ | ||
| 322 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); | ||
| 323 | break; | ||
| 324 | } | ||
| 325 | } | ||
| 326 | |||
| 317 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | 327 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) |
| 318 | { | 328 | { |
| 319 | 329 | ||
| @@ -462,6 +472,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) | |||
| 462 | static struct cpu_dev centaur_cpu_dev __cpuinitdata = { | 472 | static struct cpu_dev centaur_cpu_dev __cpuinitdata = { |
| 463 | .c_vendor = "Centaur", | 473 | .c_vendor = "Centaur", |
| 464 | .c_ident = { "CentaurHauls" }, | 474 | .c_ident = { "CentaurHauls" }, |
| 475 | .c_early_init = early_init_centaur, | ||
| 465 | .c_init = init_centaur, | 476 | .c_init = init_centaur, |
| 466 | .c_size_cache = centaur_size_cache, | 477 | .c_size_cache = centaur_size_cache, |
| 467 | }; | 478 | }; |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 80ab20d4fa39..8aab8517642e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <asm/mtrr.h> | 13 | #include <asm/mtrr.h> |
| 14 | #include <asm/mce.h> | 14 | #include <asm/mce.h> |
| 15 | #include <asm/pat.h> | 15 | #include <asm/pat.h> |
| 16 | #include <asm/asm.h> | ||
| 16 | #ifdef CONFIG_X86_LOCAL_APIC | 17 | #ifdef CONFIG_X86_LOCAL_APIC |
| 17 | #include <asm/mpspec.h> | 18 | #include <asm/mpspec.h> |
| 18 | #include <asm/apic.h> | 19 | #include <asm/apic.h> |
| @@ -334,11 +335,40 @@ static void __init early_cpu_detect(void) | |||
| 334 | 335 | ||
| 335 | get_cpu_vendor(c, 1); | 336 | get_cpu_vendor(c, 1); |
| 336 | 337 | ||
| 338 | early_get_cap(c); | ||
| 339 | |||
| 337 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 340 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && |
| 338 | cpu_devs[c->x86_vendor]->c_early_init) | 341 | cpu_devs[c->x86_vendor]->c_early_init) |
| 339 | cpu_devs[c->x86_vendor]->c_early_init(c); | 342 | cpu_devs[c->x86_vendor]->c_early_init(c); |
| 343 | } | ||
| 340 | 344 | ||
| 341 | early_get_cap(c); | 345 | /* |
| 346 | * The NOPL instruction is supposed to exist on all CPUs with | ||
| 347 | * family >= 6; unfortunately, that's not true in practice because | ||
| 348 | * of early VIA chips and (more importantly) broken virtualizers that | ||
| 349 | * are not easy to detect. Hence, probe for it based on first | ||
| 350 | * principles. | ||
| 351 | */ | ||
| 352 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
| 353 | { | ||
| 354 | const u32 nopl_signature = 0x888c53b1; /* Random number */ | ||
| 355 | u32 has_nopl = nopl_signature; | ||
| 356 | |||
| 357 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
| 358 | if (c->x86 >= 6) { | ||
| 359 | asm volatile("\n" | ||
| 360 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ | ||
| 361 | "2:\n" | ||
| 362 | " .section .fixup,\"ax\"\n" | ||
| 363 | "3: xor %0,%0\n" | ||
| 364 | " jmp 2b\n" | ||
| 365 | " .previous\n" | ||
| 366 | _ASM_EXTABLE(1b,3b) | ||
| 367 | : "+a" (has_nopl)); | ||
| 368 | |||
| 369 | if (has_nopl == nopl_signature) | ||
| 370 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
| 371 | } | ||
| 342 | } | 372 | } |
| 343 | 373 | ||
| 344 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 374 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
| @@ -395,8 +425,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | |||
| 395 | } | 425 | } |
| 396 | 426 | ||
| 397 | init_scattered_cpuid_features(c); | 427 | init_scattered_cpuid_features(c); |
| 428 | detect_nopl(c); | ||
| 398 | } | 429 | } |
| 399 | |||
| 400 | } | 430 | } |
| 401 | 431 | ||
| 402 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 432 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
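detect_nopl() executes a hand-encoded nopl opcode and relies on the kernel's exception table (_ASM_EXTABLE plus the .fixup section) to recover when a CPU raises #UD instead. The same first-principles probe can be sketched in userspace with a SIGILL handler playing the role of the fixup code (x86 only; an illustration, not the kernel mechanism):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;

static void on_sigill(int sig)
{
        (void)sig;
        siglongjmp(env, 1);     /* recover from the #UD fault */
}

int main(void)
{
        signal(SIGILL, on_sigill);
        if (sigsetjmp(env, 1) == 0) {
                /* 0f 1f c0 = nopl %eax; undefined opcode where unsupported */
                asm volatile(".byte 0x0f, 0x1f, 0xc0");
                puts("NOPL supported");
        } else {
                puts("NOPL not supported");
        }
        return 0;
}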
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c index dd6e3f15017e..a11f5d4477cd 100644 --- a/arch/x86/kernel/cpu/common_64.c +++ b/arch/x86/kernel/cpu/common_64.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <asm/mtrr.h> | 18 | #include <asm/mtrr.h> |
| 19 | #include <asm/mce.h> | 19 | #include <asm/mce.h> |
| 20 | #include <asm/pat.h> | 20 | #include <asm/pat.h> |
| 21 | #include <asm/asm.h> | ||
| 21 | #include <asm/numa.h> | 22 | #include <asm/numa.h> |
| 22 | #ifdef CONFIG_X86_LOCAL_APIC | 23 | #ifdef CONFIG_X86_LOCAL_APIC |
| 23 | #include <asm/mpspec.h> | 24 | #include <asm/mpspec.h> |
| @@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void) | |||
| 215 | } | 216 | } |
| 216 | } | 217 | } |
| 217 | 218 | ||
| 219 | /* | ||
| 220 | * The NOPL instruction is supposed to exist on all CPUs with | ||
| 221 | * family >= 6; unfortunately, that's not true in practice because | ||
| 222 | * of early VIA chips and (more importantly) broken virtualizers that | ||
| 223 | * are not easy to detect. Hence, probe for it based on first | ||
| 224 | * principles. | ||
| 225 | * | ||
| 226 | * Note: no 64-bit chip is known to lack these, but put the code here | ||
| 227 | * for consistency with 32 bits, and to make it utterly trivial to | ||
| 228 | * diagnose the problem should it ever surface. | ||
| 229 | */ | ||
| 230 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
| 231 | { | ||
| 232 | const u32 nopl_signature = 0x888c53b1; /* Random number */ | ||
| 233 | u32 has_nopl = nopl_signature; | ||
| 234 | |||
| 235 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
| 236 | if (c->x86 >= 6) { | ||
| 237 | asm volatile("\n" | ||
| 238 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ | ||
| 239 | "2:\n" | ||
| 240 | " .section .fixup,\"ax\"\n" | ||
| 241 | "3: xor %0,%0\n" | ||
| 242 | " jmp 2b\n" | ||
| 243 | " .previous\n" | ||
| 244 | _ASM_EXTABLE(1b,3b) | ||
| 245 | : "+a" (has_nopl)); | ||
| 246 | |||
| 247 | if (has_nopl == nopl_signature) | ||
| 248 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
| 249 | } | ||
| 250 | } | ||
| 251 | |||
| 218 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); | 252 | static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); |
| 219 | 253 | ||
| 220 | void __init early_cpu_init(void) | 254 | void __init early_cpu_init(void) |
| @@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | |||
| 313 | c->x86_phys_bits = eax & 0xff; | 347 | c->x86_phys_bits = eax & 0xff; |
| 314 | } | 348 | } |
| 315 | 349 | ||
| 350 | detect_nopl(c); | ||
| 351 | |||
| 316 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 352 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && |
| 317 | cpu_devs[c->x86_vendor]->c_early_init) | 353 | cpu_devs[c->x86_vendor]->c_early_init) |
| 318 | cpu_devs[c->x86_vendor]->c_early_init(c); | 354 | cpu_devs[c->x86_vendor]->c_early_init(c); |
| @@ -493,17 +529,20 @@ void pda_init(int cpu) | |||
| 493 | /* others are initialized in smpboot.c */ | 529 | /* others are initialized in smpboot.c */ |
| 494 | pda->pcurrent = &init_task; | 530 | pda->pcurrent = &init_task; |
| 495 | pda->irqstackptr = boot_cpu_stack; | 531 | pda->irqstackptr = boot_cpu_stack; |
| 532 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 496 | } else { | 533 | } else { |
| 497 | pda->irqstackptr = (char *) | 534 | if (!pda->irqstackptr) { |
| 498 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | 535 | pda->irqstackptr = (char *) |
| 499 | if (!pda->irqstackptr) | 536 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); |
| 500 | panic("cannot allocate irqstack for cpu %d", cpu); | 537 | if (!pda->irqstackptr) |
| 538 | panic("cannot allocate irqstack for cpu %d", | ||
| 539 | cpu); | ||
| 540 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 541 | } | ||
| 501 | 542 | ||
| 502 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | 543 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) |
| 503 | pda->nodenumber = cpu_to_node(cpu); | 544 | pda->nodenumber = cpu_to_node(cpu); |
| 504 | } | 545 | } |
| 505 | |||
| 506 | pda->irqstackptr += IRQSTACKSIZE-64; | ||
| 507 | } | 546 | } |
| 508 | 547 | ||
| 509 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | 548 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + |
| @@ -601,19 +640,22 @@ void __cpuinit cpu_init(void) | |||
| 601 | /* | 640 | /* |
| 602 | * set up and load the per-CPU TSS | 641 | * set up and load the per-CPU TSS |
| 603 | */ | 642 | */ |
| 604 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | 643 | if (!orig_ist->ist[0]) { |
| 605 | static const unsigned int order[N_EXCEPTION_STACKS] = { | 644 | static const unsigned int order[N_EXCEPTION_STACKS] = { |
| 606 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | 645 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, |
| 607 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | 646 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER |
| 608 | }; | 647 | }; |
| 609 | if (cpu) { | 648 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
| 610 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | 649 | if (cpu) { |
| 611 | if (!estacks) | 650 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); |
| 612 | panic("Cannot allocate exception stack %ld %d\n", | 651 | if (!estacks) |
| 613 | v, cpu); | 652 | panic("Cannot allocate exception " |
| 653 | "stack %ld %d\n", v, cpu); | ||
| 654 | } | ||
| 655 | estacks += PAGE_SIZE << order[v]; | ||
| 656 | orig_ist->ist[v] = t->x86_tss.ist[v] = | ||
| 657 | (unsigned long)estacks; | ||
| 614 | } | 658 | } |
| 615 | estacks += PAGE_SIZE << order[v]; | ||
| 616 | orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks; | ||
| 617 | } | 659 | } |
| 618 | 660 | ||
| 619 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | 661 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
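Both the pda_init() and cpu_init() rewrites follow one idea: allocate the IRQ and exception stacks only if they have not been allocated yet, so the functions become idempotent and safe to rerun when a hot-unplugged CPU is brought back online. Reduced to its core, the pattern looks like this (a sketch; the kernel panics on allocation failure rather than returning an error):

#include <stdlib.h>

/* Idempotent init: repeated calls reuse the stack allocated on the
 * first pass instead of leaking or reallocating it. */
static char *irqstackptr;

static int init_irqstack(size_t size)
{
        if (!irqstackptr) {
                irqstackptr = malloc(size);
                if (!irqstackptr)
                        return -1;
                irqstackptr += size - 64;  /* usable top, as in pda_init() */
        }
        return 0;
}

int main(void)
{
        init_irqstack(16384);
        char *first = irqstackptr;
        init_irqstack(16384);           /* second "online" event: no-op */
        return first == irqstackptr ? 0 : 1;
}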
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index e710a21bb6e8..898a5a2002ed 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
| @@ -15,13 +15,11 @@ | |||
| 15 | /* | 15 | /* |
| 16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU | 16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU |
| 17 | */ | 17 | */ |
| 18 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 18 | static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
| 19 | { | 19 | { |
| 20 | unsigned char ccr2, ccr3; | 20 | unsigned char ccr2, ccr3; |
| 21 | unsigned long flags; | ||
| 22 | 21 | ||
| 23 | /* we test for DEVID by checking whether CCR3 is writable */ | 22 | /* we test for DEVID by checking whether CCR3 is writable */ |
| 24 | local_irq_save(flags); | ||
| 25 | ccr3 = getCx86(CX86_CCR3); | 23 | ccr3 = getCx86(CX86_CCR3); |
| 26 | setCx86(CX86_CCR3, ccr3 ^ 0x80); | 24 | setCx86(CX86_CCR3, ccr3 ^ 0x80); |
| 27 | getCx86(0xc0); /* dummy to change bus */ | 25 | getCx86(0xc0); /* dummy to change bus */ |
| @@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
| 44 | *dir0 = getCx86(CX86_DIR0); | 42 | *dir0 = getCx86(CX86_DIR0); |
| 45 | *dir1 = getCx86(CX86_DIR1); | 43 | *dir1 = getCx86(CX86_DIR1); |
| 46 | } | 44 | } |
| 47 | local_irq_restore(flags); | ||
| 48 | } | 45 | } |
| 49 | 46 | ||
| 47 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | ||
| 48 | { | ||
| 49 | unsigned long flags; | ||
| 50 | |||
| 51 | local_irq_save(flags); | ||
| 52 | __do_cyrix_devid(dir0, dir1); | ||
| 53 | local_irq_restore(flags); | ||
| 54 | } | ||
| 50 | /* | 55 | /* |
| 51 | * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in | 56 | * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in |
| 52 | * order to identify the Cyrix CPU model after we're out of setup.c | 57 | * order to identify the Cyrix CPU model after we're out of setup.c |
| @@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void) | |||
| 161 | local_irq_restore(flags); | 166 | local_irq_restore(flags); |
| 162 | } | 167 | } |
| 163 | 168 | ||
| 169 | static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) | ||
| 170 | { | ||
| 171 | unsigned char dir0, dir0_msn, dir1 = 0; | ||
| 172 | |||
| 173 | __do_cyrix_devid(&dir0, &dir1); | ||
| 174 | dir0_msn = dir0 >> 4; /* identifies CPU "family" */ | ||
| 175 | |||
| 176 | switch (dir0_msn) { | ||
| 177 | case 3: /* 6x86/6x86L */ | ||
| 178 | /* Emulate MTRRs using Cyrix's ARRs. */ | ||
| 179 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); | ||
| 180 | break; | ||
| 181 | case 5: /* 6x86MX/M II */ | ||
| 182 | /* Emulate MTRRs using Cyrix's ARRs. */ | ||
| 183 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); | ||
| 184 | break; | ||
| 185 | } | ||
| 186 | } | ||
| 164 | 187 | ||
| 165 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | 188 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) |
| 166 | { | 189 | { |
| @@ -416,6 +439,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
| 416 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | 439 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { |
| 417 | .c_vendor = "Cyrix", | 440 | .c_vendor = "Cyrix", |
| 418 | .c_ident = { "CyrixInstead" }, | 441 | .c_ident = { "CyrixInstead" }, |
| 442 | .c_early_init = early_init_cyrix, | ||
| 419 | .c_init = init_cyrix, | 443 | .c_init = init_cyrix, |
| 420 | .c_identify = cyrix_identify, | 444 | .c_identify = cyrix_identify, |
| 421 | }; | 445 | }; |
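The __do_cyrix_devid()/do_cyrix_devid() split follows the kernel's double-underscore convention: the bare-underscore helper assumes the caller already runs with interrupts disabled (true in the early_init path), while the plain-named wrapper adds local_irq_save()/local_irq_restore() for everyone else. A runnable toy version of the shape (printf stand-ins for the IRQ primitives):

#include <stdio.h>

/* printf stand-ins for local_irq_save()/local_irq_restore() */
static void irq_save(unsigned long *flags)   { *flags = 1; puts("irqs off"); }
static void irq_restore(unsigned long flags) { (void)flags; puts("irqs on"); }

/* Bare helper: caller guarantees interrupts are already off. */
static void __do_devid(unsigned char *dir0, unsigned char *dir1)
{
        *dir0 = 0x30;   /* pretend register reads */
        *dir1 = 0x00;
}

/* Wrapper: safe from any context, brackets the helper with IRQ state. */
static void do_devid(unsigned char *dir0, unsigned char *dir1)
{
        unsigned long flags;

        irq_save(&flags);
        __do_devid(dir0, dir1);
        irq_restore(flags);
}

int main(void)
{
        unsigned char d0, d1;

        do_devid(&d0, &d1);
        printf("dir0=%02x dir1=%02x\n", d0, d1);
        return 0;
}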
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c index e43ad4ad4cba..c9017799497c 100644 --- a/arch/x86/kernel/cpu/feature_names.c +++ b/arch/x86/kernel/cpu/feature_names.c | |||
| @@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = { | |||
| 39 | NULL, NULL, NULL, NULL, | 39 | NULL, NULL, NULL, NULL, |
| 40 | "constant_tsc", "up", NULL, "arch_perfmon", | 40 | "constant_tsc", "up", NULL, "arch_perfmon", |
| 41 | "pebs", "bts", NULL, NULL, | 41 | "pebs", "bts", NULL, NULL, |
| 42 | "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 42 | "rep_good", NULL, NULL, NULL, |
| 43 | "nopl", NULL, NULL, NULL, | ||
| 43 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
| 44 | 45 | ||
| 45 | /* Intel-defined (#2) */ | 46 | /* Intel-defined (#2) */ |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 9af89078f7bb..66e48aa2dd1b 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
| @@ -1203,7 +1203,7 @@ static int __init parse_memmap_opt(char *p) | |||
| 1203 | if (!p) | 1203 | if (!p) |
| 1204 | return -EINVAL; | 1204 | return -EINVAL; |
| 1205 | 1205 | ||
| 1206 | if (!strcmp(p, "exactmap")) { | 1206 | if (!strncmp(p, "exactmap", 8)) { |
| 1207 | #ifdef CONFIG_CRASH_DUMP | 1207 | #ifdef CONFIG_CRASH_DUMP |
| 1208 | /* | 1208 | /* |
| 1209 | * If we are doing a crash dump, we still need to know | 1209 | * If we are doing a crash dump, we still need to know |
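The strcmp-to-strncmp change makes "exactmap" match as a prefix, so the option is still recognized when further characters follow it inside the same parameter string. A standalone illustration (the trailing text after the keyword is hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *p = "exactmap@2M";  /* hypothetical trailing text */

        printf("strcmp:  %s\n",
               strcmp(p, "exactmap") == 0 ? "match" : "no match");
        printf("strncmp: %s\n",
               strncmp(p, "exactmap", 8) == 0 ? "match" : "no match");
        return 0;
}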
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 59fd3b6b1303..73deaffadd03 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
| @@ -210,8 +210,8 @@ static void hpet_legacy_clockevent_register(void) | |||
| 210 | /* Calculate the min / max delta */ | 210 | /* Calculate the min / max delta */ |
| 211 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | 211 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, |
| 212 | &hpet_clockevent); | 212 | &hpet_clockevent); |
| 213 | hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, | 213 | /* 5 usec minimum reprogramming delta. */ |
| 214 | &hpet_clockevent); | 214 | hpet_clockevent.min_delta_ns = 5000; |
| 215 | 215 | ||
| 216 | /* | 216 | /* |
| 217 | * Start hpet with the boot cpu mask and make it | 217 | * Start hpet with the boot cpu mask and make it |
| @@ -270,15 +270,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode, | |||
| 270 | } | 270 | } |
| 271 | 271 | ||
| 272 | static int hpet_legacy_next_event(unsigned long delta, | 272 | static int hpet_legacy_next_event(unsigned long delta, |
| 273 | struct clock_event_device *evt) | 273 | struct clock_event_device *evt) |
| 274 | { | 274 | { |
| 275 | unsigned long cnt; | 275 | u32 cnt; |
| 276 | 276 | ||
| 277 | cnt = hpet_readl(HPET_COUNTER); | 277 | cnt = hpet_readl(HPET_COUNTER); |
| 278 | cnt += delta; | 278 | cnt += (u32) delta; |
| 279 | hpet_writel(cnt, HPET_T0_CMP); | 279 | hpet_writel(cnt, HPET_T0_CMP); |
| 280 | 280 | ||
| 281 | return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0; | 281 | /* |
| 282 | * We need to read back the CMP register to make sure that | ||
| 283 | * what we wrote hit the chip before we compare it to the | ||
| 284 | * counter. | ||
| 285 | */ | ||
| 286 | WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt); | ||
| 287 | |||
| 288 | return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | ||
| 282 | } | 289 | } |
| 283 | 290 | ||
| 284 | /* | 291 | /* |
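The new return expression is the standard wrap-safe test for a free-running 32-bit counter: subtract in unsigned arithmetic and check the sign of the 32-bit result, which stays correct across a 2^32 rollover as long as the two values are within 2^31 ticks of each other. For instance:

#include <stdint.h>
#include <stdio.h>

/* Nonzero once counter 'now' has reached or passed 'cmp', even if the
 * counter wrapped around 2^32 in between (distance must be < 2^31). */
static int passed(uint32_t now, uint32_t cmp)
{
        return (int32_t)(now - cmp) >= 0;
}

int main(void)
{
        printf("%d\n", passed(0x00000010, 0xfffffff0)); /* 1: wrapped past */
        printf("%d\n", passed(0xfffffff0, 0x00000010)); /* 0: still before */
        return 0;
}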
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 9ff6e3cbf08f..a4e201b47f64 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -1324,7 +1324,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
| 1324 | .ptep_modify_prot_commit = __ptep_modify_prot_commit, | 1324 | .ptep_modify_prot_commit = __ptep_modify_prot_commit, |
| 1325 | 1325 | ||
| 1326 | .pte_val = xen_pte_val, | 1326 | .pte_val = xen_pte_val, |
| 1327 | .pte_flags = native_pte_val, | 1327 | .pte_flags = native_pte_flags, |
| 1328 | .pgd_val = xen_pgd_val, | 1328 | .pgd_val = xen_pgd_val, |
| 1329 | 1329 | ||
| 1330 | .make_pte = xen_make_pte, | 1330 | .make_pte = xen_make_pte, |
diff --git a/crypto/camellia.c b/crypto/camellia.c index b1cc4de6493c..493fee7e0a8b 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c | |||
| @@ -35,8 +35,6 @@ | |||
| 35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
| 36 | #include <linux/kernel.h> | 36 | #include <linux/kernel.h> |
| 37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
| 38 | #include <linux/bitops.h> | ||
| 39 | #include <asm/unaligned.h> | ||
| 40 | 38 | ||
| 41 | static const u32 camellia_sp1110[256] = { | 39 | static const u32 camellia_sp1110[256] = { |
| 42 | 0x70707000,0x82828200,0x2c2c2c00,0xececec00, | 40 | 0x70707000,0x82828200,0x2c2c2c00,0xececec00, |
| @@ -337,6 +335,20 @@ static const u32 camellia_sp4404[256] = { | |||
| 337 | /* | 335 | /* |
| 338 | * macros | 336 | * macros |
| 339 | */ | 337 | */ |
| 338 | #define GETU32(v, pt) \ | ||
| 339 | do { \ | ||
| 340 | /* latest breed of gcc is clever enough to use move */ \ | ||
| 341 | memcpy(&(v), (pt), 4); \ | ||
| 342 | (v) = be32_to_cpu(v); \ | ||
| 343 | } while(0) | ||
| 344 | |||
| 345 | /* rotation right shift 1byte */ | ||
| 346 | #define ROR8(x) (((x) >> 8) + ((x) << 24)) | ||
| 347 | /* rotation left shift 1bit */ | ||
| 348 | #define ROL1(x) (((x) << 1) + ((x) >> 31)) | ||
| 349 | /* rotation left shift 1byte */ | ||
| 350 | #define ROL8(x) (((x) << 8) + ((x) >> 24)) | ||
| 351 | |||
| 340 | #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ | 352 | #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ |
| 341 | do { \ | 353 | do { \ |
| 342 | w0 = ll; \ | 354 | w0 = ll; \ |
| @@ -371,7 +383,7 @@ static const u32 camellia_sp4404[256] = { | |||
| 371 | ^ camellia_sp3033[(u8)(il >> 8)] \ | 383 | ^ camellia_sp3033[(u8)(il >> 8)] \ |
| 372 | ^ camellia_sp4404[(u8)(il )]; \ | 384 | ^ camellia_sp4404[(u8)(il )]; \ |
| 373 | yl ^= yr; \ | 385 | yl ^= yr; \ |
| 374 | yr = ror32(yr, 8); \ | 386 | yr = ROR8(yr); \ |
| 375 | yr ^= yl; \ | 387 | yr ^= yl; \ |
| 376 | } while(0) | 388 | } while(0) |
| 377 | 389 | ||
| @@ -393,7 +405,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 393 | subL[7] ^= subL[1]; subR[7] ^= subR[1]; | 405 | subL[7] ^= subL[1]; subR[7] ^= subR[1]; |
| 394 | subL[1] ^= subR[1] & ~subR[9]; | 406 | subL[1] ^= subR[1] & ~subR[9]; |
| 395 | dw = subL[1] & subL[9], | 407 | dw = subL[1] & subL[9], |
| 396 | subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */ | 408 | subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ |
| 397 | /* round 8 */ | 409 | /* round 8 */ |
| 398 | subL[11] ^= subL[1]; subR[11] ^= subR[1]; | 410 | subL[11] ^= subL[1]; subR[11] ^= subR[1]; |
| 399 | /* round 10 */ | 411 | /* round 10 */ |
| @@ -402,7 +414,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 402 | subL[15] ^= subL[1]; subR[15] ^= subR[1]; | 414 | subL[15] ^= subL[1]; subR[15] ^= subR[1]; |
| 403 | subL[1] ^= subR[1] & ~subR[17]; | 415 | subL[1] ^= subR[1] & ~subR[17]; |
| 404 | dw = subL[1] & subL[17], | 416 | dw = subL[1] & subL[17], |
| 405 | subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */ | 417 | subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ |
| 406 | /* round 14 */ | 418 | /* round 14 */ |
| 407 | subL[19] ^= subL[1]; subR[19] ^= subR[1]; | 419 | subL[19] ^= subL[1]; subR[19] ^= subR[1]; |
| 408 | /* round 16 */ | 420 | /* round 16 */ |
| @@ -418,7 +430,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 418 | } else { | 430 | } else { |
| 419 | subL[1] ^= subR[1] & ~subR[25]; | 431 | subL[1] ^= subR[1] & ~subR[25]; |
| 420 | dw = subL[1] & subL[25], | 432 | dw = subL[1] & subL[25], |
| 421 | subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */ | 433 | subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */ |
| 422 | /* round 20 */ | 434 | /* round 20 */ |
| 423 | subL[27] ^= subL[1]; subR[27] ^= subR[1]; | 435 | subL[27] ^= subL[1]; subR[27] ^= subR[1]; |
| 424 | /* round 22 */ | 436 | /* round 22 */ |
| @@ -438,7 +450,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 438 | subL[26] ^= kw4l; subR[26] ^= kw4r; | 450 | subL[26] ^= kw4l; subR[26] ^= kw4r; |
| 439 | kw4l ^= kw4r & ~subR[24]; | 451 | kw4l ^= kw4r & ~subR[24]; |
| 440 | dw = kw4l & subL[24], | 452 | dw = kw4l & subL[24], |
| 441 | kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */ | 453 | kw4r ^= ROL1(dw); /* modified for FL(kl5) */ |
| 442 | } | 454 | } |
| 443 | /* round 17 */ | 455 | /* round 17 */ |
| 444 | subL[22] ^= kw4l; subR[22] ^= kw4r; | 456 | subL[22] ^= kw4l; subR[22] ^= kw4r; |
| @@ -448,7 +460,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 448 | subL[18] ^= kw4l; subR[18] ^= kw4r; | 460 | subL[18] ^= kw4l; subR[18] ^= kw4r; |
| 449 | kw4l ^= kw4r & ~subR[16]; | 461 | kw4l ^= kw4r & ~subR[16]; |
| 450 | dw = kw4l & subL[16], | 462 | dw = kw4l & subL[16], |
| 451 | kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */ | 463 | kw4r ^= ROL1(dw); /* modified for FL(kl3) */ |
| 452 | /* round 11 */ | 464 | /* round 11 */ |
| 453 | subL[14] ^= kw4l; subR[14] ^= kw4r; | 465 | subL[14] ^= kw4l; subR[14] ^= kw4r; |
| 454 | /* round 9 */ | 466 | /* round 9 */ |
| @@ -457,7 +469,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 457 | subL[10] ^= kw4l; subR[10] ^= kw4r; | 469 | subL[10] ^= kw4l; subR[10] ^= kw4r; |
| 458 | kw4l ^= kw4r & ~subR[8]; | 470 | kw4l ^= kw4r & ~subR[8]; |
| 459 | dw = kw4l & subL[8], | 471 | dw = kw4l & subL[8], |
| 460 | kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */ | 472 | kw4r ^= ROL1(dw); /* modified for FL(kl1) */ |
| 461 | /* round 5 */ | 473 | /* round 5 */ |
| 462 | subL[6] ^= kw4l; subR[6] ^= kw4r; | 474 | subL[6] ^= kw4l; subR[6] ^= kw4r; |
| 463 | /* round 3 */ | 475 | /* round 3 */ |
| @@ -482,7 +494,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 482 | SUBKEY_R(6) = subR[5] ^ subR[7]; | 494 | SUBKEY_R(6) = subR[5] ^ subR[7]; |
| 483 | tl = subL[10] ^ (subR[10] & ~subR[8]); | 495 | tl = subL[10] ^ (subR[10] & ~subR[8]); |
| 484 | dw = tl & subL[8], /* FL(kl1) */ | 496 | dw = tl & subL[8], /* FL(kl1) */ |
| 485 | tr = subR[10] ^ rol32(dw, 1); | 497 | tr = subR[10] ^ ROL1(dw); |
| 486 | SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ | 498 | SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ |
| 487 | SUBKEY_R(7) = subR[6] ^ tr; | 499 | SUBKEY_R(7) = subR[6] ^ tr; |
| 488 | SUBKEY_L(8) = subL[8]; /* FL(kl1) */ | 500 | SUBKEY_L(8) = subL[8]; /* FL(kl1) */ |
| @@ -491,7 +503,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 491 | SUBKEY_R(9) = subR[9]; | 503 | SUBKEY_R(9) = subR[9]; |
| 492 | tl = subL[7] ^ (subR[7] & ~subR[9]); | 504 | tl = subL[7] ^ (subR[7] & ~subR[9]); |
| 493 | dw = tl & subL[9], /* FLinv(kl2) */ | 505 | dw = tl & subL[9], /* FLinv(kl2) */ |
| 494 | tr = subR[7] ^ rol32(dw, 1); | 506 | tr = subR[7] ^ ROL1(dw); |
| 495 | SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ | 507 | SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ |
| 496 | SUBKEY_R(10) = tr ^ subR[11]; | 508 | SUBKEY_R(10) = tr ^ subR[11]; |
| 497 | SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ | 509 | SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ |
| @@ -504,7 +516,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 504 | SUBKEY_R(14) = subR[13] ^ subR[15]; | 516 | SUBKEY_R(14) = subR[13] ^ subR[15]; |
| 505 | tl = subL[18] ^ (subR[18] & ~subR[16]); | 517 | tl = subL[18] ^ (subR[18] & ~subR[16]); |
| 506 | dw = tl & subL[16], /* FL(kl3) */ | 518 | dw = tl & subL[16], /* FL(kl3) */ |
| 507 | tr = subR[18] ^ rol32(dw, 1); | 519 | tr = subR[18] ^ ROL1(dw); |
| 508 | SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ | 520 | SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ |
| 509 | SUBKEY_R(15) = subR[14] ^ tr; | 521 | SUBKEY_R(15) = subR[14] ^ tr; |
| 510 | SUBKEY_L(16) = subL[16]; /* FL(kl3) */ | 522 | SUBKEY_L(16) = subL[16]; /* FL(kl3) */ |
| @@ -513,7 +525,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 513 | SUBKEY_R(17) = subR[17]; | 525 | SUBKEY_R(17) = subR[17]; |
| 514 | tl = subL[15] ^ (subR[15] & ~subR[17]); | 526 | tl = subL[15] ^ (subR[15] & ~subR[17]); |
| 515 | dw = tl & subL[17], /* FLinv(kl4) */ | 527 | dw = tl & subL[17], /* FLinv(kl4) */ |
| 516 | tr = subR[15] ^ rol32(dw, 1); | 528 | tr = subR[15] ^ ROL1(dw); |
| 517 | SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ | 529 | SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ |
| 518 | SUBKEY_R(18) = tr ^ subR[19]; | 530 | SUBKEY_R(18) = tr ^ subR[19]; |
| 519 | SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ | 531 | SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ |
| @@ -532,7 +544,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 532 | } else { | 544 | } else { |
| 533 | tl = subL[26] ^ (subR[26] & ~subR[24]); | 545 | tl = subL[26] ^ (subR[26] & ~subR[24]); |
| 534 | dw = tl & subL[24], /* FL(kl5) */ | 546 | dw = tl & subL[24], /* FL(kl5) */ |
| 535 | tr = subR[26] ^ rol32(dw, 1); | 547 | tr = subR[26] ^ ROL1(dw); |
| 536 | SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ | 548 | SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ |
| 537 | SUBKEY_R(23) = subR[22] ^ tr; | 549 | SUBKEY_R(23) = subR[22] ^ tr; |
| 538 | SUBKEY_L(24) = subL[24]; /* FL(kl5) */ | 550 | SUBKEY_L(24) = subL[24]; /* FL(kl5) */ |
| @@ -541,7 +553,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 541 | SUBKEY_R(25) = subR[25]; | 553 | SUBKEY_R(25) = subR[25]; |
| 542 | tl = subL[23] ^ (subR[23] & ~subR[25]); | 554 | tl = subL[23] ^ (subR[23] & ~subR[25]); |
| 543 | dw = tl & subL[25], /* FLinv(kl6) */ | 555 | dw = tl & subL[25], /* FLinv(kl6) */ |
| 544 | tr = subR[23] ^ rol32(dw, 1); | 556 | tr = subR[23] ^ ROL1(dw); |
| 545 | SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ | 557 | SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ |
| 546 | SUBKEY_R(26) = tr ^ subR[27]; | 558 | SUBKEY_R(26) = tr ^ subR[27]; |
| 547 | SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ | 559 | SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ |
| @@ -561,17 +573,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) | |||
| 561 | /* apply the inverse of the last half of P-function */ | 573 | /* apply the inverse of the last half of P-function */ |
| 562 | i = 2; | 574 | i = 2; |
| 563 | do { | 575 | do { |
| 564 | dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */ | 576 | dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */ |
| 565 | SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; | 577 | SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; |
| 566 | dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */ | 578 | dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */ |
| 567 | SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw; | 579 | SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw; |
| 568 | dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */ | 580 | dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */ |
| 569 | SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw; | 581 | SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw; |
| 570 | dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */ | 582 | dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */ |
| 571 | SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw; | 583 | SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw; |
| 572 | dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 9);/* round 5 */ | 584 | dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */ |
| 573 | SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw; | 585 | SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw; |
| 574 | dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */ | 586 | dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */ |
| 575 | SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw; | 587 | SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw; |
| 576 | i += 8; | 588 | i += 8; |
| 577 | } while (i < max); | 589 | } while (i < max); |
| @@ -587,10 +599,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) | |||
| 587 | /** | 599 | /** |
| 588 | * k == kll || klr || krl || krr (|| is concatenation) | 600 | * k == kll || klr || krl || krr (|| is concatenation) |
| 589 | */ | 601 | */ |
| 590 | kll = get_unaligned_be32(key); | 602 | GETU32(kll, key ); |
| 591 | klr = get_unaligned_be32(key + 4); | 603 | GETU32(klr, key + 4); |
| 592 | krl = get_unaligned_be32(key + 8); | 604 | GETU32(krl, key + 8); |
| 593 | krr = get_unaligned_be32(key + 12); | 605 | GETU32(krr, key + 12); |
| 594 | 606 | ||
| 595 | /* generate KL dependent subkeys */ | 607 | /* generate KL dependent subkeys */ |
| 596 | /* kw1 */ | 608 | /* kw1 */ |
| @@ -695,14 +707,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) | |||
| 695 | * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) | 707 | * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) |
| 696 | * (|| is concatenation) | 708 | * (|| is concatenation) |
| 697 | */ | 709 | */ |
| 698 | kll = get_unaligned_be32(key); | 710 | GETU32(kll, key ); |
| 699 | klr = get_unaligned_be32(key + 4); | 711 | GETU32(klr, key + 4); |
| 700 | krl = get_unaligned_be32(key + 8); | 712 | GETU32(krl, key + 8); |
| 701 | krr = get_unaligned_be32(key + 12); | 713 | GETU32(krr, key + 12); |
| 702 | krll = get_unaligned_be32(key + 16); | 714 | GETU32(krll, key + 16); |
| 703 | krlr = get_unaligned_be32(key + 20); | 715 | GETU32(krlr, key + 20); |
| 704 | krrl = get_unaligned_be32(key + 24); | 716 | GETU32(krrl, key + 24); |
| 705 | krrr = get_unaligned_be32(key + 28); | 717 | GETU32(krrr, key + 28); |
| 706 | 718 | ||
| 707 | /* generate KL dependent subkeys */ | 719 | /* generate KL dependent subkeys */ |
| 708 | /* kw1 */ | 720 | /* kw1 */ |
| @@ -858,13 +870,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) | |||
| 858 | t0 &= ll; \ | 870 | t0 &= ll; \ |
| 859 | t2 |= rr; \ | 871 | t2 |= rr; \ |
| 860 | rl ^= t2; \ | 872 | rl ^= t2; \ |
| 861 | lr ^= rol32(t0, 1); \ | 873 | lr ^= ROL1(t0); \ |
| 862 | t3 = krl; \ | 874 | t3 = krl; \ |
| 863 | t1 = klr; \ | 875 | t1 = klr; \ |
| 864 | t3 &= rl; \ | 876 | t3 &= rl; \ |
| 865 | t1 |= lr; \ | 877 | t1 |= lr; \ |
| 866 | ll ^= t1; \ | 878 | ll ^= t1; \ |
| 867 | rr ^= rol32(t3, 1); \ | 879 | rr ^= ROL1(t3); \ |
| 868 | } while(0) | 880 | } while(0) |
| 869 | 881 | ||
| 870 | #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ | 882 | #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ |
| @@ -880,7 +892,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) | |||
| 880 | il ^= kl; \ | 892 | il ^= kl; \ |
| 881 | ir ^= il ^ kr; \ | 893 | ir ^= il ^ kr; \ |
| 882 | yl ^= ir; \ | 894 | yl ^= ir; \ |
| 883 | yr ^= ror32(il, 8) ^ ir; \ | 895 | yr ^= ROR8(il) ^ ir; \ |
| 884 | } while(0) | 896 | } while(0) |
| 885 | 897 | ||
| 886 | /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ | 898 | /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ |
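The reinstated ROR8/ROL1/ROL8 macros are plain 32-bit rotations spelled with shifts and addition; because the two shifted halves never overlap, '+' behaves exactly like '|' here. A quick self-check against a generic rotate (assumes 32-bit unsigned operands, as in the cipher code):

#include <assert.h>
#include <stdint.h>

#define ROR8(x) (((x) >> 8) + ((x) << 24))     /* rotate right one byte */
#define ROL1(x) (((x) << 1) + ((x) >> 31))     /* rotate left one bit */
#define ROL8(x) (((x) << 8) + ((x) >> 24))     /* rotate left one byte */

static uint32_t rol32_ref(uint32_t v, unsigned int s)
{
        return (v << s) | (v >> (32 - s));
}

int main(void)
{
        uint32_t x = 0x12345678u;

        assert(ROR8(x) == rol32_ref(x, 24));
        assert(ROL1(x) == rol32_ref(x, 1));
        assert(ROL8(x) == rol32_ref(x, 8));
        return 0;
}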
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index ae8494944c45..11c8c19f0fb7 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
| @@ -448,8 +448,10 @@ config PATA_MARVELL | |||
| 448 | tristate "Marvell PATA support via legacy mode" | 448 | tristate "Marvell PATA support via legacy mode" |
| 449 | depends on PCI | 449 | depends on PCI |
| 450 | help | 450 | help |
| 451 | This option enables limited support for the Marvell 88SE6145 ATA | 451 | This option enables limited support for the Marvell 88SE61xx ATA |
| 452 | controller. | 452 | controllers. If you wish to use only the SATA ports then select |
| 453 | the AHCI driver alone. If you wish to use the PATA port or | ||
| 454 | both SATA and PATA, include this driver. | ||
| 453 | 455 | ||
| 454 | If unsure, say N. | 456 | If unsure, say N. |
| 455 | 457 | ||
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index c729e6988bbb..2e1a7cb2ed5f 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
| @@ -420,7 +420,7 @@ static const struct ata_port_info ahci_port_info[] = { | |||
| 420 | /* board_ahci_mv */ | 420 | /* board_ahci_mv */ |
| 421 | { | 421 | { |
| 422 | AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | | 422 | AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | |
| 423 | AHCI_HFLAG_MV_PATA), | 423 | AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), |
| 424 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 424 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
| 425 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, | 425 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, |
| 426 | .pio_mask = 0x1f, /* pio0-4 */ | 426 | .pio_mask = 0x1f, /* pio0-4 */ |
| @@ -487,7 +487,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
| 487 | { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ | 487 | { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ |
| 488 | { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ | 488 | { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ |
| 489 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ | 489 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ |
| 490 | { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ | ||
| 490 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ | 491 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ |
| 492 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ | ||
| 491 | 493 | ||
| 492 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 494 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
| 493 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 495 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
| @@ -610,6 +612,15 @@ module_param(ahci_em_messages, int, 0444); | |||
| 610 | MODULE_PARM_DESC(ahci_em_messages, | 612 | MODULE_PARM_DESC(ahci_em_messages, |
| 611 | "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED"); | 613 | "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED"); |
| 612 | 614 | ||
| 615 | #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) | ||
| 616 | static int marvell_enable; | ||
| 617 | #else | ||
| 618 | static int marvell_enable = 1; | ||
| 619 | #endif | ||
| 620 | module_param(marvell_enable, int, 0644); | ||
| 621 | MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); | ||
| 622 | |||
| 623 | |||
| 613 | static inline int ahci_nr_ports(u32 cap) | 624 | static inline int ahci_nr_ports(u32 cap) |
| 614 | { | 625 | { |
| 615 | return (cap & 0x1f) + 1; | 626 | return (cap & 0x1f) + 1; |
| @@ -732,6 +743,8 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
| 732 | "MV_AHCI HACK: port_map %x -> %x\n", | 743 | "MV_AHCI HACK: port_map %x -> %x\n", |
| 733 | port_map, | 744 | port_map, |
| 734 | port_map & mv); | 745 | port_map & mv); |
| 746 | dev_printk(KERN_ERR, &pdev->dev, | ||
| 747 | "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); | ||
| 735 | 748 | ||
| 736 | port_map &= mv; | 749 | port_map &= mv; |
| 737 | } | 750 | } |
| @@ -2533,6 +2546,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2533 | if (!printed_version++) | 2546 | if (!printed_version++) |
| 2534 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 2547 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
| 2535 | 2548 | ||
| 2549 | /* The AHCI driver can only drive the SATA ports; the PATA driver | ||
| 2550 | can drive them all, so if both drivers are selected make sure | ||
| 2551 | AHCI stays out of the way */ | ||
| 2552 | if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) | ||
| 2553 | return -ENODEV; | ||
| 2554 | |||
| 2536 | /* acquire resources */ | 2555 | /* acquire resources */ |
| 2537 | rc = pcim_enable_device(pdev); | 2556 | rc = pcim_enable_device(pdev); |
| 2538 | if (rc) | 2557 | if (rc) |
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 304fdc6f1dc2..2a4c516894f0 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
| @@ -1315,11 +1315,6 @@ fsm_start: | |||
| 1315 | break; | 1315 | break; |
| 1316 | 1316 | ||
| 1317 | case HSM_ST_ERR: | 1317 | case HSM_ST_ERR: |
| 1318 | /* make sure qc->err_mask is available to | ||
| 1319 | * know what's wrong and recover | ||
| 1320 | */ | ||
| 1321 | WARN_ON(!(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM))); | ||
| 1322 | |||
| 1323 | ap->hsm_task_state = HSM_ST_IDLE; | 1318 | ap->hsm_task_state = HSM_ST_IDLE; |
| 1324 | 1319 | ||
| 1325 | /* complete taskfile transaction */ | 1320 | /* complete taskfile transaction */ |
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index 24a011b25024..0d87eec84966 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c | |||
| @@ -20,29 +20,30 @@ | |||
| 20 | #include <linux/ata.h> | 20 | #include <linux/ata.h> |
| 21 | 21 | ||
| 22 | #define DRV_NAME "pata_marvell" | 22 | #define DRV_NAME "pata_marvell" |
| 23 | #define DRV_VERSION "0.1.4" | 23 | #define DRV_VERSION "0.1.6" |
| 24 | 24 | ||
| 25 | /** | 25 | /** |
| 26 | * marvell_pre_reset - check for 40/80 pin | 26 | * marvell_pata_active - check if PATA is active |
| 27 | * @link: link | 27 | * @pdev: PCI device |
| 28 | * @deadline: deadline jiffies for the operation | ||
| 29 | * | 28 | * |
| 30 | * Perform the PATA port setup we need. | 29 | * Returns 1 if the PATA port may be active. We know how to check this |
| 30 | * for the 6145 but not the other devices. | ||
| 31 | */ | 31 | */ |
| 32 | 32 | ||
| 33 | static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) | 33 | static int marvell_pata_active(struct pci_dev *pdev) |
| 34 | { | 34 | { |
| 35 | struct ata_port *ap = link->ap; | 35 | int i; |
| 36 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
| 37 | u32 devices; | 36 | u32 devices; |
| 38 | void __iomem *barp; | 37 | void __iomem *barp; |
| 39 | int i; | ||
| 40 | 38 | ||
| 41 | /* Check if our port is enabled */ | 39 | /* We don't yet know how to do this for other devices */ |
| 40 | if (pdev->device != 0x6145) | ||
| 41 | return 1; | ||
| 42 | 42 | ||
| 43 | barp = pci_iomap(pdev, 5, 0x10); | 43 | barp = pci_iomap(pdev, 5, 0x10); |
| 44 | if (barp == NULL) | 44 | if (barp == NULL) |
| 45 | return -ENOMEM; | 45 | return -ENOMEM; |
| 46 | |||
| 46 | printk("BAR5:"); | 47 | printk("BAR5:"); |
| 47 | for(i = 0; i <= 0x0F; i++) | 48 | for(i = 0; i <= 0x0F; i++) |
| 48 | printk("%02X:%02X ", i, ioread8(barp + i)); | 49 | printk("%02X:%02X ", i, ioread8(barp + i)); |
| @@ -51,9 +52,27 @@ static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) | |||
| 51 | devices = ioread32(barp + 0x0C); | 52 | devices = ioread32(barp + 0x0C); |
| 52 | pci_iounmap(pdev, barp); | 53 | pci_iounmap(pdev, barp); |
| 53 | 54 | ||
| 54 | if ((pdev->device == 0x6145) && (ap->port_no == 0) && | 55 | if (devices & 0x10) |
| 55 | (!(devices & 0x10))) /* PATA enable ? */ | 56 | return 1; |
| 56 | return -ENOENT; | 57 | return 0; |
| 58 | } | ||
| 59 | |||
| 60 | /** | ||
| 61 | * marvell_pre_reset - check for 40/80 pin | ||
| 62 | * @link: link | ||
| 63 | * @deadline: deadline jiffies for the operation | ||
| 64 | * | ||
| 65 | * Perform the PATA port setup we need. | ||
| 66 | */ | ||
| 67 | |||
| 68 | static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) | ||
| 69 | { | ||
| 70 | struct ata_port *ap = link->ap; | ||
| 71 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
| 72 | |||
| 73 | if (pdev->device == 0x6145 && ap->port_no == 0 && | ||
| 74 | !marvell_pata_active(pdev)) /* PATA enable ? */ | ||
| 75 | return -ENOENT; | ||
| 57 | 76 | ||
| 58 | return ata_sff_prereset(link, deadline); | 77 | return ata_sff_prereset(link, deadline); |
| 59 | } | 78 | } |
| @@ -128,6 +147,12 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
| 128 | if (pdev->device == 0x6101) | 147 | if (pdev->device == 0x6101) |
| 129 | ppi[1] = &ata_dummy_port_info; | 148 | ppi[1] = &ata_dummy_port_info; |
| 130 | 149 | ||
| 150 | #if defined(CONFIG_AHCI) || defined(CONFIG_AHCI_MODULE) | ||
| 151 | if (!marvell_pata_active(pdev)) { | ||
| 152 | printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n"); | ||
| 153 | return -ENODEV; | ||
| 154 | } | ||
| 155 | #endif | ||
| 131 | return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL); | 156 | return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL); |
| 132 | } | 157 | } |
| 133 | 158 | ||
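Together with the marvell_enable parameter added to ahci.c above, this gives the two drivers a symmetric handshake over the shared chip: ahci declines the device when the PATA port is active (or the user asked it to), and pata_marvell declines when PATA is inactive and AHCI is available. The probe-time shape, boiled down to a toy (hypothetical predicate; in the kernel, returning -ENODEV from probe lets the PCI core offer the device to the next driver):

#include <stdio.h>

static int pata_port_active = 1;        /* assumption for the demo */

/* AHCI side: back off when the PATA port is in use. */
static int ahci_probe(void)
{
        if (pata_port_active)
                return -1;      /* -ENODEV in the kernel */
        return 0;
}

/* PATA side: back off when only SATA is in use. */
static int pata_probe(void)
{
        if (!pata_port_active)
                return -1;
        return 0;
}

int main(void)
{
        printf("ahci: %d, pata: %d\n", ahci_probe(), pata_probe());
        return 0;
}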
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 720b8645f58a..e970b227fbce 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c | |||
| @@ -322,9 +322,6 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, | |||
| 322 | /* Try to acquire MMIO resources and fallback to PIO if | 322 | /* Try to acquire MMIO resources and fallback to PIO if |
| 323 | * that fails | 323 | * that fails |
| 324 | */ | 324 | */ |
| 325 | rc = pcim_enable_device(pdev); | ||
| 326 | if (rc) | ||
| 327 | return rc; | ||
| 328 | rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME); | 325 | rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME); |
| 329 | if (rc) | 326 | if (rc) |
| 330 | goto use_ioports; | 327 | goto use_ioports; |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 13c1d2af18ac..c815f8ecf6e6 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
| @@ -667,7 +667,8 @@ static const struct pci_device_id mv_pci_tbl[] = { | |||
| 667 | { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, | 667 | { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, |
| 668 | { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, | 668 | { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, |
| 669 | { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, | 669 | { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, |
| 670 | /* RocketRAID 1740/174x have different identifiers */ | 670 | /* RocketRAID 1720/174x have different identifiers */ |
| 671 | { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, | ||
| 671 | { PCI_VDEVICE(TTI, 0x1740), chip_508x }, | 672 | { PCI_VDEVICE(TTI, 0x1740), chip_508x }, |
| 672 | { PCI_VDEVICE(TTI, 0x1742), chip_508x }, | 673 | { PCI_VDEVICE(TTI, 0x1742), chip_508x }, |
| 673 | 674 | ||
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 858f70610eda..1e1f3f3757ae 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
| @@ -309,8 +309,6 @@ static void nv_nf2_freeze(struct ata_port *ap); | |||
| 309 | static void nv_nf2_thaw(struct ata_port *ap); | 309 | static void nv_nf2_thaw(struct ata_port *ap); |
| 310 | static void nv_ck804_freeze(struct ata_port *ap); | 310 | static void nv_ck804_freeze(struct ata_port *ap); |
| 311 | static void nv_ck804_thaw(struct ata_port *ap); | 311 | static void nv_ck804_thaw(struct ata_port *ap); |
| 312 | static int nv_hardreset(struct ata_link *link, unsigned int *class, | ||
| 313 | unsigned long deadline); | ||
| 314 | static int nv_adma_slave_config(struct scsi_device *sdev); | 312 | static int nv_adma_slave_config(struct scsi_device *sdev); |
| 315 | static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); | 313 | static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); |
| 316 | static void nv_adma_qc_prep(struct ata_queued_cmd *qc); | 314 | static void nv_adma_qc_prep(struct ata_queued_cmd *qc); |
| @@ -407,7 +405,7 @@ static struct scsi_host_template nv_swncq_sht = { | |||
| 407 | 405 | ||
| 408 | static struct ata_port_operations nv_generic_ops = { | 406 | static struct ata_port_operations nv_generic_ops = { |
| 409 | .inherits = &ata_bmdma_port_ops, | 407 | .inherits = &ata_bmdma_port_ops, |
| 410 | .hardreset = nv_hardreset, | 408 | .hardreset = ATA_OP_NULL, |
| 411 | .scr_read = nv_scr_read, | 409 | .scr_read = nv_scr_read, |
| 412 | .scr_write = nv_scr_write, | 410 | .scr_write = nv_scr_write, |
| 413 | }; | 411 | }; |
| @@ -1588,21 +1586,6 @@ static void nv_mcp55_thaw(struct ata_port *ap) | |||
| 1588 | ata_sff_thaw(ap); | 1586 | ata_sff_thaw(ap); |
| 1589 | } | 1587 | } |
| 1590 | 1588 | ||
| 1591 | static int nv_hardreset(struct ata_link *link, unsigned int *class, | ||
| 1592 | unsigned long deadline) | ||
| 1593 | { | ||
| 1594 | int rc; | ||
| 1595 | |||
| 1596 | /* SATA hardreset fails to retrieve proper device signature on | ||
| 1597 | * some controllers. Request follow up SRST. For more info, | ||
| 1598 | * see http://bugzilla.kernel.org/show_bug.cgi?id=3352 | ||
| 1599 | */ | ||
| 1600 | rc = sata_sff_hardreset(link, class, deadline); | ||
| 1601 | if (rc) | ||
| 1602 | return rc; | ||
| 1603 | return -EAGAIN; | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | static void nv_adma_error_handler(struct ata_port *ap) | 1589 | static void nv_adma_error_handler(struct ata_port *ap) |
| 1607 | { | 1590 | { |
| 1608 | struct nv_adma_port_priv *pp = ap->private_data; | 1591 | struct nv_adma_port_priv *pp = ap->private_data; |
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index 5ca1d80de182..4eee533f3f4a 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
| 22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
| 23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
| 24 | #include <linux/delay.h> | ||
| 24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
| 25 | 26 | ||
| 26 | /* | 27 | /* |
| @@ -151,13 +152,13 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE, | |||
| 151 | */ | 152 | */ |
| 152 | static int verify_pmtmr_rate(void) | 153 | static int verify_pmtmr_rate(void) |
| 153 | { | 154 | { |
| 154 | u32 value1, value2; | 155 | cycle_t value1, value2; |
| 155 | unsigned long count, delta; | 156 | unsigned long count, delta; |
| 156 | 157 | ||
| 157 | mach_prepare_counter(); | 158 | mach_prepare_counter(); |
| 158 | value1 = read_pmtmr(); | 159 | value1 = clocksource_acpi_pm.read(); |
| 159 | mach_countup(&count); | 160 | mach_countup(&count); |
| 160 | value2 = read_pmtmr(); | 161 | value2 = clocksource_acpi_pm.read(); |
| 161 | delta = (value2 - value1) & ACPI_PM_MASK; | 162 | delta = (value2 - value1) & ACPI_PM_MASK; |
| 162 | 163 | ||
| 163 | /* Check that the PMTMR delta is within 5% of what we expect */ | 164 | /* Check that the PMTMR delta is within 5% of what we expect */ |
| @@ -175,10 +176,13 @@ static int verify_pmtmr_rate(void) | |||
| 175 | #define verify_pmtmr_rate() (0) | 176 | #define verify_pmtmr_rate() (0) |
| 176 | #endif | 177 | #endif |
| 177 | 178 | ||
| 179 | /* Number of monotonicity checks to perform during initialization */ | ||
| 180 | #define ACPI_PM_MONOTONICITY_CHECKS 10 | ||
| 181 | |||
| 178 | static int __init init_acpi_pm_clocksource(void) | 182 | static int __init init_acpi_pm_clocksource(void) |
| 179 | { | 183 | { |
| 180 | u32 value1, value2; | 184 | cycle_t value1, value2; |
| 181 | unsigned int i; | 185 | unsigned int i, j, good = 0; |
| 182 | 186 | ||
| 183 | if (!pmtmr_ioport) | 187 | if (!pmtmr_ioport) |
| 184 | return -ENODEV; | 188 | return -ENODEV; |
| @@ -187,24 +191,32 @@ static int __init init_acpi_pm_clocksource(void) | |||
| 187 | clocksource_acpi_pm.shift); | 191 | clocksource_acpi_pm.shift); |
| 188 | 192 | ||
| 189 | /* "verify" this timing source: */ | 193 | /* "verify" this timing source: */ |
| 190 | value1 = read_pmtmr(); | 194 | for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) { |
| 191 | for (i = 0; i < 10000; i++) { | 195 | value1 = clocksource_acpi_pm.read(); |
| 192 | value2 = read_pmtmr(); | 196 | for (i = 0; i < 10000; i++) { |
| 193 | if (value2 == value1) | 197 | value2 = clocksource_acpi_pm.read(); |
| 194 | continue; | 198 | if (value2 == value1) |
| 195 | if (value2 > value1) | 199 | continue; |
| 196 | goto pm_good; | 200 | if (value2 > value1) |
| 197 | if ((value2 < value1) && ((value2) < 0xFFF)) | 201 | good++; |
| 198 | goto pm_good; | 202 | break; |
| 199 | printk(KERN_INFO "PM-Timer had inconsistent results:" | 203 | if ((value2 < value1) && ((value2) < 0xFFF)) |
| 200 | " 0x%#x, 0x%#x - aborting.\n", value1, value2); | 204 | good++; |
| 201 | return -EINVAL; | 205 | break; |
| 206 | printk(KERN_INFO "PM-Timer had inconsistent results:" | ||
| 207 | " 0x%#llx, 0x%#llx - aborting.\n", | ||
| 208 | value1, value2); | ||
| 209 | return -EINVAL; | ||
| 210 | } | ||
| 211 | udelay(300 * i); | ||
| 212 | } | ||
| 213 | |||
| 214 | if (good != ACPI_PM_MONOTONICITY_CHECKS) { | ||
| 215 | printk(KERN_INFO "PM-Timer failed consistency check " | ||
| 216 | " (0x%#llx) - aborting.\n", value1); | ||
| 217 | return -ENODEV; | ||
| 202 | } | 218 | } |
| 203 | printk(KERN_INFO "PM-Timer had no reasonable result:" | ||
| 204 | " 0x%#x - aborting.\n", value1); | ||
| 205 | return -ENODEV; | ||
| 206 | 219 | ||
| 207 | pm_good: | ||
| 208 | if (verify_pmtmr_rate() != 0) | 220 | if (verify_pmtmr_rate() != 0) |
| 209 | return -ENODEV; | 221 | return -ENODEV; |
| 210 | 222 | ||
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 2ec921bf3c60..18f4d7f6ce6d 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
| @@ -63,7 +63,7 @@ | |||
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | /* table of devices that work with this driver */ | 65 | /* table of devices that work with this driver */ |
| 66 | static const struct usb_device_id bcm5974_table [] = { | 66 | static const struct usb_device_id bcm5974_table[] = { |
| 67 | /* MacbookAir1.1 */ | 67 | /* MacbookAir1.1 */ |
| 68 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), | 68 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), |
| 69 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO), | 69 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO), |
| @@ -105,7 +105,7 @@ struct tp_header { | |||
| 105 | 105 | ||
| 106 | /* trackpad finger structure */ | 106 | /* trackpad finger structure */ |
| 107 | struct tp_finger { | 107 | struct tp_finger { |
| 108 | __le16 origin; /* left/right origin? */ | 108 | __le16 origin; /* zero when switching track finger */ |
| 109 | __le16 abs_x; /* absolute x coordinate */ | 109 | __le16 abs_x; /* absolute x coordinate */ |
| 110 | __le16 abs_y; /* absolute y coordinate */ | 110 | __le16 abs_y; /* absolute y coordinate */ |
| 111 | __le16 rel_x; /* relative x coordinate */ | 111 | __le16 rel_x; /* relative x coordinate */ |
| @@ -159,6 +159,7 @@ struct bcm5974 { | |||
| 159 | struct bt_data *bt_data; /* button transferred data */ | 159 | struct bt_data *bt_data; /* button transferred data */ |
| 160 | struct urb *tp_urb; /* trackpad usb request block */ | 160 | struct urb *tp_urb; /* trackpad usb request block */ |
| 161 | struct tp_data *tp_data; /* trackpad transferred data */ | 161 | struct tp_data *tp_data; /* trackpad transferred data */ |
| 162 | int fingers; /* number of fingers on trackpad */ | ||
| 162 | }; | 163 | }; |
| 163 | 164 | ||
| 164 | /* logical dimensions */ | 165 | /* logical dimensions */ |
| @@ -172,6 +173,10 @@ struct bcm5974 { | |||
| 172 | #define SN_WIDTH 100 /* width signal-to-noise ratio */ | 173 | #define SN_WIDTH 100 /* width signal-to-noise ratio */ |
| 173 | #define SN_COORD 250 /* coordinate signal-to-noise ratio */ | 174 | #define SN_COORD 250 /* coordinate signal-to-noise ratio */ |
| 174 | 175 | ||
| 176 | /* pressure thresholds */ | ||
| 177 | #define PRESSURE_LOW (2 * DIM_PRESSURE / SN_PRESSURE) | ||
| 178 | #define PRESSURE_HIGH (3 * PRESSURE_LOW) | ||
| 179 | |||
| 175 | /* device constants */ | 180 | /* device constants */ |
| 176 | static const struct bcm5974_config bcm5974_config_table[] = { | 181 | static const struct bcm5974_config bcm5974_config_table[] = { |
| 177 | { | 182 | { |
| @@ -248,6 +253,7 @@ static void setup_events_to_report(struct input_dev *input_dev, | |||
| 248 | 0, cfg->y.dim, cfg->y.fuzz, 0); | 253 | 0, cfg->y.dim, cfg->y.fuzz, 0); |
| 249 | 254 | ||
| 250 | __set_bit(EV_KEY, input_dev->evbit); | 255 | __set_bit(EV_KEY, input_dev->evbit); |
| 256 | __set_bit(BTN_TOUCH, input_dev->keybit); | ||
| 251 | __set_bit(BTN_TOOL_FINGER, input_dev->keybit); | 257 | __set_bit(BTN_TOOL_FINGER, input_dev->keybit); |
| 252 | __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); | 258 | __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); |
| 253 | __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); | 259 | __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); |
| @@ -273,32 +279,66 @@ static int report_tp_state(struct bcm5974 *dev, int size) | |||
| 273 | const struct tp_finger *f = dev->tp_data->finger; | 279 | const struct tp_finger *f = dev->tp_data->finger; |
| 274 | struct input_dev *input = dev->input; | 280 | struct input_dev *input = dev->input; |
| 275 | const int fingers = (size - 26) / 28; | 281 | const int fingers = (size - 26) / 28; |
| 276 | int p = 0, w, x, y, n = 0; | 282 | int raw_p, raw_w, raw_x, raw_y; |
| 283 | int ptest = 0, origin = 0, nmin = 0, nmax = 0; | ||
| 284 | int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0; | ||
| 277 | 285 | ||
| 278 | if (size < 26 || (size - 26) % 28 != 0) | 286 | if (size < 26 || (size - 26) % 28 != 0) |
| 279 | return -EIO; | 287 | return -EIO; |
| 280 | 288 | ||
| 289 | /* always track the first finger; when detached, start over */ | ||
| 281 | if (fingers) { | 290 | if (fingers) { |
| 282 | p = raw2int(f->force_major); | 291 | raw_p = raw2int(f->force_major); |
| 283 | w = raw2int(f->size_major); | 292 | raw_w = raw2int(f->size_major); |
| 284 | x = raw2int(f->abs_x); | 293 | raw_x = raw2int(f->abs_x); |
| 285 | y = raw2int(f->abs_y); | 294 | raw_y = raw2int(f->abs_y); |
| 286 | n = p > 0 ? fingers : 0; | ||
| 287 | 295 | ||
| 288 | dprintk(9, | 296 | dprintk(9, |
| 289 | "bcm5974: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n", | 297 | "bcm5974: raw: p: %+05d w: %+05d x: %+05d y: %+05d\n", |
| 290 | p, w, x, y, n); | 298 | raw_p, raw_w, raw_x, raw_y); |
| 299 | |||
| 300 | ptest = int2bound(&c->p, raw_p); | ||
| 301 | origin = raw2int(f->origin); | ||
| 302 | } | ||
| 291 | 303 | ||
| 292 | input_report_abs(input, ABS_TOOL_WIDTH, int2bound(&c->w, w)); | 304 | /* while tracking finger still valid, count all fingers */ |
| 293 | input_report_abs(input, ABS_X, int2bound(&c->x, x - c->x.devmin)); | 305 | if (ptest > PRESSURE_LOW && origin) { |
| 294 | input_report_abs(input, ABS_Y, int2bound(&c->y, c->y.devmax - y)); | 306 | abs_p = ptest; |
| 307 | abs_w = int2bound(&c->w, raw_w); | ||
| 308 | abs_x = int2bound(&c->x, raw_x - c->x.devmin); | ||
| 309 | abs_y = int2bound(&c->y, c->y.devmax - raw_y); | ||
| 310 | for (; f != dev->tp_data->finger + fingers; f++) { | ||
| 311 | ptest = int2bound(&c->p, raw2int(f->force_major)); | ||
| 312 | if (ptest > PRESSURE_LOW) | ||
| 313 | nmax++; | ||
| 314 | if (ptest > PRESSURE_HIGH) | ||
| 315 | nmin++; | ||
| 316 | } | ||
| 295 | } | 317 | } |
| 296 | 318 | ||
| 297 | input_report_abs(input, ABS_PRESSURE, int2bound(&c->p, p)); | 319 | if (dev->fingers < nmin) |
| 320 | dev->fingers = nmin; | ||
| 321 | if (dev->fingers > nmax) | ||
| 322 | dev->fingers = nmax; | ||
| 323 | |||
| 324 | input_report_key(input, BTN_TOUCH, dev->fingers > 0); | ||
| 325 | input_report_key(input, BTN_TOOL_FINGER, dev->fingers == 1); | ||
| 326 | input_report_key(input, BTN_TOOL_DOUBLETAP, dev->fingers == 2); | ||
| 327 | input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers > 2); | ||
| 298 | 328 | ||
| 299 | input_report_key(input, BTN_TOOL_FINGER, n == 1); | 329 | input_report_abs(input, ABS_PRESSURE, abs_p); |
| 300 | input_report_key(input, BTN_TOOL_DOUBLETAP, n == 2); | 330 | input_report_abs(input, ABS_TOOL_WIDTH, abs_w); |
| 301 | input_report_key(input, BTN_TOOL_TRIPLETAP, n > 2); | 331 | |
| 332 | if (abs_p) { | ||
| 333 | input_report_abs(input, ABS_X, abs_x); | ||
| 334 | input_report_abs(input, ABS_Y, abs_y); | ||
| 335 | |||
| 336 | dprintk(8, | ||
| 337 | "bcm5974: abs: p: %+05d w: %+05d x: %+05d y: %+05d " | ||
| 338 | "nmin: %d nmax: %d n: %d\n", | ||
| 339 | abs_p, abs_w, abs_x, abs_y, nmin, nmax, dev->fingers); | ||
| 340 | |||
| 341 | } | ||
| 302 | 342 | ||
| 303 | input_sync(input); | 343 | input_sync(input); |
| 304 | 344 | ||
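The rewritten report_tp_state() debounces the finger count with the two new thresholds: a finger must press past PRESSURE_HIGH to raise the count (nmin), but only needs to stay above PRESSURE_LOW to keep it (nmax), and the sticky dev->fingers value is clamped into [nmin, nmax]. A standalone sketch of that hysteresis, with made-up threshold values (the driver derives its own from DIM_PRESSURE and SN_PRESSURE):

	#include <stdio.h>

	#define PRESSURE_LOW	5	/* hypothetical release threshold */
	#define PRESSURE_HIGH	15	/* hypothetical touch threshold (3x LOW) */

	/* Clamp the sticky finger count into [nmin, nmax], as report_tp_state()
	 * does: fingers only join above HIGH and only leave below LOW, so a
	 * touch hovering between the two thresholds cannot flicker.
	 */
	static int debounce(int fingers, const int *pressure, int n)
	{
		int i, nmin = 0, nmax = 0;

		for (i = 0; i < n; i++) {
			if (pressure[i] > PRESSURE_LOW)
				nmax++;		/* pressed enough to stay counted */
			if (pressure[i] > PRESSURE_HIGH)
				nmin++;		/* pressed enough to join */
		}
		if (fingers < nmin)
			fingers = nmin;
		if (fingers > nmax)
			fingers = nmax;
		return fingers;
	}

	int main(void)
	{
		int p[] = { 20, 8 };	/* one firm touch, one in the band */
		int fingers = 0;

		fingers = debounce(fingers, p, 2);	/* -> 1: band finger not counted in */
		printf("fingers = %d\n", fingers);
		p[0] = 8;				/* firm touch eases into the band */
		fingers = debounce(fingers, p, 2);	/* stays 1: not counted out either */
		printf("fingers = %d\n", fingers);
		return 0;
	}

The band between the two thresholds is what keeps BTN_TOOL_FINGER/DOUBLETAP/TRIPLETAP from toggling on and off every frame under a light, wavering touch.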
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 3282b741e246..5aafe24984c5 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
| @@ -305,7 +305,7 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { | |||
| 305 | .ident = "Lenovo 3000 n100", | 305 | .ident = "Lenovo 3000 n100", |
| 306 | .matches = { | 306 | .matches = { |
| 307 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 307 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
| 308 | DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"), | 308 | DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), |
| 309 | }, | 309 | }, |
| 310 | }, | 310 | }, |
| 311 | { | 311 | { |
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index db00b0591733..f1216cf6fa8f 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
| @@ -423,7 +423,7 @@ int chp_new(struct chp_id chpid) | |||
| 423 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | 423 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); |
| 424 | if (ret) { | 424 | if (ret) { |
| 425 | device_unregister(&chp->dev); | 425 | device_unregister(&chp->dev); |
| 426 | goto out_free; | 426 | goto out; |
| 427 | } | 427 | } |
| 428 | mutex_lock(&channel_subsystems[chpid.cssid]->mutex); | 428 | mutex_lock(&channel_subsystems[chpid.cssid]->mutex); |
| 429 | if (channel_subsystems[chpid.cssid]->cm_enabled) { | 429 | if (channel_subsystems[chpid.cssid]->cm_enabled) { |
| @@ -432,14 +432,15 @@ int chp_new(struct chp_id chpid) | |||
| 432 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); | 432 | sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); |
| 433 | device_unregister(&chp->dev); | 433 | device_unregister(&chp->dev); |
| 434 | mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); | 434 | mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); |
| 435 | goto out_free; | 435 | goto out; |
| 436 | } | 436 | } |
| 437 | } | 437 | } |
| 438 | channel_subsystems[chpid.cssid]->chps[chpid.id] = chp; | 438 | channel_subsystems[chpid.cssid]->chps[chpid.id] = chp; |
| 439 | mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); | 439 | mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); |
| 440 | return ret; | 440 | goto out; |
| 441 | out_free: | 441 | out_free: |
| 442 | kfree(chp); | 442 | kfree(chp); |
| 443 | out: | ||
| 443 | return ret; | 444 | return ret; |
| 444 | } | 445 | } |
| 445 | 446 | ||
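The chp_new() change above is a lifetime fix, not a label rename: once device_register() has succeeded, the chp object is owned by the driver core, and device_unregister() drops the reference so the release callback frees it. Falling through to the kfree() label afterwards would be a double free, hence those error paths now jump past it. In outline (comments are editorial):

	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out;	/* release() frees chp; reaching the
				 * kfree() label would double free it */
	}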
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 33bff8fec7d1..5954b905e3ca 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
| @@ -208,8 +208,10 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ | |||
| 208 | case 1: /* status pending */ | 208 | case 1: /* status pending */ |
| 209 | case 2: /* busy */ | 209 | case 2: /* busy */ |
| 210 | return -EBUSY; | 210 | return -EBUSY; |
| 211 | default: /* device/path not operational */ | 211 | case 3: /* device/path not operational */ |
| 212 | return cio_start_handle_notoper(sch, lpm); | 212 | return cio_start_handle_notoper(sch, lpm); |
| 213 | default: | ||
| 214 | return ccode; | ||
| 213 | } | 215 | } |
| 214 | } | 216 | } |
| 215 | 217 | ||
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 51489eff6b0b..1261e1a9e8cd 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
| @@ -633,6 +633,11 @@ channel_subsystem_release(struct device *dev) | |||
| 633 | 633 | ||
| 634 | css = to_css(dev); | 634 | css = to_css(dev); |
| 635 | mutex_destroy(&css->mutex); | 635 | mutex_destroy(&css->mutex); |
| 636 | if (css->pseudo_subchannel) { | ||
| 637 | /* Implies that it has been generated but never registered. */ | ||
| 638 | css_subchannel_release(&css->pseudo_subchannel->dev); | ||
| 639 | css->pseudo_subchannel = NULL; | ||
| 640 | } | ||
| 636 | kfree(css); | 641 | kfree(css); |
| 637 | } | 642 | } |
| 638 | 643 | ||
| @@ -785,11 +790,15 @@ init_channel_subsystem (void) | |||
| 785 | } | 790 | } |
| 786 | channel_subsystems[i] = css; | 791 | channel_subsystems[i] = css; |
| 787 | ret = setup_css(i); | 792 | ret = setup_css(i); |
| 788 | if (ret) | 793 | if (ret) { |
| 789 | goto out_free; | 794 | kfree(channel_subsystems[i]); |
| 795 | goto out_unregister; | ||
| 796 | } | ||
| 790 | ret = device_register(&css->device); | 797 | ret = device_register(&css->device); |
| 791 | if (ret) | 798 | if (ret) { |
| 792 | goto out_free_all; | 799 | put_device(&css->device); |
| 800 | goto out_unregister; | ||
| 801 | } | ||
| 793 | if (css_chsc_characteristics.secm) { | 802 | if (css_chsc_characteristics.secm) { |
| 794 | ret = device_create_file(&css->device, | 803 | ret = device_create_file(&css->device, |
| 795 | &dev_attr_cm_enable); | 804 | &dev_attr_cm_enable); |
| @@ -802,7 +811,7 @@ init_channel_subsystem (void) | |||
| 802 | } | 811 | } |
| 803 | ret = register_reboot_notifier(&css_reboot_notifier); | 812 | ret = register_reboot_notifier(&css_reboot_notifier); |
| 804 | if (ret) | 813 | if (ret) |
| 805 | goto out_pseudo; | 814 | goto out_unregister; |
| 806 | css_init_done = 1; | 815 | css_init_done = 1; |
| 807 | 816 | ||
| 808 | /* Enable default isc for I/O subchannels. */ | 817 | /* Enable default isc for I/O subchannels. */ |
| @@ -810,18 +819,12 @@ init_channel_subsystem (void) | |||
| 810 | 819 | ||
| 811 | for_each_subchannel(__init_channel_subsystem, NULL); | 820 | for_each_subchannel(__init_channel_subsystem, NULL); |
| 812 | return 0; | 821 | return 0; |
| 813 | out_pseudo: | ||
| 814 | device_unregister(&channel_subsystems[i]->pseudo_subchannel->dev); | ||
| 815 | out_file: | 822 | out_file: |
| 816 | device_remove_file(&channel_subsystems[i]->device, | 823 | if (css_chsc_characteristics.secm) |
| 817 | &dev_attr_cm_enable); | 824 | device_remove_file(&channel_subsystems[i]->device, |
| 825 | &dev_attr_cm_enable); | ||
| 818 | out_device: | 826 | out_device: |
| 819 | device_unregister(&channel_subsystems[i]->device); | 827 | device_unregister(&channel_subsystems[i]->device); |
| 820 | out_free_all: | ||
| 821 | kfree(channel_subsystems[i]->pseudo_subchannel->lock); | ||
| 822 | kfree(channel_subsystems[i]->pseudo_subchannel); | ||
| 823 | out_free: | ||
| 824 | kfree(channel_subsystems[i]); | ||
| 825 | out_unregister: | 828 | out_unregister: |
| 826 | while (i > 0) { | 829 | while (i > 0) { |
| 827 | struct channel_subsystem *css; | 830 | struct channel_subsystem *css; |
| @@ -829,6 +832,7 @@ out_unregister: | |||
| 829 | i--; | 832 | i--; |
| 830 | css = channel_subsystems[i]; | 833 | css = channel_subsystems[i]; |
| 831 | device_unregister(&css->pseudo_subchannel->dev); | 834 | device_unregister(&css->pseudo_subchannel->dev); |
| 835 | css->pseudo_subchannel = NULL; | ||
| 832 | if (css_chsc_characteristics.secm) | 836 | if (css_chsc_characteristics.secm) |
| 833 | device_remove_file(&css->device, | 837 | device_remove_file(&css->device, |
| 834 | &dev_attr_cm_enable); | 838 | &dev_attr_cm_enable); |
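The same ownership rule drives the init_channel_subsystem() rework: a css that failed in setup_css() was never handed to the driver core and may simply be kfree()d, while a failed device_register() has already initialized the embedded kobject, so the reference must be dropped with put_device() and the release callback does the freeing, now including (via the new channel_subsystem_release() branch) a pseudo subchannel that was generated but never registered. Condensed from the hunk, with editorial comments:

	ret = setup_css(i);
	if (ret) {
		kfree(channel_subsystems[i]);	/* never registered: still ours */
		goto out_unregister;
	}
	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);	/* drop the initial reference;
						 * release() frees css for us */
		goto out_unregister;
	}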
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 550508df952b..84cc9ea346db 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
| @@ -658,6 +658,13 @@ ccw_device_offline(struct ccw_device *cdev) | |||
| 658 | { | 658 | { |
| 659 | struct subchannel *sch; | 659 | struct subchannel *sch; |
| 660 | 660 | ||
| 661 | /* Allow ccw_device_offline while disconnected. */ | ||
| 662 | if (cdev->private->state == DEV_STATE_DISCONNECTED || | ||
| 663 | cdev->private->state == DEV_STATE_NOT_OPER) { | ||
| 664 | cdev->private->flags.donotify = 0; | ||
| 665 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | ||
| 666 | return 0; | ||
| 667 | } | ||
| 661 | if (ccw_device_is_orphan(cdev)) { | 668 | if (ccw_device_is_orphan(cdev)) { |
| 662 | ccw_device_done(cdev, DEV_STATE_OFFLINE); | 669 | ccw_device_done(cdev, DEV_STATE_OFFLINE); |
| 663 | return 0; | 670 | return 0; |
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 1eb64d08b60a..95b3ec89c126 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c | |||
| @@ -208,7 +208,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) | |||
| 208 | if (cpu_is_omap16xx()) | 208 | if (cpu_is_omap16xx()) |
| 209 | ocpi_enable(); | 209 | ocpi_enable(); |
| 210 | 210 | ||
| 211 | #ifdef CONFIG_ARCH_OMAP_OTG | 211 | #ifdef CONFIG_USB_OTG |
| 212 | if (need_transceiver) { | 212 | if (need_transceiver) { |
| 213 | ohci->transceiver = otg_get_transceiver(); | 213 | ohci->transceiver = otg_get_transceiver(); |
| 214 | if (ohci->transceiver) { | 214 | if (ohci->transceiver) { |
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 7b74238ad1c7..e980766bb84b 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
| @@ -161,7 +161,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
| 161 | if (serial->type->set_termios) { | 161 | if (serial->type->set_termios) { |
| 162 | termios->c_cflag = cflag; | 162 | termios->c_cflag = cflag; |
| 163 | tty_termios_encode_baud_rate(termios, baud, baud); | 163 | tty_termios_encode_baud_rate(termios, baud, baud); |
| 164 | serial->type->set_termios(NULL, port, &dummy); | 164 | serial->type->set_termios(tty, port, &dummy); |
| 165 | 165 | ||
| 166 | port->port.tty = NULL; | 166 | port->port.tty = NULL; |
| 167 | kfree(termios); | 167 | kfree(termios); |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index d4427cb86979..2e15da5459cf 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
| @@ -60,7 +60,7 @@ | |||
| 60 | 60 | ||
| 61 | #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) | 61 | #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) |
| 62 | 62 | ||
| 63 | #define BALLOON_CLASS_NAME "memory" | 63 | #define BALLOON_CLASS_NAME "xen_memory" |
| 64 | 64 | ||
| 65 | struct balloon_stats { | 65 | struct balloon_stats { |
| 66 | /* We aim for 'current allocation' == 'target allocation'. */ | 66 | /* We aim for 'current allocation' == 'target allocation'. */ |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 9abcd2b329f7..e9b20173fef3 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
| @@ -1279,6 +1279,12 @@ static int nfs_parse_mount_options(char *raw, | |||
| 1279 | } | 1279 | } |
| 1280 | } | 1280 | } |
| 1281 | 1281 | ||
| 1282 | if (errors > 0) { | ||
| 1283 | dfprintk(MOUNT, "NFS: parsing encountered %d error%s\n", | ||
| 1284 | errors, (errors == 1 ? "" : "s")); | ||
| 1285 | if (!sloppy) | ||
| 1286 | return 0; | ||
| 1287 | } | ||
| 1282 | return 1; | 1288 | return 1; |
| 1283 | 1289 | ||
| 1284 | out_nomem: | 1290 | out_nomem: |
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 154098157473..73db464cd08b 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c | |||
| @@ -302,18 +302,6 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs) | |||
| 302 | int subtract_lebs; | 302 | int subtract_lebs; |
| 303 | long long available; | 303 | long long available; |
| 304 | 304 | ||
| 305 | /* | ||
| 306 | * Force the amount available to the total size reported if the used | ||
| 307 | * space is zero. | ||
| 308 | */ | ||
| 309 | if (c->lst.total_used <= UBIFS_INO_NODE_SZ && | ||
| 310 | c->budg_data_growth + c->budg_dd_growth == 0) { | ||
| 311 | /* Do the same calculation as for c->block_cnt */ | ||
| 312 | available = c->main_lebs - 2; | ||
| 313 | available *= c->leb_size - c->dark_wm; | ||
| 314 | return available; | ||
| 315 | } | ||
| 316 | |||
| 317 | available = c->main_bytes - c->lst.total_used; | 305 | available = c->main_bytes - c->lst.total_used; |
| 318 | 306 | ||
| 319 | /* | 307 | /* |
| @@ -714,34 +702,106 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, | |||
| 714 | } | 702 | } |
| 715 | 703 | ||
| 716 | /** | 704 | /** |
| 717 | * ubifs_budg_get_free_space - return amount of free space. | 705 | * ubifs_reported_space - calculate reported free space. |
| 706 | * @c: the UBIFS file-system description object | ||
| 707 | * @free: amount of free space | ||
| 708 | * | ||
| 709 | * This function calculates amount of free space which will be reported to | ||
| 710 | * user-space. User-space application tend to expect that if the file-system | ||
| 711 | * (e.g., via the 'statfs()' call) reports that it has N bytes available, they | ||
| 712 | * are able to write a file of size N. UBIFS attaches node headers to each data | ||
| 713 | * node and it has to write indexind nodes as well. This introduces additional | ||
| 714 | * overhead, and UBIFS it has to report sligtly less free space to meet the | ||
| 715 | * above expectetion. | ||
| 716 | * | ||
| 717 | * This function assumes free space is made up of uncompressed data nodes and | ||
| 718 | * full index nodes (one per data node, tripled because we always allow enough | ||
| 719 | * space to write the index thrice). | ||
| 720 | * | ||
| 721 | * Note, the calculation is pessimistic, which means that most of the time | ||
| 722 | * UBIFS reports less space than it actually has. | ||
| 723 | */ | ||
| 724 | long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free) | ||
| 725 | { | ||
| 726 | int divisor, factor, f; | ||
| 727 | |||
| 728 | /* | ||
| 729 | * Reported space size is @free * X, where X is UBIFS block size | ||
| 730 | * divided by UBIFS block size + all overhead one data block | ||
| 731 | * introduces. The overhead is the node header + indexing overhead. | ||
| 732 | * | ||
| 733 | * Indexing overhead calculations are based on the following formula: | ||
| 734 | * I = N/(f - 1) + 1, where I is the number of indexing nodes, N the number | ||
| 735 | * of data nodes, and f the fanout. Because the effective UBIFS fanout is | ||
| 736 | * half the maximum fanout, we assume that each data node | ||
| 737 | * introduces 3 * @c->max_idx_node_sz / (@c->fanout/2 - 1) bytes. | ||
| 738 | * Note, the multiplier 3 is because UBIFS reserves three times as much | ||
| 739 | * space for the index. | ||
| 740 | */ | ||
| 741 | f = c->fanout > 3 ? c->fanout >> 1 : 2; | ||
| 742 | factor = UBIFS_BLOCK_SIZE; | ||
| 743 | divisor = UBIFS_MAX_DATA_NODE_SZ; | ||
| 744 | divisor += (c->max_idx_node_sz * 3) / (f - 1); | ||
| 745 | free *= factor; | ||
| 746 | do_div(free, divisor); | ||
| 747 | return free; | ||
| 748 | } | ||
| 749 | |||
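To make the formula concrete, here is a worked example under assumed sizes, not values taken from the hunk: 4 KiB blocks, a maximum data node of about 4144 bytes (4 KiB of data plus a header of a few dozen bytes), a hypothetical 192-byte maximum index node, and fanout 8, so the effective fanout f is 4 and each data node is charged three maximum index nodes spread over f - 1 = 3 data nodes:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Assumed example values, illustrative only */
		const int block_size       = 4096;	/* UBIFS_BLOCK_SIZE */
		const int max_data_node_sz = 4144;	/* 4 KiB data plus header */
		const int max_idx_node_sz  = 192;	/* hypothetical, media dependent */
		const int fanout           = 8;

		int f = fanout > 3 ? fanout >> 1 : 2;	/* effective fanout: 4 */
		int divisor = max_data_node_sz + (max_idx_node_sz * 3) / (f - 1);
		uint64_t free_bytes = 1ULL << 30;	/* 1 GiB of raw free space */
		uint64_t reported   = free_bytes * block_size / divisor;

		printf("raw %llu -> reported %llu (%.1f%%)\n",
		       (unsigned long long)free_bytes,
		       (unsigned long long)reported,
		       100.0 * (double)reported / (double)free_bytes);
		return 0;
	}

With these numbers the divisor is 4336 and roughly 94.5% of the raw free space is reported, i.e. the pessimism costs a few percent.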
| 750 | /** | ||
| 751 | * ubifs_get_free_space - return amount of free space. | ||
| 718 | * @c: UBIFS file-system description object | 752 | * @c: UBIFS file-system description object |
| 719 | * | 753 | * |
| 720 | * This function returns amount of free space on the file-system. | 754 | * This function calculates amount of free space to report to user-space. |
| 755 | * | ||
| 756 | * Because UBIFS may introduce substantial overhead (the index, node headers, | ||
| 757 | * alignment, wastage at the end of eraseblocks, etc), it cannot report the real | ||
| 758 | * amount of free flash space it has (indeed, because not all dirty space is | ||
| 759 | * reclaimable, UBIFS does not actually know the real amount). If UBIFS did so, | ||
| 760 | * it would break user expectations about what free space is. Users seem | ||
| 761 | * accustomed to assuming that if the file-system reports N bytes of free space, | ||
| 762 | * they will be able to fit a file of N bytes into the FS. This almost works for | ||
| 763 | * traditional file-systems, because they have far less overhead than UBIFS. | ||
| 764 | * So, to keep users happy, UBIFS tries to take the overhead into account. | ||
| 721 | */ | 765 | */ |
| 722 | long long ubifs_budg_get_free_space(struct ubifs_info *c) | 766 | long long ubifs_get_free_space(struct ubifs_info *c) |
| 723 | { | 767 | { |
| 724 | int min_idx_lebs, rsvd_idx_lebs; | 768 | int min_idx_lebs, rsvd_idx_lebs, lebs; |
| 725 | long long available, outstanding, free; | 769 | long long available, outstanding, free; |
| 726 | 770 | ||
| 727 | /* Do exactly the same calculations as in 'do_budget_space()' */ | ||
| 728 | spin_lock(&c->space_lock); | 771 | spin_lock(&c->space_lock); |
| 729 | min_idx_lebs = ubifs_calc_min_idx_lebs(c); | 772 | min_idx_lebs = ubifs_calc_min_idx_lebs(c); |
| 773 | outstanding = c->budg_data_growth + c->budg_dd_growth; | ||
| 730 | 774 | ||
| 731 | if (min_idx_lebs > c->lst.idx_lebs) | 775 | /* |
| 732 | rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs; | 776 | * Force the amount available to the total size reported if the used |
| 733 | else | 777 | * space is zero. |
| 734 | rsvd_idx_lebs = 0; | 778 | */ |
| 735 | 779 | if (c->lst.total_used <= UBIFS_INO_NODE_SZ && !outstanding) { | |
| 736 | if (rsvd_idx_lebs > c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt | ||
| 737 | - c->lst.taken_empty_lebs) { | ||
| 738 | spin_unlock(&c->space_lock); | 780 | spin_unlock(&c->space_lock); |
| 739 | return 0; | 781 | return (long long)c->block_cnt << UBIFS_BLOCK_SHIFT; |
| 740 | } | 782 | } |
| 741 | 783 | ||
| 742 | available = ubifs_calc_available(c, min_idx_lebs); | 784 | available = ubifs_calc_available(c, min_idx_lebs); |
| 743 | outstanding = c->budg_data_growth + c->budg_dd_growth; | 785 | |
| 744 | c->min_idx_lebs = min_idx_lebs; | 786 | /* |
| 787 | * When reporting free space to user-space, UBIFS guarantees that it is | ||
| 788 | * possible to write a file of the reported free-space size. This means that for | ||
| 789 | * empty LEBs we may use more precise calculations than | ||
| 790 | * 'ubifs_calc_available()' is using. Namely, we know that in empty | ||
| 791 | * LEBs we would waste only @c->leb_overhead bytes, not @c->dark_wm. | ||
| 792 | * Thus, amend the available space. | ||
| 793 | * | ||
| 794 | * Note, the calculations below are similar to what we have in | ||
| 795 | * 'do_budget_space()', so refer there for comments. | ||
| 796 | */ | ||
| 797 | if (min_idx_lebs > c->lst.idx_lebs) | ||
| 798 | rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs; | ||
| 799 | else | ||
| 800 | rsvd_idx_lebs = 0; | ||
| 801 | lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - | ||
| 802 | c->lst.taken_empty_lebs; | ||
| 803 | lebs -= rsvd_idx_lebs; | ||
| 804 | available += lebs * (c->dark_wm - c->leb_overhead); | ||
| 745 | spin_unlock(&c->space_lock); | 805 | spin_unlock(&c->space_lock); |
| 746 | 806 | ||
| 747 | if (available > outstanding) | 807 | if (available > outstanding) |
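The amendment at the end of ubifs_get_free_space() exploits the fact that for a completely empty LEB the pessimistic dark watermark overstates the waste: if user data arrives as maximum-size data nodes, an empty LEB wastes exactly leb_size % UBIFS_MAX_DATA_NODE_SZ bytes, the new c->leb_overhead, so (dark_wm - leb_overhead) bytes per countable empty LEB can be handed back. A numeric sketch under assumed geometry:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative geometry, not taken from the hunk */
		const int leb_size     = 128 * 1024;	/* 128 KiB erase block */
		const int dark_wm      = 4096;		/* assumed dark watermark */
		const int max_data_sz  = 4144;		/* max data node incl. header */
		const int leb_overhead = leb_size % max_data_sz;

		/* bytes handed back per countable empty LEB by the amendment */
		printf("leb_overhead = %d, regained per LEB = %d\n",
		       leb_overhead, dark_wm - leb_overhead);	/* 2608 and 1488 */
		return 0;
	}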
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 5c96f1fb7016..2b267c9a1806 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
| @@ -587,7 +587,6 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry) | |||
| 587 | if (err) { | 587 | if (err) { |
| 588 | if (err != -ENOSPC) | 588 | if (err != -ENOSPC) |
| 589 | return err; | 589 | return err; |
| 590 | err = 0; | ||
| 591 | budgeted = 0; | 590 | budgeted = 0; |
| 592 | } | 591 | } |
| 593 | 592 | ||
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 4071d1cae29f..3d698e2022b1 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
| @@ -793,7 +793,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, | |||
| 793 | int err; | 793 | int err; |
| 794 | struct ubifs_budget_req req; | 794 | struct ubifs_budget_req req; |
| 795 | loff_t old_size = inode->i_size, new_size = attr->ia_size; | 795 | loff_t old_size = inode->i_size, new_size = attr->ia_size; |
| 796 | int offset = new_size & (UBIFS_BLOCK_SIZE - 1); | 796 | int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1; |
| 797 | struct ubifs_inode *ui = ubifs_inode(inode); | 797 | struct ubifs_inode *ui = ubifs_inode(inode); |
| 798 | 798 | ||
| 799 | dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size); | 799 | dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size); |
| @@ -811,8 +811,15 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, | |||
| 811 | /* A funny way to budget for truncation node */ | 811 | /* A funny way to budget for truncation node */ |
| 812 | req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ; | 812 | req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ; |
| 813 | err = ubifs_budget_space(c, &req); | 813 | err = ubifs_budget_space(c, &req); |
| 814 | if (err) | 814 | if (err) { |
| 815 | return err; | 815 | /* |
| 816 | * Treat truncations to zero as deletion and always allow them, | ||
| 817 | * just like we do for '->unlink()'. | ||
| 818 | */ | ||
| 819 | if (new_size || err != -ENOSPC) | ||
| 820 | return err; | ||
| 821 | budgeted = 0; | ||
| 822 | } | ||
| 816 | 823 | ||
| 817 | err = vmtruncate(inode, new_size); | 824 | err = vmtruncate(inode, new_size); |
| 818 | if (err) | 825 | if (err) |
| @@ -869,7 +876,12 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, | |||
| 869 | err = ubifs_jnl_truncate(c, inode, old_size, new_size); | 876 | err = ubifs_jnl_truncate(c, inode, old_size, new_size); |
| 870 | mutex_unlock(&ui->ui_mutex); | 877 | mutex_unlock(&ui->ui_mutex); |
| 871 | out_budg: | 878 | out_budg: |
| 872 | ubifs_release_budget(c, &req); | 879 | if (budgeted) |
| 880 | ubifs_release_budget(c, &req); | ||
| 881 | else { | ||
| 882 | c->nospace = c->nospace_rp = 0; | ||
| 883 | smp_wmb(); | ||
| 884 | } | ||
| 873 | return err; | 885 | return err; |
| 874 | } | 886 | } |
| 875 | 887 | ||
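do_truncation() now treats truncation to size zero the way ubifs_unlink() already does: an operation that can only free space must not fail with -ENOSPC on a full file-system, so the budget failure is tolerated, and once the truncation has gone through unbudgeted the cached out-of-space flags are cleared, because space really was released. The shape of the pattern, condensed from the hunk:

	err = ubifs_budget_space(c, &req);
	if (err) {
		if (new_size || err != -ENOSPC)
			return err;	/* only "truncate to 0" may proceed */
		budgeted = 0;		/* run without a budget, like unlink */
	}
	/* ... perform the truncation ... */
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		/* space was freed: let blocked writers retry budgeting */
		c->nospace = c->nospace_rp = 0;
		smp_wmb();
	}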
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c index adee7b5ddeab..e045c8b55423 100644 --- a/fs/ubifs/find.c +++ b/fs/ubifs/find.c | |||
| @@ -211,14 +211,8 @@ static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c, | |||
| 211 | * dirty index heap, and it falls-back to LPT scanning if the heaps are empty | 211 | * dirty index heap, and it falls-back to LPT scanning if the heaps are empty |
| 212 | * or do not have an LEB which satisfies the @min_space criteria. | 212 | * or do not have an LEB which satisfies the @min_space criteria. |
| 213 | * | 213 | * |
| 214 | * Note: | 214 | * Note, LEBs which have less than dead watermark of free + dirty space are |
| 215 | * o LEBs which have less than dead watermark of dirty space are never picked | 215 | * never picked by this function. |
| 216 | * by this function; | ||
| 217 | * | ||
| 218 | * Returns zero and the LEB properties of | ||
| 219 | * found dirty LEB in case of success, %-ENOSPC if no dirty LEB was found and a | ||
| 220 | * negative error code in case of other failures. The returned LEB is marked as | ||
| 221 | * "taken". | ||
| 222 | * | 216 | * |
| 223 | * The additional @pick_free argument controls if this function has to return a | 217 | * The additional @pick_free argument controls if this function has to return a |
| 224 | * free or freeable LEB if one is present. For example, GC must set it to %1, | 218 | * free or freeable LEB if one is present. For example, GC must set it to %1, |
| @@ -231,6 +225,10 @@ static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c, | |||
| 231 | * | 225 | * |
| 232 | * In addition @pick_free is set to %2 by the recovery process in order to | 226 | * In addition @pick_free is set to %2 by the recovery process in order to |
| 233 | * recover gc_lnum in which case an index LEB must not be returned. | 227 | * recover gc_lnum in which case an index LEB must not be returned. |
| 228 | * | ||
| 229 | * This function returns zero and the LEB properties of found dirty LEB in case | ||
| 230 | * of success, %-ENOSPC if no dirty LEB was found and a negative error code in | ||
| 231 | * case of other failures. The returned LEB is marked as "taken". | ||
| 234 | */ | 232 | */ |
| 235 | int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, | 233 | int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, |
| 236 | int min_space, int pick_free) | 234 | int min_space, int pick_free) |
| @@ -245,7 +243,7 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, | |||
| 245 | int lebs, rsvd_idx_lebs = 0; | 243 | int lebs, rsvd_idx_lebs = 0; |
| 246 | 244 | ||
| 247 | spin_lock(&c->space_lock); | 245 | spin_lock(&c->space_lock); |
| 248 | lebs = c->lst.empty_lebs; | 246 | lebs = c->lst.empty_lebs + c->idx_gc_cnt; |
| 249 | lebs += c->freeable_cnt - c->lst.taken_empty_lebs; | 247 | lebs += c->freeable_cnt - c->lst.taken_empty_lebs; |
| 250 | 248 | ||
| 251 | /* | 249 | /* |
| @@ -317,7 +315,7 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, | |||
| 317 | lp = idx_lp; | 315 | lp = idx_lp; |
| 318 | 316 | ||
| 319 | if (lp) { | 317 | if (lp) { |
| 320 | ubifs_assert(lp->dirty >= c->dead_wm); | 318 | ubifs_assert(lp->free + lp->dirty >= c->dead_wm); |
| 321 | goto found; | 319 | goto found; |
| 322 | } | 320 | } |
| 323 | 321 | ||
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index d0f3dac29081..13f1019c859f 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c | |||
| @@ -344,6 +344,12 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) | |||
| 344 | if (err) | 344 | if (err) |
| 345 | goto out; | 345 | goto out; |
| 346 | 346 | ||
| 347 | /* Allow for races with TNC */ | ||
| 348 | c->gced_lnum = lnum; | ||
| 349 | smp_wmb(); | ||
| 350 | c->gc_seq += 1; | ||
| 351 | smp_wmb(); | ||
| 352 | |||
| 347 | if (c->gc_lnum == -1) { | 353 | if (c->gc_lnum == -1) { |
| 348 | c->gc_lnum = lnum; | 354 | c->gc_lnum = lnum; |
| 349 | err = LEB_RETAINED; | 355 | err = LEB_RETAINED; |
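The two stores added to ubifs_garbage_collect_leb() publish "this LEB just moved" for lockless readers in the TNC: the LEB number is written first, then the sequence counter is bumped, and the write barriers keep the stores in that order, so a reader that observes the new gc_seq is guaranteed to also observe the matching gced_lnum. Writer side, annotated:

	/* publish the GC event for lockless readers (see maybe_leb_gced()) */
	c->gced_lnum = lnum;	/* 1: record which LEB moved */
	smp_wmb();		/* gced_lnum must be visible before the bump */
	c->gc_seq += 1;		/* 2: bump the sequence counter */
	smp_wmb();		/* bump must be visible before the LEB is reused */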
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h index 87dabf9fe742..4c12a9215d7f 100644 --- a/fs/ubifs/misc.h +++ b/fs/ubifs/misc.h | |||
| @@ -284,38 +284,6 @@ static inline void *ubifs_idx_key(const struct ubifs_info *c, | |||
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | /** | 286 | /** |
| 287 | * ubifs_reported_space - calculate reported free space. | ||
| 288 | * @c: the UBIFS file-system description object | ||
| 289 | * @free: amount of free space | ||
| 290 | * | ||
| 291 | * This function calculates amount of free space which will be reported to | ||
| 292 | * user-space. User-space applications tend to expect that if the file-system | ||
| 293 | * (e.g., via the 'statfs()' call) reports that it has N bytes available, they | ||
| 294 | * are able to write a file of size N. UBIFS attaches node headers to each data | ||
| 295 | * node and it has to write indexing nodes as well. This introduces additional | ||
| 296 | * overhead, and UBIFS has to report slightly less free space to meet the | ||
| 297 | * above expectation. | ||
| 298 | * | ||
| 299 | * This function assumes free space is made up of uncompressed data nodes and | ||
| 300 | * full index nodes (one per data node, doubled because we always allow enough | ||
| 301 | * space to write the index twice). | ||
| 302 | * | ||
| 303 | * Note, the calculation is pessimistic, which means that most of the time | ||
| 304 | * UBIFS reports less space than it actually has. | ||
| 305 | */ | ||
| 306 | static inline long long ubifs_reported_space(const struct ubifs_info *c, | ||
| 307 | uint64_t free) | ||
| 308 | { | ||
| 309 | int divisor, factor; | ||
| 310 | |||
| 311 | divisor = UBIFS_MAX_DATA_NODE_SZ + (c->max_idx_node_sz * 3); | ||
| 312 | factor = UBIFS_MAX_DATA_NODE_SZ - UBIFS_DATA_NODE_SZ; | ||
| 313 | do_div(free, divisor); | ||
| 314 | |||
| 315 | return free * factor; | ||
| 316 | } | ||
| 317 | |||
| 318 | /** | ||
| 319 | * ubifs_current_time - round current time to time granularity. | 287 | * ubifs_current_time - round current time to time granularity. |
| 320 | * @inode: inode | 288 | * @inode: inode |
| 321 | */ | 289 | */ |
| @@ -325,4 +293,21 @@ static inline struct timespec ubifs_current_time(struct inode *inode) | |||
| 325 | current_fs_time(inode->i_sb) : CURRENT_TIME_SEC; | 293 | current_fs_time(inode->i_sb) : CURRENT_TIME_SEC; |
| 326 | } | 294 | } |
| 327 | 295 | ||
| 296 | /** | ||
| 297 | * ubifs_tnc_lookup - look up a file-system node. | ||
| 298 | * @c: UBIFS file-system description object | ||
| 299 | * @key: node key to lookup | ||
| 300 | * @node: the node is returned here | ||
| 301 | * | ||
| 302 | * This function looks up and reads the node with key @key. The caller has to make | ||
| 303 | * sure the @node buffer is large enough to fit the node. Returns zero in case | ||
| 304 | * of success, %-ENOENT if the node was not found, and a negative error code in | ||
| 305 | * case of failure. | ||
| 306 | */ | ||
| 307 | static inline int ubifs_tnc_lookup(struct ubifs_info *c, | ||
| 308 | const union ubifs_key *key, void *node) | ||
| 309 | { | ||
| 310 | return ubifs_tnc_locate(c, key, node, NULL, NULL); | ||
| 311 | } | ||
| 312 | |||
| 328 | #endif /* __UBIFS_MISC_H__ */ | 313 | #endif /* __UBIFS_MISC_H__ */ |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index f71e6b8822c4..7562464ac83f 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
| @@ -370,8 +370,9 @@ static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 370 | { | 370 | { |
| 371 | struct ubifs_info *c = dentry->d_sb->s_fs_info; | 371 | struct ubifs_info *c = dentry->d_sb->s_fs_info; |
| 372 | unsigned long long free; | 372 | unsigned long long free; |
| 373 | __le32 *uuid = (__le32 *)c->uuid; | ||
| 373 | 374 | ||
| 374 | free = ubifs_budg_get_free_space(c); | 375 | free = ubifs_get_free_space(c); |
| 375 | dbg_gen("free space %lld bytes (%lld blocks)", | 376 | dbg_gen("free space %lld bytes (%lld blocks)", |
| 376 | free, free >> UBIFS_BLOCK_SHIFT); | 377 | free, free >> UBIFS_BLOCK_SHIFT); |
| 377 | 378 | ||
| @@ -386,7 +387,8 @@ static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
| 386 | buf->f_files = 0; | 387 | buf->f_files = 0; |
| 387 | buf->f_ffree = 0; | 388 | buf->f_ffree = 0; |
| 388 | buf->f_namelen = UBIFS_MAX_NLEN; | 389 | buf->f_namelen = UBIFS_MAX_NLEN; |
| 389 | 390 | buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]); | |
| 391 | buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]); | ||
| 390 | return 0; | 392 | return 0; |
| 391 | } | 393 | } |
| 392 | 394 | ||
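The statfs() change above gives UBIFS a meaningful f_fsid by folding the 16-byte volume UUID into the two 32-bit fsid words with XOR, so the id is stable across mounts and, barring unlucky collisions, distinct per volume. A standalone sketch with a made-up UUID (the kernel reads the words little-endian via le32_to_cpu(); the host-order memcpy below is an approximation):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* hypothetical 16-byte volume UUID */
		const uint8_t uuid[16] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4,
					   5, 6, 7, 8, 0xca, 0xfe, 0xba, 0xbe };
		uint32_t w[4];
		memcpy(w, uuid, sizeof(w));

		/* fold 128 bits down to the two 32-bit f_fsid words */
		uint32_t val0 = w[0] ^ w[2];
		uint32_t val1 = w[1] ^ w[3];
		printf("f_fsid = %08x:%08x\n", (unsigned)val0, (unsigned)val1);
		return 0;
	}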
| @@ -530,6 +532,12 @@ static int init_constants_early(struct ubifs_info *c) | |||
| 530 | c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size); | 532 | c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size); |
| 531 | c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size); | 533 | c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size); |
| 532 | 534 | ||
| 535 | /* | ||
| 536 | * Calculate how many bytes would be wasted at the end of an LEB if it was | ||
| 537 | * fully filled with data nodes of maximum size. This is used in | ||
| 538 | * calculations when reporting free space. | ||
| 539 | */ | ||
| 540 | c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; | ||
| 533 | return 0; | 541 | return 0; |
| 534 | } | 542 | } |
| 535 | 543 | ||
| @@ -647,13 +655,11 @@ static int init_constants_late(struct ubifs_info *c) | |||
| 647 | * internally because it does not make much sense for UBIFS, but it is | 655 | * internally because it does not make much sense for UBIFS, but it is |
| 648 | * necessary to report something for the 'statfs()' call. | 656 | * necessary to report something for the 'statfs()' call. |
| 649 | * | 657 | * |
| 650 | * Subtract the LEB reserved for GC and the LEB which is reserved for | 658 | * Subtract the LEB reserved for GC, the LEB which is reserved for |
| 651 | * deletions. | 659 | * deletions, and assume only one journal head is available. |
| 652 | * | ||
| 653 | * Review 'ubifs_calc_available()' if changing this calculation. | ||
| 654 | */ | 660 | */ |
| 655 | tmp64 = c->main_lebs - 2; | 661 | tmp64 = c->main_lebs - 2 - c->jhead_cnt + 1; |
| 656 | tmp64 *= (uint64_t)c->leb_size - c->dark_wm; | 662 | tmp64 *= (uint64_t)c->leb_size - c->leb_overhead; |
| 657 | tmp64 = ubifs_reported_space(c, tmp64); | 663 | tmp64 = ubifs_reported_space(c, tmp64); |
| 658 | c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; | 664 | c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; |
| 659 | 665 | ||
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index e909f4a96443..7da209ab9378 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
| @@ -506,7 +506,7 @@ static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, | |||
| 506 | if (keys_cmp(c, key, &node_key) != 0) | 506 | if (keys_cmp(c, key, &node_key) != 0) |
| 507 | ret = 0; | 507 | ret = 0; |
| 508 | } | 508 | } |
| 509 | if (ret == 0) | 509 | if (ret == 0 && c->replaying) |
| 510 | dbg_mnt("dangling branch LEB %d:%d len %d, key %s", | 510 | dbg_mnt("dangling branch LEB %d:%d len %d, key %s", |
| 511 | zbr->lnum, zbr->offs, zbr->len, DBGKEY(key)); | 511 | zbr->lnum, zbr->offs, zbr->len, DBGKEY(key)); |
| 512 | return ret; | 512 | return ret; |
| @@ -1382,50 +1382,39 @@ static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key, | |||
| 1382 | } | 1382 | } |
| 1383 | 1383 | ||
| 1384 | /** | 1384 | /** |
| 1385 | * ubifs_tnc_lookup - look up a file-system node. | 1385 | * maybe_leb_gced - determine if a LEB may have been garbage collected. |
| 1386 | * @c: UBIFS file-system description object | 1386 | * @c: UBIFS file-system description object |
| 1387 | * @key: node key to lookup | 1387 | * @lnum: LEB number |
| 1388 | * @node: the node is returned here | 1388 | * @gc_seq1: garbage collection sequence number |
| 1389 | * | 1389 | * |
| 1390 | * This function looks up and reads the node with key @key. The caller has to make | 1390 | * This function determines if @lnum may have been garbage collected since |
| 1391 | * sure the @node buffer is large enough to fit the node. Returns zero in case | 1391 | * sequence number @gc_seq1. If it may have been, then %1 is returned, otherwise |
| 1392 | * of success, %-ENOENT if the node was not found, and a negative error code in | 1392 | * %0 is returned. |
| 1393 | * case of failure. | ||
| 1394 | */ | 1393 | */ |
| 1395 | int ubifs_tnc_lookup(struct ubifs_info *c, const union ubifs_key *key, | 1394 | static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1) |
| 1396 | void *node) | ||
| 1397 | { | 1395 | { |
| 1398 | int found, n, err; | 1396 | int gc_seq2, gced_lnum; |
| 1399 | struct ubifs_znode *znode; | ||
| 1400 | struct ubifs_zbranch zbr, *zt; | ||
| 1401 | 1397 | ||
| 1402 | mutex_lock(&c->tnc_mutex); | 1398 | gced_lnum = c->gced_lnum; |
| 1403 | found = ubifs_lookup_level0(c, key, &znode, &n); | 1399 | smp_rmb(); |
| 1404 | if (!found) { | 1400 | gc_seq2 = c->gc_seq; |
| 1405 | err = -ENOENT; | 1401 | /* Same seq means no GC */ |
| 1406 | goto out; | 1402 | if (gc_seq1 == gc_seq2) |
| 1407 | } else if (found < 0) { | 1403 | return 0; |
| 1408 | err = found; | 1404 | /* Different by more than 1 means we don't know */ |
| 1409 | goto out; | 1405 | if (gc_seq1 + 1 != gc_seq2) |
| 1410 | } | 1406 | return 1; |
| 1411 | zt = &znode->zbranch[n]; | 1407 | /* |
| 1412 | if (is_hash_key(c, key)) { | 1408 | * We have seen the sequence number has increased by 1. Now we need to |
| 1413 | /* | 1409 | * be sure we read the right LEB number, so read it again. |
| 1414 | * In this case the leaf node cache gets used, so we pass the | 1410 | */ |
| 1415 | * address of the zbranch and keep the mutex locked | 1411 | smp_rmb(); |
| 1416 | */ | 1412 | if (gced_lnum != c->gced_lnum) |
| 1417 | err = tnc_read_node_nm(c, zt, node); | 1413 | return 1; |
| 1418 | goto out; | 1414 | /* Finally we can check lnum */ |
| 1419 | } | 1415 | if (gced_lnum == lnum) |
| 1420 | zbr = znode->zbranch[n]; | 1416 | return 1; |
| 1421 | mutex_unlock(&c->tnc_mutex); | 1417 | return 0; |
| 1422 | |||
| 1423 | err = ubifs_tnc_read_node(c, &zbr, node); | ||
| 1424 | return err; | ||
| 1425 | |||
| 1426 | out: | ||
| 1427 | mutex_unlock(&c->tnc_mutex); | ||
| 1428 | return err; | ||
| 1429 | } | 1418 | } |
| 1430 | 1419 | ||
| 1431 | /** | 1420 | /** |
| @@ -1436,16 +1425,19 @@ out: | |||
| 1436 | * @lnum: LEB number is returned here | 1425 | * @lnum: LEB number is returned here |
| 1437 | * @offs: offset is returned here | 1426 | * @offs: offset is returned here |
| 1438 | * | 1427 | * |
| 1439 | * This function is the same as 'ubifs_tnc_lookup()' but it returns the node | 1428 | * This function looks up and reads the node with key @key. The caller has to make |
| 1440 | * location also. See 'ubifs_tnc_lookup()'. | 1429 | * sure the @node buffer is large enough to fit the node. Returns zero in case |
| 1430 | * of success, %-ENOENT if the node was not found, and a negative error code in | ||
| 1431 | * case of failure. The node location can be returned in @lnum and @offs. | ||
| 1441 | */ | 1432 | */ |
| 1442 | int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, | 1433 | int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, |
| 1443 | void *node, int *lnum, int *offs) | 1434 | void *node, int *lnum, int *offs) |
| 1444 | { | 1435 | { |
| 1445 | int found, n, err; | 1436 | int found, n, err, safely = 0, gc_seq1; |
| 1446 | struct ubifs_znode *znode; | 1437 | struct ubifs_znode *znode; |
| 1447 | struct ubifs_zbranch zbr, *zt; | 1438 | struct ubifs_zbranch zbr, *zt; |
| 1448 | 1439 | ||
| 1440 | again: | ||
| 1449 | mutex_lock(&c->tnc_mutex); | 1441 | mutex_lock(&c->tnc_mutex); |
| 1450 | found = ubifs_lookup_level0(c, key, &znode, &n); | 1442 | found = ubifs_lookup_level0(c, key, &znode, &n); |
| 1451 | if (!found) { | 1443 | if (!found) { |
| @@ -1456,24 +1448,43 @@ int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, | |||
| 1456 | goto out; | 1448 | goto out; |
| 1457 | } | 1449 | } |
| 1458 | zt = &znode->zbranch[n]; | 1450 | zt = &znode->zbranch[n]; |
| 1451 | if (lnum) { | ||
| 1452 | *lnum = zt->lnum; | ||
| 1453 | *offs = zt->offs; | ||
| 1454 | } | ||
| 1459 | if (is_hash_key(c, key)) { | 1455 | if (is_hash_key(c, key)) { |
| 1460 | /* | 1456 | /* |
| 1461 | * In this case the leaf node cache gets used, so we pass the | 1457 | * In this case the leaf node cache gets used, so we pass the |
| 1462 | * address of the zbranch and keep the mutex locked | 1458 | * address of the zbranch and keep the mutex locked |
| 1463 | */ | 1459 | */ |
| 1464 | *lnum = zt->lnum; | ||
| 1465 | *offs = zt->offs; | ||
| 1466 | err = tnc_read_node_nm(c, zt, node); | 1460 | err = tnc_read_node_nm(c, zt, node); |
| 1467 | goto out; | 1461 | goto out; |
| 1468 | } | 1462 | } |
| 1463 | if (safely) { | ||
| 1464 | err = ubifs_tnc_read_node(c, zt, node); | ||
| 1465 | goto out; | ||
| 1466 | } | ||
| 1467 | /* Drop the TNC mutex prematurely and race with garbage collection */ | ||
| 1469 | zbr = znode->zbranch[n]; | 1468 | zbr = znode->zbranch[n]; |
| 1469 | gc_seq1 = c->gc_seq; | ||
| 1470 | mutex_unlock(&c->tnc_mutex); | 1470 | mutex_unlock(&c->tnc_mutex); |
| 1471 | 1471 | ||
| 1472 | *lnum = zbr.lnum; | 1472 | if (ubifs_get_wbuf(c, zbr.lnum)) { |
| 1473 | *offs = zbr.offs; | 1473 | /* We do not GC journal heads */ |
| 1474 | err = ubifs_tnc_read_node(c, &zbr, node); | ||
| 1475 | return err; | ||
| 1476 | } | ||
| 1474 | 1477 | ||
| 1475 | err = ubifs_tnc_read_node(c, &zbr, node); | 1478 | err = fallible_read_node(c, key, &zbr, node); |
| 1476 | return err; | 1479 | if (maybe_leb_gced(c, zbr.lnum, gc_seq1)) { |
| 1480 | /* | ||
| 1481 | * The node may have been GC'ed out from under us so try again | ||
| 1482 | * while keeping the TNC mutex locked. | ||
| 1483 | */ | ||
| 1484 | safely = 1; | ||
| 1485 | goto again; | ||
| 1486 | } | ||
| 1487 | return 0; | ||
| 1477 | 1488 | ||
| 1478 | out: | 1489 | out: |
| 1479 | mutex_unlock(&c->tnc_mutex); | 1490 | mutex_unlock(&c->tnc_mutex); |
| @@ -1498,7 +1509,6 @@ static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key, | |||
| 1498 | { | 1509 | { |
| 1499 | int found, n, err; | 1510 | int found, n, err; |
| 1500 | struct ubifs_znode *znode; | 1511 | struct ubifs_znode *znode; |
| 1501 | struct ubifs_zbranch zbr; | ||
| 1502 | 1512 | ||
| 1503 | dbg_tnc("name '%.*s' key %s", nm->len, nm->name, DBGKEY(key)); | 1513 | dbg_tnc("name '%.*s' key %s", nm->len, nm->name, DBGKEY(key)); |
| 1504 | mutex_lock(&c->tnc_mutex); | 1514 | mutex_lock(&c->tnc_mutex); |
| @@ -1522,11 +1532,7 @@ static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key, | |||
| 1522 | goto out_unlock; | 1532 | goto out_unlock; |
| 1523 | } | 1533 | } |
| 1524 | 1534 | ||
| 1525 | zbr = znode->zbranch[n]; | 1535 | err = tnc_read_node_nm(c, &znode->zbranch[n], node); |
| 1526 | mutex_unlock(&c->tnc_mutex); | ||
| 1527 | |||
| 1528 | err = tnc_read_node_nm(c, &zbr, node); | ||
| 1529 | return err; | ||
| 1530 | 1536 | ||
| 1531 | out_unlock: | 1537 | out_unlock: |
| 1532 | mutex_unlock(&c->tnc_mutex); | 1538 | mutex_unlock(&c->tnc_mutex); |
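The reader side in ubifs_tnc_locate() completes the protocol: it samples gc_seq under tnc_mutex, drops the mutex so flash reads do not serialize all lookups (journal-head LEBs, which are never GC'ed, are still read directly), and afterwards asks maybe_leb_gced() whether garbage collection could have moved the node in between. A one-step difference in the sequence counter plus a re-read of gced_lnum disambiguates, and on any doubt the lookup retries with safely = 1, i.e. reading under the mutex. In miniature:

	again:
		mutex_lock(&c->tnc_mutex);
		/* ... resolve the key; zt points at the matching zbranch ... */
		if (safely) {
			err = ubifs_tnc_read_node(c, zt, node);	/* locked, GC-safe */
			goto out;
		}
		zbr = *zt;
		gc_seq1 = c->gc_seq;		/* sample before the unlocked read */
		mutex_unlock(&c->tnc_mutex);

		err = fallible_read_node(c, key, &zbr, node);
		if (maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
			safely = 1;		/* GC may have raced us: redo locked */
			goto again;
		}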
diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h index bd2121f3426e..a9ecbd9af20d 100644 --- a/fs/ubifs/ubifs-media.h +++ b/fs/ubifs/ubifs-media.h | |||
| @@ -87,7 +87,7 @@ | |||
| 87 | #define UBIFS_SK_LEN 8 | 87 | #define UBIFS_SK_LEN 8 |
| 88 | 88 | ||
| 89 | /* Minimum index tree fanout */ | 89 | /* Minimum index tree fanout */ |
| 90 | #define UBIFS_MIN_FANOUT 2 | 90 | #define UBIFS_MIN_FANOUT 3 |
| 91 | 91 | ||
| 92 | /* Maximum number of levels in UBIFS indexing B-tree */ | 92 | /* Maximum number of levels in UBIFS indexing B-tree */ |
| 93 | #define UBIFS_MAX_LEVELS 512 | 93 | #define UBIFS_MAX_LEVELS 512 |
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index d7f706f7a302..17c620b93eec 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
| @@ -995,6 +995,9 @@ struct ubifs_mount_opts { | |||
| 995 | * @max_idx_node_sz: maximum indexing node aligned on 8-bytes boundary | 995 | * @max_idx_node_sz: maximum indexing node aligned on 8-bytes boundary |
| 996 | * @max_inode_sz: maximum possible inode size in bytes | 996 | * @max_inode_sz: maximum possible inode size in bytes |
| 997 | * @max_znode_sz: size of znode in bytes | 997 | * @max_znode_sz: size of znode in bytes |
| 998 | * | ||
| 999 | * @leb_overhead: how many bytes are wasted in an LEB when it is filled with | ||
| 1000 | * data nodes of maximum size - used in free space reporting | ||
| 998 | * @dead_wm: LEB dead space watermark | 1001 | * @dead_wm: LEB dead space watermark |
| 999 | * @dark_wm: LEB dark space watermark | 1002 | * @dark_wm: LEB dark space watermark |
| 1000 | * @block_cnt: count of 4KiB blocks on the FS | 1003 | * @block_cnt: count of 4KiB blocks on the FS |
| @@ -1028,6 +1031,8 @@ struct ubifs_mount_opts { | |||
| 1028 | * @sbuf: a buffer of LEB size used by GC and replay for scanning | 1031 | * @sbuf: a buffer of LEB size used by GC and replay for scanning |
| 1029 | * @idx_gc: list of index LEBs that have been garbage collected | 1032 | * @idx_gc: list of index LEBs that have been garbage collected |
| 1030 | * @idx_gc_cnt: number of elements on the idx_gc list | 1033 | * @idx_gc_cnt: number of elements on the idx_gc list |
| 1034 | * @gc_seq: incremented for every non-index LEB garbage collected | ||
| 1035 | * @gced_lnum: last non-index LEB that was garbage collected | ||
| 1031 | * | 1036 | * |
| 1032 | * @infos_list: links all 'ubifs_info' objects | 1037 | * @infos_list: links all 'ubifs_info' objects |
| 1033 | * @umount_mutex: serializes shrinker and un-mount | 1038 | * @umount_mutex: serializes shrinker and un-mount |
| @@ -1224,6 +1229,8 @@ struct ubifs_info { | |||
| 1224 | int max_idx_node_sz; | 1229 | int max_idx_node_sz; |
| 1225 | long long max_inode_sz; | 1230 | long long max_inode_sz; |
| 1226 | int max_znode_sz; | 1231 | int max_znode_sz; |
| 1232 | |||
| 1233 | int leb_overhead; | ||
| 1227 | int dead_wm; | 1234 | int dead_wm; |
| 1228 | int dark_wm; | 1235 | int dark_wm; |
| 1229 | int block_cnt; | 1236 | int block_cnt; |
| @@ -1257,6 +1264,8 @@ struct ubifs_info { | |||
| 1257 | void *sbuf; | 1264 | void *sbuf; |
| 1258 | struct list_head idx_gc; | 1265 | struct list_head idx_gc; |
| 1259 | int idx_gc_cnt; | 1266 | int idx_gc_cnt; |
| 1267 | volatile int gc_seq; | ||
| 1268 | volatile int gced_lnum; | ||
| 1260 | 1269 | ||
| 1261 | struct list_head infos_list; | 1270 | struct list_head infos_list; |
| 1262 | struct mutex umount_mutex; | 1271 | struct mutex umount_mutex; |
| @@ -1434,9 +1443,10 @@ void ubifs_release_ino_dirty(struct ubifs_info *c, struct inode *inode, | |||
| 1434 | struct ubifs_budget_req *req); | 1443 | struct ubifs_budget_req *req); |
| 1435 | void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, | 1444 | void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, |
| 1436 | struct ubifs_budget_req *req); | 1445 | struct ubifs_budget_req *req); |
| 1437 | long long ubifs_budg_get_free_space(struct ubifs_info *c); | 1446 | long long ubifs_get_free_space(struct ubifs_info *c); |
| 1438 | int ubifs_calc_min_idx_lebs(struct ubifs_info *c); | 1447 | int ubifs_calc_min_idx_lebs(struct ubifs_info *c); |
| 1439 | void ubifs_convert_page_budget(struct ubifs_info *c); | 1448 | void ubifs_convert_page_budget(struct ubifs_info *c); |
| 1449 | long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free); | ||
| 1440 | long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs); | 1450 | long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs); |
| 1441 | 1451 | ||
| 1442 | /* find.c */ | 1452 | /* find.c */ |
| @@ -1451,8 +1461,6 @@ int ubifs_save_dirty_idx_lnums(struct ubifs_info *c); | |||
| 1451 | /* tnc.c */ | 1461 | /* tnc.c */ |
| 1452 | int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key, | 1462 | int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key, |
| 1453 | struct ubifs_znode **zn, int *n); | 1463 | struct ubifs_znode **zn, int *n); |
| 1454 | int ubifs_tnc_lookup(struct ubifs_info *c, const union ubifs_key *key, | ||
| 1455 | void *node); | ||
| 1456 | int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key, | 1464 | int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key, |
| 1457 | void *node, const struct qstr *nm); | 1465 | void *node, const struct qstr *nm); |
| 1458 | int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, | 1466 | int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, |
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 8feeae1f2369..79a7ff925bf8 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h | |||
| @@ -14,4 +14,10 @@ extern char __kprobes_text_start[], __kprobes_text_end[]; | |||
| 14 | extern char __initdata_begin[], __initdata_end[]; | 14 | extern char __initdata_begin[], __initdata_end[]; |
| 15 | extern char __start_rodata[], __end_rodata[]; | 15 | extern char __start_rodata[], __end_rodata[]; |
| 16 | 16 | ||
| 17 | /* function descriptor handling (if any). Override | ||
| 18 | * in asm/sections.h */ | ||
| 19 | #ifndef dereference_function_descriptor | ||
| 20 | #define dereference_function_descriptor(p) (p) | ||
| 21 | #endif | ||
| 22 | |||
| 17 | #endif /* _ASM_GENERIC_SECTIONS_H_ */ | 23 | #endif /* _ASM_GENERIC_SECTIONS_H_ */ |
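Editor's note: on most architectures a function pointer already holds the function's entry address, so the generic definition above is an identity macro; ia64, 64-bit PowerPC and (per the next hunk) 64-bit PA-RISC instead use function descriptors that must be dereferenced first. A sketch of the intended use; the helper below is illustrative, not kernel API:

	#include <asm/sections.h>	/* picks up the arch override, if any */

	/* Resolve a function pointer to its real text address before
	 * doing symbol lookup; on x86 this compiles away entirely. */
	static unsigned long func_text_addr(void *fn)
	{
		return (unsigned long)dereference_function_descriptor(fn);
	}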
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h index d5c0f2fda51b..03b1d69b142f 100644 --- a/include/asm-mips/cacheflush.h +++ b/include/asm-mips/cacheflush.h | |||
| @@ -63,6 +63,7 @@ static inline void flush_icache_page(struct vm_area_struct *vma, | |||
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | extern void (*flush_icache_range)(unsigned long start, unsigned long end); | 65 | extern void (*flush_icache_range)(unsigned long start, unsigned long end); |
| 66 | extern void (*local_flush_icache_range)(unsigned long start, unsigned long end); | ||
| 66 | 67 | ||
| 67 | extern void (*__flush_cache_vmap)(void); | 68 | extern void (*__flush_cache_vmap)(void); |
| 68 | 69 | ||
diff --git a/include/asm-parisc/sections.h b/include/asm-parisc/sections.h index fdd43ec42ec5..9d13c3507ad6 100644 --- a/include/asm-parisc/sections.h +++ b/include/asm-parisc/sections.h | |||
| @@ -4,4 +4,9 @@ | |||
| 4 | /* nothing to see, move along */ | 4 | /* nothing to see, move along */ |
| 5 | #include <asm-generic/sections.h> | 5 | #include <asm-generic/sections.h> |
| 6 | 6 | ||
| 7 | #ifdef CONFIG_64BIT | ||
| 8 | #undef dereference_function_descriptor | ||
| 9 | void *dereference_function_descriptor(void *); | ||
| 10 | #endif | ||
| 11 | |||
| 7 | #endif | 12 | #endif |
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 762f6a6bc707..9489283a4bcf 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
| @@ -72,14 +72,15 @@ | |||
| 72 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 72 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
| 73 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | 73 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ |
| 74 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | 74 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
| 75 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | 75 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
| 76 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | 76 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
| 77 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ | 77 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ |
| 78 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ | 78 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ |
| 79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ | 79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ |
| 80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ | 80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ |
| 81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ | 81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ |
| 82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ | 82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ |
| 83 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | ||
| 83 | 84 | ||
| 84 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 85 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
| 85 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 86 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h index adec887dd7cd..5c2ff4bc2980 100644 --- a/include/asm-x86/required-features.h +++ b/include/asm-x86/required-features.h | |||
| @@ -41,6 +41,12 @@ | |||
| 41 | # define NEED_3DNOW 0 | 41 | # define NEED_3DNOW 0 |
| 42 | #endif | 42 | #endif |
| 43 | 43 | ||
| 44 | #if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) | ||
| 45 | # define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) | ||
| 46 | #else | ||
| 47 | # define NEED_NOPL 0 | ||
| 48 | #endif | ||
| 49 | |||
| 44 | #ifdef CONFIG_X86_64 | 50 | #ifdef CONFIG_X86_64 |
| 45 | #define NEED_PSE 0 | 51 | #define NEED_PSE 0 |
| 46 | #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) | 52 | #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) |
| @@ -67,7 +73,7 @@ | |||
| 67 | #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) | 73 | #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) |
| 68 | 74 | ||
| 69 | #define REQUIRED_MASK2 0 | 75 | #define REQUIRED_MASK2 0 |
| 70 | #define REQUIRED_MASK3 0 | 76 | #define REQUIRED_MASK3 (NEED_NOPL) |
| 71 | #define REQUIRED_MASK4 0 | 77 | #define REQUIRED_MASK4 0 |
| 72 | #define REQUIRED_MASK5 0 | 78 | #define REQUIRED_MASK5 0 |
| 73 | #define REQUIRED_MASK6 0 | 79 | #define REQUIRED_MASK6 0 |
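Editor's note: feature numbers encode a 32-bit word index plus a bit position. X86_FEATURE_NOPL is (3*32+20), i.e. bit 20 of word 3, which is why the mask is built with '& 31' and lands in REQUIRED_MASK3. A worked decoding in plain user-space C (illustrative only):

	#include <stdio.h>

	#define X86_FEATURE_NOPL (3*32 + 20)

	int main(void)
	{
		unsigned f = X86_FEATURE_NOPL;

		printf("word %u, bit %u, mask 0x%08x\n",
		       f / 32,		/* -> 3, hence REQUIRED_MASK3 */
		       f & 31,		/* -> 20                      */
		       1u << (f & 31));	/* -> 0x00100000              */
		return 0;
	}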
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index c33b0dc28e4d..ed3a5d473e52 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
| @@ -127,6 +127,8 @@ extern int clockevents_register_notifier(struct notifier_block *nb); | |||
| 127 | extern int clockevents_program_event(struct clock_event_device *dev, | 127 | extern int clockevents_program_event(struct clock_event_device *dev, |
| 128 | ktime_t expires, ktime_t now); | 128 | ktime_t expires, ktime_t now); |
| 129 | 129 | ||
| 130 | extern void clockevents_handle_noop(struct clock_event_device *dev); | ||
| 131 | |||
| 130 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 132 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 131 | extern void clockevents_notify(unsigned long reason, void *arg); | 133 | extern void clockevents_notify(unsigned long reason, void *arg); |
| 132 | #else | 134 | #else |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index e8f450c499b0..2691926fb506 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
| @@ -160,7 +160,7 @@ static inline int current_cpuset_is_being_rebound(void) | |||
| 160 | 160 | ||
| 161 | static inline void rebuild_sched_domains(void) | 161 | static inline void rebuild_sched_domains(void) |
| 162 | { | 162 | { |
| 163 | partition_sched_domains(0, NULL, NULL); | 163 | partition_sched_domains(1, NULL, NULL); |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | #endif /* !CONFIG_CPUSETS */ | 166 | #endif /* !CONFIG_CPUSETS */ |
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index cbf751094688..46a43b721dd6 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h | |||
| @@ -325,7 +325,8 @@ int hci_conn_del(struct hci_conn *conn); | |||
| 325 | void hci_conn_hash_flush(struct hci_dev *hdev); | 325 | void hci_conn_hash_flush(struct hci_dev *hdev); |
| 326 | void hci_conn_check_pending(struct hci_dev *hdev); | 326 | void hci_conn_check_pending(struct hci_dev *hdev); |
| 327 | 327 | ||
| 328 | struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src); | 328 | struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type); |
| 329 | int hci_conn_check_link_mode(struct hci_conn *conn); | ||
| 329 | int hci_conn_auth(struct hci_conn *conn); | 330 | int hci_conn_auth(struct hci_conn *conn); |
| 330 | int hci_conn_encrypt(struct hci_conn *conn); | 331 | int hci_conn_encrypt(struct hci_conn *conn); |
| 331 | int hci_conn_change_link_key(struct hci_conn *conn); | 332 | int hci_conn_change_link_key(struct hci_conn *conn); |
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 95c660c9719b..91324908fccd 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h | |||
| @@ -208,6 +208,9 @@ extern void inet_twsk_schedule(struct inet_timewait_sock *tw, | |||
| 208 | extern void inet_twsk_deschedule(struct inet_timewait_sock *tw, | 208 | extern void inet_twsk_deschedule(struct inet_timewait_sock *tw, |
| 209 | struct inet_timewait_death_row *twdr); | 209 | struct inet_timewait_death_row *twdr); |
| 210 | 210 | ||
| 211 | extern void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo, | ||
| 212 | struct inet_timewait_death_row *twdr, int family); | ||
| 213 | |||
| 211 | static inline | 214 | static inline |
| 212 | struct net *twsk_net(const struct inet_timewait_sock *twsk) | 215 | struct net *twsk_net(const struct inet_timewait_sock *twsk) |
| 213 | { | 216 | { |
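Editor's note: inet_twsk_purge() walks the hash table's time-wait buckets and kills every time-wait socket of the given family belonging to the given namespace, which is what a protocol needs when a network namespace is torn down. A hedged sketch of a caller; the pernet-exit placement and handler name are assumptions, not shown in this hunk:

	/* Sketch: drop all IPv4 TCP time-wait sockets of a dying netns. */
	static void __net_exit tcp_twsk_net_exit(struct net *net)
	{
		inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
	}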
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index d5ab79cf516d..f227bc172690 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | * 2003-10-22 Updates by Stephen Hemminger. | 14 | * 2003-10-22 Updates by Stephen Hemminger. |
| 15 | * 2004 May-July Rework by Paul Jackson. | 15 | * 2004 May-July Rework by Paul Jackson. |
| 16 | * 2006 Rework by Paul Menage to use generic cgroups | 16 | * 2006 Rework by Paul Menage to use generic cgroups |
| 17 | * 2008 Rework of the scheduler domains and CPU hotplug handling | ||
| 18 | * by Max Krasnyansky | ||
| 17 | * | 19 | * |
| 18 | * This file is subject to the terms and conditions of the GNU General Public | 20 | * This file is subject to the terms and conditions of the GNU General Public |
| 19 | * License. See the file COPYING in the main directory of the Linux | 21 | * License. See the file COPYING in the main directory of the Linux |
| @@ -236,9 +238,11 @@ static struct cpuset top_cpuset = { | |||
| 236 | 238 | ||
| 237 | static DEFINE_MUTEX(callback_mutex); | 239 | static DEFINE_MUTEX(callback_mutex); |
| 238 | 240 | ||
| 239 | /* This is ugly, but preserves the userspace API for existing cpuset | 241 | /* |
| 242 | * This is ugly, but preserves the userspace API for existing cpuset | ||
| 240 | * users. If someone tries to mount the "cpuset" filesystem, we | 243 | * users. If someone tries to mount the "cpuset" filesystem, we |
| 241 | * silently switch it to mount "cgroup" instead */ | 244 | * silently switch it to mount "cgroup" instead |
| 245 | */ | ||
| 242 | static int cpuset_get_sb(struct file_system_type *fs_type, | 246 | static int cpuset_get_sb(struct file_system_type *fs_type, |
| 243 | int flags, const char *unused_dev_name, | 247 | int flags, const char *unused_dev_name, |
| 244 | void *data, struct vfsmount *mnt) | 248 | void *data, struct vfsmount *mnt) |
| @@ -473,10 +477,9 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) | |||
| 473 | } | 477 | } |
| 474 | 478 | ||
| 475 | /* | 479 | /* |
| 476 | * Helper routine for rebuild_sched_domains(). | 480 | * Helper routine for generate_sched_domains(). |
| 477 | * Do cpusets a, b have overlapping cpus_allowed masks? | 481 | * Do cpusets a, b have overlapping cpus_allowed masks? |
| 478 | */ | 482 | */ |
| 479 | |||
| 480 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) | 483 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) |
| 481 | { | 484 | { |
| 482 | return cpus_intersects(a->cpus_allowed, b->cpus_allowed); | 485 | return cpus_intersects(a->cpus_allowed, b->cpus_allowed); |
| @@ -518,26 +521,15 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) | |||
| 518 | } | 521 | } |
| 519 | 522 | ||
| 520 | /* | 523 | /* |
| 521 | * rebuild_sched_domains() | 524 | * generate_sched_domains() |
| 522 | * | 525 | * |
| 523 | * This routine will be called to rebuild the scheduler's dynamic | 526 | * This function builds a partial partition of the system's CPUs. |
| 524 | * sched domains: | 527 | * A 'partial partition' is a set of non-overlapping subsets whose |
| 525 | * - if the flag 'sched_load_balance' of any cpuset with non-empty | 528 | * union is a subset of that set. |
| 526 | * 'cpus' changes, | 529 | * The output of this function needs to be passed to the kernel/sched.c |
| 527 | * - or if the 'cpus' allowed changes in any cpuset which has that | 530 | * partition_sched_domains() routine, which will rebuild the scheduler's |
| 528 | * flag enabled, | 531 | * load balancing domains (sched domains) as specified by that partial |
| 529 | * - or if the 'sched_relax_domain_level' of any cpuset which has | 532 | * partition. |
| 530 | * that flag enabled and with non-empty 'cpus' changes, | ||
| 531 | * - or if any cpuset with non-empty 'cpus' is removed, | ||
| 532 | * - or if a cpu gets offlined. | ||
| 533 | * | ||
| 534 | * This routine builds a partial partition of the systems CPUs | ||
| 535 | * (the set of non-overlappping cpumask_t's in the array 'part' | ||
| 536 | * below), and passes that partial partition to the kernel/sched.c | ||
| 537 | * partition_sched_domains() routine, which will rebuild the | ||
| 538 | * schedulers load balancing domains (sched domains) as specified | ||
| 539 | * by that partial partition. A 'partial partition' is a set of | ||
| 540 | * non-overlapping subsets whose union is a subset of that set. | ||
| 541 | * | 533 | * |
| 542 | * See "What is sched_load_balance" in Documentation/cpusets.txt | 534 | * See "What is sched_load_balance" in Documentation/cpusets.txt |
| 543 | * for a background explanation of this. | 535 | * for a background explanation of this. |
| @@ -547,13 +539,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) | |||
| 547 | * domains when operating in the severe memory shortage situations | 539 | * domains when operating in the severe memory shortage situations |
| 548 | * that could cause allocation failures below. | 540 | * that could cause allocation failures below. |
| 549 | * | 541 | * |
| 550 | * Call with cgroup_mutex held. May take callback_mutex during | 542 | * Must be called with cgroup_lock held. |
| 551 | * call due to the kfifo_alloc() and kmalloc() calls. May nest | ||
| 552 | * a call to the get_online_cpus()/put_online_cpus() pair. | ||
| 553 | * Must not be called holding callback_mutex, because we must not | ||
| 554 | * call get_online_cpus() while holding callback_mutex. Elsewhere | ||
| 555 | * the kernel nests callback_mutex inside get_online_cpus() calls. | ||
| 556 | * So the reverse nesting would risk an ABBA deadlock. | ||
| 557 | * | 543 | * |
| 558 | * The three key local variables below are: | 544 | * The three key local variables below are: |
| 559 | * q - a linked-list queue of cpuset pointers, used to implement a | 545 | * q - a linked-list queue of cpuset pointers, used to implement a |
| @@ -588,10 +574,10 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) | |||
| 588 | * element of the partition (one sched domain) to be passed to | 574 | * element of the partition (one sched domain) to be passed to |
| 589 | * partition_sched_domains(). | 575 | * partition_sched_domains(). |
| 590 | */ | 576 | */ |
| 591 | 577 | static int generate_sched_domains(cpumask_t **domains, | |
| 592 | void rebuild_sched_domains(void) | 578 | struct sched_domain_attr **attributes) |
| 593 | { | 579 | { |
| 594 | LIST_HEAD(q); /* queue of cpusets to be scanned*/ | 580 | LIST_HEAD(q); /* queue of cpusets to be scanned */ |
| 595 | struct cpuset *cp; /* scans q */ | 581 | struct cpuset *cp; /* scans q */ |
| 596 | struct cpuset **csa; /* array of all cpuset ptrs */ | 582 | struct cpuset **csa; /* array of all cpuset ptrs */ |
| 597 | int csn; /* how many cpuset ptrs in csa so far */ | 583 | int csn; /* how many cpuset ptrs in csa so far */ |
| @@ -601,23 +587,26 @@ void rebuild_sched_domains(void) | |||
| 601 | int ndoms; /* number of sched domains in result */ | 587 | int ndoms; /* number of sched domains in result */ |
| 602 | int nslot; /* next empty doms[] cpumask_t slot */ | 588 | int nslot; /* next empty doms[] cpumask_t slot */ |
| 603 | 589 | ||
| 604 | csa = NULL; | 590 | ndoms = 0; |
| 605 | doms = NULL; | 591 | doms = NULL; |
| 606 | dattr = NULL; | 592 | dattr = NULL; |
| 593 | csa = NULL; | ||
| 607 | 594 | ||
| 608 | /* Special case for the 99% of systems with one, full, sched domain */ | 595 | /* Special case for the 99% of systems with one, full, sched domain */ |
| 609 | if (is_sched_load_balance(&top_cpuset)) { | 596 | if (is_sched_load_balance(&top_cpuset)) { |
| 610 | ndoms = 1; | ||
| 611 | doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 597 | doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); |
| 612 | if (!doms) | 598 | if (!doms) |
| 613 | goto rebuild; | 599 | goto done; |
| 600 | |||
| 614 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); | 601 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
| 615 | if (dattr) { | 602 | if (dattr) { |
| 616 | *dattr = SD_ATTR_INIT; | 603 | *dattr = SD_ATTR_INIT; |
| 617 | update_domain_attr_tree(dattr, &top_cpuset); | 604 | update_domain_attr_tree(dattr, &top_cpuset); |
| 618 | } | 605 | } |
| 619 | *doms = top_cpuset.cpus_allowed; | 606 | *doms = top_cpuset.cpus_allowed; |
| 620 | goto rebuild; | 607 | |
| 608 | ndoms = 1; | ||
| 609 | goto done; | ||
| 621 | } | 610 | } |
| 622 | 611 | ||
| 623 | csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); | 612 | csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); |
| @@ -680,61 +669,141 @@ restart: | |||
| 680 | } | 669 | } |
| 681 | } | 670 | } |
| 682 | 671 | ||
| 683 | /* Convert <csn, csa> to <ndoms, doms> */ | 672 | /* |
| 673 | * Now we know how many domains to create. | ||
| 674 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. | ||
| 675 | */ | ||
| 684 | doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); | 676 | doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); |
| 685 | if (!doms) | 677 | if (!doms) { |
| 686 | goto rebuild; | 678 | ndoms = 0; |
| 679 | goto done; | ||
| 680 | } | ||
| 681 | |||
| 682 | /* | ||
| 683 | * The rest of the code, including the scheduler, can deal with the | ||
| 684 | * dattr==NULL case. No need to abort if alloc fails. | ||
| 685 | */ | ||
| 687 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); | 686 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); |
| 688 | 687 | ||
| 689 | for (nslot = 0, i = 0; i < csn; i++) { | 688 | for (nslot = 0, i = 0; i < csn; i++) { |
| 690 | struct cpuset *a = csa[i]; | 689 | struct cpuset *a = csa[i]; |
| 690 | cpumask_t *dp; | ||
| 691 | int apn = a->pn; | 691 | int apn = a->pn; |
| 692 | 692 | ||
| 693 | if (apn >= 0) { | 693 | if (apn < 0) { |
| 694 | cpumask_t *dp = doms + nslot; | 694 | /* Skip completed partitions */ |
| 695 | 695 | continue; | |
| 696 | if (nslot == ndoms) { | 696 | } |
| 697 | static int warnings = 10; | 697 | |
| 698 | if (warnings) { | 698 | dp = doms + nslot; |
| 699 | printk(KERN_WARNING | 699 | |
| 700 | "rebuild_sched_domains confused:" | 700 | if (nslot == ndoms) { |
| 701 | " nslot %d, ndoms %d, csn %d, i %d," | 701 | static int warnings = 10; |
| 702 | " apn %d\n", | 702 | if (warnings) { |
| 703 | nslot, ndoms, csn, i, apn); | 703 | printk(KERN_WARNING |
| 704 | warnings--; | 704 | "rebuild_sched_domains confused:" |
| 705 | } | 705 | " nslot %d, ndoms %d, csn %d, i %d," |
| 706 | continue; | 706 | " apn %d\n", |
| 707 | nslot, ndoms, csn, i, apn); | ||
| 708 | warnings--; | ||
| 707 | } | 709 | } |
| 710 | continue; | ||
| 711 | } | ||
| 708 | 712 | ||
| 709 | cpus_clear(*dp); | 713 | cpus_clear(*dp); |
| 710 | if (dattr) | 714 | if (dattr) |
| 711 | *(dattr + nslot) = SD_ATTR_INIT; | 715 | *(dattr + nslot) = SD_ATTR_INIT; |
| 712 | for (j = i; j < csn; j++) { | 716 | for (j = i; j < csn; j++) { |
| 713 | struct cpuset *b = csa[j]; | 717 | struct cpuset *b = csa[j]; |
| 714 | 718 | ||
| 715 | if (apn == b->pn) { | 719 | if (apn == b->pn) { |
| 716 | cpus_or(*dp, *dp, b->cpus_allowed); | 720 | cpus_or(*dp, *dp, b->cpus_allowed); |
| 717 | b->pn = -1; | 721 | if (dattr) |
| 718 | if (dattr) | 722 | update_domain_attr_tree(dattr + nslot, b); |
| 719 | update_domain_attr_tree(dattr | 723 | |
| 720 | + nslot, b); | 724 | /* Done with this partition */ |
| 721 | } | 725 | b->pn = -1; |
| 722 | } | 726 | } |
| 723 | nslot++; | ||
| 724 | } | 727 | } |
| 728 | nslot++; | ||
| 725 | } | 729 | } |
| 726 | BUG_ON(nslot != ndoms); | 730 | BUG_ON(nslot != ndoms); |
| 727 | 731 | ||
| 728 | rebuild: | 732 | done: |
| 729 | /* Have scheduler rebuild sched domains */ | 733 | kfree(csa); |
| 734 | |||
| 735 | *domains = doms; | ||
| 736 | *attributes = dattr; | ||
| 737 | return ndoms; | ||
| 738 | } | ||
| 739 | |||
| 740 | /* | ||
| 741 | * Rebuild scheduler domains. | ||
| 742 | * | ||
| 743 | * Call with neither cgroup_mutex held nor within get_online_cpus(). | ||
| 744 | * Takes both cgroup_mutex and get_online_cpus(). | ||
| 745 | * | ||
| 746 | * Cannot be directly called from cpuset code handling changes | ||
| 747 | * to the cpuset pseudo-filesystem, because it cannot be called | ||
| 748 | * from code that already holds cgroup_mutex. | ||
| 749 | */ | ||
| 750 | static void do_rebuild_sched_domains(struct work_struct *unused) | ||
| 751 | { | ||
| 752 | struct sched_domain_attr *attr; | ||
| 753 | cpumask_t *doms; | ||
| 754 | int ndoms; | ||
| 755 | |||
| 730 | get_online_cpus(); | 756 | get_online_cpus(); |
| 731 | partition_sched_domains(ndoms, doms, dattr); | 757 | |
| 758 | /* Generate domain masks and attrs */ | ||
| 759 | cgroup_lock(); | ||
| 760 | ndoms = generate_sched_domains(&doms, &attr); | ||
| 761 | cgroup_unlock(); | ||
| 762 | |||
| 763 | /* Have scheduler rebuild the domains */ | ||
| 764 | partition_sched_domains(ndoms, doms, attr); | ||
| 765 | |||
| 732 | put_online_cpus(); | 766 | put_online_cpus(); |
| 767 | } | ||
| 733 | 768 | ||
| 734 | done: | 769 | static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); |
| 735 | kfree(csa); | 770 | |
| 736 | /* Don't kfree(doms) -- partition_sched_domains() does that. */ | 771 | /* |
| 737 | /* Don't kfree(dattr) -- partition_sched_domains() does that. */ | 772 | * Rebuild scheduler domains, asynchronously via workqueue. |
| 773 | * | ||
| 774 | * If the flag 'sched_load_balance' of any cpuset with non-empty | ||
| 775 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset | ||
| 776 | * which has that flag enabled, or if any cpuset with a non-empty | ||
| 777 | * 'cpus' is removed, then call this routine to rebuild the | ||
| 778 | * scheduler's dynamic sched domains. | ||
| 779 | * | ||
| 780 | * The rebuild_sched_domains() and partition_sched_domains() | ||
| 781 | * routines must nest cgroup_lock() inside get_online_cpus(), | ||
| 782 | * but the cpuset changes that trigger a rebuild must nest the | ||
| 783 | * locks the other way, holding cgroup_lock() for much of the code. | ||
| 784 | * | ||
| 785 | * So in order to avoid an ABBA deadlock, the cpuset code handling | ||
| 786 | * these user changes delegates the actual sched domain rebuilding | ||
| 787 | * to a separate workqueue thread, which ends up processing the | ||
| 788 | * above do_rebuild_sched_domains() function. | ||
| 789 | */ | ||
| 790 | static void async_rebuild_sched_domains(void) | ||
| 791 | { | ||
| 792 | schedule_work(&rebuild_sched_domains_work); | ||
| 793 | } | ||
| 794 | |||
| 795 | /* | ||
| 796 | * Accomplishes the same scheduler domain rebuild as the above | ||
| 797 | * async_rebuild_sched_domains(), but it calls the rebuild | ||
| 798 | * routine synchronously rather than deferring it to an | ||
| 799 | * asynchronous work thread. | ||
| 800 | * | ||
| 801 | * This can only be called from code that is not holding | ||
| 802 | * cgroup_mutex (not nested in a cgroup_lock() call.) | ||
| 803 | */ | ||
| 804 | void rebuild_sched_domains(void) | ||
| 805 | { | ||
| 806 | do_rebuild_sched_domains(NULL); | ||
| 738 | } | 807 | } |
| 739 | 808 | ||
| 740 | /** | 809 | /** |
| @@ -863,7 +932,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf) | |||
| 863 | return retval; | 932 | return retval; |
| 864 | 933 | ||
| 865 | if (is_load_balanced) | 934 | if (is_load_balanced) |
| 866 | rebuild_sched_domains(); | 935 | async_rebuild_sched_domains(); |
| 867 | return 0; | 936 | return 0; |
| 868 | } | 937 | } |
| 869 | 938 | ||
| @@ -1090,7 +1159,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) | |||
| 1090 | if (val != cs->relax_domain_level) { | 1159 | if (val != cs->relax_domain_level) { |
| 1091 | cs->relax_domain_level = val; | 1160 | cs->relax_domain_level = val; |
| 1092 | if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) | 1161 | if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) |
| 1093 | rebuild_sched_domains(); | 1162 | async_rebuild_sched_domains(); |
| 1094 | } | 1163 | } |
| 1095 | 1164 | ||
| 1096 | return 0; | 1165 | return 0; |
| @@ -1131,7 +1200,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
| 1131 | mutex_unlock(&callback_mutex); | 1200 | mutex_unlock(&callback_mutex); |
| 1132 | 1201 | ||
| 1133 | if (cpus_nonempty && balance_flag_changed) | 1202 | if (cpus_nonempty && balance_flag_changed) |
| 1134 | rebuild_sched_domains(); | 1203 | async_rebuild_sched_domains(); |
| 1135 | 1204 | ||
| 1136 | return 0; | 1205 | return 0; |
| 1137 | } | 1206 | } |
| @@ -1492,6 +1561,9 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft) | |||
| 1492 | default: | 1561 | default: |
| 1493 | BUG(); | 1562 | BUG(); |
| 1494 | } | 1563 | } |
| 1564 | |||
| 1565 | /* Unreachable but makes gcc happy */ | ||
| 1566 | return 0; | ||
| 1495 | } | 1567 | } |
| 1496 | 1568 | ||
| 1497 | static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) | 1569 | static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) |
| @@ -1504,6 +1576,9 @@ static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) | |||
| 1504 | default: | 1576 | default: |
| 1505 | BUG(); | 1577 | BUG(); |
| 1506 | } | 1578 | } |
| 1579 | |||
| 1580 | /* Unreachable but makes gcc happy */ | ||
| 1581 | return 0; | ||
| 1507 | } | 1582 | } |
| 1508 | 1583 | ||
| 1509 | 1584 | ||
| @@ -1692,15 +1767,9 @@ static struct cgroup_subsys_state *cpuset_create( | |||
| 1692 | } | 1767 | } |
| 1693 | 1768 | ||
| 1694 | /* | 1769 | /* |
| 1695 | * Locking note on the strange update_flag() call below: | ||
| 1696 | * | ||
| 1697 | * If the cpuset being removed has its flag 'sched_load_balance' | 1770 | * If the cpuset being removed has its flag 'sched_load_balance' |
| 1698 | * enabled, then simulate turning sched_load_balance off, which | 1771 | * enabled, then simulate turning sched_load_balance off, which |
| 1699 | * will call rebuild_sched_domains(). The get_online_cpus() | 1772 | * will call async_rebuild_sched_domains(). |
| 1700 | * call in rebuild_sched_domains() must not be made while holding | ||
| 1701 | * callback_mutex. Elsewhere the kernel nests callback_mutex inside | ||
| 1702 | * get_online_cpus() calls. So the reverse nesting would risk an | ||
| 1703 | * ABBA deadlock. | ||
| 1704 | */ | 1773 | */ |
| 1705 | 1774 | ||
| 1706 | static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | 1775 | static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) |
| @@ -1719,7 +1788,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | |||
| 1719 | struct cgroup_subsys cpuset_subsys = { | 1788 | struct cgroup_subsys cpuset_subsys = { |
| 1720 | .name = "cpuset", | 1789 | .name = "cpuset", |
| 1721 | .create = cpuset_create, | 1790 | .create = cpuset_create, |
| 1722 | .destroy = cpuset_destroy, | 1791 | .destroy = cpuset_destroy, |
| 1723 | .can_attach = cpuset_can_attach, | 1792 | .can_attach = cpuset_can_attach, |
| 1724 | .attach = cpuset_attach, | 1793 | .attach = cpuset_attach, |
| 1725 | .populate = cpuset_populate, | 1794 | .populate = cpuset_populate, |
| @@ -1811,7 +1880,7 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) | |||
| 1811 | } | 1880 | } |
| 1812 | 1881 | ||
| 1813 | /* | 1882 | /* |
| 1814 | * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs | 1883 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
| 1815 | * or memory nodes, we need to walk over the cpuset hierarchy, | 1884 | * or memory nodes, we need to walk over the cpuset hierarchy, |
| 1816 | * removing that CPU or node from all cpusets. If this removes the | 1885 | * removing that CPU or node from all cpusets. If this removes the |
| 1817 | * last CPU or node from a cpuset, then move the tasks in the empty | 1886 | * last CPU or node from a cpuset, then move the tasks in the empty |
| @@ -1903,35 +1972,6 @@ static void scan_for_empty_cpusets(const struct cpuset *root) | |||
| 1903 | } | 1972 | } |
| 1904 | 1973 | ||
| 1905 | /* | 1974 | /* |
| 1906 | * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track | ||
| 1907 | * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to | ||
| 1908 | * track what's online after any CPU or memory node hotplug or unplug event. | ||
| 1909 | * | ||
| 1910 | * Since there are two callers of this routine, one for CPU hotplug | ||
| 1911 | * events and one for memory node hotplug events, we could have coded | ||
| 1912 | * two separate routines here. We code it as a single common routine | ||
| 1913 | * in order to minimize text size. | ||
| 1914 | */ | ||
| 1915 | |||
| 1916 | static void common_cpu_mem_hotplug_unplug(int rebuild_sd) | ||
| 1917 | { | ||
| 1918 | cgroup_lock(); | ||
| 1919 | |||
| 1920 | top_cpuset.cpus_allowed = cpu_online_map; | ||
| 1921 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | ||
| 1922 | scan_for_empty_cpusets(&top_cpuset); | ||
| 1923 | |||
| 1924 | /* | ||
| 1925 | * Scheduler destroys domains on hotplug events. | ||
| 1926 | * Rebuild them based on the current settings. | ||
| 1927 | */ | ||
| 1928 | if (rebuild_sd) | ||
| 1929 | rebuild_sched_domains(); | ||
| 1930 | |||
| 1931 | cgroup_unlock(); | ||
| 1932 | } | ||
| 1933 | |||
| 1934 | /* | ||
| 1935 | * The top_cpuset tracks what CPUs and Memory Nodes are online, | 1975 | * The top_cpuset tracks what CPUs and Memory Nodes are online, |
| 1936 | * period. This is necessary in order to make cpusets transparent | 1976 | * period. This is necessary in order to make cpusets transparent |
| 1937 | * (of no effect) on systems that are actively using CPU hotplug | 1977 | * (of no effect) on systems that are actively using CPU hotplug |
| @@ -1939,40 +1979,52 @@ static void common_cpu_mem_hotplug_unplug(int rebuild_sd) | |||
| 1939 | * | 1979 | * |
| 1940 | * This routine ensures that top_cpuset.cpus_allowed tracks | 1980 | * This routine ensures that top_cpuset.cpus_allowed tracks |
| 1941 | * cpu_online_map on each CPU hotplug (cpuhp) event. | 1981 | * cpu_online_map on each CPU hotplug (cpuhp) event. |
| 1982 | * | ||
| 1983 | * Called within get_online_cpus(). Needs to call cgroup_lock() | ||
| 1984 | * before calling generate_sched_domains(). | ||
| 1942 | */ | 1985 | */ |
| 1943 | 1986 | static int cpuset_track_online_cpus(struct notifier_block *unused_nb, | |
| 1944 | static int cpuset_handle_cpuhp(struct notifier_block *unused_nb, | ||
| 1945 | unsigned long phase, void *unused_cpu) | 1987 | unsigned long phase, void *unused_cpu) |
| 1946 | { | 1988 | { |
| 1989 | struct sched_domain_attr *attr; | ||
| 1990 | cpumask_t *doms; | ||
| 1991 | int ndoms; | ||
| 1992 | |||
| 1947 | switch (phase) { | 1993 | switch (phase) { |
| 1948 | case CPU_UP_CANCELED: | ||
| 1949 | case CPU_UP_CANCELED_FROZEN: | ||
| 1950 | case CPU_DOWN_FAILED: | ||
| 1951 | case CPU_DOWN_FAILED_FROZEN: | ||
| 1952 | case CPU_ONLINE: | 1994 | case CPU_ONLINE: |
| 1953 | case CPU_ONLINE_FROZEN: | 1995 | case CPU_ONLINE_FROZEN: |
| 1954 | case CPU_DEAD: | 1996 | case CPU_DEAD: |
| 1955 | case CPU_DEAD_FROZEN: | 1997 | case CPU_DEAD_FROZEN: |
| 1956 | common_cpu_mem_hotplug_unplug(1); | ||
| 1957 | break; | 1998 | break; |
| 1999 | |||
| 1958 | default: | 2000 | default: |
| 1959 | return NOTIFY_DONE; | 2001 | return NOTIFY_DONE; |
| 1960 | } | 2002 | } |
| 1961 | 2003 | ||
| 2004 | cgroup_lock(); | ||
| 2005 | top_cpuset.cpus_allowed = cpu_online_map; | ||
| 2006 | scan_for_empty_cpusets(&top_cpuset); | ||
| 2007 | ndoms = generate_sched_domains(&doms, &attr); | ||
| 2008 | cgroup_unlock(); | ||
| 2009 | |||
| 2010 | /* Have scheduler rebuild the domains */ | ||
| 2011 | partition_sched_domains(ndoms, doms, attr); | ||
| 2012 | |||
| 1962 | return NOTIFY_OK; | 2013 | return NOTIFY_OK; |
| 1963 | } | 2014 | } |
| 1964 | 2015 | ||
| 1965 | #ifdef CONFIG_MEMORY_HOTPLUG | 2016 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 1966 | /* | 2017 | /* |
| 1967 | * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. | 2018 | * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. |
| 1968 | * Call this routine anytime after you change | 2019 | * Call this routine anytime after node_states[N_HIGH_MEMORY] changes. |
| 1969 | * node_states[N_HIGH_MEMORY]. | 2020 | * See also the previous routine cpuset_track_online_cpus(). |
| 1970 | * See also the previous routine cpuset_handle_cpuhp(). | ||
| 1971 | */ | 2021 | */ |
| 1972 | |||
| 1973 | void cpuset_track_online_nodes(void) | 2022 | void cpuset_track_online_nodes(void) |
| 1974 | { | 2023 | { |
| 1975 | common_cpu_mem_hotplug_unplug(0); | 2024 | cgroup_lock(); |
| 2025 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | ||
| 2026 | scan_for_empty_cpusets(&top_cpuset); | ||
| 2027 | cgroup_unlock(); | ||
| 1976 | } | 2028 | } |
| 1977 | #endif | 2029 | #endif |
| 1978 | 2030 | ||
| @@ -1987,7 +2039,7 @@ void __init cpuset_init_smp(void) | |||
| 1987 | top_cpuset.cpus_allowed = cpu_online_map; | 2039 | top_cpuset.cpus_allowed = cpu_online_map; |
| 1988 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | 2040 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
| 1989 | 2041 | ||
| 1990 | hotcpu_notifier(cpuset_handle_cpuhp, 0); | 2042 | hotcpu_notifier(cpuset_track_online_cpus, 0); |
| 1991 | } | 2043 | } |
| 1992 | 2044 | ||
| 1993 | /** | 2045 | /** |
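Editor's note: the locking story in the cpuset.c changes above reduces to one standard pattern: code that already holds cgroup_lock() must not take get_online_cpus(), so it queues work instead, and the worker takes the two locks in the one safe order. The skeleton, condensed from the cpuset code above:

	#include <linux/workqueue.h>

	static void rebuild_worker(struct work_struct *unused)
	{
		get_online_cpus();
		cgroup_lock();		/* safe: nested inside the hotplug lock */
		/* ... generate the domain masks ... */
		cgroup_unlock();
		/* ... hand the masks to the scheduler ... */
		put_online_cpus();
	}
	static DECLARE_WORK(rebuild_work, rebuild_worker);

	static void kick_rebuild(void)	/* callable while holding cgroup_lock() */
	{
		schedule_work(&rebuild_work);
	}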
diff --git a/kernel/sched.c b/kernel/sched.c index 8626ae50ce08..0d8905a1b8ca 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -7746,24 +7746,27 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
| 7746 | * and partition_sched_domains() will fallback to the single partition | 7746 | * and partition_sched_domains() will fallback to the single partition |
| 7747 | * 'fallback_doms', it also forces the domains to be rebuilt. | 7747 | * 'fallback_doms', it also forces the domains to be rebuilt. |
| 7748 | * | 7748 | * |
| 7749 | * If doms_new==NULL it will be replaced with cpu_online_map. | ||
| 7750 | * ndoms_new==0 is a special case for destroying existing domains. | ||
| 7751 | * It will not create the default domain. | ||
| 7752 | * | ||
| 7749 | * Call with hotplug lock held | 7753 | * Call with hotplug lock held |
| 7750 | */ | 7754 | */ |
| 7751 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7755 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, |
| 7752 | struct sched_domain_attr *dattr_new) | 7756 | struct sched_domain_attr *dattr_new) |
| 7753 | { | 7757 | { |
| 7754 | int i, j; | 7758 | int i, j, n; |
| 7755 | 7759 | ||
| 7756 | mutex_lock(&sched_domains_mutex); | 7760 | mutex_lock(&sched_domains_mutex); |
| 7757 | 7761 | ||
| 7758 | /* always unregister in case we don't destroy any domains */ | 7762 | /* always unregister in case we don't destroy any domains */ |
| 7759 | unregister_sched_domain_sysctl(); | 7763 | unregister_sched_domain_sysctl(); |
| 7760 | 7764 | ||
| 7761 | if (doms_new == NULL) | 7765 | n = doms_new ? ndoms_new : 0; |
| 7762 | ndoms_new = 0; | ||
| 7763 | 7766 | ||
| 7764 | /* Destroy deleted domains */ | 7767 | /* Destroy deleted domains */ |
| 7765 | for (i = 0; i < ndoms_cur; i++) { | 7768 | for (i = 0; i < ndoms_cur; i++) { |
| 7766 | for (j = 0; j < ndoms_new; j++) { | 7769 | for (j = 0; j < n; j++) { |
| 7767 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7770 | if (cpus_equal(doms_cur[i], doms_new[j]) |
| 7768 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7771 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
| 7769 | goto match1; | 7772 | goto match1; |
| @@ -7776,7 +7779,6 @@ match1: | |||
| 7776 | 7779 | ||
| 7777 | if (doms_new == NULL) { | 7780 | if (doms_new == NULL) { |
| 7778 | ndoms_cur = 0; | 7781 | ndoms_cur = 0; |
| 7779 | ndoms_new = 1; | ||
| 7780 | doms_new = &fallback_doms; | 7782 | doms_new = &fallback_doms; |
| 7781 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7783 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); |
| 7782 | dattr_new = NULL; | 7784 | dattr_new = NULL; |
| @@ -7813,8 +7815,13 @@ match2: | |||
| 7813 | int arch_reinit_sched_domains(void) | 7815 | int arch_reinit_sched_domains(void) |
| 7814 | { | 7816 | { |
| 7815 | get_online_cpus(); | 7817 | get_online_cpus(); |
| 7818 | |||
| 7819 | /* Destroy domains first to force the rebuild */ | ||
| 7820 | partition_sched_domains(0, NULL, NULL); | ||
| 7821 | |||
| 7816 | rebuild_sched_domains(); | 7822 | rebuild_sched_domains(); |
| 7817 | put_online_cpus(); | 7823 | put_online_cpus(); |
| 7824 | |||
| 7818 | return 0; | 7825 | return 0; |
| 7819 | } | 7826 | } |
| 7820 | 7827 | ||
| @@ -7898,7 +7905,7 @@ static int update_sched_domains(struct notifier_block *nfb, | |||
| 7898 | case CPU_ONLINE_FROZEN: | 7905 | case CPU_ONLINE_FROZEN: |
| 7899 | case CPU_DEAD: | 7906 | case CPU_DEAD: |
| 7900 | case CPU_DEAD_FROZEN: | 7907 | case CPU_DEAD_FROZEN: |
| 7901 | partition_sched_domains(0, NULL, NULL); | 7908 | partition_sched_domains(1, NULL, NULL); |
| 7902 | return NOTIFY_OK; | 7909 | return NOTIFY_OK; |
| 7903 | 7910 | ||
| 7904 | default: | 7911 | default: |
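Editor's note: with this change the two special cases of partition_sched_domains() become distinct. Summarized as calls; this is a sketch of the calling convention, not new code:

	/* destroy the current domains and build nothing in their place
	 * (used above by arch_reinit_sched_domains to force a rebuild) */
	partition_sched_domains(0, NULL, NULL);

	/* fall back to a single domain spanning cpu_online_map minus
	 * cpu_isolated_map */
	partition_sched_domains(1, NULL, NULL);

	/* install an explicitly generated partial partition */
	partition_sched_domains(ndoms, doms, dattr);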
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 3d1e3e1a1971..1876b526c778 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
| @@ -177,7 +177,7 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
| 177 | /* | 177 | /* |
| 178 | * Noop handler when we shut down an event device | 178 | * Noop handler when we shut down an event device |
| 179 | */ | 179 | */ |
| 180 | static void clockevents_handle_noop(struct clock_event_device *dev) | 180 | void clockevents_handle_noop(struct clock_event_device *dev) |
| 181 | { | 181 | { |
| 182 | } | 182 | } |
| 183 | 183 | ||
| @@ -199,7 +199,6 @@ void clockevents_exchange_device(struct clock_event_device *old, | |||
| 199 | * released list and do a notify add later. | 199 | * released list and do a notify add later. |
| 200 | */ | 200 | */ |
| 201 | if (old) { | 201 | if (old) { |
| 202 | old->event_handler = clockevents_handle_noop; | ||
| 203 | clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); | 202 | clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); |
| 204 | list_del(&old->list); | 203 | list_del(&old->list); |
| 205 | list_add(&old->list, &clockevents_released); | 204 | list_add(&old->list, &clockevents_released); |
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 5125ddd8196b..1ad46f3df6e7 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
| @@ -245,7 +245,7 @@ static void sync_cmos_clock(unsigned long dummy) | |||
| 245 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) | 245 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) |
| 246 | fail = update_persistent_clock(now); | 246 | fail = update_persistent_clock(now); |
| 247 | 247 | ||
| 248 | next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec; | 248 | next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2); |
| 249 | if (next.tv_nsec <= 0) | 249 | if (next.tv_nsec <= 0) |
| 250 | next.tv_nsec += NSEC_PER_SEC; | 250 | next.tv_nsec += NSEC_PER_SEC; |
| 251 | 251 | ||
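Editor's note: the extra TICK_NSEC / 2 term centers the firing time on the half-second instead of biasing it late, since the timer can only fire on a tick boundary at or after its expiry. Worked numbers, assuming HZ=100 (TICK_NSEC = 10 ms) and purely illustrative values:

	/*
	 *   now.tv_nsec    = 493,000,000 ns
	 *   NSEC_PER_SEC/2 = 500,000,000 ns
	 *   TICK_NSEC/2    =   5,000,000 ns
	 *
	 *   next.tv_nsec = 500,000,000 - 493,000,000 - 5,000,000
	 *                = 2,000,000 ns
	 *
	 * The timer is armed 2 ms out and may fire up to one tick (10 ms)
	 * late, so it lands in the 495..505 ms window (centered on the
	 * half-second) rather than the late-biased 500..510 ms window.
	 */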
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 31463d370b94..2f5a38294bf9 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -175,6 +175,8 @@ static void tick_do_periodic_broadcast(void) | |||
| 175 | */ | 175 | */ |
| 176 | static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | 176 | static void tick_handle_periodic_broadcast(struct clock_event_device *dev) |
| 177 | { | 177 | { |
| 178 | ktime_t next; | ||
| 179 | |||
| 178 | tick_do_periodic_broadcast(); | 180 | tick_do_periodic_broadcast(); |
| 179 | 181 | ||
| 180 | /* | 182 | /* |
| @@ -185,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
| 185 | 187 | ||
| 186 | /* | 188 | /* |
| 187 | * Setup the next period for devices, which do not have | 189 | * Setup the next period for devices, which do not have |
| 188 | * periodic mode: | 190 | * periodic mode. We read dev->next_event first and add to it |
| 191 | * when the event has already expired. clockevents_program_event() | ||
| 192 | * sets dev->next_event only when the event is really | ||
| 193 | * programmed to the device. | ||
| 189 | */ | 194 | */ |
| 190 | for (;;) { | 195 | for (next = dev->next_event; ;) { |
| 191 | ktime_t next = ktime_add(dev->next_event, tick_period); | 196 | next = ktime_add(next, tick_period); |
| 192 | 197 | ||
| 193 | if (!clockevents_program_event(dev, next, ktime_get())) | 198 | if (!clockevents_program_event(dev, next, ktime_get())) |
| 194 | return; | 199 | return; |
| @@ -205,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 205 | struct clock_event_device *bc, *dev; | 210 | struct clock_event_device *bc, *dev; |
| 206 | struct tick_device *td; | 211 | struct tick_device *td; |
| 207 | unsigned long flags, *reason = why; | 212 | unsigned long flags, *reason = why; |
| 208 | int cpu; | 213 | int cpu, bc_stopped; |
| 209 | 214 | ||
| 210 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 215 | spin_lock_irqsave(&tick_broadcast_lock, flags); |
| 211 | 216 | ||
| @@ -223,6 +228,8 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 223 | if (!tick_device_is_functional(dev)) | 228 | if (!tick_device_is_functional(dev)) |
| 224 | goto out; | 229 | goto out; |
| 225 | 230 | ||
| 231 | bc_stopped = cpus_empty(tick_broadcast_mask); | ||
| 232 | |||
| 226 | switch (*reason) { | 233 | switch (*reason) { |
| 227 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: | 234 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: |
| 228 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
| @@ -245,9 +252,10 @@ static void tick_do_broadcast_on_off(void *why) | |||
| 245 | break; | 252 | break; |
| 246 | } | 253 | } |
| 247 | 254 | ||
| 248 | if (cpus_empty(tick_broadcast_mask)) | 255 | if (cpus_empty(tick_broadcast_mask)) { |
| 249 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 256 | if (!bc_stopped) |
| 250 | else { | 257 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); |
| 258 | } else if (bc_stopped) { | ||
| 251 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 259 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
| 252 | tick_broadcast_start_periodic(bc); | 260 | tick_broadcast_start_periodic(bc); |
| 253 | else | 261 | else |
| @@ -364,16 +372,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void) | |||
| 364 | static int tick_broadcast_set_event(ktime_t expires, int force) | 372 | static int tick_broadcast_set_event(ktime_t expires, int force) |
| 365 | { | 373 | { |
| 366 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | 374 | struct clock_event_device *bc = tick_broadcast_device.evtdev; |
| 367 | ktime_t now = ktime_get(); | 375 | |
| 368 | int res; | 376 | return tick_dev_program_event(bc, expires, force); |
| 369 | |||
| 370 | for(;;) { | ||
| 371 | res = clockevents_program_event(bc, expires, now); | ||
| 372 | if (!res || !force) | ||
| 373 | return res; | ||
| 374 | now = ktime_get(); | ||
| 375 | expires = ktime_add(now, ktime_set(0, bc->min_delta_ns)); | ||
| 376 | } | ||
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 379 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
| @@ -491,14 +491,52 @@ static void tick_broadcast_clear_oneshot(int cpu) | |||
| 491 | cpu_clear(cpu, tick_broadcast_oneshot_mask); | 491 | cpu_clear(cpu, tick_broadcast_oneshot_mask); |
| 492 | } | 492 | } |
| 493 | 493 | ||
| 494 | static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) | ||
| 495 | { | ||
| 496 | struct tick_device *td; | ||
| 497 | int cpu; | ||
| 498 | |||
| 499 | for_each_cpu_mask_nr(cpu, *mask) { | ||
| 500 | td = &per_cpu(tick_cpu_device, cpu); | ||
| 501 | if (td->evtdev) | ||
| 502 | td->evtdev->next_event = expires; | ||
| 503 | } | ||
| 504 | } | ||
| 505 | |||
| 494 | /** | 506 | /** |
| 495 | * tick_broadcast_setup_oneshot - setup the broadcast device | 507 | * tick_broadcast_setup_oneshot - setup the broadcast device |
| 496 | */ | 508 | */ |
| 497 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 509 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
| 498 | { | 510 | { |
| 499 | bc->event_handler = tick_handle_oneshot_broadcast; | 511 | /* Set it up only once! */ |
| 500 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | 512 | if (bc->event_handler != tick_handle_oneshot_broadcast) { |
| 501 | bc->next_event.tv64 = KTIME_MAX; | 513 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; |
| 514 | int cpu = smp_processor_id(); | ||
| 515 | cpumask_t mask; | ||
| 516 | |||
| 517 | bc->event_handler = tick_handle_oneshot_broadcast; | ||
| 518 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | ||
| 519 | |||
| 520 | /* Take the do_timer update */ | ||
| 521 | tick_do_timer_cpu = cpu; | ||
| 522 | |||
| 523 | /* | ||
| 524 | * We must be careful here. There might be other CPUs | ||
| 525 | * waiting for periodic broadcast. We need to set the | ||
| 526 | * oneshot_mask bits for those and program the | ||
| 527 | * broadcast device to fire. | ||
| 528 | */ | ||
| 529 | mask = tick_broadcast_mask; | ||
| 530 | cpu_clear(cpu, mask); | ||
| 531 | cpus_or(tick_broadcast_oneshot_mask, | ||
| 532 | tick_broadcast_oneshot_mask, mask); | ||
| 533 | |||
| 534 | if (was_periodic && !cpus_empty(mask)) { | ||
| 535 | tick_broadcast_init_next_event(&mask, tick_next_period); | ||
| 536 | tick_broadcast_set_event(tick_next_period, 1); | ||
| 537 | } else | ||
| 538 | bc->next_event.tv64 = KTIME_MAX; | ||
| 539 | } | ||
| 502 | } | 540 | } |
| 503 | 541 | ||
| 504 | /* | 542 | /* |
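Editor's note: the bc_stopped logic above makes broadcast device control edge-triggered: the device is shut down only when the last CPU leaves the mask and started only when the first one enters, instead of being re-programmed on every call. The transition test in isolation (a sketch, not the exact kernel code):

	int was_empty = cpus_empty(tick_broadcast_mask);

	/* ... add or remove this CPU from tick_broadcast_mask ... */

	if (cpus_empty(tick_broadcast_mask)) {
		if (!was_empty)		/* last user left: stop the device */
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	} else if (was_empty) {		/* first user arrived: start it */
		tick_broadcast_start_periodic(bc);
	}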
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 80c4336f4188..c4777193d567 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -161,6 +161,7 @@ static void tick_setup_device(struct tick_device *td, | |||
| 161 | } else { | 161 | } else { |
| 162 | handler = td->evtdev->event_handler; | 162 | handler = td->evtdev->event_handler; |
| 163 | next_event = td->evtdev->next_event; | 163 | next_event = td->evtdev->next_event; |
| 164 | td->evtdev->event_handler = clockevents_handle_noop; | ||
| 164 | } | 165 | } |
| 165 | 166 | ||
| 166 | td->evtdev = newdev; | 167 | td->evtdev = newdev; |
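Editor's note: parking the outgoing device on clockevents_handle_noop closes a small race: the hardware can still raise one more interrupt after the hand-over, and a stale event_handler would then run against a device the core no longer owns. The hand-over pattern in isolation (sketch):

	/* save the handler and pending expiry, then park the old device */
	handler = old->event_handler;
	next_event = old->next_event;
	old->event_handler = clockevents_handle_noop;

	/* the new device takes over both */
	td->evtdev = new;
	new->event_handler = handler;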
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index f13f2b7f4fd4..0ffc2918ea6f 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
| @@ -17,6 +17,8 @@ extern void tick_handle_periodic(struct clock_event_device *dev); | |||
| 17 | extern void tick_setup_oneshot(struct clock_event_device *newdev, | 17 | extern void tick_setup_oneshot(struct clock_event_device *newdev, |
| 18 | void (*handler)(struct clock_event_device *), | 18 | void (*handler)(struct clock_event_device *), |
| 19 | ktime_t nextevt); | 19 | ktime_t nextevt); |
| 20 | extern int tick_dev_program_event(struct clock_event_device *dev, | ||
| 21 | ktime_t expires, int force); | ||
| 20 | extern int tick_program_event(ktime_t expires, int force); | 22 | extern int tick_program_event(ktime_t expires, int force); |
| 21 | extern void tick_oneshot_notify(void); | 23 | extern void tick_oneshot_notify(void); |
| 22 | extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); | 24 | extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 450c04935b66..2e8de678e767 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
| @@ -23,24 +23,56 @@ | |||
| 23 | #include "tick-internal.h" | 23 | #include "tick-internal.h" |
| 24 | 24 | ||
| 25 | /** | 25 | /** |
| 26 | * tick_program_event | 26 | * tick_program_event internal worker function |
| 27 | */ | 27 | */ |
| 28 | int tick_program_event(ktime_t expires, int force) | 28 | int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires, |
| 29 | int force) | ||
| 29 | { | 30 | { |
| 30 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | ||
| 31 | ktime_t now = ktime_get(); | 31 | ktime_t now = ktime_get(); |
| 32 | int i; | ||
| 32 | 33 | ||
| 33 | while (1) { | 34 | for (i = 0;;) { |
| 34 | int ret = clockevents_program_event(dev, expires, now); | 35 | int ret = clockevents_program_event(dev, expires, now); |
| 35 | 36 | ||
| 36 | if (!ret || !force) | 37 | if (!ret || !force) |
| 37 | return ret; | 38 | return ret; |
| 39 | |||
| 40 | /* | ||
| 41 | * We tried 2 times to program the device with the given | ||
| 42 | * min_delta_ns. If that's not working then we increase it | ||
| 43 | * by 50% and emit a warning. | ||
| 44 | */ | ||
| 45 | if (++i > 2) { | ||
| 46 | /* Increase the min. delta and try again */ | ||
| 47 | if (!dev->min_delta_ns) | ||
| 48 | dev->min_delta_ns = 5000; | ||
| 49 | else | ||
| 50 | dev->min_delta_ns += dev->min_delta_ns >> 1; | ||
| 51 | |||
| 52 | printk(KERN_WARNING | ||
| 53 | "CE: %s increasing min_delta_ns to %lu nsec\n", | ||
| 54 | dev->name ? dev->name : "?", | ||
| 55 | dev->min_delta_ns); | ||
| 56 | |||
| 57 | i = 0; | ||
| 58 | } | ||
| 59 | |||
| 38 | now = ktime_get(); | 60 | now = ktime_get(); |
| 39 | expires = ktime_add(now, ktime_set(0, dev->min_delta_ns)); | 61 | expires = ktime_add_ns(now, dev->min_delta_ns); |
| 40 | } | 62 | } |
| 41 | } | 63 | } |
| 42 | 64 | ||
| 43 | /** | 65 | /** |
| 66 | * tick_program_event | ||
| 67 | */ | ||
| 68 | int tick_program_event(ktime_t expires, int force) | ||
| 69 | { | ||
| 70 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | ||
| 71 | |||
| 72 | return tick_dev_program_event(dev, expires, force); | ||
| 73 | } | ||
| 74 | |||
| 75 | /** | ||
| 44 | * tick_resume_oneshot - resume oneshot mode | 76 | * tick_resume_oneshot - resume oneshot mode |
| 45 | */ | 77 | */ |
| 46 | void tick_resume_oneshot(void) | 78 | void tick_resume_oneshot(void) |
| @@ -61,7 +93,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev, | |||
| 61 | { | 93 | { |
| 62 | newdev->event_handler = handler; | 94 | newdev->event_handler = handler; |
| 63 | clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); | 95 | clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); |
| 64 | clockevents_program_event(newdev, next_event, ktime_get()); | 96 | tick_dev_program_event(newdev, next_event, 1); |
| 65 | } | 97 | } |
| 66 | 98 | ||
| 67 | /** | 99 | /** |
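Editor's note: the retry loop above gives the device three attempts at the current minimum, then raises min_delta_ns by 50% (min_delta_ns += min_delta_ns >> 1) and warns. An escalation trace, assuming every program attempt fails and the device started with no minimum:

	/*
	 *   attempts 1-3 : fail with the original min_delta_ns (0)
	 *   after #3     : seeded to 5000 ns
	 *   after #6     : 5000 + 2500 = 7500 ns
	 *   after #9     : 7500 + 3750 = 11250 ns
	 *   ...
	 *
	 * so the minimum grows by half after every third failure until
	 * the device finally accepts a programmed delta.
	 */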
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d8d1d1142248..c399bc1093cb 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #include <asm/page.h> /* for PAGE_SIZE */ | 28 | #include <asm/page.h> /* for PAGE_SIZE */ |
| 29 | #include <asm/div64.h> | 29 | #include <asm/div64.h> |
| 30 | #include <asm/sections.h> /* for dereference_function_descriptor() */ | ||
| 30 | 31 | ||
| 31 | /* Works only for digits and letters, but small and fast */ | 32 | /* Works only for digits and letters, but small and fast */ |
| 32 | #define TOLOWER(x) ((x) | 0x20) | 33 | #define TOLOWER(x) ((x) | 0x20) |
| @@ -513,16 +514,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio | |||
| 513 | return buf; | 514 | return buf; |
| 514 | } | 515 | } |
| 515 | 516 | ||
| 516 | static inline void *dereference_function_descriptor(void *ptr) | ||
| 517 | { | ||
| 518 | #if defined(CONFIG_IA64) || defined(CONFIG_PPC64) | ||
| 519 | void *p; | ||
| 520 | if (!probe_kernel_address(ptr, p)) | ||
| 521 | ptr = p; | ||
| 522 | #endif | ||
| 523 | return ptr; | ||
| 524 | } | ||
| 525 | |||
| 526 | static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) | 517 | static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) |
| 527 | { | 518 | { |
| 528 | unsigned long value = (unsigned long) ptr; | 519 | unsigned long value = (unsigned long) ptr; |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 1edfdf4c095b..f6348e078aa4 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
| @@ -49,7 +49,7 @@ | |||
| 49 | #define BT_DBG(D...) | 49 | #define BT_DBG(D...) |
| 50 | #endif | 50 | #endif |
| 51 | 51 | ||
| 52 | #define VERSION "2.12" | 52 | #define VERSION "2.13" |
| 53 | 53 | ||
| 54 | /* Bluetooth sockets */ | 54 | /* Bluetooth sockets */ |
| 55 | #define BT_MAX_PROTO 8 | 55 | #define BT_MAX_PROTO 8 |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ca8d05245ca0..b7002429f152 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
| @@ -330,7 +330,7 @@ EXPORT_SYMBOL(hci_get_route); | |||
| 330 | 330 | ||
| 331 | /* Create SCO or ACL connection. | 331 | /* Create SCO or ACL connection. |
| 332 | * Device _must_ be locked */ | 332 | * Device _must_ be locked */ |
| 333 | struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst) | 333 | struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type) |
| 334 | { | 334 | { |
| 335 | struct hci_conn *acl; | 335 | struct hci_conn *acl; |
| 336 | struct hci_conn *sco; | 336 | struct hci_conn *sco; |
| @@ -344,8 +344,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
| 344 | 344 | ||
| 345 | hci_conn_hold(acl); | 345 | hci_conn_hold(acl); |
| 346 | 346 | ||
| 347 | if (acl->state == BT_OPEN || acl->state == BT_CLOSED) | 347 | if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { |
| 348 | acl->auth_type = auth_type; | ||
| 348 | hci_acl_connect(acl); | 349 | hci_acl_connect(acl); |
| 350 | } | ||
| 349 | 351 | ||
| 350 | if (type == ACL_LINK) | 352 | if (type == ACL_LINK) |
| 351 | return acl; | 353 | return acl; |
| @@ -374,6 +376,19 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
| 374 | } | 376 | } |
| 375 | EXPORT_SYMBOL(hci_connect); | 377 | EXPORT_SYMBOL(hci_connect); |
| 376 | 378 | ||
| 379 | /* Check link security requirement */ | ||
| 380 | int hci_conn_check_link_mode(struct hci_conn *conn) | ||
| 381 | { | ||
| 382 | BT_DBG("conn %p", conn); | ||
| 383 | |||
| 384 | if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 && | ||
| 385 | !(conn->link_mode & HCI_LM_ENCRYPT)) | ||
| 386 | return 0; | ||
| 387 | |||
| 388 | return 1; | ||
| 389 | } | ||
| 390 | EXPORT_SYMBOL(hci_conn_check_link_mode); | ||
| 391 | |||
| 377 | /* Authenticate remote device */ | 392 | /* Authenticate remote device */ |
| 378 | int hci_conn_auth(struct hci_conn *conn) | 393 | int hci_conn_auth(struct hci_conn *conn) |
| 379 | { | 394 | { |
| @@ -381,7 +396,7 @@ int hci_conn_auth(struct hci_conn *conn) | |||
| 381 | 396 | ||
| 382 | if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) { | 397 | if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) { |
| 383 | if (!(conn->auth_type & 0x01)) { | 398 | if (!(conn->auth_type & 0x01)) { |
| 384 | conn->auth_type = HCI_AT_GENERAL_BONDING_MITM; | 399 | conn->auth_type |= 0x01; |
| 385 | conn->link_mode &= ~HCI_LM_AUTH; | 400 | conn->link_mode &= ~HCI_LM_AUTH; |
| 386 | } | 401 | } |
| 387 | } | 402 | } |
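Editor's note: hci_conn_check_link_mode() encodes a single rule: when both sides run Secure Simple Pairing, an unencrypted link must not be treated as secure. A hedged sketch of a caller; the surrounding handler and the 0x05 status are assumptions, only the helper itself comes from this patch:

	/* gate a connection-complete path on link security */
	if (!hci_conn_check_link_mode(conn)) {
		/* SSP on both ends but the link is not encrypted */
		hci_proto_connect_cfm(conn, 0x05 /* authentication failure */);
		return;
	}
	conn->state = BT_CONNECTED;
	hci_proto_connect_cfm(conn, 0);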
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 0e3db289f4be..ad7a553d7713 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
| @@ -1605,14 +1605,11 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b | |||
| 1605 | 1605 | ||
| 1606 | if (conn->state == BT_CONFIG) { | 1606 | if (conn->state == BT_CONFIG) { |
| 1607 | if (!ev->status && hdev->ssp_mode > 0 && | 1607 | if (!ev->status && hdev->ssp_mode > 0 && |
| 1608 | conn->ssp_mode > 0) { | 1608 | conn->ssp_mode > 0 && conn->out) { |
| 1609 | if (conn->out) { | 1609 | struct hci_cp_auth_requested cp; |
| 1610 | struct hci_cp_auth_requested cp; | 1610 | cp.handle = ev->handle; |
| 1611 | cp.handle = ev->handle; | 1611 | hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, |
| 1612 | hci_send_cmd(hdev, | ||
| 1613 | HCI_OP_AUTH_REQUESTED, | ||
| 1614 | sizeof(cp), &cp); | 1612 | sizeof(cp), &cp); |
| 1615 | } | ||
| 1616 | } else { | 1613 | } else { |
| 1617 | conn->state = BT_CONNECTED; | 1614 | conn->state = BT_CONNECTED; |
| 1618 | hci_proto_connect_cfm(conn, ev->status); | 1615 | hci_proto_connect_cfm(conn, ev->status); |
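
The restructuring in hci_remote_ext_features_evt() is not only cosmetic: previously an incoming (non-outgoing) SSP connection matched the outer ssp_mode test but not the inner conn->out test, so it neither issued HCI_OP_AUTH_REQUESTED nor reached the else branch, and could stall in BT_CONFIG. Folding conn->out into the single condition routes incoming links to the connected path. In outline:

	if (!ev->status && hdev->ssp_mode > 0 &&
	    conn->ssp_mode > 0 && conn->out) {
		/* outgoing SSP link: authenticate before finishing setup */
	} else {
		conn->state = BT_CONNECTED;	/* incoming links no longer stall */
		hci_proto_connect_cfm(conn, ev->status);
	}
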
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 3396d5bdef1c..9610a9c85b98 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
| @@ -55,7 +55,7 @@ | |||
| 55 | #define BT_DBG(D...) | 55 | #define BT_DBG(D...) |
| 56 | #endif | 56 | #endif |
| 57 | 57 | ||
| 58 | #define VERSION "2.10" | 58 | #define VERSION "2.11" |
| 59 | 59 | ||
| 60 | static u32 l2cap_feat_mask = 0x0000; | 60 | static u32 l2cap_feat_mask = 0x0000; |
| 61 | 61 | ||
| @@ -778,6 +778,7 @@ static int l2cap_do_connect(struct sock *sk) | |||
| 778 | struct l2cap_conn *conn; | 778 | struct l2cap_conn *conn; |
| 779 | struct hci_conn *hcon; | 779 | struct hci_conn *hcon; |
| 780 | struct hci_dev *hdev; | 780 | struct hci_dev *hdev; |
| 781 | __u8 auth_type; | ||
| 781 | int err = 0; | 782 | int err = 0; |
| 782 | 783 | ||
| 783 | BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm); | 784 | BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm); |
| @@ -789,7 +790,21 @@ static int l2cap_do_connect(struct sock *sk) | |||
| 789 | 790 | ||
| 790 | err = -ENOMEM; | 791 | err = -ENOMEM; |
| 791 | 792 | ||
| 792 | hcon = hci_connect(hdev, ACL_LINK, dst); | 793 | if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH || |
| 794 | l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT || | ||
| 795 | l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) { | ||
| 796 | if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) | ||
| 797 | auth_type = HCI_AT_NO_BONDING_MITM; | ||
| 798 | else | ||
| 799 | auth_type = HCI_AT_GENERAL_BONDING_MITM; | ||
| 800 | } else { | ||
| 801 | if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) | ||
| 802 | auth_type = HCI_AT_NO_BONDING; | ||
| 803 | else | ||
| 804 | auth_type = HCI_AT_GENERAL_BONDING; | ||
| 805 | } | ||
| 806 | |||
| 807 | hcon = hci_connect(hdev, ACL_LINK, dst, auth_type); | ||
| 793 | if (!hcon) | 808 | if (!hcon) |
| 794 | goto done; | 809 | goto done; |
| 795 | 810 | ||
| @@ -1553,10 +1568,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
| 1553 | struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; | 1568 | struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; |
| 1554 | struct l2cap_conn_rsp rsp; | 1569 | struct l2cap_conn_rsp rsp; |
| 1555 | struct sock *sk, *parent; | 1570 | struct sock *sk, *parent; |
| 1556 | int result, status = 0; | 1571 | int result, status = L2CAP_CS_NO_INFO; |
| 1557 | 1572 | ||
| 1558 | u16 dcid = 0, scid = __le16_to_cpu(req->scid); | 1573 | u16 dcid = 0, scid = __le16_to_cpu(req->scid); |
| 1559 | __le16 psm = req->psm; | 1574 | __le16 psm = req->psm; |
| 1560 | 1575 | ||
| 1561 | BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); | 1576 | BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); |
| 1562 | 1577 | ||
| @@ -1567,6 +1582,13 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
| 1567 | goto sendresp; | 1582 | goto sendresp; |
| 1568 | } | 1583 | } |
| 1569 | 1584 | ||
| 1585 | /* Check if the ACL is secure enough (if not SDP) */ | ||
| 1586 | if (psm != cpu_to_le16(0x0001) && | ||
| 1587 | !hci_conn_check_link_mode(conn->hcon)) { | ||
| 1588 | result = L2CAP_CR_SEC_BLOCK; | ||
| 1589 | goto response; | ||
| 1590 | } | ||
| 1591 | |||
| 1570 | result = L2CAP_CR_NO_MEM; | 1592 | result = L2CAP_CR_NO_MEM; |
| 1571 | 1593 | ||
| 1572 | /* Check for backlog size */ | 1594 | /* Check for backlog size */ |
| @@ -2224,7 +2246,7 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status) | |||
| 2224 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | 2246 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); |
| 2225 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | 2247 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); |
| 2226 | rsp.result = cpu_to_le16(result); | 2248 | rsp.result = cpu_to_le16(result); |
| 2227 | rsp.status = cpu_to_le16(0); | 2249 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); |
| 2228 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | 2250 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, |
| 2229 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 2251 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); |
| 2230 | } | 2252 | } |
| @@ -2296,7 +2318,7 @@ static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
| 2296 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | 2318 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); |
| 2297 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | 2319 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); |
| 2298 | rsp.result = cpu_to_le16(result); | 2320 | rsp.result = cpu_to_le16(result); |
| 2299 | rsp.status = cpu_to_le16(0); | 2321 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); |
| 2300 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, | 2322 | l2cap_send_cmd(conn, l2cap_pi(sk)->ident, |
| 2301 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 2323 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); |
| 2302 | } | 2324 | } |
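
Three related changes in l2cap.c: the outgoing path derives the new hci_connect() auth_type from the socket's link mode and PSM; the incoming path rejects connect requests arriving on an insufficiently secure ACL with the L2CAP_CR_SEC_BLOCK result, exempting SDP; and the bare 0 status values become the named L2CAP_CS_NO_INFO. The if/else ladder in l2cap_do_connect() is a 2x2 decision that could equally be written as a helper (l2cap_pick_auth_type is a hypothetical name, not in the patch):

	static __u8 l2cap_pick_auth_type(struct l2cap_pinfo *pi)
	{
		/* PSM 0x0001 is SDP: it must never trigger bonding */
		int sdp = pi->psm == cpu_to_le16(0x0001);
		int secure = pi->link_mode &
			     (L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE);

		if (secure)
			return sdp ? HCI_AT_NO_BONDING_MITM :
				     HCI_AT_GENERAL_BONDING_MITM;
		return sdp ? HCI_AT_NO_BONDING : HCI_AT_GENERAL_BONDING;
	}
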
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index a16011fedc1d..0cc91e6da76d 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -200,7 +200,7 @@ static int sco_connect(struct sock *sk) | |||
| 200 | else | 200 | else |
| 201 | type = SCO_LINK; | 201 | type = SCO_LINK; |
| 202 | 202 | ||
| 203 | hcon = hci_connect(hdev, type, dst); | 203 | hcon = hci_connect(hdev, type, dst, HCI_AT_NO_BONDING); |
| 204 | if (!hcon) | 204 | if (!hcon) |
| 205 | goto done; | 205 | goto done; |
| 206 | 206 | ||
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index eeee218eed80..5bbf07362172 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
| @@ -188,15 +188,21 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
| 188 | return 0; | 188 | return 0; |
| 189 | 189 | ||
| 190 | case BRCTL_SET_BRIDGE_HELLO_TIME: | 190 | case BRCTL_SET_BRIDGE_HELLO_TIME: |
| 191 | { | ||
| 192 | unsigned long t = clock_t_to_jiffies(args[1]); | ||
| 191 | if (!capable(CAP_NET_ADMIN)) | 193 | if (!capable(CAP_NET_ADMIN)) |
| 192 | return -EPERM; | 194 | return -EPERM; |
| 193 | 195 | ||
| 196 | if (t < HZ) | ||
| 197 | return -EINVAL; | ||
| 198 | |||
| 194 | spin_lock_bh(&br->lock); | 199 | spin_lock_bh(&br->lock); |
| 195 | br->bridge_hello_time = clock_t_to_jiffies(args[1]); | 200 | br->bridge_hello_time = t; |
| 196 | if (br_is_root_bridge(br)) | 201 | if (br_is_root_bridge(br)) |
| 197 | br->hello_time = br->bridge_hello_time; | 202 | br->hello_time = br->bridge_hello_time; |
| 198 | spin_unlock_bh(&br->lock); | 203 | spin_unlock_bh(&br->lock); |
| 199 | return 0; | 204 | return 0; |
| 205 | } | ||
| 200 | 206 | ||
| 201 | case BRCTL_SET_BRIDGE_MAX_AGE: | 207 | case BRCTL_SET_BRIDGE_MAX_AGE: |
| 202 | if (!capable(CAP_NET_ADMIN)) | 208 | if (!capable(CAP_NET_ADMIN)) |
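
The new block scopes a conversion that has to happen before validation: args[1] arrives from user space in clock_t (USER_HZ) units, and the t < HZ test enforces a one-second floor once converted to jiffies; previously a zero hello time could be programmed and the hello timer would rearm immediately. A worked example, assuming USER_HZ == 100 and HZ == 1000 (both are configuration-dependent):

	/*
	 *   brctl sethello br0 1  ->  args[1] == 100 clock_t ticks
	 *       clock_t_to_jiffies(100) == 1000 jiffies >= HZ : accepted
	 *   a half-second request ->  args[1] == 50
	 *       clock_t_to_jiffies(50)  ==  500 jiffies <  HZ : -EINVAL
	 */
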
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 27d6a511c8c1..158dee8b4965 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c | |||
| @@ -29,11 +29,12 @@ | |||
| 29 | */ | 29 | */ |
| 30 | static ssize_t store_bridge_parm(struct device *d, | 30 | static ssize_t store_bridge_parm(struct device *d, |
| 31 | const char *buf, size_t len, | 31 | const char *buf, size_t len, |
| 32 | void (*set)(struct net_bridge *, unsigned long)) | 32 | int (*set)(struct net_bridge *, unsigned long)) |
| 33 | { | 33 | { |
| 34 | struct net_bridge *br = to_bridge(d); | 34 | struct net_bridge *br = to_bridge(d); |
| 35 | char *endp; | 35 | char *endp; |
| 36 | unsigned long val; | 36 | unsigned long val; |
| 37 | int err; | ||
| 37 | 38 | ||
| 38 | if (!capable(CAP_NET_ADMIN)) | 39 | if (!capable(CAP_NET_ADMIN)) |
| 39 | return -EPERM; | 40 | return -EPERM; |
| @@ -43,9 +44,9 @@ static ssize_t store_bridge_parm(struct device *d, | |||
| 43 | return -EINVAL; | 44 | return -EINVAL; |
| 44 | 45 | ||
| 45 | spin_lock_bh(&br->lock); | 46 | spin_lock_bh(&br->lock); |
| 46 | (*set)(br, val); | 47 | err = (*set)(br, val); |
| 47 | spin_unlock_bh(&br->lock); | 48 | spin_unlock_bh(&br->lock); |
| 48 | return len; | 49 | return err ? err : len; |
| 49 | } | 50 | } |
| 50 | 51 | ||
| 51 | 52 | ||
| @@ -56,12 +57,13 @@ static ssize_t show_forward_delay(struct device *d, | |||
| 56 | return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); | 57 | return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); |
| 57 | } | 58 | } |
| 58 | 59 | ||
| 59 | static void set_forward_delay(struct net_bridge *br, unsigned long val) | 60 | static int set_forward_delay(struct net_bridge *br, unsigned long val) |
| 60 | { | 61 | { |
| 61 | unsigned long delay = clock_t_to_jiffies(val); | 62 | unsigned long delay = clock_t_to_jiffies(val); |
| 62 | br->forward_delay = delay; | 63 | br->forward_delay = delay; |
| 63 | if (br_is_root_bridge(br)) | 64 | if (br_is_root_bridge(br)) |
| 64 | br->bridge_forward_delay = delay; | 65 | br->bridge_forward_delay = delay; |
| 66 | return 0; | ||
| 65 | } | 67 | } |
| 66 | 68 | ||
| 67 | static ssize_t store_forward_delay(struct device *d, | 69 | static ssize_t store_forward_delay(struct device *d, |
| @@ -80,12 +82,17 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr, | |||
| 80 | jiffies_to_clock_t(to_bridge(d)->hello_time)); | 82 | jiffies_to_clock_t(to_bridge(d)->hello_time)); |
| 81 | } | 83 | } |
| 82 | 84 | ||
| 83 | static void set_hello_time(struct net_bridge *br, unsigned long val) | 85 | static int set_hello_time(struct net_bridge *br, unsigned long val) |
| 84 | { | 86 | { |
| 85 | unsigned long t = clock_t_to_jiffies(val); | 87 | unsigned long t = clock_t_to_jiffies(val); |
| 88 | |||
| 89 | if (t < HZ) | ||
| 90 | return -EINVAL; | ||
| 91 | |||
| 86 | br->hello_time = t; | 92 | br->hello_time = t; |
| 87 | if (br_is_root_bridge(br)) | 93 | if (br_is_root_bridge(br)) |
| 88 | br->bridge_hello_time = t; | 94 | br->bridge_hello_time = t; |
| 95 | return 0; | ||
| 89 | } | 96 | } |
| 90 | 97 | ||
| 91 | static ssize_t store_hello_time(struct device *d, | 98 | static ssize_t store_hello_time(struct device *d, |
| @@ -104,12 +111,13 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr, | |||
| 104 | jiffies_to_clock_t(to_bridge(d)->max_age)); | 111 | jiffies_to_clock_t(to_bridge(d)->max_age)); |
| 105 | } | 112 | } |
| 106 | 113 | ||
| 107 | static void set_max_age(struct net_bridge *br, unsigned long val) | 114 | static int set_max_age(struct net_bridge *br, unsigned long val) |
| 108 | { | 115 | { |
| 109 | unsigned long t = clock_t_to_jiffies(val); | 116 | unsigned long t = clock_t_to_jiffies(val); |
| 110 | br->max_age = t; | 117 | br->max_age = t; |
| 111 | if (br_is_root_bridge(br)) | 118 | if (br_is_root_bridge(br)) |
| 112 | br->bridge_max_age = t; | 119 | br->bridge_max_age = t; |
| 120 | return 0; | ||
| 113 | } | 121 | } |
| 114 | 122 | ||
| 115 | static ssize_t store_max_age(struct device *d, struct device_attribute *attr, | 123 | static ssize_t store_max_age(struct device *d, struct device_attribute *attr, |
| @@ -126,9 +134,10 @@ static ssize_t show_ageing_time(struct device *d, | |||
| 126 | return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time)); | 134 | return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time)); |
| 127 | } | 135 | } |
| 128 | 136 | ||
| 129 | static void set_ageing_time(struct net_bridge *br, unsigned long val) | 137 | static int set_ageing_time(struct net_bridge *br, unsigned long val) |
| 130 | { | 138 | { |
| 131 | br->ageing_time = clock_t_to_jiffies(val); | 139 | br->ageing_time = clock_t_to_jiffies(val); |
| 140 | return 0; | ||
| 132 | } | 141 | } |
| 133 | 142 | ||
| 134 | static ssize_t store_ageing_time(struct device *d, | 143 | static ssize_t store_ageing_time(struct device *d, |
| @@ -180,9 +189,10 @@ static ssize_t show_priority(struct device *d, struct device_attribute *attr, | |||
| 180 | (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]); | 189 | (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]); |
| 181 | } | 190 | } |
| 182 | 191 | ||
| 183 | static void set_priority(struct net_bridge *br, unsigned long val) | 192 | static int set_priority(struct net_bridge *br, unsigned long val) |
| 184 | { | 193 | { |
| 185 | br_stp_set_bridge_priority(br, (u16) val); | 194 | br_stp_set_bridge_priority(br, (u16) val); |
| 195 | return 0; | ||
| 186 | } | 196 | } |
| 187 | 197 | ||
| 188 | static ssize_t store_priority(struct device *d, struct device_attribute *attr, | 198 | static ssize_t store_priority(struct device *d, struct device_attribute *attr, |
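
The sysfs side gets the same validation, which is why store_bridge_parm()'s callback type changes from void to int: setters now return 0 or -errno under br->lock, and the error propagates out of the attribute store as the result of write(2). A conforming setter, sketched with a hypothetical check (only set_hello_time actually rejects values in this patch):

	static int set_example(struct net_bridge *br, unsigned long val)
	{
		if (val == 0)		/* whatever this field forbids */
			return -EINVAL;	/* surfaces as a write(2) error */
		/* ... update br; store_bridge_parm() holds br->lock ... */
		return 0;		/* store_bridge_parm() returns len */
	}
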
diff --git a/net/core/dev.c b/net/core/dev.c index 60c51f765887..e719ed29310f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -1991,8 +1991,13 @@ static void net_tx_action(struct softirq_action *h) | |||
| 1991 | spin_unlock(root_lock); | 1991 | spin_unlock(root_lock); |
| 1992 | } else { | 1992 | } else { |
| 1993 | if (!test_bit(__QDISC_STATE_DEACTIVATED, | 1993 | if (!test_bit(__QDISC_STATE_DEACTIVATED, |
| 1994 | &q->state)) | 1994 | &q->state)) { |
| 1995 | __netif_reschedule(q); | 1995 | __netif_reschedule(q); |
| 1996 | } else { | ||
| 1997 | smp_mb__before_clear_bit(); | ||
| 1998 | clear_bit(__QDISC_STATE_SCHED, | ||
| 1999 | &q->state); | ||
| 2000 | } | ||
| 1996 | } | 2001 | } |
| 1997 | } | 2002 | } |
| 1998 | } | 2003 | } |
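
The new else branch plugs a leak of __QDISC_STATE_SCHED: net_tx_action() has already unlinked q from the per-cpu output_queue, so if the qdisc turns out to be deactivated and is not rescheduled, the SCHED bit must be cleared by hand. Otherwise the test_and_set_bit() gate on the scheduling side (sketched below from its contract) would treat the qdisc as queued forever and it could never be scheduled again. The barrier orders the unlink before the bit is observed clear.

	/* Sketch of the scheduling side this pairs with: SCHED set
	 * means "already on some cpu's output_queue", so a stale bit
	 * permanently blocks requeueing. */
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);	/* link onto this cpu's output_queue */
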
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index d985bd613d25..743f011b9a84 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
| @@ -409,3 +409,38 @@ out: | |||
| 409 | } | 409 | } |
| 410 | 410 | ||
| 411 | EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick); | 411 | EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick); |
| 412 | |||
| 413 | void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo, | ||
| 414 | struct inet_timewait_death_row *twdr, int family) | ||
| 415 | { | ||
| 416 | struct inet_timewait_sock *tw; | ||
| 417 | struct sock *sk; | ||
| 418 | struct hlist_node *node; | ||
| 419 | int h; | ||
| 420 | |||
| 421 | local_bh_disable(); | ||
| 422 | for (h = 0; h < (hashinfo->ehash_size); h++) { | ||
| 423 | struct inet_ehash_bucket *head = | ||
| 424 | inet_ehash_bucket(hashinfo, h); | ||
| 425 | rwlock_t *lock = inet_ehash_lockp(hashinfo, h); | ||
| 426 | restart: | ||
| 427 | write_lock(lock); | ||
| 428 | sk_for_each(sk, node, &head->twchain) { | ||
| 429 | |||
| 430 | tw = inet_twsk(sk); | ||
| 431 | if (!net_eq(twsk_net(tw), net) || | ||
| 432 | tw->tw_family != family) | ||
| 433 | continue; | ||
| 434 | |||
| 435 | atomic_inc(&tw->tw_refcnt); | ||
| 436 | write_unlock(lock); | ||
| 437 | inet_twsk_deschedule(tw, twdr); | ||
| 438 | inet_twsk_put(tw); | ||
| 439 | |||
| 440 | goto restart; | ||
| 441 | } | ||
| 442 | write_unlock(lock); | ||
| 443 | } | ||
| 444 | local_bh_enable(); | ||
| 445 | } | ||
| 446 | EXPORT_SYMBOL_GPL(inet_twsk_purge); | ||
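
inet_twsk_purge() lets a dying network namespace reap its timewait sockets; the tcp_sk_exit()/tcpv6_net_exit() hunks below wire it into the v4 and v6 pernet exit paths. inet_twsk_deschedule() takes death-row locks of its own and so cannot run under the ehash bucket lock, hence the pin, unlock, destroy, restart shape; rescanning the chain from the top after every removal is quadratic in the worst case, which is acceptable on the rare teardown path. The idiom in the abstract (wants_purge() is a placeholder predicate):

	restart:
		write_lock(lock);
		sk_for_each(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if (!wants_purge(tw))		/* placeholder */
				continue;
			atomic_inc(&tw->tw_refcnt);	/* pin across unlock */
			write_unlock(lock);
			inet_twsk_deschedule(tw, twdr);	/* takes other locks */
			inet_twsk_put(tw);
			goto restart;			/* chain changed under us */
		}
		write_unlock(lock);
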
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 44c1e934824b..1b4fee20fc93 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -2376,6 +2376,7 @@ static int __net_init tcp_sk_init(struct net *net) | |||
| 2376 | static void __net_exit tcp_sk_exit(struct net *net) | 2376 | static void __net_exit tcp_sk_exit(struct net *net) |
| 2377 | { | 2377 | { |
| 2378 | inet_ctl_sock_destroy(net->ipv4.tcp_sock); | 2378 | inet_ctl_sock_destroy(net->ipv4.tcp_sock); |
| 2379 | inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET); | ||
| 2379 | } | 2380 | } |
| 2380 | 2381 | ||
| 2381 | static struct pernet_operations __net_initdata tcp_sk_ops = { | 2382 | static struct pernet_operations __net_initdata tcp_sk_ops = { |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0e844c2736a7..3df2c442d90b 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -943,39 +943,39 @@ static int ip6_dst_lookup_tail(struct sock *sk, | |||
| 943 | } | 943 | } |
| 944 | 944 | ||
| 945 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 945 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
| 946 | /* | 946 | /* |
| 947 | * Here if the dst entry we've looked up | 947 | * Here if the dst entry we've looked up |
| 948 | * has a neighbour entry that is in the INCOMPLETE | 948 | * has a neighbour entry that is in the INCOMPLETE |
| 949 | * state and the src address from the flow is | 949 | * state and the src address from the flow is |
| 950 | * marked as OPTIMISTIC, we release the found | 950 | * marked as OPTIMISTIC, we release the found |
| 951 | * dst entry and replace it instead with the | 951 | * dst entry and replace it instead with the |
| 952 | * dst entry of the nexthop router | 952 | * dst entry of the nexthop router |
| 953 | */ | 953 | */ |
| 954 | if (!((*dst)->neighbour->nud_state & NUD_VALID)) { | 954 | if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) { |
| 955 | struct inet6_ifaddr *ifp; | 955 | struct inet6_ifaddr *ifp; |
| 956 | struct flowi fl_gw; | 956 | struct flowi fl_gw; |
| 957 | int redirect; | 957 | int redirect; |
| 958 | 958 | ||
| 959 | ifp = ipv6_get_ifaddr(net, &fl->fl6_src, | 959 | ifp = ipv6_get_ifaddr(net, &fl->fl6_src, |
| 960 | (*dst)->dev, 1); | 960 | (*dst)->dev, 1); |
| 961 | 961 | ||
| 962 | redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); | 962 | redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); |
| 963 | if (ifp) | 963 | if (ifp) |
| 964 | in6_ifa_put(ifp); | 964 | in6_ifa_put(ifp); |
| 965 | 965 | ||
| 966 | if (redirect) { | 966 | if (redirect) { |
| 967 | /* | 967 | /* |
| 968 | * We need to get the dst entry for the | 968 | * We need to get the dst entry for the |
| 969 | * default router instead | 969 | * default router instead |
| 970 | */ | 970 | */ |
| 971 | dst_release(*dst); | 971 | dst_release(*dst); |
| 972 | memcpy(&fl_gw, fl, sizeof(struct flowi)); | 972 | memcpy(&fl_gw, fl, sizeof(struct flowi)); |
| 973 | memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); | 973 | memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); |
| 974 | *dst = ip6_route_output(net, sk, &fl_gw); | 974 | *dst = ip6_route_output(net, sk, &fl_gw); |
| 975 | if ((err = (*dst)->error)) | 975 | if ((err = (*dst)->error)) |
| 976 | goto out_err_release; | 976 | goto out_err_release; |
| 977 | } | ||
| 978 | } | 977 | } |
| 978 | } | ||
| 979 | #endif | 979 | #endif |
| 980 | 980 | ||
| 981 | return 0; | 981 | return 0; |
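
Only one line changes behaviour in ip6_dst_lookup_tail(): a dst entry may have no neighbour attached yet, and the optimistic-DAD rerouting dereferenced (*dst)->neighbour unconditionally, an oops waiting for the right route type. The rest of the hunk is re-indentation, plus the moved closing brace that keeps the redirect block inside the new guard. The defensive shape, reduced to its core:

	struct neighbour *neigh = (*dst)->neighbour;	/* may be NULL */

	if (neigh && !(neigh->nud_state & NUD_VALID)) {
		/* optimistic source address: reroute via default router */
	}
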
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5b90b369ccb2..b585c850a89a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -2148,6 +2148,7 @@ static int tcpv6_net_init(struct net *net) | |||
| 2148 | static void tcpv6_net_exit(struct net *net) | 2148 | static void tcpv6_net_exit(struct net *net) |
| 2149 | { | 2149 | { |
| 2150 | inet_ctl_sock_destroy(net->ipv6.tcp_sk); | 2150 | inet_ctl_sock_destroy(net->ipv6.tcp_sk); |
| 2151 | inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6); | ||
| 2151 | } | 2152 | } |
| 2152 | 2153 | ||
| 2153 | static struct pernet_operations tcpv6_net_ops = { | 2154 | static struct pernet_operations tcpv6_net_ops = { |
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 1b1226d6653f..20633fdf7e6b 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c | |||
| @@ -68,11 +68,21 @@ static const char *const dccprotos[] = { | |||
| 68 | static int parse_dcc(char *data, const char *data_end, u_int32_t *ip, | 68 | static int parse_dcc(char *data, const char *data_end, u_int32_t *ip, |
| 69 | u_int16_t *port, char **ad_beg_p, char **ad_end_p) | 69 | u_int16_t *port, char **ad_beg_p, char **ad_end_p) |
| 70 | { | 70 | { |
| 71 | char *tmp; | ||
| 72 | |||
| 71 | /* at least 12: "AAAAAAAA P\1\n" */ | 73 | /* at least 12: "AAAAAAAA P\1\n" */ |
| 72 | while (*data++ != ' ') | 74 | while (*data++ != ' ') |
| 73 | if (data > data_end - 12) | 75 | if (data > data_end - 12) |
| 74 | return -1; | 76 | return -1; |
| 75 | 77 | ||
| 78 | /* Make sure we have a newline character within the packet boundaries | ||
| 79 | * because simple_strtoul parses until the first invalid character. */ | ||
| 80 | for (tmp = data; tmp <= data_end; tmp++) | ||
| 81 | if (*tmp == '\n') | ||
| 82 | break; | ||
| 83 | if (tmp > data_end || *tmp != '\n') | ||
| 84 | return -1; | ||
| 85 | |||
| 76 | *ad_beg_p = data; | 86 | *ad_beg_p = data; |
| 77 | *ip = simple_strtoul(data, &data, 10); | 87 | *ip = simple_strtoul(data, &data, 10); |
| 78 | 88 | ||
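
parse_dcc() feeds its buffer to simple_strtoul(), which takes no length bound and stops only at the first non-digit, so the added scan proves a '\n' terminator exists within the packet before any number parsing starts. An illustrative payload (address and port invented):

	/*
	 *   "PRIVMSG victim :\1DCC CHAT chat 3232235786 4097\1\n"
	 *                                    ^u32 IP     ^port
	 *
	 * If a truncated packet ends in digits with no '\n' inside it,
	 * the unbounded parse below would read past data_end:
	 */
	*ip = simple_strtoul(data, &data, 10);
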
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 654a4f7f12c6..9bd03967fea4 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
| @@ -45,12 +45,12 @@ static LIST_HEAD(gre_keymap_list); | |||
| 45 | 45 | ||
| 46 | void nf_ct_gre_keymap_flush(void) | 46 | void nf_ct_gre_keymap_flush(void) |
| 47 | { | 47 | { |
| 48 | struct list_head *pos, *n; | 48 | struct nf_ct_gre_keymap *km, *tmp; |
| 49 | 49 | ||
| 50 | write_lock_bh(&nf_ct_gre_lock); | 50 | write_lock_bh(&nf_ct_gre_lock); |
| 51 | list_for_each_safe(pos, n, &gre_keymap_list) { | 51 | list_for_each_entry_safe(km, tmp, &gre_keymap_list, list) { |
| 52 | list_del(pos); | 52 | list_del(&km->list); |
| 53 | kfree(pos); | 53 | kfree(km); |
| 54 | } | 54 | } |
| 55 | write_unlock_bh(&nf_ct_gre_lock); | 55 | write_unlock_bh(&nf_ct_gre_lock); |
| 56 | } | 56 | } |
| @@ -97,10 +97,14 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, | |||
| 97 | kmp = &help->help.ct_pptp_info.keymap[dir]; | 97 | kmp = &help->help.ct_pptp_info.keymap[dir]; |
| 98 | if (*kmp) { | 98 | if (*kmp) { |
| 99 | /* check whether it's a retransmission */ | 99 | /* check whether it's a retransmission */ |
| 100 | read_lock_bh(&nf_ct_gre_lock); | ||
| 100 | list_for_each_entry(km, &gre_keymap_list, list) { | 101 | list_for_each_entry(km, &gre_keymap_list, list) { |
| 101 | if (gre_key_cmpfn(km, t) && km == *kmp) | 102 | if (gre_key_cmpfn(km, t) && km == *kmp) { |
| 103 | read_unlock_bh(&nf_ct_gre_lock); | ||
| 102 | return 0; | 104 | return 0; |
| 105 | } | ||
| 103 | } | 106 | } |
| 107 | read_unlock_bh(&nf_ct_gre_lock); | ||
| 104 | pr_debug("trying to override keymap_%s for ct %p\n", | 108 | pr_debug("trying to override keymap_%s for ct %p\n", |
| 105 | dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); | 109 | dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); |
| 106 | return -EEXIST; | 110 | return -EEXIST; |
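
Two independent fixes here. The flush used to kfree() the list_head pointer itself, which only frees the right address while 'list' happens to be the first member of struct nf_ct_gre_keymap; list_for_each_entry_safe() yields the container, so the layout no longer matters. Separately, the retransmission scan in nf_ct_gre_keymap_add() walked gre_keymap_list with no lock held and now takes nf_ct_gre_lock for reading across the walk, including the early return.

	/*
	 * struct nf_ct_gre_keymap {
	 *         struct list_head          list;    (first member today)
	 *         struct nf_conntrack_tuple tuple;
	 * };
	 *
	 * kfree(pos) freed &km->list, which equals km only by luck of
	 * layout; the entry-based walk frees the object itself:
	 */
	list_for_each_entry_safe(km, tmp, &gre_keymap_list, list) {
		list_del(&km->list);
		kfree(km);			/* km, not &km->list */
	}
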
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 2f9bbc058b48..1fa306be60fb 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -1193,7 +1193,6 @@ static const struct sip_handler sip_handlers[] = { | |||
| 1193 | static int process_sip_response(struct sk_buff *skb, | 1193 | static int process_sip_response(struct sk_buff *skb, |
| 1194 | const char **dptr, unsigned int *datalen) | 1194 | const char **dptr, unsigned int *datalen) |
| 1195 | { | 1195 | { |
| 1196 | static const struct sip_handler *handler; | ||
| 1197 | enum ip_conntrack_info ctinfo; | 1196 | enum ip_conntrack_info ctinfo; |
| 1198 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 1197 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
| 1199 | unsigned int matchoff, matchlen; | 1198 | unsigned int matchoff, matchlen; |
| @@ -1214,6 +1213,8 @@ static int process_sip_response(struct sk_buff *skb, | |||
| 1214 | dataoff = matchoff + matchlen + 1; | 1213 | dataoff = matchoff + matchlen + 1; |
| 1215 | 1214 | ||
| 1216 | for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { | 1215 | for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { |
| 1216 | const struct sip_handler *handler; | ||
| 1217 | |||
| 1217 | handler = &sip_handlers[i]; | 1218 | handler = &sip_handlers[i]; |
| 1218 | if (handler->response == NULL) | 1219 | if (handler->response == NULL) |
| 1219 | continue; | 1220 | continue; |
| @@ -1228,13 +1229,14 @@ static int process_sip_response(struct sk_buff *skb, | |||
| 1228 | static int process_sip_request(struct sk_buff *skb, | 1229 | static int process_sip_request(struct sk_buff *skb, |
| 1229 | const char **dptr, unsigned int *datalen) | 1230 | const char **dptr, unsigned int *datalen) |
| 1230 | { | 1231 | { |
| 1231 | static const struct sip_handler *handler; | ||
| 1232 | enum ip_conntrack_info ctinfo; | 1232 | enum ip_conntrack_info ctinfo; |
| 1233 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | 1233 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
| 1234 | unsigned int matchoff, matchlen; | 1234 | unsigned int matchoff, matchlen; |
| 1235 | unsigned int cseq, i; | 1235 | unsigned int cseq, i; |
| 1236 | 1236 | ||
| 1237 | for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { | 1237 | for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { |
| 1238 | const struct sip_handler *handler; | ||
| 1239 | |||
| 1238 | handler = &sip_handlers[i]; | 1240 | handler = &sip_handlers[i]; |
| 1239 | if (handler->request == NULL) | 1241 | if (handler->request == NULL) |
| 1240 | continue; | 1242 | continue; |
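
The removed `static` is the entire fix in nf_conntrack_sip.c: a function-local static is a single object shared by every CPU, so two packets processed concurrently raced on the handler cursor. The automatic declaration gives each invocation its own:

	const struct sip_handler *handler;	/* stack-local: private per
						   call, unlike the shared
						   'static' it replaces */
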
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 46914b79d850..b7754b1b73a4 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -1077,6 +1077,7 @@ static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) | |||
| 1077 | struct hlist_head *chain = policy_hash_bysel(&pol->selector, | 1077 | struct hlist_head *chain = policy_hash_bysel(&pol->selector, |
| 1078 | pol->family, dir); | 1078 | pol->family, dir); |
| 1079 | 1079 | ||
| 1080 | list_add_tail(&pol->bytype, &xfrm_policy_bytype[pol->type]); | ||
| 1080 | hlist_add_head(&pol->bydst, chain); | 1081 | hlist_add_head(&pol->bydst, chain); |
| 1081 | hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index)); | 1082 | hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index)); |
| 1082 | xfrm_policy_count[dir]++; | 1083 | xfrm_policy_count[dir]++; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 7bd62f61593f..0a8f09c3144c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
| @@ -858,6 +858,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
| 858 | 858 | ||
| 859 | if (km_query(x, tmpl, pol) == 0) { | 859 | if (km_query(x, tmpl, pol) == 0) { |
| 860 | x->km.state = XFRM_STATE_ACQ; | 860 | x->km.state = XFRM_STATE_ACQ; |
| 861 | list_add_tail(&x->all, &xfrm_state_all); | ||
| 861 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); | 862 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); |
| 862 | h = xfrm_src_hash(daddr, saddr, family); | 863 | h = xfrm_src_hash(daddr, saddr, family); |
| 863 | hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); | 864 | hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); |
| @@ -1055,6 +1056,7 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re | |||
| 1055 | xfrm_state_hold(x); | 1056 | xfrm_state_hold(x); |
| 1056 | x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ; | 1057 | x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ; |
| 1057 | add_timer(&x->timer); | 1058 | add_timer(&x->timer); |
| 1059 | list_add_tail(&x->all, &xfrm_state_all); | ||
| 1058 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); | 1060 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); |
| 1059 | h = xfrm_src_hash(daddr, saddr, family); | 1061 | h = xfrm_src_hash(daddr, saddr, family); |
| 1060 | hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); | 1062 | hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); |
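
Both xfrm hunks restore the same invariant: an entry inserted into the lookup hashes must also be linked on the corresponding walker list (xfrm_state_all for states, xfrm_policy_bytype for policies), since that list is what the walk-based dump paths iterate. States created on the acquire paths were reachable by hash lookup but invisible to dumps. As a pairing rule:

	/* link for walkers and for hash lookup together, never one alone */
	list_add_tail(&x->all, &xfrm_state_all);		/* dump/walk  */
	hlist_add_head(&x->bydst, xfrm_state_bydst + h);	/* hash lookup */
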
diff --git a/sound/Kconfig b/sound/Kconfig index a37bee094eba..8ebf512ced6c 100644 --- a/sound/Kconfig +++ b/sound/Kconfig | |||
| @@ -91,6 +91,9 @@ endif # SOUND_PRIME | |||
| 91 | 91 | ||
| 92 | endif # !M68K | 92 | endif # !M68K |
| 93 | 93 | ||
| 94 | endif # SOUND | ||
| 95 | |||
| 96 | # AC97_BUS is used from both sound and ucb1400 | ||
| 94 | config AC97_BUS | 97 | config AC97_BUS |
| 95 | tristate | 98 | tristate |
| 96 | help | 99 | help |
| @@ -99,4 +102,3 @@ config AC97_BUS | |||
| 99 | sound although they're sharing the AC97 bus. Concerned drivers | 102 | sound although they're sharing the AC97 bus. Concerned drivers |
| 100 | should "select" this. | 103 | should "select" this. |
| 101 | 104 | ||
| 102 | endif # SOUND | ||