251 files changed, 3500 insertions, 2533 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 1858646b52e3..03f38c18f323 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -987,7 +987,6 @@ F:	drivers/platform/x86/asus-laptop.c
 
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M:	Dan Williams <dan.j.williams@intel.com>
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
 S:	Supported
 F:	Documentation/crypto/async-tx-api.txt
@@ -1823,7 +1822,6 @@ S:	Supported
 F:	fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
@@ -2786,7 +2784,7 @@ F:	arch/x86/kernel/microcode_core.c
 F:	arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/ioat*
 
@@ -2824,10 +2822,11 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ixp2000/
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 M:	Bruce Allan <bruce.w.allan@intel.com>
+M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
 L:	e1000-devel@lists.sourceforge.net
@@ -2837,6 +2836,7 @@ F:	drivers/net/e100.c
 F:	drivers/net/e1000/
 F:	drivers/net/e1000e/
 F:	drivers/net/igb/
+F:	drivers/net/igbvf/
 F:	drivers/net/ixgb/
 F:	drivers/net/ixgbe/
 
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 2ba9ab953731..04f1d29cba2c 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -214,8 +214,8 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
 	struct mpu_rate * ptr;
 	unsigned long dpll1_rate, ref_rate;
 
-	dpll1_rate = clk_get_rate(ck_dpll1_p);
-	ref_rate = clk_get_rate(ck_ref_p);
+	dpll1_rate = ck_dpll1_p->rate;
+	ref_rate = ck_ref_p->rate;
 
 	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
 		if (ptr->xtal != ref_rate)
@@ -306,7 +306,7 @@ long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
 	long highest_rate;
 	unsigned long ref_rate;
 
-	ref_rate = clk_get_rate(ck_ref_p);
+	ref_rate = ck_ref_p->rate;
 
 	highest_rate = -EINVAL;
 
diff --git a/arch/arm/mach-omap2/clock34xx_data.c b/arch/arm/mach-omap2/clock34xx_data.c
index c6031d74d6f6..74930e3158e3 100644
--- a/arch/arm/mach-omap2/clock34xx_data.c
+++ b/arch/arm/mach-omap2/clock34xx_data.c
@@ -671,7 +671,6 @@ static struct clk dpll4_m3x2_ck = {
 	.name = "dpll4_m3x2_ck",
 	.ops = &clkops_omap2_dflt_wait,
 	.parent = &dpll4_m3_ck,
-	.init = &omap2_init_clksel_parent,
 	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
 	.enable_bit = OMAP3430_PWRDN_TV_SHIFT,
 	.flags = INVERT_ENABLE,
@@ -811,7 +810,6 @@ static struct clk dpll4_m6x2_ck = {
 	.name = "dpll4_m6x2_ck",
 	.ops = &clkops_omap2_dflt_wait,
 	.parent = &dpll4_m6_ck,
-	.init = &omap2_init_clksel_parent,
 	.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
 	.enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
 	.flags = INVERT_ENABLE,
@@ -1047,7 +1045,6 @@ static struct clk iva2_ck = {
 	.name = "iva2_ck",
 	.ops = &clkops_omap2_dflt_wait,
 	.parent = &dpll2_m2_ck,
-	.init = &omap2_init_clksel_parent,
 	.enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
 	.enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
 	.clkdm_name = "iva2_clkdm",
@@ -1121,7 +1118,6 @@ static struct clk gfx_l3_ck = {
 	.name = "gfx_l3_ck",
 	.ops = &clkops_omap2_dflt_wait,
 	.parent = &l3_ick,
-	.init = &omap2_init_clksel_parent,
 	.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
 	.enable_bit = OMAP_EN_GFX_SHIFT,
 	.recalc = &followparent_recalc,
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 2210e227d78a..9d882bcb56e3 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -346,37 +346,37 @@ static struct clk aess_fclk = {
 };
 
 static const struct clksel_rate div31_1to31_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_4430 },
-	{ .div = 2, .val = 1, .flags = RATE_IN_4430 },
-	{ .div = 3, .val = 2, .flags = RATE_IN_4430 },
-	{ .div = 4, .val = 3, .flags = RATE_IN_4430 },
-	{ .div = 5, .val = 4, .flags = RATE_IN_4430 },
-	{ .div = 6, .val = 5, .flags = RATE_IN_4430 },
-	{ .div = 7, .val = 6, .flags = RATE_IN_4430 },
-	{ .div = 8, .val = 7, .flags = RATE_IN_4430 },
-	{ .div = 9, .val = 8, .flags = RATE_IN_4430 },
-	{ .div = 10, .val = 9, .flags = RATE_IN_4430 },
-	{ .div = 11, .val = 10, .flags = RATE_IN_4430 },
-	{ .div = 12, .val = 11, .flags = RATE_IN_4430 },
-	{ .div = 13, .val = 12, .flags = RATE_IN_4430 },
-	{ .div = 14, .val = 13, .flags = RATE_IN_4430 },
-	{ .div = 15, .val = 14, .flags = RATE_IN_4430 },
-	{ .div = 16, .val = 15, .flags = RATE_IN_4430 },
-	{ .div = 17, .val = 16, .flags = RATE_IN_4430 },
-	{ .div = 18, .val = 17, .flags = RATE_IN_4430 },
-	{ .div = 19, .val = 18, .flags = RATE_IN_4430 },
-	{ .div = 20, .val = 19, .flags = RATE_IN_4430 },
-	{ .div = 21, .val = 20, .flags = RATE_IN_4430 },
-	{ .div = 22, .val = 21, .flags = RATE_IN_4430 },
-	{ .div = 23, .val = 22, .flags = RATE_IN_4430 },
-	{ .div = 24, .val = 23, .flags = RATE_IN_4430 },
-	{ .div = 25, .val = 24, .flags = RATE_IN_4430 },
-	{ .div = 26, .val = 25, .flags = RATE_IN_4430 },
-	{ .div = 27, .val = 26, .flags = RATE_IN_4430 },
-	{ .div = 28, .val = 27, .flags = RATE_IN_4430 },
-	{ .div = 29, .val = 28, .flags = RATE_IN_4430 },
-	{ .div = 30, .val = 29, .flags = RATE_IN_4430 },
-	{ .div = 31, .val = 30, .flags = RATE_IN_4430 },
+	{ .div = 1, .val = 1, .flags = RATE_IN_4430 },
+	{ .div = 2, .val = 2, .flags = RATE_IN_4430 },
+	{ .div = 3, .val = 3, .flags = RATE_IN_4430 },
+	{ .div = 4, .val = 4, .flags = RATE_IN_4430 },
+	{ .div = 5, .val = 5, .flags = RATE_IN_4430 },
+	{ .div = 6, .val = 6, .flags = RATE_IN_4430 },
+	{ .div = 7, .val = 7, .flags = RATE_IN_4430 },
+	{ .div = 8, .val = 8, .flags = RATE_IN_4430 },
+	{ .div = 9, .val = 9, .flags = RATE_IN_4430 },
+	{ .div = 10, .val = 10, .flags = RATE_IN_4430 },
+	{ .div = 11, .val = 11, .flags = RATE_IN_4430 },
+	{ .div = 12, .val = 12, .flags = RATE_IN_4430 },
+	{ .div = 13, .val = 13, .flags = RATE_IN_4430 },
+	{ .div = 14, .val = 14, .flags = RATE_IN_4430 },
+	{ .div = 15, .val = 15, .flags = RATE_IN_4430 },
+	{ .div = 16, .val = 16, .flags = RATE_IN_4430 },
+	{ .div = 17, .val = 17, .flags = RATE_IN_4430 },
+	{ .div = 18, .val = 18, .flags = RATE_IN_4430 },
+	{ .div = 19, .val = 19, .flags = RATE_IN_4430 },
+	{ .div = 20, .val = 20, .flags = RATE_IN_4430 },
+	{ .div = 21, .val = 21, .flags = RATE_IN_4430 },
+	{ .div = 22, .val = 22, .flags = RATE_IN_4430 },
+	{ .div = 23, .val = 23, .flags = RATE_IN_4430 },
+	{ .div = 24, .val = 24, .flags = RATE_IN_4430 },
+	{ .div = 25, .val = 25, .flags = RATE_IN_4430 },
+	{ .div = 26, .val = 26, .flags = RATE_IN_4430 },
+	{ .div = 27, .val = 27, .flags = RATE_IN_4430 },
+	{ .div = 28, .val = 28, .flags = RATE_IN_4430 },
+	{ .div = 29, .val = 29, .flags = RATE_IN_4430 },
+	{ .div = 30, .val = 30, .flags = RATE_IN_4430 },
+	{ .div = 31, .val = 31, .flags = RATE_IN_4430 },
 	{ .div = 0 },
 };
 
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index a26d6a08ae3f..12f0cbfc2894 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -137,7 +137,7 @@ return_sleep_time:
 	local_irq_enable();
 	local_fiq_enable();
 
-	return (u32)timespec_to_ns(&ts_idle)/1000;
+	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
 }
 
 /**
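The hunk above replaces a division of the full 64-bit nanosecond count with a split seconds/nanoseconds calculation. A minimal userspace sketch of the same arithmetic, assuming stand-in constants and a plain struct rather than the kernel's types:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL      /* stand-ins for the kernel constants */
#define USEC_PER_SEC  1000000ULL

struct ts { int64_t tv_sec; int64_t tv_nsec; };

/* Same shape as the new return expression: convert an idle residency
 * to microseconds without dividing the full 64-bit nanosecond count. */
static uint32_t idle_time_us(struct ts t)
{
	return (uint32_t)(t.tv_nsec / NSEC_PER_USEC + t.tv_sec * USEC_PER_SEC);
}

int main(void)
{
	struct ts t = { .tv_sec = 1, .tv_nsec = 250000 }; /* 1.00025 s idle */
	printf("%u us\n", idle_time_us(t));               /* prints 1000250 */
	return 0;
}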
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index bd8cb5974726..3f1334f62e7a 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -534,6 +534,8 @@ void __init gpmc_init(void)
 		BUG();
 	}
 
+	clk_enable(gpmc_l3_clk);
+
 	l = gpmc_read_reg(GPMC_REVISION);
 	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
 	/* Set smart idle mode and automatic L3 clock gating */
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index a091b53657b9..3d65c50bd017 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -188,6 +188,8 @@ void __init omap3_check_revision(void)
 	u16 hawkeye;
 	u8 rev;
 
+	omap_chip.oc = CHIP_IS_OMAP3430;
+
 	/*
 	 * We cannot access revision registers on ES1.0.
 	 * If the processor type is Cortex-A8 and the revision is 0x0
@@ -196,6 +198,7 @@ void __init omap3_check_revision(void)
 	cpuid = read_cpuid(CPUID_ID);
 	if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
 		omap_revision = OMAP3430_REV_ES1_0;
+		omap_chip.oc |= CHIP_IS_OMAP3430ES1;
 		return;
 	}
 
@@ -216,18 +219,28 @@ void __init omap3_check_revision(void)
 		case 0: /* Take care of early samples */
 		case 1:
 			omap_revision = OMAP3430_REV_ES2_0;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES2;
 			break;
 		case 2:
 			omap_revision = OMAP3430_REV_ES2_1;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES2;
 			break;
 		case 3:
 			omap_revision = OMAP3430_REV_ES3_0;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES3_0;
 			break;
 		case 4:
+			omap_revision = OMAP3430_REV_ES3_1;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
+			break;
+		case 7:
 		/* FALLTHROUGH */
 		default:
 			/* Use the latest known revision as default */
-			omap_revision = OMAP3430_REV_ES3_1;
+			omap_revision = OMAP3430_REV_ES3_1_2;
+
+			/* REVISIT: Add CHIP_IS_OMAP3430ES3_1_2? */
+			omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
 		}
 		break;
 	case 0xb868:
@@ -235,14 +248,18 @@ void __init omap3_check_revision(void)
 		 *
 		 * Set the device to be OMAP3505 here. Actual device
		 * is identified later based on the features.
+		 *
+		 * REVISIT: AM3505/AM3517 should have their own CHIP_IS
 		 */
 		omap_revision = OMAP3505_REV(rev);
+		omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
 		break;
 	case 0xb891:
 	/* FALLTHROUGH */
 	default:
 		/* Unknown default to latest silicon rev as default*/
 		omap_revision = OMAP3630_REV_ES1_0;
+		omap_chip.oc |= CHIP_IS_OMAP3630ES1;
 	}
 }
 
@@ -360,6 +377,7 @@ void __init omap2_check_revision(void)
 		omap3_check_revision();
 		omap3_check_features();
 		omap3_cpuinfo();
+		return;
 	} else if (cpu_is_omap44xx()) {
 		omap4_check_revision();
 		return;
@@ -374,27 +392,14 @@ void __init omap2_check_revision(void)
 	if (cpu_is_omap243x()) {
 		/* Currently only supports 2430ES2.1 and 2430-all */
 		omap_chip.oc |= CHIP_IS_OMAP2430;
+		return;
 	} else if (cpu_is_omap242x()) {
 		/* Currently only supports 2420ES2.1.1 and 2420-all */
 		omap_chip.oc |= CHIP_IS_OMAP2420;
-	} else if (cpu_is_omap3505() || cpu_is_omap3517()) {
-		omap_chip.oc = CHIP_IS_OMAP3430 | CHIP_IS_OMAP3430ES3_1;
-	} else if (cpu_is_omap343x()) {
-		omap_chip.oc = CHIP_IS_OMAP3430;
-		if (omap_rev() == OMAP3430_REV_ES1_0)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES1;
-		else if (omap_rev() >= OMAP3430_REV_ES2_0 &&
-			 omap_rev() <= OMAP3430_REV_ES2_1)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES2;
-		else if (omap_rev() == OMAP3430_REV_ES3_0)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES3_0;
-		else if (omap_rev() == OMAP3430_REV_ES3_1)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
-		else if (omap_rev() == OMAP3630_REV_ES1_0)
-			omap_chip.oc |= CHIP_IS_OMAP3630ES1;
-	} else {
-		pr_err("Uninitialized omap_chip, please fix!\n");
+		return;
 	}
+
+	pr_err("Uninitialized omap_chip, please fix!\n");
 }
 
 /*
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index e9bc782fa414..27054025da2b 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -274,4 +274,22 @@ void omap_intc_restore_context(void)
 	}
 	/* MIRs are saved and restore with other PRCM registers */
 }
+
+void omap3_intc_suspend(void)
+{
+	/* A pending interrupt would prevent OMAP from entering suspend */
+	omap_ack_irq(0);
+}
+
+void omap3_intc_prepare_idle(void)
+{
+	/* Disable autoidle as it can stall interrupt controller */
+	intc_bank_write_reg(0, &irq_banks[0], INTC_SYSCONFIG);
+}
+
+void omap3_intc_resume_idle(void)
+{
+	/* Re-enable autoidle */
+	intc_bank_write_reg(1, &irq_banks[0], INTC_SYSCONFIG);
+}
 #endif /* CONFIG_ARCH_OMAP3 */
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 459ef23ab8a8..3f59bd12cbbf 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -51,7 +51,7 @@ struct omap_mux_entry {
 static unsigned long mux_phys;
 static void __iomem *mux_base;
 
-static inline u16 omap_mux_read(u16 reg)
+u16 omap_mux_read(u16 reg)
 {
 	if (cpu_is_omap24xx())
 		return __raw_readb(mux_base + reg);
@@ -59,7 +59,7 @@ static inline u16 omap_mux_read(u16 reg)
 		return __raw_readw(mux_base + reg);
 }
 
-static inline void omap_mux_write(u16 val, u16 reg)
+void omap_mux_write(u16 val, u16 reg)
 {
 	if (cpu_is_omap24xx())
 		__raw_writeb(val, mux_base + reg);
@@ -67,6 +67,14 @@ static inline void omap_mux_write(u16 val, u16 reg)
 		__raw_writew(val, mux_base + reg);
 }
 
+void omap_mux_write_array(struct omap_board_mux *board_mux)
+{
+	while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
+		omap_mux_write(board_mux->value, board_mux->reg_offset);
+		board_mux++;
+	}
+}
+
 #if defined(CONFIG_ARCH_OMAP24XX) && defined(CONFIG_OMAP_MUX)
 
 static struct omap_mux_cfg arch_mux_cfg;
@@ -833,14 +841,6 @@ static void __init omap_mux_set_cmdline_signals(void)
 	kfree(options);
 }
 
-static void __init omap_mux_set_board_signals(struct omap_board_mux *board_mux)
-{
-	while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
-		omap_mux_write(board_mux->value, board_mux->reg_offset);
-		board_mux++;
-	}
-}
-
 static int __init omap_mux_copy_names(struct omap_mux *src,
 				      struct omap_mux *dst)
 {
@@ -998,12 +998,15 @@ int __init omap_mux_init(u32 mux_pbase, u32 mux_size,
 		omap_mux_package_fixup(package_subset, superset);
 	if (package_balls)
 		omap_mux_package_init_balls(package_balls, superset);
-	omap_mux_set_cmdline_signals();
-	omap_mux_set_board_signals(board_mux);
 #endif
 
 	omap_mux_init_list(superset);
 
+#ifdef CONFIG_OMAP_MUX
+	omap_mux_set_cmdline_signals();
+	omap_mux_write_array(board_mux);
+#endif
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index d8b4d5ad2278..f8c2e7a8f063 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -147,6 +147,30 @@ u16 omap_mux_get_gpio(int gpio);
 void omap_mux_set_gpio(u16 val, int gpio);
 
 /**
+ * omap_mux_read() - read mux register
+ * @mux_offset:	Offset of the mux register
+ *
+ */
+u16 omap_mux_read(u16 mux_offset);
+
+/**
+ * omap_mux_write() - write mux register
+ * @val:	New mux register value
+ * @mux_offset:	Offset of the mux register
+ *
+ * This should be only needed for dynamic remuxing of non-gpio signals.
+ */
+void omap_mux_write(u16 val, u16 mux_offset);
+
+/**
+ * omap_mux_write_array() - write an array of mux registers
+ * @board_mux:	Array of mux registers terminated by MAP_MUX_TERMINATOR
+ *
+ * This should be only needed for dynamic remuxing of non-gpio signals.
+ */
+void omap_mux_write_array(struct omap_board_mux *board_mux);
+
+/**
 * omap3_mux_init() - initialize mux system with board specific set
 * @board_mux: Board specific mux table
 * @flags: OMAP package type used for the board
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d8c8545875b1..478ae585ca39 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -94,7 +94,8 @@ static int _update_sysc_cache(struct omap_hwmod *oh)
 
 	oh->_sysc_cache = omap_hwmod_readl(oh, oh->sysconfig->sysc_offs);
 
-	oh->_int_flags |= _HWMOD_SYSCONFIG_LOADED;
+	if (!(oh->sysconfig->sysc_flags & SYSC_NO_CACHE))
+		oh->_int_flags |= _HWMOD_SYSCONFIG_LOADED;
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 860b755d2220..a0866268aa41 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -54,8 +54,6 @@ int omap2_pm_debug;
 		regs[reg_count++].val = \
 			__raw_readl(OMAP2_L4_IO_ADDRESS(0x480fe000 + (off)))
 
-static int __init pm_dbg_init(void);
-
 void omap2_pm_dump(int mode, int resume, unsigned int us)
 {
 	struct reg {
@@ -167,6 +165,8 @@ struct dentry *pm_dbg_dir;
 
 static int pm_dbg_init_done;
 
+static int __init pm_dbg_init(void);
+
 enum {
 	DEBUG_FILE_COUNTERS = 0,
 	DEBUG_FILE_TIMERS,
@@ -488,9 +488,11 @@ int pm_dbg_regset_init(int reg_set)
 
 static int pwrdm_suspend_get(void *data, u64 *val)
 {
-	*val = omap3_pm_get_suspend_state((struct powerdomain *)data);
+	int ret;
+	ret = omap3_pm_get_suspend_state((struct powerdomain *)data);
+	*val = ret;
 
-	if (*val >= 0)
+	if (ret >= 0)
 		return 0;
 	return *val;
 }
@@ -604,6 +606,4 @@ static int __init pm_dbg_init(void)
 }
 arch_initcall(pm_dbg_init);
 
-#else
-void pm_dbg_update_time(struct powerdomain *pwrdm, int prev) {}
 #endif
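The pwrdm_suspend_get() hunk above works around a classic pitfall: the error value was stored straight into an unsigned u64, so the ">= 0" check could never fail. A minimal standalone sketch of the same pitfall and the fixed pattern (get_state() is a made-up stand-in for a call returning a negative errno):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for a helper that returns a negative errno on failure. */
static int get_state(void) { return -22; /* -EINVAL */ }

int main(void)
{
	uint64_t val;
	int ret;

	/* Old pattern: the comparison on the unsigned value is always true. */
	val = (uint64_t)get_state();
	printf("unsigned check: %s\n", val >= 0 ? "passes (bug)" : "fails");

	/* New pattern: keep the return code in a signed int, then publish it. */
	ret = get_state();
	val = (uint64_t)ret;
	printf("signed check:   %s\n", ret >= 0 ? "passes" : "fails (correct)");
	return 0;
}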
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 0bf345db7147..7a9c2d004511 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -32,12 +32,16 @@ extern struct omap_dm_timer *gptimer_wakeup;
 #ifdef CONFIG_PM_DEBUG
 extern void omap2_pm_dump(int mode, int resume, unsigned int us);
 extern int omap2_pm_debug;
+#else
+#define omap2_pm_dump(mode, resume, us) do {} while (0);
+#define omap2_pm_debug 0
+#endif
+
+#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
 extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
 extern int pm_dbg_regset_save(int reg_set);
 extern int pm_dbg_regset_init(int reg_set);
 #else
-#define omap2_pm_dump(mode, resume, us) do {} while (0);
-#define omap2_pm_debug 0
 #define pm_dbg_update_time(pwrdm, prev) do {} while (0);
 #define pm_dbg_regset_save(reg_set) do {} while (0);
 #define pm_dbg_regset_init(reg_set) do {} while (0);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c6cc809afb79..910a7acf542d 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -26,6 +26,7 @@
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 
 #include <plat/sram.h>
 #include <plat/clockdomain.h>
@@ -126,7 +127,15 @@ static void omap3_core_save_context(void)
 	/* wait for the save to complete */
 	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
 			& PADCONF_SAVE_DONE))
-		;
+		udelay(1);
+
+	/*
+	 * Force write last pad into memory, as this can fail in some
+	 * cases according to erratas 1.157, 1.185
+	 */
+	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
+		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
+
 	/* Save the Interrupt controller context */
 	omap_intc_save_context();
 	/* Save the GPMC context */
@@ -392,6 +401,7 @@ void omap_sram_idle(void)
 		prm_set_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN);
 		omap3_enable_io_chain();
 	}
+	omap3_intc_prepare_idle();
 
 	/*
	 * On EMU/HS devices ROM code restores a SRDC value
@@ -438,6 +448,7 @@ void omap_sram_idle(void)
 					OMAP3430_GR_MOD,
 					OMAP3_PRM_VOLTCTRL_OFFSET);
 	}
+	omap3_intc_resume_idle();
 
 	/* PER */
 	if (per_next_state < PWRDM_POWER_ON) {
@@ -578,6 +589,8 @@ static int omap3_pm_suspend(void)
 	}
 
 	omap_uart_prepare_suspend();
+	omap3_intc_suspend();
+
 	omap_sram_idle();
 
 restore:
@@ -835,6 +848,8 @@ static void __init prcm_setup_regs(void)
 					CM_AUTOIDLE);
 	}
 
+	omap_ctrl_writel(OMAP3430_AUTOIDLE, OMAP2_CONTROL_SYSCONFIG);
+
 	/*
 	 * Set all plls to autoidle. This is needed until autoidle is
 	 * enabled by clockfw
@@ -875,15 +890,23 @@ static void __init prcm_setup_regs(void)
 	prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN,
 			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
 
+	/* Enable PM_WKEN to support DSS LPR */
+	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS,
+			  OMAP3430_DSS_MOD, PM_WKEN);
+
 	/* Enable wakeups in PER */
 	prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 |
 			  OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 |
-			  OMAP3430_EN_GPIO6 | OMAP3430_EN_UART3,
+			  OMAP3430_EN_GPIO6 | OMAP3430_EN_UART3 |
+			  OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
+			  OMAP3430_EN_MCBSP4,
 			  OMAP3430_PER_MOD, PM_WKEN);
 	/* and allow them to wake up MPU */
 	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 |
 			  OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 |
-			  OMAP3430_GRPSEL_GPIO6 | OMAP3430_EN_UART3,
+			  OMAP3430_GRPSEL_GPIO6 | OMAP3430_EN_UART3 |
+			  OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
+			  OMAP3430_EN_MCBSP4,
 			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
 
 	/* Don't attach IVA interrupts */
@@ -904,24 +927,6 @@ static void __init prcm_setup_regs(void)
 	/* Clear any pending PRCM interrupts */
 	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-	/* Don't attach IVA interrupts */
-	prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
-	prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
-	prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
-	prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
-
-	/* Clear any pending 'reset' flags */
-	prm_write_mod_reg(0xffffffff, MPU_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, CORE_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, RM_RSTST);
-
-	/* Clear any pending PRCM interrupts */
-	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
-
 	omap3_iva_idle();
 	omap3_d2d_idle();
 }
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 3ea8177ffb25..cf466ea1dffc 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -44,7 +44,6 @@ struct omap3_prcm_regs {
 	u32 iva2_cm_clksel2;
 	u32 cm_sysconfig;
 	u32 sgx_cm_clksel;
-	u32 wkup_cm_clksel;
 	u32 dss_cm_clksel;
 	u32 cam_cm_clksel;
 	u32 per_cm_clksel;
@@ -53,7 +52,6 @@ struct omap3_prcm_regs {
 	u32 pll_cm_autoidle2;
 	u32 pll_cm_clksel4;
 	u32 pll_cm_clksel5;
-	u32 pll_cm_clken;
 	u32 pll_cm_clken2;
 	u32 cm_polctrl;
 	u32 iva2_cm_fclken;
@@ -77,7 +75,6 @@ struct omap3_prcm_regs {
 	u32 usbhost_cm_iclken;
 	u32 iva2_cm_autiidle2;
 	u32 mpu_cm_autoidle2;
-	u32 pll_cm_autoidle;
 	u32 iva2_cm_clkstctrl;
 	u32 mpu_cm_clkstctrl;
 	u32 core_cm_clkstctrl;
@@ -274,7 +271,6 @@ void omap3_prcm_save_context(void)
 	prcm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG);
 	prcm_context.sgx_cm_clksel =
 			 cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL);
-	prcm_context.wkup_cm_clksel = cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
 	prcm_context.dss_cm_clksel =
 			 cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL);
 	prcm_context.cam_cm_clksel =
@@ -291,8 +287,6 @@ void omap3_prcm_save_context(void)
 			cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4);
 	prcm_context.pll_cm_clksel5 =
 			cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5);
-	prcm_context.pll_cm_clken =
-			cm_read_mod_reg(PLL_MOD, CM_CLKEN);
 	prcm_context.pll_cm_clken2 =
 			cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2);
 	prcm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL);
@@ -338,8 +332,6 @@ void omap3_prcm_save_context(void)
 		cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
 	prcm_context.mpu_cm_autoidle2 =
 		cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2);
-	prcm_context.pll_cm_autoidle =
-		cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
 	prcm_context.iva2_cm_clkstctrl =
 		cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSTCTRL);
 	prcm_context.mpu_cm_clkstctrl =
@@ -431,7 +423,6 @@ void omap3_prcm_restore_context(void)
 	__raw_writel(prcm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG);
 	cm_write_mod_reg(prcm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
 			 CM_CLKSEL);
-	cm_write_mod_reg(prcm_context.wkup_cm_clksel, WKUP_MOD, CM_CLKSEL);
 	cm_write_mod_reg(prcm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
 			 CM_CLKSEL);
 	cm_write_mod_reg(prcm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
@@ -448,7 +439,6 @@ void omap3_prcm_restore_context(void)
 			 OMAP3430ES2_CM_CLKSEL4);
 	cm_write_mod_reg(prcm_context.pll_cm_clksel5, PLL_MOD,
 			 OMAP3430ES2_CM_CLKSEL5);
-	cm_write_mod_reg(prcm_context.pll_cm_clken, PLL_MOD, CM_CLKEN);
 	cm_write_mod_reg(prcm_context.pll_cm_clken2, PLL_MOD,
 			 OMAP3430ES2_CM_CLKEN2);
 	__raw_writel(prcm_context.cm_polctrl, OMAP3430_CM_POLCTRL);
@@ -487,7 +477,6 @@ void omap3_prcm_restore_context(void)
 	cm_write_mod_reg(prcm_context.iva2_cm_autiidle2, OMAP3430_IVA2_MOD,
 			 CM_AUTOIDLE2);
 	cm_write_mod_reg(prcm_context.mpu_cm_autoidle2, MPU_MOD, CM_AUTOIDLE2);
-	cm_write_mod_reg(prcm_context.pll_cm_autoidle, PLL_MOD, CM_AUTOIDLE);
 	cm_write_mod_reg(prcm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
 			 CM_CLKSTCTRL);
 	cm_write_mod_reg(prcm_context.mpu_cm_clkstctrl, MPU_MOD, CM_CLKSTCTRL);
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index ea050ce188a7..40f006285163 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -24,6 +24,8 @@
 		OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE + (module) + (reg))
 #define OMAP44XX_PRM_REGADDR(module, reg) \
 		OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (module) + (reg))
+#define OMAP44XX_CHIRONSS_REGADDR(module, reg) \
+		OMAP2_L4_IO_ADDRESS(OMAP4430_CHIRONSS_BASE + (module) + (reg))
 
 #include "prm44xx.h"
 
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 89be97f0589d..adb2558bb121 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -386,26 +386,26 @@
 
 
 /* CHIRON_PRCM.CHIRONSS_OCP_SOCKET_PRCM register offsets */
-#define OMAP4430_REVISION_PRCM			OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD, 0x0000)
+#define OMAP4430_REVISION_PRCM			OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD, 0x0000)
 
 /* CHIRON_PRCM.CHIRONSS_DEVICE_PRM register offsets */
-#define OMAP4430_CHIRON_PRCM_PRM_RSTST		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD, 0x0000)
+#define OMAP4430_CHIRON_PRCM_PRM_RSTST		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD, 0x0000)
 
 /* CHIRON_PRCM.CHIRONSS_CPU0 register offsets */
-#define OMAP4430_PM_PDA_CPU0_PWRSTCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU0_PWRSTST		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU0_CPU0_CONTEXT	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTST		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU0_CPU0_CLKCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU0_CLKSTCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0018)
+#define OMAP4430_PM_PDA_CPU0_PWRSTCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0000)
+#define OMAP4430_PM_PDA_CPU0_PWRSTST		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0004)
+#define OMAP4430_RM_PDA_CPU0_CPU0_CONTEXT	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0008)
+#define OMAP4430_RM_PDA_CPU0_CPU0_RSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x000c)
+#define OMAP4430_RM_PDA_CPU0_CPU0_RSTST		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0010)
+#define OMAP4430_CM_PDA_CPU0_CPU0_CLKCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0014)
+#define OMAP4430_CM_PDA_CPU0_CLKSTCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0018)
 
 /* CHIRON_PRCM.CHIRONSS_CPU1 register offsets */
-#define OMAP4430_PM_PDA_CPU1_PWRSTCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU1_PWRSTST		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU1_CPU1_CONTEXT	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTST		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU1_CPU1_CLKCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU1_CLKSTCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0018)
+#define OMAP4430_PM_PDA_CPU1_PWRSTCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0000)
+#define OMAP4430_PM_PDA_CPU1_PWRSTST		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0004)
+#define OMAP4430_RM_PDA_CPU1_CPU1_CONTEXT	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0008)
+#define OMAP4430_RM_PDA_CPU1_CPU1_RSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x000c)
+#define OMAP4430_RM_PDA_CPU1_CPU1_RSTST		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0010)
+#define OMAP4430_CM_PDA_CPU1_CPU1_CLKCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0014)
+#define OMAP4430_CM_PDA_CPU1_CLKSTCTRL		OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0018)
 #endif
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 15268f8b61de..c3626ea48143 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -245,7 +245,8 @@ restore:
 	mov	r1, #0		@ set task id for ROM code in r1
 	mov	r2, #4		@ set some flags in r2, r6
 	mov	r6, #0xff
-	adr	r3, write_aux_control_params	@ r3 points to parameters
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4, #0xBC] @ r3 points to parameters
 	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
 	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
 	.word	0xE1600071	@ call SMI monitor (smi #1)
@@ -253,14 +254,14 @@ restore:
 	b	logic_l1_restore
 l2_inv_api_params:
 	.word	0x1, 0x00
-write_aux_control_params:
-	.word	0x1, 0x72
 l2_inv_gp:
 	/* Execute smi to invalidate L2 cache */
 	mov	r12, #0x1	@ set up to invalide L2
 smi:	.word	0xE1600070	@ Call SMI monitor (smieq)
 	/* Write to Aux control register to set some bits */
-	mov	r0, #0x72
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4,#0xBC]
+	ldr	r0, [r3,#4]
 	mov	r12, #0x3
 	.word	0xE1600070	@ Call SMI monitor (smieq)
 logic_l1_restore:
@@ -271,6 +272,7 @@ logic_l1_restore:
 
 	ldr	r4, scratchpad_base
 	ldr	r3, [r4,#0xBC]
+	adds	r3, r3, #8
 	ldmia	r3!, {r4-r6}
 	mov	sp, r4
 	msr	spsr_cxsf, r5
@@ -387,6 +389,9 @@ usettbr0:
save_context_wfi:
 	/*b	save_context_wfi*/	@ enable to debug save code
 	mov	r8, r0	/* Store SDRAM address in r8 */
+	mrc	p15, 0, r5, c1, c0, 1	@ Read Auxiliary Control Register
+	mov	r4, #0x1		@ Number of parameters for restore call
+	stmia	r8!, {r4-r5}
 	/* Check what that target sleep state is:stored in r1*/
 	/* 1 - Only L1 and logic lost */
 	/* 2 - Only L2 lost */
diff --git a/arch/arm/mach-s3c6410/mach-hmt.c b/arch/arm/mach-s3c6410/mach-hmt.c
index cdd4b5378552..7619456f2ae8 100644
--- a/arch/arm/mach-s3c6410/mach-hmt.c
+++ b/arch/arm/mach-s3c6410/mach-hmt.c
@@ -82,7 +82,7 @@ static int hmt_bl_init(struct device *dev)
 	return ret;
 }
 
-static int hmt_bl_notify(int brightness)
+static int hmt_bl_notify(struct device *dev, int brightness)
 {
 	/*
 	 * translate from CIELUV/CIELAB L*->brightness, E.G. from
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index bf1eaf3a27d4..dddc0273bc8b 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -172,6 +172,32 @@ unsigned long long sched_clock(void)
 				  clocksource_32k.mult, clocksource_32k.shift);
 }
 
+/**
+ * read_persistent_clock -  Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer.  Convert the cycles elapsed since last read into
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+static struct timespec persistent_ts;
+static cycles_t cycles, last_cycles;
+void read_persistent_clock(struct timespec *ts)
+{
+	unsigned long long nsecs;
+	cycles_t delta;
+	struct timespec *tsp = &persistent_ts;
+
+	last_cycles = cycles;
+	cycles = clocksource_32k.read(&clocksource_32k);
+	delta = cycles - last_cycles;
+
+	nsecs = clocksource_cyc2ns(delta,
+				   clocksource_32k.mult, clocksource_32k.shift);
+
+	timespec_add_ns(tsp, nsecs);
+	*ts = *tsp;
+}
+
 static int __init omap_init_clocksource_32k(void)
 {
 	static char err[] __initdata = KERN_ERR
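The new read_persistent_clock() above turns the free-running 32 kHz sync counter into a monotonically accumulating timespec by converting only the cycle delta since the previous call. A toy standalone sketch of that accumulation pattern, with made-up stand-ins for the counter and the cycle-to-nanosecond conversion:

#include <stdio.h>
#include <stdint.h>

#define RATE_HZ 32768ULL          /* pretend 32 kHz free-running counter */
static uint64_t counter;          /* stand-in hardware counter */
static uint64_t last;             /* counter value at the previous read */
static uint64_t persistent_ns;    /* accumulated monotonic time */

static void read_persistent(void)
{
	uint64_t delta = counter - last;  /* cycles elapsed since last read */
	last = counter;
	persistent_ns += delta * 1000000000ULL / RATE_HZ;
}

int main(void)
{
	counter = 32768; read_persistent();   /* ~1 s elapsed  */
	counter = 49152; read_persistent();   /* +0.5 s        */
	printf("%llu ns\n", (unsigned long long)persistent_ns); /* ~1500000000 */
	return 0;
}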
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 09d82b3c66ce..728c64204184 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1183,7 +1183,7 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
 	}
 
 	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
-	    (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
+	    (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
 		printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
 					"before unlinking\n");
 		dump_stack();
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 64f407ee0f4e..08ccf8922520 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -551,6 +551,19 @@ void omap_dm_timer_stop(struct omap_dm_timer *timer)
 	if (l & OMAP_TIMER_CTRL_ST) {
 		l &= ~0x1;
 		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
+			defined(CONFIG_ARCH_OMAP4)
+		/* Readback to make sure write has completed */
+		omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+		/*
+		 * Wait for functional clock period x 3.5 to make sure that
+		 * timer is stopped
+		 */
+		udelay(3500000 / clk_get_rate(timer->fclk) + 1);
+		/* Ack possibly pending interrupt */
+		omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
+				OMAP_TIMER_INT_OVERFLOW);
+#endif
 	}
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
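The delay added above waits roughly 3.5 functional-clock periods, expressed in microseconds and rounded up by one. A small sketch of that arithmetic for two plausible clock rates (the rates and the helper name are illustrative only):

#include <stdio.h>

/* 3.5 clock periods in microseconds: 3500000 / rate_hz, plus one to
 * round up, matching the udelay() argument in the hunk above. */
static unsigned int stop_delay_us(unsigned long rate_hz)
{
	return 3500000 / rate_hz + 1;
}

int main(void)
{
	printf("32 kHz fclk: %u us\n", stop_delay_us(32768));    /* 107 us */
	printf("13 MHz fclk: %u us\n", stop_delay_us(13000000)); /* 1 us   */
	return 0;
}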
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 9a028bdebb06..a162f585b1e3 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -434,6 +434,7 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define OMAP3430_REV_ES2_1	0x34302034
 #define OMAP3430_REV_ES3_0	0x34303034
 #define OMAP3430_REV_ES3_1	0x34304034
+#define OMAP3430_REV_ES3_1_2	0x34305034
 
 #define OMAP3630_REV_ES1_0	0x36300034
 
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 97d6c50c3dcb..c0ab7c80f72e 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -499,6 +499,9 @@ extern void omap_init_irq(void);
 extern int omap_irq_pending(void);
 void omap_intc_save_context(void);
 void omap_intc_restore_context(void);
+void omap3_intc_suspend(void);
+void omap3_intc_prepare_idle(void);
+void omap3_intc_resume_idle(void);
 #endif
 
 #include <mach/hardware.h>
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 007935a921ea..33933256a226 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -227,6 +227,7 @@ struct omap_hwmod_ocp_if {
 #define SYSC_HAS_SIDLEMODE	(1 << 5)
 #define SYSC_HAS_MIDLEMODE	(1 << 6)
 #define SYSS_MISSING		(1 << 7)
+#define SYSC_NO_CACHE		(1 << 8)	/* XXX SW flag, belongs elsewhere */
 
 /* omap_hwmod_sysconfig.clockact flags */
 #define CLOCKACT_TEST_BOTH	0x0
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 07703f72330e..6fb6e8aa3890 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -53,7 +53,7 @@ config KVM_440
 
 config KVM_EXIT_TIMING
 	bool "Detailed exit timing"
-	depends on KVM
+	depends on KVM_440 || KVM_E500
 	---help---
 	  Calculate elapsed time for every exit/enter cycle. A per-vcpu
 	  report is available in debugfs kvm/vm#_vcpu#_timing.
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 3f26131120b7..c2fb432f576a 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -1,14 +1,12 @@
 /*
- * include/asm-s390/irqflags.h
- *
- * Copyright (C) IBM Corp. 2006
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Copyright IBM Corp. 2006,2010
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
 
 #ifndef __ASM_IRQFLAGS_H
 #define __ASM_IRQFLAGS_H
 
-#ifdef __KERNEL__
+#include <linux/types.h>
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
@@ -102,5 +100,4 @@ static inline int raw_irqs_disabled_flags(unsigned long flags)
 /* For spinlocks etc */
 #define raw_local_irq_save(x)	((x) = raw_local_irq_disable())
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_IRQFLAGS_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 48215d15762b..e8ef21c51bbe 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -571,6 +571,7 @@ pgm_svcper: | |||
571 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | 571 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID |
572 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 572 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
573 | TRACE_IRQS_ON | 573 | TRACE_IRQS_ON |
574 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
574 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 575 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
575 | b BASED(sysc_do_svc) | 576 | b BASED(sysc_do_svc) |
576 | 577 | ||
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9aff1d449b6e..f33658f09dd7 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -549,6 +549,7 @@ pgm_svcper: | |||
549 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | 549 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID |
550 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 550 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
551 | TRACE_IRQS_ON | 551 | TRACE_IRQS_ON |
552 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
552 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 553 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
553 | j sysc_do_svc | 554 | j sysc_do_svc |
554 | 555 | ||
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 1675c48b9145..6289945562b0 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -64,7 +64,7 @@ SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask) | |||
64 | recalc_sigpending(); | 64 | recalc_sigpending(); |
65 | spin_unlock_irq(&current->sighand->siglock); | 65 | spin_unlock_irq(&current->sighand->siglock); |
66 | 66 | ||
67 | current->state = TASK_INTERRUPTIBLE; | 67 | set_current_state(TASK_INTERRUPTIBLE); |
68 | schedule(); | 68 | schedule(); |
69 | set_thread_flag(TIF_RESTORE_SIGMASK); | 69 | set_thread_flag(TIF_RESTORE_SIGMASK); |
70 | 70 | ||
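The signal.c hunk above swaps a bare store to current->state for set_current_state(), which includes a memory barrier. A minimal kernel-context sketch (not a complete driver) of the sleep pattern this lines up with; 'condition' is a placeholder for whatever the sleeper waits on:

for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        /* the barrier in set_current_state() keeps the ->state store
         * ordered before this check; a plain assignment can lose a wakeup */
        if (condition || signal_pending(current))
                break;
        schedule();
}
__set_current_state(TASK_RUNNING);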
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index ba9d8a7bc1ac..b40096494e46 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -213,7 +213,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | |||
213 | return rc2; | 213 | return rc2; |
214 | } | 214 | } |
215 | 215 | ||
216 | static const intercept_handler_t intercept_funcs[0x48 >> 2] = { | 216 | static const intercept_handler_t intercept_funcs[] = { |
217 | [0x00 >> 2] = handle_noop, | 217 | [0x00 >> 2] = handle_noop, |
218 | [0x04 >> 2] = handle_instruction, | 218 | [0x04 >> 2] = handle_instruction, |
219 | [0x08 >> 2] = handle_prog, | 219 | [0x08 >> 2] = handle_prog, |
@@ -230,7 +230,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) | |||
230 | intercept_handler_t func; | 230 | intercept_handler_t func; |
231 | u8 code = vcpu->arch.sie_block->icptcode; | 231 | u8 code = vcpu->arch.sie_block->icptcode; |
232 | 232 | ||
233 | if (code & 3 || code > 0x48) | 233 | if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) |
234 | return -ENOTSUPP; | 234 | return -ENOTSUPP; |
235 | func = intercept_funcs[code >> 2]; | 235 | func = intercept_funcs[code >> 2]; |
236 | if (func) | 236 | if (func) |
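The intercept.c change lets the compiler size the dispatch table and derives the bounds check from ARRAY_SIZE(), so the check can no longer drift out of step with the table. A self-contained sketch of the same pattern with illustrative names (ARRAY_SIZE is defined locally here; the kernel gets it from linux/kernel.h):

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

typedef int (*handler_t)(void *ctx);

static int handle_foo(void *ctx) { (void)ctx; return 0; }   /* placeholder handlers */
static int handle_bar(void *ctx) { (void)ctx; return 0; }

static const handler_t handlers[] = {
        [0x00 >> 2] = handle_foo,
        [0x04 >> 2] = handle_bar,
};

static int dispatch(void *ctx, unsigned char code)
{
        /* reject codes that are not 4-byte aligned or index past the table */
        if (code & 3 || (code >> 2) >= ARRAY_SIZE(handlers))
                return -1;
        return handlers[code >> 2] ? handlers[code >> 2](ctx) : -1;
}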
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 108197ac0d56..4097f6a10860 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -64,8 +64,11 @@ config BITS | |||
64 | default 64 if SPARC64 | 64 | default 64 if SPARC64 |
65 | 65 | ||
66 | config GENERIC_TIME | 66 | config GENERIC_TIME |
67 | def_bool y | ||
68 | |||
69 | config ARCH_USES_GETTIMEOFFSET | ||
67 | bool | 70 | bool |
68 | default y if SPARC64 | 71 | default y if SPARC32 |
69 | 72 | ||
70 | config GENERIC_CMOS_UPDATE | 73 | config GENERIC_CMOS_UPDATE |
71 | bool | 74 | bool |
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig index 983d59824a28..99a1f191497b 100644 --- a/arch/sparc/configs/sparc32_defconfig +++ b/arch/sparc/configs/sparc32_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.31 | 3 | # Linux kernel version: 2.6.33-rc2 |
4 | # Wed Sep 16 00:03:43 2009 | 4 | # Mon Jan 11 23:20:31 2010 |
5 | # | 5 | # |
6 | # CONFIG_64BIT is not set | 6 | # CONFIG_64BIT is not set |
7 | CONFIG_SPARC=y | 7 | CONFIG_SPARC=y |
@@ -41,6 +41,7 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y | |||
41 | # | 41 | # |
42 | CONFIG_TREE_RCU=y | 42 | CONFIG_TREE_RCU=y |
43 | # CONFIG_TREE_PREEMPT_RCU is not set | 43 | # CONFIG_TREE_PREEMPT_RCU is not set |
44 | # CONFIG_TINY_RCU is not set | ||
44 | # CONFIG_RCU_TRACE is not set | 45 | # CONFIG_RCU_TRACE is not set |
45 | CONFIG_RCU_FANOUT=32 | 46 | CONFIG_RCU_FANOUT=32 |
46 | # CONFIG_RCU_FANOUT_EXACT is not set | 47 | # CONFIG_RCU_FANOUT_EXACT is not set |
@@ -88,21 +89,21 @@ CONFIG_TIMERFD=y | |||
88 | CONFIG_EVENTFD=y | 89 | CONFIG_EVENTFD=y |
89 | CONFIG_SHMEM=y | 90 | CONFIG_SHMEM=y |
90 | CONFIG_AIO=y | 91 | CONFIG_AIO=y |
91 | CONFIG_HAVE_PERF_COUNTERS=y | 92 | CONFIG_HAVE_PERF_EVENTS=y |
93 | CONFIG_PERF_USE_VMALLOC=y | ||
92 | 94 | ||
93 | # | 95 | # |
94 | # Performance Counters | 96 | # Kernel Performance Events And Counters |
95 | # | 97 | # |
98 | # CONFIG_PERF_EVENTS is not set | ||
96 | # CONFIG_PERF_COUNTERS is not set | 99 | # CONFIG_PERF_COUNTERS is not set |
97 | CONFIG_VM_EVENT_COUNTERS=y | 100 | CONFIG_VM_EVENT_COUNTERS=y |
98 | CONFIG_PCI_QUIRKS=y | 101 | CONFIG_PCI_QUIRKS=y |
99 | # CONFIG_STRIP_ASM_SYMS is not set | ||
100 | CONFIG_COMPAT_BRK=y | 102 | CONFIG_COMPAT_BRK=y |
101 | CONFIG_SLAB=y | 103 | CONFIG_SLAB=y |
102 | # CONFIG_SLUB is not set | 104 | # CONFIG_SLUB is not set |
103 | # CONFIG_SLOB is not set | 105 | # CONFIG_SLOB is not set |
104 | # CONFIG_PROFILING is not set | 106 | # CONFIG_PROFILING is not set |
105 | # CONFIG_MARKERS is not set | ||
106 | CONFIG_HAVE_OPROFILE=y | 107 | CONFIG_HAVE_OPROFILE=y |
107 | CONFIG_HAVE_ARCH_TRACEHOOK=y | 108 | CONFIG_HAVE_ARCH_TRACEHOOK=y |
108 | CONFIG_HAVE_DMA_ATTRS=y | 109 | CONFIG_HAVE_DMA_ATTRS=y |
@@ -131,14 +132,41 @@ CONFIG_LBDAF=y | |||
131 | # IO Schedulers | 132 | # IO Schedulers |
132 | # | 133 | # |
133 | CONFIG_IOSCHED_NOOP=y | 134 | CONFIG_IOSCHED_NOOP=y |
134 | CONFIG_IOSCHED_AS=y | ||
135 | CONFIG_IOSCHED_DEADLINE=y | 135 | CONFIG_IOSCHED_DEADLINE=y |
136 | CONFIG_IOSCHED_CFQ=y | 136 | CONFIG_IOSCHED_CFQ=y |
137 | # CONFIG_DEFAULT_AS is not set | ||
138 | # CONFIG_DEFAULT_DEADLINE is not set | 137 | # CONFIG_DEFAULT_DEADLINE is not set |
139 | CONFIG_DEFAULT_CFQ=y | 138 | CONFIG_DEFAULT_CFQ=y |
140 | # CONFIG_DEFAULT_NOOP is not set | 139 | # CONFIG_DEFAULT_NOOP is not set |
141 | CONFIG_DEFAULT_IOSCHED="cfq" | 140 | CONFIG_DEFAULT_IOSCHED="cfq" |
141 | # CONFIG_INLINE_SPIN_TRYLOCK is not set | ||
142 | # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set | ||
143 | # CONFIG_INLINE_SPIN_LOCK is not set | ||
144 | # CONFIG_INLINE_SPIN_LOCK_BH is not set | ||
145 | # CONFIG_INLINE_SPIN_LOCK_IRQ is not set | ||
146 | # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set | ||
147 | CONFIG_INLINE_SPIN_UNLOCK=y | ||
148 | # CONFIG_INLINE_SPIN_UNLOCK_BH is not set | ||
149 | CONFIG_INLINE_SPIN_UNLOCK_IRQ=y | ||
150 | # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set | ||
151 | # CONFIG_INLINE_READ_TRYLOCK is not set | ||
152 | # CONFIG_INLINE_READ_LOCK is not set | ||
153 | # CONFIG_INLINE_READ_LOCK_BH is not set | ||
154 | # CONFIG_INLINE_READ_LOCK_IRQ is not set | ||
155 | # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set | ||
156 | CONFIG_INLINE_READ_UNLOCK=y | ||
157 | # CONFIG_INLINE_READ_UNLOCK_BH is not set | ||
158 | CONFIG_INLINE_READ_UNLOCK_IRQ=y | ||
159 | # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set | ||
160 | # CONFIG_INLINE_WRITE_TRYLOCK is not set | ||
161 | # CONFIG_INLINE_WRITE_LOCK is not set | ||
162 | # CONFIG_INLINE_WRITE_LOCK_BH is not set | ||
163 | # CONFIG_INLINE_WRITE_LOCK_IRQ is not set | ||
164 | # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set | ||
165 | CONFIG_INLINE_WRITE_UNLOCK=y | ||
166 | # CONFIG_INLINE_WRITE_UNLOCK_BH is not set | ||
167 | CONFIG_INLINE_WRITE_UNLOCK_IRQ=y | ||
168 | # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set | ||
169 | # CONFIG_MUTEX_SPIN_ON_OWNER is not set | ||
142 | # CONFIG_FREEZER is not set | 170 | # CONFIG_FREEZER is not set |
143 | 171 | ||
144 | # | 172 | # |
@@ -168,8 +196,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 | |||
168 | # CONFIG_PHYS_ADDR_T_64BIT is not set | 196 | # CONFIG_PHYS_ADDR_T_64BIT is not set |
169 | CONFIG_ZONE_DMA_FLAG=1 | 197 | CONFIG_ZONE_DMA_FLAG=1 |
170 | CONFIG_BOUNCE=y | 198 | CONFIG_BOUNCE=y |
171 | CONFIG_HAVE_MLOCK=y | 199 | # CONFIG_KSM is not set |
172 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | ||
173 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | 200 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 |
174 | CONFIG_SUN_PM=y | 201 | CONFIG_SUN_PM=y |
175 | # CONFIG_SPARC_LED is not set | 202 | # CONFIG_SPARC_LED is not set |
@@ -257,6 +284,7 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m | |||
257 | CONFIG_INET6_XFRM_MODE_BEET=m | 284 | CONFIG_INET6_XFRM_MODE_BEET=m |
258 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set | 285 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set |
259 | CONFIG_IPV6_SIT=m | 286 | CONFIG_IPV6_SIT=m |
287 | # CONFIG_IPV6_SIT_6RD is not set | ||
260 | CONFIG_IPV6_NDISC_NODETYPE=y | 288 | CONFIG_IPV6_NDISC_NODETYPE=y |
261 | CONFIG_IPV6_TUNNEL=m | 289 | CONFIG_IPV6_TUNNEL=m |
262 | # CONFIG_IPV6_MULTIPLE_TABLES is not set | 290 | # CONFIG_IPV6_MULTIPLE_TABLES is not set |
@@ -295,9 +323,6 @@ CONFIG_NET_PKTGEN=m | |||
295 | # CONFIG_AF_RXRPC is not set | 323 | # CONFIG_AF_RXRPC is not set |
296 | CONFIG_WIRELESS=y | 324 | CONFIG_WIRELESS=y |
297 | # CONFIG_CFG80211 is not set | 325 | # CONFIG_CFG80211 is not set |
298 | CONFIG_CFG80211_DEFAULT_PS_VALUE=0 | ||
299 | CONFIG_WIRELESS_OLD_REGULATORY=y | ||
300 | # CONFIG_WIRELESS_EXT is not set | ||
301 | # CONFIG_LIB80211 is not set | 326 | # CONFIG_LIB80211 is not set |
302 | 327 | ||
303 | # | 328 | # |
@@ -335,6 +360,10 @@ CONFIG_BLK_DEV=y | |||
335 | # CONFIG_BLK_DEV_COW_COMMON is not set | 360 | # CONFIG_BLK_DEV_COW_COMMON is not set |
336 | CONFIG_BLK_DEV_LOOP=m | 361 | CONFIG_BLK_DEV_LOOP=m |
337 | CONFIG_BLK_DEV_CRYPTOLOOP=m | 362 | CONFIG_BLK_DEV_CRYPTOLOOP=m |
363 | |||
364 | # | ||
365 | # DRBD disabled because PROC_FS, INET or CONNECTOR not selected | ||
366 | # | ||
338 | # CONFIG_BLK_DEV_NBD is not set | 367 | # CONFIG_BLK_DEV_NBD is not set |
339 | # CONFIG_BLK_DEV_SX8 is not set | 368 | # CONFIG_BLK_DEV_SX8 is not set |
340 | CONFIG_BLK_DEV_RAM=y | 369 | CONFIG_BLK_DEV_RAM=y |
@@ -398,8 +427,11 @@ CONFIG_SCSI_LOWLEVEL=y | |||
398 | # CONFIG_ISCSI_TCP is not set | 427 | # CONFIG_ISCSI_TCP is not set |
399 | # CONFIG_SCSI_CXGB3_ISCSI is not set | 428 | # CONFIG_SCSI_CXGB3_ISCSI is not set |
400 | # CONFIG_SCSI_BNX2_ISCSI is not set | 429 | # CONFIG_SCSI_BNX2_ISCSI is not set |
430 | # CONFIG_BE2ISCSI is not set | ||
401 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | 431 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set |
432 | # CONFIG_SCSI_HPSA is not set | ||
402 | # CONFIG_SCSI_3W_9XXX is not set | 433 | # CONFIG_SCSI_3W_9XXX is not set |
434 | # CONFIG_SCSI_3W_SAS is not set | ||
403 | # CONFIG_SCSI_ACARD is not set | 435 | # CONFIG_SCSI_ACARD is not set |
404 | # CONFIG_SCSI_AACRAID is not set | 436 | # CONFIG_SCSI_AACRAID is not set |
405 | # CONFIG_SCSI_AIC7XXX is not set | 437 | # CONFIG_SCSI_AIC7XXX is not set |
@@ -434,7 +466,9 @@ CONFIG_SCSI_QLOGICPTI=m | |||
434 | # CONFIG_SCSI_DEBUG is not set | 466 | # CONFIG_SCSI_DEBUG is not set |
435 | CONFIG_SCSI_SUNESP=y | 467 | CONFIG_SCSI_SUNESP=y |
436 | # CONFIG_SCSI_PMCRAID is not set | 468 | # CONFIG_SCSI_PMCRAID is not set |
469 | # CONFIG_SCSI_PM8001 is not set | ||
437 | # CONFIG_SCSI_SRP is not set | 470 | # CONFIG_SCSI_SRP is not set |
471 | # CONFIG_SCSI_BFA_FC is not set | ||
438 | # CONFIG_SCSI_DH is not set | 472 | # CONFIG_SCSI_DH is not set |
439 | # CONFIG_SCSI_OSD_INITIATOR is not set | 473 | # CONFIG_SCSI_OSD_INITIATOR is not set |
440 | # CONFIG_ATA is not set | 474 | # CONFIG_ATA is not set |
@@ -450,7 +484,7 @@ CONFIG_SCSI_SUNESP=y | |||
450 | # | 484 | # |
451 | 485 | ||
452 | # | 486 | # |
453 | # See the help texts for more information. | 487 | # The newer stack is recommended. |
454 | # | 488 | # |
455 | # CONFIG_FIREWIRE is not set | 489 | # CONFIG_FIREWIRE is not set |
456 | # CONFIG_IEEE1394 is not set | 490 | # CONFIG_IEEE1394 is not set |
@@ -487,6 +521,7 @@ CONFIG_SUNQE=m | |||
487 | # CONFIG_NET_PCI is not set | 521 | # CONFIG_NET_PCI is not set |
488 | # CONFIG_B44 is not set | 522 | # CONFIG_B44 is not set |
489 | # CONFIG_KS8842 is not set | 523 | # CONFIG_KS8842 is not set |
524 | # CONFIG_KS8851_MLL is not set | ||
490 | # CONFIG_ATL2 is not set | 525 | # CONFIG_ATL2 is not set |
491 | CONFIG_NETDEV_1000=y | 526 | CONFIG_NETDEV_1000=y |
492 | # CONFIG_ACENIC is not set | 527 | # CONFIG_ACENIC is not set |
@@ -546,6 +581,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y | |||
546 | # CONFIG_NETCONSOLE is not set | 581 | # CONFIG_NETCONSOLE is not set |
547 | # CONFIG_NETPOLL is not set | 582 | # CONFIG_NETPOLL is not set |
548 | # CONFIG_NET_POLL_CONTROLLER is not set | 583 | # CONFIG_NET_POLL_CONTROLLER is not set |
584 | # CONFIG_VMXNET3 is not set | ||
549 | # CONFIG_ISDN is not set | 585 | # CONFIG_ISDN is not set |
550 | # CONFIG_PHONE is not set | 586 | # CONFIG_PHONE is not set |
551 | 587 | ||
@@ -555,6 +591,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y | |||
555 | CONFIG_INPUT=y | 591 | CONFIG_INPUT=y |
556 | # CONFIG_INPUT_FF_MEMLESS is not set | 592 | # CONFIG_INPUT_FF_MEMLESS is not set |
557 | # CONFIG_INPUT_POLLDEV is not set | 593 | # CONFIG_INPUT_POLLDEV is not set |
594 | # CONFIG_INPUT_SPARSEKMAP is not set | ||
558 | 595 | ||
559 | # | 596 | # |
560 | # Userland interfaces | 597 | # Userland interfaces |
@@ -574,6 +611,7 @@ CONFIG_INPUT_KEYBOARD=y | |||
574 | CONFIG_KEYBOARD_ATKBD=m | 611 | CONFIG_KEYBOARD_ATKBD=m |
575 | # CONFIG_KEYBOARD_LKKBD is not set | 612 | # CONFIG_KEYBOARD_LKKBD is not set |
576 | # CONFIG_KEYBOARD_NEWTON is not set | 613 | # CONFIG_KEYBOARD_NEWTON is not set |
614 | # CONFIG_KEYBOARD_OPENCORES is not set | ||
577 | # CONFIG_KEYBOARD_STOWAWAY is not set | 615 | # CONFIG_KEYBOARD_STOWAWAY is not set |
578 | CONFIG_KEYBOARD_SUNKBD=m | 616 | CONFIG_KEYBOARD_SUNKBD=m |
579 | # CONFIG_KEYBOARD_XTKBD is not set | 617 | # CONFIG_KEYBOARD_XTKBD is not set |
@@ -604,6 +642,7 @@ CONFIG_SERIO_SERPORT=m | |||
604 | # CONFIG_SERIO_PCIPS2 is not set | 642 | # CONFIG_SERIO_PCIPS2 is not set |
605 | CONFIG_SERIO_LIBPS2=m | 643 | CONFIG_SERIO_LIBPS2=m |
606 | # CONFIG_SERIO_RAW is not set | 644 | # CONFIG_SERIO_RAW is not set |
645 | # CONFIG_SERIO_ALTERA_PS2 is not set | ||
607 | # CONFIG_GAMEPORT is not set | 646 | # CONFIG_GAMEPORT is not set |
608 | 647 | ||
609 | # | 648 | # |
@@ -636,6 +675,7 @@ CONFIG_SERIAL_CORE=y | |||
636 | CONFIG_SERIAL_CORE_CONSOLE=y | 675 | CONFIG_SERIAL_CORE_CONSOLE=y |
637 | CONFIG_CONSOLE_POLL=y | 676 | CONFIG_CONSOLE_POLL=y |
638 | # CONFIG_SERIAL_JSM is not set | 677 | # CONFIG_SERIAL_JSM is not set |
678 | # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set | ||
639 | CONFIG_UNIX98_PTYS=y | 679 | CONFIG_UNIX98_PTYS=y |
640 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | 680 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set |
641 | CONFIG_LEGACY_PTYS=y | 681 | CONFIG_LEGACY_PTYS=y |
@@ -661,6 +701,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | |||
661 | # CONFIG_POWER_SUPPLY is not set | 701 | # CONFIG_POWER_SUPPLY is not set |
662 | CONFIG_HWMON=y | 702 | CONFIG_HWMON=y |
663 | # CONFIG_HWMON_VID is not set | 703 | # CONFIG_HWMON_VID is not set |
704 | # CONFIG_HWMON_DEBUG_CHIP is not set | ||
705 | |||
706 | # | ||
707 | # Native drivers | ||
708 | # | ||
664 | # CONFIG_SENSORS_I5K_AMB is not set | 709 | # CONFIG_SENSORS_I5K_AMB is not set |
665 | # CONFIG_SENSORS_F71805F is not set | 710 | # CONFIG_SENSORS_F71805F is not set |
666 | # CONFIG_SENSORS_F71882FG is not set | 711 | # CONFIG_SENSORS_F71882FG is not set |
@@ -675,9 +720,7 @@ CONFIG_HWMON=y | |||
675 | # CONFIG_SENSORS_VT8231 is not set | 720 | # CONFIG_SENSORS_VT8231 is not set |
676 | # CONFIG_SENSORS_W83627HF is not set | 721 | # CONFIG_SENSORS_W83627HF is not set |
677 | # CONFIG_SENSORS_W83627EHF is not set | 722 | # CONFIG_SENSORS_W83627EHF is not set |
678 | # CONFIG_HWMON_DEBUG_CHIP is not set | ||
679 | # CONFIG_THERMAL is not set | 723 | # CONFIG_THERMAL is not set |
680 | # CONFIG_THERMAL_HWMON is not set | ||
681 | # CONFIG_WATCHDOG is not set | 724 | # CONFIG_WATCHDOG is not set |
682 | CONFIG_SSB_POSSIBLE=y | 725 | CONFIG_SSB_POSSIBLE=y |
683 | 726 | ||
@@ -699,6 +742,7 @@ CONFIG_SSB_POSSIBLE=y | |||
699 | # | 742 | # |
700 | # Graphics support | 743 | # Graphics support |
701 | # | 744 | # |
745 | CONFIG_VGA_ARB=y | ||
702 | # CONFIG_VGASTATE is not set | 746 | # CONFIG_VGASTATE is not set |
703 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | 747 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set |
704 | # CONFIG_FB is not set | 748 | # CONFIG_FB is not set |
@@ -776,7 +820,9 @@ CONFIG_RTC_INTF_DEV=y | |||
776 | # CONFIG_RTC_DRV_M48T86 is not set | 820 | # CONFIG_RTC_DRV_M48T86 is not set |
777 | # CONFIG_RTC_DRV_M48T35 is not set | 821 | # CONFIG_RTC_DRV_M48T35 is not set |
778 | CONFIG_RTC_DRV_M48T59=y | 822 | CONFIG_RTC_DRV_M48T59=y |
823 | # CONFIG_RTC_DRV_MSM6242 is not set | ||
779 | # CONFIG_RTC_DRV_BQ4802 is not set | 824 | # CONFIG_RTC_DRV_BQ4802 is not set |
825 | # CONFIG_RTC_DRV_RP5C01 is not set | ||
780 | # CONFIG_RTC_DRV_V3020 is not set | 826 | # CONFIG_RTC_DRV_V3020 is not set |
781 | 827 | ||
782 | # | 828 | # |
@@ -955,6 +1001,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y | |||
955 | CONFIG_ENABLE_MUST_CHECK=y | 1001 | CONFIG_ENABLE_MUST_CHECK=y |
956 | CONFIG_FRAME_WARN=1024 | 1002 | CONFIG_FRAME_WARN=1024 |
957 | CONFIG_MAGIC_SYSRQ=y | 1003 | CONFIG_MAGIC_SYSRQ=y |
1004 | # CONFIG_STRIP_ASM_SYMS is not set | ||
958 | # CONFIG_UNUSED_SYMBOLS is not set | 1005 | # CONFIG_UNUSED_SYMBOLS is not set |
959 | # CONFIG_DEBUG_FS is not set | 1006 | # CONFIG_DEBUG_FS is not set |
960 | # CONFIG_HEADERS_CHECK is not set | 1007 | # CONFIG_HEADERS_CHECK is not set |
@@ -1003,9 +1050,9 @@ CONFIG_KGDB=y | |||
1003 | CONFIG_KGDB_SERIAL_CONSOLE=y | 1050 | CONFIG_KGDB_SERIAL_CONSOLE=y |
1004 | CONFIG_KGDB_TESTS=y | 1051 | CONFIG_KGDB_TESTS=y |
1005 | # CONFIG_KGDB_TESTS_ON_BOOT is not set | 1052 | # CONFIG_KGDB_TESTS_ON_BOOT is not set |
1006 | # CONFIG_KMEMCHECK is not set | ||
1007 | # CONFIG_DEBUG_STACK_USAGE is not set | 1053 | # CONFIG_DEBUG_STACK_USAGE is not set |
1008 | # CONFIG_STACK_DEBUG is not set | 1054 | # CONFIG_STACK_DEBUG is not set |
1055 | # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set | ||
1009 | 1056 | ||
1010 | # | 1057 | # |
1011 | # Security options | 1058 | # Security options |
@@ -1013,7 +1060,11 @@ CONFIG_KGDB_TESTS=y | |||
1013 | # CONFIG_KEYS is not set | 1060 | # CONFIG_KEYS is not set |
1014 | # CONFIG_SECURITY is not set | 1061 | # CONFIG_SECURITY is not set |
1015 | # CONFIG_SECURITYFS is not set | 1062 | # CONFIG_SECURITYFS is not set |
1016 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | 1063 | # CONFIG_DEFAULT_SECURITY_SELINUX is not set |
1064 | # CONFIG_DEFAULT_SECURITY_SMACK is not set | ||
1065 | # CONFIG_DEFAULT_SECURITY_TOMOYO is not set | ||
1066 | CONFIG_DEFAULT_SECURITY_DAC=y | ||
1067 | CONFIG_DEFAULT_SECURITY="" | ||
1017 | CONFIG_CRYPTO=y | 1068 | CONFIG_CRYPTO=y |
1018 | 1069 | ||
1019 | # | 1070 | # |
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index f80b881dfea7..41c5a56aa6f2 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.31 | 3 | # Linux kernel version: 2.6.33-rc2 |
4 | # Tue Sep 15 17:06:03 2009 | 4 | # Wed Jan 20 16:31:47 2010 |
5 | # | 5 | # |
6 | CONFIG_64BIT=y | 6 | CONFIG_64BIT=y |
7 | CONFIG_SPARC=y | 7 | CONFIG_SPARC=y |
@@ -20,6 +20,7 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y | |||
20 | CONFIG_AUDIT_ARCH=y | 20 | CONFIG_AUDIT_ARCH=y |
21 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | 21 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y |
22 | CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y | 22 | CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y |
23 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | ||
23 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | 24 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y |
24 | CONFIG_MMU=y | 25 | CONFIG_MMU=y |
25 | CONFIG_ARCH_NO_VIRT_TO_BUS=y | 26 | CONFIG_ARCH_NO_VIRT_TO_BUS=y |
@@ -50,6 +51,7 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y | |||
50 | # | 51 | # |
51 | CONFIG_TREE_RCU=y | 52 | CONFIG_TREE_RCU=y |
52 | # CONFIG_TREE_PREEMPT_RCU is not set | 53 | # CONFIG_TREE_PREEMPT_RCU is not set |
54 | # CONFIG_TINY_RCU is not set | ||
53 | # CONFIG_RCU_TRACE is not set | 55 | # CONFIG_RCU_TRACE is not set |
54 | CONFIG_RCU_FANOUT=64 | 56 | CONFIG_RCU_FANOUT=64 |
55 | # CONFIG_RCU_FANOUT_EXACT is not set | 57 | # CONFIG_RCU_FANOUT_EXACT is not set |
@@ -62,8 +64,7 @@ CONFIG_RT_GROUP_SCHED=y | |||
62 | CONFIG_USER_SCHED=y | 64 | CONFIG_USER_SCHED=y |
63 | # CONFIG_CGROUP_SCHED is not set | 65 | # CONFIG_CGROUP_SCHED is not set |
64 | # CONFIG_CGROUPS is not set | 66 | # CONFIG_CGROUPS is not set |
65 | CONFIG_SYSFS_DEPRECATED=y | 67 | # CONFIG_SYSFS_DEPRECATED_V2 is not set |
66 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
67 | CONFIG_RELAY=y | 68 | CONFIG_RELAY=y |
68 | CONFIG_NAMESPACES=y | 69 | CONFIG_NAMESPACES=y |
69 | # CONFIG_UTS_NS is not set | 70 | # CONFIG_UTS_NS is not set |
@@ -97,24 +98,25 @@ CONFIG_TIMERFD=y | |||
97 | CONFIG_EVENTFD=y | 98 | CONFIG_EVENTFD=y |
98 | CONFIG_SHMEM=y | 99 | CONFIG_SHMEM=y |
99 | CONFIG_AIO=y | 100 | CONFIG_AIO=y |
100 | CONFIG_HAVE_PERF_COUNTERS=y | 101 | CONFIG_HAVE_PERF_EVENTS=y |
102 | CONFIG_PERF_USE_VMALLOC=y | ||
101 | 103 | ||
102 | # | 104 | # |
103 | # Performance Counters | 105 | # Kernel Performance Events And Counters |
104 | # | 106 | # |
105 | CONFIG_PERF_COUNTERS=y | 107 | CONFIG_PERF_EVENTS=y |
106 | CONFIG_EVENT_PROFILE=y | 108 | CONFIG_EVENT_PROFILE=y |
109 | CONFIG_PERF_COUNTERS=y | ||
110 | # CONFIG_DEBUG_PERF_USE_VMALLOC is not set | ||
107 | CONFIG_VM_EVENT_COUNTERS=y | 111 | CONFIG_VM_EVENT_COUNTERS=y |
108 | CONFIG_PCI_QUIRKS=y | 112 | CONFIG_PCI_QUIRKS=y |
109 | CONFIG_SLUB_DEBUG=y | 113 | CONFIG_SLUB_DEBUG=y |
110 | # CONFIG_STRIP_ASM_SYMS is not set | ||
111 | # CONFIG_COMPAT_BRK is not set | 114 | # CONFIG_COMPAT_BRK is not set |
112 | # CONFIG_SLAB is not set | 115 | # CONFIG_SLAB is not set |
113 | CONFIG_SLUB=y | 116 | CONFIG_SLUB=y |
114 | # CONFIG_SLOB is not set | 117 | # CONFIG_SLOB is not set |
115 | CONFIG_PROFILING=y | 118 | CONFIG_PROFILING=y |
116 | CONFIG_TRACEPOINTS=y | 119 | CONFIG_TRACEPOINTS=y |
117 | CONFIG_MARKERS=y | ||
118 | CONFIG_OPROFILE=m | 120 | CONFIG_OPROFILE=m |
119 | CONFIG_HAVE_OPROFILE=y | 121 | CONFIG_HAVE_OPROFILE=y |
120 | CONFIG_KPROBES=y | 122 | CONFIG_KPROBES=y |
@@ -152,14 +154,41 @@ CONFIG_BLOCK_COMPAT=y | |||
152 | # IO Schedulers | 154 | # IO Schedulers |
153 | # | 155 | # |
154 | CONFIG_IOSCHED_NOOP=y | 156 | CONFIG_IOSCHED_NOOP=y |
155 | CONFIG_IOSCHED_AS=y | ||
156 | CONFIG_IOSCHED_DEADLINE=y | 157 | CONFIG_IOSCHED_DEADLINE=y |
157 | CONFIG_IOSCHED_CFQ=y | 158 | CONFIG_IOSCHED_CFQ=y |
158 | CONFIG_DEFAULT_AS=y | ||
159 | # CONFIG_DEFAULT_DEADLINE is not set | 159 | # CONFIG_DEFAULT_DEADLINE is not set |
160 | # CONFIG_DEFAULT_CFQ is not set | 160 | CONFIG_DEFAULT_CFQ=y |
161 | # CONFIG_DEFAULT_NOOP is not set | 161 | # CONFIG_DEFAULT_NOOP is not set |
162 | CONFIG_DEFAULT_IOSCHED="anticipatory" | 162 | CONFIG_DEFAULT_IOSCHED="cfq" |
163 | # CONFIG_INLINE_SPIN_TRYLOCK is not set | ||
164 | # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set | ||
165 | # CONFIG_INLINE_SPIN_LOCK is not set | ||
166 | # CONFIG_INLINE_SPIN_LOCK_BH is not set | ||
167 | # CONFIG_INLINE_SPIN_LOCK_IRQ is not set | ||
168 | # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set | ||
169 | CONFIG_INLINE_SPIN_UNLOCK=y | ||
170 | # CONFIG_INLINE_SPIN_UNLOCK_BH is not set | ||
171 | CONFIG_INLINE_SPIN_UNLOCK_IRQ=y | ||
172 | # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set | ||
173 | # CONFIG_INLINE_READ_TRYLOCK is not set | ||
174 | # CONFIG_INLINE_READ_LOCK is not set | ||
175 | # CONFIG_INLINE_READ_LOCK_BH is not set | ||
176 | # CONFIG_INLINE_READ_LOCK_IRQ is not set | ||
177 | # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set | ||
178 | CONFIG_INLINE_READ_UNLOCK=y | ||
179 | # CONFIG_INLINE_READ_UNLOCK_BH is not set | ||
180 | CONFIG_INLINE_READ_UNLOCK_IRQ=y | ||
181 | # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set | ||
182 | # CONFIG_INLINE_WRITE_TRYLOCK is not set | ||
183 | # CONFIG_INLINE_WRITE_LOCK is not set | ||
184 | # CONFIG_INLINE_WRITE_LOCK_BH is not set | ||
185 | # CONFIG_INLINE_WRITE_LOCK_IRQ is not set | ||
186 | # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set | ||
187 | CONFIG_INLINE_WRITE_UNLOCK=y | ||
188 | # CONFIG_INLINE_WRITE_UNLOCK_BH is not set | ||
189 | CONFIG_INLINE_WRITE_UNLOCK_IRQ=y | ||
190 | # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set | ||
191 | CONFIG_MUTEX_SPIN_ON_OWNER=y | ||
163 | # CONFIG_FREEZER is not set | 192 | # CONFIG_FREEZER is not set |
164 | 193 | ||
165 | # | 194 | # |
@@ -179,6 +208,7 @@ CONFIG_GENERIC_HWEIGHT=y | |||
179 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 208 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
180 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | 209 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y |
181 | CONFIG_SPARC64_SMP=y | 210 | CONFIG_SPARC64_SMP=y |
211 | CONFIG_EARLYFB=y | ||
182 | CONFIG_SPARC64_PAGE_SIZE_8KB=y | 212 | CONFIG_SPARC64_PAGE_SIZE_8KB=y |
183 | # CONFIG_SPARC64_PAGE_SIZE_64KB is not set | 213 | # CONFIG_SPARC64_PAGE_SIZE_64KB is not set |
184 | CONFIG_SECCOMP=y | 214 | CONFIG_SECCOMP=y |
@@ -216,8 +246,7 @@ CONFIG_MIGRATION=y | |||
216 | CONFIG_PHYS_ADDR_T_64BIT=y | 246 | CONFIG_PHYS_ADDR_T_64BIT=y |
217 | CONFIG_ZONE_DMA_FLAG=0 | 247 | CONFIG_ZONE_DMA_FLAG=0 |
218 | CONFIG_NR_QUICK=1 | 248 | CONFIG_NR_QUICK=1 |
219 | CONFIG_HAVE_MLOCK=y | 249 | # CONFIG_KSM is not set |
220 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | ||
221 | CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 | 250 | CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 |
222 | CONFIG_SCHED_SMT=y | 251 | CONFIG_SCHED_SMT=y |
223 | CONFIG_SCHED_MC=y | 252 | CONFIG_SCHED_MC=y |
@@ -315,6 +344,7 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m | |||
315 | CONFIG_INET6_XFRM_MODE_BEET=m | 344 | CONFIG_INET6_XFRM_MODE_BEET=m |
316 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set | 345 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set |
317 | CONFIG_IPV6_SIT=m | 346 | CONFIG_IPV6_SIT=m |
347 | # CONFIG_IPV6_SIT_6RD is not set | ||
318 | CONFIG_IPV6_NDISC_NODETYPE=y | 348 | CONFIG_IPV6_NDISC_NODETYPE=y |
319 | CONFIG_IPV6_TUNNEL=m | 349 | CONFIG_IPV6_TUNNEL=m |
320 | # CONFIG_IPV6_MULTIPLE_TABLES is not set | 350 | # CONFIG_IPV6_MULTIPLE_TABLES is not set |
@@ -356,9 +386,6 @@ CONFIG_NET_TCPPROBE=m | |||
356 | # CONFIG_AF_RXRPC is not set | 386 | # CONFIG_AF_RXRPC is not set |
357 | CONFIG_WIRELESS=y | 387 | CONFIG_WIRELESS=y |
358 | # CONFIG_CFG80211 is not set | 388 | # CONFIG_CFG80211 is not set |
359 | CONFIG_CFG80211_DEFAULT_PS_VALUE=0 | ||
360 | CONFIG_WIRELESS_OLD_REGULATORY=y | ||
361 | # CONFIG_WIRELESS_EXT is not set | ||
362 | # CONFIG_LIB80211 is not set | 389 | # CONFIG_LIB80211 is not set |
363 | 390 | ||
364 | # | 391 | # |
@@ -376,6 +403,7 @@ CONFIG_WIRELESS_OLD_REGULATORY=y | |||
376 | # Generic Driver Options | 403 | # Generic Driver Options |
377 | # | 404 | # |
378 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 405 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
406 | # CONFIG_DEVTMPFS is not set | ||
379 | CONFIG_STANDALONE=y | 407 | CONFIG_STANDALONE=y |
380 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 408 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
381 | CONFIG_FW_LOADER=y | 409 | CONFIG_FW_LOADER=y |
@@ -397,6 +425,11 @@ CONFIG_BLK_DEV=y | |||
397 | # CONFIG_BLK_DEV_COW_COMMON is not set | 425 | # CONFIG_BLK_DEV_COW_COMMON is not set |
398 | CONFIG_BLK_DEV_LOOP=m | 426 | CONFIG_BLK_DEV_LOOP=m |
399 | CONFIG_BLK_DEV_CRYPTOLOOP=m | 427 | CONFIG_BLK_DEV_CRYPTOLOOP=m |
428 | |||
429 | # | ||
430 | # DRBD disabled because PROC_FS, INET or CONNECTOR not selected | ||
431 | # | ||
432 | # CONFIG_BLK_DEV_DRBD is not set | ||
400 | CONFIG_BLK_DEV_NBD=m | 433 | CONFIG_BLK_DEV_NBD=m |
401 | # CONFIG_BLK_DEV_SX8 is not set | 434 | # CONFIG_BLK_DEV_SX8 is not set |
402 | # CONFIG_BLK_DEV_UB is not set | 435 | # CONFIG_BLK_DEV_UB is not set |
@@ -408,6 +441,7 @@ CONFIG_ATA_OVER_ETH=m | |||
408 | CONFIG_SUNVDC=m | 441 | CONFIG_SUNVDC=m |
409 | # CONFIG_BLK_DEV_HD is not set | 442 | # CONFIG_BLK_DEV_HD is not set |
410 | CONFIG_MISC_DEVICES=y | 443 | CONFIG_MISC_DEVICES=y |
444 | # CONFIG_AD525X_DPOT is not set | ||
411 | # CONFIG_PHANTOM is not set | 445 | # CONFIG_PHANTOM is not set |
412 | # CONFIG_SGI_IOC4 is not set | 446 | # CONFIG_SGI_IOC4 is not set |
413 | # CONFIG_TIFM_CORE is not set | 447 | # CONFIG_TIFM_CORE is not set |
@@ -415,6 +449,7 @@ CONFIG_MISC_DEVICES=y | |||
415 | # CONFIG_ENCLOSURE_SERVICES is not set | 449 | # CONFIG_ENCLOSURE_SERVICES is not set |
416 | # CONFIG_HP_ILO is not set | 450 | # CONFIG_HP_ILO is not set |
417 | # CONFIG_ISL29003 is not set | 451 | # CONFIG_ISL29003 is not set |
452 | # CONFIG_DS1682 is not set | ||
418 | # CONFIG_C2PORT is not set | 453 | # CONFIG_C2PORT is not set |
419 | 454 | ||
420 | # | 455 | # |
@@ -522,8 +557,11 @@ CONFIG_SCSI_LOWLEVEL=y | |||
522 | # CONFIG_ISCSI_TCP is not set | 557 | # CONFIG_ISCSI_TCP is not set |
523 | # CONFIG_SCSI_CXGB3_ISCSI is not set | 558 | # CONFIG_SCSI_CXGB3_ISCSI is not set |
524 | # CONFIG_SCSI_BNX2_ISCSI is not set | 559 | # CONFIG_SCSI_BNX2_ISCSI is not set |
560 | # CONFIG_BE2ISCSI is not set | ||
525 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | 561 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set |
562 | # CONFIG_SCSI_HPSA is not set | ||
526 | # CONFIG_SCSI_3W_9XXX is not set | 563 | # CONFIG_SCSI_3W_9XXX is not set |
564 | # CONFIG_SCSI_3W_SAS is not set | ||
527 | # CONFIG_SCSI_ACARD is not set | 565 | # CONFIG_SCSI_ACARD is not set |
528 | # CONFIG_SCSI_AACRAID is not set | 566 | # CONFIG_SCSI_AACRAID is not set |
529 | # CONFIG_SCSI_AIC7XXX is not set | 567 | # CONFIG_SCSI_AIC7XXX is not set |
@@ -557,7 +595,9 @@ CONFIG_SCSI_LOWLEVEL=y | |||
557 | # CONFIG_SCSI_DEBUG is not set | 595 | # CONFIG_SCSI_DEBUG is not set |
558 | # CONFIG_SCSI_SUNESP is not set | 596 | # CONFIG_SCSI_SUNESP is not set |
559 | # CONFIG_SCSI_PMCRAID is not set | 597 | # CONFIG_SCSI_PMCRAID is not set |
598 | # CONFIG_SCSI_PM8001 is not set | ||
560 | # CONFIG_SCSI_SRP is not set | 599 | # CONFIG_SCSI_SRP is not set |
600 | # CONFIG_SCSI_BFA_FC is not set | ||
561 | # CONFIG_SCSI_DH is not set | 601 | # CONFIG_SCSI_DH is not set |
562 | # CONFIG_SCSI_OSD_INITIATOR is not set | 602 | # CONFIG_SCSI_OSD_INITIATOR is not set |
563 | # CONFIG_ATA is not set | 603 | # CONFIG_ATA is not set |
@@ -568,7 +608,9 @@ CONFIG_MD_RAID0=m | |||
568 | CONFIG_MD_RAID1=m | 608 | CONFIG_MD_RAID1=m |
569 | CONFIG_MD_RAID10=m | 609 | CONFIG_MD_RAID10=m |
570 | CONFIG_MD_RAID456=m | 610 | CONFIG_MD_RAID456=m |
611 | # CONFIG_MULTICORE_RAID456 is not set | ||
571 | CONFIG_MD_RAID6_PQ=m | 612 | CONFIG_MD_RAID6_PQ=m |
613 | # CONFIG_ASYNC_RAID6_TEST is not set | ||
572 | CONFIG_MD_MULTIPATH=m | 614 | CONFIG_MD_MULTIPATH=m |
573 | # CONFIG_MD_FAULTY is not set | 615 | # CONFIG_MD_FAULTY is not set |
574 | CONFIG_BLK_DEV_DM=m | 616 | CONFIG_BLK_DEV_DM=m |
@@ -592,7 +634,7 @@ CONFIG_DM_ZERO=m | |||
592 | # | 634 | # |
593 | 635 | ||
594 | # | 636 | # |
595 | # See the help texts for more information. | 637 | # The newer stack is recommended. |
596 | # | 638 | # |
597 | # CONFIG_FIREWIRE is not set | 639 | # CONFIG_FIREWIRE is not set |
598 | # CONFIG_IEEE1394 is not set | 640 | # CONFIG_IEEE1394 is not set |
@@ -664,6 +706,7 @@ CONFIG_NET_PCI=y | |||
664 | # CONFIG_SUNDANCE is not set | 706 | # CONFIG_SUNDANCE is not set |
665 | # CONFIG_TLAN is not set | 707 | # CONFIG_TLAN is not set |
666 | # CONFIG_KS8842 is not set | 708 | # CONFIG_KS8842 is not set |
709 | # CONFIG_KS8851_MLL is not set | ||
667 | # CONFIG_VIA_RHINE is not set | 710 | # CONFIG_VIA_RHINE is not set |
668 | # CONFIG_SC92031 is not set | 711 | # CONFIG_SC92031 is not set |
669 | # CONFIG_ATL2 is not set | 712 | # CONFIG_ATL2 is not set |
@@ -745,6 +788,7 @@ CONFIG_SLHC=m | |||
745 | # CONFIG_NETCONSOLE is not set | 788 | # CONFIG_NETCONSOLE is not set |
746 | # CONFIG_NETPOLL is not set | 789 | # CONFIG_NETPOLL is not set |
747 | # CONFIG_NET_POLL_CONTROLLER is not set | 790 | # CONFIG_NET_POLL_CONTROLLER is not set |
791 | # CONFIG_VMXNET3 is not set | ||
748 | # CONFIG_ISDN is not set | 792 | # CONFIG_ISDN is not set |
749 | # CONFIG_PHONE is not set | 793 | # CONFIG_PHONE is not set |
750 | 794 | ||
@@ -754,6 +798,7 @@ CONFIG_SLHC=m | |||
754 | CONFIG_INPUT=y | 798 | CONFIG_INPUT=y |
755 | # CONFIG_INPUT_FF_MEMLESS is not set | 799 | # CONFIG_INPUT_FF_MEMLESS is not set |
756 | # CONFIG_INPUT_POLLDEV is not set | 800 | # CONFIG_INPUT_POLLDEV is not set |
801 | # CONFIG_INPUT_SPARSEKMAP is not set | ||
757 | 802 | ||
758 | # | 803 | # |
759 | # Userland interfaces | 804 | # Userland interfaces |
@@ -770,9 +815,13 @@ CONFIG_INPUT_EVDEV=y | |||
770 | # Input Device Drivers | 815 | # Input Device Drivers |
771 | # | 816 | # |
772 | CONFIG_INPUT_KEYBOARD=y | 817 | CONFIG_INPUT_KEYBOARD=y |
818 | # CONFIG_KEYBOARD_ADP5588 is not set | ||
773 | CONFIG_KEYBOARD_ATKBD=y | 819 | CONFIG_KEYBOARD_ATKBD=y |
820 | # CONFIG_QT2160 is not set | ||
774 | CONFIG_KEYBOARD_LKKBD=m | 821 | CONFIG_KEYBOARD_LKKBD=m |
822 | # CONFIG_KEYBOARD_MAX7359 is not set | ||
775 | # CONFIG_KEYBOARD_NEWTON is not set | 823 | # CONFIG_KEYBOARD_NEWTON is not set |
824 | # CONFIG_KEYBOARD_OPENCORES is not set | ||
776 | # CONFIG_KEYBOARD_STOWAWAY is not set | 825 | # CONFIG_KEYBOARD_STOWAWAY is not set |
777 | CONFIG_KEYBOARD_SUNKBD=y | 826 | CONFIG_KEYBOARD_SUNKBD=y |
778 | # CONFIG_KEYBOARD_XTKBD is not set | 827 | # CONFIG_KEYBOARD_XTKBD is not set |
@@ -812,6 +861,7 @@ CONFIG_SERIO_I8042=y | |||
812 | CONFIG_SERIO_PCIPS2=m | 861 | CONFIG_SERIO_PCIPS2=m |
813 | CONFIG_SERIO_LIBPS2=y | 862 | CONFIG_SERIO_LIBPS2=y |
814 | CONFIG_SERIO_RAW=m | 863 | CONFIG_SERIO_RAW=m |
864 | # CONFIG_SERIO_ALTERA_PS2 is not set | ||
815 | # CONFIG_GAMEPORT is not set | 865 | # CONFIG_GAMEPORT is not set |
816 | 866 | ||
817 | # | 867 | # |
@@ -844,6 +894,7 @@ CONFIG_SERIAL_SUNHV=y | |||
844 | CONFIG_SERIAL_CORE=y | 894 | CONFIG_SERIAL_CORE=y |
845 | CONFIG_SERIAL_CORE_CONSOLE=y | 895 | CONFIG_SERIAL_CORE_CONSOLE=y |
846 | # CONFIG_SERIAL_JSM is not set | 896 | # CONFIG_SERIAL_JSM is not set |
897 | # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set | ||
847 | CONFIG_UNIX98_PTYS=y | 898 | CONFIG_UNIX98_PTYS=y |
848 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | 899 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set |
849 | # CONFIG_LEGACY_PTYS is not set | 900 | # CONFIG_LEGACY_PTYS is not set |
@@ -858,6 +909,7 @@ CONFIG_HW_RANDOM_N2RNG=m | |||
858 | CONFIG_DEVPORT=y | 909 | CONFIG_DEVPORT=y |
859 | CONFIG_I2C=y | 910 | CONFIG_I2C=y |
860 | CONFIG_I2C_BOARDINFO=y | 911 | CONFIG_I2C_BOARDINFO=y |
912 | CONFIG_I2C_COMPAT=y | ||
861 | # CONFIG_I2C_CHARDEV is not set | 913 | # CONFIG_I2C_CHARDEV is not set |
862 | CONFIG_I2C_HELPER_AUTO=y | 914 | CONFIG_I2C_HELPER_AUTO=y |
863 | CONFIG_I2C_ALGOBIT=y | 915 | CONFIG_I2C_ALGOBIT=y |
@@ -898,11 +950,6 @@ CONFIG_I2C_ALGOBIT=y | |||
898 | # CONFIG_I2C_TINY_USB is not set | 950 | # CONFIG_I2C_TINY_USB is not set |
899 | 951 | ||
900 | # | 952 | # |
901 | # Graphics adapter I2C/DDC channel drivers | ||
902 | # | ||
903 | # CONFIG_I2C_VOODOO3 is not set | ||
904 | |||
905 | # | ||
906 | # Other I2C/SMBus bus drivers | 953 | # Other I2C/SMBus bus drivers |
907 | # | 954 | # |
908 | # CONFIG_I2C_PCA_PLATFORM is not set | 955 | # CONFIG_I2C_PCA_PLATFORM is not set |
@@ -911,10 +958,6 @@ CONFIG_I2C_ALGOBIT=y | |||
911 | # | 958 | # |
912 | # Miscellaneous I2C Chip support | 959 | # Miscellaneous I2C Chip support |
913 | # | 960 | # |
914 | # CONFIG_DS1682 is not set | ||
915 | # CONFIG_SENSORS_PCF8574 is not set | ||
916 | # CONFIG_PCF8575 is not set | ||
917 | # CONFIG_SENSORS_PCA9539 is not set | ||
918 | # CONFIG_SENSORS_TSL2550 is not set | 961 | # CONFIG_SENSORS_TSL2550 is not set |
919 | # CONFIG_I2C_DEBUG_CORE is not set | 962 | # CONFIG_I2C_DEBUG_CORE is not set |
920 | # CONFIG_I2C_DEBUG_ALGO is not set | 963 | # CONFIG_I2C_DEBUG_ALGO is not set |
@@ -932,6 +975,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | |||
932 | # CONFIG_POWER_SUPPLY is not set | 975 | # CONFIG_POWER_SUPPLY is not set |
933 | CONFIG_HWMON=y | 976 | CONFIG_HWMON=y |
934 | # CONFIG_HWMON_VID is not set | 977 | # CONFIG_HWMON_VID is not set |
978 | # CONFIG_HWMON_DEBUG_CHIP is not set | ||
979 | |||
980 | # | ||
981 | # Native drivers | ||
982 | # | ||
935 | # CONFIG_SENSORS_AD7414 is not set | 983 | # CONFIG_SENSORS_AD7414 is not set |
936 | # CONFIG_SENSORS_AD7418 is not set | 984 | # CONFIG_SENSORS_AD7418 is not set |
937 | # CONFIG_SENSORS_ADM1021 is not set | 985 | # CONFIG_SENSORS_ADM1021 is not set |
@@ -955,6 +1003,7 @@ CONFIG_HWMON=y | |||
955 | # CONFIG_SENSORS_GL520SM is not set | 1003 | # CONFIG_SENSORS_GL520SM is not set |
956 | # CONFIG_SENSORS_IT87 is not set | 1004 | # CONFIG_SENSORS_IT87 is not set |
957 | # CONFIG_SENSORS_LM63 is not set | 1005 | # CONFIG_SENSORS_LM63 is not set |
1006 | # CONFIG_SENSORS_LM73 is not set | ||
958 | # CONFIG_SENSORS_LM75 is not set | 1007 | # CONFIG_SENSORS_LM75 is not set |
959 | # CONFIG_SENSORS_LM77 is not set | 1008 | # CONFIG_SENSORS_LM77 is not set |
960 | # CONFIG_SENSORS_LM78 is not set | 1009 | # CONFIG_SENSORS_LM78 is not set |
@@ -981,6 +1030,7 @@ CONFIG_HWMON=y | |||
981 | # CONFIG_SENSORS_ADS7828 is not set | 1030 | # CONFIG_SENSORS_ADS7828 is not set |
982 | # CONFIG_SENSORS_THMC50 is not set | 1031 | # CONFIG_SENSORS_THMC50 is not set |
983 | # CONFIG_SENSORS_TMP401 is not set | 1032 | # CONFIG_SENSORS_TMP401 is not set |
1033 | # CONFIG_SENSORS_TMP421 is not set | ||
984 | # CONFIG_SENSORS_VIA686A is not set | 1034 | # CONFIG_SENSORS_VIA686A is not set |
985 | # CONFIG_SENSORS_VT1211 is not set | 1035 | # CONFIG_SENSORS_VT1211 is not set |
986 | # CONFIG_SENSORS_VT8231 is not set | 1036 | # CONFIG_SENSORS_VT8231 is not set |
@@ -993,9 +1043,8 @@ CONFIG_HWMON=y | |||
993 | # CONFIG_SENSORS_W83627HF is not set | 1043 | # CONFIG_SENSORS_W83627HF is not set |
994 | # CONFIG_SENSORS_W83627EHF is not set | 1044 | # CONFIG_SENSORS_W83627EHF is not set |
995 | # CONFIG_SENSORS_ULTRA45 is not set | 1045 | # CONFIG_SENSORS_ULTRA45 is not set |
996 | # CONFIG_HWMON_DEBUG_CHIP is not set | 1046 | # CONFIG_SENSORS_LIS3_I2C is not set |
997 | # CONFIG_THERMAL is not set | 1047 | # CONFIG_THERMAL is not set |
998 | # CONFIG_THERMAL_HWMON is not set | ||
999 | # CONFIG_WATCHDOG is not set | 1048 | # CONFIG_WATCHDOG is not set |
1000 | CONFIG_SSB_POSSIBLE=y | 1049 | CONFIG_SSB_POSSIBLE=y |
1001 | 1050 | ||
@@ -1013,16 +1062,20 @@ CONFIG_SSB_POSSIBLE=y | |||
1013 | # CONFIG_TWL4030_CORE is not set | 1062 | # CONFIG_TWL4030_CORE is not set |
1014 | # CONFIG_MFD_TMIO is not set | 1063 | # CONFIG_MFD_TMIO is not set |
1015 | # CONFIG_PMIC_DA903X is not set | 1064 | # CONFIG_PMIC_DA903X is not set |
1065 | # CONFIG_PMIC_ADP5520 is not set | ||
1016 | # CONFIG_MFD_WM8400 is not set | 1066 | # CONFIG_MFD_WM8400 is not set |
1067 | # CONFIG_MFD_WM831X is not set | ||
1017 | # CONFIG_MFD_WM8350_I2C is not set | 1068 | # CONFIG_MFD_WM8350_I2C is not set |
1018 | # CONFIG_MFD_PCF50633 is not set | 1069 | # CONFIG_MFD_PCF50633 is not set |
1019 | # CONFIG_AB3100_CORE is not set | 1070 | # CONFIG_AB3100_CORE is not set |
1071 | # CONFIG_MFD_88PM8607 is not set | ||
1020 | # CONFIG_REGULATOR is not set | 1072 | # CONFIG_REGULATOR is not set |
1021 | # CONFIG_MEDIA_SUPPORT is not set | 1073 | # CONFIG_MEDIA_SUPPORT is not set |
1022 | 1074 | ||
1023 | # | 1075 | # |
1024 | # Graphics support | 1076 | # Graphics support |
1025 | # | 1077 | # |
1078 | CONFIG_VGA_ARB=y | ||
1026 | # CONFIG_DRM is not set | 1079 | # CONFIG_DRM is not set |
1027 | # CONFIG_VGASTATE is not set | 1080 | # CONFIG_VGASTATE is not set |
1028 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | 1081 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set |
@@ -1176,6 +1229,7 @@ CONFIG_SND_ALI5451=m | |||
1176 | # CONFIG_SND_OXYGEN is not set | 1229 | # CONFIG_SND_OXYGEN is not set |
1177 | # CONFIG_SND_CS4281 is not set | 1230 | # CONFIG_SND_CS4281 is not set |
1178 | # CONFIG_SND_CS46XX is not set | 1231 | # CONFIG_SND_CS46XX is not set |
1232 | # CONFIG_SND_CS5535AUDIO is not set | ||
1179 | # CONFIG_SND_CTXFI is not set | 1233 | # CONFIG_SND_CTXFI is not set |
1180 | # CONFIG_SND_DARLA20 is not set | 1234 | # CONFIG_SND_DARLA20 is not set |
1181 | # CONFIG_SND_GINA20 is not set | 1235 | # CONFIG_SND_GINA20 is not set |
@@ -1311,6 +1365,7 @@ CONFIG_USB_EHCI_HCD=m | |||
1311 | # CONFIG_USB_OXU210HP_HCD is not set | 1365 | # CONFIG_USB_OXU210HP_HCD is not set |
1312 | # CONFIG_USB_ISP116X_HCD is not set | 1366 | # CONFIG_USB_ISP116X_HCD is not set |
1313 | # CONFIG_USB_ISP1760_HCD is not set | 1367 | # CONFIG_USB_ISP1760_HCD is not set |
1368 | # CONFIG_USB_ISP1362_HCD is not set | ||
1314 | CONFIG_USB_OHCI_HCD=y | 1369 | CONFIG_USB_OHCI_HCD=y |
1315 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | 1370 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set |
1316 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set | 1371 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set |
@@ -1426,6 +1481,7 @@ CONFIG_RTC_INTF_DEV=y | |||
1426 | # CONFIG_RTC_DRV_PCF8563 is not set | 1481 | # CONFIG_RTC_DRV_PCF8563 is not set |
1427 | # CONFIG_RTC_DRV_PCF8583 is not set | 1482 | # CONFIG_RTC_DRV_PCF8583 is not set |
1428 | # CONFIG_RTC_DRV_M41T80 is not set | 1483 | # CONFIG_RTC_DRV_M41T80 is not set |
1484 | # CONFIG_RTC_DRV_BQ32K is not set | ||
1429 | # CONFIG_RTC_DRV_S35390A is not set | 1485 | # CONFIG_RTC_DRV_S35390A is not set |
1430 | # CONFIG_RTC_DRV_FM3130 is not set | 1486 | # CONFIG_RTC_DRV_FM3130 is not set |
1431 | # CONFIG_RTC_DRV_RX8581 is not set | 1487 | # CONFIG_RTC_DRV_RX8581 is not set |
@@ -1447,7 +1503,9 @@ CONFIG_RTC_DRV_CMOS=y | |||
1447 | # CONFIG_RTC_DRV_M48T86 is not set | 1503 | # CONFIG_RTC_DRV_M48T86 is not set |
1448 | # CONFIG_RTC_DRV_M48T35 is not set | 1504 | # CONFIG_RTC_DRV_M48T35 is not set |
1449 | CONFIG_RTC_DRV_M48T59=y | 1505 | CONFIG_RTC_DRV_M48T59=y |
1506 | # CONFIG_RTC_DRV_MSM6242 is not set | ||
1450 | CONFIG_RTC_DRV_BQ4802=y | 1507 | CONFIG_RTC_DRV_BQ4802=y |
1508 | # CONFIG_RTC_DRV_RP5C01 is not set | ||
1451 | # CONFIG_RTC_DRV_V3020 is not set | 1509 | # CONFIG_RTC_DRV_V3020 is not set |
1452 | 1510 | ||
1453 | # | 1511 | # |
@@ -1625,6 +1683,7 @@ CONFIG_PRINTK_TIME=y | |||
1625 | CONFIG_ENABLE_MUST_CHECK=y | 1683 | CONFIG_ENABLE_MUST_CHECK=y |
1626 | CONFIG_FRAME_WARN=2048 | 1684 | CONFIG_FRAME_WARN=2048 |
1627 | CONFIG_MAGIC_SYSRQ=y | 1685 | CONFIG_MAGIC_SYSRQ=y |
1686 | # CONFIG_STRIP_ASM_SYMS is not set | ||
1628 | # CONFIG_UNUSED_SYMBOLS is not set | 1687 | # CONFIG_UNUSED_SYMBOLS is not set |
1629 | CONFIG_DEBUG_FS=y | 1688 | CONFIG_DEBUG_FS=y |
1630 | # CONFIG_HEADERS_CHECK is not set | 1689 | # CONFIG_HEADERS_CHECK is not set |
@@ -1678,9 +1737,11 @@ CONFIG_NOP_TRACER=y | |||
1678 | CONFIG_HAVE_FUNCTION_TRACER=y | 1737 | CONFIG_HAVE_FUNCTION_TRACER=y |
1679 | CONFIG_HAVE_DYNAMIC_FTRACE=y | 1738 | CONFIG_HAVE_DYNAMIC_FTRACE=y |
1680 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | 1739 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y |
1740 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y | ||
1681 | CONFIG_RING_BUFFER=y | 1741 | CONFIG_RING_BUFFER=y |
1682 | CONFIG_EVENT_TRACING=y | 1742 | CONFIG_EVENT_TRACING=y |
1683 | CONFIG_CONTEXT_SWITCH_TRACER=y | 1743 | CONFIG_CONTEXT_SWITCH_TRACER=y |
1744 | CONFIG_RING_BUFFER_ALLOW_SWAP=y | ||
1684 | CONFIG_TRACING=y | 1745 | CONFIG_TRACING=y |
1685 | CONFIG_GENERIC_TRACER=y | 1746 | CONFIG_GENERIC_TRACER=y |
1686 | CONFIG_TRACING_SUPPORT=y | 1747 | CONFIG_TRACING_SUPPORT=y |
@@ -1688,6 +1749,7 @@ CONFIG_FTRACE=y | |||
1688 | # CONFIG_FUNCTION_TRACER is not set | 1749 | # CONFIG_FUNCTION_TRACER is not set |
1689 | # CONFIG_IRQSOFF_TRACER is not set | 1750 | # CONFIG_IRQSOFF_TRACER is not set |
1690 | # CONFIG_SCHED_TRACER is not set | 1751 | # CONFIG_SCHED_TRACER is not set |
1752 | # CONFIG_FTRACE_SYSCALLS is not set | ||
1691 | # CONFIG_BOOT_TRACER is not set | 1753 | # CONFIG_BOOT_TRACER is not set |
1692 | CONFIG_BRANCH_PROFILE_NONE=y | 1754 | CONFIG_BRANCH_PROFILE_NONE=y |
1693 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | 1755 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set |
@@ -1706,6 +1768,7 @@ CONFIG_HAVE_ARCH_KGDB=y | |||
1706 | # CONFIG_DEBUG_STACK_USAGE is not set | 1768 | # CONFIG_DEBUG_STACK_USAGE is not set |
1707 | # CONFIG_DEBUG_DCFLUSH is not set | 1769 | # CONFIG_DEBUG_DCFLUSH is not set |
1708 | # CONFIG_STACK_DEBUG is not set | 1770 | # CONFIG_STACK_DEBUG is not set |
1771 | # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set | ||
1709 | 1772 | ||
1710 | # | 1773 | # |
1711 | # Security options | 1774 | # Security options |
@@ -1714,11 +1777,17 @@ CONFIG_KEYS=y | |||
1714 | # CONFIG_KEYS_DEBUG_PROC_KEYS is not set | 1777 | # CONFIG_KEYS_DEBUG_PROC_KEYS is not set |
1715 | # CONFIG_SECURITY is not set | 1778 | # CONFIG_SECURITY is not set |
1716 | # CONFIG_SECURITYFS is not set | 1779 | # CONFIG_SECURITYFS is not set |
1717 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | 1780 | # CONFIG_DEFAULT_SECURITY_SELINUX is not set |
1781 | # CONFIG_DEFAULT_SECURITY_SMACK is not set | ||
1782 | # CONFIG_DEFAULT_SECURITY_TOMOYO is not set | ||
1783 | CONFIG_DEFAULT_SECURITY_DAC=y | ||
1784 | CONFIG_DEFAULT_SECURITY="" | ||
1718 | CONFIG_XOR_BLOCKS=m | 1785 | CONFIG_XOR_BLOCKS=m |
1719 | CONFIG_ASYNC_CORE=m | 1786 | CONFIG_ASYNC_CORE=m |
1720 | CONFIG_ASYNC_MEMCPY=m | 1787 | CONFIG_ASYNC_MEMCPY=m |
1721 | CONFIG_ASYNC_XOR=m | 1788 | CONFIG_ASYNC_XOR=m |
1789 | CONFIG_ASYNC_PQ=m | ||
1790 | CONFIG_ASYNC_RAID6_RECOV=m | ||
1722 | CONFIG_CRYPTO=y | 1791 | CONFIG_CRYPTO=y |
1723 | 1792 | ||
1724 | # | 1793 | # |
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h index 93fe21e02c86..679c7504625a 100644 --- a/arch/sparc/include/asm/io_32.h +++ b/arch/sparc/include/asm/io_32.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <asm/page.h> /* IO address mapping routines need this */ | 8 | #include <asm/page.h> /* IO address mapping routines need this */ |
9 | #include <asm/system.h> | 9 | #include <asm/system.h> |
10 | 10 | ||
11 | #define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT) | 11 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
12 | 12 | ||
13 | static inline u32 flip_dword (u32 l) | 13 | static inline u32 flip_dword (u32 l) |
14 | { | 14 | { |
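The io_32.h hunk replaces the raw mem_map offset with page_to_pfn(), which on FLATMEM adds ARCH_PFN_OFFSET back in; the page_32.h change just below shows sparc32 setting that offset to pfn_base. A small userspace illustration with made-up numbers of why the raw index under-computes the physical address when RAM does not start at physical 0 (PAGE_SHIFT and the pfn_base value are assumptions for the example):

#include <stdio.h>

#define PAGE_SHIFT      12
#define ARCH_PFN_OFFSET 0x4000UL        /* assumed pfn_base: RAM starts at 64 MB */

int main(void)
{
        unsigned long page_index = 10;                  /* page - mem_map */
        unsigned long pfn = page_index + ARCH_PFN_OFFSET;

        printf("old macro: %#lx\n", page_index << PAGE_SHIFT);  /* drops the base */
        printf("new macro: %#lx\n", pfn << PAGE_SHIFT);         /* real physical address */
        return 0;
}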
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h index f72080bdda94..156707b0f18d 100644 --- a/arch/sparc/include/asm/page_32.h +++ b/arch/sparc/include/asm/page_32.h | |||
@@ -143,7 +143,7 @@ extern unsigned long pfn_base; | |||
143 | #define phys_to_virt __va | 143 | #define phys_to_virt __va |
144 | 144 | ||
145 | #define ARCH_PFN_OFFSET (pfn_base) | 145 | #define ARCH_PFN_OFFSET (pfn_base) |
146 | #define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT))) | 146 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
147 | 147 | ||
148 | #define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) | 148 | #define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) |
149 | #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr) | 149 | #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr) |
diff --git a/arch/sparc/include/asm/param.h b/arch/sparc/include/asm/param.h index 9836d9a3cb9a..0bc356bf8c50 100644 --- a/arch/sparc/include/asm/param.h +++ b/arch/sparc/include/asm/param.h | |||
@@ -1,22 +1,7 @@ | |||
1 | #ifndef _ASMSPARC_PARAM_H | 1 | #ifndef _ASMSPARC_PARAM_H |
2 | #define _ASMSPARC_PARAM_H | 2 | #define _ASMSPARC_PARAM_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | ||
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | ||
6 | # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ | ||
7 | # define CLOCKS_PER_SEC (USER_HZ) | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 8192 /* Thanks for sun4's we carry baggage... */ | 4 | #define EXEC_PAGESIZE 8192 /* Thanks for sun4's we carry baggage... */ |
5 | #include <asm-generic/param.h> | ||
15 | 6 | ||
16 | #ifndef NOGROUP | 7 | #endif /* _ASMSPARC_PARAM_H */ |
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | |||
22 | #endif | ||
diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h index b6ccdb0d6f7d..a254750e4c03 100644 --- a/arch/sparc/include/asm/timex_32.h +++ b/arch/sparc/include/asm/timex_32.h | |||
@@ -12,4 +12,5 @@ | |||
12 | typedef unsigned long cycles_t; | 12 | typedef unsigned long cycles_t; |
13 | #define get_cycles() (0) | 13 | #define get_cycles() (0) |
14 | 14 | ||
15 | extern u32 (*do_arch_gettimeoffset)(void); | ||
15 | #endif | 16 | #endif |
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index 600a79035fa1..1c79f32734a0 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h | |||
@@ -12,7 +12,9 @@ static inline int cpu_to_node(int cpu) | |||
12 | 12 | ||
13 | #define parent_node(node) (node) | 13 | #define parent_node(node) (node) |
14 | 14 | ||
15 | #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) | 15 | #define cpumask_of_node(node) ((node) == -1 ? \ |
16 | cpu_all_mask : \ | ||
17 | &numa_cpumask_lookup_table[node]) | ||
16 | 18 | ||
17 | struct pci_bus; | 19 | struct pci_bus; |
18 | #ifdef CONFIG_PCI | 20 | #ifdef CONFIG_PCI |
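The topology_64.h guard matters because a node id of -1 means "no NUMA affinity" and reaches this macro directly from callers. A hedged kernel-context fragment of a typical caller; 'pdev' is an illustrative struct pci_dev pointer:

/* dev_to_node() returns -1 for a device with no NUMA affinity */
const struct cpumask *mask = cpumask_of_node(dev_to_node(&pdev->dev));
unsigned int cpu = cpumask_first(mask);         /* safe now even for node == -1 */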
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 489d2ba92bcb..25f1d10155e8 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h | |||
@@ -274,7 +274,7 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un | |||
274 | 274 | ||
275 | if (unlikely(sz != -1 && sz < n)) { | 275 | if (unlikely(sz != -1 && sz < n)) { |
276 | copy_from_user_overflow(); | 276 | copy_from_user_overflow(); |
277 | return -EFAULT; | 277 | return n; |
278 | } | 278 | } |
279 | 279 | ||
280 | if (n && __access_ok((unsigned long) from, n)) | 280 | if (n && __access_ok((unsigned long) from, n)) |
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index dbc141660994..2406788bfe5f 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h | |||
@@ -221,8 +221,8 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from, | |||
221 | static inline unsigned long __must_check | 221 | static inline unsigned long __must_check |
222 | copy_from_user(void *to, const void __user *from, unsigned long size) | 222 | copy_from_user(void *to, const void __user *from, unsigned long size) |
223 | { | 223 | { |
224 | unsigned long ret = (unsigned long) -EFAULT; | ||
225 | int sz = __compiletime_object_size(to); | 224 | int sz = __compiletime_object_size(to); |
225 | unsigned long ret = size; | ||
226 | 226 | ||
227 | if (likely(sz == -1 || sz >= size)) { | 227 | if (likely(sz == -1 || sz >= size)) { |
228 | ret = ___copy_from_user(to, from, size); | 228 | ret = ___copy_from_user(to, from, size); |
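Both uaccess hunks above fix the compile-time overflow branch to report "everything left uncopied" rather than -EFAULT: copy_from_user() returns an unsigned byte count, so a negative errno shows up to callers as a huge value. A kernel-context sketch of the caller arithmetic that relies on this, with illustrative names:

static long read_from_user(void *dst, const void __user *src, size_t len)
{
        /* number of bytes NOT copied; never a negative errno */
        unsigned long left = copy_from_user(dst, src, len);

        if (left == len)
                return -EFAULT;         /* nothing copied at all */
        return len - left;              /* bytes actually copied */
}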
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index f3b5466c389c..4589ca33220f 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c | |||
@@ -99,7 +99,7 @@ static int __devinit clock_board_probe(struct of_device *op, | |||
99 | 99 | ||
100 | p->leds_resource.start = (unsigned long) | 100 | p->leds_resource.start = (unsigned long) |
101 | (p->clock_regs + CLOCK_CTRL); | 101 | (p->clock_regs + CLOCK_CTRL); |
102 | p->leds_resource.end = p->leds_resource.end; | 102 | p->leds_resource.end = p->leds_resource.start; |
103 | p->leds_resource.name = "leds"; | 103 | p->leds_resource.name = "leds"; |
104 | 104 | ||
105 | p->leds_pdev.name = "sunfire-clockboard-leds"; | 105 | p->leds_pdev.name = "sunfire-clockboard-leds"; |
@@ -194,7 +194,7 @@ static int __devinit fhc_probe(struct of_device *op, | |||
194 | if (!p->central) { | 194 | if (!p->central) { |
195 | p->leds_resource.start = (unsigned long) | 195 | p->leds_resource.start = (unsigned long) |
196 | (p->pregs + FHC_PREGS_CTRL); | 196 | (p->pregs + FHC_PREGS_CTRL); |
197 | p->leds_resource.end = p->leds_resource.end; | 197 | p->leds_resource.end = p->leds_resource.start; |
198 | p->leds_resource.name = "leds"; | 198 | p->leds_resource.name = "leds"; |
199 | 199 | ||
200 | p->leds_pdev.name = "sunfire-fhc-leds"; | 200 | p->leds_pdev.name = "sunfire-fhc-leds"; |
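Both central.c hunks replace a self-assignment of leds_resource.end with a copy from .start: the LED control register is a single location, so the resource range collapses to one address, and the old line left 'end' at whatever it already was, typically zero. A short fragment (inside a probe function) of the intended shape; 'regs_phys' and 'CTRL_OFFSET' are illustrative:

struct resource r = {
        .name  = "leds",
        .start = regs_phys + CTRL_OFFSET,
};
r.end = r.start;                        /* one-register resource */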
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 8d6882bb480a..e1cbdb94d97b 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -250,12 +250,12 @@ struct irq_handler_data { | |||
250 | }; | 250 | }; |
251 | 251 | ||
252 | #ifdef CONFIG_SMP | 252 | #ifdef CONFIG_SMP |
253 | static int irq_choose_cpu(unsigned int virt_irq) | 253 | static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity) |
254 | { | 254 | { |
255 | cpumask_t mask; | 255 | cpumask_t mask; |
256 | int cpuid; | 256 | int cpuid; |
257 | 257 | ||
258 | cpumask_copy(&mask, irq_desc[virt_irq].affinity); | 258 | cpumask_copy(&mask, affinity); |
259 | if (cpus_equal(mask, cpu_online_map)) { | 259 | if (cpus_equal(mask, cpu_online_map)) { |
260 | cpuid = map_to_cpu(virt_irq); | 260 | cpuid = map_to_cpu(virt_irq); |
261 | } else { | 261 | } else { |
@@ -268,10 +268,8 @@ static int irq_choose_cpu(unsigned int virt_irq) | |||
268 | return cpuid; | 268 | return cpuid; |
269 | } | 269 | } |
270 | #else | 270 | #else |
271 | static int irq_choose_cpu(unsigned int virt_irq) | 271 | #define irq_choose_cpu(virt_irq, affinity) \ |
272 | { | 272 | real_hard_smp_processor_id() |
273 | return real_hard_smp_processor_id(); | ||
274 | } | ||
275 | #endif | 273 | #endif |
276 | 274 | ||
277 | static void sun4u_irq_enable(unsigned int virt_irq) | 275 | static void sun4u_irq_enable(unsigned int virt_irq) |
@@ -282,7 +280,8 @@ static void sun4u_irq_enable(unsigned int virt_irq) | |||
282 | unsigned long cpuid, imap, val; | 280 | unsigned long cpuid, imap, val; |
283 | unsigned int tid; | 281 | unsigned int tid; |
284 | 282 | ||
285 | cpuid = irq_choose_cpu(virt_irq); | 283 | cpuid = irq_choose_cpu(virt_irq, |
284 | irq_desc[virt_irq].affinity); | ||
286 | imap = data->imap; | 285 | imap = data->imap; |
287 | 286 | ||
288 | tid = sun4u_compute_tid(imap, cpuid); | 287 | tid = sun4u_compute_tid(imap, cpuid); |
@@ -299,7 +298,24 @@ static void sun4u_irq_enable(unsigned int virt_irq) | |||
299 | static int sun4u_set_affinity(unsigned int virt_irq, | 298 | static int sun4u_set_affinity(unsigned int virt_irq, |
300 | const struct cpumask *mask) | 299 | const struct cpumask *mask) |
301 | { | 300 | { |
302 | sun4u_irq_enable(virt_irq); | 301 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); |
302 | |||
303 | if (likely(data)) { | ||
304 | unsigned long cpuid, imap, val; | ||
305 | unsigned int tid; | ||
306 | |||
307 | cpuid = irq_choose_cpu(virt_irq, mask); | ||
308 | imap = data->imap; | ||
309 | |||
310 | tid = sun4u_compute_tid(imap, cpuid); | ||
311 | |||
312 | val = upa_readq(imap); | ||
313 | val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS | | ||
314 | IMAP_AID_SAFARI | IMAP_NID_SAFARI); | ||
315 | val |= tid | IMAP_VALID; | ||
316 | upa_writeq(val, imap); | ||
317 | upa_writeq(ICLR_IDLE, data->iclr); | ||
318 | } | ||
303 | 319 | ||
304 | return 0; | 320 | return 0; |
305 | } | 321 | } |
@@ -340,7 +356,8 @@ static void sun4u_irq_eoi(unsigned int virt_irq) | |||
340 | static void sun4v_irq_enable(unsigned int virt_irq) | 356 | static void sun4v_irq_enable(unsigned int virt_irq) |
341 | { | 357 | { |
342 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | 358 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
343 | unsigned long cpuid = irq_choose_cpu(virt_irq); | 359 | unsigned long cpuid = irq_choose_cpu(virt_irq, |
360 | irq_desc[virt_irq].affinity); | ||
344 | int err; | 361 | int err; |
345 | 362 | ||
346 | err = sun4v_intr_settarget(ino, cpuid); | 363 | err = sun4v_intr_settarget(ino, cpuid); |
@@ -361,7 +378,7 @@ static int sun4v_set_affinity(unsigned int virt_irq, | |||
361 | const struct cpumask *mask) | 378 | const struct cpumask *mask) |
362 | { | 379 | { |
363 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | 380 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
364 | unsigned long cpuid = irq_choose_cpu(virt_irq); | 381 | unsigned long cpuid = irq_choose_cpu(virt_irq, mask); |
365 | int err; | 382 | int err; |
366 | 383 | ||
367 | err = sun4v_intr_settarget(ino, cpuid); | 384 | err = sun4v_intr_settarget(ino, cpuid); |
@@ -403,7 +420,7 @@ static void sun4v_virq_enable(unsigned int virt_irq) | |||
403 | unsigned long cpuid, dev_handle, dev_ino; | 420 | unsigned long cpuid, dev_handle, dev_ino; |
404 | int err; | 421 | int err; |
405 | 422 | ||
406 | cpuid = irq_choose_cpu(virt_irq); | 423 | cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity); |
407 | 424 | ||
408 | dev_handle = virt_irq_table[virt_irq].dev_handle; | 425 | dev_handle = virt_irq_table[virt_irq].dev_handle; |
409 | dev_ino = virt_irq_table[virt_irq].dev_ino; | 426 | dev_ino = virt_irq_table[virt_irq].dev_ino; |
@@ -433,7 +450,7 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq, | |||
433 | unsigned long cpuid, dev_handle, dev_ino; | 450 | unsigned long cpuid, dev_handle, dev_ino; |
434 | int err; | 451 | int err; |
435 | 452 | ||
436 | cpuid = irq_choose_cpu(virt_irq); | 453 | cpuid = irq_choose_cpu(virt_irq, mask); |
437 | 454 | ||
438 | dev_handle = virt_irq_table[virt_irq].dev_handle; | 455 | dev_handle = virt_irq_table[virt_irq].dev_handle; |
439 | dev_ino = virt_irq_table[virt_irq].dev_ino; | 456 | dev_ino = virt_irq_table[virt_irq].dev_ino; |
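Editor's note: the irq_64.c hunks above thread the affinity mask through irq_choose_cpu() explicitly instead of re-reading irq_desc[].affinity, so the set_affinity paths can use the new mask before it is stored. A minimal userspace sketch of the same selection policy (spread when the mask covers all online CPUs, otherwise take the first allowed CPU); pick_cpu, online_mask and map_to_cpu_rr are illustrative names, not kernel APIs:

#include <stdint.h>
#include <stdio.h>

static uint64_t online_mask = 0xf;           /* pretend CPUs 0-3 are online */

static int map_to_cpu_rr(unsigned int irq)   /* stand-in for map_to_cpu() */
{
	return irq % 4;
}

static int pick_cpu(unsigned int irq, uint64_t affinity)
{
	if (affinity == online_mask)             /* unrestricted: spread irqs */
		return map_to_cpu_rr(irq);

	for (int cpu = 0; cpu < 64; cpu++)       /* restricted: first allowed CPU */
		if (affinity & (1ULL << cpu))
			return cpu;
	return -1;                               /* empty mask */
}

int main(void)
{
	printf("%d\n", pick_cpu(5, 0xf));        /* 1: round-robin choice */
	printf("%d\n", pick_cpu(5, 0x4));        /* 2: pinned to CPU 2 */
	return 0;
}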
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 85e7037429b9..4e2724ec2bb6 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/oplib.h> | 30 | #include <asm/oplib.h> |
31 | #include <asm/prom.h> | 31 | #include <asm/prom.h> |
32 | #include <asm/pcic.h> | 32 | #include <asm/pcic.h> |
33 | #include <asm/timex.h> | ||
33 | #include <asm/timer.h> | 34 | #include <asm/timer.h> |
34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
35 | #include <asm/irq_regs.h> | 36 | #include <asm/irq_regs.h> |
@@ -163,8 +164,6 @@ void __iomem *pcic_regs; | |||
163 | volatile int pcic_speculative; | 164 | volatile int pcic_speculative; |
164 | volatile int pcic_trapped; | 165 | volatile int pcic_trapped; |
165 | 166 | ||
166 | static void pci_do_gettimeofday(struct timeval *tv); | ||
167 | static int pci_do_settimeofday(struct timespec *tv); | ||
168 | 167 | ||
169 | #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) | 168 | #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) |
170 | 169 | ||
@@ -716,19 +715,27 @@ static irqreturn_t pcic_timer_handler (int irq, void *h) | |||
716 | #define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ | 715 | #define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ |
717 | #define TICK_TIMER_LIMIT ((100*1000000/4)/100) | 716 | #define TICK_TIMER_LIMIT ((100*1000000/4)/100) |
718 | 717 | ||
718 | u32 pci_gettimeoffset(void) | ||
719 | { | ||
720 | /* | ||
721 | * We divide all by 100 | ||
722 | * to have microsecond resolution and to avoid overflow | ||
723 | */ | ||
724 | unsigned long count = | ||
725 | readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; | ||
726 | count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); | ||
727 | return count * 1000; | ||
728 | } | ||
729 | |||
730 | |||
719 | void __init pci_time_init(void) | 731 | void __init pci_time_init(void) |
720 | { | 732 | { |
721 | struct linux_pcic *pcic = &pcic0; | 733 | struct linux_pcic *pcic = &pcic0; |
722 | unsigned long v; | 734 | unsigned long v; |
723 | int timer_irq, irq; | 735 | int timer_irq, irq; |
724 | 736 | ||
725 | /* A hack until do_gettimeofday prototype is moved to arch specific headers | 737 | do_arch_gettimeoffset = pci_gettimeoffset; |
726 | and btfixupped. Patch do_gettimeofday with ba pci_do_gettimeofday; nop */ | 738 | |
727 | ((unsigned int *)do_gettimeofday)[0] = | ||
728 | 0x10800000 | ((((unsigned long)pci_do_gettimeofday - | ||
729 | (unsigned long)do_gettimeofday) >> 2) & 0x003fffff); | ||
730 | ((unsigned int *)do_gettimeofday)[1] = 0x01000000; | ||
731 | BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM); | ||
732 | btfixup(); | 739 | btfixup(); |
733 | 740 | ||
734 | writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); | 741 | writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); |
@@ -746,84 +753,6 @@ void __init pci_time_init(void) | |||
746 | local_irq_enable(); | 753 | local_irq_enable(); |
747 | } | 754 | } |
748 | 755 | ||
749 | static inline unsigned long do_gettimeoffset(void) | ||
750 | { | ||
751 | /* | ||
752 | * We divide all by 100 | ||
753 | * to have microsecond resolution and to avoid overflow | ||
754 | */ | ||
755 | unsigned long count = | ||
756 | readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; | ||
757 | count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); | ||
758 | return count; | ||
759 | } | ||
760 | |||
761 | static void pci_do_gettimeofday(struct timeval *tv) | ||
762 | { | ||
763 | unsigned long flags; | ||
764 | unsigned long seq; | ||
765 | unsigned long usec, sec; | ||
766 | unsigned long max_ntp_tick = tick_usec - tickadj; | ||
767 | |||
768 | do { | ||
769 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | ||
770 | usec = do_gettimeoffset(); | ||
771 | |||
772 | /* | ||
773 | * If time_adjust is negative then NTP is slowing the clock | ||
774 | * so make sure not to go into next possible interval. | ||
775 | * Better to lose some accuracy than have time go backwards.. | ||
776 | */ | ||
777 | if (unlikely(time_adjust < 0)) | ||
778 | usec = min(usec, max_ntp_tick); | ||
779 | |||
780 | sec = xtime.tv_sec; | ||
781 | usec += (xtime.tv_nsec / 1000); | ||
782 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | ||
783 | |||
784 | while (usec >= 1000000) { | ||
785 | usec -= 1000000; | ||
786 | sec++; | ||
787 | } | ||
788 | |||
789 | tv->tv_sec = sec; | ||
790 | tv->tv_usec = usec; | ||
791 | } | ||
792 | |||
793 | static int pci_do_settimeofday(struct timespec *tv) | ||
794 | { | ||
795 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | ||
796 | return -EINVAL; | ||
797 | |||
798 | /* | ||
799 | * This is revolting. We need to set "xtime" correctly. However, the | ||
800 | * value in this location is the value at the most recent update of | ||
801 | * wall time. Discover what correction gettimeofday() would have | ||
802 | * made, and then undo it! | ||
803 | */ | ||
804 | tv->tv_nsec -= 1000 * do_gettimeoffset(); | ||
805 | while (tv->tv_nsec < 0) { | ||
806 | tv->tv_nsec += NSEC_PER_SEC; | ||
807 | tv->tv_sec--; | ||
808 | } | ||
809 | |||
810 | wall_to_monotonic.tv_sec += xtime.tv_sec - tv->tv_sec; | ||
811 | wall_to_monotonic.tv_nsec += xtime.tv_nsec - tv->tv_nsec; | ||
812 | |||
813 | if (wall_to_monotonic.tv_nsec > NSEC_PER_SEC) { | ||
814 | wall_to_monotonic.tv_nsec -= NSEC_PER_SEC; | ||
815 | wall_to_monotonic.tv_sec++; | ||
816 | } | ||
817 | if (wall_to_monotonic.tv_nsec < 0) { | ||
818 | wall_to_monotonic.tv_nsec += NSEC_PER_SEC; | ||
819 | wall_to_monotonic.tv_sec--; | ||
820 | } | ||
821 | |||
822 | xtime.tv_sec = tv->tv_sec; | ||
823 | xtime.tv_nsec = tv->tv_nsec; | ||
824 | ntp_clear(); | ||
825 | return 0; | ||
826 | } | ||
827 | 756 | ||
828 | #if 0 | 757 | #if 0 |
829 | static void watchdog_reset() { | 758 | static void watchdog_reset() { |
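Editor's note: the comment in the new pci_gettimeoffset() ("we divide all by 100 ... to avoid overflow") is easiest to follow with numbers: scaling both the raw counter value and TICK_TIMER_LIMIT down by 100 keeps the intermediate multiply small enough for a 32-bit unsigned long. A self-contained rework of that arithmetic; the constants mirror the patch, while pci_counter_to_ns is an illustrative name:

#include <stdio.h>

#define USECS_PER_JIFFY   10000                 /* 100HZ timer, as in the patch */
#define TICK_TIMER_LIMIT  ((100*1000000/4)/100)

/* Convert a raw PCI system-counter value to nanoseconds since the tick. */
static unsigned long pci_counter_to_ns(unsigned long count)
{
	/* Divide first so (count/100) * USECS_PER_JIFFY stays within 32 bits. */
	unsigned long usec = ((count / 100) * USECS_PER_JIFFY) /
			     (TICK_TIMER_LIMIT / 100);
	return usec * 1000;
}

int main(void)
{
	/* Half of the timer period should land near 5000 us = 5000000 ns. */
	printf("%lu\n", pci_counter_to_ns(TICK_TIMER_LIMIT / 2));
	return 0;
}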
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 198fb4e79ba2..e856456ec02f 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* Performance event support for sparc64. | 1 | /* Performance event support for sparc64. |
2 | * | 2 | * |
3 | * Copyright (C) 2009 David S. Miller <davem@davemloft.net> | 3 | * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net> |
4 | * | 4 | * |
5 | * This code is based almost entirely upon the x86 perf event | 5 | * This code is based almost entirely upon the x86 perf event |
6 | * code, which is: | 6 | * code, which is: |
@@ -18,11 +18,15 @@ | |||
18 | #include <linux/kdebug.h> | 18 | #include <linux/kdebug.h> |
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | 20 | ||
21 | #include <asm/stacktrace.h> | ||
21 | #include <asm/cpudata.h> | 22 | #include <asm/cpudata.h> |
23 | #include <asm/uaccess.h> | ||
22 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
23 | #include <asm/nmi.h> | 25 | #include <asm/nmi.h> |
24 | #include <asm/pcr.h> | 26 | #include <asm/pcr.h> |
25 | 27 | ||
28 | #include "kstack.h" | ||
29 | |||
26 | /* Sparc64 chips have two performance counters, 32-bits each, with | 30 | /* Sparc64 chips have two performance counters, 32-bits each, with |
27 | * overflow interrupts generated on transition from 0xffffffff to 0. | 31 | * overflow interrupts generated on transition from 0xffffffff to 0. |
28 | * The counters are accessed in one go using a 64-bit register. | 32 | * The counters are accessed in one go using a 64-bit register. |
@@ -51,16 +55,49 @@ | |||
51 | 55 | ||
52 | #define PIC_UPPER_INDEX 0 | 56 | #define PIC_UPPER_INDEX 0 |
53 | #define PIC_LOWER_INDEX 1 | 57 | #define PIC_LOWER_INDEX 1 |
58 | #define PIC_NO_INDEX -1 | ||
54 | 59 | ||
55 | struct cpu_hw_events { | 60 | struct cpu_hw_events { |
56 | struct perf_event *events[MAX_HWEVENTS]; | 61 | /* Number of events currently scheduled onto this cpu. |
57 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | 62 | * This tells how many entries in the arrays below |
58 | unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | 63 | * are valid. |
64 | */ | ||
65 | int n_events; | ||
66 | |||
67 | /* Number of new events added since the last hw_perf_disable(). | ||
68 | * This works because the perf event layer always adds new | ||
69 | * events inside of a perf_{disable,enable}() sequence. | ||
70 | */ | ||
71 | int n_added; | ||
72 | |||
73 | /* Array of events currently scheduled on this cpu. */ | ||
74 | struct perf_event *event[MAX_HWEVENTS]; | ||
75 | |||
76 | /* Array of encoded longs, specifying the %pcr register | ||
77 | * encoding and the mask of PIC counters this even can | ||
78 | * be scheduled on. See perf_event_encode() et al. | ||
79 | */ | ||
80 | unsigned long events[MAX_HWEVENTS]; | ||
81 | |||
82 | /* The current counter index assigned to an event. When the | ||
83 | * event hasn't been programmed into the cpu yet, this will | ||
84 | * hold PIC_NO_INDEX. The event->hw.idx value tells us where | ||
85 | * we ought to schedule the event. | ||
86 | */ | ||
87 | int current_idx[MAX_HWEVENTS]; | ||
88 | |||
89 | /* Software copy of %pcr register on this cpu. */ | ||
59 | u64 pcr; | 90 | u64 pcr; |
91 | |||
92 | /* Enabled/disable state. */ | ||
60 | int enabled; | 93 | int enabled; |
61 | }; | 94 | }; |
62 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; | 95 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; |
63 | 96 | ||
97 | /* An event map describes the characteristics of a performance | ||
98 | * counter event. In particular it gives the encoding as well as | ||
99 | * a mask telling which counters the event can be measured on. | ||
100 | */ | ||
64 | struct perf_event_map { | 101 | struct perf_event_map { |
65 | u16 encoding; | 102 | u16 encoding; |
66 | u8 pic_mask; | 103 | u8 pic_mask; |
@@ -69,15 +106,20 @@ struct perf_event_map { | |||
69 | #define PIC_LOWER 0x02 | 106 | #define PIC_LOWER 0x02 |
70 | }; | 107 | }; |
71 | 108 | ||
109 | /* Encode a perf_event_map entry into a long. */ | ||
72 | static unsigned long perf_event_encode(const struct perf_event_map *pmap) | 110 | static unsigned long perf_event_encode(const struct perf_event_map *pmap) |
73 | { | 111 | { |
74 | return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; | 112 | return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; |
75 | } | 113 | } |
76 | 114 | ||
77 | static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) | 115 | static u8 perf_event_get_msk(unsigned long val) |
78 | { | 116 | { |
79 | *msk = val & 0xff; | 117 | return val & 0xff; |
80 | *enc = val >> 16; | 118 | } |
119 | |||
120 | static u64 perf_event_get_enc(unsigned long val) | ||
121 | { | ||
122 | return val >> 16; | ||
81 | } | 123 | } |
82 | 124 | ||
83 | #define C(x) PERF_COUNT_HW_CACHE_##x | 125 | #define C(x) PERF_COUNT_HW_CACHE_##x |
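Editor's note: the events[] array in the reworked cpu_hw_events holds the %pcr encoding and the allowed-counter mask packed into a single long, split back out by the two accessors just added above. A standalone sketch of that packing, using the same 16-bit shift and 8-bit mask as the patch:

#include <stdint.h>
#include <stdio.h>

#define PIC_UPPER 0x01
#define PIC_LOWER 0x02

/* Pack a 16-bit event encoding and an 8-bit counter mask into one long. */
static unsigned long encode(uint16_t encoding, uint8_t pic_mask)
{
	return ((unsigned long)encoding << 16) | pic_mask;
}

static uint8_t  get_msk(unsigned long val) { return val & 0xff; }
static uint64_t get_enc(unsigned long val) { return val >> 16; }

int main(void)
{
	unsigned long v = encode(0x1234, PIC_UPPER | PIC_LOWER);

	printf("enc=%#llx msk=%#x\n",
	       (unsigned long long)get_enc(v), (unsigned)get_msk(v));
	return 0;
}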
@@ -491,53 +533,6 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw | |||
491 | pcr_ops->write(cpuc->pcr); | 533 | pcr_ops->write(cpuc->pcr); |
492 | } | 534 | } |
493 | 535 | ||
494 | void hw_perf_enable(void) | ||
495 | { | ||
496 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
497 | u64 val; | ||
498 | int i; | ||
499 | |||
500 | if (cpuc->enabled) | ||
501 | return; | ||
502 | |||
503 | cpuc->enabled = 1; | ||
504 | barrier(); | ||
505 | |||
506 | val = cpuc->pcr; | ||
507 | |||
508 | for (i = 0; i < MAX_HWEVENTS; i++) { | ||
509 | struct perf_event *cp = cpuc->events[i]; | ||
510 | struct hw_perf_event *hwc; | ||
511 | |||
512 | if (!cp) | ||
513 | continue; | ||
514 | hwc = &cp->hw; | ||
515 | val |= hwc->config_base; | ||
516 | } | ||
517 | |||
518 | cpuc->pcr = val; | ||
519 | |||
520 | pcr_ops->write(cpuc->pcr); | ||
521 | } | ||
522 | |||
523 | void hw_perf_disable(void) | ||
524 | { | ||
525 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
526 | u64 val; | ||
527 | |||
528 | if (!cpuc->enabled) | ||
529 | return; | ||
530 | |||
531 | cpuc->enabled = 0; | ||
532 | |||
533 | val = cpuc->pcr; | ||
534 | val &= ~(PCR_UTRACE | PCR_STRACE | | ||
535 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); | ||
536 | cpuc->pcr = val; | ||
537 | |||
538 | pcr_ops->write(cpuc->pcr); | ||
539 | } | ||
540 | |||
541 | static u32 read_pmc(int idx) | 536 | static u32 read_pmc(int idx) |
542 | { | 537 | { |
543 | u64 val; | 538 | u64 val; |
@@ -566,6 +561,30 @@ static void write_pmc(int idx, u64 val) | |||
566 | write_pic(pic); | 561 | write_pic(pic); |
567 | } | 562 | } |
568 | 563 | ||
564 | static u64 sparc_perf_event_update(struct perf_event *event, | ||
565 | struct hw_perf_event *hwc, int idx) | ||
566 | { | ||
567 | int shift = 64 - 32; | ||
568 | u64 prev_raw_count, new_raw_count; | ||
569 | s64 delta; | ||
570 | |||
571 | again: | ||
572 | prev_raw_count = atomic64_read(&hwc->prev_count); | ||
573 | new_raw_count = read_pmc(idx); | ||
574 | |||
575 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
576 | new_raw_count) != prev_raw_count) | ||
577 | goto again; | ||
578 | |||
579 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
580 | delta >>= shift; | ||
581 | |||
582 | atomic64_add(delta, &event->count); | ||
583 | atomic64_sub(delta, &hwc->period_left); | ||
584 | |||
585 | return new_raw_count; | ||
586 | } | ||
587 | |||
569 | static int sparc_perf_event_set_period(struct perf_event *event, | 588 | static int sparc_perf_event_set_period(struct perf_event *event, |
570 | struct hw_perf_event *hwc, int idx) | 589 | struct hw_perf_event *hwc, int idx) |
571 | { | 590 | { |
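Editor's note: sparc_perf_event_update(), moved up in the hunk above, folds a 32-bit hardware counter into a 64-bit total; the shift by 64-32 makes the subtraction behave modulo 2^32, so the delta comes out right even when the counter wrapped between reads. A small self-contained illustration of the same trick:

#include <stdint.h>
#include <stdio.h>

/* Delta between two samples of a 32-bit counter, tolerating wraparound. */
static int64_t counter_delta(uint64_t prev, uint64_t now)
{
	int shift = 64 - 32;
	uint64_t d = (now << shift) - (prev << shift);

	return (int64_t)d >> shift;     /* arithmetic shift on the signed value */
}

int main(void)
{
	/* Counter wrapped from 0xfffffff0 to 0x00000005: 21 events elapsed. */
	printf("%lld\n", (long long)counter_delta(0xfffffff0ULL, 0x5ULL));
	return 0;
}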
@@ -598,81 +617,166 @@ static int sparc_perf_event_set_period(struct perf_event *event, | |||
598 | return ret; | 617 | return ret; |
599 | } | 618 | } |
600 | 619 | ||
601 | static int sparc_pmu_enable(struct perf_event *event) | 620 | /* If performance event entries have been added, move existing |
621 | * events around (if necessary) and then assign new entries to | ||
622 | * counters. | ||
623 | */ | ||
624 | static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) | ||
602 | { | 625 | { |
603 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 626 | int i; |
604 | struct hw_perf_event *hwc = &event->hw; | ||
605 | int idx = hwc->idx; | ||
606 | 627 | ||
607 | if (test_and_set_bit(idx, cpuc->used_mask)) | 628 | if (!cpuc->n_added) |
608 | return -EAGAIN; | 629 | goto out; |
609 | 630 | ||
610 | sparc_pmu_disable_event(cpuc, hwc, idx); | 631 | /* Read in the counters which are moving. */ |
632 | for (i = 0; i < cpuc->n_events; i++) { | ||
633 | struct perf_event *cp = cpuc->event[i]; | ||
611 | 634 | ||
612 | cpuc->events[idx] = event; | 635 | if (cpuc->current_idx[i] != PIC_NO_INDEX && |
613 | set_bit(idx, cpuc->active_mask); | 636 | cpuc->current_idx[i] != cp->hw.idx) { |
637 | sparc_perf_event_update(cp, &cp->hw, | ||
638 | cpuc->current_idx[i]); | ||
639 | cpuc->current_idx[i] = PIC_NO_INDEX; | ||
640 | } | ||
641 | } | ||
614 | 642 | ||
615 | sparc_perf_event_set_period(event, hwc, idx); | 643 | /* Assign to counters all unassigned events. */ |
616 | sparc_pmu_enable_event(cpuc, hwc, idx); | 644 | for (i = 0; i < cpuc->n_events; i++) { |
617 | perf_event_update_userpage(event); | 645 | struct perf_event *cp = cpuc->event[i]; |
618 | return 0; | 646 | struct hw_perf_event *hwc = &cp->hw; |
647 | int idx = hwc->idx; | ||
648 | u64 enc; | ||
649 | |||
650 | if (cpuc->current_idx[i] != PIC_NO_INDEX) | ||
651 | continue; | ||
652 | |||
653 | sparc_perf_event_set_period(cp, hwc, idx); | ||
654 | cpuc->current_idx[i] = idx; | ||
655 | |||
656 | enc = perf_event_get_enc(cpuc->events[i]); | ||
657 | pcr |= event_encoding(enc, idx); | ||
658 | } | ||
659 | out: | ||
660 | return pcr; | ||
619 | } | 661 | } |
620 | 662 | ||
621 | static u64 sparc_perf_event_update(struct perf_event *event, | 663 | void hw_perf_enable(void) |
622 | struct hw_perf_event *hwc, int idx) | ||
623 | { | 664 | { |
624 | int shift = 64 - 32; | 665 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
625 | u64 prev_raw_count, new_raw_count; | 666 | u64 pcr; |
626 | s64 delta; | ||
627 | 667 | ||
628 | again: | 668 | if (cpuc->enabled) |
629 | prev_raw_count = atomic64_read(&hwc->prev_count); | 669 | return; |
630 | new_raw_count = read_pmc(idx); | ||
631 | 670 | ||
632 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | 671 | cpuc->enabled = 1; |
633 | new_raw_count) != prev_raw_count) | 672 | barrier(); |
634 | goto again; | ||
635 | 673 | ||
636 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | 674 | pcr = cpuc->pcr; |
637 | delta >>= shift; | 675 | if (!cpuc->n_events) { |
676 | pcr = 0; | ||
677 | } else { | ||
678 | pcr = maybe_change_configuration(cpuc, pcr); | ||
638 | 679 | ||
639 | atomic64_add(delta, &event->count); | 680 | /* We require that all of the events have the same |
640 | atomic64_sub(delta, &hwc->period_left); | 681 | * configuration, so just fetch the settings from the |
682 | * first entry. | ||
683 | */ | ||
684 | cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; | ||
685 | } | ||
641 | 686 | ||
642 | return new_raw_count; | 687 | pcr_ops->write(cpuc->pcr); |
688 | } | ||
689 | |||
690 | void hw_perf_disable(void) | ||
691 | { | ||
692 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
693 | u64 val; | ||
694 | |||
695 | if (!cpuc->enabled) | ||
696 | return; | ||
697 | |||
698 | cpuc->enabled = 0; | ||
699 | cpuc->n_added = 0; | ||
700 | |||
701 | val = cpuc->pcr; | ||
702 | val &= ~(PCR_UTRACE | PCR_STRACE | | ||
703 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); | ||
704 | cpuc->pcr = val; | ||
705 | |||
706 | pcr_ops->write(cpuc->pcr); | ||
643 | } | 707 | } |
644 | 708 | ||
645 | static void sparc_pmu_disable(struct perf_event *event) | 709 | static void sparc_pmu_disable(struct perf_event *event) |
646 | { | 710 | { |
647 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 711 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
648 | struct hw_perf_event *hwc = &event->hw; | 712 | struct hw_perf_event *hwc = &event->hw; |
649 | int idx = hwc->idx; | 713 | unsigned long flags; |
714 | int i; | ||
650 | 715 | ||
651 | clear_bit(idx, cpuc->active_mask); | 716 | local_irq_save(flags); |
652 | sparc_pmu_disable_event(cpuc, hwc, idx); | 717 | perf_disable(); |
718 | |||
719 | for (i = 0; i < cpuc->n_events; i++) { | ||
720 | if (event == cpuc->event[i]) { | ||
721 | int idx = cpuc->current_idx[i]; | ||
722 | |||
723 | /* Shift remaining entries down into | ||
724 | * the existing slot. | ||
725 | */ | ||
726 | while (++i < cpuc->n_events) { | ||
727 | cpuc->event[i - 1] = cpuc->event[i]; | ||
728 | cpuc->events[i - 1] = cpuc->events[i]; | ||
729 | cpuc->current_idx[i - 1] = | ||
730 | cpuc->current_idx[i]; | ||
731 | } | ||
732 | |||
733 | /* Absorb the final count and turn off the | ||
734 | * event. | ||
735 | */ | ||
736 | sparc_pmu_disable_event(cpuc, hwc, idx); | ||
737 | barrier(); | ||
738 | sparc_perf_event_update(event, hwc, idx); | ||
653 | 739 | ||
654 | barrier(); | 740 | perf_event_update_userpage(event); |
655 | 741 | ||
656 | sparc_perf_event_update(event, hwc, idx); | 742 | cpuc->n_events--; |
657 | cpuc->events[idx] = NULL; | 743 | break; |
658 | clear_bit(idx, cpuc->used_mask); | 744 | } |
745 | } | ||
659 | 746 | ||
660 | perf_event_update_userpage(event); | 747 | perf_enable(); |
748 | local_irq_restore(flags); | ||
749 | } | ||
750 | |||
751 | static int active_event_index(struct cpu_hw_events *cpuc, | ||
752 | struct perf_event *event) | ||
753 | { | ||
754 | int i; | ||
755 | |||
756 | for (i = 0; i < cpuc->n_events; i++) { | ||
757 | if (cpuc->event[i] == event) | ||
758 | break; | ||
759 | } | ||
760 | BUG_ON(i == cpuc->n_events); | ||
761 | return cpuc->current_idx[i]; | ||
661 | } | 762 | } |
662 | 763 | ||
663 | static void sparc_pmu_read(struct perf_event *event) | 764 | static void sparc_pmu_read(struct perf_event *event) |
664 | { | 765 | { |
766 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
767 | int idx = active_event_index(cpuc, event); | ||
665 | struct hw_perf_event *hwc = &event->hw; | 768 | struct hw_perf_event *hwc = &event->hw; |
666 | 769 | ||
667 | sparc_perf_event_update(event, hwc, hwc->idx); | 770 | sparc_perf_event_update(event, hwc, idx); |
668 | } | 771 | } |
669 | 772 | ||
670 | static void sparc_pmu_unthrottle(struct perf_event *event) | 773 | static void sparc_pmu_unthrottle(struct perf_event *event) |
671 | { | 774 | { |
672 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 775 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
776 | int idx = active_event_index(cpuc, event); | ||
673 | struct hw_perf_event *hwc = &event->hw; | 777 | struct hw_perf_event *hwc = &event->hw; |
674 | 778 | ||
675 | sparc_pmu_enable_event(cpuc, hwc, hwc->idx); | 779 | sparc_pmu_enable_event(cpuc, hwc, idx); |
676 | } | 780 | } |
677 | 781 | ||
678 | static atomic_t active_events = ATOMIC_INIT(0); | 782 | static atomic_t active_events = ATOMIC_INIT(0); |
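Editor's note: the rework above defers all counter programming to hw_perf_enable(): newly added events are only recorded and counted in n_added (see sparc_pmu_enable() further down), and maybe_change_configuration() later assigns indexes and rebuilds %pcr in one pass. A schematic userspace model of that batching; struct pmu_state, add_event and flush are illustrative names, not kernel interfaces:

#include <stdio.h>

#define MAX_EVENTS 2

struct pmu_state {
	int n_events;              /* events currently tracked */
	int n_added;               /* added since the last disable */
	int idx[MAX_EVENTS];       /* -1 until a counter is assigned */
};

static void add_event(struct pmu_state *p)
{
	p->idx[p->n_events++] = -1;   /* deferred: no counter chosen yet */
	p->n_added++;
}

/* Models hw_perf_enable(): assign counters only to the new entries. */
static void flush(struct pmu_state *p)
{
	if (!p->n_added)
		return;
	for (int i = 0; i < p->n_events; i++)
		if (p->idx[i] < 0)
			p->idx[i] = i;        /* pick a free counter slot */
	p->n_added = 0;
}

int main(void)
{
	struct pmu_state p = { 0 };

	add_event(&p);
	add_event(&p);
	flush(&p);
	printf("idx: %d %d\n", p.idx[0], p.idx[1]);   /* 0 1 */
	return 0;
}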
@@ -750,43 +854,75 @@ static void hw_perf_event_destroy(struct perf_event *event) | |||
750 | /* Make sure all events can be scheduled into the hardware at | 854 | /* Make sure all events can be scheduled into the hardware at |
751 | * the same time. This is simplified by the fact that we only | 855 | * the same time. This is simplified by the fact that we only |
752 | * need to support 2 simultaneous HW events. | 856 | * need to support 2 simultaneous HW events. |
857 | * | ||
858 | * As a side effect, the evts[]->hw.idx values will be assigned | ||
859 | * on success. These are pending indexes. When the events are | ||
860 | * actually programmed into the chip, these values will propagate | ||
861 | * to the per-cpu cpuc->current_idx[] slots, see the code in | ||
862 | * maybe_change_configuration() for details. | ||
753 | */ | 863 | */ |
754 | static int sparc_check_constraints(unsigned long *events, int n_ev) | 864 | static int sparc_check_constraints(struct perf_event **evts, |
865 | unsigned long *events, int n_ev) | ||
755 | { | 866 | { |
756 | if (n_ev <= perf_max_events) { | 867 | u8 msk0 = 0, msk1 = 0; |
757 | u8 msk1, msk2; | 868 | int idx0 = 0; |
758 | u16 dummy; | 869 | |
759 | 870 | /* This case is possible when we are invoked from | |
760 | if (n_ev == 1) | 871 | * hw_perf_group_sched_in(). |
761 | return 0; | 872 | */ |
762 | BUG_ON(n_ev != 2); | 873 | if (!n_ev) |
763 | perf_event_decode(events[0], &dummy, &msk1); | 874 | return 0; |
764 | perf_event_decode(events[1], &dummy, &msk2); | 875 | |
765 | 876 | if (n_ev > perf_max_events) | |
766 | /* If both events can go on any counter, OK. */ | 877 | return -1; |
767 | if (msk1 == (PIC_UPPER | PIC_LOWER) && | 878 | |
768 | msk2 == (PIC_UPPER | PIC_LOWER)) | 879 | msk0 = perf_event_get_msk(events[0]); |
769 | return 0; | 880 | if (n_ev == 1) { |
770 | 881 | if (msk0 & PIC_LOWER) | |
771 | /* If one event is limited to a specific counter, | 882 | idx0 = 1; |
772 | * and the other can go on both, OK. | 883 | goto success; |
773 | */ | ||
774 | if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && | ||
775 | msk2 == (PIC_UPPER | PIC_LOWER)) | ||
776 | return 0; | ||
777 | if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && | ||
778 | msk1 == (PIC_UPPER | PIC_LOWER)) | ||
779 | return 0; | ||
780 | |||
781 | /* If the events are fixed to different counters, OK. */ | ||
782 | if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || | ||
783 | (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) | ||
784 | return 0; | ||
785 | |||
786 | /* Otherwise, there is a conflict. */ | ||
787 | } | 884 | } |
885 | BUG_ON(n_ev != 2); | ||
886 | msk1 = perf_event_get_msk(events[1]); | ||
887 | |||
888 | /* If both events can go on any counter, OK. */ | ||
889 | if (msk0 == (PIC_UPPER | PIC_LOWER) && | ||
890 | msk1 == (PIC_UPPER | PIC_LOWER)) | ||
891 | goto success; | ||
788 | 892 | ||
893 | /* If one event is limited to a specific counter, | ||
894 | * and the other can go on both, OK. | ||
895 | */ | ||
896 | if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) && | ||
897 | msk1 == (PIC_UPPER | PIC_LOWER)) { | ||
898 | if (msk0 & PIC_LOWER) | ||
899 | idx0 = 1; | ||
900 | goto success; | ||
901 | } | ||
902 | |||
903 | if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && | ||
904 | msk0 == (PIC_UPPER | PIC_LOWER)) { | ||
905 | if (msk1 & PIC_UPPER) | ||
906 | idx0 = 1; | ||
907 | goto success; | ||
908 | } | ||
909 | |||
910 | /* If the events are fixed to different counters, OK. */ | ||
911 | if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) || | ||
912 | (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) { | ||
913 | if (msk0 & PIC_LOWER) | ||
914 | idx0 = 1; | ||
915 | goto success; | ||
916 | } | ||
917 | |||
918 | /* Otherwise, there is a conflict. */ | ||
789 | return -1; | 919 | return -1; |
920 | |||
921 | success: | ||
922 | evts[0]->hw.idx = idx0; | ||
923 | if (n_ev == 2) | ||
924 | evts[1]->hw.idx = idx0 ^ 1; | ||
925 | return 0; | ||
790 | } | 926 | } |
791 | 927 | ||
792 | static int check_excludes(struct perf_event **evts, int n_prev, int n_new) | 928 | static int check_excludes(struct perf_event **evts, int n_prev, int n_new) |
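Editor's note: with at most two events and two PIC counters, the new sparc_check_constraints() reduces to a handful of mask comparisons and, on success, records pending indexes in evts[]->hw.idx. A compact sketch of the same placement rule; pick_slots is an illustrative name and index 0 stands for the upper counter, as in the patch:

#include <stdio.h>

#define PIC_UPPER 0x01   /* may run on counter index 0 */
#define PIC_LOWER 0x02   /* may run on counter index 1 */

/* Place two events given their allowed-counter masks.
 * Returns 0 and fills idx[] on success, -1 on conflict. */
static int pick_slots(unsigned char msk0, unsigned char msk1, int idx[2])
{
	int idx0 = 0;

	if (msk0 == (PIC_UPPER | PIC_LOWER) && msk1 == (PIC_UPPER | PIC_LOWER))
		goto ok;                          /* both fully flexible */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;                 /* event 0 pinned to the lower counter */
		goto ok;
	}
	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;                 /* event 1 claims the upper counter */
		goto ok;
	}
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto ok;
	}
	return -1;                                /* both insist on the same counter */
ok:
	idx[0] = idx0;
	idx[1] = idx0 ^ 1;
	return 0;
}

int main(void)
{
	int idx[2];

	if (!pick_slots(PIC_LOWER, PIC_UPPER | PIC_LOWER, idx))
		printf("event0 -> %d, event1 -> %d\n", idx[0], idx[1]);  /* 1, 0 */
	return 0;
}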
@@ -818,7 +954,8 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new) | |||
818 | } | 954 | } |
819 | 955 | ||
820 | static int collect_events(struct perf_event *group, int max_count, | 956 | static int collect_events(struct perf_event *group, int max_count, |
821 | struct perf_event *evts[], unsigned long *events) | 957 | struct perf_event *evts[], unsigned long *events, |
958 | int *current_idx) | ||
822 | { | 959 | { |
823 | struct perf_event *event; | 960 | struct perf_event *event; |
824 | int n = 0; | 961 | int n = 0; |
@@ -827,7 +964,8 @@ static int collect_events(struct perf_event *group, int max_count, | |||
827 | if (n >= max_count) | 964 | if (n >= max_count) |
828 | return -1; | 965 | return -1; |
829 | evts[n] = group; | 966 | evts[n] = group; |
830 | events[n++] = group->hw.event_base; | 967 | events[n] = group->hw.event_base; |
968 | current_idx[n++] = PIC_NO_INDEX; | ||
831 | } | 969 | } |
832 | list_for_each_entry(event, &group->sibling_list, group_entry) { | 970 | list_for_each_entry(event, &group->sibling_list, group_entry) { |
833 | if (!is_software_event(event) && | 971 | if (!is_software_event(event) && |
@@ -835,20 +973,100 @@ static int collect_events(struct perf_event *group, int max_count, | |||
835 | if (n >= max_count) | 973 | if (n >= max_count) |
836 | return -1; | 974 | return -1; |
837 | evts[n] = event; | 975 | evts[n] = event; |
838 | events[n++] = event->hw.event_base; | 976 | events[n] = event->hw.event_base; |
977 | current_idx[n++] = PIC_NO_INDEX; | ||
839 | } | 978 | } |
840 | } | 979 | } |
841 | return n; | 980 | return n; |
842 | } | 981 | } |
843 | 982 | ||
983 | static void event_sched_in(struct perf_event *event, int cpu) | ||
984 | { | ||
985 | event->state = PERF_EVENT_STATE_ACTIVE; | ||
986 | event->oncpu = cpu; | ||
987 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; | ||
988 | if (is_software_event(event)) | ||
989 | event->pmu->enable(event); | ||
990 | } | ||
991 | |||
992 | int hw_perf_group_sched_in(struct perf_event *group_leader, | ||
993 | struct perf_cpu_context *cpuctx, | ||
994 | struct perf_event_context *ctx, int cpu) | ||
995 | { | ||
996 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
997 | struct perf_event *sub; | ||
998 | int n0, n; | ||
999 | |||
1000 | if (!sparc_pmu) | ||
1001 | return 0; | ||
1002 | |||
1003 | n0 = cpuc->n_events; | ||
1004 | n = collect_events(group_leader, perf_max_events - n0, | ||
1005 | &cpuc->event[n0], &cpuc->events[n0], | ||
1006 | &cpuc->current_idx[n0]); | ||
1007 | if (n < 0) | ||
1008 | return -EAGAIN; | ||
1009 | if (check_excludes(cpuc->event, n0, n)) | ||
1010 | return -EINVAL; | ||
1011 | if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0)) | ||
1012 | return -EAGAIN; | ||
1013 | cpuc->n_events = n0 + n; | ||
1014 | cpuc->n_added += n; | ||
1015 | |||
1016 | cpuctx->active_oncpu += n; | ||
1017 | n = 1; | ||
1018 | event_sched_in(group_leader, cpu); | ||
1019 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { | ||
1020 | if (sub->state != PERF_EVENT_STATE_OFF) { | ||
1021 | event_sched_in(sub, cpu); | ||
1022 | n++; | ||
1023 | } | ||
1024 | } | ||
1025 | ctx->nr_active += n; | ||
1026 | |||
1027 | return 1; | ||
1028 | } | ||
1029 | |||
1030 | static int sparc_pmu_enable(struct perf_event *event) | ||
1031 | { | ||
1032 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1033 | int n0, ret = -EAGAIN; | ||
1034 | unsigned long flags; | ||
1035 | |||
1036 | local_irq_save(flags); | ||
1037 | perf_disable(); | ||
1038 | |||
1039 | n0 = cpuc->n_events; | ||
1040 | if (n0 >= perf_max_events) | ||
1041 | goto out; | ||
1042 | |||
1043 | cpuc->event[n0] = event; | ||
1044 | cpuc->events[n0] = event->hw.event_base; | ||
1045 | cpuc->current_idx[n0] = PIC_NO_INDEX; | ||
1046 | |||
1047 | if (check_excludes(cpuc->event, n0, 1)) | ||
1048 | goto out; | ||
1049 | if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) | ||
1050 | goto out; | ||
1051 | |||
1052 | cpuc->n_events++; | ||
1053 | cpuc->n_added++; | ||
1054 | |||
1055 | ret = 0; | ||
1056 | out: | ||
1057 | perf_enable(); | ||
1058 | local_irq_restore(flags); | ||
1059 | return ret; | ||
1060 | } | ||
1061 | |||
844 | static int __hw_perf_event_init(struct perf_event *event) | 1062 | static int __hw_perf_event_init(struct perf_event *event) |
845 | { | 1063 | { |
846 | struct perf_event_attr *attr = &event->attr; | 1064 | struct perf_event_attr *attr = &event->attr; |
847 | struct perf_event *evts[MAX_HWEVENTS]; | 1065 | struct perf_event *evts[MAX_HWEVENTS]; |
848 | struct hw_perf_event *hwc = &event->hw; | 1066 | struct hw_perf_event *hwc = &event->hw; |
849 | unsigned long events[MAX_HWEVENTS]; | 1067 | unsigned long events[MAX_HWEVENTS]; |
1068 | int current_idx_dmy[MAX_HWEVENTS]; | ||
850 | const struct perf_event_map *pmap; | 1069 | const struct perf_event_map *pmap; |
851 | u64 enc; | ||
852 | int n; | 1070 | int n; |
853 | 1071 | ||
854 | if (atomic_read(&nmi_active) < 0) | 1072 | if (atomic_read(&nmi_active) < 0) |
@@ -865,10 +1083,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
865 | } else | 1083 | } else |
866 | return -EOPNOTSUPP; | 1084 | return -EOPNOTSUPP; |
867 | 1085 | ||
868 | /* We save the enable bits in the config_base. So to | 1086 | /* We save the enable bits in the config_base. */ |
869 | * turn off sampling just write 'config', and to enable | ||
870 | * things write 'config | config_base'. | ||
871 | */ | ||
872 | hwc->config_base = sparc_pmu->irq_bit; | 1087 | hwc->config_base = sparc_pmu->irq_bit; |
873 | if (!attr->exclude_user) | 1088 | if (!attr->exclude_user) |
874 | hwc->config_base |= PCR_UTRACE; | 1089 | hwc->config_base |= PCR_UTRACE; |
@@ -879,13 +1094,11 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
879 | 1094 | ||
880 | hwc->event_base = perf_event_encode(pmap); | 1095 | hwc->event_base = perf_event_encode(pmap); |
881 | 1096 | ||
882 | enc = pmap->encoding; | ||
883 | |||
884 | n = 0; | 1097 | n = 0; |
885 | if (event->group_leader != event) { | 1098 | if (event->group_leader != event) { |
886 | n = collect_events(event->group_leader, | 1099 | n = collect_events(event->group_leader, |
887 | perf_max_events - 1, | 1100 | perf_max_events - 1, |
888 | evts, events); | 1101 | evts, events, current_idx_dmy); |
889 | if (n < 0) | 1102 | if (n < 0) |
890 | return -EINVAL; | 1103 | return -EINVAL; |
891 | } | 1104 | } |
@@ -895,9 +1108,11 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
895 | if (check_excludes(evts, n, 1)) | 1108 | if (check_excludes(evts, n, 1)) |
896 | return -EINVAL; | 1109 | return -EINVAL; |
897 | 1110 | ||
898 | if (sparc_check_constraints(events, n + 1)) | 1111 | if (sparc_check_constraints(evts, events, n + 1)) |
899 | return -EINVAL; | 1112 | return -EINVAL; |
900 | 1113 | ||
1114 | hwc->idx = PIC_NO_INDEX; | ||
1115 | |||
901 | /* Try to do all error checking before this point, as unwinding | 1116 | /* Try to do all error checking before this point, as unwinding |
902 | * state after grabbing the PMC is difficult. | 1117 | * state after grabbing the PMC is difficult. |
903 | */ | 1118 | */ |
@@ -910,15 +1125,6 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
910 | atomic64_set(&hwc->period_left, hwc->sample_period); | 1125 | atomic64_set(&hwc->period_left, hwc->sample_period); |
911 | } | 1126 | } |
912 | 1127 | ||
913 | if (pmap->pic_mask & PIC_UPPER) { | ||
914 | hwc->idx = PIC_UPPER_INDEX; | ||
915 | enc <<= sparc_pmu->upper_shift; | ||
916 | } else { | ||
917 | hwc->idx = PIC_LOWER_INDEX; | ||
918 | enc <<= sparc_pmu->lower_shift; | ||
919 | } | ||
920 | |||
921 | hwc->config |= enc; | ||
922 | return 0; | 1128 | return 0; |
923 | } | 1129 | } |
924 | 1130 | ||
@@ -968,7 +1174,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
968 | struct perf_sample_data data; | 1174 | struct perf_sample_data data; |
969 | struct cpu_hw_events *cpuc; | 1175 | struct cpu_hw_events *cpuc; |
970 | struct pt_regs *regs; | 1176 | struct pt_regs *regs; |
971 | int idx; | 1177 | int i; |
972 | 1178 | ||
973 | if (!atomic_read(&active_events)) | 1179 | if (!atomic_read(&active_events)) |
974 | return NOTIFY_DONE; | 1180 | return NOTIFY_DONE; |
@@ -997,13 +1203,12 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
997 | if (sparc_pmu->irq_bit) | 1203 | if (sparc_pmu->irq_bit) |
998 | pcr_ops->write(cpuc->pcr); | 1204 | pcr_ops->write(cpuc->pcr); |
999 | 1205 | ||
1000 | for (idx = 0; idx < MAX_HWEVENTS; idx++) { | 1206 | for (i = 0; i < cpuc->n_events; i++) { |
1001 | struct perf_event *event = cpuc->events[idx]; | 1207 | struct perf_event *event = cpuc->event[i]; |
1208 | int idx = cpuc->current_idx[i]; | ||
1002 | struct hw_perf_event *hwc; | 1209 | struct hw_perf_event *hwc; |
1003 | u64 val; | 1210 | u64 val; |
1004 | 1211 | ||
1005 | if (!test_bit(idx, cpuc->active_mask)) | ||
1006 | continue; | ||
1007 | hwc = &event->hw; | 1212 | hwc = &event->hw; |
1008 | val = sparc_perf_event_update(event, hwc, idx); | 1213 | val = sparc_perf_event_update(event, hwc, idx); |
1009 | if (val & (1ULL << 31)) | 1214 | if (val & (1ULL << 31)) |
@@ -1055,10 +1260,122 @@ void __init init_hw_perf_events(void) | |||
1055 | 1260 | ||
1056 | pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); | 1261 | pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); |
1057 | 1262 | ||
1058 | /* All sparc64 PMUs currently have 2 events. But this simple | 1263 | /* All sparc64 PMUs currently have 2 events. */ |
1059 | * driver only supports one active event at a time. | 1264 | perf_max_events = 2; |
1060 | */ | ||
1061 | perf_max_events = 1; | ||
1062 | 1265 | ||
1063 | register_die_notifier(&perf_event_nmi_notifier); | 1266 | register_die_notifier(&perf_event_nmi_notifier); |
1064 | } | 1267 | } |
1268 | |||
1269 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
1270 | { | ||
1271 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
1272 | entry->ip[entry->nr++] = ip; | ||
1273 | } | ||
1274 | |||
1275 | static void perf_callchain_kernel(struct pt_regs *regs, | ||
1276 | struct perf_callchain_entry *entry) | ||
1277 | { | ||
1278 | unsigned long ksp, fp; | ||
1279 | |||
1280 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
1281 | callchain_store(entry, regs->tpc); | ||
1282 | |||
1283 | ksp = regs->u_regs[UREG_I6]; | ||
1284 | fp = ksp + STACK_BIAS; | ||
1285 | do { | ||
1286 | struct sparc_stackf *sf; | ||
1287 | struct pt_regs *regs; | ||
1288 | unsigned long pc; | ||
1289 | |||
1290 | if (!kstack_valid(current_thread_info(), fp)) | ||
1291 | break; | ||
1292 | |||
1293 | sf = (struct sparc_stackf *) fp; | ||
1294 | regs = (struct pt_regs *) (sf + 1); | ||
1295 | |||
1296 | if (kstack_is_trap_frame(current_thread_info(), regs)) { | ||
1297 | if (user_mode(regs)) | ||
1298 | break; | ||
1299 | pc = regs->tpc; | ||
1300 | fp = regs->u_regs[UREG_I6] + STACK_BIAS; | ||
1301 | } else { | ||
1302 | pc = sf->callers_pc; | ||
1303 | fp = (unsigned long)sf->fp + STACK_BIAS; | ||
1304 | } | ||
1305 | callchain_store(entry, pc); | ||
1306 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | ||
1307 | } | ||
1308 | |||
1309 | static void perf_callchain_user_64(struct pt_regs *regs, | ||
1310 | struct perf_callchain_entry *entry) | ||
1311 | { | ||
1312 | unsigned long ufp; | ||
1313 | |||
1314 | callchain_store(entry, PERF_CONTEXT_USER); | ||
1315 | callchain_store(entry, regs->tpc); | ||
1316 | |||
1317 | ufp = regs->u_regs[UREG_I6] + STACK_BIAS; | ||
1318 | do { | ||
1319 | struct sparc_stackf *usf, sf; | ||
1320 | unsigned long pc; | ||
1321 | |||
1322 | usf = (struct sparc_stackf *) ufp; | ||
1323 | if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | ||
1324 | break; | ||
1325 | |||
1326 | pc = sf.callers_pc; | ||
1327 | ufp = (unsigned long)sf.fp + STACK_BIAS; | ||
1328 | callchain_store(entry, pc); | ||
1329 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | ||
1330 | } | ||
1331 | |||
1332 | static void perf_callchain_user_32(struct pt_regs *regs, | ||
1333 | struct perf_callchain_entry *entry) | ||
1334 | { | ||
1335 | unsigned long ufp; | ||
1336 | |||
1337 | callchain_store(entry, PERF_CONTEXT_USER); | ||
1338 | callchain_store(entry, regs->tpc); | ||
1339 | |||
1340 | ufp = regs->u_regs[UREG_I6]; | ||
1341 | do { | ||
1342 | struct sparc_stackf32 *usf, sf; | ||
1343 | unsigned long pc; | ||
1344 | |||
1345 | usf = (struct sparc_stackf32 *) ufp; | ||
1346 | if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | ||
1347 | break; | ||
1348 | |||
1349 | pc = sf.callers_pc; | ||
1350 | ufp = (unsigned long)sf.fp; | ||
1351 | callchain_store(entry, pc); | ||
1352 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | ||
1353 | } | ||
1354 | |||
1355 | /* Like powerpc we can't get PMU interrupts within the PMU handler, | ||
1356 | * so no need for separate NMI and IRQ chains as on x86. | ||
1357 | */ | ||
1358 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
1359 | |||
1360 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
1361 | { | ||
1362 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | ||
1363 | |||
1364 | entry->nr = 0; | ||
1365 | if (!user_mode(regs)) { | ||
1366 | stack_trace_flush(); | ||
1367 | perf_callchain_kernel(regs, entry); | ||
1368 | if (current->mm) | ||
1369 | regs = task_pt_regs(current); | ||
1370 | else | ||
1371 | regs = NULL; | ||
1372 | } | ||
1373 | if (regs) { | ||
1374 | flushw_user(); | ||
1375 | if (test_thread_flag(TIF_32BIT)) | ||
1376 | perf_callchain_user_32(regs, entry); | ||
1377 | else | ||
1378 | perf_callchain_user_64(regs, entry); | ||
1379 | } | ||
1380 | return entry; | ||
1381 | } | ||
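Editor's note: the callchain code added at the end of perf_event.c walks saved frame pointers until the stack ends or PERF_MAX_STACK_DEPTH entries are stored, handling kernel frames (with the sparc64 STACK_BIAS), 64-bit user frames, and 32-bit user frames copied in via __copy_from_user_inatomic(). A stripped-down, userspace-safe sketch of the same walk over a synthetic chain of frames; struct fake_frame and walk_chain are illustrative:

#include <stdio.h>

#define MAX_DEPTH 4

/* A synthetic stand-in for a stack frame: caller PC plus link to caller. */
struct fake_frame {
	unsigned long callers_pc;
	struct fake_frame *fp;
};

static int walk_chain(struct fake_frame *fp, unsigned long *ips)
{
	int nr = 0;

	while (fp && nr < MAX_DEPTH) {
		ips[nr++] = fp->callers_pc;   /* record one return address */
		fp = fp->fp;                  /* follow the saved frame pointer */
	}
	return nr;
}

int main(void)
{
	struct fake_frame f2 = { 0x3000, NULL };
	struct fake_frame f1 = { 0x2000, &f2 };
	struct fake_frame f0 = { 0x1000, &f1 };
	unsigned long ips[MAX_DEPTH];
	int n = walk_chain(&f0, ips);

	for (int i = 0; i < n; i++)
		printf("%#lx\n", ips[i]);
	return 0;
}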
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index cfa0e19abe3b..d77f54316948 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -365,6 +365,7 @@ EXPORT_SYMBOL(get_fb_unmapped_area); | |||
365 | void arch_pick_mmap_layout(struct mm_struct *mm) | 365 | void arch_pick_mmap_layout(struct mm_struct *mm) |
366 | { | 366 | { |
367 | unsigned long random_factor = 0UL; | 367 | unsigned long random_factor = 0UL; |
368 | unsigned long gap; | ||
368 | 369 | ||
369 | if (current->flags & PF_RANDOMIZE) { | 370 | if (current->flags & PF_RANDOMIZE) { |
370 | random_factor = get_random_int(); | 371 | random_factor = get_random_int(); |
@@ -379,9 +380,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |||
379 | * Fall back to the standard layout if the personality | 380 | * Fall back to the standard layout if the personality |
380 | * bit is set, or if the expected stack growth is unlimited: | 381 | * bit is set, or if the expected stack growth is unlimited: |
381 | */ | 382 | */ |
383 | gap = rlimit(RLIMIT_STACK); | ||
382 | if (!test_thread_flag(TIF_32BIT) || | 384 | if (!test_thread_flag(TIF_32BIT) || |
383 | (current->personality & ADDR_COMPAT_LAYOUT) || | 385 | (current->personality & ADDR_COMPAT_LAYOUT) || |
384 | current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || | 386 | gap == RLIM_INFINITY || |
385 | sysctl_legacy_va_layout) { | 387 | sysctl_legacy_va_layout) { |
386 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | 388 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; |
387 | mm->get_unmapped_area = arch_get_unmapped_area; | 389 | mm->get_unmapped_area = arch_get_unmapped_area; |
@@ -389,9 +391,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |||
389 | } else { | 391 | } else { |
390 | /* We know it's 32-bit */ | 392 | /* We know it's 32-bit */ |
391 | unsigned long task_size = STACK_TOP32; | 393 | unsigned long task_size = STACK_TOP32; |
392 | unsigned long gap; | ||
393 | 394 | ||
394 | gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; | ||
395 | if (gap < 128 * 1024 * 1024) | 395 | if (gap < 128 * 1024 * 1024) |
396 | gap = 128 * 1024 * 1024; | 396 | gap = 128 * 1024 * 1024; |
397 | if (gap > (task_size / 6 * 5)) | 397 | if (gap > (task_size / 6 * 5)) |
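Editor's note: the sys_sparc_64.c hunk replaces the open-coded rlim_cur read with rlimit(RLIMIT_STACK) and reuses the same gap for both the fall-back test and the top-down base, clamping it between 128MB and 5/6 of the 32-bit task size. A rough userspace model of that clamping, with an illustrative fixed task_size:

#include <stdio.h>

static unsigned long clamp_gap(unsigned long gap, unsigned long task_size)
{
	if (gap < 128UL * 1024 * 1024)
		gap = 128UL * 1024 * 1024;        /* reserve at least 128MB for the stack */
	if (gap > task_size / 6 * 5)
		gap = task_size / 6 * 5;          /* leave most of the space for mappings */
	return gap;
}

int main(void)
{
	unsigned long task_size = 0xf0000000UL;   /* illustrative 32-bit task size */

	printf("%#lx\n", clamp_gap(8UL * 1024 * 1024, task_size));   /* clamped up */
	printf("%#lx\n", clamp_gap(~0UL, task_size));                /* clamped down */
	return 0;
}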
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 5b2f595fe65b..0d4c09b15efc 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/platform_device.h> | 35 | #include <linux/platform_device.h> |
36 | 36 | ||
37 | #include <asm/oplib.h> | 37 | #include <asm/oplib.h> |
38 | #include <asm/timex.h> | ||
38 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
39 | #include <asm/system.h> | 40 | #include <asm/system.h> |
40 | #include <asm/irq.h> | 41 | #include <asm/irq.h> |
@@ -51,7 +52,6 @@ DEFINE_SPINLOCK(rtc_lock); | |||
51 | EXPORT_SYMBOL(rtc_lock); | 52 | EXPORT_SYMBOL(rtc_lock); |
52 | 53 | ||
53 | static int set_rtc_mmss(unsigned long); | 54 | static int set_rtc_mmss(unsigned long); |
54 | static int sbus_do_settimeofday(struct timespec *tv); | ||
55 | 55 | ||
56 | unsigned long profile_pc(struct pt_regs *regs) | 56 | unsigned long profile_pc(struct pt_regs *regs) |
57 | { | 57 | { |
@@ -76,6 +76,8 @@ EXPORT_SYMBOL(profile_pc); | |||
76 | 76 | ||
77 | __volatile__ unsigned int *master_l10_counter; | 77 | __volatile__ unsigned int *master_l10_counter; |
78 | 78 | ||
79 | u32 (*do_arch_gettimeoffset)(void); | ||
80 | |||
79 | /* | 81 | /* |
80 | * timer_interrupt() needs to keep up the real-time clock, | 82 | * timer_interrupt() needs to keep up the real-time clock, |
81 | * as well as call the "do_timer()" routine every clocktick | 83 | * as well as call the "do_timer()" routine every clocktick |
@@ -196,35 +198,14 @@ static int __init clock_init(void) | |||
196 | { | 198 | { |
197 | return of_register_driver(&clock_driver, &of_platform_bus_type); | 199 | return of_register_driver(&clock_driver, &of_platform_bus_type); |
198 | } | 200 | } |
199 | |||
200 | /* Must be after subsys_initcall() so that busses are probed. Must | 201 | /* Must be after subsys_initcall() so that busses are probed. Must |
201 | * be before device_initcall() because things like the RTC driver | 202 | * be before device_initcall() because things like the RTC driver |
202 | * need to see the clock registers. | 203 | * need to see the clock registers. |
203 | */ | 204 | */ |
204 | fs_initcall(clock_init); | 205 | fs_initcall(clock_init); |
205 | 206 | ||
206 | static void __init sbus_time_init(void) | ||
207 | { | ||
208 | |||
209 | BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM); | ||
210 | btfixup(); | ||
211 | |||
212 | sparc_init_timers(timer_interrupt); | ||
213 | } | ||
214 | |||
215 | void __init time_init(void) | ||
216 | { | ||
217 | #ifdef CONFIG_PCI | ||
218 | extern void pci_time_init(void); | ||
219 | if (pcic_present()) { | ||
220 | pci_time_init(); | ||
221 | return; | ||
222 | } | ||
223 | #endif | ||
224 | sbus_time_init(); | ||
225 | } | ||
226 | 207 | ||
227 | static inline unsigned long do_gettimeoffset(void) | 208 | u32 sbus_do_gettimeoffset(void) |
228 | { | 209 | { |
229 | unsigned long val = *master_l10_counter; | 210 | unsigned long val = *master_l10_counter; |
230 | unsigned long usec = (val >> 10) & 0x1fffff; | 211 | unsigned long usec = (val >> 10) & 0x1fffff; |
@@ -233,86 +214,39 @@ static inline unsigned long do_gettimeoffset(void) | |||
233 | if (val & 0x80000000) | 214 | if (val & 0x80000000) |
234 | usec += 1000000 / HZ; | 215 | usec += 1000000 / HZ; |
235 | 216 | ||
236 | return usec; | 217 | return usec * 1000; |
237 | } | 218 | } |
238 | 219 | ||
239 | /* Ok, my cute asm atomicity trick doesn't work anymore. | ||
240 | * There are just too many variables that need to be protected | ||
241 | * now (both members of xtime, et al.) | ||
242 | */ | ||
243 | void do_gettimeofday(struct timeval *tv) | ||
244 | { | ||
245 | unsigned long flags; | ||
246 | unsigned long seq; | ||
247 | unsigned long usec, sec; | ||
248 | unsigned long max_ntp_tick = tick_usec - tickadj; | ||
249 | |||
250 | do { | ||
251 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | ||
252 | usec = do_gettimeoffset(); | ||
253 | |||
254 | /* | ||
255 | * If time_adjust is negative then NTP is slowing the clock | ||
256 | * so make sure not to go into next possible interval. | ||
257 | * Better to lose some accuracy than have time go backwards.. | ||
258 | */ | ||
259 | if (unlikely(time_adjust < 0)) | ||
260 | usec = min(usec, max_ntp_tick); | ||
261 | |||
262 | sec = xtime.tv_sec; | ||
263 | usec += (xtime.tv_nsec / 1000); | ||
264 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | ||
265 | |||
266 | while (usec >= 1000000) { | ||
267 | usec -= 1000000; | ||
268 | sec++; | ||
269 | } | ||
270 | 220 | ||
271 | tv->tv_sec = sec; | 221 | u32 arch_gettimeoffset(void) |
272 | tv->tv_usec = usec; | ||
273 | } | ||
274 | |||
275 | EXPORT_SYMBOL(do_gettimeofday); | ||
276 | |||
277 | int do_settimeofday(struct timespec *tv) | ||
278 | { | 222 | { |
279 | int ret; | 223 | if (unlikely(!do_arch_gettimeoffset)) |
280 | 224 | return 0; | |
281 | write_seqlock_irq(&xtime_lock); | 225 | return do_arch_gettimeoffset(); |
282 | ret = bus_do_settimeofday(tv); | ||
283 | write_sequnlock_irq(&xtime_lock); | ||
284 | clock_was_set(); | ||
285 | return ret; | ||
286 | } | 226 | } |
287 | 227 | ||
288 | EXPORT_SYMBOL(do_settimeofday); | 228 | static void __init sbus_time_init(void) |
289 | |||
290 | static int sbus_do_settimeofday(struct timespec *tv) | ||
291 | { | 229 | { |
292 | time_t wtm_sec, sec = tv->tv_sec; | 230 | do_arch_gettimeoffset = sbus_do_gettimeoffset; |
293 | long wtm_nsec, nsec = tv->tv_nsec; | ||
294 | 231 | ||
295 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | 232 | btfixup(); |
296 | return -EINVAL; | ||
297 | |||
298 | /* | ||
299 | * This is revolting. We need to set "xtime" correctly. However, the | ||
300 | * value in this location is the value at the most recent update of | ||
301 | * wall time. Discover what correction gettimeofday() would have | ||
302 | * made, and then undo it! | ||
303 | */ | ||
304 | nsec -= 1000 * do_gettimeoffset(); | ||
305 | |||
306 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); | ||
307 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); | ||
308 | 233 | ||
309 | set_normalized_timespec(&xtime, sec, nsec); | 234 | sparc_init_timers(timer_interrupt); |
310 | set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); | 235 | } |
311 | 236 | ||
312 | ntp_clear(); | 237 | void __init time_init(void) |
313 | return 0; | 238 | { |
239 | #ifdef CONFIG_PCI | ||
240 | extern void pci_time_init(void); | ||
241 | if (pcic_present()) { | ||
242 | pci_time_init(); | ||
243 | return; | ||
244 | } | ||
245 | #endif | ||
246 | sbus_time_init(); | ||
314 | } | 247 | } |
315 | 248 | ||
249 | |||
316 | static int set_rtc_mmss(unsigned long secs) | 250 | static int set_rtc_mmss(unsigned long secs) |
317 | { | 251 | { |
318 | struct rtc_device *rtc = rtc_class_open("rtc0"); | 252 | struct rtc_device *rtc = rtc_class_open("rtc0"); |
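Editor's note: time_32.c now routes everything through a single arch_gettimeoffset() that dispatches via the do_arch_gettimeoffset function pointer and returns 0 until a bus-specific backend (sbus or PCI) registers one. The hook pattern in isolation, as a small sketch; sample_backend is a made-up name:

#include <stdio.h>

/* Nanoseconds since the last tick, supplied by whichever bus driver probes. */
static unsigned int (*do_gettimeoffset_hook)(void);

static unsigned int gettimeoffset(void)
{
	if (!do_gettimeoffset_hook)     /* nothing registered yet */
		return 0;
	return do_gettimeoffset_hook();
}

static unsigned int sample_backend(void)
{
	return 123000;                  /* pretend the counter says 123 us */
}

int main(void)
{
	printf("%u\n", gettimeoffset());          /* 0: no backend yet */
	do_gettimeoffset_hook = sample_backend;   /* analogous to sbus_time_init() */
	printf("%u\n", gettimeoffset());          /* 123000 */
	return 0;
}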
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index b99f81c4906f..a3413acb8f12 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/signal.h> | 18 | #include <linux/signal.h> |
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
21 | #include <linux/perf_event.h> | ||
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
23 | #include <linux/kdebug.h> | 24 | #include <linux/kdebug.h> |
@@ -203,6 +204,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, | |||
203 | if (in_atomic() || !mm) | 204 | if (in_atomic() || !mm) |
204 | goto no_context; | 205 | goto no_context; |
205 | 206 | ||
207 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | ||
208 | |||
206 | down_read(&mm->mmap_sem); | 209 | down_read(&mm->mmap_sem); |
207 | 210 | ||
208 | /* | 211 | /* |
@@ -249,10 +252,15 @@ good_area: | |||
249 | goto do_sigbus; | 252 | goto do_sigbus; |
250 | BUG(); | 253 | BUG(); |
251 | } | 254 | } |
252 | if (fault & VM_FAULT_MAJOR) | 255 | if (fault & VM_FAULT_MAJOR) { |
253 | current->maj_flt++; | 256 | current->maj_flt++; |
254 | else | 257 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
258 | regs, address); | ||
259 | } else { | ||
255 | current->min_flt++; | 260 | current->min_flt++; |
261 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | ||
262 | regs, address); | ||
263 | } | ||
256 | up_read(&mm->mmap_sem); | 264 | up_read(&mm->mmap_sem); |
257 | return; | 265 | return; |
258 | 266 | ||
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 6081936bf03b..b9d4ff02b8fc 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/perf_event.h> | ||
19 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
20 | #include <linux/kprobes.h> | 21 | #include <linux/kprobes.h> |
21 | #include <linux/kdebug.h> | 22 | #include <linux/kdebug.h> |
@@ -296,6 +297,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
296 | if (in_atomic() || !mm) | 297 | if (in_atomic() || !mm) |
297 | goto intr_or_no_mm; | 298 | goto intr_or_no_mm; |
298 | 299 | ||
300 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | ||
301 | |||
299 | if (!down_read_trylock(&mm->mmap_sem)) { | 302 | if (!down_read_trylock(&mm->mmap_sem)) { |
300 | if ((regs->tstate & TSTATE_PRIV) && | 303 | if ((regs->tstate & TSTATE_PRIV) && |
301 | !search_exception_tables(regs->tpc)) { | 304 | !search_exception_tables(regs->tpc)) { |
@@ -400,11 +403,15 @@ good_area: | |||
400 | goto do_sigbus; | 403 | goto do_sigbus; |
401 | BUG(); | 404 | BUG(); |
402 | } | 405 | } |
403 | if (fault & VM_FAULT_MAJOR) | 406 | if (fault & VM_FAULT_MAJOR) { |
404 | current->maj_flt++; | 407 | current->maj_flt++; |
405 | else | 408 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
409 | regs, address); | ||
410 | } else { | ||
406 | current->min_flt++; | 411 | current->min_flt++; |
407 | 412 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
413 | regs, address); | ||
414 | } | ||
408 | up_read(&mm->mmap_sem); | 415 | up_read(&mm->mmap_sem); |
409 | 416 | ||
410 | mm_rss = get_mm_rss(mm); | 417 | mm_rss = get_mm_rss(mm); |
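Editor's note: both fault handlers above now emit one PERF_COUNT_SW_PAGE_FAULTS event on entry and then a second, more specific MAJ or MIN event once handle_mm_fault() reports whether the fault needed I/O. The accounting split in miniature; fault_stats and account_fault are illustrative names, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>

struct fault_stats {
	unsigned long total;
	unsigned long major;   /* fault required I/O (e.g. read from disk) */
	unsigned long minor;   /* fault satisfied from memory */
};

static void account_fault(struct fault_stats *s, bool was_major)
{
	s->total++;            /* PERF_COUNT_SW_PAGE_FAULTS analogue */
	if (was_major)
		s->major++;    /* ..._PAGE_FAULTS_MAJ */
	else
		s->minor++;    /* ..._PAGE_FAULTS_MIN */
}

int main(void)
{
	struct fault_stats s = { 0 };

	account_fault(&s, false);
	account_fault(&s, true);
	printf("total=%lu major=%lu minor=%lu\n", s.total, s.major, s.minor);
	return 0;
}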
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cbcbfdee3ee0..eb4092568f9e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -989,12 +989,6 @@ config X86_CPUID | |||
989 | with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to | 989 | with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to |
990 | /dev/cpu/31/cpuid. | 990 | /dev/cpu/31/cpuid. |
991 | 991 | ||
992 | config X86_CPU_DEBUG | ||
993 | tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support" | ||
994 | ---help--- | ||
995 | If you select this option, this will provide various x86 CPUs | ||
996 | information through debugfs. | ||
997 | |||
998 | choice | 992 | choice |
999 | prompt "High Memory Support" | 993 | prompt "High Memory Support" |
1000 | default HIGHMEM4G if !X86_NUMAQ | 994 | default HIGHMEM4G if !X86_NUMAQ |
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h deleted file mode 100644 index d96c1ee3a95c..000000000000 --- a/arch/x86/include/asm/cpu_debug.h +++ /dev/null | |||
@@ -1,127 +0,0 @@ | |||
1 | #ifndef _ASM_X86_CPU_DEBUG_H | ||
2 | #define _ASM_X86_CPU_DEBUG_H | ||
3 | |||
4 | /* | ||
5 | * CPU x86 architecture debug | ||
6 | * | ||
7 | * Copyright(C) 2009 Jaswinder Singh Rajput | ||
8 | */ | ||
9 | |||
10 | /* Register flags */ | ||
11 | enum cpu_debug_bit { | ||
12 | /* Model Specific Registers (MSRs) */ | ||
13 | CPU_MC_BIT, /* Machine Check */ | ||
14 | CPU_MONITOR_BIT, /* Monitor */ | ||
15 | CPU_TIME_BIT, /* Time */ | ||
16 | CPU_PMC_BIT, /* Performance Monitor */ | ||
17 | CPU_PLATFORM_BIT, /* Platform */ | ||
18 | CPU_APIC_BIT, /* APIC */ | ||
19 | CPU_POWERON_BIT, /* Power-on */ | ||
20 | CPU_CONTROL_BIT, /* Control */ | ||
21 | CPU_FEATURES_BIT, /* Features control */ | ||
22 | CPU_LBRANCH_BIT, /* Last Branch */ | ||
23 | CPU_BIOS_BIT, /* BIOS */ | ||
24 | CPU_FREQ_BIT, /* Frequency */ | ||
25 | CPU_MTTR_BIT, /* MTRR */ | ||
26 | CPU_PERF_BIT, /* Performance */ | ||
27 | CPU_CACHE_BIT, /* Cache */ | ||
28 | CPU_SYSENTER_BIT, /* Sysenter */ | ||
29 | CPU_THERM_BIT, /* Thermal */ | ||
30 | CPU_MISC_BIT, /* Miscellaneous */ | ||
31 | CPU_DEBUG_BIT, /* Debug */ | ||
32 | CPU_PAT_BIT, /* PAT */ | ||
33 | CPU_VMX_BIT, /* VMX */ | ||
34 | CPU_CALL_BIT, /* System Call */ | ||
35 | CPU_BASE_BIT, /* BASE Address */ | ||
36 | CPU_VER_BIT, /* Version ID */ | ||
37 | CPU_CONF_BIT, /* Configuration */ | ||
38 | CPU_SMM_BIT, /* System mgmt mode */ | ||
39 | CPU_SVM_BIT, /*Secure Virtual Machine*/ | ||
40 | CPU_OSVM_BIT, /* OS-Visible Workaround*/ | ||
41 | /* Standard Registers */ | ||
42 | CPU_TSS_BIT, /* Task Stack Segment */ | ||
43 | CPU_CR_BIT, /* Control Registers */ | ||
44 | CPU_DT_BIT, /* Descriptor Table */ | ||
45 | /* End of Registers flags */ | ||
46 | CPU_REG_ALL_BIT, /* Select all Registers */ | ||
47 | }; | ||
48 | |||
49 | #define CPU_REG_ALL (~0) /* Select all Registers */ | ||
50 | |||
51 | #define CPU_MC (1 << CPU_MC_BIT) | ||
52 | #define CPU_MONITOR (1 << CPU_MONITOR_BIT) | ||
53 | #define CPU_TIME (1 << CPU_TIME_BIT) | ||
54 | #define CPU_PMC (1 << CPU_PMC_BIT) | ||
55 | #define CPU_PLATFORM (1 << CPU_PLATFORM_BIT) | ||
56 | #define CPU_APIC (1 << CPU_APIC_BIT) | ||
57 | #define CPU_POWERON (1 << CPU_POWERON_BIT) | ||
58 | #define CPU_CONTROL (1 << CPU_CONTROL_BIT) | ||
59 | #define CPU_FEATURES (1 << CPU_FEATURES_BIT) | ||
60 | #define CPU_LBRANCH (1 << CPU_LBRANCH_BIT) | ||
61 | #define CPU_BIOS (1 << CPU_BIOS_BIT) | ||
62 | #define CPU_FREQ (1 << CPU_FREQ_BIT) | ||
63 | #define CPU_MTRR (1 << CPU_MTTR_BIT) | ||
64 | #define CPU_PERF (1 << CPU_PERF_BIT) | ||
65 | #define CPU_CACHE (1 << CPU_CACHE_BIT) | ||
66 | #define CPU_SYSENTER (1 << CPU_SYSENTER_BIT) | ||
67 | #define CPU_THERM (1 << CPU_THERM_BIT) | ||
68 | #define CPU_MISC (1 << CPU_MISC_BIT) | ||
69 | #define CPU_DEBUG (1 << CPU_DEBUG_BIT) | ||
70 | #define CPU_PAT (1 << CPU_PAT_BIT) | ||
71 | #define CPU_VMX (1 << CPU_VMX_BIT) | ||
72 | #define CPU_CALL (1 << CPU_CALL_BIT) | ||
73 | #define CPU_BASE (1 << CPU_BASE_BIT) | ||
74 | #define CPU_VER (1 << CPU_VER_BIT) | ||
75 | #define CPU_CONF (1 << CPU_CONF_BIT) | ||
76 | #define CPU_SMM (1 << CPU_SMM_BIT) | ||
77 | #define CPU_SVM (1 << CPU_SVM_BIT) | ||
78 | #define CPU_OSVM (1 << CPU_OSVM_BIT) | ||
79 | #define CPU_TSS (1 << CPU_TSS_BIT) | ||
80 | #define CPU_CR (1 << CPU_CR_BIT) | ||
81 | #define CPU_DT (1 << CPU_DT_BIT) | ||
82 | |||
83 | /* Register file flags */ | ||
84 | enum cpu_file_bit { | ||
85 | CPU_INDEX_BIT, /* index */ | ||
86 | CPU_VALUE_BIT, /* value */ | ||
87 | }; | ||
88 | |||
89 | #define CPU_FILE_VALUE (1 << CPU_VALUE_BIT) | ||
90 | |||
91 | #define MAX_CPU_FILES 512 | ||
92 | |||
93 | struct cpu_private { | ||
94 | unsigned cpu; | ||
95 | unsigned type; | ||
96 | unsigned reg; | ||
97 | unsigned file; | ||
98 | }; | ||
99 | |||
100 | struct cpu_debug_base { | ||
101 | char *name; /* Register name */ | ||
102 | unsigned flag; /* Register flag */ | ||
103 | unsigned write; /* Register write flag */ | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * Currently it looks similar to cpu_debug_base but once we add more files | ||
108 | * cpu_file_base will go in different direction | ||
109 | */ | ||
110 | struct cpu_file_base { | ||
111 | char *name; /* Register file name */ | ||
112 | unsigned flag; /* Register file flag */ | ||
113 | unsigned write; /* Register write flag */ | ||
114 | }; | ||
115 | |||
116 | struct cpu_cpuX_base { | ||
117 | struct dentry *dentry; /* Register dentry */ | ||
118 | int init; /* Register index file */ | ||
119 | }; | ||
120 | |||
121 | struct cpu_debug_range { | ||
122 | unsigned min; /* Register range min */ | ||
123 | unsigned max; /* Register range max */ | ||
124 | unsigned flag; /* Supported flags */ | ||
125 | }; | ||
126 | |||
127 | #endif /* _ASM_X86_CPU_DEBUG_H */ | ||
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 5d89fd2a3690..1d5c08a1bdfd 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h | |||
@@ -67,6 +67,7 @@ extern unsigned long hpet_address; | |||
67 | extern unsigned long force_hpet_address; | 67 | extern unsigned long force_hpet_address; |
68 | extern u8 hpet_blockid; | 68 | extern u8 hpet_blockid; |
69 | extern int hpet_force_user; | 69 | extern int hpet_force_user; |
70 | extern u8 hpet_msi_disable; | ||
70 | extern int is_hpet_enabled(void); | 71 | extern int is_hpet_enabled(void); |
71 | extern int hpet_enable(void); | 72 | extern int hpet_enable(void); |
72 | extern void hpet_disable(void); | 73 | extern void hpet_disable(void); |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index c24ca9a56458..ef51b501e22a 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -12,8 +12,6 @@ struct device; | |||
12 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; | 12 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; |
13 | 13 | ||
14 | struct microcode_ops { | 14 | struct microcode_ops { |
15 | void (*init)(struct device *device); | ||
16 | void (*fini)(void); | ||
17 | enum ucode_state (*request_microcode_user) (int cpu, | 15 | enum ucode_state (*request_microcode_user) (int cpu, |
18 | const void __user *buf, size_t size); | 16 | const void __user *buf, size_t size); |
19 | 17 | ||
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 1d2cb383410e..c202b62f3671 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -19,8 +19,6 @@ obj-y += vmware.o hypervisor.o sched.o | |||
19 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o | 19 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o |
20 | obj-$(CONFIG_X86_64) += bugs_64.o | 20 | obj-$(CONFIG_X86_64) += bugs_64.o |
21 | 21 | ||
22 | obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o | ||
23 | |||
24 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o | 22 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o |
25 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o | 23 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o |
26 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o | 24 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o |
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c deleted file mode 100644 index b368cd862997..000000000000 --- a/arch/x86/kernel/cpu/cpu_debug.c +++ /dev/null | |||
@@ -1,688 +0,0 @@ | |||
1 | /* | ||
2 | * CPU x86 architecture debug code | ||
3 | * | ||
4 | * Copyright(C) 2009 Jaswinder Singh Rajput | ||
5 | * | ||
6 | * For licencing details see kernel-base/COPYING | ||
7 | */ | ||
8 | |||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/compiler.h> | ||
11 | #include <linux/seq_file.h> | ||
12 | #include <linux/debugfs.h> | ||
13 | #include <linux/kprobes.h> | ||
14 | #include <linux/uaccess.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/percpu.h> | ||
18 | #include <linux/signal.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/smp.h> | ||
25 | |||
26 | #include <asm/cpu_debug.h> | ||
27 | #include <asm/paravirt.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <asm/traps.h> | ||
30 | #include <asm/apic.h> | ||
31 | #include <asm/desc.h> | ||
32 | |||
33 | static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr); | ||
34 | static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr); | ||
35 | static DEFINE_PER_CPU(int, cpud_priv_count); | ||
36 | |||
37 | static DEFINE_MUTEX(cpu_debug_lock); | ||
38 | |||
39 | static struct dentry *cpu_debugfs_dir; | ||
40 | |||
41 | static struct cpu_debug_base cpu_base[] = { | ||
42 | { "mc", CPU_MC, 0 }, | ||
43 | { "monitor", CPU_MONITOR, 0 }, | ||
44 | { "time", CPU_TIME, 0 }, | ||
45 | { "pmc", CPU_PMC, 1 }, | ||
46 | { "platform", CPU_PLATFORM, 0 }, | ||
47 | { "apic", CPU_APIC, 0 }, | ||
48 | { "poweron", CPU_POWERON, 0 }, | ||
49 | { "control", CPU_CONTROL, 0 }, | ||
50 | { "features", CPU_FEATURES, 0 }, | ||
51 | { "lastbranch", CPU_LBRANCH, 0 }, | ||
52 | { "bios", CPU_BIOS, 0 }, | ||
53 | { "freq", CPU_FREQ, 0 }, | ||
54 | { "mtrr", CPU_MTRR, 0 }, | ||
55 | { "perf", CPU_PERF, 0 }, | ||
56 | { "cache", CPU_CACHE, 0 }, | ||
57 | { "sysenter", CPU_SYSENTER, 0 }, | ||
58 | { "therm", CPU_THERM, 0 }, | ||
59 | { "misc", CPU_MISC, 0 }, | ||
60 | { "debug", CPU_DEBUG, 0 }, | ||
61 | { "pat", CPU_PAT, 0 }, | ||
62 | { "vmx", CPU_VMX, 0 }, | ||
63 | { "call", CPU_CALL, 0 }, | ||
64 | { "base", CPU_BASE, 0 }, | ||
65 | { "ver", CPU_VER, 0 }, | ||
66 | { "conf", CPU_CONF, 0 }, | ||
67 | { "smm", CPU_SMM, 0 }, | ||
68 | { "svm", CPU_SVM, 0 }, | ||
69 | { "osvm", CPU_OSVM, 0 }, | ||
70 | { "tss", CPU_TSS, 0 }, | ||
71 | { "cr", CPU_CR, 0 }, | ||
72 | { "dt", CPU_DT, 0 }, | ||
73 | { "registers", CPU_REG_ALL, 0 }, | ||
74 | }; | ||
75 | |||
76 | static struct cpu_file_base cpu_file[] = { | ||
77 | { "index", CPU_REG_ALL, 0 }, | ||
78 | { "value", CPU_REG_ALL, 1 }, | ||
79 | }; | ||
80 | |||
81 | /* CPU Registers Range */ | ||
82 | static struct cpu_debug_range cpu_reg_range[] = { | ||
83 | { 0x00000000, 0x00000001, CPU_MC, }, | ||
84 | { 0x00000006, 0x00000007, CPU_MONITOR, }, | ||
85 | { 0x00000010, 0x00000010, CPU_TIME, }, | ||
86 | { 0x00000011, 0x00000013, CPU_PMC, }, | ||
87 | { 0x00000017, 0x00000017, CPU_PLATFORM, }, | ||
88 | { 0x0000001B, 0x0000001B, CPU_APIC, }, | ||
89 | { 0x0000002A, 0x0000002B, CPU_POWERON, }, | ||
90 | { 0x0000002C, 0x0000002C, CPU_FREQ, }, | ||
91 | { 0x0000003A, 0x0000003A, CPU_CONTROL, }, | ||
92 | { 0x00000040, 0x00000047, CPU_LBRANCH, }, | ||
93 | { 0x00000060, 0x00000067, CPU_LBRANCH, }, | ||
94 | { 0x00000079, 0x00000079, CPU_BIOS, }, | ||
95 | { 0x00000088, 0x0000008A, CPU_CACHE, }, | ||
96 | { 0x0000008B, 0x0000008B, CPU_BIOS, }, | ||
97 | { 0x0000009B, 0x0000009B, CPU_MONITOR, }, | ||
98 | { 0x000000C1, 0x000000C4, CPU_PMC, }, | ||
99 | { 0x000000CD, 0x000000CD, CPU_FREQ, }, | ||
100 | { 0x000000E7, 0x000000E8, CPU_PERF, }, | ||
101 | { 0x000000FE, 0x000000FE, CPU_MTRR, }, | ||
102 | |||
103 | { 0x00000116, 0x0000011E, CPU_CACHE, }, | ||
104 | { 0x00000174, 0x00000176, CPU_SYSENTER, }, | ||
105 | { 0x00000179, 0x0000017B, CPU_MC, }, | ||
106 | { 0x00000186, 0x00000189, CPU_PMC, }, | ||
107 | { 0x00000198, 0x00000199, CPU_PERF, }, | ||
108 | { 0x0000019A, 0x0000019A, CPU_TIME, }, | ||
109 | { 0x0000019B, 0x0000019D, CPU_THERM, }, | ||
110 | { 0x000001A0, 0x000001A0, CPU_MISC, }, | ||
111 | { 0x000001C9, 0x000001C9, CPU_LBRANCH, }, | ||
112 | { 0x000001D7, 0x000001D8, CPU_LBRANCH, }, | ||
113 | { 0x000001D9, 0x000001D9, CPU_DEBUG, }, | ||
114 | { 0x000001DA, 0x000001E0, CPU_LBRANCH, }, | ||
115 | |||
116 | { 0x00000200, 0x0000020F, CPU_MTRR, }, | ||
117 | { 0x00000250, 0x00000250, CPU_MTRR, }, | ||
118 | { 0x00000258, 0x00000259, CPU_MTRR, }, | ||
119 | { 0x00000268, 0x0000026F, CPU_MTRR, }, | ||
120 | { 0x00000277, 0x00000277, CPU_PAT, }, | ||
121 | { 0x000002FF, 0x000002FF, CPU_MTRR, }, | ||
122 | |||
123 | { 0x00000300, 0x00000311, CPU_PMC, }, | ||
124 | { 0x00000345, 0x00000345, CPU_PMC, }, | ||
125 | { 0x00000360, 0x00000371, CPU_PMC, }, | ||
126 | { 0x0000038D, 0x00000390, CPU_PMC, }, | ||
127 | { 0x000003A0, 0x000003BE, CPU_PMC, }, | ||
128 | { 0x000003C0, 0x000003CD, CPU_PMC, }, | ||
129 | { 0x000003E0, 0x000003E1, CPU_PMC, }, | ||
130 | { 0x000003F0, 0x000003F2, CPU_PMC, }, | ||
131 | |||
132 | { 0x00000400, 0x00000417, CPU_MC, }, | ||
133 | { 0x00000480, 0x0000048B, CPU_VMX, }, | ||
134 | |||
135 | { 0x00000600, 0x00000600, CPU_DEBUG, }, | ||
136 | { 0x00000680, 0x0000068F, CPU_LBRANCH, }, | ||
137 | { 0x000006C0, 0x000006CF, CPU_LBRANCH, }, | ||
138 | |||
139 | { 0x000107CC, 0x000107D3, CPU_PMC, }, | ||
140 | |||
141 | { 0xC0000080, 0xC0000080, CPU_FEATURES, }, | ||
142 | { 0xC0000081, 0xC0000084, CPU_CALL, }, | ||
143 | { 0xC0000100, 0xC0000102, CPU_BASE, }, | ||
144 | { 0xC0000103, 0xC0000103, CPU_TIME, }, | ||
145 | |||
146 | { 0xC0010000, 0xC0010007, CPU_PMC, }, | ||
147 | { 0xC0010010, 0xC0010010, CPU_CONF, }, | ||
148 | { 0xC0010015, 0xC0010015, CPU_CONF, }, | ||
149 | { 0xC0010016, 0xC001001A, CPU_MTRR, }, | ||
150 | { 0xC001001D, 0xC001001D, CPU_MTRR, }, | ||
151 | { 0xC001001F, 0xC001001F, CPU_CONF, }, | ||
152 | { 0xC0010030, 0xC0010035, CPU_BIOS, }, | ||
153 | { 0xC0010044, 0xC0010048, CPU_MC, }, | ||
154 | { 0xC0010050, 0xC0010056, CPU_SMM, }, | ||
155 | { 0xC0010058, 0xC0010058, CPU_CONF, }, | ||
156 | { 0xC0010060, 0xC0010060, CPU_CACHE, }, | ||
157 | { 0xC0010061, 0xC0010068, CPU_SMM, }, | ||
158 | { 0xC0010069, 0xC001006B, CPU_SMM, }, | ||
159 | { 0xC0010070, 0xC0010071, CPU_SMM, }, | ||
160 | { 0xC0010111, 0xC0010113, CPU_SMM, }, | ||
161 | { 0xC0010114, 0xC0010118, CPU_SVM, }, | ||
162 | { 0xC0010140, 0xC0010141, CPU_OSVM, }, | ||
163 | { 0xC0011022, 0xC0011023, CPU_CONF, }, | ||
164 | }; | ||
165 | |||
166 | static int is_typeflag_valid(unsigned cpu, unsigned flag) | ||
167 | { | ||
168 | int i; | ||
169 | |||
170 | /* Standard Registers should be always valid */ | ||
171 | if (flag >= CPU_TSS) | ||
172 | return 1; | ||
173 | |||
174 | for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
175 | if (cpu_reg_range[i].flag == flag) | ||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | /* Invalid */ | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max, | ||
184 | int index, unsigned flag) | ||
185 | { | ||
186 | if (cpu_reg_range[index].flag == flag) { | ||
187 | *min = cpu_reg_range[index].min; | ||
188 | *max = cpu_reg_range[index].max; | ||
189 | } else | ||
190 | *max = 0; | ||
191 | |||
192 | return *max; | ||
193 | } | ||
194 | |||
195 | /* This function can also be called with seq = NULL for printk */ | ||
196 | static void print_cpu_data(struct seq_file *seq, unsigned type, | ||
197 | u32 low, u32 high) | ||
198 | { | ||
199 | struct cpu_private *priv; | ||
200 | u64 val = high; | ||
201 | |||
202 | if (seq) { | ||
203 | priv = seq->private; | ||
204 | if (priv->file) { | ||
205 | val = (val << 32) | low; | ||
206 | seq_printf(seq, "0x%llx\n", val); | ||
207 | } else | ||
208 | seq_printf(seq, " %08x: %08x_%08x\n", | ||
209 | type, high, low); | ||
210 | } else | ||
211 | printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low); | ||
212 | } | ||
213 | |||
214 | /* This function can also be called with seq = NULL for printk */ | ||
215 | static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag) | ||
216 | { | ||
217 | unsigned msr, msr_min, msr_max; | ||
218 | struct cpu_private *priv; | ||
219 | u32 low, high; | ||
220 | int i; | ||
221 | |||
222 | if (seq) { | ||
223 | priv = seq->private; | ||
224 | if (priv->file) { | ||
225 | if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg, | ||
226 | &low, &high)) | ||
227 | print_cpu_data(seq, priv->reg, low, high); | ||
228 | return; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
233 | if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag)) | ||
234 | continue; | ||
235 | |||
236 | for (msr = msr_min; msr <= msr_max; msr++) { | ||
237 | if (rdmsr_safe_on_cpu(cpu, msr, &low, &high)) | ||
238 | continue; | ||
239 | print_cpu_data(seq, msr, low, high); | ||
240 | } | ||
241 | } | ||
242 | } | ||
243 | |||
244 | static void print_tss(void *arg) | ||
245 | { | ||
246 | struct pt_regs *regs = task_pt_regs(current); | ||
247 | struct seq_file *seq = arg; | ||
248 | unsigned int seg; | ||
249 | |||
250 | seq_printf(seq, " RAX\t: %016lx\n", regs->ax); | ||
251 | seq_printf(seq, " RBX\t: %016lx\n", regs->bx); | ||
252 | seq_printf(seq, " RCX\t: %016lx\n", regs->cx); | ||
253 | seq_printf(seq, " RDX\t: %016lx\n", regs->dx); | ||
254 | |||
255 | seq_printf(seq, " RSI\t: %016lx\n", regs->si); | ||
256 | seq_printf(seq, " RDI\t: %016lx\n", regs->di); | ||
257 | seq_printf(seq, " RBP\t: %016lx\n", regs->bp); | ||
258 | seq_printf(seq, " ESP\t: %016lx\n", regs->sp); | ||
259 | |||
260 | #ifdef CONFIG_X86_64 | ||
261 | seq_printf(seq, " R08\t: %016lx\n", regs->r8); | ||
262 | seq_printf(seq, " R09\t: %016lx\n", regs->r9); | ||
263 | seq_printf(seq, " R10\t: %016lx\n", regs->r10); | ||
264 | seq_printf(seq, " R11\t: %016lx\n", regs->r11); | ||
265 | seq_printf(seq, " R12\t: %016lx\n", regs->r12); | ||
266 | seq_printf(seq, " R13\t: %016lx\n", regs->r13); | ||
267 | seq_printf(seq, " R14\t: %016lx\n", regs->r14); | ||
268 | seq_printf(seq, " R15\t: %016lx\n", regs->r15); | ||
269 | #endif | ||
270 | |||
271 | asm("movl %%cs,%0" : "=r" (seg)); | ||
272 | seq_printf(seq, " CS\t: %04x\n", seg); | ||
273 | asm("movl %%ds,%0" : "=r" (seg)); | ||
274 | seq_printf(seq, " DS\t: %04x\n", seg); | ||
275 | seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff); | ||
276 | asm("movl %%es,%0" : "=r" (seg)); | ||
277 | seq_printf(seq, " ES\t: %04x\n", seg); | ||
278 | asm("movl %%fs,%0" : "=r" (seg)); | ||
279 | seq_printf(seq, " FS\t: %04x\n", seg); | ||
280 | asm("movl %%gs,%0" : "=r" (seg)); | ||
281 | seq_printf(seq, " GS\t: %04x\n", seg); | ||
282 | |||
283 | seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags); | ||
284 | |||
285 | seq_printf(seq, " EIP\t: %016lx\n", regs->ip); | ||
286 | } | ||
287 | |||
288 | static void print_cr(void *arg) | ||
289 | { | ||
290 | struct seq_file *seq = arg; | ||
291 | |||
292 | seq_printf(seq, " cr0\t: %016lx\n", read_cr0()); | ||
293 | seq_printf(seq, " cr2\t: %016lx\n", read_cr2()); | ||
294 | seq_printf(seq, " cr3\t: %016lx\n", read_cr3()); | ||
295 | seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe()); | ||
296 | #ifdef CONFIG_X86_64 | ||
297 | seq_printf(seq, " cr8\t: %016lx\n", read_cr8()); | ||
298 | #endif | ||
299 | } | ||
300 | |||
301 | static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt) | ||
302 | { | ||
303 | seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size)); | ||
304 | } | ||
305 | |||
306 | static void print_dt(void *seq) | ||
307 | { | ||
308 | struct desc_ptr dt; | ||
309 | unsigned long ldt; | ||
310 | |||
311 | /* IDT */ | ||
312 | store_idt((struct desc_ptr *)&dt); | ||
313 | print_desc_ptr("IDT", seq, dt); | ||
314 | |||
315 | /* GDT */ | ||
316 | store_gdt((struct desc_ptr *)&dt); | ||
317 | print_desc_ptr("GDT", seq, dt); | ||
318 | |||
319 | /* LDT */ | ||
320 | store_ldt(ldt); | ||
321 | seq_printf(seq, " LDT\t: %016lx\n", ldt); | ||
322 | |||
323 | /* TR */ | ||
324 | store_tr(ldt); | ||
325 | seq_printf(seq, " TR\t: %016lx\n", ldt); | ||
326 | } | ||
327 | |||
328 | static void print_dr(void *arg) | ||
329 | { | ||
330 | struct seq_file *seq = arg; | ||
331 | unsigned long dr; | ||
332 | int i; | ||
333 | |||
334 | for (i = 0; i < 8; i++) { | ||
335 | /* Ignore db4, db5 */ | ||
336 | if ((i == 4) || (i == 5)) | ||
337 | continue; | ||
338 | get_debugreg(dr, i); | ||
339 | seq_printf(seq, " dr%d\t: %016lx\n", i, dr); | ||
340 | } | ||
341 | |||
342 | seq_printf(seq, "\n MSR\t:\n"); | ||
343 | } | ||
344 | |||
345 | static void print_apic(void *arg) | ||
346 | { | ||
347 | struct seq_file *seq = arg; | ||
348 | |||
349 | #ifdef CONFIG_X86_LOCAL_APIC | ||
350 | seq_printf(seq, " LAPIC\t:\n"); | ||
351 | seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24); | ||
352 | seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR)); | ||
353 | seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI)); | ||
354 | seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI)); | ||
355 | seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI)); | ||
356 | seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR)); | ||
357 | seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR)); | ||
358 | seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV)); | ||
359 | seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR)); | ||
360 | seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR)); | ||
361 | seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR)); | ||
362 | seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2)); | ||
363 | seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT)); | ||
364 | seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR)); | ||
365 | seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC)); | ||
366 | seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0)); | ||
367 | seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1)); | ||
368 | seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR)); | ||
369 | seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); | ||
370 | seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); | ||
371 | seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); | ||
372 | if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { | ||
373 | unsigned int i, v, maxeilvt; | ||
374 | |||
375 | v = apic_read(APIC_EFEAT); | ||
376 | maxeilvt = (v >> 16) & 0xff; | ||
377 | seq_printf(seq, " EFEAT\t\t: %08x\n", v); | ||
378 | seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL)); | ||
379 | |||
380 | for (i = 0; i < maxeilvt; i++) { | ||
381 | v = apic_read(APIC_EILVTn(i)); | ||
382 | seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v); | ||
383 | } | ||
384 | } | ||
385 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
386 | seq_printf(seq, "\n MSR\t:\n"); | ||
387 | } | ||
388 | |||
389 | static int cpu_seq_show(struct seq_file *seq, void *v) | ||
390 | { | ||
391 | struct cpu_private *priv = seq->private; | ||
392 | |||
393 | if (priv == NULL) | ||
394 | return -EINVAL; | ||
395 | |||
396 | switch (cpu_base[priv->type].flag) { | ||
397 | case CPU_TSS: | ||
398 | smp_call_function_single(priv->cpu, print_tss, seq, 1); | ||
399 | break; | ||
400 | case CPU_CR: | ||
401 | smp_call_function_single(priv->cpu, print_cr, seq, 1); | ||
402 | break; | ||
403 | case CPU_DT: | ||
404 | smp_call_function_single(priv->cpu, print_dt, seq, 1); | ||
405 | break; | ||
406 | case CPU_DEBUG: | ||
407 | if (priv->file == CPU_INDEX_BIT) | ||
408 | smp_call_function_single(priv->cpu, print_dr, seq, 1); | ||
409 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
410 | break; | ||
411 | case CPU_APIC: | ||
412 | if (priv->file == CPU_INDEX_BIT) | ||
413 | smp_call_function_single(priv->cpu, print_apic, seq, 1); | ||
414 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
415 | break; | ||
416 | |||
417 | default: | ||
418 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
419 | break; | ||
420 | } | ||
421 | seq_printf(seq, "\n"); | ||
422 | |||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | static void *cpu_seq_start(struct seq_file *seq, loff_t *pos) | ||
427 | { | ||
428 | if (*pos == 0) /* One time is enough ;-) */ | ||
429 | return seq; | ||
430 | |||
431 | return NULL; | ||
432 | } | ||
433 | |||
434 | static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
435 | { | ||
436 | (*pos)++; | ||
437 | |||
438 | return cpu_seq_start(seq, pos); | ||
439 | } | ||
440 | |||
441 | static void cpu_seq_stop(struct seq_file *seq, void *v) | ||
442 | { | ||
443 | } | ||
444 | |||
445 | static const struct seq_operations cpu_seq_ops = { | ||
446 | .start = cpu_seq_start, | ||
447 | .next = cpu_seq_next, | ||
448 | .stop = cpu_seq_stop, | ||
449 | .show = cpu_seq_show, | ||
450 | }; | ||
451 | |||
452 | static int cpu_seq_open(struct inode *inode, struct file *file) | ||
453 | { | ||
454 | struct cpu_private *priv = inode->i_private; | ||
455 | struct seq_file *seq; | ||
456 | int err; | ||
457 | |||
458 | err = seq_open(file, &cpu_seq_ops); | ||
459 | if (!err) { | ||
460 | seq = file->private_data; | ||
461 | seq->private = priv; | ||
462 | } | ||
463 | |||
464 | return err; | ||
465 | } | ||
466 | |||
467 | static int write_msr(struct cpu_private *priv, u64 val) | ||
468 | { | ||
469 | u32 low, high; | ||
470 | |||
471 | high = (val >> 32) & 0xffffffff; | ||
472 | low = val & 0xffffffff; | ||
473 | |||
474 | if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high)) | ||
475 | return 0; | ||
476 | |||
477 | return -EPERM; | ||
478 | } | ||
479 | |||
480 | static int write_cpu_register(struct cpu_private *priv, const char *buf) | ||
481 | { | ||
482 | int ret = -EPERM; | ||
483 | u64 val; | ||
484 | |||
485 | ret = strict_strtoull(buf, 0, &val); | ||
486 | if (ret < 0) | ||
487 | return ret; | ||
488 | |||
489 | /* Supporting only MSRs */ | ||
490 | if (priv->type < CPU_TSS_BIT) | ||
491 | return write_msr(priv, val); | ||
492 | |||
493 | return ret; | ||
494 | } | ||
495 | |||
496 | static ssize_t cpu_write(struct file *file, const char __user *ubuf, | ||
497 | size_t count, loff_t *off) | ||
498 | { | ||
499 | struct seq_file *seq = file->private_data; | ||
500 | struct cpu_private *priv = seq->private; | ||
501 | char buf[19]; | ||
502 | |||
503 | if ((priv == NULL) || (count >= sizeof(buf))) | ||
504 | return -EINVAL; | ||
505 | |||
506 | if (copy_from_user(&buf, ubuf, count)) | ||
507 | return -EFAULT; | ||
508 | |||
509 | buf[count] = 0; | ||
510 | |||
511 | if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write)) | ||
512 | if (!write_cpu_register(priv, buf)) | ||
513 | return count; | ||
514 | |||
515 | return -EACCES; | ||
516 | } | ||
517 | |||
518 | static const struct file_operations cpu_fops = { | ||
519 | .owner = THIS_MODULE, | ||
520 | .open = cpu_seq_open, | ||
521 | .read = seq_read, | ||
522 | .write = cpu_write, | ||
523 | .llseek = seq_lseek, | ||
524 | .release = seq_release, | ||
525 | }; | ||
526 | |||
527 | static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, | ||
528 | unsigned file, struct dentry *dentry) | ||
529 | { | ||
530 | struct cpu_private *priv = NULL; | ||
531 | |||
532 | /* Already initialized */ | ||
533 | if (file == CPU_INDEX_BIT) | ||
534 | if (per_cpu(cpud_arr[type].init, cpu)) | ||
535 | return 0; | ||
536 | |||
537 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
538 | if (priv == NULL) | ||
539 | return -ENOMEM; | ||
540 | |||
541 | priv->cpu = cpu; | ||
542 | priv->type = type; | ||
543 | priv->reg = reg; | ||
544 | priv->file = file; | ||
545 | mutex_lock(&cpu_debug_lock); | ||
546 | per_cpu(cpud_priv_arr[type], cpu) = priv; | ||
547 | per_cpu(cpud_priv_count, cpu)++; | ||
548 | mutex_unlock(&cpu_debug_lock); | ||
549 | |||
550 | if (file) | ||
551 | debugfs_create_file(cpu_file[file].name, S_IRUGO, | ||
552 | dentry, (void *)priv, &cpu_fops); | ||
553 | else { | ||
554 | debugfs_create_file(cpu_base[type].name, S_IRUGO, | ||
555 | per_cpu(cpud_arr[type].dentry, cpu), | ||
556 | (void *)priv, &cpu_fops); | ||
557 | mutex_lock(&cpu_debug_lock); | ||
558 | per_cpu(cpud_arr[type].init, cpu) = 1; | ||
559 | mutex_unlock(&cpu_debug_lock); | ||
560 | } | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg, | ||
566 | struct dentry *dentry) | ||
567 | { | ||
568 | unsigned file; | ||
569 | int err = 0; | ||
570 | |||
571 | for (file = 0; file < ARRAY_SIZE(cpu_file); file++) { | ||
572 | err = cpu_create_file(cpu, type, reg, file, dentry); | ||
573 | if (err) | ||
574 | return err; | ||
575 | } | ||
576 | |||
577 | return err; | ||
578 | } | ||
579 | |||
580 | static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry) | ||
581 | { | ||
582 | struct dentry *cpu_dentry = NULL; | ||
583 | unsigned reg, reg_min, reg_max; | ||
584 | int i, err = 0; | ||
585 | char reg_dir[12]; | ||
586 | u32 low, high; | ||
587 | |||
588 | for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
589 | if (!get_cpu_range(cpu, &reg_min, &reg_max, i, | ||
590 | cpu_base[type].flag)) | ||
591 | continue; | ||
592 | |||
593 | for (reg = reg_min; reg <= reg_max; reg++) { | ||
594 | if (rdmsr_safe_on_cpu(cpu, reg, &low, &high)) | ||
595 | continue; | ||
596 | |||
597 | sprintf(reg_dir, "0x%x", reg); | ||
598 | cpu_dentry = debugfs_create_dir(reg_dir, dentry); | ||
599 | err = cpu_init_regfiles(cpu, type, reg, cpu_dentry); | ||
600 | if (err) | ||
601 | return err; | ||
602 | } | ||
603 | } | ||
604 | |||
605 | return err; | ||
606 | } | ||
607 | |||
608 | static int cpu_init_allreg(unsigned cpu, struct dentry *dentry) | ||
609 | { | ||
610 | struct dentry *cpu_dentry = NULL; | ||
611 | unsigned type; | ||
612 | int err = 0; | ||
613 | |||
614 | for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) { | ||
615 | if (!is_typeflag_valid(cpu, cpu_base[type].flag)) | ||
616 | continue; | ||
617 | cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); | ||
618 | per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry; | ||
619 | |||
620 | if (type < CPU_TSS_BIT) | ||
621 | err = cpu_init_msr(cpu, type, cpu_dentry); | ||
622 | else | ||
623 | err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT, | ||
624 | cpu_dentry); | ||
625 | if (err) | ||
626 | return err; | ||
627 | } | ||
628 | |||
629 | return err; | ||
630 | } | ||
631 | |||
632 | static int cpu_init_cpu(void) | ||
633 | { | ||
634 | struct dentry *cpu_dentry = NULL; | ||
635 | struct cpuinfo_x86 *cpui; | ||
636 | char cpu_dir[12]; | ||
637 | unsigned cpu; | ||
638 | int err = 0; | ||
639 | |||
640 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { | ||
641 | cpui = &cpu_data(cpu); | ||
642 | if (!cpu_has(cpui, X86_FEATURE_MSR)) | ||
643 | continue; | ||
644 | |||
645 | sprintf(cpu_dir, "cpu%d", cpu); | ||
646 | cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir); | ||
647 | err = cpu_init_allreg(cpu, cpu_dentry); | ||
648 | |||
649 | pr_info("cpu%d(%d) debug files %d\n", | ||
650 | cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu)); | ||
651 | if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) { | ||
652 | pr_err("Register files count %d exceeds limit %d\n", | ||
653 | per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES); | ||
654 | per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES; | ||
655 | err = -ENFILE; | ||
656 | } | ||
657 | if (err) | ||
658 | return err; | ||
659 | } | ||
660 | |||
661 | return err; | ||
662 | } | ||
663 | |||
664 | static int __init cpu_debug_init(void) | ||
665 | { | ||
666 | cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir); | ||
667 | |||
668 | return cpu_init_cpu(); | ||
669 | } | ||
670 | |||
671 | static void __exit cpu_debug_exit(void) | ||
672 | { | ||
673 | int i, cpu; | ||
674 | |||
675 | if (cpu_debugfs_dir) | ||
676 | debugfs_remove_recursive(cpu_debugfs_dir); | ||
677 | |||
678 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) | ||
679 | for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++) | ||
680 | kfree(per_cpu(cpud_priv_arr[i], cpu)); | ||
681 | } | ||
682 | |||
683 | module_init(cpu_debug_init); | ||
684 | module_exit(cpu_debug_exit); | ||
685 | |||
686 | MODULE_AUTHOR("Jaswinder Singh Rajput"); | ||
687 | MODULE_DESCRIPTION("CPU Debug module"); | ||
688 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index cb27fd6136c9..83e5e628de73 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -229,7 +229,7 @@ static void __exit cpuid_exit(void) | |||
229 | for_each_online_cpu(cpu) | 229 | for_each_online_cpu(cpu) |
230 | cpuid_device_destroy(cpu); | 230 | cpuid_device_destroy(cpu); |
231 | class_destroy(cpuid_class); | 231 | class_destroy(cpuid_class); |
232 | unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); | 232 | __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); |
233 | unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); | 233 | unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); |
234 | } | 234 | } |
235 | 235 | ||
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ba6e65884603..ad80a1c718c6 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -34,6 +34,8 @@ | |||
34 | */ | 34 | */ |
35 | unsigned long hpet_address; | 35 | unsigned long hpet_address; |
36 | u8 hpet_blockid; /* OS timer block num */ | 36 | u8 hpet_blockid; /* OS timer block num */ |
37 | u8 hpet_msi_disable; | ||
38 | |||
37 | #ifdef CONFIG_PCI_MSI | 39 | #ifdef CONFIG_PCI_MSI |
38 | static unsigned long hpet_num_timers; | 40 | static unsigned long hpet_num_timers; |
39 | #endif | 41 | #endif |
@@ -596,6 +598,9 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) | |||
596 | unsigned int num_timers_used = 0; | 598 | unsigned int num_timers_used = 0; |
597 | int i; | 599 | int i; |
598 | 600 | ||
601 | if (hpet_msi_disable) | ||
602 | return; | ||
603 | |||
599 | if (boot_cpu_has(X86_FEATURE_ARAT)) | 604 | if (boot_cpu_has(X86_FEATURE_ARAT)) |
600 | return; | 605 | return; |
601 | id = hpet_readl(HPET_ID); | 606 | id = hpet_readl(HPET_ID); |
@@ -928,6 +933,9 @@ static __init int hpet_late_init(void) | |||
928 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); | 933 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); |
929 | hpet_print_config(); | 934 | hpet_print_config(); |
930 | 935 | ||
936 | if (hpet_msi_disable) | ||
937 | return 0; | ||
938 | |||
931 | if (boot_cpu_has(X86_FEATURE_ARAT)) | 939 | if (boot_cpu_has(X86_FEATURE_ARAT)) |
932 | return 0; | 940 | return 0; |
933 | 941 | ||
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 37542b67c57e..e1af7c055c7d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -36,9 +36,6 @@ MODULE_LICENSE("GPL v2"); | |||
36 | #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 | 36 | #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 |
37 | #define UCODE_UCODE_TYPE 0x00000001 | 37 | #define UCODE_UCODE_TYPE 0x00000001 |
38 | 38 | ||
39 | const struct firmware *firmware; | ||
40 | static int supported_cpu; | ||
41 | |||
42 | struct equiv_cpu_entry { | 39 | struct equiv_cpu_entry { |
43 | u32 installed_cpu; | 40 | u32 installed_cpu; |
44 | u32 fixed_errata_mask; | 41 | u32 fixed_errata_mask; |
@@ -77,12 +74,15 @@ static struct equiv_cpu_entry *equiv_cpu_table; | |||
77 | 74 | ||
78 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | 75 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) |
79 | { | 76 | { |
77 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
80 | u32 dummy; | 78 | u32 dummy; |
81 | 79 | ||
82 | if (!supported_cpu) | ||
83 | return -1; | ||
84 | |||
85 | memset(csig, 0, sizeof(*csig)); | 80 | memset(csig, 0, sizeof(*csig)); |
81 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | ||
82 | pr_warning("microcode: CPU%d: AMD CPU family 0x%x not " | ||
83 | "supported\n", cpu, c->x86); | ||
84 | return -1; | ||
85 | } | ||
86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); | 86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); |
87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); | 87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); |
88 | return 0; | 88 | return 0; |
@@ -294,10 +294,14 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
294 | 294 | ||
295 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) | 295 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) |
296 | { | 296 | { |
297 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | ||
298 | const struct firmware *firmware; | ||
297 | enum ucode_state ret; | 299 | enum ucode_state ret; |
298 | 300 | ||
299 | if (firmware == NULL) | 301 | if (request_firmware(&firmware, fw_name, device)) { |
302 | printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); | ||
300 | return UCODE_NFOUND; | 303 | return UCODE_NFOUND; |
304 | } | ||
301 | 305 | ||
302 | if (*(u32 *)firmware->data != UCODE_MAGIC) { | 306 | if (*(u32 *)firmware->data != UCODE_MAGIC) { |
303 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", | 307 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", |
@@ -307,6 +311,8 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) | |||
307 | 311 | ||
308 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); | 312 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); |
309 | 313 | ||
314 | release_firmware(firmware); | ||
315 | |||
310 | return ret; | 316 | return ret; |
311 | } | 317 | } |
312 | 318 | ||
@@ -325,31 +331,7 @@ static void microcode_fini_cpu_amd(int cpu) | |||
325 | uci->mc = NULL; | 331 | uci->mc = NULL; |
326 | } | 332 | } |
327 | 333 | ||
328 | void init_microcode_amd(struct device *device) | ||
329 | { | ||
330 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | ||
331 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
332 | |||
333 | WARN_ON(c->x86_vendor != X86_VENDOR_AMD); | ||
334 | |||
335 | if (c->x86 < 0x10) { | ||
336 | pr_warning("AMD CPU family 0x%x not supported\n", c->x86); | ||
337 | return; | ||
338 | } | ||
339 | supported_cpu = 1; | ||
340 | |||
341 | if (request_firmware(&firmware, fw_name, device)) | ||
342 | pr_err("failed to load file %s\n", fw_name); | ||
343 | } | ||
344 | |||
345 | void fini_microcode_amd(void) | ||
346 | { | ||
347 | release_firmware(firmware); | ||
348 | } | ||
349 | |||
350 | static struct microcode_ops microcode_amd_ops = { | 334 | static struct microcode_ops microcode_amd_ops = { |
351 | .init = init_microcode_amd, | ||
352 | .fini = fini_microcode_amd, | ||
353 | .request_microcode_user = request_microcode_user, | 335 | .request_microcode_user = request_microcode_user, |
354 | .request_microcode_fw = request_microcode_fw, | 336 | .request_microcode_fw = request_microcode_fw, |
355 | .collect_cpu_info = collect_cpu_info_amd, | 337 | .collect_cpu_info = collect_cpu_info_amd, |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 0c8632433090..cceb5bc3c3c2 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -521,9 +521,6 @@ static int __init microcode_init(void) | |||
521 | return PTR_ERR(microcode_pdev); | 521 | return PTR_ERR(microcode_pdev); |
522 | } | 522 | } |
523 | 523 | ||
524 | if (microcode_ops->init) | ||
525 | microcode_ops->init(&microcode_pdev->dev); | ||
526 | |||
527 | get_online_cpus(); | 524 | get_online_cpus(); |
528 | mutex_lock(&microcode_mutex); | 525 | mutex_lock(&microcode_mutex); |
529 | 526 | ||
@@ -566,9 +563,6 @@ static void __exit microcode_exit(void) | |||
566 | 563 | ||
567 | platform_device_unregister(microcode_pdev); | 564 | platform_device_unregister(microcode_pdev); |
568 | 565 | ||
569 | if (microcode_ops->fini) | ||
570 | microcode_ops->fini(); | ||
571 | |||
572 | microcode_ops = NULL; | 566 | microcode_ops = NULL; |
573 | 567 | ||
574 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); | 568 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 4bd93c9b2b27..206735ac8cbd 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -285,7 +285,7 @@ static void __exit msr_exit(void) | |||
285 | for_each_online_cpu(cpu) | 285 | for_each_online_cpu(cpu) |
286 | msr_device_destroy(cpu); | 286 | msr_device_destroy(cpu); |
287 | class_destroy(msr_class); | 287 | class_destroy(msr_class); |
288 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); | 288 | __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); |
289 | unregister_hotcpu_notifier(&msr_class_cpu_notifier); | 289 | unregister_hotcpu_notifier(&msr_class_cpu_notifier); |
290 | } | 290 | } |
291 | 291 | ||
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 18093d7498f0..12e9feaa2f7a 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -491,6 +491,19 @@ void force_hpet_resume(void) | |||
491 | break; | 491 | break; |
492 | } | 492 | } |
493 | } | 493 | } |
494 | |||
495 | /* | ||
496 | * HPET MSI on some boards (ATI SB700/SB800) has side effect on | ||
497 | * floppy DMA. Disable HPET MSI on such platforms. | ||
498 | */ | ||
499 | static void force_disable_hpet_msi(struct pci_dev *unused) | ||
500 | { | ||
501 | hpet_msi_disable = 1; | ||
502 | } | ||
503 | |||
504 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, | ||
505 | force_disable_hpet_msi); | ||
506 | |||
494 | #endif | 507 | #endif |
495 | 508 | ||
496 | #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) | 509 | #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3063a0c4858b..ba8c045da782 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -373,6 +373,12 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |||
373 | if (unlikely(!apic_enabled(apic))) | 373 | if (unlikely(!apic_enabled(apic))) |
374 | break; | 374 | break; |
375 | 375 | ||
376 | if (trig_mode) { | ||
377 | apic_debug("level trig mode for vector %d", vector); | ||
378 | apic_set_vector(vector, apic->regs + APIC_TMR); | ||
379 | } else | ||
380 | apic_clear_vector(vector, apic->regs + APIC_TMR); | ||
381 | |||
376 | result = !apic_test_and_set_irr(vector, apic); | 382 | result = !apic_test_and_set_irr(vector, apic); |
377 | trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, | 383 | trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, |
378 | trig_mode, vector, !result); | 384 | trig_mode, vector, !result); |
@@ -383,11 +389,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |||
383 | break; | 389 | break; |
384 | } | 390 | } |
385 | 391 | ||
386 | if (trig_mode) { | ||
387 | apic_debug("level trig mode for vector %d", vector); | ||
388 | apic_set_vector(vector, apic->regs + APIC_TMR); | ||
389 | } else | ||
390 | apic_clear_vector(vector, apic->regs + APIC_TMR); | ||
391 | kvm_vcpu_kick(vcpu); | 392 | kvm_vcpu_kick(vcpu); |
392 | break; | 393 | break; |
393 | 394 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4c3e5b2314cb..89a49fb46a27 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -477,7 +477,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn) | |||
477 | 477 | ||
478 | addr = gfn_to_hva(kvm, gfn); | 478 | addr = gfn_to_hva(kvm, gfn); |
479 | if (kvm_is_error_hva(addr)) | 479 | if (kvm_is_error_hva(addr)) |
480 | return page_size; | 480 | return PT_PAGE_TABLE_LEVEL; |
481 | 481 | ||
482 | down_read(&current->mm->mmap_sem); | 482 | down_read(&current->mm->mmap_sem); |
483 | vma = find_vma(current->mm, addr); | 483 | vma = find_vma(current->mm, addr); |
@@ -515,11 +515,9 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) | |||
515 | if (host_level == PT_PAGE_TABLE_LEVEL) | 515 | if (host_level == PT_PAGE_TABLE_LEVEL) |
516 | return host_level; | 516 | return host_level; |
517 | 517 | ||
518 | for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) { | 518 | for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) |
519 | |||
520 | if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) | 519 | if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) |
521 | break; | 520 | break; |
522 | } | ||
523 | 521 | ||
524 | return level - 1; | 522 | return level - 1; |
525 | } | 523 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 58a0f1e88596..ede2131a9225 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -150,7 +150,9 @@ walk: | |||
150 | walker->table_gfn[walker->level - 1] = table_gfn; | 150 | walker->table_gfn[walker->level - 1] = table_gfn; |
151 | walker->pte_gpa[walker->level - 1] = pte_gpa; | 151 | walker->pte_gpa[walker->level - 1] = pte_gpa; |
152 | 152 | ||
153 | kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)); | 153 | if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) |
154 | goto not_present; | ||
155 | |||
154 | trace_kvm_mmu_paging_element(pte, walker->level); | 156 | trace_kvm_mmu_paging_element(pte, walker->level); |
155 | 157 | ||
156 | if (!is_present_gpte(pte)) | 158 | if (!is_present_gpte(pte)) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6651dbf58675..1ddcad452add 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5072,12 +5072,13 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
5072 | GFP_KERNEL); | 5072 | GFP_KERNEL); |
5073 | if (!vcpu->arch.mce_banks) { | 5073 | if (!vcpu->arch.mce_banks) { |
5074 | r = -ENOMEM; | 5074 | r = -ENOMEM; |
5075 | goto fail_mmu_destroy; | 5075 | goto fail_free_lapic; |
5076 | } | 5076 | } |
5077 | vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; | 5077 | vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; |
5078 | 5078 | ||
5079 | return 0; | 5079 | return 0; |
5080 | 5080 | fail_free_lapic: | |
5081 | kvm_free_lapic(vcpu); | ||
5081 | fail_mmu_destroy: | 5082 | fail_mmu_destroy: |
5082 | kvm_mmu_destroy(vcpu); | 5083 | kvm_mmu_destroy(vcpu); |
5083 | fail_free_pio_data: | 5084 | fail_free_pio_data: |
@@ -5088,6 +5089,7 @@ fail: | |||
5088 | 5089 | ||
5089 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 5090 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
5090 | { | 5091 | { |
5092 | kfree(vcpu->arch.mce_banks); | ||
5091 | kvm_free_lapic(vcpu); | 5093 | kvm_free_lapic(vcpu); |
5092 | down_read(&vcpu->kvm->slots_lock); | 5094 | down_read(&vcpu->kvm->slots_lock); |
5093 | kvm_mmu_destroy(vcpu); | 5095 | kvm_mmu_destroy(vcpu); |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index a27124185fc1..28c68762648f 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -229,9 +229,11 @@ update_nodes_add(int node, unsigned long start, unsigned long end) | |||
229 | printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); | 229 | printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); |
230 | } | 230 | } |
231 | 231 | ||
232 | if (changed) | 232 | if (changed) { |
233 | node_set(node, cpu_nodes_parsed); | ||
233 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", | 234 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", |
234 | nd->start, nd->end); | 235 | nd->start, nd->end); |
236 | } | ||
235 | } | 237 | } |
236 | 238 | ||
237 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ | 239 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ |
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 7083bcc1b9c7..5045156c5313 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c | |||
@@ -57,6 +57,8 @@ static LIST_HEAD(descriptor_list); | |||
57 | static int descriptor_count; | 57 | static int descriptor_count; |
58 | 58 | ||
59 | static __be32 tmp_config_rom[256]; | 59 | static __be32 tmp_config_rom[256]; |
60 | /* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ | ||
61 | static size_t config_rom_length = 1 + 4 + 1 + 1; | ||
60 | 62 | ||
61 | #define BIB_CRC(v) ((v) << 0) | 63 | #define BIB_CRC(v) ((v) << 0) |
62 | #define BIB_CRC_LENGTH(v) ((v) << 16) | 64 | #define BIB_CRC_LENGTH(v) ((v) << 16) |
@@ -73,7 +75,7 @@ static __be32 tmp_config_rom[256]; | |||
73 | #define BIB_CMC ((1) << 30) | 75 | #define BIB_CMC ((1) << 30) |
74 | #define BIB_IMC ((1) << 31) | 76 | #define BIB_IMC ((1) << 31) |
75 | 77 | ||
76 | static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom) | 78 | static void generate_config_rom(struct fw_card *card, __be32 *config_rom) |
77 | { | 79 | { |
78 | struct fw_descriptor *desc; | 80 | struct fw_descriptor *desc; |
79 | int i, j, k, length; | 81 | int i, j, k, length; |
@@ -130,23 +132,30 @@ static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom) | |||
130 | for (i = 0; i < j; i += length + 1) | 132 | for (i = 0; i < j; i += length + 1) |
131 | length = fw_compute_block_crc(config_rom + i); | 133 | length = fw_compute_block_crc(config_rom + i); |
132 | 134 | ||
133 | return j; | 135 | WARN_ON(j != config_rom_length); |
134 | } | 136 | } |
135 | 137 | ||
136 | static void update_config_roms(void) | 138 | static void update_config_roms(void) |
137 | { | 139 | { |
138 | struct fw_card *card; | 140 | struct fw_card *card; |
139 | size_t length; | ||
140 | 141 | ||
141 | list_for_each_entry (card, &card_list, link) { | 142 | list_for_each_entry (card, &card_list, link) { |
142 | length = generate_config_rom(card, tmp_config_rom); | 143 | generate_config_rom(card, tmp_config_rom); |
143 | card->driver->set_config_rom(card, tmp_config_rom, length); | 144 | card->driver->set_config_rom(card, tmp_config_rom, |
145 | config_rom_length); | ||
144 | } | 146 | } |
145 | } | 147 | } |
146 | 148 | ||
149 | static size_t required_space(struct fw_descriptor *desc) | ||
150 | { | ||
151 | /* descriptor + entry into root dir + optional immediate entry */ | ||
152 | return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); | ||
153 | } | ||
154 | |||
147 | int fw_core_add_descriptor(struct fw_descriptor *desc) | 155 | int fw_core_add_descriptor(struct fw_descriptor *desc) |
148 | { | 156 | { |
149 | size_t i; | 157 | size_t i; |
158 | int ret; | ||
150 | 159 | ||
151 | /* | 160 | /* |
152 | * Check descriptor is valid; the length of all blocks in the | 161 | * Check descriptor is valid; the length of all blocks in the |
@@ -162,15 +171,21 @@ int fw_core_add_descriptor(struct fw_descriptor *desc) | |||
162 | 171 | ||
163 | mutex_lock(&card_mutex); | 172 | mutex_lock(&card_mutex); |
164 | 173 | ||
165 | list_add_tail(&desc->link, &descriptor_list); | 174 | if (config_rom_length + required_space(desc) > 256) { |
166 | descriptor_count++; | 175 | ret = -EBUSY; |
167 | if (desc->immediate > 0) | 176 | } else { |
177 | list_add_tail(&desc->link, &descriptor_list); | ||
178 | config_rom_length += required_space(desc); | ||
168 | descriptor_count++; | 179 | descriptor_count++; |
169 | update_config_roms(); | 180 | if (desc->immediate > 0) |
181 | descriptor_count++; | ||
182 | update_config_roms(); | ||
183 | ret = 0; | ||
184 | } | ||
170 | 185 | ||
171 | mutex_unlock(&card_mutex); | 186 | mutex_unlock(&card_mutex); |
172 | 187 | ||
173 | return 0; | 188 | return ret; |
174 | } | 189 | } |
175 | EXPORT_SYMBOL(fw_core_add_descriptor); | 190 | EXPORT_SYMBOL(fw_core_add_descriptor); |
176 | 191 | ||
@@ -179,6 +194,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) | |||
179 | mutex_lock(&card_mutex); | 194 | mutex_lock(&card_mutex); |
180 | 195 | ||
181 | list_del(&desc->link); | 196 | list_del(&desc->link); |
197 | config_rom_length -= required_space(desc); | ||
182 | descriptor_count--; | 198 | descriptor_count--; |
183 | if (desc->immediate > 0) | 199 | if (desc->immediate > 0) |
184 | descriptor_count--; | 200 | descriptor_count--; |
@@ -428,7 +444,6 @@ EXPORT_SYMBOL(fw_card_initialize); | |||
428 | int fw_card_add(struct fw_card *card, | 444 | int fw_card_add(struct fw_card *card, |
429 | u32 max_receive, u32 link_speed, u64 guid) | 445 | u32 max_receive, u32 link_speed, u64 guid) |
430 | { | 446 | { |
431 | size_t length; | ||
432 | int ret; | 447 | int ret; |
433 | 448 | ||
434 | card->max_receive = max_receive; | 449 | card->max_receive = max_receive; |
@@ -437,8 +452,8 @@ int fw_card_add(struct fw_card *card, | |||
437 | 452 | ||
438 | mutex_lock(&card_mutex); | 453 | mutex_lock(&card_mutex); |
439 | 454 | ||
440 | length = generate_config_rom(card, tmp_config_rom); | 455 | generate_config_rom(card, tmp_config_rom); |
441 | ret = card->driver->enable(card, tmp_config_rom, length); | 456 | ret = card->driver->enable(card, tmp_config_rom, config_rom_length); |
442 | if (ret == 0) | 457 | if (ret == 0) |
443 | list_add_tail(&card->link, &card_list); | 458 | list_add_tail(&card->link, &card_list); |
444 | 459 | ||
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index e6d63849e78e..4eeaed57e219 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/preempt.h> | 35 | #include <linux/preempt.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/string.h> | ||
38 | #include <linux/time.h> | 39 | #include <linux/time.h> |
39 | #include <linux/uaccess.h> | 40 | #include <linux/uaccess.h> |
40 | #include <linux/vmalloc.h> | 41 | #include <linux/vmalloc.h> |
@@ -595,13 +596,20 @@ static int ioctl_send_request(struct client *client, void *buffer) | |||
595 | client->device->max_speed); | 596 | client->device->max_speed); |
596 | } | 597 | } |
597 | 598 | ||
599 | static inline bool is_fcp_request(struct fw_request *request) | ||
600 | { | ||
601 | return request == NULL; | ||
602 | } | ||
603 | |||
598 | static void release_request(struct client *client, | 604 | static void release_request(struct client *client, |
599 | struct client_resource *resource) | 605 | struct client_resource *resource) |
600 | { | 606 | { |
601 | struct inbound_transaction_resource *r = container_of(resource, | 607 | struct inbound_transaction_resource *r = container_of(resource, |
602 | struct inbound_transaction_resource, resource); | 608 | struct inbound_transaction_resource, resource); |
603 | 609 | ||
604 | if (r->request) | 610 | if (is_fcp_request(r->request)) |
611 | kfree(r->data); | ||
612 | else | ||
605 | fw_send_response(client->device->card, r->request, | 613 | fw_send_response(client->device->card, r->request, |
606 | RCODE_CONFLICT_ERROR); | 614 | RCODE_CONFLICT_ERROR); |
607 | kfree(r); | 615 | kfree(r); |
@@ -616,6 +624,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
616 | struct address_handler_resource *handler = callback_data; | 624 | struct address_handler_resource *handler = callback_data; |
617 | struct inbound_transaction_resource *r; | 625 | struct inbound_transaction_resource *r; |
618 | struct inbound_transaction_event *e; | 626 | struct inbound_transaction_event *e; |
627 | void *fcp_frame = NULL; | ||
619 | int ret; | 628 | int ret; |
620 | 629 | ||
621 | r = kmalloc(sizeof(*r), GFP_ATOMIC); | 630 | r = kmalloc(sizeof(*r), GFP_ATOMIC); |
@@ -627,6 +636,18 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
627 | r->data = payload; | 636 | r->data = payload; |
628 | r->length = length; | 637 | r->length = length; |
629 | 638 | ||
639 | if (is_fcp_request(request)) { | ||
640 | /* | ||
641 | * FIXME: Let core-transaction.c manage a | ||
642 | * single reference-counted copy? | ||
643 | */ | ||
644 | fcp_frame = kmemdup(payload, length, GFP_ATOMIC); | ||
645 | if (fcp_frame == NULL) | ||
646 | goto failed; | ||
647 | |||
648 | r->data = fcp_frame; | ||
649 | } | ||
650 | |||
630 | r->resource.release = release_request; | 651 | r->resource.release = release_request; |
631 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); | 652 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); |
632 | if (ret < 0) | 653 | if (ret < 0) |
@@ -640,13 +661,15 @@ static void handle_request(struct fw_card *card, struct fw_request *request, | |||
640 | e->request.closure = handler->closure; | 661 | e->request.closure = handler->closure; |
641 | 662 | ||
642 | queue_event(handler->client, &e->event, | 663 | queue_event(handler->client, &e->event, |
643 | &e->request, sizeof(e->request), payload, length); | 664 | &e->request, sizeof(e->request), r->data, length); |
644 | return; | 665 | return; |
645 | 666 | ||
646 | failed: | 667 | failed: |
647 | kfree(r); | 668 | kfree(r); |
648 | kfree(e); | 669 | kfree(e); |
649 | if (request) | 670 | kfree(fcp_frame); |
671 | |||
672 | if (!is_fcp_request(request)) | ||
650 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | 673 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); |
651 | } | 674 | } |
652 | 675 | ||
@@ -717,18 +740,17 @@ static int ioctl_send_response(struct client *client, void *buffer) | |||
717 | 740 | ||
718 | r = container_of(resource, struct inbound_transaction_resource, | 741 | r = container_of(resource, struct inbound_transaction_resource, |
719 | resource); | 742 | resource); |
720 | if (r->request) { | 743 | if (is_fcp_request(r->request)) |
721 | if (request->length < r->length) | 744 | goto out; |
722 | r->length = request->length; | 745 | |
723 | if (copy_from_user(r->data, u64_to_uptr(request->data), | 746 | if (request->length < r->length) |
724 | r->length)) { | 747 | r->length = request->length; |
725 | ret = -EFAULT; | 748 | if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { |
726 | kfree(r->request); | 749 | ret = -EFAULT; |
727 | goto out; | 750 | kfree(r->request); |
728 | } | 751 | goto out; |
729 | fw_send_response(client->device->card, r->request, | ||
730 | request->rcode); | ||
731 | } | 752 | } |
753 | fw_send_response(client->device->card, r->request, request->rcode); | ||
732 | out: | 754 | out: |
733 | kfree(r); | 755 | kfree(r); |
734 | 756 | ||
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index a61571c63c59..2345d4103fe6 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -2420,6 +2420,7 @@ static void ohci_pmac_off(struct pci_dev *dev) | |||
2420 | 2420 | ||
2421 | #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT | 2421 | #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT |
2422 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 | 2422 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 |
2423 | #define PCI_DEVICE_ID_TI_TSB43AB23 0x8024 | ||
2423 | 2424 | ||
2424 | static int __devinit pci_probe(struct pci_dev *dev, | 2425 | static int __devinit pci_probe(struct pci_dev *dev, |
2425 | const struct pci_device_id *ent) | 2426 | const struct pci_device_id *ent) |
@@ -2488,7 +2489,8 @@ static int __devinit pci_probe(struct pci_dev *dev, | |||
2488 | #if !defined(CONFIG_X86_32) | 2489 | #if !defined(CONFIG_X86_32) |
2489 | /* dual-buffer mode is broken with descriptor addresses above 2G */ | 2490 | /* dual-buffer mode is broken with descriptor addresses above 2G */ |
2490 | if (dev->vendor == PCI_VENDOR_ID_TI && | 2491 | if (dev->vendor == PCI_VENDOR_ID_TI && |
2491 | dev->device == PCI_DEVICE_ID_TI_TSB43AB22) | 2492 | (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 || |
2493 | dev->device == PCI_DEVICE_ID_TI_TSB43AB23)) | ||
2492 | ohci->use_dualbuffer = false; | 2494 | ohci->use_dualbuffer = false; |
2493 | #endif | 2495 | #endif |
2494 | 2496 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index defcaf108460..f665b05592f3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -633,8 +633,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
633 | return NULL; | 633 | return NULL; |
634 | } | 634 | } |
635 | if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { | 635 | if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { |
636 | printk(KERN_WARNING "integrated sync not supported\n"); | 636 | printk(KERN_WARNING "composite sync not supported\n"); |
637 | return NULL; | ||
638 | } | 637 | } |
639 | 638 | ||
640 | /* it is incorrect if hsync/vsync width is zero */ | 639 | /* it is incorrect if hsync/vsync width is zero */ |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1c2b7d44ec05..0f9e90552dc4 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info) | |||
389 | break; | 389 | break; |
390 | /* Display: Off; HSync: On, VSync: On */ | 390 | /* Display: Off; HSync: On, VSync: On */ |
391 | case FB_BLANK_NORMAL: | 391 | case FB_BLANK_NORMAL: |
392 | drm_fb_helper_off(info, DRM_MODE_DPMS_ON); | 392 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); |
393 | break; | 393 | break; |
394 | /* Display: Off; HSync: Off, VSync: On */ | 394 | /* Display: Off; HSync: Off, VSync: On */ |
395 | case FB_BLANK_HSYNC_SUSPEND: | 395 | case FB_BLANK_HSYNC_SUSPEND: |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index e9dbb481c469..8bf3770f294e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) | |||
142 | if (IS_ERR(obj->filp)) | 142 | if (IS_ERR(obj->filp)) |
143 | goto free; | 143 | goto free; |
144 | 144 | ||
145 | /* Basically we want to disable the OOM killer and handle ENOMEM | ||
146 | * ourselves by sacrificing pages from cached buffers. | ||
147 | * XXX shmem_file_[gs]et_gfp_mask() | ||
148 | */ | ||
149 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, | ||
150 | GFP_HIGHUSER | | ||
151 | __GFP_COLD | | ||
152 | __GFP_FS | | ||
153 | __GFP_RECLAIMABLE | | ||
154 | __GFP_NORETRY | | ||
155 | __GFP_NOWARN | | ||
156 | __GFP_NOMEMALLOC); | ||
157 | |||
158 | kref_init(&obj->refcount); | 145 | kref_init(&obj->refcount); |
159 | kref_init(&obj->handlecount); | 146 | kref_init(&obj->handlecount); |
160 | obj->size = size; | 147 | obj->size = size; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9c9998c4dceb..a894ade03093 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) | |||
290 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | 290 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { |
291 | obj = obj_priv->obj; | 291 | obj = obj_priv->obj; |
292 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { | 292 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { |
293 | ret = i915_gem_object_get_pages(obj); | 293 | ret = i915_gem_object_get_pages(obj, 0); |
294 | if (ret) { | 294 | if (ret) { |
295 | DRM_ERROR("Failed to get pages: %d\n", ret); | 295 | DRM_ERROR("Failed to get pages: %d\n", ret); |
296 | spin_unlock(&dev_priv->mm.active_list_lock); | 296 | spin_unlock(&dev_priv->mm.active_list_lock); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c1669488b5a..aaf934d96f21 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -872,7 +872,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev, | |||
872 | void i915_gem_detach_phys_object(struct drm_device *dev, | 872 | void i915_gem_detach_phys_object(struct drm_device *dev, |
873 | struct drm_gem_object *obj); | 873 | struct drm_gem_object *obj); |
874 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 874 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
875 | int i915_gem_object_get_pages(struct drm_gem_object *obj); | 875 | int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); |
876 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | 876 | void i915_gem_object_put_pages(struct drm_gem_object *obj); |
877 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | 877 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); |
878 | void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); | 878 | void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0c67924ca80c..dda787aafcc6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
277 | 277 | ||
278 | mutex_lock(&dev->struct_mutex); | 278 | mutex_lock(&dev->struct_mutex); |
279 | 279 | ||
280 | ret = i915_gem_object_get_pages(obj); | 280 | ret = i915_gem_object_get_pages(obj, 0); |
281 | if (ret != 0) | 281 | if (ret != 0) |
282 | goto fail_unlock; | 282 | goto fail_unlock; |
283 | 283 | ||
@@ -321,40 +321,24 @@ fail_unlock: | |||
321 | return ret; | 321 | return ret; |
322 | } | 322 | } |
323 | 323 | ||
324 | static inline gfp_t | ||
325 | i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) | ||
326 | { | ||
327 | return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); | ||
328 | } | ||
329 | |||
330 | static inline void | ||
331 | i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) | ||
332 | { | ||
333 | mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); | ||
334 | } | ||
335 | |||
336 | static int | 324 | static int |
337 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | 325 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) |
338 | { | 326 | { |
339 | int ret; | 327 | int ret; |
340 | 328 | ||
341 | ret = i915_gem_object_get_pages(obj); | 329 | ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); |
342 | 330 | ||
343 | /* If we've insufficient memory to map in the pages, attempt | 331 | /* If we've insufficient memory to map in the pages, attempt |
344 | * to make some space by throwing out some old buffers. | 332 | * to make some space by throwing out some old buffers. |
345 | */ | 333 | */ |
346 | if (ret == -ENOMEM) { | 334 | if (ret == -ENOMEM) { |
347 | struct drm_device *dev = obj->dev; | 335 | struct drm_device *dev = obj->dev; |
348 | gfp_t gfp; | ||
349 | 336 | ||
350 | ret = i915_gem_evict_something(dev, obj->size); | 337 | ret = i915_gem_evict_something(dev, obj->size); |
351 | if (ret) | 338 | if (ret) |
352 | return ret; | 339 | return ret; |
353 | 340 | ||
354 | gfp = i915_gem_object_get_page_gfp_mask(obj); | 341 | ret = i915_gem_object_get_pages(obj, 0); |
355 | i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); | ||
356 | ret = i915_gem_object_get_pages(obj); | ||
357 | i915_gem_object_set_page_gfp_mask (obj, gfp); | ||
358 | } | 342 | } |
359 | 343 | ||
360 | return ret; | 344 | return ret; |
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
790 | 774 | ||
791 | mutex_lock(&dev->struct_mutex); | 775 | mutex_lock(&dev->struct_mutex); |
792 | 776 | ||
793 | ret = i915_gem_object_get_pages(obj); | 777 | ret = i915_gem_object_get_pages(obj, 0); |
794 | if (ret != 0) | 778 | if (ret != 0) |
795 | goto fail_unlock; | 779 | goto fail_unlock; |
796 | 780 | ||
@@ -2230,7 +2214,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2230 | } | 2214 | } |
2231 | 2215 | ||
2232 | int | 2216 | int |
2233 | i915_gem_object_get_pages(struct drm_gem_object *obj) | 2217 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2218 | gfp_t gfpmask) | ||
2234 | { | 2219 | { |
2235 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2220 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2236 | int page_count, i; | 2221 | int page_count, i; |
@@ -2256,7 +2241,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2256 | inode = obj->filp->f_path.dentry->d_inode; | 2241 | inode = obj->filp->f_path.dentry->d_inode; |
2257 | mapping = inode->i_mapping; | 2242 | mapping = inode->i_mapping; |
2258 | for (i = 0; i < page_count; i++) { | 2243 | for (i = 0; i < page_count; i++) { |
2259 | page = read_mapping_page(mapping, i, NULL); | 2244 | page = read_cache_page_gfp(mapping, i, |
2245 | mapping_gfp_mask (mapping) | | ||
2246 | __GFP_COLD | | ||
2247 | gfpmask); | ||
2260 | if (IS_ERR(page)) { | 2248 | if (IS_ERR(page)) { |
2261 | ret = PTR_ERR(page); | 2249 | ret = PTR_ERR(page); |
2262 | i915_gem_object_put_pages(obj); | 2250 | i915_gem_object_put_pages(obj); |
@@ -2579,7 +2567,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2579 | drm_i915_private_t *dev_priv = dev->dev_private; | 2567 | drm_i915_private_t *dev_priv = dev->dev_private; |
2580 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2568 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2581 | struct drm_mm_node *free_space; | 2569 | struct drm_mm_node *free_space; |
2582 | bool retry_alloc = false; | 2570 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2583 | int ret; | 2571 | int ret; |
2584 | 2572 | ||
2585 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2573 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
@@ -2623,15 +2611,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2623 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 2611 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2624 | obj->size, obj_priv->gtt_offset); | 2612 | obj->size, obj_priv->gtt_offset); |
2625 | #endif | 2613 | #endif |
2626 | if (retry_alloc) { | 2614 | ret = i915_gem_object_get_pages(obj, gfpmask); |
2627 | i915_gem_object_set_page_gfp_mask (obj, | ||
2628 | i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
2629 | } | ||
2630 | ret = i915_gem_object_get_pages(obj); | ||
2631 | if (retry_alloc) { | ||
2632 | i915_gem_object_set_page_gfp_mask (obj, | ||
2633 | i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
2634 | } | ||
2635 | if (ret) { | 2615 | if (ret) { |
2636 | drm_mm_put_block(obj_priv->gtt_space); | 2616 | drm_mm_put_block(obj_priv->gtt_space); |
2637 | obj_priv->gtt_space = NULL; | 2617 | obj_priv->gtt_space = NULL; |
@@ -2641,9 +2621,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2641 | ret = i915_gem_evict_something(dev, obj->size); | 2621 | ret = i915_gem_evict_something(dev, obj->size); |
2642 | if (ret) { | 2622 | if (ret) { |
2643 | /* now try to shrink everyone else */ | 2623 | /* now try to shrink everyone else */ |
2644 | if (! retry_alloc) { | 2624 | if (gfpmask) { |
2645 | retry_alloc = true; | 2625 | gfpmask = 0; |
2646 | goto search_free; | 2626 | goto search_free; |
2647 | } | 2627 | } |
2648 | 2628 | ||
2649 | return ret; | 2629 | return ret; |
@@ -4946,7 +4926,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4946 | if (!obj_priv->phys_obj) | 4926 | if (!obj_priv->phys_obj) |
4947 | return; | 4927 | return; |
4948 | 4928 | ||
4949 | ret = i915_gem_object_get_pages(obj); | 4929 | ret = i915_gem_object_get_pages(obj, 0); |
4950 | if (ret) | 4930 | if (ret) |
4951 | goto out; | 4931 | goto out; |
4952 | 4932 | ||
@@ -5004,7 +4984,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
5004 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 4984 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
5005 | obj_priv->phys_obj->cur_obj = obj; | 4985 | obj_priv->phys_obj->cur_obj = obj; |
5006 | 4986 | ||
5007 | ret = i915_gem_object_get_pages(obj); | 4987 | ret = i915_gem_object_get_pages(obj, 0); |
5008 | if (ret) { | 4988 | if (ret) { |
5009 | DRM_ERROR("failed to get page list\n"); | 4989 | DRM_ERROR("failed to get page list\n"); |
5010 | goto out; | 4990 | goto out; |
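The i915 hunks above replace the old trick of temporarily editing the mapping's gfp mask with an explicit gfpmask argument: the first attempt passes __GFP_NORETRY | __GFP_NOWARN, and only after evicting something is the call repeated with the default mask. A rough userspace analogue of that fallback order (illustrative only; reclaim_some_memory() stands in for the eviction step, and the two malloc() calls merely mark where the cheap and full-effort attempts would go):

#include <stdio.h>
#include <stdlib.h>

static int reclaim_some_memory(void)
{
        /* stand-in for i915_gem_evict_something(): free old buffers,
         * drop caches, etc.  Return 0 if anything was reclaimed. */
        return 0;
}

/* Try a "cheap" allocation that is allowed to fail quickly; fall back
 * to the expensive retry only after reclaiming memory. */
static void *alloc_with_fallback(size_t size)
{
        void *p = malloc(size);         /* cheap attempt (~ __GFP_NORETRY) */
        if (p)
                return p;

        if (reclaim_some_memory() != 0)
                return NULL;

        return malloc(size);            /* full-effort retry (~ gfpmask 0) */
}

int main(void)
{
        void *p = alloc_with_fallback(1 << 20);

        printf("allocation %s\n", p ? "succeeded" : "failed");
        free(p);
        return 0;
}

The point of the ordering is that the common case never pays for reclaim, and the retry only escalates after some space has actually been made.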
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index ba143972769f..d7f8d8b4a4b8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg) | |||
310 | struct drm_device *dev = bios->dev; | 310 | struct drm_device *dev = bios->dev; |
311 | 311 | ||
312 | /* C51 has misaligned regs on purpose. Marvellous */ | 312 | /* C51 has misaligned regs on purpose. Marvellous */ |
313 | if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) { | 313 | if (reg & 0x2 || |
314 | NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n", | 314 | (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) |
315 | reg); | 315 | NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg); |
316 | return 0; | 316 | |
317 | } | 317 | /* warn on C51 regs that haven't been verified accessible in tracing */ |
318 | /* | ||
319 | * Warn on C51 regs that have not been verified accessible in | ||
320 | * mmiotracing | ||
321 | */ | ||
322 | if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && | 318 | if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && |
323 | reg != 0x130d && reg != 0x1311 && reg != 0x60081d) | 319 | reg != 0x130d && reg != 0x1311 && reg != 0x60081d) |
324 | NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", | 320 | NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", |
325 | reg); | 321 | reg); |
326 | 322 | ||
327 | /* Trust the init scripts on G80 */ | 323 | if (reg >= (8*1024*1024)) { |
328 | if (dev_priv->card_type >= NV_50) | 324 | NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg); |
329 | return 1; | 325 | return 0; |
330 | |||
331 | #define WITHIN(x, y, z) ((x >= y) && (x < y + z)) | ||
332 | if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE)) | ||
333 | return 1; | ||
334 | if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE)) | ||
335 | return 1; | ||
336 | if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE)) | ||
337 | return 1; | ||
338 | if (dev_priv->VBIOS.pub.chip_version >= 0x30 && | ||
339 | (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600)) | ||
340 | return 1; | ||
341 | if (dev_priv->VBIOS.pub.chip_version >= 0x40 && | ||
342 | WITHIN(reg, 0xc000, 0x48)) | ||
343 | return 1; | ||
344 | if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204) | ||
345 | return 1; | ||
346 | if (dev_priv->VBIOS.pub.chip_version >= 0x40) { | ||
347 | if (reg == 0x00011014 || reg == 0x00020328) | ||
348 | return 1; | ||
349 | if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */ | ||
350 | return 1; | ||
351 | } | 326 | } |
352 | if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE)) | ||
353 | return 1; | ||
354 | if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE)) | ||
355 | return 1; | ||
356 | if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2)) | ||
357 | return 1; | ||
358 | if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2)) | ||
359 | return 1; | ||
360 | if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0) | ||
361 | return 1; | ||
362 | if (dev_priv->VBIOS.pub.chip_version == 0x51 && | ||
363 | WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE)) | ||
364 | return 1; | ||
365 | #undef WITHIN | ||
366 | 327 | ||
367 | NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg); | 328 | return 1; |
368 | |||
369 | return 0; | ||
370 | } | 329 | } |
371 | 330 | ||
372 | static bool | 331 | static bool |
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr | |||
3196 | } | 3155 | } |
3197 | #ifdef __powerpc__ | 3156 | #ifdef __powerpc__ |
3198 | /* Powerbook specific quirks */ | 3157 | /* Powerbook specific quirks */ |
3199 | if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329)) | 3158 | if ((dev->pci_device & 0xffff) == 0x0179 || |
3200 | nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); | 3159 | (dev->pci_device & 0xffff) == 0x0189 || |
3201 | if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) { | 3160 | (dev->pci_device & 0xffff) == 0x0329) { |
3202 | if (script == LVDS_PANEL_ON) { | 3161 | if (script == LVDS_RESET) { |
3203 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31)); | 3162 | nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); |
3204 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); | 3163 | |
3205 | } | 3164 | } else if (script == LVDS_PANEL_ON) { |
3206 | if (script == LVDS_PANEL_OFF) { | 3165 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, |
3207 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31)); | 3166 | bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) |
3208 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); | 3167 | | (1 << 31)); |
3168 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, | ||
3169 | bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); | ||
3170 | |||
3171 | } else if (script == LVDS_PANEL_OFF) { | ||
3172 | bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, | ||
3173 | bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | ||
3174 | & ~(1 << 31)); | ||
3175 | bios_wr32(bios, NV_PCRTC_GPIO_EXT, | ||
3176 | bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); | ||
3209 | } | 3177 | } |
3210 | } | 3178 | } |
3211 | #endif | 3179 | #endif |
@@ -5434,52 +5402,49 @@ static bool | |||
5434 | parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, | 5402 | parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, |
5435 | uint32_t conn, uint32_t conf, struct dcb_entry *entry) | 5403 | uint32_t conn, uint32_t conf, struct dcb_entry *entry) |
5436 | { | 5404 | { |
5437 | if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 && | 5405 | switch (conn & 0x0000000f) { |
5438 | conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 && | 5406 | case 0: |
5439 | conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 && | 5407 | entry->type = OUTPUT_ANALOG; |
5440 | conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 && | 5408 | break; |
5441 | conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 && | 5409 | case 1: |
5442 | conn != 0xf2205004 && conn != 0xf2209004) { | 5410 | entry->type = OUTPUT_TV; |
5443 | NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n"); | 5411 | break; |
5444 | 5412 | case 2: | |
5445 | /* cause output setting to fail for !TV, so message is seen */ | 5413 | case 3: |
5446 | if ((conn & 0xf) != 0x1) | ||
5447 | dcb->entries = 0; | ||
5448 | |||
5449 | return false; | ||
5450 | } | ||
5451 | /* most of the below is a "best guess" atm */ | ||
5452 | entry->type = conn & 0xf; | ||
5453 | if (entry->type == 2) | ||
5454 | /* another way of specifying straps based lvds... */ | ||
5455 | entry->type = OUTPUT_LVDS; | 5414 | entry->type = OUTPUT_LVDS; |
5456 | if (entry->type == 4) { /* digital */ | 5415 | break; |
5457 | if (conn & 0x10) | 5416 | case 4: |
5458 | entry->type = OUTPUT_LVDS; | 5417 | switch ((conn & 0x000000f0) >> 4) { |
5459 | else | 5418 | case 0: |
5460 | entry->type = OUTPUT_TMDS; | 5419 | entry->type = OUTPUT_TMDS; |
5420 | break; | ||
5421 | case 1: | ||
5422 | entry->type = OUTPUT_LVDS; | ||
5423 | break; | ||
5424 | default: | ||
5425 | NV_ERROR(dev, "Unknown DCB subtype 4/%d\n", | ||
5426 | (conn & 0x000000f0) >> 4); | ||
5427 | return false; | ||
5428 | } | ||
5429 | break; | ||
5430 | default: | ||
5431 | NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f); | ||
5432 | return false; | ||
5461 | } | 5433 | } |
5462 | /* what's in bits 5-13? could be some encoder maker thing, in tv case */ | 5434 | |
5463 | entry->i2c_index = (conn >> 14) & 0xf; | 5435 | entry->i2c_index = (conn & 0x0003c000) >> 14; |
5464 | /* raw heads field is in range 0-1, so move to 1-2 */ | 5436 | entry->heads = ((conn & 0x001c0000) >> 18) + 1; |
5465 | entry->heads = ((conn >> 18) & 0x7) + 1; | 5437 | entry->or = entry->heads; /* same as heads, hopefully safe enough */ |
5466 | entry->location = (conn >> 21) & 0xf; | 5438 | entry->location = (conn & 0x01e00000) >> 21; |
5467 | /* unused: entry->bus = (conn >> 25) & 0x7; */ | 5439 | entry->bus = (conn & 0x0e000000) >> 25; |
5468 | /* set or to be same as heads -- hopefully safe enough */ | ||
5469 | entry->or = entry->heads; | ||
5470 | entry->duallink_possible = false; | 5440 | entry->duallink_possible = false; |
5471 | 5441 | ||
5472 | switch (entry->type) { | 5442 | switch (entry->type) { |
5473 | case OUTPUT_ANALOG: | 5443 | case OUTPUT_ANALOG: |
5474 | entry->crtconf.maxfreq = (conf & 0xffff) * 10; | 5444 | entry->crtconf.maxfreq = (conf & 0xffff) * 10; |
5475 | break; | 5445 | break; |
5476 | case OUTPUT_LVDS: | 5446 | case OUTPUT_TV: |
5477 | /* | 5447 | entry->tvconf.has_component_output = false; |
5478 | * This is probably buried in conn's unknown bits. | ||
5479 | * This will upset EDID-ful models, if they exist | ||
5480 | */ | ||
5481 | entry->lvdsconf.use_straps_for_mode = true; | ||
5482 | entry->lvdsconf.use_power_scripts = true; | ||
5483 | break; | 5448 | break; |
5484 | case OUTPUT_TMDS: | 5449 | case OUTPUT_TMDS: |
5485 | /* | 5450 | /* |
@@ -5488,8 +5453,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, | |||
5488 | */ | 5453 | */ |
5489 | fabricate_vga_output(dcb, entry->i2c_index, entry->heads); | 5454 | fabricate_vga_output(dcb, entry->i2c_index, entry->heads); |
5490 | break; | 5455 | break; |
5491 | case OUTPUT_TV: | 5456 | case OUTPUT_LVDS: |
5492 | entry->tvconf.has_component_output = false; | 5457 | if ((conn & 0x00003f00) != 0x10) |
5458 | entry->lvdsconf.use_straps_for_mode = true; | ||
5459 | entry->lvdsconf.use_power_scripts = true; | ||
5460 | break; | ||
5461 | default: | ||
5493 | break; | 5462 | break; |
5494 | } | 5463 | } |
5495 | 5464 | ||
@@ -5564,11 +5533,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb) | |||
5564 | dcb->entries = newentries; | 5533 | dcb->entries = newentries; |
5565 | } | 5534 | } |
5566 | 5535 | ||
5567 | static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) | 5536 | static int |
5537 | parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) | ||
5568 | { | 5538 | { |
5539 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
5569 | struct bios_parsed_dcb *bdcb = &bios->bdcb; | 5540 | struct bios_parsed_dcb *bdcb = &bios->bdcb; |
5570 | struct parsed_dcb *dcb; | 5541 | struct parsed_dcb *dcb; |
5571 | uint16_t dcbptr, i2ctabptr = 0; | 5542 | uint16_t dcbptr = 0, i2ctabptr = 0; |
5572 | uint8_t *dcbtable; | 5543 | uint8_t *dcbtable; |
5573 | uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; | 5544 | uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; |
5574 | bool configblock = true; | 5545 | bool configblock = true; |
@@ -5579,16 +5550,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two | |||
5579 | dcb->entries = 0; | 5550 | dcb->entries = 0; |
5580 | 5551 | ||
5581 | /* get the offset from 0x36 */ | 5552 | /* get the offset from 0x36 */ |
5582 | dcbptr = ROM16(bios->data[0x36]); | 5553 | if (dev_priv->card_type > NV_04) { |
5554 | dcbptr = ROM16(bios->data[0x36]); | ||
5555 | if (dcbptr == 0x0000) | ||
5556 | NV_WARN(dev, "No output data (DCB) found in BIOS\n"); | ||
5557 | } | ||
5583 | 5558 | ||
5559 | /* this situation likely means a really old card, pre DCB */ | ||
5584 | if (dcbptr == 0x0) { | 5560 | if (dcbptr == 0x0) { |
5585 | NV_WARN(dev, "No output data (DCB) found in BIOS, " | 5561 | NV_INFO(dev, "Assuming a CRT output exists\n"); |
5586 | "assuming a CRT output exists\n"); | ||
5587 | /* this situation likely means a really old card, pre DCB */ | ||
5588 | fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); | 5562 | fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); |
5589 | 5563 | ||
5590 | if (nv04_tv_identify(dev, | 5564 | if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) |
5591 | bios->legacy.i2c_indices.tv) >= 0) | ||
5592 | fabricate_tv_output(dcb, twoHeads); | 5565 | fabricate_tv_output(dcb, twoHeads); |
5593 | 5566 | ||
5594 | return 0; | 5567 | return 0; |
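The DCB 1.5 rewrite above stops whitelisting whole conn words and decodes the fields directly: output type in bits 0-3, a sub-type nibble for digital outputs, i2c index in bits 14-17, heads in bits 18-20, location in bits 21-24 and bus in bits 25-27. A small self-contained decoder for the same layout (field names follow the hunk; the sample word 0xf2204301 is one of the values the removed whitelist used to accept):

#include <stdio.h>
#include <stdint.h>

struct dcb15_fields {
        unsigned type;        /* bits  0-3                                 */
        unsigned subtype;     /* bits  4-7 (digital: 0 = TMDS, 1 = LVDS)   */
        unsigned i2c_index;   /* bits 14-17                                */
        unsigned heads;       /* bits 18-20 hold heads - 1                 */
        unsigned location;    /* bits 21-24                                */
        unsigned bus;         /* bits 25-27                                */
};

static struct dcb15_fields dcb15_decode(uint32_t conn)
{
        struct dcb15_fields f;

        f.type      =  conn & 0x0000000f;
        f.subtype   = (conn & 0x000000f0) >> 4;
        f.i2c_index = (conn & 0x0003c000) >> 14;
        f.heads     = ((conn & 0x001c0000) >> 18) + 1;
        f.location  = (conn & 0x01e00000) >> 21;
        f.bus       = (conn & 0x0e000000) >> 25;
        return f;
}

int main(void)
{
        struct dcb15_fields f = dcb15_decode(0xf2204301);

        printf("type %u, i2c %u, heads %u, bus %u\n",
               f.type, f.i2c_index, f.heads, f.bus);
        return 0;
}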
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index e342a418d434..db0ed4c13f98 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -469,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, | |||
469 | 469 | ||
470 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, | 470 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, |
471 | evict, no_wait, new_mem); | 471 | evict, no_wait, new_mem); |
472 | if (nvbo->channel && nvbo->channel != chan) | ||
473 | ret = nouveau_fence_wait(fence, NULL, false, false); | ||
472 | nouveau_fence_unref((void *)&fence); | 474 | nouveau_fence_unref((void *)&fence); |
473 | return ret; | 475 | return ret; |
474 | } | 476 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 5a10deb8bdbd..7e6d673f3a23 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -24,9 +24,12 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <acpi/button.h> | ||
28 | |||
27 | #include "drmP.h" | 29 | #include "drmP.h" |
28 | #include "drm_edid.h" | 30 | #include "drm_edid.h" |
29 | #include "drm_crtc_helper.h" | 31 | #include "drm_crtc_helper.h" |
32 | |||
30 | #include "nouveau_reg.h" | 33 | #include "nouveau_reg.h" |
31 | #include "nouveau_drv.h" | 34 | #include "nouveau_drv.h" |
32 | #include "nouveau_encoder.h" | 35 | #include "nouveau_encoder.h" |
@@ -83,14 +86,16 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder) | |||
83 | static void | 86 | static void |
84 | nouveau_connector_destroy(struct drm_connector *drm_connector) | 87 | nouveau_connector_destroy(struct drm_connector *drm_connector) |
85 | { | 88 | { |
86 | struct nouveau_connector *connector = nouveau_connector(drm_connector); | 89 | struct nouveau_connector *nv_connector = |
87 | struct drm_device *dev = connector->base.dev; | 90 | nouveau_connector(drm_connector); |
91 | struct drm_device *dev = nv_connector->base.dev; | ||
88 | 92 | ||
89 | NV_DEBUG_KMS(dev, "\n"); | 93 | NV_DEBUG_KMS(dev, "\n"); |
90 | 94 | ||
91 | if (!connector) | 95 | if (!nv_connector) |
92 | return; | 96 | return; |
93 | 97 | ||
98 | kfree(nv_connector->edid); | ||
94 | drm_sysfs_connector_remove(drm_connector); | 99 | drm_sysfs_connector_remove(drm_connector); |
95 | drm_connector_cleanup(drm_connector); | 100 | drm_connector_cleanup(drm_connector); |
96 | kfree(drm_connector); | 101 | kfree(drm_connector); |
@@ -233,10 +238,21 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
233 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | 238 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) |
234 | nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); | 239 | nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); |
235 | if (nv_encoder && nv_connector->native_mode) { | 240 | if (nv_encoder && nv_connector->native_mode) { |
241 | #ifdef CONFIG_ACPI | ||
242 | if (!nouveau_ignorelid && !acpi_lid_open()) | ||
243 | return connector_status_disconnected; | ||
244 | #endif | ||
236 | nouveau_connector_set_encoder(connector, nv_encoder); | 245 | nouveau_connector_set_encoder(connector, nv_encoder); |
237 | return connector_status_connected; | 246 | return connector_status_connected; |
238 | } | 247 | } |
239 | 248 | ||
249 | /* Cleanup the previous EDID block. */ | ||
250 | if (nv_connector->edid) { | ||
251 | drm_mode_connector_update_edid_property(connector, NULL); | ||
252 | kfree(nv_connector->edid); | ||
253 | nv_connector->edid = NULL; | ||
254 | } | ||
255 | |||
240 | i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); | 256 | i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); |
241 | if (i2c) { | 257 | if (i2c) { |
242 | nouveau_connector_ddc_prepare(connector, &flags); | 258 | nouveau_connector_ddc_prepare(connector, &flags); |
@@ -247,7 +263,7 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
247 | if (!nv_connector->edid) { | 263 | if (!nv_connector->edid) { |
248 | NV_ERROR(dev, "DDC responded, but no EDID for %s\n", | 264 | NV_ERROR(dev, "DDC responded, but no EDID for %s\n", |
249 | drm_get_connector_name(connector)); | 265 | drm_get_connector_name(connector)); |
250 | return connector_status_disconnected; | 266 | goto detect_analog; |
251 | } | 267 | } |
252 | 268 | ||
253 | if (nv_encoder->dcb->type == OUTPUT_DP && | 269 | if (nv_encoder->dcb->type == OUTPUT_DP && |
@@ -281,6 +297,7 @@ nouveau_connector_detect(struct drm_connector *connector) | |||
281 | return connector_status_connected; | 297 | return connector_status_connected; |
282 | } | 298 | } |
283 | 299 | ||
300 | detect_analog: | ||
284 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); | 301 | nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); |
285 | if (!nv_encoder) | 302 | if (!nv_encoder) |
286 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); | 303 | nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); |
@@ -687,8 +704,12 @@ nouveau_connector_create_lvds(struct drm_device *dev, | |||
687 | */ | 704 | */ |
688 | if (!nv_connector->edid && !nv_connector->native_mode && | 705 | if (!nv_connector->edid && !nv_connector->native_mode && |
689 | !dev_priv->VBIOS.pub.fp_no_ddc) { | 706 | !dev_priv->VBIOS.pub.fp_no_ddc) { |
690 | nv_connector->edid = | 707 | struct edid *edid = |
691 | (struct edid *)nouveau_bios_embedded_edid(dev); | 708 | (struct edid *)nouveau_bios_embedded_edid(dev); |
709 | if (edid) { | ||
710 | nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); | ||
711 | *(nv_connector->edid) = *edid; | ||
712 | } | ||
692 | } | 713 | } |
693 | 714 | ||
694 | if (!nv_connector->edid) | 715 | if (!nv_connector->edid) |
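nouveau_connector.c now frees the cached EDID both on destroy and when re-detecting, so the LVDS path can no longer store a pointer straight into the VBIOS image; it copies the embedded EDID block into its own allocation first. The general rule, copy read-only table data before handing it to code that will later free it, looks like this outside the kernel (the 128-byte block size matches EDID_LENGTH; everything else is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128

/* rom points into a read-only firmware image we do not own; cache a
 * private copy so the connector may free it later. */
static unsigned char *cache_edid(const unsigned char *rom)
{
        unsigned char *edid;

        if (!rom)
                return NULL;

        edid = malloc(EDID_LENGTH);
        if (!edid)
                return NULL;

        memcpy(edid, rom, EDID_LENGTH);
        return edid;
}

int main(void)
{
        unsigned char fake_rom[EDID_LENGTH] = { 0x00, 0xff, 0xff, 0xff,
                                                0xff, 0xff, 0xff, 0x00 };
        unsigned char *edid = cache_edid(fake_rom);

        printf("cached copy %s\n", edid ? "made" : "failed");
        free(edid);     /* safe: we own the copy, not the ROM */
        return 0;
}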
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 7afbe8b40d51..50d9e67745af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -126,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) | |||
126 | chan->dma.cur += nr_dwords; | 126 | chan->dma.cur += nr_dwords; |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline bool | 129 | /* Fetch and adjust GPU GET pointer |
130 | READ_GET(struct nouveau_channel *chan, uint32_t *get) | 130 | * |
131 | * Returns: | ||
132 | * value >= 0, the adjusted GET pointer | ||
133 | * -EINVAL if GET pointer currently outside main push buffer | ||
134 | * -EBUSY if timeout exceeded | ||
135 | */ | ||
136 | static inline int | ||
137 | READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout) | ||
131 | { | 138 | { |
132 | uint32_t val; | 139 | uint32_t val; |
133 | 140 | ||
134 | val = nvchan_rd32(chan, chan->user_get); | 141 | val = nvchan_rd32(chan, chan->user_get); |
135 | if (val < chan->pushbuf_base || | 142 | |
136 | val > chan->pushbuf_base + (chan->dma.max << 2)) { | 143 | /* reset counter as long as GET is still advancing, this is |
137 | /* meaningless to dma_wait() except to know whether the | 144 | * to avoid misdetecting a GPU lockup if the GPU happens to |
138 | * GPU has stalled or not | 145 | * just be processing an operation that takes a long time |
139 | */ | 146 | */ |
140 | *get = val; | 147 | if (val != *prev_get) { |
141 | return false; | 148 | *prev_get = val; |
149 | *timeout = 0; | ||
150 | } | ||
151 | |||
152 | if ((++*timeout & 0xff) == 0) { | ||
153 | DRM_UDELAY(1); | ||
154 | if (*timeout > 100000) | ||
155 | return -EBUSY; | ||
142 | } | 156 | } |
143 | 157 | ||
144 | *get = (val - chan->pushbuf_base) >> 2; | 158 | if (val < chan->pushbuf_base || |
145 | return true; | 159 | val > chan->pushbuf_base + (chan->dma.max << 2)) |
160 | return -EINVAL; | ||
161 | |||
162 | return (val - chan->pushbuf_base) >> 2; | ||
146 | } | 163 | } |
147 | 164 | ||
148 | int | 165 | int |
149 | nouveau_dma_wait(struct nouveau_channel *chan, int size) | 166 | nouveau_dma_wait(struct nouveau_channel *chan, int size) |
150 | { | 167 | { |
151 | uint32_t get, prev_get = 0, cnt = 0; | 168 | uint32_t prev_get = 0, cnt = 0; |
152 | bool get_valid; | 169 | int get; |
153 | 170 | ||
154 | while (chan->dma.free < size) { | 171 | while (chan->dma.free < size) { |
155 | /* reset counter as long as GET is still advancing, this is | 172 | get = READ_GET(chan, &prev_get, &cnt); |
156 | * to avoid misdetecting a GPU lockup if the GPU happens to | 173 | if (unlikely(get == -EBUSY)) |
157 | * just be processing an operation that takes a long time | 174 | return -EBUSY; |
158 | */ | ||
159 | get_valid = READ_GET(chan, &get); | ||
160 | if (get != prev_get) { | ||
161 | prev_get = get; | ||
162 | cnt = 0; | ||
163 | } | ||
164 | |||
165 | if ((++cnt & 0xff) == 0) { | ||
166 | DRM_UDELAY(1); | ||
167 | if (cnt > 100000) | ||
168 | return -EBUSY; | ||
169 | } | ||
170 | 175 | ||
171 | /* loop until we have a usable GET pointer. the value | 176 | /* loop until we have a usable GET pointer. the value |
172 | * we read from the GPU may be outside the main ring if | 177 | * we read from the GPU may be outside the main ring if |
@@ -177,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size) | |||
177 | * from the SKIPS area, so the code below doesn't have to deal | 182 | * from the SKIPS area, so the code below doesn't have to deal |
178 | * with some fun corner cases. | 183 | * with some fun corner cases. |
179 | */ | 184 | */ |
180 | if (!get_valid || get < NOUVEAU_DMA_SKIPS) | 185 | if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS) |
181 | continue; | 186 | continue; |
182 | 187 | ||
183 | if (get <= chan->dma.cur) { | 188 | if (get <= chan->dma.cur) { |
@@ -203,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size) | |||
203 | * after processing the currently pending commands. | 208 | * after processing the currently pending commands. |
204 | */ | 209 | */ |
205 | OUT_RING(chan, chan->pushbuf_base | 0x20000000); | 210 | OUT_RING(chan, chan->pushbuf_base | 0x20000000); |
211 | |||
212 | /* wait for GET to depart from the skips area. | ||
213 | * prevents writing GET==PUT and causing a race | ||
214 | * condition that causes us to think the GPU is | ||
215 | * idle when it's not. | ||
216 | */ | ||
217 | do { | ||
218 | get = READ_GET(chan, &prev_get, &cnt); | ||
219 | if (unlikely(get == -EBUSY)) | ||
220 | return -EBUSY; | ||
221 | if (unlikely(get == -EINVAL)) | ||
222 | continue; | ||
223 | } while (get <= NOUVEAU_DMA_SKIPS); | ||
206 | WRITE_PUT(NOUVEAU_DMA_SKIPS); | 224 | WRITE_PUT(NOUVEAU_DMA_SKIPS); |
207 | 225 | ||
208 | /* we're now submitting commands at the start of | 226 | /* we're now submitting commands at the start of |
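READ_GET() now owns the lockup heuristic: the stall counter resets whenever the GET pointer has advanced since the previous poll, a one-microsecond delay is inserted every 256 iterations, and only some 100000 polls without progress return -EBUSY, while -EINVAL flags a GET value outside the main push buffer. The progress-aware timeout can be written as a generic poll helper; the following is a userspace sketch in which read_hw_pointer() and the delay are placeholders, and the returned offset is assumed to fit in a positive int, as it does for push-buffer offsets:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static uint32_t read_hw_pointer(void)
{
        /* placeholder for nvchan_rd32(chan, chan->user_get) */
        static uint32_t fake = 0;
        return fake += 4;       /* pretend the hardware keeps advancing */
}

/* Poll a hardware pointer, resetting the stall counter whenever it
 * moves, so a long-running but healthy operation is not mistaken for
 * a lockup.  Returns the value read, or -EBUSY once the pointer has
 * sat still for ~100000 polls. */
static int poll_with_progress_timeout(uint32_t *prev, uint32_t *timeout)
{
        uint32_t val = read_hw_pointer();

        if (val != *prev) {             /* progress: restart the clock */
                *prev = val;
                *timeout = 0;
        }

        if ((++*timeout & 0xff) == 0) {
                /* DRM_UDELAY(1) in the kernel; nothing to do here */
                if (*timeout > 100000)
                        return -EBUSY;
        }
        return (int)val;
}

int main(void)
{
        uint32_t prev = 0, timeout = 0;
        int v = poll_with_progress_timeout(&prev, &timeout);

        printf("pointer now %d\n", v);
        return 0;
}

Splitting the helper out also lets the SKIPS-area wait loop added at the end of nouveau_dma_wait() reuse exactly the same stall detection.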
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 9e2926c48579..dd4937224220 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
490 | if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { | 490 | if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { |
491 | NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", | 491 | NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", |
492 | nv_rd32(dev, NV50_AUXCH_CTRL(index))); | 492 | nv_rd32(dev, NV50_AUXCH_CTRL(index))); |
493 | return -EBUSY; | 493 | ret = -EBUSY; |
494 | goto out; | ||
494 | } | 495 | } |
495 | 496 | ||
496 | udelay(400); | 497 | udelay(400); |
@@ -501,6 +502,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
501 | break; | 502 | break; |
502 | } | 503 | } |
503 | 504 | ||
505 | if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { | ||
506 | ret = -EREMOTEIO; | ||
507 | goto out; | ||
508 | } | ||
509 | |||
504 | if (cmd & 1) { | 510 | if (cmd & 1) { |
505 | for (i = 0; i < 4; i++) { | 511 | for (i = 0; i < 4; i++) { |
506 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); | 512 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 06eb993e0883..343ab7f17ccc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
@@ -71,6 +71,10 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)"); | |||
71 | int nouveau_uscript_tmds = -1; | 71 | int nouveau_uscript_tmds = -1; |
72 | module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); | 72 | module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); |
73 | 73 | ||
74 | MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); | ||
75 | int nouveau_ignorelid = 0; | ||
76 | module_param_named(ignorelid, nouveau_ignorelid, int, 0400); | ||
77 | |||
74 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" | 78 | MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" |
75 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" | 79 | "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" |
76 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" | 80 | "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 026419fe8791..6b9690418bc7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -509,6 +509,8 @@ struct drm_nouveau_private { | |||
509 | void __iomem *ramin; | 509 | void __iomem *ramin; |
510 | uint32_t ramin_size; | 510 | uint32_t ramin_size; |
511 | 511 | ||
512 | struct nouveau_bo *vga_ram; | ||
513 | |||
512 | struct workqueue_struct *wq; | 514 | struct workqueue_struct *wq; |
513 | struct work_struct irq_work; | 515 | struct work_struct irq_work; |
514 | 516 | ||
@@ -675,6 +677,7 @@ extern char *nouveau_tv_norm; | |||
675 | extern int nouveau_reg_debug; | 677 | extern int nouveau_reg_debug; |
676 | extern char *nouveau_vbios; | 678 | extern char *nouveau_vbios; |
677 | extern int nouveau_ctxfw; | 679 | extern int nouveau_ctxfw; |
680 | extern int nouveau_ignorelid; | ||
678 | 681 | ||
679 | /* nouveau_state.c */ | 682 | /* nouveau_state.c */ |
680 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); | 683 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2009db2426c3..6ac804b0c9f9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -321,6 +321,7 @@ retry: | |||
321 | else { | 321 | else { |
322 | NV_ERROR(dev, "invalid valid domains: 0x%08x\n", | 322 | NV_ERROR(dev, "invalid valid domains: 0x%08x\n", |
323 | b->valid_domains); | 323 | b->valid_domains); |
324 | list_add_tail(&nvbo->entry, &op->both_list); | ||
324 | validate_fini(op, NULL); | 325 | validate_fini(op, NULL); |
325 | return -EINVAL; | 326 | return -EINVAL; |
326 | } | 327 | } |
@@ -466,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size) | |||
466 | static int | 467 | static int |
467 | nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, | 468 | nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, |
468 | struct drm_nouveau_gem_pushbuf_bo *bo, | 469 | struct drm_nouveau_gem_pushbuf_bo *bo, |
469 | int nr_relocs, uint64_t ptr_relocs, | 470 | unsigned nr_relocs, uint64_t ptr_relocs, |
470 | int nr_dwords, int first_dword, | 471 | unsigned nr_dwords, unsigned first_dword, |
471 | uint32_t *pushbuf, bool is_iomem) | 472 | uint32_t *pushbuf, bool is_iomem) |
472 | { | 473 | { |
473 | struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; | 474 | struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; |
474 | struct drm_device *dev = chan->dev; | 475 | struct drm_device *dev = chan->dev; |
475 | int ret = 0, i; | 476 | int ret = 0; |
477 | unsigned i; | ||
476 | 478 | ||
477 | reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); | 479 | reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); |
478 | if (IS_ERR(reloc)) | 480 | if (IS_ERR(reloc)) |
@@ -667,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, | |||
667 | } | 669 | } |
668 | pbbo = nouveau_gem_object(gem); | 670 | pbbo = nouveau_gem_object(gem); |
669 | 671 | ||
672 | if ((req->offset & 3) || req->nr_dwords < 2 || | ||
673 | (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size || | ||
674 | (unsigned long)req->nr_dwords > | ||
675 | ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) { | ||
676 | NV_ERROR(dev, "pb call misaligned or out of bounds: " | ||
677 | "%d + %d * 4 > %ld\n", | ||
678 | req->offset, req->nr_dwords, pbbo->bo.mem.size); | ||
679 | ret = -EINVAL; | ||
680 | drm_gem_object_unreference(gem); | ||
681 | goto out; | ||
682 | } | ||
683 | |||
670 | ret = ttm_bo_reserve(&pbbo->bo, false, false, true, | 684 | ret = ttm_bo_reserve(&pbbo->bo, false, false, true, |
671 | chan->fence.sequence); | 685 | chan->fence.sequence); |
672 | if (ret) { | 686 | if (ret) { |
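The new pushbuf-call check rejects misaligned offsets, calls shorter than two dwords, and any (offset, nr_dwords) pair that would read past the end of the buffer object, and it phrases the comparison so it cannot overflow: nr_dwords is compared against (size - offset) >> 2 instead of multiplying it by four. The same overflow-safe bounds check in isolation (plain C; the types mirror the request fields):

#include <stdio.h>
#include <stdint.h>

/* Validate a user-supplied (offset, nr_dwords) pair against a buffer
 * of buf_size bytes without ever computing offset + nr_dwords * 4,
 * which could wrap around. */
static int pushbuf_call_ok(uint32_t offset, uint32_t nr_dwords,
                           unsigned long buf_size)
{
        if (offset & 3)                         /* must be dword aligned  */
                return 0;
        if (nr_dwords < 2)                      /* too short to be a call */
                return 0;
        if ((unsigned long)offset > buf_size)   /* start already past end */
                return 0;
        if ((unsigned long)nr_dwords > (buf_size - offset) >> 2)
                return 0;                       /* would run off the end  */
        return 1;
}

int main(void)
{
        printf("%d\n", pushbuf_call_ok(0, 8, 4096));             /* 1: fits     */
        printf("%d\n", pushbuf_call_ok(4088, 0xffffffff, 4096)); /* 0: overflow */
        return 0;
}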
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 919a619ca7fa..3b9bad66162a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c | |||
@@ -483,6 +483,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) | |||
483 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { | 483 | if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { |
484 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) | 484 | if (nouveau_pgraph_intr_swmthd(dev, &trap)) |
485 | unhandled = 1; | 485 | unhandled = 1; |
486 | } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { | ||
487 | uint32_t v = nv_rd32(dev, 0x402000); | ||
488 | nv_wr32(dev, 0x402000, v); | ||
489 | |||
490 | /* dump the error anyway for now: it's useful for | ||
491 | Gallium development */ | ||
492 | unhandled = 1; | ||
486 | } else { | 493 | } else { |
487 | unhandled = 1; | 494 | unhandled = 1; |
488 | } | 495 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index fb9bdd6edf1f..8f3a12f614ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -383,9 +383,8 @@ void nouveau_mem_close(struct drm_device *dev) | |||
383 | { | 383 | { |
384 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 384 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
385 | 385 | ||
386 | if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type) | 386 | nouveau_bo_unpin(dev_priv->vga_ram); |
387 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0); | 387 | nouveau_bo_ref(NULL, &dev_priv->vga_ram); |
388 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | ||
389 | 388 | ||
390 | ttm_bo_device_release(&dev_priv->ttm.bdev); | 389 | ttm_bo_device_release(&dev_priv->ttm.bdev); |
391 | 390 | ||
@@ -622,6 +621,15 @@ nouveau_mem_init(struct drm_device *dev) | |||
622 | return ret; | 621 | return ret; |
623 | } | 622 | } |
624 | 623 | ||
624 | ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM, | ||
625 | 0, 0, true, true, &dev_priv->vga_ram); | ||
626 | if (ret == 0) | ||
627 | ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM); | ||
628 | if (ret) { | ||
629 | NV_WARN(dev, "failed to reserve VGA memory\n"); | ||
630 | nouveau_bo_ref(NULL, &dev_priv->vga_ram); | ||
631 | } | ||
632 | |||
625 | /* GART */ | 633 | /* GART */ |
626 | #if !defined(__powerpc__) && !defined(__ia64__) | 634 | #if !defined(__powerpc__) && !defined(__ia64__) |
627 | if (drm_device_is_agp(dev) && dev->agp) { | 635 | if (drm_device_is_agp(dev) && dev->agp) { |
@@ -653,6 +661,7 @@ nouveau_mem_init(struct drm_device *dev) | |||
653 | dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), | 661 | dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), |
654 | drm_get_resource_len(dev, 1), | 662 | drm_get_resource_len(dev, 1), |
655 | DRM_MTRR_WC); | 663 | DRM_MTRR_WC); |
664 | |||
656 | return 0; | 665 | return 0; |
657 | } | 666 | } |
658 | 667 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 09b9a46dfc0e..f2d0187ba152 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -525,6 +525,7 @@ static void nouveau_card_takedown(struct drm_device *dev) | |||
525 | engine->mc.takedown(dev); | 525 | engine->mc.takedown(dev); |
526 | 526 | ||
527 | mutex_lock(&dev->struct_mutex); | 527 | mutex_lock(&dev->struct_mutex); |
528 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); | ||
528 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); | 529 | ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); |
529 | mutex_unlock(&dev->struct_mutex); | 530 | mutex_unlock(&dev->struct_mutex); |
530 | nouveau_sgdma_takedown(dev); | 531 | nouveau_sgdma_takedown(dev); |
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index a20c206625a2..a3b9563a6f60 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c | |||
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev) | |||
30 | * of vram. For now, only reserve a small piece until we know | 30 | * of vram. For now, only reserve a small piece until we know |
31 | * more about what each chipset requires. | 31 | * more about what each chipset requires. |
32 | */ | 32 | */ |
33 | switch (dev_priv->chipset & 0xf0) { | 33 | switch (dev_priv->chipset) { |
34 | case 0x40: | 34 | case 0x40: |
35 | case 0x47: | 35 | case 0x47: |
36 | case 0x49: | 36 | case 0x49: |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 118d3285fd8c..40b7360841f8 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
@@ -432,6 +432,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc) | |||
432 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 432 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
433 | struct drm_device *dev = crtc->dev; | 433 | struct drm_device *dev = crtc->dev; |
434 | struct drm_encoder *encoder; | 434 | struct drm_encoder *encoder; |
435 | uint32_t dac = 0, sor = 0; | ||
435 | 436 | ||
436 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); | 437 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
437 | 438 | ||
@@ -439,9 +440,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc) | |||
439 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 440 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
440 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 441 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
441 | 442 | ||
442 | if (drm_helper_encoder_in_use(encoder)) | 443 | if (!drm_helper_encoder_in_use(encoder)) |
443 | continue; | 444 | continue; |
444 | 445 | ||
446 | if (nv_encoder->dcb->type == OUTPUT_ANALOG || | ||
447 | nv_encoder->dcb->type == OUTPUT_TV) | ||
448 | dac |= (1 << nv_encoder->or); | ||
449 | else | ||
450 | sor |= (1 << nv_encoder->or); | ||
451 | } | ||
452 | |||
453 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
454 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
455 | |||
456 | if (nv_encoder->dcb->type == OUTPUT_ANALOG || | ||
457 | nv_encoder->dcb->type == OUTPUT_TV) { | ||
458 | if (dac & (1 << nv_encoder->or)) | ||
459 | continue; | ||
460 | } else { | ||
461 | if (sor & (1 << nv_encoder->or)) | ||
462 | continue; | ||
463 | } | ||
464 | |||
445 | nv_encoder->disconnect(nv_encoder); | 465 | nv_encoder->disconnect(nv_encoder); |
446 | } | 466 | } |
447 | 467 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 39caf167587d..32b244bcb482 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) | |||
272 | return ret; | 272 | return ret; |
273 | ramfc = chan->ramfc->gpuobj; | 273 | ramfc = chan->ramfc->gpuobj; |
274 | 274 | ||
275 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256, | 275 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024, |
276 | 0, &chan->cache); | 276 | 0, &chan->cache); |
277 | if (ret) | 277 | if (ret) |
278 | return ret; | 278 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index ca79f32be44c..20319e59d368 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev) | |||
84 | nv_wr32(dev, 0x400804, 0xc0000000); | 84 | nv_wr32(dev, 0x400804, 0xc0000000); |
85 | nv_wr32(dev, 0x406800, 0xc0000000); | 85 | nv_wr32(dev, 0x406800, 0xc0000000); |
86 | nv_wr32(dev, 0x400c04, 0xc0000000); | 86 | nv_wr32(dev, 0x400c04, 0xc0000000); |
87 | nv_wr32(dev, 0x401804, 0xc0000000); | 87 | nv_wr32(dev, 0x401800, 0xc0000000); |
88 | nv_wr32(dev, 0x405018, 0xc0000000); | 88 | nv_wr32(dev, 0x405018, 0xc0000000); |
89 | nv_wr32(dev, 0x402000, 0xc0000000); | 89 | nv_wr32(dev, 0x402000, 0xc0000000); |
90 | 90 | ||
@@ -282,6 +282,7 @@ nv50_graph_unload_context(struct drm_device *dev) | |||
282 | return 0; | 282 | return 0; |
283 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; | 283 | inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; |
284 | 284 | ||
285 | nouveau_wait_for_idle(dev); | ||
285 | nv_wr32(dev, 0x400500, fifo & ~1); | 286 | nv_wr32(dev, 0x400500, fifo & ~1); |
286 | nv_wr32(dev, 0x400784, inst); | 287 | nv_wr32(dev, 0x400784, inst); |
287 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); | 288 | nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index e395c16d30f5..ecf1936b8224 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
@@ -90,11 +90,24 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
90 | { | 90 | { |
91 | struct drm_device *dev = encoder->dev; | 91 | struct drm_device *dev = encoder->dev; |
92 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 92 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
93 | struct drm_encoder *enc; | ||
93 | uint32_t val; | 94 | uint32_t val; |
94 | int or = nv_encoder->or; | 95 | int or = nv_encoder->or; |
95 | 96 | ||
96 | NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); | 97 | NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); |
97 | 98 | ||
99 | nv_encoder->last_dpms = mode; | ||
100 | list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { | ||
101 | struct nouveau_encoder *nvenc = nouveau_encoder(enc); | ||
102 | |||
103 | if (nvenc == nv_encoder || | ||
104 | nvenc->dcb->or != nv_encoder->dcb->or) | ||
105 | continue; | ||
106 | |||
107 | if (nvenc->last_dpms == DRM_MODE_DPMS_ON) | ||
108 | return; | ||
109 | } | ||
110 | |||
98 | /* wait for it to be done */ | 111 | /* wait for it to be done */ |
99 | if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), | 112 | if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), |
100 | NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { | 113 | NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { |
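The SOR DPMS hook records the requested mode in last_dpms and returns before touching the hardware if any other encoder on the same OR is still set to DRM_MODE_DPMS_ON, so blanking one connector cannot power down a shared output. A condensed version of that guard (the encoder list and its fields are invented for the example):

#include <stdio.h>

#define DPMS_ON  0
#define DPMS_OFF 3

struct encoder {
        int or_index;
        int last_dpms;
};

/* Return 1 if it is safe to change the OR's power state, i.e. no
 * other encoder sharing this OR still wants it on. */
static int or_can_change_power(const struct encoder *self,
                               const struct encoder *list, unsigned count)
{
        unsigned i;

        for (i = 0; i < count; i++) {
                const struct encoder *other = &list[i];

                if (other == self || other->or_index != self->or_index)
                        continue;
                if (other->last_dpms == DPMS_ON)
                        return 0;
        }
        return 1;
}

int main(void)
{
        struct encoder enc[] = { { 0, DPMS_OFF }, { 0, DPMS_ON } };

        /* request: turn encoder 0 off while encoder 1 shares OR 0 */
        printf("power down OR 0: %s\n",
               or_can_change_power(&enc[0], enc, 2) ? "yes" : "deferred");
        return 0;
}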
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 388140a7e651..e3b44562d265 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -246,6 +246,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
246 | case ATOM_WS_ATTRIBUTES: | 246 | case ATOM_WS_ATTRIBUTES: |
247 | val = gctx->io_attr; | 247 | val = gctx->io_attr; |
248 | break; | 248 | break; |
249 | case ATOM_WS_REGPTR: | ||
250 | val = gctx->reg_block; | ||
251 | break; | ||
249 | default: | 252 | default: |
250 | val = ctx->ws[idx]; | 253 | val = ctx->ws[idx]; |
251 | } | 254 | } |
@@ -385,6 +388,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) | |||
385 | return atom_get_src_int(ctx, attr, ptr, NULL, 1); | 388 | return atom_get_src_int(ctx, attr, ptr, NULL, 1); |
386 | } | 389 | } |
387 | 390 | ||
391 | static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) | ||
392 | { | ||
393 | uint32_t val = 0xCDCDCDCD; | ||
394 | |||
395 | switch (align) { | ||
396 | case ATOM_SRC_DWORD: | ||
397 | val = U32(*ptr); | ||
398 | (*ptr) += 4; | ||
399 | break; | ||
400 | case ATOM_SRC_WORD0: | ||
401 | case ATOM_SRC_WORD8: | ||
402 | case ATOM_SRC_WORD16: | ||
403 | val = U16(*ptr); | ||
404 | (*ptr) += 2; | ||
405 | break; | ||
406 | case ATOM_SRC_BYTE0: | ||
407 | case ATOM_SRC_BYTE8: | ||
408 | case ATOM_SRC_BYTE16: | ||
409 | case ATOM_SRC_BYTE24: | ||
410 | val = U8(*ptr); | ||
411 | (*ptr)++; | ||
412 | break; | ||
413 | } | ||
414 | return val; | ||
415 | } | ||
416 | |||
388 | static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, | 417 | static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, |
389 | int *ptr, uint32_t *saved, int print) | 418 | int *ptr, uint32_t *saved, int print) |
390 | { | 419 | { |
@@ -482,6 +511,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, | |||
482 | case ATOM_WS_ATTRIBUTES: | 511 | case ATOM_WS_ATTRIBUTES: |
483 | gctx->io_attr = val; | 512 | gctx->io_attr = val; |
484 | break; | 513 | break; |
514 | case ATOM_WS_REGPTR: | ||
515 | gctx->reg_block = val; | ||
516 | break; | ||
485 | default: | 517 | default: |
486 | ctx->ws[idx] = val; | 518 | ctx->ws[idx] = val; |
487 | } | 519 | } |
@@ -677,7 +709,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) | |||
677 | SDEBUG(" dst: "); | 709 | SDEBUG(" dst: "); |
678 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 710 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
679 | SDEBUG(" src1: "); | 711 | SDEBUG(" src1: "); |
680 | src1 = atom_get_src(ctx, attr, ptr); | 712 | src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); |
681 | SDEBUG(" src2: "); | 713 | SDEBUG(" src2: "); |
682 | src2 = atom_get_src(ctx, attr, ptr); | 714 | src2 = atom_get_src(ctx, attr, ptr); |
683 | dst &= src1; | 715 | dst &= src1; |
@@ -809,6 +841,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) | |||
809 | SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); | 841 | SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); |
810 | } | 842 | } |
811 | 843 | ||
844 | static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) | ||
845 | { | ||
846 | uint8_t attr = U8((*ptr)++), shift; | ||
847 | uint32_t saved, dst; | ||
848 | int dptr = *ptr; | ||
849 | attr &= 0x38; | ||
850 | attr |= atom_def_dst[attr >> 3] << 6; | ||
851 | SDEBUG(" dst: "); | ||
852 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
853 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); | ||
854 | SDEBUG(" shift: %d\n", shift); | ||
855 | dst <<= shift; | ||
856 | SDEBUG(" dst: "); | ||
857 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
858 | } | ||
859 | |||
860 | static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) | ||
861 | { | ||
862 | uint8_t attr = U8((*ptr)++), shift; | ||
863 | uint32_t saved, dst; | ||
864 | int dptr = *ptr; | ||
865 | attr &= 0x38; | ||
866 | attr |= atom_def_dst[attr >> 3] << 6; | ||
867 | SDEBUG(" dst: "); | ||
868 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | ||
869 | shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); | ||
870 | SDEBUG(" shift: %d\n", shift); | ||
871 | dst >>= shift; | ||
872 | SDEBUG(" dst: "); | ||
873 | atom_put_dst(ctx, arg, attr, &dptr, dst, saved); | ||
874 | } | ||
875 | |||
812 | static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | 876 | static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) |
813 | { | 877 | { |
814 | uint8_t attr = U8((*ptr)++), shift; | 878 | uint8_t attr = U8((*ptr)++), shift; |
@@ -818,7 +882,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) | |||
818 | attr |= atom_def_dst[attr >> 3] << 6; | 882 | attr |= atom_def_dst[attr >> 3] << 6; |
819 | SDEBUG(" dst: "); | 883 | SDEBUG(" dst: "); |
820 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 884 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
821 | shift = U8((*ptr)++); | 885 | shift = atom_get_src(ctx, attr, ptr); |
822 | SDEBUG(" shift: %d\n", shift); | 886 | SDEBUG(" shift: %d\n", shift); |
823 | dst <<= shift; | 887 | dst <<= shift; |
824 | SDEBUG(" dst: "); | 888 | SDEBUG(" dst: "); |
@@ -834,7 +898,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) | |||
834 | attr |= atom_def_dst[attr >> 3] << 6; | 898 | attr |= atom_def_dst[attr >> 3] << 6; |
835 | SDEBUG(" dst: "); | 899 | SDEBUG(" dst: "); |
836 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); | 900 | dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); |
837 | shift = U8((*ptr)++); | 901 | shift = atom_get_src(ctx, attr, ptr); |
838 | SDEBUG(" shift: %d\n", shift); | 902 | SDEBUG(" shift: %d\n", shift); |
839 | dst >>= shift; | 903 | dst >>= shift; |
840 | SDEBUG(" dst: "); | 904 | SDEBUG(" dst: "); |
@@ -937,18 +1001,18 @@ static struct { | |||
937 | atom_op_or, ATOM_ARG_FB}, { | 1001 | atom_op_or, ATOM_ARG_FB}, { |
938 | atom_op_or, ATOM_ARG_PLL}, { | 1002 | atom_op_or, ATOM_ARG_PLL}, { |
939 | atom_op_or, ATOM_ARG_MC}, { | 1003 | atom_op_or, ATOM_ARG_MC}, { |
940 | atom_op_shl, ATOM_ARG_REG}, { | 1004 | atom_op_shift_left, ATOM_ARG_REG}, { |
941 | atom_op_shl, ATOM_ARG_PS}, { | 1005 | atom_op_shift_left, ATOM_ARG_PS}, { |
942 | atom_op_shl, ATOM_ARG_WS}, { | 1006 | atom_op_shift_left, ATOM_ARG_WS}, { |
943 | atom_op_shl, ATOM_ARG_FB}, { | 1007 | atom_op_shift_left, ATOM_ARG_FB}, { |
944 | atom_op_shl, ATOM_ARG_PLL}, { | 1008 | atom_op_shift_left, ATOM_ARG_PLL}, { |
945 | atom_op_shl, ATOM_ARG_MC}, { | 1009 | atom_op_shift_left, ATOM_ARG_MC}, { |
946 | atom_op_shr, ATOM_ARG_REG}, { | 1010 | atom_op_shift_right, ATOM_ARG_REG}, { |
947 | atom_op_shr, ATOM_ARG_PS}, { | 1011 | atom_op_shift_right, ATOM_ARG_PS}, { |
948 | atom_op_shr, ATOM_ARG_WS}, { | 1012 | atom_op_shift_right, ATOM_ARG_WS}, { |
949 | atom_op_shr, ATOM_ARG_FB}, { | 1013 | atom_op_shift_right, ATOM_ARG_FB}, { |
950 | atom_op_shr, ATOM_ARG_PLL}, { | 1014 | atom_op_shift_right, ATOM_ARG_PLL}, { |
951 | atom_op_shr, ATOM_ARG_MC}, { | 1015 | atom_op_shift_right, ATOM_ARG_MC}, { |
952 | atom_op_mul, ATOM_ARG_REG}, { | 1016 | atom_op_mul, ATOM_ARG_REG}, { |
953 | atom_op_mul, ATOM_ARG_PS}, { | 1017 | atom_op_mul, ATOM_ARG_PS}, { |
954 | atom_op_mul, ATOM_ARG_WS}, { | 1018 | atom_op_mul, ATOM_ARG_WS}, { |
@@ -1058,8 +1122,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1058 | 1122 | ||
1059 | SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); | 1123 | SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); |
1060 | 1124 | ||
1061 | /* reset reg block */ | ||
1062 | ctx->reg_block = 0; | ||
1063 | ectx.ctx = ctx; | 1125 | ectx.ctx = ctx; |
1064 | ectx.ps_shift = ps / 4; | 1126 | ectx.ps_shift = ps / 4; |
1065 | ectx.start = base; | 1127 | ectx.start = base; |
@@ -1096,6 +1158,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3 | |||
1096 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | 1158 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
1097 | { | 1159 | { |
1098 | mutex_lock(&ctx->mutex); | 1160 | mutex_lock(&ctx->mutex); |
1161 | /* reset reg block */ | ||
1162 | ctx->reg_block = 0; | ||
1163 | /* reset fb window */ | ||
1164 | ctx->fb_base = 0; | ||
1165 | /* reset io mode */ | ||
1166 | ctx->io_mode = ATOM_IO_MM; | ||
1099 | atom_execute_table_locked(ctx, index, params); | 1167 | atom_execute_table_locked(ctx, index, params); |
1100 | mutex_unlock(&ctx->mutex); | 1168 | mutex_unlock(&ctx->mutex); |
1101 | } | 1169 | } |
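Moving the reg_block reset out of atom_execute_table_locked() and adding fb_base/io_mode matters because table execution is reentrant: a CALL_TABLE op re-enters the locked helper, so resetting there would wipe state the calling table had just set up. A sketch of the intended call shape (the recursion detail is inferred from the interpreter structure, not copied from the driver):

    /* Sketch: per-invocation state is cleared once, under the mutex,
     * before the (possibly recursive) interpreter runs. */
    mutex_lock(&ctx->mutex);
    ctx->reg_block = 0;            /* register block selector    */
    ctx->fb_base   = 0;            /* frame buffer window         */
    ctx->io_mode   = ATOM_IO_MM;   /* memory-mapped register I/O  */
    atom_execute_table_locked(ctx, index, params);   /* may call nested tables */
    mutex_unlock(&ctx->mutex);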
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index 47fd943f6d14..bc73781423a1 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -91,6 +91,7 @@ | |||
91 | #define ATOM_WS_AND_MASK 0x45 | 91 | #define ATOM_WS_AND_MASK 0x45 |
92 | #define ATOM_WS_FB_WINDOW 0x46 | 92 | #define ATOM_WS_FB_WINDOW 0x46 |
93 | #define ATOM_WS_ATTRIBUTES 0x47 | 93 | #define ATOM_WS_ATTRIBUTES 0x47 |
94 | #define ATOM_WS_REGPTR 0x48 | ||
94 | 95 | ||
95 | #define ATOM_IIO_NOP 0 | 96 | #define ATOM_IIO_NOP 0 |
96 | #define ATOM_IIO_START 1 | 97 | #define ATOM_IIO_START 1 |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 260fcf59f00c..af464e351fbd 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, | |||
307 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); | 307 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
308 | args.ucCRTC = radeon_crtc->crtc_id; | 308 | args.ucCRTC = radeon_crtc->crtc_id; |
309 | 309 | ||
310 | printk("executing set crtc dtd timing\n"); | ||
311 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 310 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
312 | } | 311 | } |
313 | 312 | ||
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, | |||
347 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); | 346 | args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
348 | args.ucCRTC = radeon_crtc->crtc_id; | 347 | args.ucCRTC = radeon_crtc->crtc_id; |
349 | 348 | ||
350 | printk("executing set crtc timing\n"); | ||
351 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 349 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
352 | } | 350 | } |
353 | 351 | ||
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) | |||
409 | } | 407 | } |
410 | } | 408 | } |
411 | 409 | ||
412 | void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | 410 | union adjust_pixel_clock { |
411 | ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; | ||
412 | }; | ||
413 | |||
414 | static u32 atombios_adjust_pll(struct drm_crtc *crtc, | ||
415 | struct drm_display_mode *mode, | ||
416 | struct radeon_pll *pll) | ||
413 | { | 417 | { |
414 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
415 | struct drm_device *dev = crtc->dev; | 418 | struct drm_device *dev = crtc->dev; |
416 | struct radeon_device *rdev = dev->dev_private; | 419 | struct radeon_device *rdev = dev->dev_private; |
417 | struct drm_encoder *encoder = NULL; | 420 | struct drm_encoder *encoder = NULL; |
418 | struct radeon_encoder *radeon_encoder = NULL; | 421 | struct radeon_encoder *radeon_encoder = NULL; |
419 | uint8_t frev, crev; | 422 | u32 adjusted_clock = mode->clock; |
420 | int index; | ||
421 | SET_PIXEL_CLOCK_PS_ALLOCATION args; | ||
422 | PIXEL_CLOCK_PARAMETERS *spc1_ptr; | ||
423 | PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr; | ||
424 | PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr; | ||
425 | uint32_t pll_clock = mode->clock; | ||
426 | uint32_t adjusted_clock; | ||
427 | uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; | ||
428 | struct radeon_pll *pll; | ||
429 | int pll_flags = 0; | ||
430 | 423 | ||
431 | memset(&args, 0, sizeof(args)); | 424 | /* reset the pll flags */ |
425 | pll->flags = 0; | ||
432 | 426 | ||
433 | if (ASIC_IS_AVIVO(rdev)) { | 427 | if (ASIC_IS_AVIVO(rdev)) { |
434 | if ((rdev->family == CHIP_RS600) || | 428 | if ((rdev->family == CHIP_RS600) || |
435 | (rdev->family == CHIP_RS690) || | 429 | (rdev->family == CHIP_RS690) || |
436 | (rdev->family == CHIP_RS740)) | 430 | (rdev->family == CHIP_RS740)) |
437 | pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | 431 | pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | |
438 | RADEON_PLL_PREFER_CLOSEST_LOWER); | 432 | RADEON_PLL_PREFER_CLOSEST_LOWER); |
439 | 433 | ||
440 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 434 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
441 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 435 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
442 | else | 436 | else |
443 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 437 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
444 | } else { | 438 | } else { |
445 | pll_flags |= RADEON_PLL_LEGACY; | 439 | pll->flags |= RADEON_PLL_LEGACY; |
446 | 440 | ||
447 | if (mode->clock > 200000) /* range limits??? */ | 441 | if (mode->clock > 200000) /* range limits??? */ |
448 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 442 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
449 | else | 443 | else |
450 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 444 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
451 | 445 | ||
452 | } | 446 | } |
453 | 447 | ||
454 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 448 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
455 | if (encoder->crtc == crtc) { | 449 | if (encoder->crtc == crtc) { |
456 | if (!ASIC_IS_AVIVO(rdev)) { | ||
457 | if (encoder->encoder_type != | ||
458 | DRM_MODE_ENCODER_DAC) | ||
459 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | ||
460 | if (encoder->encoder_type == | ||
461 | DRM_MODE_ENCODER_LVDS) | ||
462 | pll_flags |= RADEON_PLL_USE_REF_DIV; | ||
463 | } | ||
464 | radeon_encoder = to_radeon_encoder(encoder); | 450 | radeon_encoder = to_radeon_encoder(encoder); |
451 | if (ASIC_IS_AVIVO(rdev)) { | ||
452 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | ||
453 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | ||
454 | adjusted_clock = mode->clock * 2; | ||
455 | } else { | ||
456 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | ||
457 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; | ||
458 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) | ||
459 | pll->flags |= RADEON_PLL_USE_REF_DIV; | ||
460 | } | ||
465 | break; | 461 | break; |
466 | } | 462 | } |
467 | } | 463 | } |
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
471 | * special hw requirements. | 467 | * special hw requirements. |
472 | */ | 468 | */ |
473 | if (ASIC_IS_DCE3(rdev)) { | 469 | if (ASIC_IS_DCE3(rdev)) { |
474 | ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; | 470 | union adjust_pixel_clock args; |
471 | struct radeon_encoder_atom_dig *dig; | ||
472 | u8 frev, crev; | ||
473 | int index; | ||
475 | 474 | ||
476 | if (!encoder) | 475 | if (!radeon_encoder->enc_priv) |
477 | return; | 476 | return adjusted_clock; |
478 | 477 | dig = radeon_encoder->enc_priv; | |
479 | memset(&adjust_pll_args, 0, sizeof(adjust_pll_args)); | ||
480 | adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10); | ||
481 | adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id; | ||
482 | adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
483 | 478 | ||
484 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); | 479 | index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); |
485 | atom_execute_table(rdev->mode_info.atom_context, | 480 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
486 | index, (uint32_t *)&adjust_pll_args); | 481 | &crev); |
487 | adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; | 482 | |
488 | } else { | 483 | memset(&args, 0, sizeof(args)); |
489 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 484 | |
490 | if (ASIC_IS_AVIVO(rdev) && | 485 | switch (frev) { |
491 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) | 486 | case 1: |
492 | adjusted_clock = mode->clock * 2; | 487 | switch (crev) { |
493 | else | 488 | case 1: |
494 | adjusted_clock = mode->clock; | 489 | case 2: |
490 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); | ||
491 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | ||
492 | args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
493 | |||
494 | atom_execute_table(rdev->mode_info.atom_context, | ||
495 | index, (uint32_t *)&args); | ||
496 | adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; | ||
497 | break; | ||
498 | default: | ||
499 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
500 | return adjusted_clock; | ||
501 | } | ||
502 | break; | ||
503 | default: | ||
504 | DRM_ERROR("Unknown table version %d %d\n", frev, crev); | ||
505 | return adjusted_clock; | ||
506 | } | ||
495 | } | 507 | } |
508 | return adjusted_clock; | ||
509 | } | ||
510 | |||
511 | union set_pixel_clock { | ||
512 | SET_PIXEL_CLOCK_PS_ALLOCATION base; | ||
513 | PIXEL_CLOCK_PARAMETERS v1; | ||
514 | PIXEL_CLOCK_PARAMETERS_V2 v2; | ||
515 | PIXEL_CLOCK_PARAMETERS_V3 v3; | ||
516 | }; | ||
517 | |||
518 | void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | ||
519 | { | ||
520 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
521 | struct drm_device *dev = crtc->dev; | ||
522 | struct radeon_device *rdev = dev->dev_private; | ||
523 | struct drm_encoder *encoder = NULL; | ||
524 | struct radeon_encoder *radeon_encoder = NULL; | ||
525 | u8 frev, crev; | ||
526 | int index; | ||
527 | union set_pixel_clock args; | ||
528 | u32 pll_clock = mode->clock; | ||
529 | u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; | ||
530 | struct radeon_pll *pll; | ||
531 | u32 adjusted_clock; | ||
532 | |||
533 | memset(&args, 0, sizeof(args)); | ||
534 | |||
535 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
536 | if (encoder->crtc == crtc) { | ||
537 | radeon_encoder = to_radeon_encoder(encoder); | ||
538 | break; | ||
539 | } | ||
540 | } | ||
541 | |||
542 | if (!radeon_encoder) | ||
543 | return; | ||
496 | 544 | ||
497 | if (radeon_crtc->crtc_id == 0) | 545 | if (radeon_crtc->crtc_id == 0) |
498 | pll = &rdev->clock.p1pll; | 546 | pll = &rdev->clock.p1pll; |
499 | else | 547 | else |
500 | pll = &rdev->clock.p2pll; | 548 | pll = &rdev->clock.p2pll; |
501 | 549 | ||
550 | /* adjust pixel clock as needed */ | ||
551 | adjusted_clock = atombios_adjust_pll(crtc, mode, pll); | ||
552 | |||
502 | if (ASIC_IS_AVIVO(rdev)) { | 553 | if (ASIC_IS_AVIVO(rdev)) { |
503 | if (radeon_new_pll) | 554 | if (radeon_new_pll) |
504 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, | 555 | radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, |
505 | &fb_div, &frac_fb_div, | 556 | &fb_div, &frac_fb_div, |
506 | &ref_div, &post_div, pll_flags); | 557 | &ref_div, &post_div); |
507 | else | 558 | else |
508 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, | 559 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, |
509 | &fb_div, &frac_fb_div, | 560 | &fb_div, &frac_fb_div, |
510 | &ref_div, &post_div, pll_flags); | 561 | &ref_div, &post_div); |
511 | } else | 562 | } else |
512 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, | 563 | radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, |
513 | &ref_div, &post_div, pll_flags); | 564 | &ref_div, &post_div); |
514 | 565 | ||
515 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); | 566 | index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); |
516 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, | 567 | atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, |
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
520 | case 1: | 571 | case 1: |
521 | switch (crev) { | 572 | switch (crev) { |
522 | case 1: | 573 | case 1: |
523 | spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; | 574 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); |
524 | spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); | 575 | args.v1.usRefDiv = cpu_to_le16(ref_div); |
525 | spc1_ptr->usRefDiv = cpu_to_le16(ref_div); | 576 | args.v1.usFbDiv = cpu_to_le16(fb_div); |
526 | spc1_ptr->usFbDiv = cpu_to_le16(fb_div); | 577 | args.v1.ucFracFbDiv = frac_fb_div; |
527 | spc1_ptr->ucFracFbDiv = frac_fb_div; | 578 | args.v1.ucPostDiv = post_div; |
528 | spc1_ptr->ucPostDiv = post_div; | 579 | args.v1.ucPpll = |
529 | spc1_ptr->ucPpll = | ||
530 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 580 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
531 | spc1_ptr->ucCRTC = radeon_crtc->crtc_id; | 581 | args.v1.ucCRTC = radeon_crtc->crtc_id; |
532 | spc1_ptr->ucRefDivSrc = 1; | 582 | args.v1.ucRefDivSrc = 1; |
533 | break; | 583 | break; |
534 | case 2: | 584 | case 2: |
535 | spc2_ptr = | 585 | args.v2.usPixelClock = cpu_to_le16(mode->clock / 10); |
536 | (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; | 586 | args.v2.usRefDiv = cpu_to_le16(ref_div); |
537 | spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); | 587 | args.v2.usFbDiv = cpu_to_le16(fb_div); |
538 | spc2_ptr->usRefDiv = cpu_to_le16(ref_div); | 588 | args.v2.ucFracFbDiv = frac_fb_div; |
539 | spc2_ptr->usFbDiv = cpu_to_le16(fb_div); | 589 | args.v2.ucPostDiv = post_div; |
540 | spc2_ptr->ucFracFbDiv = frac_fb_div; | 590 | args.v2.ucPpll = |
541 | spc2_ptr->ucPostDiv = post_div; | ||
542 | spc2_ptr->ucPpll = | ||
543 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 591 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
544 | spc2_ptr->ucCRTC = radeon_crtc->crtc_id; | 592 | args.v2.ucCRTC = radeon_crtc->crtc_id; |
545 | spc2_ptr->ucRefDivSrc = 1; | 593 | args.v2.ucRefDivSrc = 1; |
546 | break; | 594 | break; |
547 | case 3: | 595 | case 3: |
548 | if (!encoder) | 596 | args.v3.usPixelClock = cpu_to_le16(mode->clock / 10); |
549 | return; | 597 | args.v3.usRefDiv = cpu_to_le16(ref_div); |
550 | spc3_ptr = | 598 | args.v3.usFbDiv = cpu_to_le16(fb_div); |
551 | (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; | 599 | args.v3.ucFracFbDiv = frac_fb_div; |
552 | spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); | 600 | args.v3.ucPostDiv = post_div; |
553 | spc3_ptr->usRefDiv = cpu_to_le16(ref_div); | 601 | args.v3.ucPpll = |
554 | spc3_ptr->usFbDiv = cpu_to_le16(fb_div); | ||
555 | spc3_ptr->ucFracFbDiv = frac_fb_div; | ||
556 | spc3_ptr->ucPostDiv = post_div; | ||
557 | spc3_ptr->ucPpll = | ||
558 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; | 602 | radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; |
559 | spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); | 603 | args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2); |
560 | spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; | 604 | args.v3.ucTransmitterId = radeon_encoder->encoder_id; |
561 | spc3_ptr->ucEncoderMode = | 605 | args.v3.ucEncoderMode = |
562 | atombios_get_encoder_mode(encoder); | 606 | atombios_get_encoder_mode(encoder); |
563 | break; | 607 | break; |
564 | default: | 608 | default: |
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
571 | return; | 615 | return; |
572 | } | 616 | } |
573 | 617 | ||
574 | printk("executing set pll\n"); | ||
575 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 618 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
576 | } | 619 | } |
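The rework above replaces casting into SET_PIXEL_CLOCK_PS_ALLOCATION with a union that overlays every known parameter revision, so one zeroed buffer can be filled according to the frev/crev pair the BIOS reports. A trimmed sketch of the pattern (field names are the driver's; the v3 branch is just one of the cases handled in the hunk):

    union set_pixel_clock args;
    u8 frev, crev;

    memset(&args, 0, sizeof(args));
    atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
    if (frev == 1 && crev == 3) {
            args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
            args.v3.ucPostDiv = post_div;
            /* ...remaining v3 fields as in the hunk above... */
    }
    atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);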
577 | 620 | ||
578 | int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 621 | static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
579 | struct drm_framebuffer *old_fb) | 622 | struct drm_framebuffer *old_fb) |
580 | { | 623 | { |
581 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 624 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
582 | struct drm_device *dev = crtc->dev; | 625 | struct drm_device *dev = crtc->dev; |
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
706 | return 0; | 749 | return 0; |
707 | } | 750 | } |
708 | 751 | ||
752 | int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | ||
753 | struct drm_framebuffer *old_fb) | ||
754 | { | ||
755 | struct drm_device *dev = crtc->dev; | ||
756 | struct radeon_device *rdev = dev->dev_private; | ||
757 | |||
758 | if (ASIC_IS_AVIVO(rdev)) | ||
759 | return avivo_crtc_set_base(crtc, x, y, old_fb); | ||
760 | else | ||
761 | return radeon_crtc_set_base(crtc, x, y, old_fb); | ||
762 | } | ||
763 | |||
764 | /* properly set additional regs when using atombios */ | ||
765 | static void radeon_legacy_atom_fixup(struct drm_crtc *crtc) | ||
766 | { | ||
767 | struct drm_device *dev = crtc->dev; | ||
768 | struct radeon_device *rdev = dev->dev_private; | ||
769 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
770 | u32 disp_merge_cntl; | ||
771 | |||
772 | switch (radeon_crtc->crtc_id) { | ||
773 | case 0: | ||
774 | disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); | ||
775 | disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; | ||
776 | WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); | ||
777 | break; | ||
778 | case 1: | ||
779 | disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); | ||
780 | disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; | ||
781 | WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); | ||
782 | WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); | ||
783 | WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); | ||
784 | break; | ||
785 | } | ||
786 | } | ||
787 | |||
709 | int atombios_crtc_mode_set(struct drm_crtc *crtc, | 788 | int atombios_crtc_mode_set(struct drm_crtc *crtc, |
710 | struct drm_display_mode *mode, | 789 | struct drm_display_mode *mode, |
711 | struct drm_display_mode *adjusted_mode, | 790 | struct drm_display_mode *adjusted_mode, |
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
727 | else { | 806 | else { |
728 | if (radeon_crtc->crtc_id == 0) | 807 | if (radeon_crtc->crtc_id == 0) |
729 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); | 808 | atombios_set_crtc_dtd_timing(crtc, adjusted_mode); |
730 | radeon_crtc_set_base(crtc, x, y, old_fb); | 809 | atombios_crtc_set_base(crtc, x, y, old_fb); |
731 | radeon_legacy_atom_set_surface(crtc); | 810 | radeon_legacy_atom_fixup(crtc); |
732 | } | 811 | } |
733 | atombios_overscan_setup(crtc, mode, adjusted_mode); | 812 | atombios_overscan_setup(crtc, mode, adjusted_mode); |
734 | atombios_scaler_setup(crtc); | 813 | atombios_scaler_setup(crtc); |
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
746 | 825 | ||
747 | static void atombios_crtc_prepare(struct drm_crtc *crtc) | 826 | static void atombios_crtc_prepare(struct drm_crtc *crtc) |
748 | { | 827 | { |
749 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
750 | atombios_lock_crtc(crtc, 1); | 828 | atombios_lock_crtc(crtc, 1); |
829 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
751 | } | 830 | } |
752 | 831 | ||
753 | static void atombios_crtc_commit(struct drm_crtc *crtc) | 832 | static void atombios_crtc_commit(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 8760d66e058a..11c9a3fe6810 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -1504,6 +1504,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, | |||
1504 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); | 1504 | DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
1505 | return -EINVAL; | 1505 | return -EINVAL; |
1506 | } | 1506 | } |
1507 | track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0)); | ||
1507 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); | 1508 | track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); |
1508 | track->immd_dwords = pkt->count - 1; | 1509 | track->immd_dwords = pkt->count - 1; |
1509 | r = r100_cs_track_check(p->rdev, track); | 1510 | r = r100_cs_track_check(p->rdev, track); |
@@ -3399,9 +3400,7 @@ int r100_mc_init(struct radeon_device *rdev) | |||
3399 | if (rdev->flags & RADEON_IS_AGP) { | 3400 | if (rdev->flags & RADEON_IS_AGP) { |
3400 | r = radeon_agp_init(rdev); | 3401 | r = radeon_agp_init(rdev); |
3401 | if (r) { | 3402 | if (r) { |
3402 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | 3403 | radeon_agp_disable(rdev); |
3403 | rdev->flags &= ~RADEON_IS_AGP; | ||
3404 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
3405 | } else { | 3404 | } else { |
3406 | rdev->mc.gtt_location = rdev->mc.agp_base; | 3405 | rdev->mc.gtt_location = rdev->mc.agp_base; |
3407 | } | 3406 | } |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 20942127c46b..ff1e0cd608bf 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
371 | case 5: | 371 | case 5: |
372 | case 6: | 372 | case 6: |
373 | case 7: | 373 | case 7: |
374 | /* 1D/2D */ | ||
374 | track->textures[i].tex_coord_type = 0; | 375 | track->textures[i].tex_coord_type = 0; |
375 | break; | 376 | break; |
376 | case 1: | 377 | case 1: |
377 | track->textures[i].tex_coord_type = 1; | 378 | /* CUBE */ |
379 | track->textures[i].tex_coord_type = 2; | ||
378 | break; | 380 | break; |
379 | case 2: | 381 | case 2: |
380 | track->textures[i].tex_coord_type = 2; | 382 | /* 3D */ |
383 | track->textures[i].tex_coord_type = 1; | ||
381 | break; | 384 | break; |
382 | } | 385 | } |
383 | break; | 386 | break; |
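The r200 change above swaps two values that had been crossed when decoding the coordinate-type field of the texture format register. As a standalone sketch, with the field meanings taken from the new comments and the tracker's encoding from the diff itself:

    switch (coord_field) {
    case 1:  tex_coord_type = 2; break;   /* cube map   */
    case 2:  tex_coord_type = 1; break;   /* 3D texture */
    default: tex_coord_type = 0; break;   /* 1D / 2D    */
    }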
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 053404e71a9d..4526faaacca8 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -50,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev) | |||
50 | if (rdev->flags & RADEON_IS_AGP) { | 50 | if (rdev->flags & RADEON_IS_AGP) { |
51 | r = radeon_agp_init(rdev); | 51 | r = radeon_agp_init(rdev); |
52 | if (r) { | 52 | if (r) { |
53 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | 53 | radeon_agp_disable(rdev); |
54 | rdev->flags &= ~RADEON_IS_AGP; | ||
55 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
56 | } else { | 54 | } else { |
57 | rdev->mc.gtt_location = rdev->mc.agp_base; | 55 | rdev->mc.gtt_location = rdev->mc.agp_base; |
58 | } | 56 | } |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index f5ff3490929f..da9aa3c31bcf 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -624,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev) | |||
624 | fixed20_12 a; | 624 | fixed20_12 a; |
625 | u32 tmp; | 625 | u32 tmp; |
626 | int chansize, numchan; | 626 | int chansize, numchan; |
627 | int r; | ||
628 | 627 | ||
629 | /* Get VRAM information */ | 628 | /* Get VRAM information */ |
630 | rdev->mc.vram_is_ddr = true; | 629 | rdev->mc.vram_is_ddr = true; |
@@ -667,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev) | |||
667 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 666 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
668 | 667 | ||
669 | if (rdev->flags & RADEON_IS_AGP) { | 668 | if (rdev->flags & RADEON_IS_AGP) { |
670 | r = radeon_agp_init(rdev); | ||
671 | if (r) | ||
672 | return r; | ||
673 | /* gtt_size is setup by radeon_agp_init */ | 669 | /* gtt_size is setup by radeon_agp_init */ |
674 | rdev->mc.gtt_location = rdev->mc.agp_base; | 670 | rdev->mc.gtt_location = rdev->mc.agp_base; |
675 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; | 671 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; |
@@ -1958,14 +1954,17 @@ int r600_suspend(struct radeon_device *rdev) | |||
1958 | /* FIXME: we should wait for ring to be empty */ | 1954 | /* FIXME: we should wait for ring to be empty */ |
1959 | r600_cp_stop(rdev); | 1955 | r600_cp_stop(rdev); |
1960 | rdev->cp.ready = false; | 1956 | rdev->cp.ready = false; |
1957 | r600_irq_suspend(rdev); | ||
1961 | r600_wb_disable(rdev); | 1958 | r600_wb_disable(rdev); |
1962 | r600_pcie_gart_disable(rdev); | 1959 | r600_pcie_gart_disable(rdev); |
1963 | /* unpin shaders bo */ | 1960 | /* unpin shaders bo */ |
1964 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 1961 | if (rdev->r600_blit.shader_obj) { |
1965 | if (unlikely(r != 0)) | 1962 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
1966 | return r; | 1963 | if (!r) { |
1967 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | 1964 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
1968 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 1965 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
1966 | } | ||
1967 | } | ||
1969 | return 0; | 1968 | return 0; |
1970 | } | 1969 | } |
1971 | 1970 | ||
@@ -2026,6 +2025,11 @@ int r600_init(struct radeon_device *rdev) | |||
2026 | r = radeon_fence_driver_init(rdev); | 2025 | r = radeon_fence_driver_init(rdev); |
2027 | if (r) | 2026 | if (r) |
2028 | return r; | 2027 | return r; |
2028 | if (rdev->flags & RADEON_IS_AGP) { | ||
2029 | r = radeon_agp_init(rdev); | ||
2030 | if (r) | ||
2031 | radeon_agp_disable(rdev); | ||
2032 | } | ||
2029 | r = r600_mc_init(rdev); | 2033 | r = r600_mc_init(rdev); |
2030 | if (r) | 2034 | if (r) |
2031 | return r; | 2035 | return r; |
@@ -2060,13 +2064,14 @@ int r600_init(struct radeon_device *rdev) | |||
2060 | if (rdev->accel_working) { | 2064 | if (rdev->accel_working) { |
2061 | r = radeon_ib_pool_init(rdev); | 2065 | r = radeon_ib_pool_init(rdev); |
2062 | if (r) { | 2066 | if (r) { |
2063 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | 2067 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
2064 | rdev->accel_working = false; | ||
2065 | } | ||
2066 | r = r600_ib_test(rdev); | ||
2067 | if (r) { | ||
2068 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
2069 | rdev->accel_working = false; | 2068 | rdev->accel_working = false; |
2069 | } else { | ||
2070 | r = r600_ib_test(rdev); | ||
2071 | if (r) { | ||
2072 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | ||
2073 | rdev->accel_working = false; | ||
2074 | } | ||
2070 | } | 2075 | } |
2071 | } | 2076 | } |
2072 | 2077 | ||
@@ -2197,14 +2202,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
2197 | rb_bufsz = drm_order(ring_size / 4); | 2202 | rb_bufsz = drm_order(ring_size / 4); |
2198 | ring_size = (1 << rb_bufsz) * 4; | 2203 | ring_size = (1 << rb_bufsz) * 4; |
2199 | rdev->ih.ring_size = ring_size; | 2204 | rdev->ih.ring_size = ring_size; |
2200 | rdev->ih.align_mask = 4 - 1; | 2205 | rdev->ih.ptr_mask = rdev->ih.ring_size - 1; |
2206 | rdev->ih.rptr = 0; | ||
2201 | } | 2207 | } |
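Because r600_ih_ring_init() rounds the ring size up to a power of two, replacing align_mask with ptr_mask = ring_size - 1 lets both pointers wrap with a single AND instead of the old end-of-ring compare. A small standalone example with assumed numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ring_size = 64 * 1024;          /* hypothetical 64 KiB IH ring */
            uint32_t ptr_mask  = ring_size - 1;      /* valid because size is a power of two */
            uint32_t rptr      = ring_size - 16;     /* last 16-byte vector */

            rptr = (rptr + 16) & ptr_mask;           /* wraps to 0, no branch needed */
            printf("rptr after wrap: %u\n", rptr);   /* prints 0 */
            return 0;
    }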
2202 | 2208 | ||
2203 | static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) | 2209 | static int r600_ih_ring_alloc(struct radeon_device *rdev) |
2204 | { | 2210 | { |
2205 | int r; | 2211 | int r; |
2206 | 2212 | ||
2207 | rdev->ih.ring_size = ring_size; | ||
2208 | /* Allocate ring buffer */ | 2213 | /* Allocate ring buffer */ |
2209 | if (rdev->ih.ring_obj == NULL) { | 2214 | if (rdev->ih.ring_obj == NULL) { |
2210 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | 2215 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, |
@@ -2234,9 +2239,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) | |||
2234 | return r; | 2239 | return r; |
2235 | } | 2240 | } |
2236 | } | 2241 | } |
2237 | rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1; | ||
2238 | rdev->ih.rptr = 0; | ||
2239 | |||
2240 | return 0; | 2242 | return 0; |
2241 | } | 2243 | } |
2242 | 2244 | ||
@@ -2386,7 +2388,7 @@ int r600_irq_init(struct radeon_device *rdev) | |||
2386 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | 2388 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; |
2387 | 2389 | ||
2388 | /* allocate ring */ | 2390 | /* allocate ring */ |
2389 | ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); | 2391 | ret = r600_ih_ring_alloc(rdev); |
2390 | if (ret) | 2392 | if (ret) |
2391 | return ret; | 2393 | return ret; |
2392 | 2394 | ||
@@ -2449,10 +2451,15 @@ int r600_irq_init(struct radeon_device *rdev) | |||
2449 | return ret; | 2451 | return ret; |
2450 | } | 2452 | } |
2451 | 2453 | ||
2452 | void r600_irq_fini(struct radeon_device *rdev) | 2454 | void r600_irq_suspend(struct radeon_device *rdev) |
2453 | { | 2455 | { |
2454 | r600_disable_interrupts(rdev); | 2456 | r600_disable_interrupts(rdev); |
2455 | r600_rlc_stop(rdev); | 2457 | r600_rlc_stop(rdev); |
2458 | } | ||
2459 | |||
2460 | void r600_irq_fini(struct radeon_device *rdev) | ||
2461 | { | ||
2462 | r600_irq_suspend(rdev); | ||
2456 | r600_ih_ring_fini(rdev); | 2463 | r600_ih_ring_fini(rdev); |
2457 | } | 2464 | } |
2458 | 2465 | ||
@@ -2467,8 +2474,12 @@ int r600_irq_set(struct radeon_device *rdev) | |||
2467 | return -EINVAL; | 2474 | return -EINVAL; |
2468 | } | 2475 | } |
2469 | /* don't enable anything if the ih is disabled */ | 2476 | /* don't enable anything if the ih is disabled */ |
2470 | if (!rdev->ih.enabled) | 2477 | if (!rdev->ih.enabled) { |
2478 | r600_disable_interrupts(rdev); | ||
2479 | /* force the active interrupt state to all disabled */ | ||
2480 | r600_disable_interrupt_state(rdev); | ||
2471 | return 0; | 2481 | return 0; |
2482 | } | ||
2472 | 2483 | ||
2473 | if (ASIC_IS_DCE3(rdev)) { | 2484 | if (ASIC_IS_DCE3(rdev)) { |
2474 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | 2485 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
@@ -2638,16 +2649,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
2638 | wptr = RREG32(IH_RB_WPTR); | 2649 | wptr = RREG32(IH_RB_WPTR); |
2639 | 2650 | ||
2640 | if (wptr & RB_OVERFLOW) { | 2651 | if (wptr & RB_OVERFLOW) { |
2641 | WARN_ON(1); | 2652 | /* When a ring buffer overflow happens, start parsing interrupts |
2642 | /* XXX deal with overflow */ | 2653 | * from the last vector that was not overwritten (wptr + 16). |
2643 | DRM_ERROR("IH RB overflow\n"); | 2654 | * Hopefully this lets us catch up. |
2655 | */ | ||
2656 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | ||
2657 | wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); | ||
2658 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | ||
2644 | tmp = RREG32(IH_RB_CNTL); | 2659 | tmp = RREG32(IH_RB_CNTL); |
2645 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | 2660 | tmp |= IH_WPTR_OVERFLOW_CLEAR; |
2646 | WREG32(IH_RB_CNTL, tmp); | 2661 | WREG32(IH_RB_CNTL, tmp); |
2647 | } | 2662 | } |
2648 | wptr = wptr & WPTR_OFFSET_MASK; | 2663 | return (wptr & rdev->ih.ptr_mask); |
2649 | |||
2650 | return wptr; | ||
2651 | } | 2664 | } |
2652 | 2665 | ||
2653 | /* r600 IV Ring | 2666 | /* r600 IV Ring |
@@ -2683,12 +2696,13 @@ int r600_irq_process(struct radeon_device *rdev) | |||
2683 | u32 wptr = r600_get_ih_wptr(rdev); | 2696 | u32 wptr = r600_get_ih_wptr(rdev); |
2684 | u32 rptr = rdev->ih.rptr; | 2697 | u32 rptr = rdev->ih.rptr; |
2685 | u32 src_id, src_data; | 2698 | u32 src_id, src_data; |
2686 | u32 last_entry = rdev->ih.ring_size - 16; | ||
2687 | u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; | 2699 | u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; |
2688 | unsigned long flags; | 2700 | unsigned long flags; |
2689 | bool queue_hotplug = false; | 2701 | bool queue_hotplug = false; |
2690 | 2702 | ||
2691 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | 2703 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); |
2704 | if (!rdev->ih.enabled) | ||
2705 | return IRQ_NONE; | ||
2692 | 2706 | ||
2693 | spin_lock_irqsave(&rdev->ih.lock, flags); | 2707 | spin_lock_irqsave(&rdev->ih.lock, flags); |
2694 | 2708 | ||
@@ -2817,10 +2831,8 @@ restart_ih: | |||
2817 | } | 2831 | } |
2818 | 2832 | ||
2819 | /* wptr/rptr are in bytes! */ | 2833 | /* wptr/rptr are in bytes! */ |
2820 | if (rptr == last_entry) | 2834 | rptr += 16; |
2821 | rptr = 0; | 2835 | rptr &= rdev->ih.ptr_mask; |
2822 | else | ||
2823 | rptr += 16; | ||
2824 | } | 2836 | } |
2825 | /* make sure wptr hasn't changed while processing */ | 2837 | /* make sure wptr hasn't changed while processing */ |
2826 | wptr = r600_get_ih_wptr(rdev); | 2838 | wptr = r600_get_ih_wptr(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 8787ea89dc6e..2bedce477a97 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -512,14 +512,16 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
512 | { | 512 | { |
513 | int r; | 513 | int r; |
514 | 514 | ||
515 | if (rdev->r600_blit.shader_obj == NULL) | ||
516 | return; | ||
517 | /* If we can't reserve the bo, unref should be enough to destroy | ||
518 | * it when it becomes idle. | ||
519 | */ | ||
515 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 520 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
516 | if (unlikely(r != 0)) { | 521 | if (!r) { |
517 | dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); | 522 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
518 | goto out_unref; | 523 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
519 | } | 524 | } |
520 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
521 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
522 | out_unref: | ||
523 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | 525 | radeon_bo_unref(&rdev->r600_blit.shader_obj); |
524 | } | 526 | } |
525 | 527 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 44060b92d9e6..e4c45ec16507 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
36 | typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); | 36 | typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); |
37 | static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; | 37 | static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; |
38 | 38 | ||
39 | struct r600_cs_track { | ||
40 | u32 cb_color0_base_last; | ||
41 | }; | ||
42 | |||
39 | /** | 43 | /** |
40 | * r600_cs_packet_parse() - parse cp packet and point ib index to next packet | 44 | * r600_cs_packet_parse() - parse cp packet and point ib index to next packet |
41 | * @parser: parser structure holding parsing context. | 45 | * @parser: parser structure holding parsing context. |
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
177 | } | 181 | } |
178 | 182 | ||
179 | /** | 183 | /** |
184 | * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc | ||
185 | * @parser: parser structure holding parsing context. | ||
186 | * | ||
187 | * Check whether the next packet is a relocation packet3 (a NOP carrying | ||
188 | * the relocation index); return 1 if it is, 0 otherwise. | ||
189 | **/ | ||
190 | static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) | ||
191 | { | ||
192 | struct radeon_cs_packet p3reloc; | ||
193 | int r; | ||
194 | |||
195 | r = r600_cs_packet_parse(p, &p3reloc, p->idx); | ||
196 | if (r) { | ||
197 | return 0; | ||
198 | } | ||
199 | if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { | ||
200 | return 0; | ||
201 | } | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | /** | ||
180 | * r600_cs_packet_next_vline() - parse userspace VLINE packet | 206 | * r600_cs_packet_next_vline() - parse userspace VLINE packet |
181 | * @parser: parser structure holding parsing context. | 207 | * @parser: parser structure holding parsing context. |
182 | * | 208 | * |
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
337 | struct radeon_cs_packet *pkt) | 363 | struct radeon_cs_packet *pkt) |
338 | { | 364 | { |
339 | struct radeon_cs_reloc *reloc; | 365 | struct radeon_cs_reloc *reloc; |
366 | struct r600_cs_track *track; | ||
340 | volatile u32 *ib; | 367 | volatile u32 *ib; |
341 | unsigned idx; | 368 | unsigned idx; |
342 | unsigned i; | 369 | unsigned i; |
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
344 | int r; | 371 | int r; |
345 | u32 idx_value; | 372 | u32 idx_value; |
346 | 373 | ||
374 | track = (struct r600_cs_track *)p->track; | ||
347 | ib = p->ib->ptr; | 375 | ib = p->ib->ptr; |
348 | idx = pkt->idx + 1; | 376 | idx = pkt->idx + 1; |
349 | idx_value = radeon_get_ib_value(p, idx); | 377 | idx_value = radeon_get_ib_value(p, idx); |
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
503 | for (i = 0; i < pkt->count; i++) { | 531 | for (i = 0; i < pkt->count; i++) { |
504 | reg = start_reg + (4 * i); | 532 | reg = start_reg + (4 * i); |
505 | switch (reg) { | 533 | switch (reg) { |
534 | /* These registers were added late; there is userspace | ||
535 | * which does provide relocations for them but sets a | ||
536 | * 0 offset. In order to avoid breaking old userspace | ||
537 | * we detect this and set the address to point to the last | ||
538 | * CB_COLOR0_BASE. Note that if userspace doesn't set | ||
539 | * CB_COLOR0_BASE before these registers we will report | ||
540 | * an error. Old userspace always sets CB_COLOR0_BASE | ||
541 | * before any of these. | ||
542 | */ | ||
543 | case R_0280E0_CB_COLOR0_FRAG: | ||
544 | case R_0280E4_CB_COLOR1_FRAG: | ||
545 | case R_0280E8_CB_COLOR2_FRAG: | ||
546 | case R_0280EC_CB_COLOR3_FRAG: | ||
547 | case R_0280F0_CB_COLOR4_FRAG: | ||
548 | case R_0280F4_CB_COLOR5_FRAG: | ||
549 | case R_0280F8_CB_COLOR6_FRAG: | ||
550 | case R_0280FC_CB_COLOR7_FRAG: | ||
551 | case R_0280C0_CB_COLOR0_TILE: | ||
552 | case R_0280C4_CB_COLOR1_TILE: | ||
553 | case R_0280C8_CB_COLOR2_TILE: | ||
554 | case R_0280CC_CB_COLOR3_TILE: | ||
555 | case R_0280D0_CB_COLOR4_TILE: | ||
556 | case R_0280D4_CB_COLOR5_TILE: | ||
557 | case R_0280D8_CB_COLOR6_TILE: | ||
558 | case R_0280DC_CB_COLOR7_TILE: | ||
559 | if (!r600_cs_packet_next_is_pkt3_nop(p)) { | ||
560 | if (!track->cb_color0_base_last) { | ||
561 | dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); | ||
562 | return -EINVAL; | ||
563 | } | ||
564 | ib[idx+1+i] = track->cb_color0_base_last; | ||
565 | printk_once(KERN_WARNING "radeon: You have old & broken userspace, " | ||
566 | "please consider updating mesa & xf86-video-ati\n"); | ||
567 | } else { | ||
568 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
569 | if (r) { | ||
570 | dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); | ||
571 | return -EINVAL; | ||
572 | } | ||
573 | ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
574 | } | ||
575 | break; | ||
506 | case DB_DEPTH_BASE: | 576 | case DB_DEPTH_BASE: |
507 | case DB_HTILE_DATA_BASE: | 577 | case DB_HTILE_DATA_BASE: |
508 | case CB_COLOR0_BASE: | 578 | case CB_COLOR0_BASE: |
579 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
580 | if (r) { | ||
581 | DRM_ERROR("bad SET_CONTEXT_REG " | ||
582 | "0x%04X\n", reg); | ||
583 | return -EINVAL; | ||
584 | } | ||
585 | ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
586 | track->cb_color0_base_last = ib[idx+1+i]; | ||
587 | break; | ||
509 | case CB_COLOR1_BASE: | 588 | case CB_COLOR1_BASE: |
510 | case CB_COLOR2_BASE: | 589 | case CB_COLOR2_BASE: |
511 | case CB_COLOR3_BASE: | 590 | case CB_COLOR3_BASE: |
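The FRAG/TILE handling above only patches in the remembered CB_COLOR0_BASE when no relocation NOP follows the register write, which is how old userspace is detected. A condensed sketch of that decision, with error reporting trimmed:

    /* Sketch, simplified from the hunk above */
    if (!r600_cs_packet_next_is_pkt3_nop(p)) {
            /* old userspace: no reloc supplied, reuse the last validated base */
            if (!track->cb_color0_base_last)
                    return -EINVAL;
            ib[idx + 1 + i] = track->cb_color0_base_last;
    } else {
            /* new userspace: consume the reloc and relocate normally */
            r = r600_cs_packet_next_reloc(p, &reloc);
            if (r)
                    return -EINVAL;
            ib[idx + 1 + i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
    }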
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
678 | int r600_cs_parse(struct radeon_cs_parser *p) | 757 | int r600_cs_parse(struct radeon_cs_parser *p) |
679 | { | 758 | { |
680 | struct radeon_cs_packet pkt; | 759 | struct radeon_cs_packet pkt; |
760 | struct r600_cs_track *track; | ||
681 | int r; | 761 | int r; |
682 | 762 | ||
763 | track = kzalloc(sizeof(*track), GFP_KERNEL); | ||
764 | p->track = track; | ||
683 | do { | 765 | do { |
684 | r = r600_cs_packet_parse(p, &pkt, p->idx); | 766 | r = r600_cs_packet_parse(p, &pkt, p->idx); |
685 | if (r) { | 767 | if (r) { |
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, | |||
757 | /* initialize parser */ | 839 | /* initialize parser */ |
758 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); | 840 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); |
759 | parser.filp = filp; | 841 | parser.filp = filp; |
842 | parser.dev = &dev->pdev->dev; | ||
760 | parser.rdev = NULL; | 843 | parser.rdev = NULL; |
761 | parser.family = family; | 844 | parser.family = family; |
762 | parser.ib = &fake_ib; | 845 | parser.ib = &fake_ib; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 05894edadab4..30480881aed1 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -882,4 +882,29 @@ | |||
882 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) | 882 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) |
883 | 883 | ||
884 | #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | 884 | #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 |
885 | |||
886 | #define R_0280E0_CB_COLOR0_FRAG 0x0280E0 | ||
887 | #define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) | ||
888 | #define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) | ||
889 | #define C_0280E0_BASE_256B 0x00000000 | ||
890 | #define R_0280E4_CB_COLOR1_FRAG 0x0280E4 | ||
891 | #define R_0280E8_CB_COLOR2_FRAG 0x0280E8 | ||
892 | #define R_0280EC_CB_COLOR3_FRAG 0x0280EC | ||
893 | #define R_0280F0_CB_COLOR4_FRAG 0x0280F0 | ||
894 | #define R_0280F4_CB_COLOR5_FRAG 0x0280F4 | ||
895 | #define R_0280F8_CB_COLOR6_FRAG 0x0280F8 | ||
896 | #define R_0280FC_CB_COLOR7_FRAG 0x0280FC | ||
897 | #define R_0280C0_CB_COLOR0_TILE 0x0280C0 | ||
898 | #define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) | ||
899 | #define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) | ||
900 | #define C_0280C0_BASE_256B 0x00000000 | ||
901 | #define R_0280C4_CB_COLOR1_TILE 0x0280C4 | ||
902 | #define R_0280C8_CB_COLOR2_TILE 0x0280C8 | ||
903 | #define R_0280CC_CB_COLOR3_TILE 0x0280CC | ||
904 | #define R_0280D0_CB_COLOR4_TILE 0x0280D0 | ||
905 | #define R_0280D4_CB_COLOR5_TILE 0x0280D4 | ||
906 | #define R_0280D8_CB_COLOR6_TILE 0x0280D8 | ||
907 | #define R_0280DC_CB_COLOR7_TILE 0x0280DC | ||
908 | |||
909 | |||
885 | #endif | 910 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index eb5f99b9469d..f7df1a7e4413 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -410,7 +410,6 @@ struct r600_ih { | |||
410 | unsigned wptr_old; | 410 | unsigned wptr_old; |
411 | unsigned ring_size; | 411 | unsigned ring_size; |
412 | uint64_t gpu_addr; | 412 | uint64_t gpu_addr; |
413 | uint32_t align_mask; | ||
414 | uint32_t ptr_mask; | 413 | uint32_t ptr_mask; |
415 | spinlock_t lock; | 414 | spinlock_t lock; |
416 | bool enabled; | 415 | bool enabled; |
@@ -465,6 +464,7 @@ struct radeon_cs_chunk { | |||
465 | }; | 464 | }; |
466 | 465 | ||
467 | struct radeon_cs_parser { | 466 | struct radeon_cs_parser { |
467 | struct device *dev; | ||
468 | struct radeon_device *rdev; | 468 | struct radeon_device *rdev; |
469 | struct drm_file *filp; | 469 | struct drm_file *filp; |
470 | /* chunks */ | 470 | /* chunks */ |
@@ -847,7 +847,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
847 | 847 | ||
848 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | 848 | static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
849 | { | 849 | { |
850 | if (reg < 0x10000) | 850 | if (reg < rdev->rmmio_size) |
851 | return readl(((void __iomem *)rdev->rmmio) + reg); | 851 | return readl(((void __iomem *)rdev->rmmio) + reg); |
852 | else { | 852 | else { |
853 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | 853 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
@@ -857,7 +857,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) | |||
857 | 857 | ||
858 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 858 | static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
859 | { | 859 | { |
860 | if (reg < 0x10000) | 860 | if (reg < rdev->rmmio_size) |
861 | writel(v, ((void __iomem *)rdev->rmmio) + reg); | 861 | writel(v, ((void __iomem *)rdev->rmmio) + reg); |
862 | else { | 862 | else { |
863 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); | 863 | writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
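Using rdev->rmmio_size instead of the hard-coded 0x10000 keeps direct readl/writel accesses within whatever aperture was actually mapped; anything beyond it goes through the MM_INDEX/MM_DATA pair. A compact sketch of the read side (RADEON_MM_DATA is the driver's existing data register; the helper name here is illustrative only):

    static uint32_t mmio_read(struct radeon_device *rdev, uint32_t reg)
    {
            if (reg < rdev->rmmio_size)          /* inside the mapped window */
                    return readl(((void __iomem *)rdev->rmmio) + reg);
            /* indexed access for registers beyond the window */
            writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
            return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
    }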
@@ -1017,6 +1017,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
1017 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) | 1017 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) |
1018 | 1018 | ||
1019 | /* Common functions */ | 1019 | /* Common functions */ |
1020 | /* AGP */ | ||
1021 | extern void radeon_agp_disable(struct radeon_device *rdev); | ||
1020 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); | 1022 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); |
1021 | extern int radeon_modeset_init(struct radeon_device *rdev); | 1023 | extern int radeon_modeset_init(struct radeon_device *rdev); |
1022 | extern void radeon_modeset_fini(struct radeon_device *rdev); | 1024 | extern void radeon_modeset_fini(struct radeon_device *rdev); |
@@ -1160,7 +1162,8 @@ extern int r600_irq_init(struct radeon_device *rdev); | |||
1160 | extern void r600_irq_fini(struct radeon_device *rdev); | 1162 | extern void r600_irq_fini(struct radeon_device *rdev); |
1161 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | 1163 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); |
1162 | extern int r600_irq_set(struct radeon_device *rdev); | 1164 | extern int r600_irq_set(struct radeon_device *rdev); |
1163 | 1165 | extern void r600_irq_suspend(struct radeon_device *rdev); | |
1166 | /* r600 audio */ | ||
1164 | extern int r600_audio_init(struct radeon_device *rdev); | 1167 | extern int r600_audio_init(struct radeon_device *rdev); |
1165 | extern int r600_audio_tmds_index(struct drm_encoder *encoder); | 1168 | extern int r600_audio_tmds_index(struct drm_encoder *encoder); |
1166 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); | 1169 | extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); |
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 220f454ea9fa..c9ad7f5cc1ac 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
@@ -133,6 +133,13 @@ int radeon_agp_init(struct radeon_device *rdev) | |||
133 | bool is_v3; | 133 | bool is_v3; |
134 | int ret; | 134 | int ret; |
135 | 135 | ||
136 | if (rdev->ddev->agp->agp_info.aper_size < 32) { | ||
137 | dev_warn(rdev->dev, "AGP aperture to small (%dM) " | ||
138 | "need at least 32M, disabling AGP\n", | ||
139 | rdev->ddev->agp->agp_info.aper_size); | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | |||
136 | /* Acquire AGP. */ | 143 | /* Acquire AGP. */ |
137 | if (!rdev->ddev->agp->acquired) { | 144 | if (!rdev->ddev->agp->acquired) { |
138 | ret = drm_agp_acquire(rdev->ddev); | 145 | ret = drm_agp_acquire(rdev->ddev); |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 812f24dbc2a8..73c4405bf42f 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) | |||
56 | else if (post_div == 3) | 56 | else if (post_div == 3) |
57 | sclk >>= 2; | 57 | sclk >>= 2; |
58 | else if (post_div == 4) | 58 | else if (post_div == 4) |
59 | sclk >>= 4; | 59 | sclk >>= 3; |
60 | 60 | ||
61 | return sclk; | 61 | return sclk; |
62 | } | 62 | } |
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) | |||
86 | else if (post_div == 3) | 86 | else if (post_div == 3) |
87 | mclk >>= 2; | 87 | mclk >>= 2; |
88 | else if (post_div == 4) | 88 | else if (post_div == 4) |
89 | mclk >>= 4; | 89 | mclk >>= 3; |
90 | 90 | ||
91 | return mclk; | 91 | return mclk; |
92 | } | 92 | } |
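The one-character change above corrects the divider applied when the post-divider field reads 4: per the corrected code that value selects a divide-by-8, so the clock is shifted right by 3, not 4. A quick numeric check with an assumed VCO frequency:

    #include <stdio.h>

    int main(void)
    {
            unsigned int sclk = 80000;            /* 800.00 MHz in 10 kHz units (assumed) */

            printf("fixed:  %u\n", sclk >> 3);    /* 10000 -> 100.00 MHz (divide by 8)  */
            printf("broken: %u\n", sclk >> 4);    /*  5000 ->  50.00 MHz (old behaviour) */
            return 0;
    }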
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 65590a0f1d93..1496cb8658ef 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -231,6 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
231 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); | 231 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); |
232 | parser.filp = filp; | 232 | parser.filp = filp; |
233 | parser.rdev = rdev; | 233 | parser.rdev = rdev; |
234 | parser.dev = rdev->dev; | ||
234 | r = radeon_cs_parser_init(&parser, data); | 235 | r = radeon_cs_parser_init(&parser, data); |
235 | if (r) { | 236 | if (r) { |
236 | DRM_ERROR("Failed to initialize parser !\n"); | 237 | DRM_ERROR("Failed to initialize parser !\n"); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0c51f8e46613..768b1509fa03 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev) | |||
544 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; | 544 | rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; |
545 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; | 545 | rdev->asic->gart_set_page = &r100_pci_gart_set_page; |
546 | } | 546 | } |
547 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
547 | } | 548 | } |
548 | 549 | ||
549 | void radeon_check_arguments(struct radeon_device *rdev) | 550 | void radeon_check_arguments(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0ec491ead2ff..6a92f994cc26 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -357,7 +357,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
357 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 357 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
358 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | 358 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { |
359 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | 359 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
360 | if (dig->dp_i2c_bus) | 360 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || |
361 | dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) | ||
361 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); | 362 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); |
362 | } | 363 | } |
363 | if (!radeon_connector->ddc_bus) | 364 | if (!radeon_connector->ddc_bus) |
@@ -410,11 +411,12 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
410 | uint32_t *fb_div_p, | 411 | uint32_t *fb_div_p, |
411 | uint32_t *frac_fb_div_p, | 412 | uint32_t *frac_fb_div_p, |
412 | uint32_t *ref_div_p, | 413 | uint32_t *ref_div_p, |
413 | uint32_t *post_div_p, | 414 | uint32_t *post_div_p) |
414 | int flags) | ||
415 | { | 415 | { |
416 | uint32_t min_ref_div = pll->min_ref_div; | 416 | uint32_t min_ref_div = pll->min_ref_div; |
417 | uint32_t max_ref_div = pll->max_ref_div; | 417 | uint32_t max_ref_div = pll->max_ref_div; |
418 | uint32_t min_post_div = pll->min_post_div; | ||
419 | uint32_t max_post_div = pll->max_post_div; | ||
418 | uint32_t min_fractional_feed_div = 0; | 420 | uint32_t min_fractional_feed_div = 0; |
419 | uint32_t max_fractional_feed_div = 0; | 421 | uint32_t max_fractional_feed_div = 0; |
420 | uint32_t best_vco = pll->best_vco; | 422 | uint32_t best_vco = pll->best_vco; |
@@ -430,7 +432,7 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
430 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); | 432 | DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); |
431 | freq = freq * 1000; | 433 | freq = freq * 1000; |
432 | 434 | ||
433 | if (flags & RADEON_PLL_USE_REF_DIV) | 435 | if (pll->flags & RADEON_PLL_USE_REF_DIV) |
434 | min_ref_div = max_ref_div = pll->reference_div; | 436 | min_ref_div = max_ref_div = pll->reference_div; |
435 | else { | 437 | else { |
436 | while (min_ref_div < max_ref_div-1) { | 438 | while (min_ref_div < max_ref_div-1) { |
@@ -445,19 +447,22 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
445 | } | 447 | } |
446 | } | 448 | } |
447 | 449 | ||
448 | if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { | 450 | if (pll->flags & RADEON_PLL_USE_POST_DIV) |
451 | min_post_div = max_post_div = pll->post_div; | ||
452 | |||
453 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | ||
449 | min_fractional_feed_div = pll->min_frac_feedback_div; | 454 | min_fractional_feed_div = pll->min_frac_feedback_div; |
450 | max_fractional_feed_div = pll->max_frac_feedback_div; | 455 | max_fractional_feed_div = pll->max_frac_feedback_div; |
451 | } | 456 | } |
452 | 457 | ||
453 | for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { | 458 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { |
454 | uint32_t ref_div; | 459 | uint32_t ref_div; |
455 | 460 | ||
456 | if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 461 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
457 | continue; | 462 | continue; |
458 | 463 | ||
459 | /* legacy radeons only have a few post_divs */ | 464 | /* legacy radeons only have a few post_divs */ |
460 | if (flags & RADEON_PLL_LEGACY) { | 465 | if (pll->flags & RADEON_PLL_LEGACY) { |
461 | if ((post_div == 5) || | 466 | if ((post_div == 5) || |
462 | (post_div == 7) || | 467 | (post_div == 7) || |
463 | (post_div == 9) || | 468 | (post_div == 9) || |
@@ -504,7 +509,7 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
504 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; | 509 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; |
505 | current_freq = radeon_div(tmp, ref_div * post_div); | 510 | current_freq = radeon_div(tmp, ref_div * post_div); |
506 | 511 | ||
507 | if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { | 512 | if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { |
508 | error = freq - current_freq; | 513 | error = freq - current_freq; |
509 | error = error < 0 ? 0xffffffff : error; | 514 | error = error < 0 ? 0xffffffff : error; |
510 | } else | 515 | } else |
@@ -531,12 +536,12 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
531 | best_freq = current_freq; | 536 | best_freq = current_freq; |
532 | best_error = error; | 537 | best_error = error; |
533 | best_vco_diff = vco_diff; | 538 | best_vco_diff = vco_diff; |
534 | } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || | 539 | } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || |
535 | ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || | 540 | ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || |
536 | ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || | 541 | ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || |
537 | ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || | 542 | ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || |
538 | ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || | 543 | ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || |
539 | ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { | 544 | ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { |
540 | best_post_div = post_div; | 545 | best_post_div = post_div; |
541 | best_ref_div = ref_div; | 546 | best_ref_div = ref_div; |
542 | best_feedback_div = feedback_div; | 547 | best_feedback_div = feedback_div; |
@@ -572,8 +577,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
572 | uint32_t *fb_div_p, | 577 | uint32_t *fb_div_p, |
573 | uint32_t *frac_fb_div_p, | 578 | uint32_t *frac_fb_div_p, |
574 | uint32_t *ref_div_p, | 579 | uint32_t *ref_div_p, |
575 | uint32_t *post_div_p, | 580 | uint32_t *post_div_p) |
576 | int flags) | ||
577 | { | 581 | { |
578 | fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; | 582 | fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; |
579 | fixed20_12 pll_out_max, pll_out_min; | 583 | fixed20_12 pll_out_max, pll_out_min; |
@@ -667,7 +671,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
667 | radeonfb_remove(dev, fb); | 671 | radeonfb_remove(dev, fb); |
668 | 672 | ||
669 | if (radeon_fb->obj) { | 673 | if (radeon_fb->obj) { |
670 | radeon_gem_object_unpin(radeon_fb->obj); | ||
671 | mutex_lock(&dev->struct_mutex); | 674 | mutex_lock(&dev->struct_mutex); |
672 | drm_gem_object_unreference(radeon_fb->obj); | 675 | drm_gem_object_unreference(radeon_fb->obj); |
673 | mutex_unlock(&dev->struct_mutex); | 676 | mutex_unlock(&dev->struct_mutex); |
@@ -715,7 +718,11 @@ radeon_user_framebuffer_create(struct drm_device *dev, | |||
715 | struct drm_gem_object *obj; | 718 | struct drm_gem_object *obj; |
716 | 719 | ||
717 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); | 720 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); |
718 | 721 | if (obj == NULL) { | |
722 | dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " | ||
723 | "can't create framebuffer\n", mode_cmd->handle); | ||
724 | return NULL; | ||
725 | } | ||
719 | return radeon_framebuffer_create(dev, mode_cmd, obj); | 726 | return radeon_framebuffer_create(dev, mode_cmd, obj); |
720 | } | 727 | } |
721 | 728 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index cc27485a07ad..b6d8081e1246 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
339 | } | 339 | } |
340 | } | 340 | } |
341 | 341 | ||
342 | /* properly set crtc bpp when using atombios */ | ||
343 | void radeon_legacy_atom_set_surface(struct drm_crtc *crtc) | ||
344 | { | ||
345 | struct drm_device *dev = crtc->dev; | ||
346 | struct radeon_device *rdev = dev->dev_private; | ||
347 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
348 | int format; | ||
349 | uint32_t crtc_gen_cntl; | ||
350 | uint32_t disp_merge_cntl; | ||
351 | uint32_t crtc_pitch; | ||
352 | |||
353 | switch (crtc->fb->bits_per_pixel) { | ||
354 | case 8: | ||
355 | format = 2; | ||
356 | break; | ||
357 | case 15: /* 555 */ | ||
358 | format = 3; | ||
359 | break; | ||
360 | case 16: /* 565 */ | ||
361 | format = 4; | ||
362 | break; | ||
363 | case 24: /* RGB */ | ||
364 | format = 5; | ||
365 | break; | ||
366 | case 32: /* xRGB */ | ||
367 | format = 6; | ||
368 | break; | ||
369 | default: | ||
370 | return; | ||
371 | } | ||
372 | |||
373 | crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + | ||
374 | ((crtc->fb->bits_per_pixel * 8) - 1)) / | ||
375 | (crtc->fb->bits_per_pixel * 8)); | ||
376 | crtc_pitch |= crtc_pitch << 16; | ||
377 | |||
378 | WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); | ||
379 | |||
380 | switch (radeon_crtc->crtc_id) { | ||
381 | case 0: | ||
382 | disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); | ||
383 | disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; | ||
384 | WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); | ||
385 | |||
386 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff; | ||
387 | crtc_gen_cntl |= (format << 8); | ||
388 | crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN; | ||
389 | WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); | ||
390 | break; | ||
391 | case 1: | ||
392 | disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); | ||
393 | disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; | ||
394 | WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); | ||
395 | |||
396 | crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff; | ||
397 | crtc_gen_cntl |= (format << 8); | ||
398 | WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl); | ||
399 | WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); | ||
400 | WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 342 | int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
406 | struct drm_framebuffer *old_fb) | 343 | struct drm_framebuffer *old_fb) |
407 | { | 344 | { |
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
755 | uint32_t post_divider = 0; | 692 | uint32_t post_divider = 0; |
756 | uint32_t freq = 0; | 693 | uint32_t freq = 0; |
757 | uint8_t pll_gain; | 694 | uint8_t pll_gain; |
758 | int pll_flags = RADEON_PLL_LEGACY; | ||
759 | bool use_bios_divs = false; | 695 | bool use_bios_divs = false; |
760 | /* PLL registers */ | 696 | /* PLL registers */ |
761 | uint32_t pll_ref_div = 0; | 697 | uint32_t pll_ref_div = 0; |
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
789 | else | 725 | else |
790 | pll = &rdev->clock.p1pll; | 726 | pll = &rdev->clock.p1pll; |
791 | 727 | ||
728 | pll->flags = RADEON_PLL_LEGACY; | ||
729 | |||
792 | if (mode->clock > 200000) /* range limits??? */ | 730 | if (mode->clock > 200000) /* range limits??? */ |
793 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 731 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
794 | else | 732 | else |
795 | pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 733 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
796 | 734 | ||
797 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 735 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
798 | if (encoder->crtc == crtc) { | 736 | if (encoder->crtc == crtc) { |
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
804 | } | 742 | } |
805 | 743 | ||
806 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 744 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
807 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | 745 | pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; |
808 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { | 746 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { |
809 | if (!rdev->is_atom_bios) { | 747 | if (!rdev->is_atom_bios) { |
810 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 748 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
819 | } | 757 | } |
820 | } | 758 | } |
821 | } | 759 | } |
822 | pll_flags |= RADEON_PLL_USE_REF_DIV; | 760 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
823 | } | 761 | } |
824 | } | 762 | } |
825 | } | 763 | } |
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
829 | if (!use_bios_divs) { | 767 | if (!use_bios_divs) { |
830 | radeon_compute_pll(pll, mode->clock, | 768 | radeon_compute_pll(pll, mode->clock, |
831 | &freq, &feedback_div, &frac_fb_div, | 769 | &freq, &feedback_div, &frac_fb_div, |
832 | &reference_div, &post_divider, | 770 | &reference_div, &post_divider); |
833 | pll_flags); | ||
834 | 771 | ||
835 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { | 772 | for (post_div = &post_divs[0]; post_div->divider; ++post_div) { |
836 | if (post_div->divider == post_divider) | 773 | if (post_div->divider == post_divider) |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 91cb041cb40d..96b851f92f4c 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -125,16 +125,24 @@ struct radeon_tmds_pll { | |||
125 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) | 125 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) |
126 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | 126 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
127 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | 127 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
128 | #define RADEON_PLL_USE_POST_DIV (1 << 12) | ||
128 | 129 | ||
129 | struct radeon_pll { | 130 | struct radeon_pll { |
130 | uint16_t reference_freq; | 131 | /* reference frequency */ |
131 | uint16_t reference_div; | 132 | uint32_t reference_freq; |
133 | |||
134 | /* fixed dividers */ | ||
135 | uint32_t reference_div; | ||
136 | uint32_t post_div; | ||
137 | |||
138 | /* pll in/out limits */ | ||
132 | uint32_t pll_in_min; | 139 | uint32_t pll_in_min; |
133 | uint32_t pll_in_max; | 140 | uint32_t pll_in_max; |
134 | uint32_t pll_out_min; | 141 | uint32_t pll_out_min; |
135 | uint32_t pll_out_max; | 142 | uint32_t pll_out_max; |
136 | uint16_t xclk; | 143 | uint32_t best_vco; |
137 | 144 | ||
145 | /* divider limits */ | ||
138 | uint32_t min_ref_div; | 146 | uint32_t min_ref_div; |
139 | uint32_t max_ref_div; | 147 | uint32_t max_ref_div; |
140 | uint32_t min_post_div; | 148 | uint32_t min_post_div; |
@@ -143,7 +151,12 @@ struct radeon_pll { | |||
143 | uint32_t max_feedback_div; | 151 | uint32_t max_feedback_div; |
144 | uint32_t min_frac_feedback_div; | 152 | uint32_t min_frac_feedback_div; |
145 | uint32_t max_frac_feedback_div; | 153 | uint32_t max_frac_feedback_div; |
146 | uint32_t best_vco; | 154 | |
155 | /* flags for the current clock */ | ||
156 | uint32_t flags; | ||
157 | |||
158 | /* pll id */ | ||
159 | uint32_t id; | ||
147 | }; | 160 | }; |
148 | 161 | ||
149 | struct radeon_i2c_chan { | 162 | struct radeon_i2c_chan { |
@@ -417,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll, | |||
417 | uint32_t *fb_div_p, | 430 | uint32_t *fb_div_p, |
418 | uint32_t *frac_fb_div_p, | 431 | uint32_t *frac_fb_div_p, |
419 | uint32_t *ref_div_p, | 432 | uint32_t *ref_div_p, |
420 | uint32_t *post_div_p, | 433 | uint32_t *post_div_p); |
421 | int flags); | ||
422 | 434 | ||
423 | extern void radeon_compute_pll_avivo(struct radeon_pll *pll, | 435 | extern void radeon_compute_pll_avivo(struct radeon_pll *pll, |
424 | uint64_t freq, | 436 | uint64_t freq, |
@@ -426,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
426 | uint32_t *fb_div_p, | 438 | uint32_t *fb_div_p, |
427 | uint32_t *frac_fb_div_p, | 439 | uint32_t *frac_fb_div_p, |
428 | uint32_t *ref_div_p, | 440 | uint32_t *ref_div_p, |
429 | uint32_t *post_div_p, | 441 | uint32_t *post_div_p); |
430 | int flags); | ||
431 | 442 | ||
432 | extern void radeon_setup_encoder_clones(struct drm_device *dev); | 443 | extern void radeon_setup_encoder_clones(struct drm_device *dev); |
433 | 444 | ||
@@ -453,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode); | |||
453 | 464 | ||
454 | extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | 465 | extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
455 | struct drm_framebuffer *old_fb); | 466 | struct drm_framebuffer *old_fb); |
456 | extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc); | ||
457 | 467 | ||
458 | extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, | 468 | extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
459 | struct drm_file *file_priv, | 469 | struct drm_file *file_priv, |
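The radeon_display.c, radeon_legacy_crtc.c and radeon_mode.h hunks above move the divider-selection flags out of the radeon_compute_pll()/radeon_compute_pll_avivo() argument lists and into struct radeon_pll itself. A minimal sketch of the resulting calling convention, using only names that appear in the hunks (use_bios_ref_div and bios_ref_div are hypothetical placeholders, not part of the patch):

	struct radeon_pll *pll = &rdev->clock.p1pll;
	uint32_t freq, fb_div, frac_fb_div, ref_div, post_div;

	/* flags now travel in the pll struct, not as a separate argument */
	pll->flags = RADEON_PLL_LEGACY | RADEON_PLL_PREFER_LOW_REF_DIV;
	if (use_bios_ref_div) {				/* hypothetical condition */
		pll->reference_div = bios_ref_div;	/* hypothetical value */
		pll->flags |= RADEON_PLL_USE_REF_DIV;
	}

	radeon_compute_pll(pll, mode->clock, &freq, &fb_div,
			   &frac_fb_div, &ref_div, &post_div);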
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 4e636de877b2..d72a71bff218 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -220,7 +220,8 @@ int radeon_bo_unpin(struct radeon_bo *bo) | |||
220 | 220 | ||
221 | int radeon_bo_evict_vram(struct radeon_device *rdev) | 221 | int radeon_bo_evict_vram(struct radeon_device *rdev) |
222 | { | 222 | { |
223 | if (rdev->flags & RADEON_IS_IGP) { | 223 | /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */ |
224 | if (0 && (rdev->flags & RADEON_IS_IGP)) { | ||
224 | if (rdev->mc.igp_sideport_enabled == false) | 225 | if (rdev->mc.igp_sideport_enabled == false) |
225 | /* Useless to evict on IGP chips */ | 226 | /* Useless to evict on IGP chips */ |
226 | return 0; | 227 | return 0; |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200 index 6021c8849a16..c29ac434ac9c 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r200 +++ b/drivers/gpu/drm/radeon/reg_srcs/r200 | |||
@@ -91,6 +91,8 @@ r200 0x3294 | |||
91 | 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL | 91 | 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL |
92 | 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL | 92 | 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL |
93 | 0x22c4 SE_TCL_POINT_SPRITE_CNTL | 93 | 0x22c4 SE_TCL_POINT_SPRITE_CNTL |
94 | 0x22d0 SE_PVS_CNTL | ||
95 | 0x22d4 SE_PVS_CONST_CNTL | ||
94 | 0x2648 RE_POINTSIZE | 96 | 0x2648 RE_POINTSIZE |
95 | 0x26c0 RE_TOP_LEFT | 97 | 0x26c0 RE_TOP_LEFT |
96 | 0x26c4 RE_MISC | 98 | 0x26c4 RE_MISC |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 59c71245fb91..55f6ffc4e58b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -779,7 +779,6 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
779 | fixed20_12 a; | 779 | fixed20_12 a; |
780 | u32 tmp; | 780 | u32 tmp; |
781 | int chansize, numchan; | 781 | int chansize, numchan; |
782 | int r; | ||
783 | 782 | ||
784 | /* Get VRAM informations */ | 783 | /* Get VRAM informations */ |
785 | rdev->mc.vram_is_ddr = true; | 784 | rdev->mc.vram_is_ddr = true; |
@@ -822,9 +821,6 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
822 | rdev->mc.real_vram_size = rdev->mc.aper_size; | 821 | rdev->mc.real_vram_size = rdev->mc.aper_size; |
823 | 822 | ||
824 | if (rdev->flags & RADEON_IS_AGP) { | 823 | if (rdev->flags & RADEON_IS_AGP) { |
825 | r = radeon_agp_init(rdev); | ||
826 | if (r) | ||
827 | return r; | ||
828 | /* gtt_size is setup by radeon_agp_init */ | 824 | /* gtt_size is setup by radeon_agp_init */ |
829 | rdev->mc.gtt_location = rdev->mc.agp_base; | 825 | rdev->mc.gtt_location = rdev->mc.agp_base; |
830 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; | 826 | tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; |
@@ -972,13 +968,16 @@ int rv770_suspend(struct radeon_device *rdev) | |||
972 | /* FIXME: we should wait for ring to be empty */ | 968 | /* FIXME: we should wait for ring to be empty */ |
973 | r700_cp_stop(rdev); | 969 | r700_cp_stop(rdev); |
974 | rdev->cp.ready = false; | 970 | rdev->cp.ready = false; |
971 | r600_irq_suspend(rdev); | ||
975 | r600_wb_disable(rdev); | 972 | r600_wb_disable(rdev); |
976 | rv770_pcie_gart_disable(rdev); | 973 | rv770_pcie_gart_disable(rdev); |
977 | /* unpin shaders bo */ | 974 | /* unpin shaders bo */ |
978 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | 975 | if (rdev->r600_blit.shader_obj) { |
979 | if (likely(r == 0)) { | 976 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
980 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | 977 | if (likely(r == 0)) { |
981 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | 978 | radeon_bo_unpin(rdev->r600_blit.shader_obj); |
979 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
980 | } | ||
982 | } | 981 | } |
983 | return 0; | 982 | return 0; |
984 | } | 983 | } |
@@ -1037,6 +1036,11 @@ int rv770_init(struct radeon_device *rdev) | |||
1037 | r = radeon_fence_driver_init(rdev); | 1036 | r = radeon_fence_driver_init(rdev); |
1038 | if (r) | 1037 | if (r) |
1039 | return r; | 1038 | return r; |
1039 | if (rdev->flags & RADEON_IS_AGP) { | ||
1040 | r = radeon_agp_init(rdev); | ||
1041 | if (r) | ||
1042 | radeon_agp_disable(rdev); | ||
1043 | } | ||
1040 | r = rv770_mc_init(rdev); | 1044 | r = rv770_mc_init(rdev); |
1041 | if (r) | 1045 | if (r) |
1042 | return r; | 1046 | return r; |
@@ -1071,13 +1075,14 @@ int rv770_init(struct radeon_device *rdev) | |||
1071 | if (rdev->accel_working) { | 1075 | if (rdev->accel_working) { |
1072 | r = radeon_ib_pool_init(rdev); | 1076 | r = radeon_ib_pool_init(rdev); |
1073 | if (r) { | 1077 | if (r) { |
1074 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | 1078 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
1075 | rdev->accel_working = false; | ||
1076 | } | ||
1077 | r = r600_ib_test(rdev); | ||
1078 | if (r) { | ||
1079 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
1080 | rdev->accel_working = false; | 1079 | rdev->accel_working = false; |
1080 | } else { | ||
1081 | r = r600_ib_test(rdev); | ||
1082 | if (r) { | ||
1083 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | ||
1084 | rdev->accel_working = false; | ||
1085 | } | ||
1081 | } | 1086 | } |
1082 | } | 1087 | } |
1083 | return 0; | 1088 | return 0; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 2920f9a279e1..1a3e909b7bba 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -426,7 +426,8 @@ moved: | |||
426 | bdev->man[bo->mem.mem_type].gpu_offset; | 426 | bdev->man[bo->mem.mem_type].gpu_offset; |
427 | bo->cur_placement = bo->mem.placement; | 427 | bo->cur_placement = bo->mem.placement; |
428 | spin_unlock(&bo->lock); | 428 | spin_unlock(&bo->lock); |
429 | } | 429 | } else |
430 | bo->offset = 0; | ||
430 | 431 | ||
431 | return 0; | 432 | return 0; |
432 | 433 | ||
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | |||
523 | static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) | 524 | static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) |
524 | { | 525 | { |
525 | struct ttm_bo_global *glob = bdev->glob; | 526 | struct ttm_bo_global *glob = bdev->glob; |
526 | struct ttm_buffer_object *entry, *nentry; | 527 | struct ttm_buffer_object *entry = NULL; |
527 | struct list_head *list, *next; | 528 | int ret = 0; |
528 | int ret; | ||
529 | 529 | ||
530 | spin_lock(&glob->lru_lock); | 530 | spin_lock(&glob->lru_lock); |
531 | list_for_each_safe(list, next, &bdev->ddestroy) { | 531 | if (list_empty(&bdev->ddestroy)) |
532 | entry = list_entry(list, struct ttm_buffer_object, ddestroy); | 532 | goto out_unlock; |
533 | nentry = NULL; | ||
534 | 533 | ||
535 | /* | 534 | entry = list_first_entry(&bdev->ddestroy, |
536 | * Protect the next list entry from destruction while we | 535 | struct ttm_buffer_object, ddestroy); |
537 | * unlock the lru_lock. | 536 | kref_get(&entry->list_kref); |
538 | */ | ||
539 | 537 | ||
540 | if (next != &bdev->ddestroy) { | 538 | for (;;) { |
541 | nentry = list_entry(next, struct ttm_buffer_object, | 539 | struct ttm_buffer_object *nentry = NULL; |
542 | ddestroy); | 540 | |
541 | if (entry->ddestroy.next != &bdev->ddestroy) { | ||
542 | nentry = list_first_entry(&entry->ddestroy, | ||
543 | struct ttm_buffer_object, ddestroy); | ||
543 | kref_get(&nentry->list_kref); | 544 | kref_get(&nentry->list_kref); |
544 | } | 545 | } |
545 | kref_get(&entry->list_kref); | ||
546 | 546 | ||
547 | spin_unlock(&glob->lru_lock); | 547 | spin_unlock(&glob->lru_lock); |
548 | ret = ttm_bo_cleanup_refs(entry, remove_all); | 548 | ret = ttm_bo_cleanup_refs(entry, remove_all); |
549 | kref_put(&entry->list_kref, ttm_bo_release_list); | 549 | kref_put(&entry->list_kref, ttm_bo_release_list); |
550 | entry = nentry; | ||
551 | |||
552 | if (ret || !entry) | ||
553 | goto out; | ||
550 | 554 | ||
551 | spin_lock(&glob->lru_lock); | 555 | spin_lock(&glob->lru_lock); |
552 | if (nentry) { | 556 | if (list_empty(&entry->ddestroy)) |
553 | bool next_onlist = !list_empty(next); | ||
554 | spin_unlock(&glob->lru_lock); | ||
555 | kref_put(&nentry->list_kref, ttm_bo_release_list); | ||
556 | spin_lock(&glob->lru_lock); | ||
557 | /* | ||
558 | * Someone might have raced us and removed the | ||
559 | * next entry from the list. We don't bother restarting | ||
560 | * list traversal. | ||
561 | */ | ||
562 | |||
563 | if (!next_onlist) | ||
564 | break; | ||
565 | } | ||
566 | if (ret) | ||
567 | break; | 557 | break; |
568 | } | 558 | } |
569 | ret = !list_empty(&bdev->ddestroy); | ||
570 | spin_unlock(&glob->lru_lock); | ||
571 | 559 | ||
560 | out_unlock: | ||
561 | spin_unlock(&glob->lru_lock); | ||
562 | out: | ||
563 | if (entry) | ||
564 | kref_put(&entry->list_kref, ttm_bo_release_list); | ||
572 | return ret; | 565 | return ret; |
573 | } | 566 | } |
574 | 567 | ||
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
950 | ttm_flag_masked(&cur_flags, placement->busy_placement[i], | 943 | ttm_flag_masked(&cur_flags, placement->busy_placement[i], |
951 | ~TTM_PL_MASK_MEMTYPE); | 944 | ~TTM_PL_MASK_MEMTYPE); |
952 | 945 | ||
946 | |||
947 | if (mem_type == TTM_PL_SYSTEM) { | ||
948 | mem->mem_type = mem_type; | ||
949 | mem->placement = cur_flags; | ||
950 | mem->mm_node = NULL; | ||
951 | return 0; | ||
952 | } | ||
953 | |||
953 | ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, | 954 | ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, |
954 | interruptible, no_wait); | 955 | interruptible, no_wait); |
955 | if (ret == 0 && mem->mm_node) { | 956 | if (ret == 0 && mem->mm_node) { |
@@ -1844,6 +1845,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) | |||
1844 | * anyone tries to access a ttm page. | 1845 | * anyone tries to access a ttm page. |
1845 | */ | 1846 | */ |
1846 | 1847 | ||
1848 | if (bo->bdev->driver->swap_notify) | ||
1849 | bo->bdev->driver->swap_notify(bo); | ||
1850 | |||
1847 | ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); | 1851 | ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); |
1848 | out: | 1852 | out: |
1849 | 1853 | ||
@@ -1864,3 +1868,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev) | |||
1864 | while (ttm_bo_swapout(&bdev->glob->shrink) == 0) | 1868 | while (ttm_bo_swapout(&bdev->glob->shrink) == 0) |
1865 | ; | 1869 | ; |
1866 | } | 1870 | } |
1871 | EXPORT_SYMBOL(ttm_bo_swapout_all); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index f619ebcaa4ec..3d172ef04ee1 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock) | |||
288 | wake_up_all(&lock->queue); | 288 | wake_up_all(&lock->queue); |
289 | spin_unlock(&lock->lock); | 289 | spin_unlock(&lock->lock); |
290 | } | 290 | } |
291 | EXPORT_SYMBOL(ttm_suspend_unlock); | ||
291 | 292 | ||
292 | static bool __ttm_suspend_lock(struct ttm_lock *lock) | 293 | static bool __ttm_suspend_lock(struct ttm_lock *lock) |
293 | { | 294 | { |
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock) | |||
309 | { | 310 | { |
310 | wait_event(lock->queue, __ttm_suspend_lock(lock)); | 311 | wait_event(lock->queue, __ttm_suspend_lock(lock)); |
311 | } | 312 | } |
313 | EXPORT_SYMBOL(ttm_suspend_lock); | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index d6f2d2b882e9..825ebe3d89d5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = { | |||
48 | .busy_placement = &vram_placement_flags | 48 | .busy_placement = &vram_placement_flags |
49 | }; | 49 | }; |
50 | 50 | ||
51 | struct ttm_placement vmw_vram_sys_placement = { | ||
52 | .fpfn = 0, | ||
53 | .lpfn = 0, | ||
54 | .num_placement = 1, | ||
55 | .placement = &vram_placement_flags, | ||
56 | .num_busy_placement = 1, | ||
57 | .busy_placement = &sys_placement_flags | ||
58 | }; | ||
59 | |||
51 | struct ttm_placement vmw_vram_ne_placement = { | 60 | struct ttm_placement vmw_vram_ne_placement = { |
52 | .fpfn = 0, | 61 | .fpfn = 0, |
53 | .lpfn = 0, | 62 | .lpfn = 0, |
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) | |||
172 | return 0; | 181 | return 0; |
173 | } | 182 | } |
174 | 183 | ||
184 | static void vmw_move_notify(struct ttm_buffer_object *bo, | ||
185 | struct ttm_mem_reg *new_mem) | ||
186 | { | ||
187 | if (new_mem->mem_type != TTM_PL_SYSTEM) | ||
188 | vmw_dmabuf_gmr_unbind(bo); | ||
189 | } | ||
190 | |||
191 | static void vmw_swap_notify(struct ttm_buffer_object *bo) | ||
192 | { | ||
193 | vmw_dmabuf_gmr_unbind(bo); | ||
194 | } | ||
195 | |||
175 | /** | 196 | /** |
176 | * FIXME: We're using the old vmware polling method to sync. | 197 | * FIXME: We're using the old vmware polling method to sync. |
177 | * Do this with fences instead. | 198 | * Do this with fences instead. |
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = { | |||
225 | .sync_obj_wait = vmw_sync_obj_wait, | 246 | .sync_obj_wait = vmw_sync_obj_wait, |
226 | .sync_obj_flush = vmw_sync_obj_flush, | 247 | .sync_obj_flush = vmw_sync_obj_flush, |
227 | .sync_obj_unref = vmw_sync_obj_unref, | 248 | .sync_obj_unref = vmw_sync_obj_unref, |
228 | .sync_obj_ref = vmw_sync_obj_ref | 249 | .sync_obj_ref = vmw_sync_obj_ref, |
250 | .move_notify = vmw_move_notify, | ||
251 | .swap_notify = vmw_swap_notify | ||
229 | }; | 252 | }; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 1db1ef30be2b..dedd121d8fe7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx"; | |||
147 | 147 | ||
148 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 148 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
149 | static void vmw_master_init(struct vmw_master *); | 149 | static void vmw_master_init(struct vmw_master *); |
150 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | ||
151 | void *ptr); | ||
150 | 152 | ||
151 | static void vmw_print_capabilities(uint32_t capabilities) | 153 | static void vmw_print_capabilities(uint32_t capabilities) |
152 | { | 154 | { |
@@ -217,6 +219,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
217 | 219 | ||
218 | dev_priv->dev = dev; | 220 | dev_priv->dev = dev; |
219 | dev_priv->vmw_chipset = chipset; | 221 | dev_priv->vmw_chipset = chipset; |
222 | dev_priv->last_read_sequence = (uint32_t) -100; | ||
220 | mutex_init(&dev_priv->hw_mutex); | 223 | mutex_init(&dev_priv->hw_mutex); |
221 | mutex_init(&dev_priv->cmdbuf_mutex); | 224 | mutex_init(&dev_priv->cmdbuf_mutex); |
222 | rwlock_init(&dev_priv->resource_lock); | 225 | rwlock_init(&dev_priv->resource_lock); |
@@ -351,6 +354,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
351 | vmw_fb_init(dev_priv); | 354 | vmw_fb_init(dev_priv); |
352 | } | 355 | } |
353 | 356 | ||
357 | dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; | ||
358 | register_pm_notifier(&dev_priv->pm_nb); | ||
359 | |||
354 | return 0; | 360 | return 0; |
355 | 361 | ||
356 | out_no_device: | 362 | out_no_device: |
@@ -385,6 +391,8 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
385 | 391 | ||
386 | DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); | 392 | DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); |
387 | 393 | ||
394 | unregister_pm_notifier(&dev_priv->pm_nb); | ||
395 | |||
388 | if (!dev_priv->stealth) { | 396 | if (!dev_priv->stealth) { |
389 | vmw_fb_close(dev_priv); | 397 | vmw_fb_close(dev_priv); |
390 | vmw_kms_close(dev_priv); | 398 | vmw_kms_close(dev_priv); |
@@ -650,6 +658,57 @@ static void vmw_remove(struct pci_dev *pdev) | |||
650 | drm_put_dev(dev); | 658 | drm_put_dev(dev); |
651 | } | 659 | } |
652 | 660 | ||
661 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | ||
662 | void *ptr) | ||
663 | { | ||
664 | struct vmw_private *dev_priv = | ||
665 | container_of(nb, struct vmw_private, pm_nb); | ||
666 | struct vmw_master *vmaster = dev_priv->active_master; | ||
667 | |||
668 | switch (val) { | ||
669 | case PM_HIBERNATION_PREPARE: | ||
670 | case PM_SUSPEND_PREPARE: | ||
671 | ttm_suspend_lock(&vmaster->lock); | ||
672 | |||
673 | /** | ||
674 | * This empties VRAM and unbinds all GMR bindings. | ||
675 | * Buffer contents are moved to swappable memory. | ||
676 | */ | ||
677 | ttm_bo_swapout_all(&dev_priv->bdev); | ||
678 | break; | ||
679 | case PM_POST_HIBERNATION: | ||
680 | case PM_POST_SUSPEND: | ||
681 | ttm_suspend_unlock(&vmaster->lock); | ||
682 | break; | ||
683 | case PM_RESTORE_PREPARE: | ||
684 | break; | ||
685 | case PM_POST_RESTORE: | ||
686 | break; | ||
687 | default: | ||
688 | break; | ||
689 | } | ||
690 | return 0; | ||
691 | } | ||
692 | |||
693 | /** | ||
694 | * These might not be needed with the virtual SVGA device. | ||
695 | */ | ||
696 | |||
697 | int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
698 | { | ||
699 | pci_save_state(pdev); | ||
700 | pci_disable_device(pdev); | ||
701 | pci_set_power_state(pdev, PCI_D3hot); | ||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | int vmw_pci_resume(struct pci_dev *pdev) | ||
706 | { | ||
707 | pci_set_power_state(pdev, PCI_D0); | ||
708 | pci_restore_state(pdev); | ||
709 | return pci_enable_device(pdev); | ||
710 | } | ||
711 | |||
653 | static struct drm_driver driver = { | 712 | static struct drm_driver driver = { |
654 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | | 713 | .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | |
655 | DRIVER_MODESET, | 714 | DRIVER_MODESET, |
@@ -689,7 +748,9 @@ static struct drm_driver driver = { | |||
689 | .name = VMWGFX_DRIVER_NAME, | 748 | .name = VMWGFX_DRIVER_NAME, |
690 | .id_table = vmw_pci_id_list, | 749 | .id_table = vmw_pci_id_list, |
691 | .probe = vmw_probe, | 750 | .probe = vmw_probe, |
692 | .remove = vmw_remove | 751 | .remove = vmw_remove, |
752 | .suspend = vmw_pci_suspend, | ||
753 | .resume = vmw_pci_resume | ||
693 | }, | 754 | }, |
694 | .name = VMWGFX_DRIVER_NAME, | 755 | .name = VMWGFX_DRIVER_NAME, |
695 | .desc = VMWGFX_DRIVER_DESC, | 756 | .desc = VMWGFX_DRIVER_DESC, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e61bd85b6975..50529a7f06fb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | #include "vmwgfx_drm.h" | 33 | #include "vmwgfx_drm.h" |
34 | #include "drm_hashtab.h" | 34 | #include "drm_hashtab.h" |
35 | #include "linux/suspend.h" | ||
35 | #include "ttm/ttm_bo_driver.h" | 36 | #include "ttm/ttm_bo_driver.h" |
36 | #include "ttm/ttm_object.h" | 37 | #include "ttm/ttm_object.h" |
37 | #include "ttm/ttm_lock.h" | 38 | #include "ttm/ttm_lock.h" |
@@ -258,6 +259,7 @@ struct vmw_private { | |||
258 | 259 | ||
259 | struct vmw_master *active_master; | 260 | struct vmw_master *active_master; |
260 | struct vmw_master fbdev_master; | 261 | struct vmw_master fbdev_master; |
262 | struct notifier_block pm_nb; | ||
261 | }; | 263 | }; |
262 | 264 | ||
263 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 265 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -353,6 +355,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
353 | struct vmw_dma_buffer *bo); | 355 | struct vmw_dma_buffer *bo); |
354 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | 356 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, |
355 | struct vmw_dma_buffer *bo); | 357 | struct vmw_dma_buffer *bo); |
358 | extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo); | ||
356 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | 359 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, |
357 | struct drm_file *file_priv); | 360 | struct drm_file *file_priv); |
358 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | 361 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, |
@@ -401,6 +404,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); | |||
401 | 404 | ||
402 | extern struct ttm_placement vmw_vram_placement; | 405 | extern struct ttm_placement vmw_vram_placement; |
403 | extern struct ttm_placement vmw_vram_ne_placement; | 406 | extern struct ttm_placement vmw_vram_ne_placement; |
407 | extern struct ttm_placement vmw_vram_sys_placement; | ||
404 | extern struct ttm_placement vmw_sys_placement; | 408 | extern struct ttm_placement vmw_sys_placement; |
405 | extern struct ttm_bo_driver vmw_bo_driver; | 409 | extern struct ttm_bo_driver vmw_bo_driver; |
406 | extern int vmw_dma_quiescent(struct drm_device *dev); | 410 | extern int vmw_dma_quiescent(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 2e92da567403..d69caf92ffe7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -490,10 +490,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
490 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) | 490 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) |
491 | return 0; | 491 | return 0; |
492 | 492 | ||
493 | /** | ||
494 | * Put BO in VRAM, only if there is space. | ||
495 | */ | ||
496 | |||
497 | ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); | ||
498 | if (unlikely(ret == -ERESTARTSYS)) | ||
499 | return ret; | ||
500 | |||
501 | /** | ||
502 | * Otherwise, set it up as GMR. | ||
503 | */ | ||
504 | |||
505 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) | ||
506 | return 0; | ||
507 | |||
493 | ret = vmw_gmr_bind(dev_priv, bo); | 508 | ret = vmw_gmr_bind(dev_priv, bo); |
494 | if (likely(ret == 0 || ret == -ERESTARTSYS)) | 509 | if (likely(ret == 0 || ret == -ERESTARTSYS)) |
495 | return ret; | 510 | return ret; |
496 | 511 | ||
512 | /** | ||
513 | * If that failed, try VRAM again, this time evicting | ||
514 | * previous contents. | ||
515 | */ | ||
497 | 516 | ||
498 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); | 517 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); |
499 | return ret; | 518 | return ret; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 641dde76ada1..4f4f6432be8b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -649,14 +649,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | |||
649 | if (unlikely(ret != 0)) | 649 | if (unlikely(ret != 0)) |
650 | goto err_unlock; | 650 | goto err_unlock; |
651 | 651 | ||
652 | if (vmw_bo->gmr_bound) { | ||
653 | vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id); | ||
654 | spin_lock(&bo->glob->lru_lock); | ||
655 | ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id); | ||
656 | spin_unlock(&bo->glob->lru_lock); | ||
657 | vmw_bo->gmr_bound = NULL; | ||
658 | } | ||
659 | |||
660 | ret = ttm_bo_validate(bo, &ne_placement, false, false); | 652 | ret = ttm_bo_validate(bo, &ne_placement, false, false); |
661 | ttm_bo_unreserve(bo); | 653 | ttm_bo_unreserve(bo); |
662 | err_unlock: | 654 | err_unlock: |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 01feb48af333..f7d5f70b52dd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -98,8 +98,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
98 | (unsigned int) min, | 98 | (unsigned int) min, |
99 | (unsigned int) fifo->capabilities); | 99 | (unsigned int) fifo->capabilities); |
100 | 100 | ||
101 | dev_priv->fence_seq = (uint32_t) -100; | 101 | dev_priv->fence_seq = dev_priv->last_read_sequence; |
102 | dev_priv->last_read_sequence = (uint32_t) -100; | ||
103 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | 102 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); |
104 | 103 | ||
105 | return vmw_fifo_send_fence(dev_priv, &dummy); | 104 | return vmw_fifo_send_fence(dev_priv, &dummy); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b1af76e371c3..686692de209a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
553 | } *cmd; | 553 | } *cmd; |
554 | int i, increment = 1; | 554 | int i, increment = 1; |
555 | 555 | ||
556 | if (!num_clips || | 556 | if (!num_clips) { |
557 | !(dev_priv->fifo.capabilities & | ||
558 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | ||
559 | num_clips = 1; | 557 | num_clips = 1; |
560 | clips = &norect; | 558 | clips = &norect; |
561 | norect.x1 = norect.y1 = 0; | 559 | norect.x1 = norect.y1 = 0; |
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
574 | 572 | ||
575 | for (i = 0; i < num_clips; i++, clips += increment) { | 573 | for (i = 0; i < num_clips; i++, clips += increment) { |
576 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); | 574 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); |
577 | cmd[i].body.x = cpu_to_le32(clips[i].x1); | 575 | cmd[i].body.x = cpu_to_le32(clips->x1); |
578 | cmd[i].body.y = cpu_to_le32(clips[i].y1); | 576 | cmd[i].body.y = cpu_to_le32(clips->y1); |
579 | cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1); | 577 | cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1); |
580 | cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1); | 578 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); |
581 | } | 579 | } |
582 | 580 | ||
583 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); | 581 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index bb6e6a096d25..5b6eabeb7f51 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, | |||
104 | bool pin, bool interruptible) | 104 | bool pin, bool interruptible) |
105 | { | 105 | { |
106 | struct ttm_buffer_object *bo = &buf->base; | 106 | struct ttm_buffer_object *bo = &buf->base; |
107 | struct ttm_bo_global *glob = bo->glob; | ||
108 | struct ttm_placement *overlay_placement = &vmw_vram_placement; | 107 | struct ttm_placement *overlay_placement = &vmw_vram_placement; |
109 | int ret; | 108 | int ret; |
110 | 109 | ||
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, | |||
116 | if (unlikely(ret != 0)) | 115 | if (unlikely(ret != 0)) |
117 | goto err; | 116 | goto err; |
118 | 117 | ||
119 | if (buf->gmr_bound) { | ||
120 | vmw_gmr_unbind(dev_priv, buf->gmr_id); | ||
121 | spin_lock(&glob->lru_lock); | ||
122 | ida_remove(&dev_priv->gmr_ida, buf->gmr_id); | ||
123 | spin_unlock(&glob->lru_lock); | ||
124 | buf->gmr_bound = NULL; | ||
125 | } | ||
126 | |||
127 | if (pin) | 118 | if (pin) |
128 | overlay_placement = &vmw_vram_ne_placement; | 119 | overlay_placement = &vmw_vram_ne_placement; |
129 | 120 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c012d5927f65..e01db120efff 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -599,6 +599,27 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
599 | if (unlikely(ret != 0)) | 599 | if (unlikely(ret != 0)) |
600 | goto out_err1; | 600 | goto out_err1; |
601 | 601 | ||
602 | |||
603 | if (srf->flags & (1 << 9) && | ||
604 | srf->num_sizes == 1 && | ||
605 | srf->sizes[0].width == 64 && | ||
606 | srf->sizes[0].height == 64 && | ||
607 | srf->format == SVGA3D_A8R8G8B8) { | ||
608 | |||
609 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | ||
610 | /* clear the image */ | ||
611 | if (srf->snooper.image) { | ||
612 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
613 | } else { | ||
614 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
615 | ret = -ENOMEM; | ||
616 | goto out_err1; | ||
617 | } | ||
618 | } else { | ||
619 | srf->snooper.image = NULL; | ||
620 | } | ||
621 | srf->snooper.crtc = NULL; | ||
622 | |||
602 | user_srf->base.shareable = false; | 623 | user_srf->base.shareable = false; |
603 | user_srf->base.tfile = NULL; | 624 | user_srf->base.tfile = NULL; |
604 | 625 | ||
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
622 | return ret; | 643 | return ret; |
623 | } | 644 | } |
624 | 645 | ||
625 | if (srf->flags & (1 << 9) && | ||
626 | srf->num_sizes == 1 && | ||
627 | srf->sizes[0].width == 64 && | ||
628 | srf->sizes[0].height == 64 && | ||
629 | srf->format == SVGA3D_A8R8G8B8) { | ||
630 | |||
631 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | ||
632 | /* clear the image */ | ||
633 | if (srf->snooper.image) | ||
634 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
635 | else | ||
636 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
637 | |||
638 | } else { | ||
639 | srf->snooper.image = NULL; | ||
640 | } | ||
641 | srf->snooper.crtc = NULL; | ||
642 | |||
643 | rep->sid = user_srf->base.hash.key; | 646 | rep->sid = user_srf->base.hash.key; |
644 | if (rep->sid == SVGA3D_INVALID_ID) | 647 | if (rep->sid == SVGA3D_INVALID_ID) |
645 | DRM_ERROR("Created bad Surface ID.\n"); | 648 | DRM_ERROR("Created bad Surface ID.\n"); |
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, | |||
754 | return bo_user_size + page_array_size; | 757 | return bo_user_size + page_array_size; |
755 | } | 758 | } |
756 | 759 | ||
757 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | 760 | void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo) |
758 | { | 761 | { |
759 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | 762 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); |
760 | struct ttm_bo_global *glob = bo->glob; | 763 | struct ttm_bo_global *glob = bo->glob; |
761 | struct vmw_private *dev_priv = | 764 | struct vmw_private *dev_priv = |
762 | container_of(bo->bdev, struct vmw_private, bdev); | 765 | container_of(bo->bdev, struct vmw_private, bdev); |
763 | 766 | ||
764 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
765 | if (vmw_bo->gmr_bound) { | 767 | if (vmw_bo->gmr_bound) { |
766 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | 768 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); |
767 | spin_lock(&glob->lru_lock); | 769 | spin_lock(&glob->lru_lock); |
768 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | 770 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); |
769 | spin_unlock(&glob->lru_lock); | 771 | spin_unlock(&glob->lru_lock); |
772 | vmw_bo->gmr_bound = false; | ||
770 | } | 773 | } |
774 | } | ||
775 | |||
776 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | ||
777 | { | ||
778 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
779 | struct ttm_bo_global *glob = bo->glob; | ||
780 | |||
781 | vmw_dmabuf_gmr_unbind(bo); | ||
782 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
771 | kfree(vmw_bo); | 783 | kfree(vmw_bo); |
772 | } | 784 | } |
773 | 785 | ||
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
813 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | 825 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) |
814 | { | 826 | { |
815 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | 827 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
816 | struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma; | ||
817 | struct ttm_bo_global *glob = bo->glob; | 828 | struct ttm_bo_global *glob = bo->glob; |
818 | struct vmw_private *dev_priv = | ||
819 | container_of(bo->bdev, struct vmw_private, bdev); | ||
820 | 829 | ||
830 | vmw_dmabuf_gmr_unbind(bo); | ||
821 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | 831 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); |
822 | if (vmw_bo->gmr_bound) { | ||
823 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | ||
824 | spin_lock(&glob->lru_lock); | ||
825 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | ||
826 | spin_unlock(&glob->lru_lock); | ||
827 | } | ||
828 | kfree(vmw_user_bo); | 832 | kfree(vmw_user_bo); |
829 | } | 833 | } |
830 | 834 | ||
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | |||
868 | } | 872 | } |
869 | 873 | ||
870 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, | 874 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, |
871 | &vmw_vram_placement, true, | 875 | &vmw_vram_sys_placement, true, |
872 | &vmw_user_dmabuf_destroy); | 876 | &vmw_user_dmabuf_destroy); |
873 | if (unlikely(ret != 0)) | 877 | if (unlikely(ret != 0)) |
874 | return ret; | 878 | return ret; |
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index 1c89d922d619..fa9708c2d723 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c | |||
@@ -686,7 +686,6 @@ static ssize_t set_fan1_div( | |||
686 | data->fan1_div = 4; | 686 | data->fan1_div = 4; |
687 | break; | 687 | break; |
688 | default: | 688 | default: |
689 | mutex_unlock(&data->update_lock); | ||
690 | count = -EINVAL; | 689 | count = -EINVAL; |
691 | goto EXIT; | 690 | goto EXIT; |
692 | } | 691 | } |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 6811346c1c62..028284f544e3 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -1329,17 +1329,16 @@ static int atk_add(struct acpi_device *device) | |||
1329 | &buf, ACPI_TYPE_PACKAGE); | 1329 | &buf, ACPI_TYPE_PACKAGE); |
1330 | if (ret != AE_OK) { | 1330 | if (ret != AE_OK) { |
1331 | dev_dbg(&device->dev, "atk: method MBIF not found\n"); | 1331 | dev_dbg(&device->dev, "atk: method MBIF not found\n"); |
1332 | err = -ENODEV; | 1332 | } else { |
1333 | goto out; | 1333 | obj = buf.pointer; |
1334 | } | 1334 | if (obj->package.count >= 2) { |
1335 | 1335 | union acpi_object *id = &obj->package.elements[1]; | |
1336 | obj = buf.pointer; | 1336 | if (id->type == ACPI_TYPE_STRING) |
1337 | if (obj->package.count >= 2 && | 1337 | dev_dbg(&device->dev, "board ID = %s\n", |
1338 | obj->package.elements[1].type == ACPI_TYPE_STRING) { | 1338 | id->string.pointer); |
1339 | dev_dbg(&device->dev, "board ID = %s\n", | 1339 | } |
1340 | obj->package.elements[1].string.pointer); | 1340 | ACPI_FREE(buf.pointer); |
1341 | } | 1341 | } |
1342 | ACPI_FREE(buf.pointer); | ||
1343 | 1342 | ||
1344 | err = atk_probe_if(data); | 1343 | err = atk_probe_if(data); |
1345 | if (err) { | 1344 | if (err) { |
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index bd0fc67e804b..fa0728232e71 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c | |||
@@ -768,6 +768,7 @@ leave: | |||
768 | static int watchdog_open(struct inode *inode, struct file *filp) | 768 | static int watchdog_open(struct inode *inode, struct file *filp) |
769 | { | 769 | { |
770 | struct fschmd_data *pos, *data = NULL; | 770 | struct fschmd_data *pos, *data = NULL; |
771 | int watchdog_is_open; | ||
771 | 772 | ||
772 | /* We get called from drivers/char/misc.c with misc_mtx hold, and we | 773 | /* We get called from drivers/char/misc.c with misc_mtx hold, and we |
773 | call misc_register() from fschmd_probe() with watchdog_data_mutex | 774 | call misc_register() from fschmd_probe() with watchdog_data_mutex |
@@ -782,10 +783,12 @@ static int watchdog_open(struct inode *inode, struct file *filp) | |||
782 | } | 783 | } |
783 | } | 784 | } |
784 | /* Note we can never not have found data, so we don't check for this */ | 785 | /* Note we can never not have found data, so we don't check for this */ |
785 | kref_get(&data->kref); | 786 | watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open); |
787 | if (!watchdog_is_open) | ||
788 | kref_get(&data->kref); | ||
786 | mutex_unlock(&watchdog_data_mutex); | 789 | mutex_unlock(&watchdog_data_mutex); |
787 | 790 | ||
788 | if (test_and_set_bit(0, &data->watchdog_is_open)) | 791 | if (watchdog_is_open) |
789 | return -EBUSY; | 792 | return -EBUSY; |
790 | 793 | ||
791 | /* Start the watchdog */ | 794 | /* Start the watchdog */ |
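The fschmd change above takes the extra kref only when this caller actually wins the test_and_set_bit() race, so an opener that is turned away with -EBUSY no longer leaks a reference. A condensed sketch of the pattern, reusing the names from the patch, with the earlier lookup and mutex_lock() elided:

	watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
	if (!watchdog_is_open)
		kref_get(&data->kref);	/* reference owned by the open file */
	mutex_unlock(&watchdog_data_mutex);

	if (watchdog_is_open)
		return -EBUSY;		/* no reference taken, nothing to drop */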
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index 9ca97818bd4b..8fa462f2b570 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c | |||
@@ -488,7 +488,7 @@ static int __init smsc47m1_find(unsigned short *addr, | |||
488 | } | 488 | } |
489 | 489 | ||
490 | /* Restore device to its initial state */ | 490 | /* Restore device to its initial state */ |
491 | static void __init smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) | 491 | static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data) |
492 | { | 492 | { |
493 | if ((sio_data->activate & 0x01) == 0) { | 493 | if ((sio_data->activate & 0x01) == 0) { |
494 | superio_enter(); | 494 | superio_enter(); |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index e3654d683e15..75bf820e7ccb 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -226,7 +226,6 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) | |||
226 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); | 226 | temp = readb(i2c_imx->base + IMX_I2C_I2CR); |
227 | temp &= ~(I2CR_MSTA | I2CR_MTX); | 227 | temp &= ~(I2CR_MSTA | I2CR_MTX); |
228 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); | 228 | writeb(temp, i2c_imx->base + IMX_I2C_I2CR); |
229 | i2c_imx->stopped = 1; | ||
230 | } | 229 | } |
231 | if (cpu_is_mx1()) { | 230 | if (cpu_is_mx1()) { |
232 | /* | 231 | /* |
@@ -236,8 +235,10 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) | |||
236 | udelay(i2c_imx->disable_delay); | 235 | udelay(i2c_imx->disable_delay); |
237 | } | 236 | } |
238 | 237 | ||
239 | if (!i2c_imx->stopped) | 238 | if (!i2c_imx->stopped) { |
240 | i2c_imx_bus_busy(i2c_imx, 0); | 239 | i2c_imx_bus_busy(i2c_imx, 0); |
240 | i2c_imx->stopped = 1; | ||
241 | } | ||
241 | 242 | ||
242 | /* Disable I2C controller */ | 243 | /* Disable I2C controller */ |
243 | writeb(0, i2c_imx->base + IMX_I2C_I2CR); | 244 | writeb(0, i2c_imx->base + IMX_I2C_I2CR); |
@@ -496,22 +497,23 @@ static int __init i2c_imx_probe(struct platform_device *pdev) | |||
496 | } | 497 | } |
497 | 498 | ||
498 | res_size = resource_size(res); | 499 | res_size = resource_size(res); |
500 | |||
501 | if (!request_mem_region(res->start, res_size, DRIVER_NAME)) { | ||
502 | ret = -EBUSY; | ||
503 | goto fail0; | ||
504 | } | ||
505 | |||
499 | base = ioremap(res->start, res_size); | 506 | base = ioremap(res->start, res_size); |
500 | if (!base) { | 507 | if (!base) { |
501 | dev_err(&pdev->dev, "ioremap failed\n"); | 508 | dev_err(&pdev->dev, "ioremap failed\n"); |
502 | ret = -EIO; | 509 | ret = -EIO; |
503 | goto fail0; | 510 | goto fail1; |
504 | } | 511 | } |
505 | 512 | ||
506 | i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); | 513 | i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); |
507 | if (!i2c_imx) { | 514 | if (!i2c_imx) { |
508 | dev_err(&pdev->dev, "can't allocate interface\n"); | 515 | dev_err(&pdev->dev, "can't allocate interface\n"); |
509 | ret = -ENOMEM; | 516 | ret = -ENOMEM; |
510 | goto fail1; | ||
511 | } | ||
512 | |||
513 | if (!request_mem_region(res->start, res_size, DRIVER_NAME)) { | ||
514 | ret = -EBUSY; | ||
515 | goto fail2; | 517 | goto fail2; |
516 | } | 518 | } |
517 | 519 | ||
@@ -582,11 +584,11 @@ fail5: | |||
582 | fail4: | 584 | fail4: |
583 | clk_put(i2c_imx->clk); | 585 | clk_put(i2c_imx->clk); |
584 | fail3: | 586 | fail3: |
585 | release_mem_region(i2c_imx->res->start, resource_size(res)); | ||
586 | fail2: | ||
587 | kfree(i2c_imx); | 587 | kfree(i2c_imx); |
588 | fail1: | 588 | fail2: |
589 | iounmap(base); | 589 | iounmap(base); |
590 | fail1: | ||
591 | release_mem_region(res->start, resource_size(res)); | ||
590 | fail0: | 592 | fail0: |
591 | if (pdata && pdata->exit) | 593 | if (pdata && pdata->exit) |
592 | pdata->exit(&pdev->dev); | 594 | pdata->exit(&pdev->dev); |
@@ -618,8 +620,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev) | |||
618 | 620 | ||
619 | clk_put(i2c_imx->clk); | 621 | clk_put(i2c_imx->clk); |
620 | 622 | ||
621 | release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); | ||
622 | iounmap(i2c_imx->base); | 623 | iounmap(i2c_imx->base); |
624 | release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); | ||
623 | kfree(i2c_imx); | 625 | kfree(i2c_imx); |
624 | return 0; | 626 | return 0; |
625 | } | 627 | } |
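The i2c-imx reordering above restores the usual rule that resources are claimed in one order and released in exactly the reverse order when a later step fails. A minimal sketch of that pattern for a generic platform driver; foo_probe, struct foo_priv and the "foo" region name are made-up placeholders, not taken from the driver:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct foo_priv {
        void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        resource_size_t size;
        struct foo_priv *priv;
        int ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;
        size = resource_size(res);

        /* claim the register window before mapping it */
        if (!request_mem_region(res->start, size, "foo"))
                return -EBUSY;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto err_release;
        }

        priv->base = ioremap(res->start, size);
        if (!priv->base) {
                ret = -EIO;
                goto err_free;
        }

        platform_set_drvdata(pdev, priv);
        return 0;

        /* unwind in reverse order of acquisition */
err_free:
        kfree(priv);
err_release:
        release_mem_region(res->start, size);
        return ret;
}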
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 85bc6a685e36..44d2037e9e56 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc) | |||
4330 | 4330 | ||
4331 | if (ioc->bus_type == SPI) | 4331 | if (ioc->bus_type == SPI) |
4332 | num_chain *= MPT_SCSI_CAN_QUEUE; | 4332 | num_chain *= MPT_SCSI_CAN_QUEUE; |
4333 | else if (ioc->bus_type == SAS) | ||
4334 | num_chain *= MPT_SAS_CAN_QUEUE; | ||
4333 | else | 4335 | else |
4334 | num_chain *= MPT_FC_CAN_QUEUE; | 4336 | num_chain *= MPT_FC_CAN_QUEUE; |
4335 | 4337 | ||
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 4c364d44ad59..2de0cc823d60 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -549,4 +549,21 @@ config MTD_VMU | |||
549 | To build this as a module select M here, the module will be called | 549 | To build this as a module select M here, the module will be called |
550 | vmu-flash. | 550 | vmu-flash. |
551 | 551 | ||
552 | config MTD_PISMO | ||
553 | tristate "MTD discovery driver for PISMO modules" | ||
554 | depends on I2C | ||
555 | depends on ARCH_VERSATILE | ||
556 | help | ||
557 | This driver allows for discovery of PISMO modules - see | ||
558 | <http://www.pismoworld.org/>. These are small modules containing | ||
559 | up to five memory devices (eg, SRAM, flash, DOC) described by an | ||
560 | I2C EEPROM. | ||
561 | |||
562 | This driver does not create any MTD maps itself; instead it | ||
563 | creates MTD physmap and MTD SRAM platform devices. If you | ||
564 | enable this option, you should consider enabling MTD_PHYSMAP | ||
565 | and/or MTD_PLATRAM according to the devices on your module. | ||
566 | |||
567 | When built as a module, it will be called pismo.ko | ||
568 | |||
552 | endmenu | 569 | endmenu |
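The help text above leaves open how a board tells the discovery driver where its chip selects live. One plausible way to wire it up from board code is sketched below; the I2C bus number, the EEPROM address 0x50 and the chip-select base addresses are invented for this example, and it assumes the pismo_pdata layout from <linux/mtd/pismo.h> (a set_vpp/vpp_data callback pair plus a cs_addrs[] array of chip-select bases):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/mtd/pismo.h>

static struct pismo_pdata board_pismo_pdata = {
        /* no VPP switching on this imaginary board */
        .set_vpp  = NULL,
        .vpp_data = NULL,
        /* base address of each of the five chip selects */
        .cs_addrs = {
                0x40000000, 0x44000000, 0x48000000, 0x4c000000, 0x50000000,
        },
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
        {
                I2C_BOARD_INFO("pismo", 0x50),
                .platform_data = &board_pismo_pdata,
        },
};

static void __init board_add_pismo(void)
{
        /* bus 0 is an assumption; use whichever bus carries the module EEPROM */
        i2c_register_board_info(0, board_i2c_devs, ARRAY_SIZE(board_i2c_devs));
}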
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c new file mode 100644 index 000000000000..c48cad271f5d --- /dev/null +++ b/drivers/mtd/maps/pismo.c | |||
@@ -0,0 +1,320 @@ | |||
1 | /* | ||
2 | * PISMO memory driver - http://www.pismoworld.org/ | ||
3 | * | ||
4 | * For ARM Realview and Versatile platforms | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/i2c.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/mutex.h> | ||
16 | #include <linux/mtd/physmap.h> | ||
17 | #include <linux/mtd/plat-ram.h> | ||
18 | #include <linux/mtd/pismo.h> | ||
19 | |||
20 | #define PISMO_NUM_CS 5 | ||
21 | |||
22 | struct pismo_cs_block { | ||
23 | u8 type; | ||
24 | u8 width; | ||
25 | __le16 access; | ||
26 | __le32 size; | ||
27 | u32 reserved[2]; | ||
28 | char device[32]; | ||
29 | } __packed; | ||
30 | |||
31 | struct pismo_eeprom { | ||
32 | struct pismo_cs_block cs[PISMO_NUM_CS]; | ||
33 | char board[15]; | ||
34 | u8 sum; | ||
35 | } __packed; | ||
36 | |||
37 | struct pismo_mem { | ||
38 | phys_addr_t base; | ||
39 | u32 size; | ||
40 | u16 access; | ||
41 | u8 width; | ||
42 | u8 type; | ||
43 | }; | ||
44 | |||
45 | struct pismo_data { | ||
46 | struct i2c_client *client; | ||
47 | void (*vpp)(void *, int); | ||
48 | void *vpp_data; | ||
49 | struct platform_device *dev[PISMO_NUM_CS]; | ||
50 | }; | ||
51 | |||
52 | /* FIXME: set_vpp could do with a better calling convention */ | ||
53 | static struct pismo_data *vpp_pismo; | ||
54 | static DEFINE_MUTEX(pismo_mutex); | ||
55 | |||
56 | static int pismo_setvpp_probe_fix(struct pismo_data *pismo) | ||
57 | { | ||
58 | mutex_lock(&pismo_mutex); | ||
59 | if (vpp_pismo) { | ||
60 | mutex_unlock(&pismo_mutex); | ||
61 | kfree(pismo); | ||
62 | return -EBUSY; | ||
63 | } | ||
64 | vpp_pismo = pismo; | ||
65 | mutex_unlock(&pismo_mutex); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void pismo_setvpp_remove_fix(struct pismo_data *pismo) | ||
70 | { | ||
71 | mutex_lock(&pismo_mutex); | ||
72 | if (vpp_pismo == pismo) | ||
73 | vpp_pismo = NULL; | ||
74 | mutex_unlock(&pismo_mutex); | ||
75 | } | ||
76 | |||
77 | static void pismo_set_vpp(struct map_info *map, int on) | ||
78 | { | ||
79 | struct pismo_data *pismo = vpp_pismo; | ||
80 | |||
81 | pismo->vpp(pismo->vpp_data, on); | ||
82 | } | ||
83 | /* end of hack */ | ||
84 | |||
85 | |||
86 | static unsigned int __devinit pismo_width_to_bytes(unsigned int width) | ||
87 | { | ||
88 | width &= 15; | ||
89 | if (width > 2) | ||
90 | return 0; | ||
91 | return 1 << width; | ||
92 | } | ||
93 | |||
94 | static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf, | ||
95 | u8 addr, size_t size) | ||
96 | { | ||
97 | int ret; | ||
98 | struct i2c_msg msg[] = { | ||
99 | { | ||
100 | .addr = client->addr, | ||
101 | .len = sizeof(addr), | ||
102 | .buf = &addr, | ||
103 | }, { | ||
104 | .addr = client->addr, | ||
105 | .flags = I2C_M_RD, | ||
106 | .len = size, | ||
107 | .buf = buf, | ||
108 | }, | ||
109 | }; | ||
110 | |||
111 | ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); | ||
112 | |||
113 | return ret == ARRAY_SIZE(msg) ? size : -EIO; | ||
114 | } | ||
115 | |||
116 | static int __devinit pismo_add_device(struct pismo_data *pismo, int i, | ||
117 | struct pismo_mem *region, const char *name, void *pdata, size_t psize) | ||
118 | { | ||
119 | struct platform_device *dev; | ||
120 | struct resource res = { }; | ||
121 | phys_addr_t base = region->base; | ||
122 | int ret; | ||
123 | |||
124 | if (base == ~0) | ||
125 | return -ENXIO; | ||
126 | |||
127 | res.start = base; | ||
128 | res.end = base + region->size - 1; | ||
129 | res.flags = IORESOURCE_MEM; | ||
130 | |||
131 | dev = platform_device_alloc(name, i); | ||
132 | if (!dev) | ||
133 | return -ENOMEM; | ||
134 | dev->dev.parent = &pismo->client->dev; | ||
135 | |||
136 | do { | ||
137 | ret = platform_device_add_resources(dev, &res, 1); | ||
138 | if (ret) | ||
139 | break; | ||
140 | |||
141 | ret = platform_device_add_data(dev, pdata, psize); | ||
142 | if (ret) | ||
143 | break; | ||
144 | |||
145 | ret = platform_device_add(dev); | ||
146 | if (ret) | ||
147 | break; | ||
148 | |||
149 | pismo->dev[i] = dev; | ||
150 | return 0; | ||
151 | } while (0); | ||
152 | |||
153 | platform_device_put(dev); | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | static int __devinit pismo_add_nor(struct pismo_data *pismo, int i, | ||
158 | struct pismo_mem *region) | ||
159 | { | ||
160 | struct physmap_flash_data data = { | ||
161 | .width = region->width, | ||
162 | }; | ||
163 | |||
164 | if (pismo->vpp) | ||
165 | data.set_vpp = pismo_set_vpp; | ||
166 | |||
167 | return pismo_add_device(pismo, i, region, "physmap-flash", | ||
168 | &data, sizeof(data)); | ||
169 | } | ||
170 | |||
171 | static int __devinit pismo_add_sram(struct pismo_data *pismo, int i, | ||
172 | struct pismo_mem *region) | ||
173 | { | ||
174 | struct platdata_mtd_ram data = { | ||
175 | .bankwidth = region->width, | ||
176 | }; | ||
177 | |||
178 | return pismo_add_device(pismo, i, region, "mtd-ram", | ||
179 | &data, sizeof(data)); | ||
180 | } | ||
181 | |||
182 | static void __devinit pismo_add_one(struct pismo_data *pismo, int i, | ||
183 | const struct pismo_cs_block *cs, phys_addr_t base) | ||
184 | { | ||
185 | struct device *dev = &pismo->client->dev; | ||
186 | struct pismo_mem region; | ||
187 | |||
188 | region.base = base; | ||
189 | region.type = cs->type; | ||
190 | region.width = pismo_width_to_bytes(cs->width); | ||
191 | region.access = le16_to_cpu(cs->access); | ||
192 | region.size = le32_to_cpu(cs->size); | ||
193 | |||
194 | if (region.width == 0) { | ||
195 | dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width); | ||
196 | return; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * FIXME: may need to the platforms memory controller here, but at | ||
201 | * the moment we assume that it has already been correctly setup. | ||
202 | * The memory controller can also tell us the base address as well. | ||
203 | */ | ||
204 | |||
205 | dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n", | ||
206 | i, cs->device, region.type, region.access, region.size / 1024); | ||
207 | |||
208 | switch (region.type) { | ||
209 | case 0: | ||
210 | break; | ||
211 | case 1: | ||
212 | /* static DOC */ | ||
213 | break; | ||
214 | case 2: | ||
215 | /* static NOR */ | ||
216 | pismo_add_nor(pismo, i, ®ion); | ||
217 | break; | ||
218 | case 3: | ||
219 | /* static RAM */ | ||
220 | pismo_add_sram(pismo, i, ®ion); | ||
221 | break; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | static int __devexit pismo_remove(struct i2c_client *client) | ||
226 | { | ||
227 | struct pismo_data *pismo = i2c_get_clientdata(client); | ||
228 | int i; | ||
229 | |||
230 | for (i = 0; i < ARRAY_SIZE(pismo->dev); i++) | ||
231 | platform_device_unregister(pismo->dev[i]); | ||
232 | |||
233 | /* FIXME: set_vpp needs saner arguments */ | ||
234 | pismo_setvpp_remove_fix(pismo); | ||
235 | |||
236 | kfree(pismo); | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int __devinit pismo_probe(struct i2c_client *client, | ||
242 | const struct i2c_device_id *id) | ||
243 | { | ||
244 | struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); | ||
245 | struct pismo_pdata *pdata = client->dev.platform_data; | ||
246 | struct pismo_eeprom eeprom; | ||
247 | struct pismo_data *pismo; | ||
248 | int ret, i; | ||
249 | |||
250 | if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { | ||
251 | dev_err(&client->dev, "functionality mismatch\n"); | ||
252 | return -EIO; | ||
253 | } | ||
254 | |||
255 | pismo = kzalloc(sizeof(*pismo), GFP_KERNEL); | ||
256 | if (!pismo) | ||
257 | return -ENOMEM; | ||
258 | |||
259 | /* FIXME: set_vpp needs saner arguments */ | ||
260 | ret = pismo_setvpp_probe_fix(pismo); | ||
261 | if (ret) | ||
262 | return ret; | ||
263 | |||
264 | pismo->client = client; | ||
265 | if (pdata) { | ||
266 | pismo->vpp = pdata->set_vpp; | ||
267 | pismo->vpp_data = pdata->vpp_data; | ||
268 | } | ||
269 | i2c_set_clientdata(client, pismo); | ||
270 | |||
271 | ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom)); | ||
272 | if (ret < 0) { | ||
273 | dev_err(&client->dev, "error reading EEPROM: %d\n", ret); | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | dev_info(&client->dev, "%.15s board found\n", eeprom.board); | ||
278 | |||
279 | for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++) | ||
280 | if (eeprom.cs[i].type != 0xff) | ||
281 | pismo_add_one(pismo, i, &eeprom.cs[i], | ||
282 | pdata->cs_addrs[i]); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static const struct i2c_device_id pismo_id[] = { | ||
288 | { "pismo" }, | ||
289 | { }, | ||
290 | }; | ||
291 | MODULE_DEVICE_TABLE(i2c, pismo_id); | ||
292 | |||
293 | static struct i2c_driver pismo_driver = { | ||
294 | .driver = { | ||
295 | .name = "pismo", | ||
296 | .owner = THIS_MODULE, | ||
297 | }, | ||
298 | .probe = pismo_probe, | ||
299 | .remove = __devexit_p(pismo_remove), | ||
300 | .id_table = pismo_id, | ||
301 | }; | ||
302 | |||
303 | static int __init pismo_init(void) | ||
304 | { | ||
305 | BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48); | ||
306 | BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256); | ||
307 | |||
308 | return i2c_add_driver(&pismo_driver); | ||
309 | } | ||
310 | module_init(pismo_init); | ||
311 | |||
312 | static void __exit pismo_exit(void) | ||
313 | { | ||
314 | i2c_del_driver(&pismo_driver); | ||
315 | } | ||
316 | module_exit(pismo_exit); | ||
317 | |||
318 | MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>"); | ||
319 | MODULE_DESCRIPTION("PISMO memory driver"); | ||
320 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index a714ec482761..92e12df0917f 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
@@ -322,7 +322,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper, | |||
322 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); | 322 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); |
323 | 323 | ||
324 | /* Panics must be written immediately */ | 324 | /* Panics must be written immediately */ |
325 | if (reason == KMSG_DUMP_PANIC) { | 325 | if (reason != KMSG_DUMP_OOPS) { |
326 | if (!cxt->mtd->panic_write) | 326 | if (!cxt->mtd->panic_write) |
327 | printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); | 327 | printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); |
328 | else | 328 | else |
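The mtdoops change above widens the synchronous path from "panic only" to "anything that is not a plain oops", since only an oops leaves the system running long enough to defer the flash write. A stripped-down dumper showing the same decision; my_sync_write() and my_deferred_write() are hypothetical stand-ins, not mtdoops functions:

#include <linux/kmsg_dump.h>

static void my_sync_write(const char *s1, unsigned long l1,
                          const char *s2, unsigned long l2)
{
        /* would use the MTD panic_write path: no sleeping, no scheduling */
}

static void my_deferred_write(const char *s1, unsigned long l1,
                              const char *s2, unsigned long l2)
{
        /* would copy the buffers and schedule a work item to write them */
}

static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
                    const char *s1, unsigned long l1,
                    const char *s2, unsigned long l2)
{
        if (reason != KMSG_DUMP_OOPS)
                my_sync_write(s1, l1, s2, l2);          /* panic etc.: write now */
        else
                my_deferred_write(s1, l1, s2, l2);      /* oops: defer to a worker */
}

static struct kmsg_dumper my_dumper = {
        .dump = my_dump,
};
/* kmsg_dump_register(&my_dumper) would hook it up at init time */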
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c index 79fc4530987b..25c5dd03a837 100644 --- a/drivers/mtd/tests/mtd_readtest.c +++ b/drivers/mtd/tests/mtd_readtest.c | |||
@@ -147,6 +147,10 @@ static int scan_for_bad_eraseblocks(void) | |||
147 | } | 147 | } |
148 | memset(bbt, 0 , ebcnt); | 148 | memset(bbt, 0 , ebcnt); |
149 | 149 | ||
150 | /* NOR flash does not implement block_isbad */ | ||
151 | if (mtd->block_isbad == NULL) | ||
152 | return 0; | ||
153 | |||
150 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 154 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
151 | for (i = 0; i < ebcnt; ++i) { | 155 | for (i = 0; i < ebcnt; ++i) { |
152 | bbt[i] = is_block_bad(i) ? 1 : 0; | 156 | bbt[i] = is_block_bad(i) ? 1 : 0; |
@@ -184,7 +188,7 @@ static int __init mtd_readtest_init(void) | |||
184 | tmp = mtd->size; | 188 | tmp = mtd->size; |
185 | do_div(tmp, mtd->erasesize); | 189 | do_div(tmp, mtd->erasesize); |
186 | ebcnt = tmp; | 190 | ebcnt = tmp; |
187 | pgcnt = mtd->erasesize / mtd->writesize; | 191 | pgcnt = mtd->erasesize / pgsize; |
188 | 192 | ||
189 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 193 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
190 | "page size %u, count of eraseblocks %u, pages per " | 194 | "page size %u, count of eraseblocks %u, pages per " |
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 141363a7e805..7fbb51d4eabe 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -301,6 +301,10 @@ static int scan_for_bad_eraseblocks(void) | |||
301 | } | 301 | } |
302 | memset(bbt, 0 , ebcnt); | 302 | memset(bbt, 0 , ebcnt); |
303 | 303 | ||
304 | /* NOR flash does not implement block_isbad */ | ||
305 | if (mtd->block_isbad == NULL) | ||
306 | goto out; | ||
307 | |||
304 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 308 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
305 | for (i = 0; i < ebcnt; ++i) { | 309 | for (i = 0; i < ebcnt; ++i) { |
306 | bbt[i] = is_block_bad(i) ? 1 : 0; | 310 | bbt[i] = is_block_bad(i) ? 1 : 0; |
@@ -309,6 +313,7 @@ static int scan_for_bad_eraseblocks(void) | |||
309 | cond_resched(); | 313 | cond_resched(); |
310 | } | 314 | } |
311 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); | 315 | printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad); |
316 | out: | ||
312 | goodebcnt = ebcnt - bad; | 317 | goodebcnt = ebcnt - bad; |
313 | return 0; | 318 | return 0; |
314 | } | 319 | } |
@@ -340,7 +345,7 @@ static int __init mtd_speedtest_init(void) | |||
340 | tmp = mtd->size; | 345 | tmp = mtd->size; |
341 | do_div(tmp, mtd->erasesize); | 346 | do_div(tmp, mtd->erasesize); |
342 | ebcnt = tmp; | 347 | ebcnt = tmp; |
343 | pgcnt = mtd->erasesize / mtd->writesize; | 348 | pgcnt = mtd->erasesize / pgsize; |
344 | 349 | ||
345 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 350 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
346 | "page size %u, count of eraseblocks %u, pages per " | 351 | "page size %u, count of eraseblocks %u, pages per " |
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c index 63920476b57a..a99d3cd737d8 100644 --- a/drivers/mtd/tests/mtd_stresstest.c +++ b/drivers/mtd/tests/mtd_stresstest.c | |||
@@ -227,6 +227,10 @@ static int scan_for_bad_eraseblocks(void) | |||
227 | } | 227 | } |
228 | memset(bbt, 0 , ebcnt); | 228 | memset(bbt, 0 , ebcnt); |
229 | 229 | ||
230 | /* NOR flash does not implement block_isbad */ | ||
231 | if (mtd->block_isbad == NULL) | ||
232 | return 0; | ||
233 | |||
230 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); | 234 | printk(PRINT_PREF "scanning for bad eraseblocks\n"); |
231 | for (i = 0; i < ebcnt; ++i) { | 235 | for (i = 0; i < ebcnt; ++i) { |
232 | bbt[i] = is_block_bad(i) ? 1 : 0; | 236 | bbt[i] = is_block_bad(i) ? 1 : 0; |
@@ -265,7 +269,7 @@ static int __init mtd_stresstest_init(void) | |||
265 | tmp = mtd->size; | 269 | tmp = mtd->size; |
266 | do_div(tmp, mtd->erasesize); | 270 | do_div(tmp, mtd->erasesize); |
267 | ebcnt = tmp; | 271 | ebcnt = tmp; |
268 | pgcnt = mtd->erasesize / mtd->writesize; | 272 | pgcnt = mtd->erasesize / pgsize; |
269 | 273 | ||
270 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 274 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
271 | "page size %u, count of eraseblocks %u, pages per " | 275 | "page size %u, count of eraseblocks %u, pages per " |
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 277786ebaa2c..1361574e2b00 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -291,8 +291,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm); | |||
291 | */ | 291 | */ |
292 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | 292 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) |
293 | { | 293 | { |
294 | int error, ubi_num, vol_id; | 294 | int error, ubi_num, vol_id, mod; |
295 | struct ubi_volume_desc *ret; | ||
296 | struct inode *inode; | 295 | struct inode *inode; |
297 | struct path path; | 296 | struct path path; |
298 | 297 | ||
@@ -306,16 +305,16 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | |||
306 | return ERR_PTR(error); | 305 | return ERR_PTR(error); |
307 | 306 | ||
308 | inode = path.dentry->d_inode; | 307 | inode = path.dentry->d_inode; |
308 | mod = inode->i_mode; | ||
309 | ubi_num = ubi_major2num(imajor(inode)); | 309 | ubi_num = ubi_major2num(imajor(inode)); |
310 | vol_id = iminor(inode) - 1; | 310 | vol_id = iminor(inode) - 1; |
311 | path_put(&path); | ||
311 | 312 | ||
313 | if (!S_ISCHR(mod)) | ||
314 | return ERR_PTR(-EINVAL); | ||
312 | if (vol_id >= 0 && ubi_num >= 0) | 315 | if (vol_id >= 0 && ubi_num >= 0) |
313 | ret = ubi_open_volume(ubi_num, vol_id, mode); | 316 | return ubi_open_volume(ubi_num, vol_id, mode); |
314 | else | 317 | return ERR_PTR(-ENODEV); |
315 | ret = ERR_PTR(-ENODEV); | ||
316 | |||
317 | path_put(&path); | ||
318 | return ret; | ||
319 | } | 318 | } |
320 | EXPORT_SYMBOL_GPL(ubi_open_volume_path); | 319 | EXPORT_SYMBOL_GPL(ubi_open_volume_path); |
321 | 320 | ||
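With the S_ISCHR check above, callers of ubi_open_volume_path() can now see -EINVAL for a path that is not a UBI character device, in addition to the existing -ENODEV. A minimal caller illustrating the error handling; the "/dev/ubi0_0" path is only an example:

#include <linux/err.h>
#include <linux/mtd/ubi.h>

static int open_volume_by_path(void)
{
        struct ubi_volume_desc *desc;

        desc = ubi_open_volume_path("/dev/ubi0_0", UBI_READONLY);
        if (IS_ERR(desc))
                return PTR_ERR(desc);   /* -EINVAL, -ENODEV, ... */

        /* ... read from the volume with ubi_leb_read() ... */

        ubi_close_volume(desc);
        return 0;
}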
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index c1d7b880c795..425bf5a3edd4 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c | |||
@@ -155,6 +155,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, | |||
155 | if (err) | 155 | if (err) |
156 | return err; | 156 | return err; |
157 | vol->updating = 0; | 157 | vol->updating = 0; |
158 | return 0; | ||
158 | } | 159 | } |
159 | 160 | ||
160 | vol->upd_buf = vmalloc(ubi->leb_size); | 161 | vol->upd_buf = vmalloc(ubi->leb_size); |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 1afc61e7455d..40044028d682 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -566,6 +566,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | |||
566 | vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); | 566 | vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); |
567 | vol->alignment = be32_to_cpu(vtbl[i].alignment); | 567 | vol->alignment = be32_to_cpu(vtbl[i].alignment); |
568 | vol->data_pad = be32_to_cpu(vtbl[i].data_pad); | 568 | vol->data_pad = be32_to_cpu(vtbl[i].data_pad); |
569 | vol->upd_marker = vtbl[i].upd_marker; | ||
569 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? | 570 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? |
570 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; | 571 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; |
571 | vol->name_len = be16_to_cpu(vtbl[i].name_len); | 572 | vol->name_len = be16_to_cpu(vtbl[i].name_len); |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 102ade134165..fee6eee7ae5b 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c | |||
@@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, | |||
286 | MCC_WRB_SGE_CNT_SHIFT; | 286 | MCC_WRB_SGE_CNT_SHIFT; |
287 | wrb->payload_length = payload_len; | 287 | wrb->payload_length = payload_len; |
288 | wrb->tag0 = opcode; | 288 | wrb->tag0 = opcode; |
289 | be_dws_cpu_to_le(wrb, 20); | 289 | be_dws_cpu_to_le(wrb, 8); |
290 | } | 290 | } |
291 | 291 | ||
292 | /* Don't touch the hdr after it's prepared */ | 292 | /* Don't touch the hdr after it's prepared */ |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 3a1f7902c16d..33ab8c7f14fe 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size) | |||
910 | static void be_post_rx_frags(struct be_adapter *adapter) | 910 | static void be_post_rx_frags(struct be_adapter *adapter) |
911 | { | 911 | { |
912 | struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; | 912 | struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; |
913 | struct be_rx_page_info *page_info = NULL; | 913 | struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; |
914 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 914 | struct be_queue_info *rxq = &adapter->rx_obj.q; |
915 | struct page *pagep = NULL; | 915 | struct page *pagep = NULL; |
916 | struct be_eth_rx_d *rxd; | 916 | struct be_eth_rx_d *rxd; |
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter) | |||
941 | rxd = queue_head_node(rxq); | 941 | rxd = queue_head_node(rxq); |
942 | rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); | 942 | rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); |
943 | rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); | 943 | rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); |
944 | queue_head_inc(rxq); | ||
945 | 944 | ||
946 | /* Any space left in the current big page for another frag? */ | 945 | /* Any space left in the current big page for another frag? */ |
947 | if ((page_offset + rx_frag_size + rx_frag_size) > | 946 | if ((page_offset + rx_frag_size + rx_frag_size) > |
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter) | |||
949 | pagep = NULL; | 948 | pagep = NULL; |
950 | page_info->last_page_user = true; | 949 | page_info->last_page_user = true; |
951 | } | 950 | } |
951 | |||
952 | prev_page_info = page_info; | ||
953 | queue_head_inc(rxq); | ||
952 | page_info = &page_info_tbl[rxq->head]; | 954 | page_info = &page_info_tbl[rxq->head]; |
953 | } | 955 | } |
954 | if (pagep) | 956 | if (pagep) |
955 | page_info->last_page_user = true; | 957 | prev_page_info->last_page_user = true; |
956 | 958 | ||
957 | if (posted) { | 959 | if (posted) { |
958 | atomic_add(posted, &rxq->used); | 960 | atomic_add(posted, &rxq->used); |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 8ffea3990d07..0b23bc4f56c6 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/dma.h> | 33 | #include <asm/dma.h> |
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
35 | 35 | ||
36 | #include <asm/dpmc.h> | ||
36 | #include <asm/blackfin.h> | 37 | #include <asm/blackfin.h> |
37 | #include <asm/cacheflush.h> | 38 | #include <asm/cacheflush.h> |
38 | #include <asm/portmux.h> | 39 | #include <asm/portmux.h> |
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev) | |||
386 | u32 sclk, mdc_div; | 387 | u32 sclk, mdc_div; |
387 | 388 | ||
388 | /* Enable PHY output early */ | 389 | /* Enable PHY output early */ |
389 | if (!(bfin_read_VR_CTL() & PHYCLKOE)) | 390 | if (!(bfin_read_VR_CTL() & CLKBUFOE)) |
390 | bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); | 391 | bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE); |
391 | 392 | ||
392 | sclk = get_sclk(); | 393 | sclk = get_sclk(); |
393 | mdc_div = ((sclk / MDC_CLK) / 2) - 1; | 394 | mdc_div = ((sclk / MDC_CLK) / 2) - 1; |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 2a567df3ea71..e8932db7ee77 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -326,6 +326,8 @@ struct e1000_adapter { | |||
326 | /* for ioport free */ | 326 | /* for ioport free */ |
327 | int bars; | 327 | int bars; |
328 | int need_ioport; | 328 | int need_ioport; |
329 | |||
330 | bool discarding; | ||
329 | }; | 331 | }; |
330 | 332 | ||
331 | enum e1000_state_t { | 333 | enum e1000_state_t { |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 7e855f9bbd97..d29bb532eccf 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1698 | rctl &= ~E1000_RCTL_SZ_4096; | 1698 | rctl &= ~E1000_RCTL_SZ_4096; |
1699 | rctl |= E1000_RCTL_BSEX; | 1699 | rctl |= E1000_RCTL_BSEX; |
1700 | switch (adapter->rx_buffer_len) { | 1700 | switch (adapter->rx_buffer_len) { |
1701 | case E1000_RXBUFFER_256: | ||
1702 | rctl |= E1000_RCTL_SZ_256; | ||
1703 | rctl &= ~E1000_RCTL_BSEX; | ||
1704 | break; | ||
1705 | case E1000_RXBUFFER_512: | ||
1706 | rctl |= E1000_RCTL_SZ_512; | ||
1707 | rctl &= ~E1000_RCTL_BSEX; | ||
1708 | break; | ||
1709 | case E1000_RXBUFFER_1024: | ||
1710 | rctl |= E1000_RCTL_SZ_1024; | ||
1711 | rctl &= ~E1000_RCTL_BSEX; | ||
1712 | break; | ||
1713 | case E1000_RXBUFFER_2048: | 1701 | case E1000_RXBUFFER_2048: |
1714 | default: | 1702 | default: |
1715 | rctl |= E1000_RCTL_SZ_2048; | 1703 | rctl |= E1000_RCTL_SZ_2048; |
@@ -2802,13 +2790,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2802 | dma_error: | 2790 | dma_error: |
2803 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 2791 | dev_err(&pdev->dev, "TX DMA map failed\n"); |
2804 | buffer_info->dma = 0; | 2792 | buffer_info->dma = 0; |
2805 | count--; | 2793 | if (count) |
2806 | |||
2807 | while (count >= 0) { | ||
2808 | count--; | 2794 | count--; |
2809 | i--; | 2795 | |
2810 | if (i < 0) | 2796 | while (count--) { |
2797 | if (i==0) | ||
2811 | i += tx_ring->count; | 2798 | i += tx_ring->count; |
2799 | i--; | ||
2812 | buffer_info = &tx_ring->buffer_info[i]; | 2800 | buffer_info = &tx_ring->buffer_info[i]; |
2813 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 2801 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
2814 | } | 2802 | } |
@@ -3176,13 +3164,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3176 | * however with the new *_jumbo_rx* routines, jumbo receives will use | 3164 | * however with the new *_jumbo_rx* routines, jumbo receives will use |
3177 | * fragmented skbs */ | 3165 | * fragmented skbs */ |
3178 | 3166 | ||
3179 | if (max_frame <= E1000_RXBUFFER_256) | 3167 | if (max_frame <= E1000_RXBUFFER_2048) |
3180 | adapter->rx_buffer_len = E1000_RXBUFFER_256; | ||
3181 | else if (max_frame <= E1000_RXBUFFER_512) | ||
3182 | adapter->rx_buffer_len = E1000_RXBUFFER_512; | ||
3183 | else if (max_frame <= E1000_RXBUFFER_1024) | ||
3184 | adapter->rx_buffer_len = E1000_RXBUFFER_1024; | ||
3185 | else if (max_frame <= E1000_RXBUFFER_2048) | ||
3186 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | 3168 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; |
3187 | else | 3169 | else |
3188 | #if (PAGE_SIZE >= E1000_RXBUFFER_16384) | 3170 | #if (PAGE_SIZE >= E1000_RXBUFFER_16384) |
@@ -3850,13 +3832,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3850 | 3832 | ||
3851 | length = le16_to_cpu(rx_desc->length); | 3833 | length = le16_to_cpu(rx_desc->length); |
3852 | /* !EOP means multiple descriptors were used to store a single | 3834 | /* !EOP means multiple descriptors were used to store a single |
3853 | * packet, also make sure the frame isn't just CRC only */ | 3835 | * packet, if thats the case we need to toss it. In fact, we |
3854 | if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { | 3836 | * to toss every packet with the EOP bit clear and the next |
3837 | * frame that _does_ have the EOP bit set, as it is by | ||
3838 | * definition only a frame fragment | ||
3839 | */ | ||
3840 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
3841 | adapter->discarding = true; | ||
3842 | |||
3843 | if (adapter->discarding) { | ||
3855 | /* All receives must fit into a single buffer */ | 3844 | /* All receives must fit into a single buffer */ |
3856 | E1000_DBG("%s: Receive packet consumed multiple" | 3845 | E1000_DBG("%s: Receive packet consumed multiple" |
3857 | " buffers\n", netdev->name); | 3846 | " buffers\n", netdev->name); |
3858 | /* recycle */ | 3847 | /* recycle */ |
3859 | buffer_info->skb = skb; | 3848 | buffer_info->skb = skb; |
3849 | if (status & E1000_RXD_STAT_EOP) | ||
3850 | adapter->discarding = false; | ||
3860 | goto next_desc; | 3851 | goto next_desc; |
3861 | } | 3852 | } |
3862 | 3853 | ||
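The new discarding flag turns the old per-descriptor length check into a small state machine: the first descriptor that shows up without EOP poisons everything up to and including the next descriptor that does carry EOP. A stand-alone model of that logic; struct rx_desc and RXD_STAT_EOP are simplified stand-ins, not the real e1000 descriptor layout:

#include <stdbool.h>
#include <stdint.h>

struct rx_desc {
        uint8_t status;
};
#define RXD_STAT_EOP    0x02    /* end-of-packet bit, as a stand-in */

static bool discarding;         /* per-ring in the driver, global here */

/* Returns true when the buffer behind this descriptor must be dropped. */
static bool rx_should_discard(const struct rx_desc *rxd)
{
        bool drop;

        if (!(rxd->status & RXD_STAT_EOP))
                discarding = true;      /* frame spilled into another buffer */

        drop = discarding;

        if (discarding && (rxd->status & RXD_STAT_EOP))
                discarding = false;     /* fragment train ends here; resync */

        return drop;
}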
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index d6ee28f6ea08..d236efaf7478 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -421,6 +421,7 @@ struct e1000_info { | |||
421 | /* CRC Stripping defines */ | 421 | /* CRC Stripping defines */ |
422 | #define FLAG2_CRC_STRIPPING (1 << 0) | 422 | #define FLAG2_CRC_STRIPPING (1 << 0) |
423 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) | 423 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) |
424 | #define FLAG2_IS_DISCARDING (1 << 2) | ||
424 | 425 | ||
425 | #define E1000_RX_DESC_PS(R, i) \ | 426 | #define E1000_RX_DESC_PS(R, i) \ |
426 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 427 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index c45965a256b6..57f149b75fbe 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
450 | 450 | ||
451 | length = le16_to_cpu(rx_desc->length); | 451 | length = le16_to_cpu(rx_desc->length); |
452 | 452 | ||
453 | /* !EOP means multiple descriptors were used to store a single | 453 | /* |
454 | * packet, also make sure the frame isn't just CRC only */ | 454 | * !EOP means multiple descriptors were used to store a single |
455 | if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | 455 | * packet, if that's the case we need to toss it. In fact, we |
456 | * need to toss every packet with the EOP bit clear and the | ||
457 | * next frame that _does_ have the EOP bit set, as it is by | ||
458 | * definition only a frame fragment | ||
459 | */ | ||
460 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
461 | adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
462 | |||
463 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
456 | /* All receives must fit into a single buffer */ | 464 | /* All receives must fit into a single buffer */ |
457 | e_dbg("Receive packet consumed multiple buffers\n"); | 465 | e_dbg("Receive packet consumed multiple buffers\n"); |
458 | /* recycle */ | 466 | /* recycle */ |
459 | buffer_info->skb = skb; | 467 | buffer_info->skb = skb; |
468 | if (status & E1000_RXD_STAT_EOP) | ||
469 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
460 | goto next_desc; | 470 | goto next_desc; |
461 | } | 471 | } |
462 | 472 | ||
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
745 | PCI_DMA_FROMDEVICE); | 755 | PCI_DMA_FROMDEVICE); |
746 | buffer_info->dma = 0; | 756 | buffer_info->dma = 0; |
747 | 757 | ||
748 | if (!(staterr & E1000_RXD_STAT_EOP)) { | 758 | /* see !EOP comment in other rx routine */ |
759 | if (!(staterr & E1000_RXD_STAT_EOP)) | ||
760 | adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
761 | |||
762 | if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
749 | e_dbg("Packet Split buffers didn't pick up the full " | 763 | e_dbg("Packet Split buffers didn't pick up the full " |
750 | "packet\n"); | 764 | "packet\n"); |
751 | dev_kfree_skb_irq(skb); | 765 | dev_kfree_skb_irq(skb); |
766 | if (staterr & E1000_RXD_STAT_EOP) | ||
767 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
752 | goto next_desc; | 768 | goto next_desc; |
753 | } | 769 | } |
754 | 770 | ||
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1118 | 1134 | ||
1119 | rx_ring->next_to_clean = 0; | 1135 | rx_ring->next_to_clean = 0; |
1120 | rx_ring->next_to_use = 0; | 1136 | rx_ring->next_to_use = 0; |
1137 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
1121 | 1138 | ||
1122 | writel(0, adapter->hw.hw_addr + rx_ring->head); | 1139 | writel(0, adapter->hw.hw_addr + rx_ring->head); |
1123 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | 1140 | writel(0, adapter->hw.hw_addr + rx_ring->tail); |
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2333 | rctl &= ~E1000_RCTL_SZ_4096; | 2350 | rctl &= ~E1000_RCTL_SZ_4096; |
2334 | rctl |= E1000_RCTL_BSEX; | 2351 | rctl |= E1000_RCTL_BSEX; |
2335 | switch (adapter->rx_buffer_len) { | 2352 | switch (adapter->rx_buffer_len) { |
2336 | case 256: | ||
2337 | rctl |= E1000_RCTL_SZ_256; | ||
2338 | rctl &= ~E1000_RCTL_BSEX; | ||
2339 | break; | ||
2340 | case 512: | ||
2341 | rctl |= E1000_RCTL_SZ_512; | ||
2342 | rctl &= ~E1000_RCTL_BSEX; | ||
2343 | break; | ||
2344 | case 1024: | ||
2345 | rctl |= E1000_RCTL_SZ_1024; | ||
2346 | rctl &= ~E1000_RCTL_BSEX; | ||
2347 | break; | ||
2348 | case 2048: | 2353 | case 2048: |
2349 | default: | 2354 | default: |
2350 | rctl |= E1000_RCTL_SZ_2048; | 2355 | rctl |= E1000_RCTL_SZ_2048; |
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
3781 | 0, IPPROTO_TCP, 0); | 3786 | 0, IPPROTO_TCP, 0); |
3782 | cmd_length = E1000_TXD_CMD_IP; | 3787 | cmd_length = E1000_TXD_CMD_IP; |
3783 | ipcse = skb_transport_offset(skb) - 1; | 3788 | ipcse = skb_transport_offset(skb) - 1; |
3784 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 3789 | } else if (skb_is_gso_v6(skb)) { |
3785 | ipv6_hdr(skb)->payload_len = 0; | 3790 | ipv6_hdr(skb)->payload_len = 0; |
3786 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 3791 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
3787 | &ipv6_hdr(skb)->daddr, | 3792 | &ipv6_hdr(skb)->daddr, |
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3962 | dma_error: | 3967 | dma_error: |
3963 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 3968 | dev_err(&pdev->dev, "TX DMA map failed\n"); |
3964 | buffer_info->dma = 0; | 3969 | buffer_info->dma = 0; |
3965 | count--; | 3970 | if (count) |
3966 | |||
3967 | while (count >= 0) { | ||
3968 | count--; | 3971 | count--; |
3969 | i--; | 3972 | |
3970 | if (i < 0) | 3973 | while (count--) { |
3974 | if (i==0) | ||
3971 | i += tx_ring->count; | 3975 | i += tx_ring->count; |
3976 | i--; | ||
3972 | buffer_info = &tx_ring->buffer_info[i]; | 3977 | buffer_info = &tx_ring->buffer_info[i]; |
3973 | e1000_put_txbuf(adapter, buffer_info);; | 3978 | e1000_put_txbuf(adapter, buffer_info);; |
3974 | } | 3979 | } |
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4317 | * fragmented skbs | 4322 | * fragmented skbs |
4318 | */ | 4323 | */ |
4319 | 4324 | ||
4320 | if (max_frame <= 256) | 4325 | if (max_frame <= 2048) |
4321 | adapter->rx_buffer_len = 256; | ||
4322 | else if (max_frame <= 512) | ||
4323 | adapter->rx_buffer_len = 512; | ||
4324 | else if (max_frame <= 1024) | ||
4325 | adapter->rx_buffer_len = 1024; | ||
4326 | else if (max_frame <= 2048) | ||
4327 | adapter->rx_buffer_len = 2048; | 4326 | adapter->rx_buffer_len = 2048; |
4328 | else | 4327 | else |
4329 | adapter->rx_buffer_len = 4096; | 4328 | adapter->rx_buffer_len = 4096; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 933c64ff2465..997124d2992a 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -3422,7 +3422,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring, | |||
3422 | iph->daddr, 0, | 3422 | iph->daddr, 0, |
3423 | IPPROTO_TCP, | 3423 | IPPROTO_TCP, |
3424 | 0); | 3424 | 0); |
3425 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 3425 | } else if (skb_is_gso_v6(skb)) { |
3426 | ipv6_hdr(skb)->payload_len = 0; | 3426 | ipv6_hdr(skb)->payload_len = 0; |
3427 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 3427 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
3428 | &ipv6_hdr(skb)->daddr, | 3428 | &ipv6_hdr(skb)->daddr, |
@@ -3584,6 +3584,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, | |||
3584 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | 3584 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
3585 | struct skb_frag_struct *frag; | 3585 | struct skb_frag_struct *frag; |
3586 | 3586 | ||
3587 | count++; | ||
3587 | i++; | 3588 | i++; |
3588 | if (i == tx_ring->count) | 3589 | if (i == tx_ring->count) |
3589 | i = 0; | 3590 | i = 0; |
@@ -3605,7 +3606,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, | |||
3605 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | 3606 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) |
3606 | goto dma_error; | 3607 | goto dma_error; |
3607 | 3608 | ||
3608 | count++; | ||
3609 | } | 3609 | } |
3610 | 3610 | ||
3611 | tx_ring->buffer_info[i].skb = skb; | 3611 | tx_ring->buffer_info[i].skb = skb; |
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 0dbd0320023a..297a5ddd77f0 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c | |||
@@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1963 | iph->daddr, 0, | 1963 | iph->daddr, 0, |
1964 | IPPROTO_TCP, | 1964 | IPPROTO_TCP, |
1965 | 0); | 1965 | 0); |
1966 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 1966 | } else if (skb_is_gso_v6(skb)) { |
1967 | ipv6_hdr(skb)->payload_len = 0; | 1967 | ipv6_hdr(skb)->payload_len = 0; |
1968 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 1968 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
1969 | &ipv6_hdr(skb)->daddr, | 1969 | &ipv6_hdr(skb)->daddr, |
@@ -2126,6 +2126,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2126 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | 2126 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
2127 | struct skb_frag_struct *frag; | 2127 | struct skb_frag_struct *frag; |
2128 | 2128 | ||
2129 | count++; | ||
2129 | i++; | 2130 | i++; |
2130 | if (i == tx_ring->count) | 2131 | if (i == tx_ring->count) |
2131 | i = 0; | 2132 | i = 0; |
@@ -2146,7 +2147,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2146 | PCI_DMA_TODEVICE); | 2147 | PCI_DMA_TODEVICE); |
2147 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | 2148 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) |
2148 | goto dma_error; | 2149 | goto dma_error; |
2149 | count++; | ||
2150 | } | 2150 | } |
2151 | 2151 | ||
2152 | tx_ring->buffer_info[i].skb = skb; | 2152 | tx_ring->buffer_info[i].skb = skb; |
@@ -2163,14 +2163,14 @@ dma_error: | |||
2163 | buffer_info->length = 0; | 2163 | buffer_info->length = 0; |
2164 | buffer_info->next_to_watch = 0; | 2164 | buffer_info->next_to_watch = 0; |
2165 | buffer_info->mapped_as_page = false; | 2165 | buffer_info->mapped_as_page = false; |
2166 | count--; | 2166 | if (count) |
2167 | count--; | ||
2167 | 2168 | ||
2168 | /* clear timestamp and dma mappings for remaining portion of packet */ | 2169 | /* clear timestamp and dma mappings for remaining portion of packet */ |
2169 | while (count >= 0) { | 2170 | while (count--) { |
2170 | count--; | 2171 | if (i==0) |
2171 | i--; | ||
2172 | if (i < 0) | ||
2173 | i += tx_ring->count; | 2172 | i += tx_ring->count; |
2173 | i--; | ||
2174 | buffer_info = &tx_ring->buffer_info[i]; | 2174 | buffer_info = &tx_ring->buffer_info[i]; |
2175 | igbvf_put_txbuf(adapter, buffer_info); | 2175 | igbvf_put_txbuf(adapter, buffer_info); |
2176 | } | 2176 | } |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index bcd0f01d5feb..593d1a4f217c 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1363 | dma_error: | 1363 | dma_error: |
1364 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 1364 | dev_err(&pdev->dev, "TX DMA map failed\n"); |
1365 | buffer_info->dma = 0; | 1365 | buffer_info->dma = 0; |
1366 | count--; | 1366 | if (count) |
1367 | |||
1368 | while (count >= 0) { | ||
1369 | count--; | 1367 | count--; |
1370 | i--; | 1368 | |
1371 | if (i < 0) | 1369 | while (count--) { |
1370 | if (i==0) | ||
1372 | i += tx_ring->count; | 1371 | i += tx_ring->count; |
1372 | i--; | ||
1373 | buffer_info = &tx_ring->buffer_info[i]; | 1373 | buffer_info = &tx_ring->buffer_info[i]; |
1374 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); | 1374 | ixgb_unmap_and_free_tx_resource(adapter, buffer_info); |
1375 | } | 1375 | } |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 9c9202f40b10..b5f64ad67975 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -4928,7 +4928,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
4928 | iph->daddr, 0, | 4928 | iph->daddr, 0, |
4929 | IPPROTO_TCP, | 4929 | IPPROTO_TCP, |
4930 | 0); | 4930 | 0); |
4931 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 4931 | } else if (skb_is_gso_v6(skb)) { |
4932 | ipv6_hdr(skb)->payload_len = 0; | 4932 | ipv6_hdr(skb)->payload_len = 0; |
4933 | tcp_hdr(skb)->check = | 4933 | tcp_hdr(skb)->check = |
4934 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 4934 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
@@ -5167,14 +5167,14 @@ dma_error: | |||
5167 | tx_buffer_info->dma = 0; | 5167 | tx_buffer_info->dma = 0; |
5168 | tx_buffer_info->time_stamp = 0; | 5168 | tx_buffer_info->time_stamp = 0; |
5169 | tx_buffer_info->next_to_watch = 0; | 5169 | tx_buffer_info->next_to_watch = 0; |
5170 | count--; | 5170 | if (count) |
5171 | count--; | ||
5171 | 5172 | ||
5172 | /* clear timestamp and dma mappings for remaining portion of packet */ | 5173 | /* clear timestamp and dma mappings for remaining portion of packet */ |
5173 | while (count >= 0) { | 5174 | while (count--) { |
5174 | count--; | 5175 | if (i==0) |
5175 | i--; | ||
5176 | if (i < 0) | ||
5177 | i += tx_ring->count; | 5176 | i += tx_ring->count; |
5177 | i--; | ||
5178 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 5178 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
5179 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 5179 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); |
5180 | } | 5180 | } |
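The same unwind loop is patched in e1000, e1000e, igbvf, ixgb and ixgbe above: after a DMA mapping failure the driver has to walk backwards over the descriptors it already mapped, wrapping at index 0, without touching the entry that failed. A ring-agnostic model of the corrected loop; struct tx_buffer and unmap_one() are placeholders for the per-driver buffer_info handling:

struct tx_buffer {
        int mapped;             /* stands in for dma/skb state */
};

static void unmap_one(struct tx_buffer *buf)
{
        buf->mapped = 0;        /* a driver would unmap DMA and free the skb */
}

/*
 * i     - ring index of the entry whose mapping failed (never mapped)
 * count - number of entries successfully mapped before the failure
 */
static void unwind_tx_map(struct tx_buffer *ring, unsigned int ring_size,
                          unsigned int i, unsigned int count)
{
        while (count--) {
                if (i == 0)
                        i += ring_size; /* wrap back to the top of the ring */
                i--;
                unmap_one(&ring[i]);
        }
}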
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 813aca3fc433..7b17404d0858 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { | |||
717 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), | 717 | PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), |
718 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), | 718 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), |
719 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), | 719 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), |
720 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), | ||
720 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), | 721 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), |
721 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), | 722 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), |
722 | PCMCIA_DEVICE_NULL, | 723 | PCMCIA_DEVICE_NULL, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index b0e9f9c51721..0295097d6c44 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg); | |||
410 | 410 | ||
411 | 411 | ||
412 | static void phy_change(struct work_struct *work); | 412 | static void phy_change(struct work_struct *work); |
413 | static void phy_state_machine(struct work_struct *work); | ||
414 | 413 | ||
415 | /** | 414 | /** |
416 | * phy_start_machine - start PHY state machine tracking | 415 | * phy_start_machine - start PHY state machine tracking |
@@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev, | |||
430 | { | 429 | { |
431 | phydev->adjust_state = handler; | 430 | phydev->adjust_state = handler; |
432 | 431 | ||
433 | INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine); | ||
434 | schedule_delayed_work(&phydev->state_queue, HZ); | 432 | schedule_delayed_work(&phydev->state_queue, HZ); |
435 | } | 433 | } |
436 | 434 | ||
@@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start); | |||
761 | * phy_state_machine - Handle the state machine | 759 | * phy_state_machine - Handle the state machine |
762 | * @work: work_struct that describes the work to be done | 760 | * @work: work_struct that describes the work to be done |
763 | */ | 761 | */ |
764 | static void phy_state_machine(struct work_struct *work) | 762 | void phy_state_machine(struct work_struct *work) |
765 | { | 763 | { |
766 | struct delayed_work *dwork = to_delayed_work(work); | 764 | struct delayed_work *dwork = to_delayed_work(work); |
767 | struct phy_device *phydev = | 765 | struct phy_device *phydev = |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8212b2b93422..adbc0fded130 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) | |||
177 | dev->state = PHY_DOWN; | 177 | dev->state = PHY_DOWN; |
178 | 178 | ||
179 | mutex_init(&dev->lock); | 179 | mutex_init(&dev->lock); |
180 | INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); | ||
180 | 181 | ||
181 | return dev; | 182 | return dev; |
182 | } | 183 | } |
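The phylib change above moves INIT_DELAYED_WORK() out of phy_start_machine() and into phy_device_create(), so a work item that is already queued can never be re-initialised underneath the workqueue. The general shape of that pattern, with made-up my_dev/my_worker names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct my_dev {
        struct delayed_work work;
};

static void my_worker(struct work_struct *work)
{
        struct my_dev *dev = container_of(to_delayed_work(work),
                                          struct my_dev, work);

        /* periodic state handling; may schedule_delayed_work(&dev->work, HZ) */
        (void)dev;
}

static struct my_dev *my_dev_create(void)
{
        struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (dev)
                INIT_DELAYED_WORK(&dev->work, my_worker);       /* once, at create */
        return dev;
}

static void my_dev_start(struct my_dev *dev)
{
        /* only (re)arm the timer here; re-initialising could lose queued work */
        schedule_delayed_work(&dev->work, HZ);
}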
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 707b391afa02..894a7c84faef 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -4119,7 +4119,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4119 | err = pcie_set_readrq(pdev, 4096); | 4119 | err = pcie_set_readrq(pdev, 4096); |
4120 | if (err) { | 4120 | if (err) { |
4121 | dev_err(&pdev->dev, "Set readrq failed.\n"); | 4121 | dev_err(&pdev->dev, "Set readrq failed.\n"); |
4122 | goto err_out; | 4122 | goto err_out1; |
4123 | } | 4123 | } |
4124 | 4124 | ||
4125 | err = pci_request_regions(pdev, DRV_NAME); | 4125 | err = pci_request_regions(pdev, DRV_NAME); |
@@ -4140,7 +4140,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4140 | 4140 | ||
4141 | if (err) { | 4141 | if (err) { |
4142 | dev_err(&pdev->dev, "No usable DMA configuration.\n"); | 4142 | dev_err(&pdev->dev, "No usable DMA configuration.\n"); |
4143 | goto err_out; | 4143 | goto err_out2; |
4144 | } | 4144 | } |
4145 | 4145 | ||
4146 | /* Set PCIe reset type for EEH to fundamental. */ | 4146 | /* Set PCIe reset type for EEH to fundamental. */ |
@@ -4152,7 +4152,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4152 | if (!qdev->reg_base) { | 4152 | if (!qdev->reg_base) { |
4153 | dev_err(&pdev->dev, "Register mapping failed.\n"); | 4153 | dev_err(&pdev->dev, "Register mapping failed.\n"); |
4154 | err = -ENOMEM; | 4154 | err = -ENOMEM; |
4155 | goto err_out; | 4155 | goto err_out2; |
4156 | } | 4156 | } |
4157 | 4157 | ||
4158 | qdev->doorbell_area_size = pci_resource_len(pdev, 3); | 4158 | qdev->doorbell_area_size = pci_resource_len(pdev, 3); |
@@ -4162,14 +4162,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4162 | if (!qdev->doorbell_area) { | 4162 | if (!qdev->doorbell_area) { |
4163 | dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); | 4163 | dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); |
4164 | err = -ENOMEM; | 4164 | err = -ENOMEM; |
4165 | goto err_out; | 4165 | goto err_out2; |
4166 | } | 4166 | } |
4167 | 4167 | ||
4168 | err = ql_get_board_info(qdev); | 4168 | err = ql_get_board_info(qdev); |
4169 | if (err) { | 4169 | if (err) { |
4170 | dev_err(&pdev->dev, "Register access failed.\n"); | 4170 | dev_err(&pdev->dev, "Register access failed.\n"); |
4171 | err = -EIO; | 4171 | err = -EIO; |
4172 | goto err_out; | 4172 | goto err_out2; |
4173 | } | 4173 | } |
4174 | qdev->msg_enable = netif_msg_init(debug, default_msg); | 4174 | qdev->msg_enable = netif_msg_init(debug, default_msg); |
4175 | spin_lock_init(&qdev->hw_lock); | 4175 | spin_lock_init(&qdev->hw_lock); |
@@ -4179,7 +4179,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4179 | err = qdev->nic_ops->get_flash(qdev); | 4179 | err = qdev->nic_ops->get_flash(qdev); |
4180 | if (err) { | 4180 | if (err) { |
4181 | dev_err(&pdev->dev, "Invalid FLASH.\n"); | 4181 | dev_err(&pdev->dev, "Invalid FLASH.\n"); |
4182 | goto err_out; | 4182 | goto err_out2; |
4183 | } | 4183 | } |
4184 | 4184 | ||
4185 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); | 4185 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); |
@@ -4212,8 +4212,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | |||
4212 | DRV_NAME, DRV_VERSION); | 4212 | DRV_NAME, DRV_VERSION); |
4213 | } | 4213 | } |
4214 | return 0; | 4214 | return 0; |
4215 | err_out: | 4215 | err_out2: |
4216 | ql_release_all(pdev); | 4216 | ql_release_all(pdev); |
4217 | err_out1: | ||
4217 | pci_disable_device(pdev); | 4218 | pci_disable_device(pdev); |
4218 | return err; | 4219 | return err; |
4219 | } | 4220 | } |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index cc4218667cba..3c4836d0898f 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, | |||
3421 | break; | 3421 | break; |
3422 | } | 3422 | } |
3423 | } else { | 3423 | } else { |
3424 | if (!(val64 & busy_bit)) { | 3424 | if (val64 & busy_bit) { |
3425 | ret = SUCCESS; | 3425 | ret = SUCCESS; |
3426 | break; | 3426 | break; |
3427 | } | 3427 | } |
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 0d4eba7266ec..9f035b9f0350 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c | |||
@@ -804,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | |||
804 | loff_t offset, u8 *buffer, size_t length) | 804 | loff_t offset, u8 *buffer, size_t length) |
805 | { | 805 | { |
806 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | 806 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; |
807 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)]; | 807 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
808 | size_t outlen; | 808 | size_t outlen; |
809 | int rc; | 809 | int rc; |
810 | 810 | ||
@@ -828,7 +828,7 @@ fail: | |||
828 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | 828 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, |
829 | loff_t offset, const u8 *buffer, size_t length) | 829 | loff_t offset, const u8 *buffer, size_t length) |
830 | { | 830 | { |
831 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)]; | 831 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
832 | int rc; | 832 | int rc; |
833 | 833 | ||
834 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); | 834 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); |
@@ -838,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | |||
838 | 838 | ||
839 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); | 839 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); |
840 | 840 | ||
841 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf), | 841 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, |
842 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), | ||
842 | NULL, 0, NULL); | 843 | NULL, 0, NULL); |
843 | if (rc) | 844 | if (rc) |
844 | goto fail; | 845 | goto fail; |
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h index de916728c2e3..10ce98f4c0fb 100644 --- a/drivers/net/sfc/mcdi.h +++ b/drivers/net/sfc/mcdi.h | |||
@@ -111,6 +111,7 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | |||
111 | extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | 111 | extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, |
112 | loff_t offset, const u8 *buffer, | 112 | loff_t offset, const u8 *buffer, |
113 | size_t length); | 113 | size_t length); |
114 | #define EFX_MCDI_NVRAM_LEN_MAX 128 | ||
114 | extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | 115 | extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, |
115 | loff_t offset, size_t length); | 116 | loff_t offset, size_t length); |
116 | extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, | 117 | extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, |
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h index 2a85360a46f0..73e71f420624 100644 --- a/drivers/net/sfc/mcdi_pcol.h +++ b/drivers/net/sfc/mcdi_pcol.h | |||
@@ -1090,8 +1090,10 @@ | |||
1090 | #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 | 1090 | #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 |
1091 | #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 | 1091 | #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 |
1092 | #define MC_CMD_MAC_RX_MATCH_FAULT 59 | 1092 | #define MC_CMD_MAC_RX_MATCH_FAULT 59 |
1093 | #define MC_CMD_GMAC_DMABUF_START 64 | ||
1094 | #define MC_CMD_GMAC_DMABUF_END 95 | ||
1093 | /* Insert new members here. */ | 1095 | /* Insert new members here. */ |
1094 | #define MC_CMD_MAC_GENERATION_END 60 | 1096 | #define MC_CMD_MAC_GENERATION_END 96 |
1095 | #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) | 1097 | #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) |
1096 | 1098 | ||
1097 | /* MC_CMD_MAC_STATS: | 1099 | /* MC_CMD_MAC_STATS: |
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c index 3a464529a46b..407bbaddfea6 100644 --- a/drivers/net/sfc/mtd.c +++ b/drivers/net/sfc/mtd.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include "mcdi_pcol.h" | 23 | #include "mcdi_pcol.h" |
24 | 24 | ||
25 | #define EFX_SPI_VERIFY_BUF_LEN 16 | 25 | #define EFX_SPI_VERIFY_BUF_LEN 16 |
26 | #define EFX_MCDI_CHUNK_LEN 128 | ||
27 | 26 | ||
28 | struct efx_mtd_partition { | 27 | struct efx_mtd_partition { |
29 | struct mtd_info mtd; | 28 | struct mtd_info mtd; |
@@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start, | |||
428 | int rc = 0; | 427 | int rc = 0; |
429 | 428 | ||
430 | while (offset < end) { | 429 | while (offset < end) { |
431 | chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); | 430 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); |
432 | rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, | 431 | rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, |
433 | buffer, chunk); | 432 | buffer, chunk); |
434 | if (rc) | 433 | if (rc) |
@@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start, | |||
491 | } | 490 | } |
492 | 491 | ||
493 | while (offset < end) { | 492 | while (offset < end) { |
494 | chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); | 493 | chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); |
495 | rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, | 494 | rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, |
496 | buffer, chunk); | 495 | buffer, chunk); |
497 | if (rc) | 496 | if (rc) |
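Replacing the variable-length arrays with EFX_MCDI_NVRAM_LEN_MAX-sized buffers only works because every caller, like the MTD code above, already splits requests into chunks no larger than that limit. A plain-C model of the chunking; read_chunk() stands in for efx_mcdi_nvram_read() and always succeeds here:

#include <stddef.h>
#include <string.h>

#define NVRAM_LEN_MAX   128     /* mirrors EFX_MCDI_NVRAM_LEN_MAX */

/* Stand-in for one bounded MCDI request; len never exceeds NVRAM_LEN_MAX. */
static int read_chunk(unsigned int type, size_t offset, char *buf, size_t len)
{
        (void)type;
        (void)offset;
        memset(buf, 0, len);    /* placeholder payload */
        return 0;
}

static int nvram_read(unsigned int type, size_t start, char *buf, size_t len)
{
        size_t offset = start, end = start + len;

        while (offset < end) {
                size_t chunk = end - offset;

                if (chunk > NVRAM_LEN_MAX)
                        chunk = NVRAM_LEN_MAX;  /* never exceed the MCDI cap */
                if (read_chunk(type, offset, buf, chunk))
                        return -1;
                buf += chunk;
                offset += chunk;
        }
        return 0;
}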
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index ff8f0a417fa3..e0d13a451019 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c | |||
@@ -318,12 +318,6 @@ static int qt202x_reset_phy(struct efx_nic *efx) | |||
318 | /* Wait 250ms for the PHY to complete bootup */ | 318 | /* Wait 250ms for the PHY to complete bootup */ |
319 | msleep(250); | 319 | msleep(250); |
320 | 320 | ||
321 | /* Check that all the MMDs we expect are present and responding. We | ||
322 | * expect faults on some if the link is down, but not on the PHY XS */ | ||
323 | rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS); | ||
324 | if (rc < 0) | ||
325 | goto fail; | ||
326 | |||
327 | falcon_board(efx)->type->init_phy(efx); | 321 | falcon_board(efx)->type->init_phy(efx); |
328 | 322 | ||
329 | return rc; | 323 | return rc; |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 37f486b65f63..d760650c5c04 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -644,6 +644,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) | |||
644 | { | 644 | { |
645 | u32 reg1; | 645 | u32 reg1; |
646 | 646 | ||
647 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
647 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); | 648 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
648 | reg1 &= ~phy_power[port]; | 649 | reg1 &= ~phy_power[port]; |
649 | 650 | ||
@@ -651,6 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) | |||
651 | reg1 |= coma_mode[port]; | 652 | reg1 |= coma_mode[port]; |
652 | 653 | ||
653 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 654 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
655 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
654 | sky2_pci_read32(hw, PCI_DEV_REG1); | 656 | sky2_pci_read32(hw, PCI_DEV_REG1); |
655 | 657 | ||
656 | if (hw->chip_id == CHIP_ID_YUKON_FE) | 658 | if (hw->chip_id == CHIP_ID_YUKON_FE) |
@@ -707,9 +709,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) | |||
707 | gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); | 709 | gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); |
708 | } | 710 | } |
709 | 711 | ||
712 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
710 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); | 713 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
711 | reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ | 714 | reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ |
712 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 715 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
716 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
713 | } | 717 | } |
714 | 718 | ||
715 | /* Force a renegotiation */ | 719 | /* Force a renegotiation */ |
@@ -2149,7 +2153,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw) | |||
2149 | 2153 | ||
2150 | /* reset PHY Link Detect */ | 2154 | /* reset PHY Link Detect */ |
2151 | phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); | 2155 | phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); |
2156 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2152 | sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); | 2157 | sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); |
2158 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2153 | 2159 | ||
2154 | sky2_link_up(sky2); | 2160 | sky2_link_up(sky2); |
2155 | } | 2161 | } |
@@ -2640,6 +2646,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2640 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { | 2646 | if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { |
2641 | u16 pci_err; | 2647 | u16 pci_err; |
2642 | 2648 | ||
2649 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2643 | pci_err = sky2_pci_read16(hw, PCI_STATUS); | 2650 | pci_err = sky2_pci_read16(hw, PCI_STATUS); |
2644 | if (net_ratelimit()) | 2651 | if (net_ratelimit()) |
2645 | dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", | 2652 | dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", |
@@ -2647,12 +2654,14 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2647 | 2654 | ||
2648 | sky2_pci_write16(hw, PCI_STATUS, | 2655 | sky2_pci_write16(hw, PCI_STATUS, |
2649 | pci_err | PCI_STATUS_ERROR_BITS); | 2656 | pci_err | PCI_STATUS_ERROR_BITS); |
2657 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2650 | } | 2658 | } |
2651 | 2659 | ||
2652 | if (status & Y2_IS_PCI_EXP) { | 2660 | if (status & Y2_IS_PCI_EXP) { |
2653 | /* PCI-Express uncorrectable Error occurred */ | 2661 | /* PCI-Express uncorrectable Error occurred */ |
2654 | u32 err; | 2662 | u32 err; |
2655 | 2663 | ||
2664 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2656 | err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); | 2665 | err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); |
2657 | sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, | 2666 | sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, |
2658 | 0xfffffffful); | 2667 | 0xfffffffful); |
@@ -2660,6 +2669,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) | |||
2660 | dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); | 2669 | dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); |
2661 | 2670 | ||
2662 | sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); | 2671 | sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); |
2672 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2663 | } | 2673 | } |
2664 | 2674 | ||
2665 | if (status & Y2_HWE_L1_MASK) | 2675 | if (status & Y2_HWE_L1_MASK) |
@@ -3038,6 +3048,7 @@ static void sky2_reset(struct sky2_hw *hw) | |||
3038 | } | 3048 | } |
3039 | 3049 | ||
3040 | sky2_power_on(hw); | 3050 | sky2_power_on(hw); |
3051 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
3041 | 3052 | ||
3042 | for (i = 0; i < hw->ports; i++) { | 3053 | for (i = 0; i < hw->ports; i++) { |
3043 | sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); | 3054 | sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); |
@@ -3074,6 +3085,7 @@ static void sky2_reset(struct sky2_hw *hw) | |||
3074 | reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; | 3085 | reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; |
3075 | 3086 | ||
3076 | /* reset PHY Link Detect */ | 3087 | /* reset PHY Link Detect */ |
3088 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
3077 | sky2_pci_write16(hw, PSM_CONFIG_REG4, | 3089 | sky2_pci_write16(hw, PSM_CONFIG_REG4, |
3078 | reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); | 3090 | reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); |
3079 | sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); | 3091 | sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); |
@@ -3091,6 +3103,7 @@ static void sky2_reset(struct sky2_hw *hw) | |||
3091 | /* restore the PCIe Link Control register */ | 3103 | /* restore the PCIe Link Control register */ |
3092 | sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); | 3104 | sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); |
3093 | } | 3105 | } |
3106 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
3094 | 3107 | ||
3095 | /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ | 3108 | /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ |
3096 | sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); | 3109 | sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); |
@@ -3228,6 +3241,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw) | |||
3228 | return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; | 3241 | return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; |
3229 | } | 3242 | } |
3230 | 3243 | ||
3244 | static void sky2_hw_set_wol(struct sky2_hw *hw) | ||
3245 | { | ||
3246 | int wol = 0; | ||
3247 | int i; | ||
3248 | |||
3249 | for (i = 0; i < hw->ports; i++) { | ||
3250 | struct net_device *dev = hw->dev[i]; | ||
3251 | struct sky2_port *sky2 = netdev_priv(dev); | ||
3252 | |||
3253 | if (sky2->wol) | ||
3254 | wol = 1; | ||
3255 | } | ||
3256 | |||
3257 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || | ||
3258 | hw->chip_id == CHIP_ID_YUKON_EX || | ||
3259 | hw->chip_id == CHIP_ID_YUKON_FE_P) | ||
3260 | sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); | ||
3261 | |||
3262 | device_set_wakeup_enable(&hw->pdev->dev, wol); | ||
3263 | } | ||
3264 | |||
3231 | static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 3265 | static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
3232 | { | 3266 | { |
3233 | const struct sky2_port *sky2 = netdev_priv(dev); | 3267 | const struct sky2_port *sky2 = netdev_priv(dev); |
@@ -3247,13 +3281,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
3247 | 3281 | ||
3248 | sky2->wol = wol->wolopts; | 3282 | sky2->wol = wol->wolopts; |
3249 | 3283 | ||
3250 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || | 3284 | sky2_hw_set_wol(hw); |
3251 | hw->chip_id == CHIP_ID_YUKON_EX || | ||
3252 | hw->chip_id == CHIP_ID_YUKON_FE_P) | ||
3253 | sky2_write32(hw, B0_CTST, sky2->wol | ||
3254 | ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); | ||
3255 | |||
3256 | device_set_wakeup_enable(&hw->pdev->dev, sky2->wol); | ||
3257 | 3285 | ||
3258 | if (!netif_running(dev)) | 3286 | if (!netif_running(dev)) |
3259 | sky2_wol_init(sky2); | 3287 | sky2_wol_init(sky2); |
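The new sky2_hw_set_wol() above derives the chip-wide Wake-on-LAN setting from all ports rather than from whichever port was configured last, so disabling WoL on one port no longer clears it for the other; the surrounding hunks also bracket PCI config accesses with TST_CFG_WRITE_ON/OFF. A standalone sketch of just the OR-reduction (the structures are stand-ins, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct port { bool wol; };	/* stand-in for struct sky2_port's wol field */

/* Chip-level WoL is enabled if any port asks for it. */
static bool any_port_wants_wol(const struct port *ports, int nports)
{
	bool wol = false;
	int i;

	for (i = 0; i < nports; i++)
		if (ports[i].wol)
			wol = true;
	return wol;
}

int main(void)
{
	struct port ports[2] = { { .wol = false }, { .wol = true } };

	printf("chip WoL: %s\n", any_port_wants_wol(ports, 2) ? "on" : "off");
	return 0;
}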
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 595777dcadb1..20696b5d60a5 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -249,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = { | |||
249 | { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 249 | { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
250 | { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ | 250 | { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ |
251 | { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ | 251 | { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ |
252 | { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */ | ||
252 | { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 253 | { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
253 | { } /* terminate list */ | 254 | { } /* terminate list */ |
254 | }; | 255 | }; |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 96bdc0b43889..eb8fe7e16c6c 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -3279,13 +3279,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3279 | /* Handle the transmitted buffer and release */ | 3279 | /* Handle the transmitted buffer and release */ |
3280 | /* the BD to be used with the current frame */ | 3280 | /* the BD to be used with the current frame */ |
3281 | 3281 | ||
3282 | if (bd == ugeth->txBd[txQ]) /* queue empty? */ | 3282 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; |
3283 | if (!skb) | ||
3283 | break; | 3284 | break; |
3284 | 3285 | ||
3285 | dev->stats.tx_packets++; | 3286 | dev->stats.tx_packets++; |
3286 | 3287 | ||
3287 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; | ||
3288 | |||
3289 | if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && | 3288 | if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && |
3290 | skb_recycle_check(skb, | 3289 | skb_recycle_check(skb, |
3291 | ugeth->ug_info->uf_info.max_rx_buf_length + | 3290 | ugeth->ug_info->uf_info.max_rx_buf_length + |
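The ucc_geth_tx() hunk above loads the stored skb for the dirty slot first and stops when it is NULL, so tx_packets is only incremented for slots that really held a frame; previously emptiness was inferred from buffer-descriptor positions after the counter had already been bumped. A toy cleanup loop with the same shape (the ring and types are hypothetical):

#include <stdio.h>

#define RING_SIZE 8

/* Stop at the first empty slot, counting only slots that held a buffer. */
static int clean_tx_ring(void *skbs[RING_SIZE], int dirty)
{
	int completed = 0, i;

	for (i = 0; i < RING_SIZE; i++) {
		void *skb = skbs[dirty];

		if (!skb)			/* queue drained */
			break;
		completed++;			/* tx_packets++ equivalent */
		skbs[dirty] = NULL;
		dirty = (dirty + 1) % RING_SIZE;
	}
	return completed;
}

int main(void)
{
	void *ring[RING_SIZE] = { (void *)1, (void *)1, NULL };

	printf("completed %d\n", clean_tx_ring(ring, 0));	/* 2 */
	return 0;
}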
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c708ecc3cb2e..9ead30bd00c4 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -395,8 +395,7 @@ static void refill_work(struct work_struct *work) | |||
395 | 395 | ||
396 | vi = container_of(work, struct virtnet_info, refill.work); | 396 | vi = container_of(work, struct virtnet_info, refill.work); |
397 | napi_disable(&vi->napi); | 397 | napi_disable(&vi->napi); |
398 | try_fill_recv(vi, GFP_KERNEL); | 398 | still_empty = !try_fill_recv(vi, GFP_KERNEL); |
399 | still_empty = (vi->num == 0); | ||
400 | napi_enable(&vi->napi); | 399 | napi_enable(&vi->napi); |
401 | 400 | ||
402 | /* In theory, this can happen: if we don't get any buffers in | 401 | /* In theory, this can happen: if we don't get any buffers in |
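In the refill_work() hunk above, the emptiness decision now comes from the fill routine's return value rather than from checking vi->num afterwards. A compressed sketch of the control flow (try_fill() and the reschedule are stand-ins for the driver's routines):

#include <stdbool.h>
#include <stdio.h>

static int buffers_posted;	/* pretend driver state */

/* Stand-in for try_fill_recv(): true once at least one buffer was posted. */
static bool try_fill(void)
{
	return buffers_posted > 0;
}

static void refill_work(void)
{
	bool still_empty = !try_fill();	/* decided from the result itself */

	if (still_empty)
		printf("reschedule refill\n");
}

int main(void)
{
	refill_work();			/* prints the reschedule message */
	return 0;
}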
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h index 5cc0f279417e..2d7c96d7e865 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h | |||
@@ -151,6 +151,7 @@ enum { | |||
151 | 151 | ||
152 | /* Device IDs */ | 152 | /* Device IDs */ |
153 | USB_DEVICE_ID_I6050 = 0x0186, | 153 | USB_DEVICE_ID_I6050 = 0x0186, |
154 | USB_DEVICE_ID_I6050_2 = 0x0188, | ||
154 | }; | 155 | }; |
155 | 156 | ||
156 | 157 | ||
@@ -234,6 +235,7 @@ struct i2400mu { | |||
234 | u8 rx_size_auto_shrink; | 235 | u8 rx_size_auto_shrink; |
235 | 236 | ||
236 | struct dentry *debugfs_dentry; | 237 | struct dentry *debugfs_dentry; |
238 | unsigned i6050:1; /* 1 if this is a 6050 based SKU */ | ||
237 | }; | 239 | }; |
238 | 240 | ||
239 | 241 | ||
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 3b48681f8a0d..98f4f8c5fb68 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c | |||
@@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface, | |||
478 | i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; | 478 | i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; |
479 | i2400m->bus_bm_mac_addr_impaired = 0; | 479 | i2400m->bus_bm_mac_addr_impaired = 0; |
480 | 480 | ||
481 | if (id->idProduct == USB_DEVICE_ID_I6050) { | 481 | switch (id->idProduct) { |
482 | case USB_DEVICE_ID_I6050: | ||
483 | case USB_DEVICE_ID_I6050_2: | ||
484 | i2400mu->i6050 = 1; | ||
485 | break; | ||
486 | default: | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | if (i2400mu->i6050) { | ||
482 | i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; | 491 | i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; |
483 | i2400mu->endpoint_cfg.bulk_out = 0; | 492 | i2400mu->endpoint_cfg.bulk_out = 0; |
484 | i2400mu->endpoint_cfg.notification = 3; | 493 | i2400mu->endpoint_cfg.notification = 3; |
@@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface) | |||
719 | static | 728 | static |
720 | struct usb_device_id i2400mu_id_table[] = { | 729 | struct usb_device_id i2400mu_id_table[] = { |
721 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, | 730 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, |
731 | { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, | ||
722 | { USB_DEVICE(0x8086, 0x0181) }, | 732 | { USB_DEVICE(0x8086, 0x0181) }, |
723 | { USB_DEVICE(0x8086, 0x1403) }, | 733 | { USB_DEVICE(0x8086, 0x1403) }, |
724 | { USB_DEVICE(0x8086, 0x1405) }, | 734 | { USB_DEVICE(0x8086, 0x1405) }, |
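The probe hunk above folds both 6050 product IDs into a single SKU flag and then branches on the flag, and the device table gains the second ID. A user-space sketch of the ID-to-flag mapping (only the two IDs visible in the diff are used):

#include <stdbool.h>
#include <stdio.h>

enum { ID_I6050 = 0x0186, ID_I6050_2 = 0x0188 };

/* Several product IDs collapse into one "this is a 6050-based SKU" flag. */
static bool is_i6050_sku(unsigned int product_id)
{
	switch (product_id) {
	case ID_I6050:
	case ID_I6050_2:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("0x0188 is 6050 SKU: %d\n", is_i6050_sku(0x0188));
	return 0;
}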
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 33a5866538e7..de45f308b744 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -1598,6 +1598,7 @@ struct iwl_cfg iwl5300_agn_cfg = { | |||
1598 | .use_bsm = false, | 1598 | .use_bsm = false, |
1599 | .ht_greenfield_support = true, | 1599 | .ht_greenfield_support = true, |
1600 | .led_compensation = 51, | 1600 | .led_compensation = 51, |
1601 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1601 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1602 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1602 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1603 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1603 | }; | 1604 | }; |
@@ -1622,6 +1623,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { | |||
1622 | .use_bsm = false, | 1623 | .use_bsm = false, |
1623 | .ht_greenfield_support = true, | 1624 | .ht_greenfield_support = true, |
1624 | .led_compensation = 51, | 1625 | .led_compensation = 51, |
1626 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1625 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1627 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1626 | }; | 1628 | }; |
1627 | 1629 | ||
@@ -1667,6 +1669,7 @@ struct iwl_cfg iwl5100_agn_cfg = { | |||
1667 | .use_bsm = false, | 1669 | .use_bsm = false, |
1668 | .ht_greenfield_support = true, | 1670 | .ht_greenfield_support = true, |
1669 | .led_compensation = 51, | 1671 | .led_compensation = 51, |
1672 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1670 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1673 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1671 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1674 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1672 | }; | 1675 | }; |
@@ -1691,6 +1694,7 @@ struct iwl_cfg iwl5350_agn_cfg = { | |||
1691 | .use_bsm = false, | 1694 | .use_bsm = false, |
1692 | .ht_greenfield_support = true, | 1695 | .ht_greenfield_support = true, |
1693 | .led_compensation = 51, | 1696 | .led_compensation = 51, |
1697 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1694 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1698 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1695 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1699 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1696 | }; | 1700 | }; |
@@ -1715,6 +1719,7 @@ struct iwl_cfg iwl5150_agn_cfg = { | |||
1715 | .use_bsm = false, | 1719 | .use_bsm = false, |
1716 | .ht_greenfield_support = true, | 1720 | .ht_greenfield_support = true, |
1717 | .led_compensation = 51, | 1721 | .led_compensation = 51, |
1722 | .use_rts_for_ht = true, /* use rts/cts protection */ | ||
1718 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 1723 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
1719 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, | 1724 | .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, |
1720 | }; | 1725 | }; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index e7d88d1da15d..83cc4e500a96 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c | |||
@@ -1,3 +1,29 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2 of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called LICENSE. | ||
20 | * | ||
21 | * Contact Information: | ||
22 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
24 | * | ||
25 | *****************************************************************************/ | ||
26 | |||
1 | #include <linux/module.h> | 27 | #include <linux/module.h> |
2 | 28 | ||
3 | /* sparse doesn't like tracepoint macros */ | 29 | /* sparse doesn't like tracepoint macros */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index 21361968ab7e..d9c7363b1bbb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h | |||
@@ -1,3 +1,29 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2 of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in the | ||
19 | * file called LICENSE. | ||
20 | * | ||
21 | * Contact Information: | ||
22 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
24 | * | ||
25 | *****************************************************************************/ | ||
26 | |||
1 | #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) | 27 | #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) |
2 | #define __IWLWIFI_DEVICE_TRACE | 28 | #define __IWLWIFI_DEVICE_TRACE |
3 | 29 | ||
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c index 777584d76a88..1e41ad0fcad5 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.c +++ b/drivers/net/wireless/iwmc3200wifi/commands.c | |||
@@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm, | |||
973 | 973 | ||
974 | memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); | 974 | memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); |
975 | 975 | ||
976 | update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE; | ||
977 | update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) - | ||
978 | sizeof(struct iwm_umac_wifi_if)); | ||
979 | |||
976 | update.command = cpu_to_le32(command); | 980 | update.command = cpu_to_le32(command); |
977 | if (pmksa->bssid) | 981 | if (pmksa->bssid) |
978 | memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); | 982 | memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); |
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h index 06af0552cd75..3dfd9f0e9003 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.h +++ b/drivers/net/wireless/iwmc3200wifi/commands.h | |||
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx { | |||
463 | #define IWM_CMD_PMKID_FLUSH 3 | 463 | #define IWM_CMD_PMKID_FLUSH 3 |
464 | 464 | ||
465 | struct iwm_umac_pmkid_update { | 465 | struct iwm_umac_pmkid_update { |
466 | struct iwm_umac_wifi_if hdr; | ||
466 | __le32 command; | 467 | __le32 command; |
467 | u8 bssid[ETH_ALEN]; | 468 | u8 bssid[ETH_ALEN]; |
468 | __le16 reserved; | 469 | __le16 reserved; |
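The PMKID update command now carries its interface header explicitly, with buf_size set to the structure size minus that embedded header. A standalone sketch of the size computation (the field layout is a simplified stand-in, not the real iwm_umac_pmkid_update):

#include <stdint.h>
#include <stdio.h>

struct wifi_if_hdr { uint32_t oid; uint16_t buf_size; } __attribute__((packed));

struct pmkid_update {
	struct wifi_if_hdr hdr;		/* embedded command header */
	uint32_t command;
	uint8_t  bssid[6];
	uint16_t reserved;
} __attribute__((packed));

int main(void)
{
	struct pmkid_update update = { 0 };

	/* buf_size reports the payload only: full structure minus the header. */
	update.hdr.buf_size = (uint16_t)(sizeof(update) - sizeof(update.hdr));
	printf("payload bytes: %u\n", (unsigned)update.hdr.buf_size);
	return 0;
}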
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index a15962a19b2a..a72f7c2577de 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -197,6 +197,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
197 | i %= ring_limit; | 197 | i %= ring_limit; |
198 | continue; | 198 | continue; |
199 | } | 199 | } |
200 | |||
201 | if (unlikely(len > priv->common.rx_mtu)) { | ||
202 | if (net_ratelimit()) | ||
203 | dev_err(&priv->pdev->dev, "rx'd frame size " | ||
204 | "exceeds length threshold.\n"); | ||
205 | |||
206 | len = priv->common.rx_mtu; | ||
207 | } | ||
200 | skb_put(skb, len); | 208 | skb_put(skb, len); |
201 | 209 | ||
202 | if (p54_rx(dev, skb)) { | 210 | if (p54_rx(dev, skb)) { |
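The added p54pci check caps a hardware-reported receive length at the allocated rx_mtu before skb_put() extends the buffer, turning a potential overrun into a truncated, logged frame. The same guard in isolation (names are illustrative):

#include <stddef.h>
#include <stdio.h>

/* Clamp a length reported by hardware to what the driver actually allocated. */
static size_t clamp_rx_len(size_t hw_len, size_t rx_mtu)
{
	if (hw_len > rx_mtu) {
		fprintf(stderr, "rx'd frame size exceeds length threshold\n");
		return rx_mtu;
	}
	return hw_len;
}

int main(void)
{
	printf("%zu\n", clamp_rx_len(4096, 2048));	/* prints 2048 */
	return 0;
}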
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index ac19ecd19cfe..72d3e437e190 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = { | |||
62 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, | 62 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, |
63 | /* ZD1211B */ | 63 | /* ZD1211B */ |
64 | { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, | 64 | { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, |
65 | { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B }, | ||
65 | { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, | 66 | { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, |
66 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, | 67 | { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, |
67 | { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, | 68 | { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index fdb2e7c14506..5905936c7c60 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1004,8 +1004,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, | |||
1004 | if (device == NULL || | 1004 | if (device == NULL || |
1005 | device != dasd_device_from_cdev_locked(cdev) || | 1005 | device != dasd_device_from_cdev_locked(cdev) || |
1006 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 1006 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
1007 | DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " | 1007 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", |
1008 | "bus_id %s", dev_name(&cdev->dev)); | 1008 | "invalid device in request"); |
1009 | return; | 1009 | return; |
1010 | } | 1010 | } |
1011 | 1011 | ||
@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1078 | device = (struct dasd_device *) cqr->startdev; | 1078 | device = (struct dasd_device *) cqr->startdev; |
1079 | if (!device || | 1079 | if (!device || |
1080 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | 1080 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { |
1081 | DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " | 1081 | DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", |
1082 | "bus_id %s", dev_name(&cdev->dev)); | 1082 | "invalid device in request"); |
1083 | return; | 1083 | return; |
1084 | } | 1084 | } |
1085 | 1085 | ||
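Both dasd hunks above report the bad request through the ccw device that raised the interrupt instead of formatting the message via the 'device' pointer the check has just flagged as NULL or mismatched. A small illustration of logging through the handle that is known to be valid (types are stand-ins):

#include <stdio.h>

struct device { const char *name; };

/* Report the error via the identifier that is guaranteed usable; the suspect
 * pointer is deliberately not dereferenced, it may be NULL. */
static void report_bad_request(const char *bus_id, const struct device *device)
{
	(void)device;
	fprintf(stderr, "%s: invalid device in request\n", bus_id);
}

int main(void)
{
	report_bad_request("0.0.1234", NULL);
	return 0;
}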
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 1c500c462225..1cca21aafaba 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -3033,7 +3033,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, | |||
3033 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3033 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3034 | " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", | 3034 | " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", |
3035 | req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), | 3035 | req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), |
3036 | scsw_cc(&irb->scsw), req->intrc); | 3036 | scsw_cc(&irb->scsw), req ? req->intrc : 0); |
3037 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3037 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3038 | " device %s: Failing CCW: %p\n", | 3038 | " device %s: Failing CCW: %p\n", |
3039 | dev_name(&device->cdev->dev), | 3039 | dev_name(&device->cdev->dev), |
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index fc7b30b4a255..7039d9cf0fb4 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block, | |||
260 | struct ccw_dev_id dev_id; | 260 | struct ccw_dev_id dev_id; |
261 | 261 | ||
262 | base = block->base; | 262 | base = block->base; |
263 | if (!base->discipline->fill_info) | 263 | if (!base->discipline || !base->discipline->fill_info) |
264 | return -EINVAL; | 264 | return -EINVAL; |
265 | 265 | ||
266 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); | 266 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); |
@@ -303,10 +303,7 @@ static int dasd_ioctl_information(struct dasd_block *block, | |||
303 | dasd_info->features |= | 303 | dasd_info->features |= |
304 | ((base->features & DASD_FEATURE_READONLY) != 0); | 304 | ((base->features & DASD_FEATURE_READONLY) != 0); |
305 | 305 | ||
306 | if (base->discipline) | 306 | memcpy(dasd_info->type, base->discipline->name, 4); |
307 | memcpy(dasd_info->type, base->discipline->name, 4); | ||
308 | else | ||
309 | memcpy(dasd_info->type, "none", 4); | ||
310 | 307 | ||
311 | if (block->request_queue->request_fn) { | 308 | if (block->request_queue->request_fn) { |
312 | struct list_head *l; | 309 | struct list_head *l; |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 6315fbd8e68b..71f95f54866f 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -72,7 +72,7 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
72 | /* Print device number. */ | 72 | /* Print device number. */ |
73 | seq_printf(m, "%s", dev_name(&device->cdev->dev)); | 73 | seq_printf(m, "%s", dev_name(&device->cdev->dev)); |
74 | /* Print discipline string. */ | 74 | /* Print discipline string. */ |
75 | if (device != NULL && device->discipline != NULL) | 75 | if (device->discipline != NULL) |
76 | seq_printf(m, "(%s)", device->discipline->name); | 76 | seq_printf(m, "(%s)", device->discipline->name); |
77 | else | 77 | else |
78 | seq_printf(m, "(none)"); | 78 | seq_printf(m, "(none)"); |
@@ -92,10 +92,7 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
92 | substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; | 92 | substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; |
93 | seq_printf(m, "%4s: ", substr); | 93 | seq_printf(m, "%4s: ", substr); |
94 | /* Print device status information. */ | 94 | /* Print device status information. */ |
95 | switch ((device != NULL) ? device->state : -1) { | 95 | switch (device->state) { |
96 | case -1: | ||
97 | seq_printf(m, "unknown"); | ||
98 | break; | ||
99 | case DASD_STATE_NEW: | 96 | case DASD_STATE_NEW: |
100 | seq_printf(m, "new"); | 97 | seq_printf(m, "new"); |
101 | break; | 98 | break; |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index b9d2a007e93b..3796ffdb8479 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -495,6 +495,10 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp) | |||
495 | if (tty->driver_data == NULL) | 495 | if (tty->driver_data == NULL) |
496 | return -ENOMEM; | 496 | return -ENOMEM; |
497 | tty->low_latency = 0; | 497 | tty->low_latency = 0; |
498 | if (!tty->winsize.ws_row && !tty->winsize.ws_col) { | ||
499 | tty->winsize.ws_row = 24; | ||
500 | tty->winsize.ws_col = 80; | ||
501 | } | ||
498 | } | 502 | } |
499 | return 0; | 503 | return 0; |
500 | } | 504 | } |
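The sclp_vt220_open() hunk falls back to a 24x80 geometry when no window size has been set, so anything that queries the tty before an explicit resize sees a sane default instead of 0x0. The same guard in isolation (a local struct stands in for the tty's winsize):

#include <stdio.h>

struct winsize { unsigned short ws_row, ws_col; };

/* Apply the classic terminal geometry only if nothing was set yet. */
static void default_winsize(struct winsize *ws)
{
	if (!ws->ws_row && !ws->ws_col) {
		ws->ws_row = 24;
		ws->ws_col = 80;
	}
}

int main(void)
{
	struct winsize ws = { 0, 0 };

	default_winsize(&ws);
	printf("%ux%u\n", (unsigned)ws.ws_row, (unsigned)ws.ws_col);	/* 24x80 */
	return 0;
}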
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index a23726a0735c..142f72a2ca5a 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c | |||
@@ -373,6 +373,8 @@ static int convert_type86(struct zcrypt_device *zdev, | |||
373 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; | 373 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; |
374 | return -EAGAIN; | 374 | return -EAGAIN; |
375 | } | 375 | } |
376 | if (service_rc == 8 && service_rs == 72) | ||
377 | return -EINVAL; | ||
376 | zdev->online = 0; | 378 | zdev->online = 0; |
377 | return -EAGAIN; /* repeat the request on a different device. */ | 379 | return -EAGAIN; /* repeat the request on a different device. */ |
378 | } | 380 | } |
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index 79c120578e61..68f3e6204db8 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -470,6 +470,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev, | |||
470 | } | 470 | } |
471 | if (service_rc == 12 && service_rs == 769) | 471 | if (service_rc == 12 && service_rs == 769) |
472 | return -EINVAL; | 472 | return -EINVAL; |
473 | if (service_rc == 8 && service_rs == 72) | ||
474 | return -EINVAL; | ||
473 | zdev->online = 0; | 475 | zdev->online = 0; |
474 | return -EAGAIN; /* repeat the request on a different device. */ | 476 | return -EAGAIN; /* repeat the request on a different device. */ |
475 | } | 477 | } |
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index f932400e980a..0eb6eefd2c1a 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/miscdevice.h> | 14 | #include <linux/miscdevice.h> |
15 | #include <asm/compat.h> | ||
15 | #include <asm/ccwdev.h> | 16 | #include <asm/ccwdev.h> |
16 | #include "zfcp_def.h" | 17 | #include "zfcp_def.h" |
17 | #include "zfcp_ext.h" | 18 | #include "zfcp_ext.h" |
@@ -163,7 +164,7 @@ static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data, | |||
163 | } | 164 | } |
164 | 165 | ||
165 | static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, | 166 | static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, |
166 | unsigned long buffer) | 167 | unsigned long arg) |
167 | { | 168 | { |
168 | struct zfcp_cfdc_data *data; | 169 | struct zfcp_cfdc_data *data; |
169 | struct zfcp_cfdc_data __user *data_user; | 170 | struct zfcp_cfdc_data __user *data_user; |
@@ -175,7 +176,11 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, | |||
175 | if (command != ZFCP_CFDC_IOC) | 176 | if (command != ZFCP_CFDC_IOC) |
176 | return -ENOTTY; | 177 | return -ENOTTY; |
177 | 178 | ||
178 | data_user = (void __user *) buffer; | 179 | if (is_compat_task()) |
180 | data_user = compat_ptr(arg); | ||
181 | else | ||
182 | data_user = (void __user *)arg; | ||
183 | |||
179 | if (!data_user) | 184 | if (!data_user) |
180 | return -EINVAL; | 185 | return -EINVAL; |
181 | 186 | ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 84450955ae11..7369c8911bcf 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -327,7 +327,7 @@ static void zfcp_dbf_hba_view_response(char **p, | |||
327 | break; | 327 | break; |
328 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); | 328 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); |
329 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); | 329 | zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); |
330 | p += sprintf(*p, "\n"); | 330 | *p += sprintf(*p, "\n"); |
331 | break; | 331 | break; |
332 | 332 | ||
333 | case FSF_QTCB_OPEN_PORT_WITH_DID: | 333 | case FSF_QTCB_OPEN_PORT_WITH_DID: |
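The one-character zfcp_dbf fix above matters because the output position is passed as char **: the string pointer it refers to must advance (*p += ...), whereas the old p += ... moved the char ** itself and left the write cursor behind. In isolation:

#include <stdio.h>

/* Advance the caller's string cursor by however much was written. */
static void emit_newline(char **p)
{
	*p += sprintf(*p, "\n");
}

int main(void)
{
	char buf[32];
	char *cur = buf;

	cur += sprintf(cur, "line");
	emit_newline(&cur);
	printf("wrote %d bytes\n", (int)(cur - buf));	/* 5 */
	return 0;
}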
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 03dec832b465..66bdb34143cb 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -108,6 +108,7 @@ extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *); | |||
108 | extern int zfcp_fc_gs_setup(struct zfcp_adapter *); | 108 | extern int zfcp_fc_gs_setup(struct zfcp_adapter *); |
109 | extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); | 109 | extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); |
110 | extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); | 110 | extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); |
111 | extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); | ||
111 | 112 | ||
112 | /* zfcp_fsf.c */ | 113 | /* zfcp_fsf.c */ |
113 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); | 114 | extern int zfcp_fsf_open_port(struct zfcp_erp_action *); |
@@ -129,9 +130,9 @@ extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); | |||
129 | extern int zfcp_fsf_status_read(struct zfcp_qdio *); | 130 | extern int zfcp_fsf_status_read(struct zfcp_qdio *); |
130 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); | 131 | extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); |
131 | extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, | 132 | extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, |
132 | mempool_t *); | 133 | mempool_t *, unsigned int); |
133 | extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, | 134 | extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, |
134 | struct zfcp_fsf_ct_els *); | 135 | struct zfcp_fsf_ct_els *, unsigned int); |
135 | extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, | 136 | extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, |
136 | struct scsi_cmnd *); | 137 | struct scsi_cmnd *); |
137 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); | 138 | extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index ac5e3b7a3576..0f7b493fb105 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -258,7 +258,8 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, | |||
258 | gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; | 258 | gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; |
259 | 259 | ||
260 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, | 260 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, |
261 | adapter->pool.gid_pn_req); | 261 | adapter->pool.gid_pn_req, |
262 | ZFCP_FC_CTELS_TMO); | ||
262 | if (!ret) { | 263 | if (!ret) { |
263 | wait_for_completion(&completion); | 264 | wait_for_completion(&completion); |
264 | zfcp_fc_ns_gid_pn_eval(gid_pn); | 265 | zfcp_fc_ns_gid_pn_eval(gid_pn); |
@@ -421,7 +422,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port) | |||
421 | hton24(adisc->adisc_req.adisc_port_id, | 422 | hton24(adisc->adisc_req.adisc_port_id, |
422 | fc_host_port_id(adapter->scsi_host)); | 423 | fc_host_port_id(adapter->scsi_host)); |
423 | 424 | ||
424 | ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); | 425 | ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els, |
426 | ZFCP_FC_CTELS_TMO); | ||
425 | if (ret) | 427 | if (ret) |
426 | kmem_cache_free(zfcp_data.adisc_cache, adisc); | 428 | kmem_cache_free(zfcp_data.adisc_cache, adisc); |
427 | 429 | ||
@@ -532,7 +534,8 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, | |||
532 | ct->req = &gpn_ft->sg_req; | 534 | ct->req = &gpn_ft->sg_req; |
533 | ct->resp = gpn_ft->sg_resp; | 535 | ct->resp = gpn_ft->sg_resp; |
534 | 536 | ||
535 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL); | 537 | ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL, |
538 | ZFCP_FC_CTELS_TMO); | ||
536 | if (!ret) | 539 | if (!ret) |
537 | wait_for_completion(&completion); | 540 | wait_for_completion(&completion); |
538 | return ret; | 541 | return ret; |
@@ -677,6 +680,44 @@ static void zfcp_fc_ct_els_job_handler(void *data) | |||
677 | job->job_done(job); | 680 | job->job_done(job); |
678 | } | 681 | } |
679 | 682 | ||
683 | static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job) | ||
684 | { | ||
685 | u32 preamble_word1; | ||
686 | u8 gs_type; | ||
687 | struct zfcp_adapter *adapter; | ||
688 | |||
689 | preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; | ||
690 | gs_type = (preamble_word1 & 0xff000000) >> 24; | ||
691 | |||
692 | adapter = (struct zfcp_adapter *) job->shost->hostdata[0]; | ||
693 | |||
694 | switch (gs_type) { | ||
695 | case FC_FST_ALIAS: | ||
696 | return &adapter->gs->as; | ||
697 | case FC_FST_MGMT: | ||
698 | return &adapter->gs->ms; | ||
699 | case FC_FST_TIME: | ||
700 | return &adapter->gs->ts; | ||
701 | break; | ||
702 | case FC_FST_DIR: | ||
703 | return &adapter->gs->ds; | ||
704 | break; | ||
705 | default: | ||
706 | return NULL; | ||
707 | } | ||
708 | } | ||
709 | |||
710 | static void zfcp_fc_ct_job_handler(void *data) | ||
711 | { | ||
712 | struct fc_bsg_job *job = data; | ||
713 | struct zfcp_fc_wka_port *wka_port; | ||
714 | |||
715 | wka_port = zfcp_fc_job_wka_port(job); | ||
716 | zfcp_fc_wka_port_put(wka_port); | ||
717 | |||
718 | zfcp_fc_ct_els_job_handler(data); | ||
719 | } | ||
720 | |||
680 | static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, | 721 | static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, |
681 | struct zfcp_adapter *adapter) | 722 | struct zfcp_adapter *adapter) |
682 | { | 723 | { |
@@ -695,43 +736,27 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, | |||
695 | } else | 736 | } else |
696 | d_id = ntoh24(job->request->rqst_data.h_els.port_id); | 737 | d_id = ntoh24(job->request->rqst_data.h_els.port_id); |
697 | 738 | ||
698 | return zfcp_fsf_send_els(adapter, d_id, els); | 739 | els->handler = zfcp_fc_ct_els_job_handler; |
740 | return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ); | ||
699 | } | 741 | } |
700 | 742 | ||
701 | static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, | 743 | static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, |
702 | struct zfcp_adapter *adapter) | 744 | struct zfcp_adapter *adapter) |
703 | { | 745 | { |
704 | int ret; | 746 | int ret; |
705 | u8 gs_type; | ||
706 | struct zfcp_fsf_ct_els *ct = job->dd_data; | 747 | struct zfcp_fsf_ct_els *ct = job->dd_data; |
707 | struct zfcp_fc_wka_port *wka_port; | 748 | struct zfcp_fc_wka_port *wka_port; |
708 | u32 preamble_word1; | ||
709 | 749 | ||
710 | preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; | 750 | wka_port = zfcp_fc_job_wka_port(job); |
711 | gs_type = (preamble_word1 & 0xff000000) >> 24; | 751 | if (!wka_port) |
712 | 752 | return -EINVAL; | |
713 | switch (gs_type) { | ||
714 | case FC_FST_ALIAS: | ||
715 | wka_port = &adapter->gs->as; | ||
716 | break; | ||
717 | case FC_FST_MGMT: | ||
718 | wka_port = &adapter->gs->ms; | ||
719 | break; | ||
720 | case FC_FST_TIME: | ||
721 | wka_port = &adapter->gs->ts; | ||
722 | break; | ||
723 | case FC_FST_DIR: | ||
724 | wka_port = &adapter->gs->ds; | ||
725 | break; | ||
726 | default: | ||
727 | return -EINVAL; /* no such service */ | ||
728 | } | ||
729 | 753 | ||
730 | ret = zfcp_fc_wka_port_get(wka_port); | 754 | ret = zfcp_fc_wka_port_get(wka_port); |
731 | if (ret) | 755 | if (ret) |
732 | return ret; | 756 | return ret; |
733 | 757 | ||
734 | ret = zfcp_fsf_send_ct(wka_port, ct, NULL); | 758 | ct->handler = zfcp_fc_ct_job_handler; |
759 | ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ); | ||
735 | if (ret) | 760 | if (ret) |
736 | zfcp_fc_wka_port_put(wka_port); | 761 | zfcp_fc_wka_port_put(wka_port); |
737 | 762 | ||
@@ -752,7 +777,6 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) | |||
752 | 777 | ||
753 | ct_els->req = job->request_payload.sg_list; | 778 | ct_els->req = job->request_payload.sg_list; |
754 | ct_els->resp = job->reply_payload.sg_list; | 779 | ct_els->resp = job->reply_payload.sg_list; |
755 | ct_els->handler = zfcp_fc_ct_els_job_handler; | ||
756 | ct_els->handler_data = job; | 780 | ct_els->handler_data = job; |
757 | 781 | ||
758 | switch (job->request->msgcode) { | 782 | switch (job->request->msgcode) { |
@@ -767,6 +791,12 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job) | |||
767 | } | 791 | } |
768 | } | 792 | } |
769 | 793 | ||
794 | int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job) | ||
795 | { | ||
796 | /* hardware tracks timeout, reset bsg timeout to not interfere */ | ||
797 | return -EAGAIN; | ||
798 | } | ||
799 | |||
770 | int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) | 800 | int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) |
771 | { | 801 | { |
772 | struct zfcp_fc_wka_ports *wka_ports; | 802 | struct zfcp_fc_wka_ports *wka_ports; |
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index cb2a3669a384..0747b087390d 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ | 27 | #define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ |
28 | (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) | 28 | (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) |
29 | 29 | ||
30 | #define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) | ||
31 | |||
30 | /** | 32 | /** |
31 | * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request | 33 | * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request |
32 | * @ct_hdr: FC GS common transport header | 34 | * @ct_hdr: FC GS common transport header |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 482dcd97aa5d..e8fb4d9baa8b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -1068,20 +1068,20 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1068 | static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, | 1068 | static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, |
1069 | struct scatterlist *sg_req, | 1069 | struct scatterlist *sg_req, |
1070 | struct scatterlist *sg_resp, | 1070 | struct scatterlist *sg_resp, |
1071 | int max_sbals) | 1071 | int max_sbals, unsigned int timeout) |
1072 | { | 1072 | { |
1073 | int ret; | 1073 | int ret; |
1074 | unsigned int fcp_chan_timeout; | ||
1075 | 1074 | ||
1076 | ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); | 1075 | ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); |
1077 | if (ret) | 1076 | if (ret) |
1078 | return ret; | 1077 | return ret; |
1079 | 1078 | ||
1080 | /* common settings for ct/gs and els requests */ | 1079 | /* common settings for ct/gs and els requests */ |
1081 | fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000; | 1080 | if (timeout > 255) |
1081 | timeout = 255; /* max value accepted by hardware */ | ||
1082 | req->qtcb->bottom.support.service_class = FSF_CLASS_3; | 1082 | req->qtcb->bottom.support.service_class = FSF_CLASS_3; |
1083 | req->qtcb->bottom.support.timeout = fcp_chan_timeout; | 1083 | req->qtcb->bottom.support.timeout = timeout; |
1084 | zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ); | 1084 | zfcp_fsf_start_timer(req, (timeout + 10) * HZ); |
1085 | 1085 | ||
1086 | return 0; | 1086 | return 0; |
1087 | } | 1087 | } |
@@ -1092,7 +1092,8 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, | |||
1092 | * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req | 1092 | * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req |
1093 | */ | 1093 | */ |
1094 | int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | 1094 | int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, |
1095 | struct zfcp_fsf_ct_els *ct, mempool_t *pool) | 1095 | struct zfcp_fsf_ct_els *ct, mempool_t *pool, |
1096 | unsigned int timeout) | ||
1096 | { | 1097 | { |
1097 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; | 1098 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
1098 | struct zfcp_fsf_req *req; | 1099 | struct zfcp_fsf_req *req; |
@@ -1111,7 +1112,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | |||
1111 | 1112 | ||
1112 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1113 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1113 | ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, | 1114 | ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, |
1114 | FSF_MAX_SBALS_PER_REQ); | 1115 | FSF_MAX_SBALS_PER_REQ, timeout); |
1115 | if (ret) | 1116 | if (ret) |
1116 | goto failed_send; | 1117 | goto failed_send; |
1117 | 1118 | ||
@@ -1188,7 +1189,7 @@ skip_fsfstatus: | |||
1188 | * @els: pointer to struct zfcp_send_els with data for the command | 1189 | * @els: pointer to struct zfcp_send_els with data for the command |
1189 | */ | 1190 | */ |
1190 | int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | 1191 | int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, |
1191 | struct zfcp_fsf_ct_els *els) | 1192 | struct zfcp_fsf_ct_els *els, unsigned int timeout) |
1192 | { | 1193 | { |
1193 | struct zfcp_fsf_req *req; | 1194 | struct zfcp_fsf_req *req; |
1194 | struct zfcp_qdio *qdio = adapter->qdio; | 1195 | struct zfcp_qdio *qdio = adapter->qdio; |
@@ -1206,7 +1207,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | |||
1206 | } | 1207 | } |
1207 | 1208 | ||
1208 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; | 1209 | req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; |
1209 | ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2); | 1210 | ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout); |
1210 | 1211 | ||
1211 | if (ret) | 1212 | if (ret) |
1212 | goto failed_send; | 1213 | goto failed_send; |
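The zfcp_fsf hunks above thread a caller-chosen CT/ELS timeout down to zfcp_fsf_setup_ct_els(), which clamps it to 255 because that is the largest value the QTCB timeout field accepts, then arms the software timer with a small margin on top. A sketch of just the clamp-plus-margin step (the constant and the +10 margin are taken from the diff, everything else is illustrative):

#include <stdio.h>

#define HW_TIMEOUT_MAX 255	/* max value accepted by the hardware field */

struct timeouts { unsigned int hw, sw; };

/* Bound the hardware timeout, then give the software timer extra headroom. */
static struct timeouts setup_timeouts(unsigned int requested)
{
	struct timeouts t;

	t.hw = requested > HW_TIMEOUT_MAX ? HW_TIMEOUT_MAX : requested;
	t.sw = t.hw + 10;
	return t;
}

int main(void)
{
	struct timeouts t = setup_timeouts(600);

	printf("hw=%u sw=%u\n", t.hw, t.sw);	/* hw=255 sw=265 */
	return 0;
}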
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 771cc536a989..8e6fc68d6bd4 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -652,6 +652,7 @@ struct fc_function_template zfcp_transport_functions = { | |||
652 | .show_host_port_state = 1, | 652 | .show_host_port_state = 1, |
653 | .show_host_active_fc4s = 1, | 653 | .show_host_active_fc4s = 1, |
654 | .bsg_request = zfcp_fc_exec_bsg_job, | 654 | .bsg_request = zfcp_fc_exec_bsg_job, |
655 | .bsg_timeout = zfcp_fc_timeout_bsg_job, | ||
655 | /* no functions registered for following dynamic attributes but | 656 | /* no functions registered for following dynamic attributes but |
656 | directly set by LLDD */ | 657 | directly set by LLDD */ |
657 | .show_host_port_type = 1, | 658 | .show_host_port_type = 1, |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 2a889853a106..7e26ebc26661 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -293,7 +293,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag) | |||
293 | status = -EINVAL; | 293 | status = -EINVAL; |
294 | } | 294 | } |
295 | } | 295 | } |
296 | aac_fib_complete(fibptr); | 296 | /* Do not set XferState to zero unless receives a response from F/W */ |
297 | if (status >= 0) | ||
298 | aac_fib_complete(fibptr); | ||
299 | |||
297 | /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ | 300 | /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ |
298 | if (status >= 0) { | 301 | if (status >= 0) { |
299 | if ((aac_commit == 1) || commit_flag) { | 302 | if ((aac_commit == 1) || commit_flag) { |
@@ -310,13 +313,18 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag) | |||
310 | FsaNormal, | 313 | FsaNormal, |
311 | 1, 1, | 314 | 1, 1, |
312 | NULL, NULL); | 315 | NULL, NULL); |
313 | aac_fib_complete(fibptr); | 316 | /* Do not set XferState to zero unless |
317 | * receives a response from F/W */ | ||
318 | if (status >= 0) | ||
319 | aac_fib_complete(fibptr); | ||
314 | } else if (aac_commit == 0) { | 320 | } else if (aac_commit == 0) { |
315 | printk(KERN_WARNING | 321 | printk(KERN_WARNING |
316 | "aac_get_config_status: Foreign device configurations are being ignored\n"); | 322 | "aac_get_config_status: Foreign device configurations are being ignored\n"); |
317 | } | 323 | } |
318 | } | 324 | } |
319 | aac_fib_free(fibptr); | 325 | /* FIB should be freed only after getting the response from the F/W */ |
326 | if (status != -ERESTARTSYS) | ||
327 | aac_fib_free(fibptr); | ||
320 | return status; | 328 | return status; |
321 | } | 329 | } |
322 | 330 | ||
@@ -355,7 +363,9 @@ int aac_get_containers(struct aac_dev *dev) | |||
355 | maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); | 363 | maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); |
356 | aac_fib_complete(fibptr); | 364 | aac_fib_complete(fibptr); |
357 | } | 365 | } |
358 | aac_fib_free(fibptr); | 366 | /* FIB should be freed only after getting the response from the F/W */ |
367 | if (status != -ERESTARTSYS) | ||
368 | aac_fib_free(fibptr); | ||
359 | 369 | ||
360 | if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) | 370 | if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) |
361 | maximum_num_containers = MAXIMUM_NUM_CONTAINERS; | 371 | maximum_num_containers = MAXIMUM_NUM_CONTAINERS; |
@@ -1245,8 +1255,12 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1245 | NULL); | 1255 | NULL); |
1246 | 1256 | ||
1247 | if (rcode < 0) { | 1257 | if (rcode < 0) { |
1248 | aac_fib_complete(fibptr); | 1258 | /* FIB should be freed only after |
1249 | aac_fib_free(fibptr); | 1259 | * getting the response from the F/W */ |
1260 | if (rcode != -ERESTARTSYS) { | ||
1261 | aac_fib_complete(fibptr); | ||
1262 | aac_fib_free(fibptr); | ||
1263 | } | ||
1250 | return rcode; | 1264 | return rcode; |
1251 | } | 1265 | } |
1252 | memcpy(&dev->adapter_info, info, sizeof(*info)); | 1266 | memcpy(&dev->adapter_info, info, sizeof(*info)); |
@@ -1270,6 +1284,12 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1270 | 1284 | ||
1271 | if (rcode >= 0) | 1285 | if (rcode >= 0) |
1272 | memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); | 1286 | memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); |
1287 | if (rcode == -ERESTARTSYS) { | ||
1288 | fibptr = aac_fib_alloc(dev); | ||
1289 | if (!fibptr) | ||
1290 | return -ENOMEM; | ||
1291 | } | ||
1292 | |||
1273 | } | 1293 | } |
1274 | 1294 | ||
1275 | 1295 | ||
@@ -1470,9 +1490,11 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1470 | (dev->scsi_host_ptr->sg_tablesize * 8) + 112; | 1490 | (dev->scsi_host_ptr->sg_tablesize * 8) + 112; |
1471 | } | 1491 | } |
1472 | } | 1492 | } |
1473 | 1493 | /* FIB should be freed only after getting the response from the F/W */ | |
1474 | aac_fib_complete(fibptr); | 1494 | if (rcode != -ERESTARTSYS) { |
1475 | aac_fib_free(fibptr); | 1495 | aac_fib_complete(fibptr); |
1496 | aac_fib_free(fibptr); | ||
1497 | } | ||
1476 | 1498 | ||
1477 | return rcode; | 1499 | return rcode; |
1478 | } | 1500 | } |
@@ -1633,6 +1655,7 @@ static int aac_read(struct scsi_cmnd * scsicmd) | |||
1633 | * Allocate and initialize a Fib | 1655 | * Allocate and initialize a Fib |
1634 | */ | 1656 | */ |
1635 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { | 1657 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { |
1658 | printk(KERN_WARNING "aac_read: fib allocation failed\n"); | ||
1636 | return -1; | 1659 | return -1; |
1637 | } | 1660 | } |
1638 | 1661 | ||
@@ -1712,9 +1735,14 @@ static int aac_write(struct scsi_cmnd * scsicmd) | |||
1712 | * Allocate and initialize a Fib then setup a BlockWrite command | 1735 | * Allocate and initialize a Fib then setup a BlockWrite command |
1713 | */ | 1736 | */ |
1714 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { | 1737 | if (!(cmd_fibcontext = aac_fib_alloc(dev))) { |
1715 | scsicmd->result = DID_ERROR << 16; | 1738 | /* FIB temporarily unavailable, not catastrophic failure */ |
1716 | scsicmd->scsi_done(scsicmd); | 1739 | |
1717 | return 0; | 1740 | /* scsicmd->result = DID_ERROR << 16; |
1741 | * scsicmd->scsi_done(scsicmd); | ||
1742 | * return 0; | ||
1743 | */ | ||
1744 | printk(KERN_WARNING "aac_write: fib allocation failed\n"); | ||
1745 | return -1; | ||
1718 | } | 1746 | } |
1719 | 1747 | ||
1720 | status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); | 1748 | status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); |
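The aachba.c hunks above stop completing and freeing a FIB when the send returned -ERESTARTSYS, since the firmware may still own it; the buffer is only released once a response has actually arrived. A toy version of that conditional cleanup (struct fib and the status handling are stand-ins):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fib { int payload; };	/* stand-in for the driver's FIB */

/* Release the request only when the firmware has answered; -ERESTARTSYS
 * means it may still be processing the buffer, so keep it allocated. */
static void finish_request(struct fib *fib, int status)
{
	if (status != -ERESTARTSYS)
		free(fib);
	else
		printf("firmware still owns the fib, not freeing\n");
}

int main(void)
{
	struct fib *fib = malloc(sizeof(*fib));

	if (!fib)
		return 1;
	finish_request(fib, 0);		/* got a response: safe to free */
	return 0;
}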
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 83986ed86556..619c02d9c862 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
@@ -12,7 +12,7 @@ | |||
12 | *----------------------------------------------------------------------------*/ | 12 | *----------------------------------------------------------------------------*/ |
13 | 13 | ||
14 | #ifndef AAC_DRIVER_BUILD | 14 | #ifndef AAC_DRIVER_BUILD |
15 | # define AAC_DRIVER_BUILD 2461 | 15 | # define AAC_DRIVER_BUILD 24702 |
16 | # define AAC_DRIVER_BRANCH "-ms" | 16 | # define AAC_DRIVER_BRANCH "-ms" |
17 | #endif | 17 | #endif |
18 | #define MAXIMUM_NUM_CONTAINERS 32 | 18 | #define MAXIMUM_NUM_CONTAINERS 32 |
@@ -1036,6 +1036,9 @@ struct aac_dev | |||
1036 | u8 printf_enabled; | 1036 | u8 printf_enabled; |
1037 | u8 in_reset; | 1037 | u8 in_reset; |
1038 | u8 msi; | 1038 | u8 msi; |
1039 | int management_fib_count; | ||
1040 | spinlock_t manage_lock; | ||
1041 | |||
1039 | }; | 1042 | }; |
1040 | 1043 | ||
1041 | #define aac_adapter_interrupt(dev) \ | 1044 | #define aac_adapter_interrupt(dev) \ |
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 0391d759dfdb..9c0c91178538 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -153,7 +153,7 @@ cleanup: | |||
153 | fibptr->hw_fib_pa = hw_fib_pa; | 153 | fibptr->hw_fib_pa = hw_fib_pa; |
154 | fibptr->hw_fib_va = hw_fib; | 154 | fibptr->hw_fib_va = hw_fib; |
155 | } | 155 | } |
156 | if (retval != -EINTR) | 156 | if (retval != -ERESTARTSYS) |
157 | aac_fib_free(fibptr); | 157 | aac_fib_free(fibptr); |
158 | return retval; | 158 | return retval; |
159 | } | 159 | } |
@@ -322,7 +322,7 @@ return_fib: | |||
322 | } | 322 | } |
323 | if (f.wait) { | 323 | if (f.wait) { |
324 | if(down_interruptible(&fibctx->wait_sem) < 0) { | 324 | if(down_interruptible(&fibctx->wait_sem) < 0) { |
325 | status = -EINTR; | 325 | status = -ERESTARTSYS; |
326 | } else { | 326 | } else { |
327 | /* Lock again and retry */ | 327 | /* Lock again and retry */ |
328 | spin_lock_irqsave(&dev->fib_lock, flags); | 328 | spin_lock_irqsave(&dev->fib_lock, flags); |
@@ -593,10 +593,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
593 | u64 addr; | 593 | u64 addr; |
594 | void* p; | 594 | void* p; |
595 | if (upsg->sg[i].count > | 595 | if (upsg->sg[i].count > |
596 | (dev->adapter_info.options & | 596 | ((dev->adapter_info.options & |
597 | AAC_OPT_NEW_COMM) ? | 597 | AAC_OPT_NEW_COMM) ? |
598 | (dev->scsi_host_ptr->max_sectors << 9) : | 598 | (dev->scsi_host_ptr->max_sectors << 9) : |
599 | 65536) { | 599 | 65536)) { |
600 | rcode = -EINVAL; | 600 | rcode = -EINVAL; |
601 | goto cleanup; | 601 | goto cleanup; |
602 | } | 602 | } |
@@ -645,10 +645,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
645 | u64 addr; | 645 | u64 addr; |
646 | void* p; | 646 | void* p; |
647 | if (usg->sg[i].count > | 647 | if (usg->sg[i].count > |
648 | (dev->adapter_info.options & | 648 | ((dev->adapter_info.options & |
649 | AAC_OPT_NEW_COMM) ? | 649 | AAC_OPT_NEW_COMM) ? |
650 | (dev->scsi_host_ptr->max_sectors << 9) : | 650 | (dev->scsi_host_ptr->max_sectors << 9) : |
651 | 65536) { | 651 | 65536)) { |
652 | rcode = -EINVAL; | 652 | rcode = -EINVAL; |
653 | goto cleanup; | 653 | goto cleanup; |
654 | } | 654 | } |
@@ -695,10 +695,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
695 | uintptr_t addr; | 695 | uintptr_t addr; |
696 | void* p; | 696 | void* p; |
697 | if (usg->sg[i].count > | 697 | if (usg->sg[i].count > |
698 | (dev->adapter_info.options & | 698 | ((dev->adapter_info.options & |
699 | AAC_OPT_NEW_COMM) ? | 699 | AAC_OPT_NEW_COMM) ? |
700 | (dev->scsi_host_ptr->max_sectors << 9) : | 700 | (dev->scsi_host_ptr->max_sectors << 9) : |
701 | 65536) { | 701 | 65536)) { |
702 | rcode = -EINVAL; | 702 | rcode = -EINVAL; |
703 | goto cleanup; | 703 | goto cleanup; |
704 | } | 704 | } |
@@ -734,10 +734,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
734 | dma_addr_t addr; | 734 | dma_addr_t addr; |
735 | void* p; | 735 | void* p; |
736 | if (upsg->sg[i].count > | 736 | if (upsg->sg[i].count > |
737 | (dev->adapter_info.options & | 737 | ((dev->adapter_info.options & |
738 | AAC_OPT_NEW_COMM) ? | 738 | AAC_OPT_NEW_COMM) ? |
739 | (dev->scsi_host_ptr->max_sectors << 9) : | 739 | (dev->scsi_host_ptr->max_sectors << 9) : |
740 | 65536) { | 740 | 65536)) { |
741 | rcode = -EINVAL; | 741 | rcode = -EINVAL; |
742 | goto cleanup; | 742 | goto cleanup; |
743 | } | 743 | } |
@@ -772,8 +772,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
772 | psg->count = cpu_to_le32(sg_indx+1); | 772 | psg->count = cpu_to_le32(sg_indx+1); |
773 | status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); | 773 | status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); |
774 | } | 774 | } |
775 | if (status == -EINTR) { | 775 | if (status == -ERESTARTSYS) { |
776 | rcode = -EINTR; | 776 | rcode = -ERESTARTSYS; |
777 | goto cleanup; | 777 | goto cleanup; |
778 | } | 778 | } |
779 | 779 | ||
@@ -810,7 +810,7 @@ cleanup: | |||
810 | for(i=0; i <= sg_indx; i++){ | 810 | for(i=0; i <= sg_indx; i++){ |
811 | kfree(sg_list[i]); | 811 | kfree(sg_list[i]); |
812 | } | 812 | } |
813 | if (rcode != -EINTR) { | 813 | if (rcode != -ERESTARTSYS) { |
814 | aac_fib_complete(srbfib); | 814 | aac_fib_complete(srbfib); |
815 | aac_fib_free(srbfib); | 815 | aac_fib_free(srbfib); |
816 | } | 816 | } |
@@ -848,7 +848,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) | |||
848 | */ | 848 | */ |
849 | 849 | ||
850 | status = aac_dev_ioctl(dev, cmd, arg); | 850 | status = aac_dev_ioctl(dev, cmd, arg); |
851 | if(status != -ENOTTY) | 851 | if (status != -ENOTTY) |
852 | return status; | 852 | return status; |
853 | 853 | ||
854 | switch (cmd) { | 854 | switch (cmd) { |
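
The commctrl.c hunks above only add a pair of parentheses, but they fix a genuine operator-precedence bug: '>' binds tighter than '?:', so in the old form the whole if-condition collapses to the ternary's result (always non-zero) instead of comparing the SG element count against the selected limit. A stand-alone sketch of the pitfall, with illustrative limits and sample values rather than the driver's real ones:

/*
 * Stand-alone illustration (not driver code) of why the extra
 * parentheses in the aac_send_raw_srb() checks matter: '>' binds
 * tighter than '?:'.
 */
#include <stdio.h>

#define LIMIT_NEW 131072u	/* stand-in for max_sectors << 9 */
#define LIMIT_OLD  65536u

int main(void)
{
	unsigned int count = 4096, new_comm = 1;

	/* Parses as (count > new_comm) ? LIMIT_NEW : LIMIT_OLD -> 131072 */
	unsigned int buggy = count > new_comm ? LIMIT_NEW : LIMIT_OLD;

	/* Patched form: compare count against the selected limit -> 0 */
	int fixed = count > (new_comm ? LIMIT_NEW : LIMIT_OLD);

	printf("buggy condition = %u (non-zero, bogus -EINVAL path)\n", buggy);
	printf("fixed condition = %d\n", fixed);
	return 0;
}
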
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 666d5151d628..a7261486ccd4 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -194,7 +194,9 @@ int aac_send_shutdown(struct aac_dev * dev) | |||
194 | 194 | ||
195 | if (status >= 0) | 195 | if (status >= 0) |
196 | aac_fib_complete(fibctx); | 196 | aac_fib_complete(fibctx); |
197 | aac_fib_free(fibctx); | 197 | /* FIB should be freed only after getting the response from the F/W */ |
198 | if (status != -ERESTARTSYS) | ||
199 | aac_fib_free(fibctx); | ||
198 | return status; | 200 | return status; |
199 | } | 201 | } |
200 | 202 | ||
@@ -304,6 +306,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) | |||
304 | /* | 306 | /* |
305 | * Check the preferred comm settings, defaults from template. | 307 | * Check the preferred comm settings, defaults from template. |
306 | */ | 308 | */ |
309 | dev->management_fib_count = 0; | ||
310 | spin_lock_init(&dev->manage_lock); | ||
307 | dev->max_fib_size = sizeof(struct hw_fib); | 311 | dev->max_fib_size = sizeof(struct hw_fib); |
308 | dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size | 312 | dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size |
309 | - sizeof(struct aac_fibhdr) | 313 | - sizeof(struct aac_fibhdr) |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 956261f25181..94d2954d79ae 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -189,7 +189,14 @@ struct fib *aac_fib_alloc(struct aac_dev *dev) | |||
189 | 189 | ||
190 | void aac_fib_free(struct fib *fibptr) | 190 | void aac_fib_free(struct fib *fibptr) |
191 | { | 191 | { |
192 | unsigned long flags; | 192 | unsigned long flags, flagsv; |
193 | |||
194 | spin_lock_irqsave(&fibptr->event_lock, flagsv); | ||
195 | if (fibptr->done == 2) { | ||
196 | spin_unlock_irqrestore(&fibptr->event_lock, flagsv); | ||
197 | return; | ||
198 | } | ||
199 | spin_unlock_irqrestore(&fibptr->event_lock, flagsv); | ||
193 | 200 | ||
194 | spin_lock_irqsave(&fibptr->dev->fib_lock, flags); | 201 | spin_lock_irqsave(&fibptr->dev->fib_lock, flags); |
195 | if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) | 202 | if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) |
@@ -390,6 +397,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
390 | struct hw_fib * hw_fib = fibptr->hw_fib_va; | 397 | struct hw_fib * hw_fib = fibptr->hw_fib_va; |
391 | unsigned long flags = 0; | 398 | unsigned long flags = 0; |
392 | unsigned long qflags; | 399 | unsigned long qflags; |
400 | unsigned long mflags = 0; | ||
401 | |||
393 | 402 | ||
394 | if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) | 403 | if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) |
395 | return -EBUSY; | 404 | return -EBUSY; |
@@ -471,9 +480,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
471 | if (!dev->queues) | 480 | if (!dev->queues) |
472 | return -EBUSY; | 481 | return -EBUSY; |
473 | 482 | ||
474 | if(wait) | 483 | if (wait) { |
484 | |||
485 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
486 | if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { | ||
487 | printk(KERN_INFO "No management Fibs Available:%d\n", | ||
488 | dev->management_fib_count); | ||
489 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
490 | return -EBUSY; | ||
491 | } | ||
492 | dev->management_fib_count++; | ||
493 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
475 | spin_lock_irqsave(&fibptr->event_lock, flags); | 494 | spin_lock_irqsave(&fibptr->event_lock, flags); |
476 | aac_adapter_deliver(fibptr); | 495 | } |
496 | |||
497 | if (aac_adapter_deliver(fibptr) != 0) { | ||
498 | printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); | ||
499 | if (wait) { | ||
500 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | ||
501 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
502 | dev->management_fib_count--; | ||
503 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
504 | } | ||
505 | return -EBUSY; | ||
506 | } | ||
507 | |||
477 | 508 | ||
478 | /* | 509 | /* |
479 | * If the caller wanted us to wait for response wait now. | 510 | * If the caller wanted us to wait for response wait now. |
@@ -516,14 +547,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
516 | udelay(5); | 547 | udelay(5); |
517 | } | 548 | } |
518 | } else if (down_interruptible(&fibptr->event_wait)) { | 549 | } else if (down_interruptible(&fibptr->event_wait)) { |
519 | fibptr->done = 2; | 550 | /* Do nothing ... satisfy |
520 | up(&fibptr->event_wait); | 551 | * down_interruptible must_check */ |
521 | } | 552 | } |
553 | |||
522 | spin_lock_irqsave(&fibptr->event_lock, flags); | 554 | spin_lock_irqsave(&fibptr->event_lock, flags); |
523 | if ((fibptr->done == 0) || (fibptr->done == 2)) { | 555 | if (fibptr->done == 0) { |
524 | fibptr->done = 2; /* Tell interrupt we aborted */ | 556 | fibptr->done = 2; /* Tell interrupt we aborted */ |
525 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | 557 | spin_unlock_irqrestore(&fibptr->event_lock, flags); |
526 | return -EINTR; | 558 | return -ERESTARTSYS; |
527 | } | 559 | } |
528 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | 560 | spin_unlock_irqrestore(&fibptr->event_lock, flags); |
529 | BUG_ON(fibptr->done == 0); | 561 | BUG_ON(fibptr->done == 0); |
@@ -689,6 +721,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) | |||
689 | 721 | ||
690 | int aac_fib_complete(struct fib *fibptr) | 722 | int aac_fib_complete(struct fib *fibptr) |
691 | { | 723 | { |
724 | unsigned long flags; | ||
692 | struct hw_fib * hw_fib = fibptr->hw_fib_va; | 725 | struct hw_fib * hw_fib = fibptr->hw_fib_va; |
693 | 726 | ||
694 | /* | 727 | /* |
@@ -709,6 +742,13 @@ int aac_fib_complete(struct fib *fibptr) | |||
709 | * command is complete that we had sent to the adapter and this | 742 | * command is complete that we had sent to the adapter and this |
710 | * cdb could be reused. | 743 | * cdb could be reused. |
711 | */ | 744 | */ |
745 | spin_lock_irqsave(&fibptr->event_lock, flags); | ||
746 | if (fibptr->done == 2) { | ||
747 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | ||
748 | return 0; | ||
749 | } | ||
750 | spin_unlock_irqrestore(&fibptr->event_lock, flags); | ||
751 | |||
712 | if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && | 752 | if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && |
713 | (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) | 753 | (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) |
714 | { | 754 | { |
@@ -1355,7 +1395,10 @@ int aac_reset_adapter(struct aac_dev * aac, int forced) | |||
1355 | 1395 | ||
1356 | if (status >= 0) | 1396 | if (status >= 0) |
1357 | aac_fib_complete(fibctx); | 1397 | aac_fib_complete(fibctx); |
1358 | aac_fib_free(fibctx); | 1398 | /* FIB should be freed only after getting |
1399 | * the response from the F/W */ | ||
1400 | if (status != -ERESTARTSYS) | ||
1401 | aac_fib_free(fibctx); | ||
1359 | } | 1402 | } |
1360 | } | 1403 | } |
1361 | 1404 | ||
@@ -1759,6 +1802,7 @@ int aac_command_thread(void *data) | |||
1759 | struct fib *fibptr; | 1802 | struct fib *fibptr; |
1760 | 1803 | ||
1761 | if ((fibptr = aac_fib_alloc(dev))) { | 1804 | if ((fibptr = aac_fib_alloc(dev))) { |
1805 | int status; | ||
1762 | __le32 *info; | 1806 | __le32 *info; |
1763 | 1807 | ||
1764 | aac_fib_init(fibptr); | 1808 | aac_fib_init(fibptr); |
@@ -1769,15 +1813,21 @@ int aac_command_thread(void *data) | |||
1769 | 1813 | ||
1770 | *info = cpu_to_le32(now.tv_sec); | 1814 | *info = cpu_to_le32(now.tv_sec); |
1771 | 1815 | ||
1772 | (void)aac_fib_send(SendHostTime, | 1816 | status = aac_fib_send(SendHostTime, |
1773 | fibptr, | 1817 | fibptr, |
1774 | sizeof(*info), | 1818 | sizeof(*info), |
1775 | FsaNormal, | 1819 | FsaNormal, |
1776 | 1, 1, | 1820 | 1, 1, |
1777 | NULL, | 1821 | NULL, |
1778 | NULL); | 1822 | NULL); |
1779 | aac_fib_complete(fibptr); | 1823 | /* Do not set XferState to zero unless |
1780 | aac_fib_free(fibptr); | 1824 | * receives a response from F/W */ |
1825 | if (status >= 0) | ||
1826 | aac_fib_complete(fibptr); | ||
1827 | /* FIB should be freed only after | ||
1828 | * getting the response from the F/W */ | ||
1829 | if (status != -ERESTARTSYS) | ||
1830 | aac_fib_free(fibptr); | ||
1781 | } | 1831 | } |
1782 | difference = (long)(unsigned)update_interval*HZ; | 1832 | difference = (long)(unsigned)update_interval*HZ; |
1783 | } else { | 1833 | } else { |
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index abc9ef5d1b10..9c7408fe8c7d 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -57,9 +57,9 @@ unsigned int aac_response_normal(struct aac_queue * q) | |||
57 | struct hw_fib * hwfib; | 57 | struct hw_fib * hwfib; |
58 | struct fib * fib; | 58 | struct fib * fib; |
59 | int consumed = 0; | 59 | int consumed = 0; |
60 | unsigned long flags; | 60 | unsigned long flags, mflags; |
61 | 61 | ||
62 | spin_lock_irqsave(q->lock, flags); | 62 | spin_lock_irqsave(q->lock, flags); |
63 | /* | 63 | /* |
64 | * Keep pulling response QEs off the response queue and waking | 64 | * Keep pulling response QEs off the response queue and waking |
65 | * up the waiters until there are no more QEs. We then return | 65 | * up the waiters until there are no more QEs. We then return |
@@ -125,12 +125,21 @@ unsigned int aac_response_normal(struct aac_queue * q) | |||
125 | } else { | 125 | } else { |
126 | unsigned long flagv; | 126 | unsigned long flagv; |
127 | spin_lock_irqsave(&fib->event_lock, flagv); | 127 | spin_lock_irqsave(&fib->event_lock, flagv); |
128 | if (!fib->done) | 128 | if (!fib->done) { |
129 | fib->done = 1; | 129 | fib->done = 1; |
130 | up(&fib->event_wait); | 130 | up(&fib->event_wait); |
131 | } | ||
131 | spin_unlock_irqrestore(&fib->event_lock, flagv); | 132 | spin_unlock_irqrestore(&fib->event_lock, flagv); |
133 | |||
134 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
135 | dev->management_fib_count--; | ||
136 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
137 | |||
132 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); | 138 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); |
133 | if (fib->done == 2) { | 139 | if (fib->done == 2) { |
140 | spin_lock_irqsave(&fib->event_lock, flagv); | ||
141 | fib->done = 0; | ||
142 | spin_unlock_irqrestore(&fib->event_lock, flagv); | ||
134 | aac_fib_complete(fib); | 143 | aac_fib_complete(fib); |
135 | aac_fib_free(fib); | 144 | aac_fib_free(fib); |
136 | } | 145 | } |
@@ -232,6 +241,7 @@ unsigned int aac_command_normal(struct aac_queue *q) | |||
232 | 241 | ||
233 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | 242 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) |
234 | { | 243 | { |
244 | unsigned long mflags; | ||
235 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); | 245 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); |
236 | if ((index & 0x00000002L)) { | 246 | if ((index & 0x00000002L)) { |
237 | struct hw_fib * hw_fib; | 247 | struct hw_fib * hw_fib; |
@@ -320,11 +330,25 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) | |||
320 | unsigned long flagv; | 330 | unsigned long flagv; |
321 | dprintk((KERN_INFO "event_wait up\n")); | 331 | dprintk((KERN_INFO "event_wait up\n")); |
322 | spin_lock_irqsave(&fib->event_lock, flagv); | 332 | spin_lock_irqsave(&fib->event_lock, flagv); |
323 | if (!fib->done) | 333 | if (!fib->done) { |
324 | fib->done = 1; | 334 | fib->done = 1; |
325 | up(&fib->event_wait); | 335 | up(&fib->event_wait); |
336 | } | ||
326 | spin_unlock_irqrestore(&fib->event_lock, flagv); | 337 | spin_unlock_irqrestore(&fib->event_lock, flagv); |
338 | |||
339 | spin_lock_irqsave(&dev->manage_lock, mflags); | ||
340 | dev->management_fib_count--; | ||
341 | spin_unlock_irqrestore(&dev->manage_lock, mflags); | ||
342 | |||
327 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); | 343 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); |
344 | if (fib->done == 2) { | ||
345 | spin_lock_irqsave(&fib->event_lock, flagv); | ||
346 | fib->done = 0; | ||
347 | spin_unlock_irqrestore(&fib->event_lock, flagv); | ||
348 | aac_fib_complete(fib); | ||
349 | aac_fib_free(fib); | ||
350 | } | ||
351 | |||
328 | } | 352 | } |
329 | return 0; | 353 | return 0; |
330 | } | 354 | } |
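
Taken together, the commsup.c and dpcsup.c hunks do two things: waited ("management") FIBs are now counted under dev->manage_lock and refused with -EBUSY once AAC_NUM_MGT_FIB are outstanding, and the response handlers give that slot back and only free a FIB whose waiter gave up (done == 2) after the firmware has actually answered. A rough user-space sketch of just the capped in-flight counter, using a pthread mutex in place of the spinlock; MAX_MGT, submit() and complete() are illustrative names, not driver symbols:

#include <stdio.h>
#include <errno.h>
#include <pthread.h>

#define MAX_MGT 8	/* stand-in for AAC_NUM_MGT_FIB */

static pthread_mutex_t manage_lock = PTHREAD_MUTEX_INITIALIZER;
static int management_fib_count;

/* Reserve a slot before delivering a waited request, or fail fast. */
static int submit(void)
{
	int rc = 0;

	pthread_mutex_lock(&manage_lock);
	if (management_fib_count >= MAX_MGT)
		rc = -EBUSY;	/* mirrors the early return in aac_fib_send() */
	else
		management_fib_count++;
	pthread_mutex_unlock(&manage_lock);
	return rc;
}

/* Completion path: hand the slot back, as the response handlers now do. */
static void complete(void)
{
	pthread_mutex_lock(&manage_lock);
	management_fib_count--;
	pthread_mutex_unlock(&manage_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_MGT + 2; i++)
		printf("submit %d -> %d\n", i, submit());
	complete();
	printf("after one completion, submit -> %d\n", submit());
	return 0;
}
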
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 4d419c155ce9..78971db5b60e 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c | |||
@@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3171 | tinfo->curr.transport_version = 2; | 3171 | tinfo->curr.transport_version = 2; |
3172 | tinfo->goal.transport_version = 2; | 3172 | tinfo->goal.transport_version = 2; |
3173 | tinfo->goal.ppr_options = 0; | 3173 | tinfo->goal.ppr_options = 0; |
3174 | /* | 3174 | if (scb != NULL) { |
3175 | * Remove any SCBs in the waiting for selection | 3175 | /* |
3176 | * queue that may also be for this target so | 3176 | * Remove any SCBs in the waiting |
3177 | * that command ordering is preserved. | 3177 | * for selection queue that may |
3178 | */ | 3178 | * also be for this target so that |
3179 | ahd_freeze_devq(ahd, scb); | 3179 | * command ordering is preserved. |
3180 | ahd_qinfifo_requeue_tail(ahd, scb); | 3180 | */ |
3181 | ahd_freeze_devq(ahd, scb); | ||
3182 | ahd_qinfifo_requeue_tail(ahd, scb); | ||
3183 | } | ||
3181 | printerror = 0; | 3184 | printerror = 0; |
3182 | } | 3185 | } |
3183 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) | 3186 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) |
@@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3194 | MSG_EXT_WDTR_BUS_8_BIT, | 3197 | MSG_EXT_WDTR_BUS_8_BIT, |
3195 | AHD_TRANS_CUR|AHD_TRANS_GOAL, | 3198 | AHD_TRANS_CUR|AHD_TRANS_GOAL, |
3196 | /*paused*/TRUE); | 3199 | /*paused*/TRUE); |
3197 | /* | 3200 | if (scb != NULL) { |
3198 | * Remove any SCBs in the waiting for selection | 3201 | /* |
3199 | * queue that may also be for this target so that | 3202 | * Remove any SCBs in the waiting for |
3200 | * command ordering is preserved. | 3203 | * selection queue that may also be for |
3201 | */ | 3204 | * this target so that command ordering |
3202 | ahd_freeze_devq(ahd, scb); | 3205 | * is preserved. |
3203 | ahd_qinfifo_requeue_tail(ahd, scb); | 3206 | */ |
3207 | ahd_freeze_devq(ahd, scb); | ||
3208 | ahd_qinfifo_requeue_tail(ahd, scb); | ||
3209 | } | ||
3204 | printerror = 0; | 3210 | printerror = 0; |
3205 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) | 3211 | } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) |
3206 | && ppr_busfree == 0) { | 3212 | && ppr_busfree == 0) { |
@@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3217 | /*ppr_options*/0, | 3223 | /*ppr_options*/0, |
3218 | AHD_TRANS_CUR|AHD_TRANS_GOAL, | 3224 | AHD_TRANS_CUR|AHD_TRANS_GOAL, |
3219 | /*paused*/TRUE); | 3225 | /*paused*/TRUE); |
3220 | /* | 3226 | if (scb != NULL) { |
3221 | * Remove any SCBs in the waiting for selection | 3227 | /* |
3222 | * queue that may also be for this target so that | 3228 | * Remove any SCBs in the waiting for |
3223 | * command ordering is preserved. | 3229 | * selection queue that may also be for |
3224 | */ | 3230 | * this target so that command ordering |
3225 | ahd_freeze_devq(ahd, scb); | 3231 | * is preserved. |
3226 | ahd_qinfifo_requeue_tail(ahd, scb); | 3232 | */ |
3233 | ahd_freeze_devq(ahd, scb); | ||
3234 | ahd_qinfifo_requeue_tail(ahd, scb); | ||
3235 | } | ||
3227 | printerror = 0; | 3236 | printerror = 0; |
3228 | } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 | 3237 | } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 |
3229 | && ahd_sent_msg(ahd, AHDMSG_1B, | 3238 | && ahd_sent_msg(ahd, AHDMSG_1B, |
@@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) | |||
3251 | * the message phases. We check it last in case we | 3260 | * the message phases. We check it last in case we |
3252 | * had to send some other message that caused a busfree. | 3261 | * had to send some other message that caused a busfree. |
3253 | */ | 3262 | */ |
3254 | if (printerror != 0 | 3263 | if (scb != NULL && printerror != 0 |
3255 | && (lastphase == P_MESGIN || lastphase == P_MESGOUT) | 3264 | && (lastphase == P_MESGIN || lastphase == P_MESGOUT) |
3256 | && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { | 3265 | && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { |
3257 | 3266 | ||
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2445e399fd60..2445e399fd60 100755..100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 8a2a1c5935c6..8a2a1c5935c6 100755..100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 608e675f68c8..1263d9796e89 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -1586,8 +1586,7 @@ typedef struct fc_port { | |||
1586 | */ | 1586 | */ |
1587 | #define FCF_FABRIC_DEVICE BIT_0 | 1587 | #define FCF_FABRIC_DEVICE BIT_0 |
1588 | #define FCF_LOGIN_NEEDED BIT_1 | 1588 | #define FCF_LOGIN_NEEDED BIT_1 |
1589 | #define FCF_TAPE_PRESENT BIT_2 | 1589 | #define FCF_FCP2_DEVICE BIT_2 |
1590 | #define FCF_FCP2_DEVICE BIT_3 | ||
1591 | 1590 | ||
1592 | /* No loop ID flag. */ | 1591 | /* No loop ID flag. */ |
1593 | #define FC_NO_LOOP_ID 0x1000 | 1592 | #define FC_NO_LOOP_ID 0x1000 |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b4a0eac8f96d..3f8e8495b743 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -205,7 +205,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
205 | 205 | ||
206 | switch (data[0]) { | 206 | switch (data[0]) { |
207 | case MBS_COMMAND_COMPLETE: | 207 | case MBS_COMMAND_COMPLETE: |
208 | if (fcport->flags & FCF_TAPE_PRESENT) | 208 | if (fcport->flags & FCF_FCP2_DEVICE) |
209 | opts |= BIT_1; | 209 | opts |= BIT_1; |
210 | rval = qla2x00_get_port_database(vha, fcport, opts); | 210 | rval = qla2x00_get_port_database(vha, fcport, opts); |
211 | if (rval != QLA_SUCCESS) | 211 | if (rval != QLA_SUCCESS) |
@@ -2726,7 +2726,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
2726 | 2726 | ||
2727 | /* | 2727 | /* |
2728 | * Logout all previous fabric devices marked lost, except | 2728 | * Logout all previous fabric devices marked lost, except |
2729 | * tape devices. | 2729 | * FCP2 devices. |
2730 | */ | 2730 | */ |
2731 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 2731 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
2732 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | 2732 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
@@ -2739,7 +2739,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) | |||
2739 | qla2x00_mark_device_lost(vha, fcport, | 2739 | qla2x00_mark_device_lost(vha, fcport, |
2740 | ql2xplogiabsentdevice, 0); | 2740 | ql2xplogiabsentdevice, 0); |
2741 | if (fcport->loop_id != FC_NO_LOOP_ID && | 2741 | if (fcport->loop_id != FC_NO_LOOP_ID && |
2742 | (fcport->flags & FCF_TAPE_PRESENT) == 0 && | 2742 | (fcport->flags & FCF_FCP2_DEVICE) == 0 && |
2743 | fcport->port_type != FCT_INITIATOR && | 2743 | fcport->port_type != FCT_INITIATOR && |
2744 | fcport->port_type != FCT_BROADCAST) { | 2744 | fcport->port_type != FCT_BROADCAST) { |
2745 | ha->isp_ops->fabric_logout(vha, | 2745 | ha->isp_ops->fabric_logout(vha, |
@@ -3018,7 +3018,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
3018 | fcport->d_id.b24 = new_fcport->d_id.b24; | 3018 | fcport->d_id.b24 = new_fcport->d_id.b24; |
3019 | fcport->flags |= FCF_LOGIN_NEEDED; | 3019 | fcport->flags |= FCF_LOGIN_NEEDED; |
3020 | if (fcport->loop_id != FC_NO_LOOP_ID && | 3020 | if (fcport->loop_id != FC_NO_LOOP_ID && |
3021 | (fcport->flags & FCF_TAPE_PRESENT) == 0 && | 3021 | (fcport->flags & FCF_FCP2_DEVICE) == 0 && |
3022 | fcport->port_type != FCT_INITIATOR && | 3022 | fcport->port_type != FCT_INITIATOR && |
3023 | fcport->port_type != FCT_BROADCAST) { | 3023 | fcport->port_type != FCT_BROADCAST) { |
3024 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, | 3024 | ha->isp_ops->fabric_logout(vha, fcport->loop_id, |
@@ -3272,9 +3272,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
3272 | 3272 | ||
3273 | rval = qla2x00_fabric_login(vha, fcport, next_loopid); | 3273 | rval = qla2x00_fabric_login(vha, fcport, next_loopid); |
3274 | if (rval == QLA_SUCCESS) { | 3274 | if (rval == QLA_SUCCESS) { |
3275 | /* Send an ADISC to tape devices.*/ | 3275 | /* Send an ADISC to FCP2 devices.*/ |
3276 | opts = 0; | 3276 | opts = 0; |
3277 | if (fcport->flags & FCF_TAPE_PRESENT) | 3277 | if (fcport->flags & FCF_FCP2_DEVICE) |
3278 | opts |= BIT_1; | 3278 | opts |= BIT_1; |
3279 | rval = qla2x00_get_port_database(vha, fcport, opts); | 3279 | rval = qla2x00_get_port_database(vha, fcport, opts); |
3280 | if (rval != QLA_SUCCESS) { | 3280 | if (rval != QLA_SUCCESS) { |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 209f50e788a1..8529eb1f3cd4 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1188,7 +1188,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev) | |||
1188 | scsi_qla_host_t *vha = shost_priv(sdev->host); | 1188 | scsi_qla_host_t *vha = shost_priv(sdev->host); |
1189 | struct qla_hw_data *ha = vha->hw; | 1189 | struct qla_hw_data *ha = vha->hw; |
1190 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); | 1190 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); |
1191 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; | ||
1192 | struct req_que *req = vha->req; | 1191 | struct req_que *req = vha->req; |
1193 | 1192 | ||
1194 | if (sdev->tagged_supported) | 1193 | if (sdev->tagged_supported) |
@@ -1197,8 +1196,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev) | |||
1197 | scsi_deactivate_tcq(sdev, req->max_q_depth); | 1196 | scsi_deactivate_tcq(sdev, req->max_q_depth); |
1198 | 1197 | ||
1199 | rport->dev_loss_tmo = ha->port_down_retry_count; | 1198 | rport->dev_loss_tmo = ha->port_down_retry_count; |
1200 | if (sdev->type == TYPE_TAPE) | ||
1201 | fcport->flags |= FCF_TAPE_PRESENT; | ||
1202 | 1199 | ||
1203 | return 0; | 1200 | return 0; |
1204 | } | 1201 | } |
@@ -2805,7 +2802,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) | |||
2805 | 2802 | ||
2806 | fcport->login_retry--; | 2803 | fcport->login_retry--; |
2807 | if (fcport->flags & FCF_FABRIC_DEVICE) { | 2804 | if (fcport->flags & FCF_FABRIC_DEVICE) { |
2808 | if (fcport->flags & FCF_TAPE_PRESENT) | 2805 | if (fcport->flags & FCF_FCP2_DEVICE) |
2809 | ha->isp_ops->fabric_logout(vha, | 2806 | ha->isp_ops->fabric_logout(vha, |
2810 | fcport->loop_id, | 2807 | fcport->loop_id, |
2811 | fcport->d_id.b.domain, | 2808 | fcport->d_id.b.domain, |
@@ -3141,7 +3138,10 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
3141 | if (!IS_QLA2100(ha) && vha->link_down_timeout) | 3138 | if (!IS_QLA2100(ha) && vha->link_down_timeout) |
3142 | atomic_set(&vha->loop_state, LOOP_DEAD); | 3139 | atomic_set(&vha->loop_state, LOOP_DEAD); |
3143 | 3140 | ||
3144 | /* Schedule an ISP abort to return any tape commands. */ | 3141 | /* |
3142 | * Schedule an ISP abort to return any FCP2-device | ||
3143 | * commands. | ||
3144 | */ | ||
3145 | /* NPIV - scan physical port only */ | 3145 | /* NPIV - scan physical port only */ |
3146 | if (!vha->vp_idx) { | 3146 | if (!vha->vp_idx) { |
3147 | spin_lock_irqsave(&ha->hardware_lock, | 3147 | spin_lock_irqsave(&ha->hardware_lock, |
@@ -3158,7 +3158,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
3158 | if (sp->ctx) | 3158 | if (sp->ctx) |
3159 | continue; | 3159 | continue; |
3160 | sfcp = sp->fcport; | 3160 | sfcp = sp->fcport; |
3161 | if (!(sfcp->flags & FCF_TAPE_PRESENT)) | 3161 | if (!(sfcp->flags & FCF_FCP2_DEVICE)) |
3162 | continue; | 3162 | continue; |
3163 | 3163 | ||
3164 | set_bit(ISP_ABORT_NEEDED, | 3164 | set_bit(ISP_ABORT_NEEDED, |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 010e69b29afe..371dc895972a 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -2292,11 +2292,14 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, | |||
2292 | uint32_t faddr, left, burst; | 2292 | uint32_t faddr, left, burst; |
2293 | struct qla_hw_data *ha = vha->hw; | 2293 | struct qla_hw_data *ha = vha->hw; |
2294 | 2294 | ||
2295 | if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) | ||
2296 | goto try_fast; | ||
2295 | if (offset & 0xfff) | 2297 | if (offset & 0xfff) |
2296 | goto slow_read; | 2298 | goto slow_read; |
2297 | if (length < OPTROM_BURST_SIZE) | 2299 | if (length < OPTROM_BURST_SIZE) |
2298 | goto slow_read; | 2300 | goto slow_read; |
2299 | 2301 | ||
2302 | try_fast: | ||
2300 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, | 2303 | optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, |
2301 | &optrom_dma, GFP_KERNEL); | 2304 | &optrom_dma, GFP_KERNEL); |
2302 | if (!optrom) { | 2305 | if (!optrom) { |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index a65dd95507c6..ed36279a33c1 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.01-k9" | 10 | #define QLA2XXX_VERSION "8.03.01-k10" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d8927681ec88..c6642423cc67 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
749 | */ | 749 | */ |
750 | req->next_rq->resid_len = scsi_in(cmd)->resid; | 750 | req->next_rq->resid_len = scsi_in(cmd)->resid; |
751 | 751 | ||
752 | scsi_release_buffers(cmd); | ||
752 | blk_end_request_all(req, 0); | 753 | blk_end_request_all(req, 0); |
753 | 754 | ||
754 | scsi_release_buffers(cmd); | ||
755 | scsi_next_command(cmd); | 755 | scsi_next_command(cmd); |
756 | return; | 756 | return; |
757 | } | 757 | } |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index ddfcecd5099f..653f22a8deb9 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3527,7 +3527,10 @@ fc_bsg_job_timeout(struct request *req) | |||
3527 | if (!done && i->f->bsg_timeout) { | 3527 | if (!done && i->f->bsg_timeout) { |
3528 | /* call LLDD to abort the i/o as it has timed out */ | 3528 | /* call LLDD to abort the i/o as it has timed out */ |
3529 | err = i->f->bsg_timeout(job); | 3529 | err = i->f->bsg_timeout(job); |
3530 | if (err) | 3530 | if (err == -EAGAIN) { |
3531 | job->ref_cnt--; | ||
3532 | return BLK_EH_RESET_TIMER; | ||
3533 | } else if (err) | ||
3531 | printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " | 3534 | printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " |
3532 | "abort failed with status %d\n", err); | 3535 | "abort failed with status %d\n", err); |
3533 | } | 3536 | } |
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index df854401af2d..95421fa3b304 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -758,6 +758,7 @@ static struct pcmcia_device_id serial_ids[] = { | |||
758 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), | 758 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), |
759 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), | 759 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), |
760 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), | 760 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), |
761 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), | ||
761 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), | 762 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), |
762 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), | 763 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), |
763 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), | 764 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 088f32f29a6e..050ee147592f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -396,8 +396,8 @@ config SBC_FITPC2_WATCHDOG | |||
396 | tristate "Compulab SBC-FITPC2 watchdog" | 396 | tristate "Compulab SBC-FITPC2 watchdog" |
397 | depends on X86 | 397 | depends on X86 |
398 | ---help--- | 398 | ---help--- |
399 | This is the driver for the built-in watchdog timer on the fit-PC2 | 399 | This is the driver for the built-in watchdog timer on the fit-PC2, |
400 | Single-board computer made by Compulab. | 400 | fit-PC2i, CM-iAM single-board computers made by Compulab. |
401 | 401 | ||
402 | It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux. | 402 | It`s possible to enable watchdog timer either from BIOS (F2) or from booted Linux. |
403 | When "Watchdog Timer Value" enabled one can set 31-255 s operational range. | 403 | When "Watchdog Timer Value" enabled one can set 31-255 s operational range. |
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c index 4f4b35a20d84..3c79dc587958 100644 --- a/drivers/watchdog/ixp2000_wdt.c +++ b/drivers/watchdog/ixp2000_wdt.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/moduleparam.h> | 20 | #include <linux/moduleparam.h> |
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/timer.h> | ||
22 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
23 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
24 | #include <linux/miscdevice.h> | 25 | #include <linux/miscdevice.h> |
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 91430a89107c..e6763d2a567b 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c | |||
@@ -46,9 +46,9 @@ static DEFINE_SPINLOCK(wdt_lock); | |||
46 | static void wdt_send_data(unsigned char command, unsigned char data) | 46 | static void wdt_send_data(unsigned char command, unsigned char data) |
47 | { | 47 | { |
48 | outb(command, COMMAND_PORT); | 48 | outb(command, COMMAND_PORT); |
49 | mdelay(100); | 49 | msleep(100); |
50 | outb(data, DATA_PORT); | 50 | outb(data, DATA_PORT); |
51 | mdelay(200); | 51 | msleep(200); |
52 | } | 52 | } |
53 | 53 | ||
54 | static void wdt_enable(void) | 54 | static void wdt_enable(void) |
@@ -202,11 +202,10 @@ static int __init fitpc2_wdt_init(void) | |||
202 | { | 202 | { |
203 | int err; | 203 | int err; |
204 | 204 | ||
205 | if (strcmp("SBC-FITPC2", dmi_get_system_info(DMI_BOARD_NAME))) { | 205 | if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) |
206 | pr_info("board name is: %s. Should be SBC-FITPC2\n", | ||
207 | dmi_get_system_info(DMI_BOARD_NAME)); | ||
208 | return -ENODEV; | 206 | return -ENODEV; |
209 | } | 207 | |
208 | pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); | ||
210 | 209 | ||
211 | if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { | 210 | if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { |
212 | pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); | 211 | pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); |
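
The init-path change just above replaces an exact strcmp() of the DMI board name with a substring match, which is what lets the one driver also cover the boards added to the Kconfig help text, provided their DMI string contains "SBC-FITPC2". A trivial stand-alone check of that behaviour; the "SBC-FITPC2i" sample string is an assumption, not a value read from real DMI tables:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *board = "SBC-FITPC2i";	/* hypothetical DMI board name */

	/* Old check: exact match only */
	printf("strcmp match: %s\n",
	       strcmp("SBC-FITPC2", board) == 0 ? "yes" : "no");

	/* New check: accept any board name containing the substring */
	printf("strstr match: %s\n",
	       strstr(board, "SBC-FITPC2") ? "yes" : "no");
	return 0;
}
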
diff --git a/fs/eventfd.c b/fs/eventfd.c index d26402ff06ea..7758cc382ef0 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
@@ -135,26 +135,71 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait) | |||
135 | return events; | 135 | return events; |
136 | } | 136 | } |
137 | 137 | ||
138 | static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, | 138 | static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) |
139 | loff_t *ppos) | 139 | { |
140 | *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count; | ||
141 | ctx->count -= *cnt; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * eventfd_ctx_remove_wait_queue - Read the current counter and remove the wait queue. | ||
146 | * @ctx: [in] Pointer to eventfd context. | ||
147 | * @wait: [in] Wait queue to be removed. | ||
148 | * @cnt: [out] Pointer to the 64bit counter value. | ||
149 | * | ||
150 | * Returns zero if successful, or the following error codes: | ||
151 | * | ||
152 | * -EAGAIN : The operation would have blocked. | ||
153 | * | ||
154 | * This is used to atomically remove a wait queue entry from the eventfd wait | ||
155 | * queue head, and read/reset the counter value. | ||
156 | */ | ||
157 | int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, | ||
158 | __u64 *cnt) | ||
159 | { | ||
160 | unsigned long flags; | ||
161 | |||
162 | spin_lock_irqsave(&ctx->wqh.lock, flags); | ||
163 | eventfd_ctx_do_read(ctx, cnt); | ||
164 | __remove_wait_queue(&ctx->wqh, wait); | ||
165 | if (*cnt != 0 && waitqueue_active(&ctx->wqh)) | ||
166 | wake_up_locked_poll(&ctx->wqh, POLLOUT); | ||
167 | spin_unlock_irqrestore(&ctx->wqh.lock, flags); | ||
168 | |||
169 | return *cnt != 0 ? 0 : -EAGAIN; | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); | ||
172 | |||
173 | /** | ||
174 | * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. | ||
175 | * @ctx: [in] Pointer to eventfd context. | ||
176 | * @no_wait: [in] Different from zero if the operation should not block. | ||
177 | * @cnt: [out] Pointer to the 64bit counter value. | ||
178 | * | ||
179 | * Returns zero if successful, or the following error codes: | ||
180 | * | ||
181 | * -EAGAIN : The operation would have blocked but @no_wait was nonzero. | ||
182 | * -ERESTARTSYS : A signal interrupted the wait operation. | ||
183 | * | ||
184 | * If @no_wait is zero, the function might sleep until the eventfd internal | ||
185 | * counter becomes greater than zero. | ||
186 | */ | ||
187 | ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt) | ||
140 | { | 188 | { |
141 | struct eventfd_ctx *ctx = file->private_data; | ||
142 | ssize_t res; | 189 | ssize_t res; |
143 | __u64 ucnt = 0; | ||
144 | DECLARE_WAITQUEUE(wait, current); | 190 | DECLARE_WAITQUEUE(wait, current); |
145 | 191 | ||
146 | if (count < sizeof(ucnt)) | ||
147 | return -EINVAL; | ||
148 | spin_lock_irq(&ctx->wqh.lock); | 192 | spin_lock_irq(&ctx->wqh.lock); |
193 | *cnt = 0; | ||
149 | res = -EAGAIN; | 194 | res = -EAGAIN; |
150 | if (ctx->count > 0) | 195 | if (ctx->count > 0) |
151 | res = sizeof(ucnt); | 196 | res = 0; |
152 | else if (!(file->f_flags & O_NONBLOCK)) { | 197 | else if (!no_wait) { |
153 | __add_wait_queue(&ctx->wqh, &wait); | 198 | __add_wait_queue(&ctx->wqh, &wait); |
154 | for (res = 0;;) { | 199 | for (;;) { |
155 | set_current_state(TASK_INTERRUPTIBLE); | 200 | set_current_state(TASK_INTERRUPTIBLE); |
156 | if (ctx->count > 0) { | 201 | if (ctx->count > 0) { |
157 | res = sizeof(ucnt); | 202 | res = 0; |
158 | break; | 203 | break; |
159 | } | 204 | } |
160 | if (signal_pending(current)) { | 205 | if (signal_pending(current)) { |
@@ -168,18 +213,32 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, | |||
168 | __remove_wait_queue(&ctx->wqh, &wait); | 213 | __remove_wait_queue(&ctx->wqh, &wait); |
169 | __set_current_state(TASK_RUNNING); | 214 | __set_current_state(TASK_RUNNING); |
170 | } | 215 | } |
171 | if (likely(res > 0)) { | 216 | if (likely(res == 0)) { |
172 | ucnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count; | 217 | eventfd_ctx_do_read(ctx, cnt); |
173 | ctx->count -= ucnt; | ||
174 | if (waitqueue_active(&ctx->wqh)) | 218 | if (waitqueue_active(&ctx->wqh)) |
175 | wake_up_locked_poll(&ctx->wqh, POLLOUT); | 219 | wake_up_locked_poll(&ctx->wqh, POLLOUT); |
176 | } | 220 | } |
177 | spin_unlock_irq(&ctx->wqh.lock); | 221 | spin_unlock_irq(&ctx->wqh.lock); |
178 | if (res > 0 && put_user(ucnt, (__u64 __user *) buf)) | ||
179 | return -EFAULT; | ||
180 | 222 | ||
181 | return res; | 223 | return res; |
182 | } | 224 | } |
225 | EXPORT_SYMBOL_GPL(eventfd_ctx_read); | ||
226 | |||
227 | static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, | ||
228 | loff_t *ppos) | ||
229 | { | ||
230 | struct eventfd_ctx *ctx = file->private_data; | ||
231 | ssize_t res; | ||
232 | __u64 cnt; | ||
233 | |||
234 | if (count < sizeof(cnt)) | ||
235 | return -EINVAL; | ||
236 | res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt); | ||
237 | if (res < 0) | ||
238 | return res; | ||
239 | |||
240 | return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt); | ||
241 | } | ||
183 | 242 | ||
184 | static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, | 243 | static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, |
185 | loff_t *ppos) | 244 | loff_t *ppos) |
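
The eventfd.c refactor splits the counter handling out of eventfd_read() into eventfd_ctx_do_read()/eventfd_ctx_read() and adds eventfd_ctx_remove_wait_queue(), so in-kernel users can consume the counter without going through a struct file. The counter semantics themselves do not change and match what user space already sees; a small runnable demonstration using plain eventfd(2), which needs nothing from this patch:

/*
 * User-space demonstration of the counter behaviour eventfd_ctx_read()
 * preserves: a normal read() drains the whole count, an EFD_SEMAPHORE
 * read() hands back exactly 1.  Requires Linux with eventfd(2).
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

static void demo(int flags, const char *label)
{
	uint64_t add = 3, got = 0;
	int fd = eventfd(0, flags);

	if (fd < 0) {
		perror("eventfd");
		return;
	}
	write(fd, &add, sizeof(add));	/* counter becomes 3 */
	read(fd, &got, sizeof(got));	/* 3 normally, 1 with EFD_SEMAPHORE */
	printf("%-14s first read -> %llu\n", label, (unsigned long long)got);
	close(fd);
}

int main(void)
{
	demo(0, "default");
	demo(EFD_SEMAPHORE, "EFD_SEMAPHORE");
	return 0;
}
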
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index af7b62699ea9..874d169a193e 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -361,14 +361,11 @@ struct ext4_new_group_data { | |||
361 | so set the magic i_delalloc_reserve_flag after taking the | 361 | so set the magic i_delalloc_reserve_flag after taking the |
362 | inode allocation semaphore for */ | 362 | inode allocation semaphore for */ |
363 | #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 | 363 | #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 |
364 | /* Call ext4_da_update_reserve_space() after successfully | ||
365 | allocating the blocks */ | ||
366 | #define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008 | ||
367 | /* caller is from the direct IO path, request to creation of an | 364 | /* caller is from the direct IO path, request to creation of an |
368 | unitialized extents if not allocated, split the uninitialized | 365 | unitialized extents if not allocated, split the uninitialized |
369 | extent if blocks has been preallocated already*/ | 366 | extent if blocks has been preallocated already*/ |
370 | #define EXT4_GET_BLOCKS_DIO 0x0010 | 367 | #define EXT4_GET_BLOCKS_DIO 0x0008 |
371 | #define EXT4_GET_BLOCKS_CONVERT 0x0020 | 368 | #define EXT4_GET_BLOCKS_CONVERT 0x0010 |
372 | #define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\ | 369 | #define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\ |
373 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) | 370 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) |
374 | /* Convert extent to initialized after direct IO complete */ | 371 | /* Convert extent to initialized after direct IO complete */ |
@@ -1443,6 +1440,8 @@ extern int ext4_block_truncate_page(handle_t *handle, | |||
1443 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 1440 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
1444 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); | 1441 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
1445 | extern int flush_aio_dio_completed_IO(struct inode *inode); | 1442 | extern int flush_aio_dio_completed_IO(struct inode *inode); |
1443 | extern void ext4_da_update_reserve_space(struct inode *inode, | ||
1444 | int used, int quota_claim); | ||
1446 | /* ioctl.c */ | 1445 | /* ioctl.c */ |
1447 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); | 1446 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); |
1448 | extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); | 1447 | extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 7d7b74e94687..765a4826b118 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -3132,7 +3132,19 @@ out: | |||
3132 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, | 3132 | unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, |
3133 | newblock + max_blocks, | 3133 | newblock + max_blocks, |
3134 | allocated - max_blocks); | 3134 | allocated - max_blocks); |
3135 | allocated = max_blocks; | ||
3135 | } | 3136 | } |
3137 | |||
3138 | /* | ||
3139 | * If we have done fallocate with the offset that is already | ||
3140 | * delayed allocated, we would have block reservation | ||
3141 | * and quota reservation done in the delayed write path. | ||
3142 | * But fallocate would have already updated quota and block | ||
3143 | * count for this offset. So cancel these reservation | ||
3144 | */ | ||
3145 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | ||
3146 | ext4_da_update_reserve_space(inode, allocated, 0); | ||
3147 | |||
3136 | map_out: | 3148 | map_out: |
3137 | set_buffer_mapped(bh_result); | 3149 | set_buffer_mapped(bh_result); |
3138 | out1: | 3150 | out1: |
@@ -3368,9 +3380,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | |||
3368 | /* previous routine could use block we allocated */ | 3380 | /* previous routine could use block we allocated */ |
3369 | newblock = ext_pblock(&newex); | 3381 | newblock = ext_pblock(&newex); |
3370 | allocated = ext4_ext_get_actual_len(&newex); | 3382 | allocated = ext4_ext_get_actual_len(&newex); |
3383 | if (allocated > max_blocks) | ||
3384 | allocated = max_blocks; | ||
3371 | set_buffer_new(bh_result); | 3385 | set_buffer_new(bh_result); |
3372 | 3386 | ||
3373 | /* | 3387 | /* |
3388 | * Update reserved blocks/metadata blocks after successful | ||
3389 | * block allocation which had been deferred till now. | ||
3390 | */ | ||
3391 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | ||
3392 | ext4_da_update_reserve_space(inode, allocated, 1); | ||
3393 | |||
3394 | /* | ||
3374 | * Cache the extent and update transaction to commit on fdatasync only | 3395 | * Cache the extent and update transaction to commit on fdatasync only |
3375 | * when it is _not_ an uninitialized extent. | 3396 | * when it is _not_ an uninitialized extent. |
3376 | */ | 3397 | */ |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c818972c8302..e11952404e02 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -1053,11 +1053,12 @@ static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock) | |||
1053 | * Called with i_data_sem down, which is important since we can call | 1053 | * Called with i_data_sem down, which is important since we can call |
1054 | * ext4_discard_preallocations() from here. | 1054 | * ext4_discard_preallocations() from here. |
1055 | */ | 1055 | */ |
1056 | static void ext4_da_update_reserve_space(struct inode *inode, int used) | 1056 | void ext4_da_update_reserve_space(struct inode *inode, |
1057 | int used, int quota_claim) | ||
1057 | { | 1058 | { |
1058 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1059 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
1059 | struct ext4_inode_info *ei = EXT4_I(inode); | 1060 | struct ext4_inode_info *ei = EXT4_I(inode); |
1060 | int mdb_free = 0; | 1061 | int mdb_free = 0, allocated_meta_blocks = 0; |
1061 | 1062 | ||
1062 | spin_lock(&ei->i_block_reservation_lock); | 1063 | spin_lock(&ei->i_block_reservation_lock); |
1063 | if (unlikely(used > ei->i_reserved_data_blocks)) { | 1064 | if (unlikely(used > ei->i_reserved_data_blocks)) { |
@@ -1073,6 +1074,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
1073 | ei->i_reserved_data_blocks -= used; | 1074 | ei->i_reserved_data_blocks -= used; |
1074 | used += ei->i_allocated_meta_blocks; | 1075 | used += ei->i_allocated_meta_blocks; |
1075 | ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; | 1076 | ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; |
1077 | allocated_meta_blocks = ei->i_allocated_meta_blocks; | ||
1076 | ei->i_allocated_meta_blocks = 0; | 1078 | ei->i_allocated_meta_blocks = 0; |
1077 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used); | 1079 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used); |
1078 | 1080 | ||
@@ -1090,9 +1092,23 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
1090 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1092 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
1091 | 1093 | ||
1092 | /* Update quota subsystem */ | 1094 | /* Update quota subsystem */ |
1093 | vfs_dq_claim_block(inode, used); | 1095 | if (quota_claim) { |
1094 | if (mdb_free) | 1096 | vfs_dq_claim_block(inode, used); |
1095 | vfs_dq_release_reservation_block(inode, mdb_free); | 1097 | if (mdb_free) |
1098 | vfs_dq_release_reservation_block(inode, mdb_free); | ||
1099 | } else { | ||
1100 | /* | ||
1101 | * We did fallocate with an offset that is already delayed | ||
1102 | * allocated. So on delayed allocated writeback we should | ||
1103 | * not update the quota for allocated blocks. But then | ||
1104 | * converting an fallocate region to initialized region would | ||
1105 | * have caused a metadata allocation. So claim quota for | ||
1106 | * that | ||
1107 | */ | ||
1108 | if (allocated_meta_blocks) | ||
1109 | vfs_dq_claim_block(inode, allocated_meta_blocks); | ||
1110 | vfs_dq_release_reservation_block(inode, mdb_free + used); | ||
1111 | } | ||
1096 | 1112 | ||
1097 | /* | 1113 | /* |
1098 | * If we have done all the pending block allocations and if | 1114 | * If we have done all the pending block allocations and if |
@@ -1292,18 +1308,20 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, | |||
1292 | */ | 1308 | */ |
1293 | EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; | 1309 | EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; |
1294 | } | 1310 | } |
1295 | } | ||
1296 | 1311 | ||
1312 | /* | ||
1313 | * Update reserved blocks/metadata blocks after successful | ||
1314 | * block allocation which had been deferred till now. We don't | ||
1315 | * support fallocate for non extent files. So we can update | ||
1316 | * reserve space here. | ||
1317 | */ | ||
1318 | if ((retval > 0) && | ||
1319 | (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) | ||
1320 | ext4_da_update_reserve_space(inode, retval, 1); | ||
1321 | } | ||
1297 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) | 1322 | if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
1298 | EXT4_I(inode)->i_delalloc_reserved_flag = 0; | 1323 | EXT4_I(inode)->i_delalloc_reserved_flag = 0; |
1299 | 1324 | ||
1300 | /* | ||
1301 | * Update reserved blocks/metadata blocks after successful | ||
1302 | * block allocation which had been deferred till now. | ||
1303 | */ | ||
1304 | if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE)) | ||
1305 | ext4_da_update_reserve_space(inode, retval); | ||
1306 | |||
1307 | up_write((&EXT4_I(inode)->i_data_sem)); | 1325 | up_write((&EXT4_I(inode)->i_data_sem)); |
1308 | if (retval > 0 && buffer_mapped(bh)) { | 1326 | if (retval > 0 && buffer_mapped(bh)) { |
1309 | int ret = check_block_validity(inode, "file system " | 1327 | int ret = check_block_validity(inode, "file system " |
@@ -1835,24 +1853,12 @@ repeat: | |||
1835 | * later. Real quota accounting is done at pages writeout | 1853 | * later. Real quota accounting is done at pages writeout |
1836 | * time. | 1854 | * time. |
1837 | */ | 1855 | */ |
1838 | if (vfs_dq_reserve_block(inode, md_needed + 1)) { | 1856 | if (vfs_dq_reserve_block(inode, md_needed + 1)) |
1839 | /* | ||
1840 | * We tend to badly over-estimate the amount of | ||
1841 | * metadata blocks which are needed, so if we have | ||
1842 | * reserved any metadata blocks, try to force out the | ||
1843 | * inode and see if we have any better luck. | ||
1844 | */ | ||
1845 | if (md_reserved && retries++ <= 3) | ||
1846 | goto retry; | ||
1847 | return -EDQUOT; | 1857 | return -EDQUOT; |
1848 | } | ||
1849 | 1858 | ||
1850 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { | 1859 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { |
1851 | vfs_dq_release_reservation_block(inode, md_needed + 1); | 1860 | vfs_dq_release_reservation_block(inode, md_needed + 1); |
1852 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1861 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
1853 | retry: | ||
1854 | if (md_reserved) | ||
1855 | write_inode_now(inode, (retries == 3)); | ||
1856 | yield(); | 1862 | yield(); |
1857 | goto repeat; | 1863 | goto repeat; |
1858 | } | 1864 | } |
@@ -2213,10 +2219,10 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) | |||
2213 | * variables are updated after the blocks have been allocated. | 2219 | * variables are updated after the blocks have been allocated. |
2214 | */ | 2220 | */ |
2215 | new.b_state = 0; | 2221 | new.b_state = 0; |
2216 | get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | | 2222 | get_blocks_flags = EXT4_GET_BLOCKS_CREATE; |
2217 | EXT4_GET_BLOCKS_DELALLOC_RESERVE); | ||
2218 | if (mpd->b_state & (1 << BH_Delay)) | 2223 | if (mpd->b_state & (1 << BH_Delay)) |
2219 | get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; | 2224 | get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; |
2225 | |||
2220 | blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, | 2226 | blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, |
2221 | &new, get_blocks_flags); | 2227 | &new, get_blocks_flags); |
2222 | if (blks < 0) { | 2228 | if (blks < 0) { |
@@ -3032,7 +3038,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, | |||
3032 | loff_t pos, unsigned len, unsigned flags, | 3038 | loff_t pos, unsigned len, unsigned flags, |
3033 | struct page **pagep, void **fsdata) | 3039 | struct page **pagep, void **fsdata) |
3034 | { | 3040 | { |
3035 | int ret, retries = 0; | 3041 | int ret, retries = 0, quota_retries = 0; |
3036 | struct page *page; | 3042 | struct page *page; |
3037 | pgoff_t index; | 3043 | pgoff_t index; |
3038 | unsigned from, to; | 3044 | unsigned from, to; |
@@ -3091,6 +3097,22 @@ retry: | |||
3091 | 3097 | ||
3092 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 3098 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
3093 | goto retry; | 3099 | goto retry; |
3100 | |||
3101 | if ((ret == -EDQUOT) && | ||
3102 | EXT4_I(inode)->i_reserved_meta_blocks && | ||
3103 | (quota_retries++ < 3)) { | ||
3104 | /* | ||
3105 | * Since we often over-estimate the number of meta | ||
3106 | * data blocks required, we may sometimes get a | ||
3107 | * spurious out of quota error even though there would | ||
3108 | * be enough space once we write the data blocks and | ||
3109 | * find out how many meta data blocks were _really_ | ||
3110 | * required. So try forcing the inode write to see if | ||
3111 | * that helps. | ||
3112 | */ | ||
3113 | write_inode_now(inode, (quota_retries == 3)); | ||
3114 | goto retry; | ||
3115 | } | ||
3094 | out: | 3116 | out: |
3095 | return ret; | 3117 | return ret; |
3096 | } | 3118 | } |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 97e01dc0d95f..5ef953e6f908 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -199,7 +199,9 @@ static int setfl(int fd, struct file * filp, unsigned long arg) | |||
199 | static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, | 199 | static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, |
200 | int force) | 200 | int force) |
201 | { | 201 | { |
202 | write_lock_irq(&filp->f_owner.lock); | 202 | unsigned long flags; |
203 | |||
204 | write_lock_irqsave(&filp->f_owner.lock, flags); | ||
203 | if (force || !filp->f_owner.pid) { | 205 | if (force || !filp->f_owner.pid) { |
204 | put_pid(filp->f_owner.pid); | 206 | put_pid(filp->f_owner.pid); |
205 | filp->f_owner.pid = get_pid(pid); | 207 | filp->f_owner.pid = get_pid(pid); |
@@ -211,7 +213,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, | |||
211 | filp->f_owner.euid = cred->euid; | 213 | filp->f_owner.euid = cred->euid; |
212 | } | 214 | } |
213 | } | 215 | } |
214 | write_unlock_irq(&filp->f_owner.lock); | 216 | write_unlock_irqrestore(&filp->f_owner.lock, flags); |
215 | } | 217 | } |
216 | 218 | ||
217 | int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, | 219 | int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, |
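The f_modown() change above swaps the unconditional-IRQ lock for the state-preserving variant, so the function stays safe when a caller already has interrupts disabled. The general pattern, shown on a hypothetical lock rather than this file's code:

        #include <linux/spinlock.h>

        static DEFINE_RWLOCK(owner_lock);               /* hypothetical lock */

        static void update_owner(void)
        {
                unsigned long flags;

                /* irqsave remembers whether IRQs were already off; the plain
                 * _irq variant would turn them back on at unlock regardless. */
                write_lock_irqsave(&owner_lock, flags);
                /* ... update the owner fields ... */
                write_unlock_irqrestore(&owner_lock, flags);
        }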
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index ff7664e0c3cd..4c4e0f8375b3 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -353,6 +353,11 @@ struct ttm_bo_driver { | |||
353 | /* notify the driver we are taking a fault on this BO | 353 | /* notify the driver we are taking a fault on this BO |
354 | * and have reserved it */ | 354 | * and have reserved it */ |
355 | void (*fault_reserve_notify)(struct ttm_buffer_object *bo); | 355 | void (*fault_reserve_notify)(struct ttm_buffer_object *bo); |
356 | |||
357 | /** | ||
358 | * notify the driver that we're about to swap out this bo | ||
359 | */ | ||
360 | void (*swap_notify) (struct ttm_buffer_object *bo); | ||
356 | }; | 361 | }; |
357 | 362 | ||
358 | /** | 363 | /** |
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 94dd10366a78..91bb4f27238c 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/fcntl.h> | 11 | #include <linux/fcntl.h> |
12 | #include <linux/file.h> | 12 | #include <linux/file.h> |
13 | #include <linux/wait.h> | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * CAREFUL: Check include/asm-generic/fcntl.h when defining | 16 | * CAREFUL: Check include/asm-generic/fcntl.h when defining |
@@ -34,6 +35,9 @@ struct file *eventfd_fget(int fd); | |||
34 | struct eventfd_ctx *eventfd_ctx_fdget(int fd); | 35 | struct eventfd_ctx *eventfd_ctx_fdget(int fd); |
35 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); | 36 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); |
36 | int eventfd_signal(struct eventfd_ctx *ctx, int n); | 37 | int eventfd_signal(struct eventfd_ctx *ctx, int n); |
38 | ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); | ||
39 | int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, | ||
40 | __u64 *cnt); | ||
37 | 41 | ||
38 | #else /* CONFIG_EVENTFD */ | 42 | #else /* CONFIG_EVENTFD */ |
39 | 43 | ||
@@ -61,6 +65,18 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) | |||
61 | 65 | ||
62 | } | 66 | } |
63 | 67 | ||
68 | static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, | ||
69 | __u64 *cnt) | ||
70 | { | ||
71 | return -ENOSYS; | ||
72 | } | ||
73 | |||
74 | static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, | ||
75 | wait_queue_t *wait, __u64 *cnt) | ||
76 | { | ||
77 | return -ENOSYS; | ||
78 | } | ||
79 | |||
64 | #endif | 80 | #endif |
65 | 81 | ||
66 | #endif /* _LINUX_EVENTFD_H */ | 82 | #endif /* _LINUX_EVENTFD_H */ |
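The two new declarations let in-kernel users consume an eventfd count and detach a waiter without going through VFS read(); the CONFIG_EVENTFD=n stubs keep such users compilable. A hedged sketch of kernel-side use (the helper and its caller are hypothetical):

        #include <linux/eventfd.h>
        #include <linux/err.h>

        /* Hypothetical helper: drain whatever has been signalled on an eventfd. */
        static int drain_eventfd(int fd)
        {
                struct eventfd_ctx *ctx;
                __u64 cnt = 0;
                ssize_t ret;

                ctx = eventfd_ctx_fdget(fd);
                if (IS_ERR(ctx))
                        return PTR_ERR(ctx);

                /* no_wait = 1: return -EAGAIN instead of sleeping when the count is 0 */
                ret = eventfd_ctx_read(ctx, 1, &cnt);
                eventfd_ctx_put(ctx);

                return ret < 0 ? ret : 0;
        }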
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 1f716d9f714b..520ecf86cbb3 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h | |||
@@ -380,7 +380,7 @@ struct fw_cdev_initiate_bus_reset { | |||
380 | * @immediate: If non-zero, immediate key to insert before pointer | 380 | * @immediate: If non-zero, immediate key to insert before pointer |
381 | * @key: Upper 8 bits of root directory pointer | 381 | * @key: Upper 8 bits of root directory pointer |
382 | * @data: Userspace pointer to contents of descriptor block | 382 | * @data: Userspace pointer to contents of descriptor block |
383 | * @length: Length of descriptor block data, in bytes | 383 | * @length: Length of descriptor block data, in quadlets |
384 | * @handle: Handle to the descriptor, written by the kernel | 384 | * @handle: Handle to the descriptor, written by the kernel |
385 | * | 385 | * |
386 | * Add a descriptor block and optionally a preceding immediate key to the local | 386 | * Add a descriptor block and optionally a preceding immediate key to the local |
@@ -394,6 +394,8 @@ struct fw_cdev_initiate_bus_reset { | |||
394 | * If not 0, the @immediate field specifies an immediate key which will be | 394 | * If not 0, the @immediate field specifies an immediate key which will be |
395 | * inserted before the root directory pointer. | 395 | * inserted before the root directory pointer. |
396 | * | 396 | * |
397 | * @immediate, @key, and @data array elements are CPU-endian quadlets. | ||
398 | * | ||
397 | * If successful, the kernel adds the descriptor and writes back a handle to the | 399 | * If successful, the kernel adds the descriptor and writes back a handle to the |
398 | * kernel-side object to be used for later removal of the descriptor block and | 400 | * kernel-side object to be used for later removal of the descriptor block and |
399 | * immediate key. | 401 | * immediate key. |
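With the length now documented in quadlets (32-bit words), a userspace caller converts from a byte count before issuing the ioctl. A short sketch; the buffer is hypothetical, the length field is the one documented above:

        struct fw_cdev_add_descriptor desc = { 0 };
        size_t block_bytes = 32;                    /* descriptor block, multiple of 4 */

        desc.data   = (__u64)(uintptr_t)block;      /* 'block': hypothetical quadlet buffer */
        desc.length = block_bytes / 4;              /* 8 quadlets, not 32 bytes */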
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index e32aa268efac..24b44145a886 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h | |||
@@ -17,6 +17,7 @@ | |||
17 | enum kmsg_dump_reason { | 17 | enum kmsg_dump_reason { |
18 | KMSG_DUMP_OOPS, | 18 | KMSG_DUMP_OOPS, |
19 | KMSG_DUMP_PANIC, | 19 | KMSG_DUMP_PANIC, |
20 | KMSG_DUMP_KEXEC, | ||
20 | }; | 21 | }; |
21 | 22 | ||
22 | /** | 23 | /** |
diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h new file mode 100644 index 000000000000..8dfb7e1421c5 --- /dev/null +++ b/include/linux/mtd/pismo.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * PISMO memory driver - http://www.pismoworld.org/ | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License. | ||
7 | */ | ||
8 | #ifndef __LINUX_MTD_PISMO_H | ||
9 | #define __LINUX_MTD_PISMO_H | ||
10 | |||
11 | struct pismo_pdata { | ||
12 | void (*set_vpp)(void *, int); | ||
13 | void *vpp_data; | ||
14 | phys_addr_t cs_addrs[5]; | ||
15 | }; | ||
16 | |||
17 | #endif | ||
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ed5d7501e181..3c62ed408492 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -253,6 +253,8 @@ extern struct page * read_cache_page_async(struct address_space *mapping, | |||
253 | extern struct page * read_cache_page(struct address_space *mapping, | 253 | extern struct page * read_cache_page(struct address_space *mapping, |
254 | pgoff_t index, filler_t *filler, | 254 | pgoff_t index, filler_t *filler, |
255 | void *data); | 255 | void *data); |
256 | extern struct page * read_cache_page_gfp(struct address_space *mapping, | ||
257 | pgoff_t index, gfp_t gfp_mask); | ||
256 | extern int read_cache_pages(struct address_space *mapping, | 258 | extern int read_cache_pages(struct address_space *mapping, |
257 | struct list_head *pages, filler_t *filler, void *data); | 259 | struct list_head *pages, filler_t *filler, void *data); |
258 | 260 | ||
diff --git a/include/linux/phy.h b/include/linux/phy.h index 7968defd2fa7..6a7eb402165d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -485,6 +485,7 @@ void phy_driver_unregister(struct phy_driver *drv); | |||
485 | int phy_driver_register(struct phy_driver *new_driver); | 485 | int phy_driver_register(struct phy_driver *new_driver); |
486 | void phy_prepare_link(struct phy_device *phydev, | 486 | void phy_prepare_link(struct phy_device *phydev, |
487 | void (*adjust_link)(struct net_device *)); | 487 | void (*adjust_link)(struct net_device *)); |
488 | void phy_state_machine(struct work_struct *work); | ||
488 | void phy_start_machine(struct phy_device *phydev, | 489 | void phy_start_machine(struct phy_device *phydev, |
489 | void (*handler)(struct net_device *)); | 490 | void (*handler)(struct net_device *)); |
490 | void phy_stop_machine(struct phy_device *phydev); | 491 | void phy_stop_machine(struct phy_device *phydev); |
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 56f8e5585df7..74f119a2829a 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/wait.h> | 5 | #include <linux/wait.h> |
6 | #include <linux/workqueue.h> | 6 | #include <linux/workqueue.h> |
7 | #include <linux/xfrm.h> | 7 | #include <linux/xfrm.h> |
8 | #include <net/dst_ops.h> | ||
8 | 9 | ||
9 | struct ctl_table_header; | 10 | struct ctl_table_header; |
10 | 11 | ||
@@ -42,6 +43,11 @@ struct netns_xfrm { | |||
42 | unsigned int policy_count[XFRM_POLICY_MAX * 2]; | 43 | unsigned int policy_count[XFRM_POLICY_MAX * 2]; |
43 | struct work_struct policy_hash_work; | 44 | struct work_struct policy_hash_work; |
44 | 45 | ||
46 | struct dst_ops xfrm4_dst_ops; | ||
47 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
48 | struct dst_ops xfrm6_dst_ops; | ||
49 | #endif | ||
50 | |||
45 | struct sock *nlsk; | 51 | struct sock *nlsk; |
46 | struct sock *nlsk_stash; | 52 | struct sock *nlsk_stash; |
47 | 53 | ||
diff --git a/include/net/netrom.h b/include/net/netrom.h index 15696b1fd30f..ab170a60e7d3 100644 --- a/include/net/netrom.h +++ b/include/net/netrom.h | |||
@@ -132,6 +132,8 @@ static __inline__ void nr_node_put(struct nr_node *nr_node) | |||
132 | static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh) | 132 | static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh) |
133 | { | 133 | { |
134 | if (atomic_dec_and_test(&nr_neigh->refcount)) { | 134 | if (atomic_dec_and_test(&nr_neigh->refcount)) { |
135 | if (nr_neigh->ax25) | ||
136 | ax25_cb_put(nr_neigh->ax25); | ||
135 | kfree(nr_neigh->digipeat); | 137 | kfree(nr_neigh->digipeat); |
136 | kfree(nr_neigh); | 138 | kfree(nr_neigh); |
137 | } | 139 | } |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6d85861ab990..60c27706e7b9 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -1367,8 +1367,8 @@ struct xfrmk_spdinfo { | |||
1367 | extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq); | 1367 | extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq); |
1368 | extern int xfrm_state_delete(struct xfrm_state *x); | 1368 | extern int xfrm_state_delete(struct xfrm_state *x); |
1369 | extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info); | 1369 | extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info); |
1370 | extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si); | 1370 | extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); |
1371 | extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si); | 1371 | extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); |
1372 | extern int xfrm_replay_check(struct xfrm_state *x, | 1372 | extern int xfrm_replay_check(struct xfrm_state *x, |
1373 | struct sk_buff *skb, __be32 seq); | 1373 | struct sk_buff *skb, __be32 seq); |
1374 | extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); | 1374 | extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); |
diff --git a/include/scsi/scsi_bsg_fc.h b/include/scsi/scsi_bsg_fc.h index a4b233318179..91a4e4ff9a9b 100644 --- a/include/scsi/scsi_bsg_fc.h +++ b/include/scsi/scsi_bsg_fc.h | |||
@@ -292,7 +292,7 @@ struct fc_bsg_request { | |||
292 | struct fc_bsg_rport_els r_els; | 292 | struct fc_bsg_rport_els r_els; |
293 | struct fc_bsg_rport_ct r_ct; | 293 | struct fc_bsg_rport_ct r_ct; |
294 | } rqst_data; | 294 | } rqst_data; |
295 | }; | 295 | } __attribute__((packed)); |
296 | 296 | ||
297 | 297 | ||
298 | /* response (request sense data) structure of the sg_io_v4 */ | 298 | /* response (request sense data) structure of the sg_io_v4 */ |
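Marking fc_bsg_request packed removes ABI-dependent padding, presumably so 32-bit userspace and a 64-bit kernel agree on the request layout passed through sg_io_v4. A stand-alone illustration of what the attribute does (made-up structure, not the real one):

        #include <stdio.h>
        #include <stdint.h>

        struct plain   { uint8_t tag; uint32_t value; };
        struct packed_ { uint8_t tag; uint32_t value; } __attribute__((packed));

        int main(void)
        {
                /* The compiler pads 'plain' so 'value' is naturally aligned;
                 * the packed variant keeps the members back to back. */
                printf("plain:  %zu bytes\n", sizeof(struct plain));   /* typically 8 */
                printf("packed: %zu bytes\n", sizeof(struct packed_)); /* 5 */
                return 0;
        }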
diff --git a/kernel/kexec.c b/kernel/kexec.c index a9a93d9ee7a7..ef077fb73155 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/console.h> | 32 | #include <linux/console.h> |
33 | #include <linux/vmalloc.h> | 33 | #include <linux/vmalloc.h> |
34 | #include <linux/swap.h> | 34 | #include <linux/swap.h> |
35 | #include <linux/kmsg_dump.h> | ||
35 | 36 | ||
36 | #include <asm/page.h> | 37 | #include <asm/page.h> |
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
@@ -1074,6 +1075,9 @@ void crash_kexec(struct pt_regs *regs) | |||
1074 | if (mutex_trylock(&kexec_mutex)) { | 1075 | if (mutex_trylock(&kexec_mutex)) { |
1075 | if (kexec_crash_image) { | 1076 | if (kexec_crash_image) { |
1076 | struct pt_regs fixed_regs; | 1077 | struct pt_regs fixed_regs; |
1078 | |||
1079 | kmsg_dump(KMSG_DUMP_KEXEC); | ||
1080 | |||
1077 | crash_setup_regs(&fixed_regs, regs); | 1081 | crash_setup_regs(&fixed_regs, regs); |
1078 | crash_save_vmcoreinfo(); | 1082 | crash_save_vmcoreinfo(); |
1079 | machine_crash_shutdown(&fixed_regs); | 1083 | machine_crash_shutdown(&fixed_regs); |
diff --git a/kernel/panic.c b/kernel/panic.c index 5827f7b97254..c787333282b8 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -75,7 +75,6 @@ NORET_TYPE void panic(const char * fmt, ...) | |||
75 | dump_stack(); | 75 | dump_stack(); |
76 | #endif | 76 | #endif |
77 | 77 | ||
78 | kmsg_dump(KMSG_DUMP_PANIC); | ||
79 | /* | 78 | /* |
80 | * If we have crashed and we have a crash kernel loaded let it handle | 79 | * If we have crashed and we have a crash kernel loaded let it handle |
81 | * everything else. | 80 | * everything else. |
@@ -83,6 +82,8 @@ NORET_TYPE void panic(const char * fmt, ...) | |||
83 | */ | 82 | */ |
84 | crash_kexec(NULL); | 83 | crash_kexec(NULL); |
85 | 84 | ||
85 | kmsg_dump(KMSG_DUMP_PANIC); | ||
86 | |||
86 | /* | 87 | /* |
87 | * Note smp_send_stop is the usual smp shutdown function, which | 88 | * Note smp_send_stop is the usual smp shutdown function, which |
88 | * unfortunately means it may not be hardened to work in a panic | 89 | * unfortunately means it may not be hardened to work in a panic |
diff --git a/kernel/printk.c b/kernel/printk.c index 17463ca2e229..1751c456b71f 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -1467,6 +1467,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_unregister); | |||
1467 | static const char const *kmsg_reasons[] = { | 1467 | static const char const *kmsg_reasons[] = { |
1468 | [KMSG_DUMP_OOPS] = "oops", | 1468 | [KMSG_DUMP_OOPS] = "oops", |
1469 | [KMSG_DUMP_PANIC] = "panic", | 1469 | [KMSG_DUMP_PANIC] = "panic", |
1470 | [KMSG_DUMP_KEXEC] = "kexec", | ||
1470 | }; | 1471 | }; |
1471 | 1472 | ||
1472 | static const char *kmsg_to_str(enum kmsg_dump_reason reason) | 1473 | static const char *kmsg_to_str(enum kmsg_dump_reason reason) |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 6f740d9f0948..d7395fdfb9f3 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -259,7 +259,8 @@ void clockevents_notify(unsigned long reason, void *arg) | |||
259 | cpu = *((int *)arg); | 259 | cpu = *((int *)arg); |
260 | list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { | 260 | list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { |
261 | if (cpumask_test_cpu(cpu, dev->cpumask) && | 261 | if (cpumask_test_cpu(cpu, dev->cpumask) && |
262 | cpumask_weight(dev->cpumask) == 1) { | 262 | cpumask_weight(dev->cpumask) == 1 && |
263 | !tick_is_broadcast_device(dev)) { | ||
263 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | 264 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); |
264 | list_del(&dev->list); | 265 | list_del(&dev->list); |
265 | } | 266 | } |
diff --git a/mm/filemap.c b/mm/filemap.c index 96ac6b0eb6cb..e3736923220e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -1634,14 +1634,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap); | |||
1634 | static struct page *__read_cache_page(struct address_space *mapping, | 1634 | static struct page *__read_cache_page(struct address_space *mapping, |
1635 | pgoff_t index, | 1635 | pgoff_t index, |
1636 | int (*filler)(void *,struct page*), | 1636 | int (*filler)(void *,struct page*), |
1637 | void *data) | 1637 | void *data, |
1638 | gfp_t gfp) | ||
1638 | { | 1639 | { |
1639 | struct page *page; | 1640 | struct page *page; |
1640 | int err; | 1641 | int err; |
1641 | repeat: | 1642 | repeat: |
1642 | page = find_get_page(mapping, index); | 1643 | page = find_get_page(mapping, index); |
1643 | if (!page) { | 1644 | if (!page) { |
1644 | page = page_cache_alloc_cold(mapping); | 1645 | page = __page_cache_alloc(gfp | __GFP_COLD); |
1645 | if (!page) | 1646 | if (!page) |
1646 | return ERR_PTR(-ENOMEM); | 1647 | return ERR_PTR(-ENOMEM); |
1647 | err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); | 1648 | err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); |
@@ -1661,31 +1662,18 @@ repeat: | |||
1661 | return page; | 1662 | return page; |
1662 | } | 1663 | } |
1663 | 1664 | ||
1664 | /** | 1665 | static struct page *do_read_cache_page(struct address_space *mapping, |
1665 | * read_cache_page_async - read into page cache, fill it if needed | ||
1666 | * @mapping: the page's address_space | ||
1667 | * @index: the page index | ||
1668 | * @filler: function to perform the read | ||
1669 | * @data: destination for read data | ||
1670 | * | ||
1671 | * Same as read_cache_page, but don't wait for page to become unlocked | ||
1672 | * after submitting it to the filler. | ||
1673 | * | ||
1674 | * Read into the page cache. If a page already exists, and PageUptodate() is | ||
1675 | * not set, try to fill the page but don't wait for it to become unlocked. | ||
1676 | * | ||
1677 | * If the page does not get brought uptodate, return -EIO. | ||
1678 | */ | ||
1679 | struct page *read_cache_page_async(struct address_space *mapping, | ||
1680 | pgoff_t index, | 1666 | pgoff_t index, |
1681 | int (*filler)(void *,struct page*), | 1667 | int (*filler)(void *,struct page*), |
1682 | void *data) | 1668 | void *data, |
1669 | gfp_t gfp) | ||
1670 | |||
1683 | { | 1671 | { |
1684 | struct page *page; | 1672 | struct page *page; |
1685 | int err; | 1673 | int err; |
1686 | 1674 | ||
1687 | retry: | 1675 | retry: |
1688 | page = __read_cache_page(mapping, index, filler, data); | 1676 | page = __read_cache_page(mapping, index, filler, data, gfp); |
1689 | if (IS_ERR(page)) | 1677 | if (IS_ERR(page)) |
1690 | return page; | 1678 | return page; |
1691 | if (PageUptodate(page)) | 1679 | if (PageUptodate(page)) |
@@ -1710,8 +1698,67 @@ out: | |||
1710 | mark_page_accessed(page); | 1698 | mark_page_accessed(page); |
1711 | return page; | 1699 | return page; |
1712 | } | 1700 | } |
1701 | |||
1702 | /** | ||
1703 | * read_cache_page_async - read into page cache, fill it if needed | ||
1704 | * @mapping: the page's address_space | ||
1705 | * @index: the page index | ||
1706 | * @filler: function to perform the read | ||
1707 | * @data: destination for read data | ||
1708 | * | ||
1709 | * Same as read_cache_page, but don't wait for page to become unlocked | ||
1710 | * after submitting it to the filler. | ||
1711 | * | ||
1712 | * Read into the page cache. If a page already exists, and PageUptodate() is | ||
1713 | * not set, try to fill the page but don't wait for it to become unlocked. | ||
1714 | * | ||
1715 | * If the page does not get brought uptodate, return -EIO. | ||
1716 | */ | ||
1717 | struct page *read_cache_page_async(struct address_space *mapping, | ||
1718 | pgoff_t index, | ||
1719 | int (*filler)(void *,struct page*), | ||
1720 | void *data) | ||
1721 | { | ||
1722 | return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); | ||
1723 | } | ||
1713 | EXPORT_SYMBOL(read_cache_page_async); | 1724 | EXPORT_SYMBOL(read_cache_page_async); |
1714 | 1725 | ||
1726 | static struct page *wait_on_page_read(struct page *page) | ||
1727 | { | ||
1728 | if (!IS_ERR(page)) { | ||
1729 | wait_on_page_locked(page); | ||
1730 | if (!PageUptodate(page)) { | ||
1731 | page_cache_release(page); | ||
1732 | page = ERR_PTR(-EIO); | ||
1733 | } | ||
1734 | } | ||
1735 | return page; | ||
1736 | } | ||
1737 | |||
1738 | /** | ||
1739 | * read_cache_page_gfp - read into page cache, using specified page allocation flags. | ||
1740 | * @mapping: the page's address_space | ||
1741 | * @index: the page index | ||
1742 | * @gfp: the page allocator flags to use if allocating | ||
1743 | * | ||
1744 | * This is the same as "read_mapping_page(mapping, index, NULL)", but with | ||
1745 | * any new page allocations done using the specified allocation flags. Note | ||
1746 | * that the Radix tree operations will still use GFP_KERNEL, so you can't | ||
1747 | * expect to do this atomically or anything like that - but you can pass in | ||
1748 | * other page requirements. | ||
1749 | * | ||
1750 | * If the page does not get brought uptodate, return -EIO. | ||
1751 | */ | ||
1752 | struct page *read_cache_page_gfp(struct address_space *mapping, | ||
1753 | pgoff_t index, | ||
1754 | gfp_t gfp) | ||
1755 | { | ||
1756 | filler_t *filler = (filler_t *)mapping->a_ops->readpage; | ||
1757 | |||
1758 | return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp)); | ||
1759 | } | ||
1760 | EXPORT_SYMBOL(read_cache_page_gfp); | ||
1761 | |||
1715 | /** | 1762 | /** |
1716 | * read_cache_page - read into page cache, fill it if needed | 1763 | * read_cache_page - read into page cache, fill it if needed |
1717 | * @mapping: the page's address_space | 1764 | * @mapping: the page's address_space |
@@ -1729,18 +1776,7 @@ struct page *read_cache_page(struct address_space *mapping, | |||
1729 | int (*filler)(void *,struct page*), | 1776 | int (*filler)(void *,struct page*), |
1730 | void *data) | 1777 | void *data) |
1731 | { | 1778 | { |
1732 | struct page *page; | 1779 | return wait_on_page_read(read_cache_page_async(mapping, index, filler, data)); |
1733 | |||
1734 | page = read_cache_page_async(mapping, index, filler, data); | ||
1735 | if (IS_ERR(page)) | ||
1736 | goto out; | ||
1737 | wait_on_page_locked(page); | ||
1738 | if (!PageUptodate(page)) { | ||
1739 | page_cache_release(page); | ||
1740 | page = ERR_PTR(-EIO); | ||
1741 | } | ||
1742 | out: | ||
1743 | return page; | ||
1744 | } | 1780 | } |
1745 | EXPORT_SYMBOL(read_cache_page); | 1781 | EXPORT_SYMBOL(read_cache_page); |
1746 | 1782 | ||
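read_cache_page_gfp() above is the new entry point for callers that need to constrain page allocation. A hedged fragment of how a caller might use it; 'mapping' and 'index' come from the caller, and GFP_NOFS is just one plausible mask:

        struct page *page;

        page = read_cache_page_gfp(mapping, index, GFP_NOFS);
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* Returned uptodate with a reference held; drop it when done. */
        /* ... use the page contents ... */
        page_cache_release(page);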
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index b7889782047e..c1b92cab46c7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -163,7 +163,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
163 | goto err_unlock; | 163 | goto err_unlock; |
164 | } | 164 | } |
165 | 165 | ||
166 | rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, | 166 | rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats, |
167 | smp_processor_id()); | 167 | smp_processor_id()); |
168 | rx_stats->rx_packets++; | 168 | rx_stats->rx_packets++; |
169 | rx_stats->rx_bytes += skb->len; | 169 | rx_stats->rx_bytes += skb->len; |
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 9d4adfd22757..f2b3b56aa779 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c | |||
@@ -819,7 +819,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, | |||
819 | ma = &ifa->address; | 819 | ma = &ifa->address; |
820 | else { /* We need to make a copy of the entry. */ | 820 | else { /* We need to make a copy of the entry. */ |
821 | da.s_node = sa.s_node; | 821 | da.s_node = sa.s_node; |
822 | da.s_net = da.s_net; | 822 | da.s_net = sa.s_net; |
823 | ma = &da; | 823 | ma = &da; |
824 | } | 824 | } |
825 | 825 | ||
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index bf706f83a5c9..14912600ec57 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c | |||
@@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2 | |||
92 | #endif | 92 | #endif |
93 | } | 93 | } |
94 | 94 | ||
95 | /* | ||
96 | * There is one ref for the state machine; a caller needs | ||
97 | * one more to put it back, just like with the existing one. | ||
98 | */ | ||
99 | ax25_cb_hold(ax25); | ||
100 | |||
95 | ax25_cb_add(ax25); | 101 | ax25_cb_add(ax25); |
96 | 102 | ||
97 | ax25->state = AX25_STATE_1; | 103 | ax25->state = AX25_STATE_1; |
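The ax25_send_frame() hunk above takes an extra reference before the control block is published, so the state machine and the caller each own one. The discipline as a sketch (the call sites are hypothetical):

        ax25_cb *ax25 = ax25_create_cb();   /* refcount 1, held by the creator */

        ax25_cb_hold(ax25);                 /* +1 for the second owner (state machine) */
        ax25_cb_add(ax25);

        /* ... later, each owner drops exactly the reference it took ... */
        ax25_cb_put(ax25);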
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index f3e9ba1cfd01..57dfb9c8c4f2 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c | |||
@@ -77,34 +77,24 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, | |||
77 | return err; | 77 | return err; |
78 | } | 78 | } |
79 | 79 | ||
80 | static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) | 80 | static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...) |
81 | { | 81 | { |
82 | struct kmem_cache *slab; | 82 | struct kmem_cache *slab; |
83 | char slab_name_fmt[32], *slab_name; | ||
84 | va_list args; | 83 | va_list args; |
85 | 84 | ||
86 | va_start(args, fmt); | 85 | va_start(args, fmt); |
87 | vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); | 86 | vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); |
88 | va_end(args); | 87 | va_end(args); |
89 | 88 | ||
90 | slab_name = kstrdup(slab_name_fmt, GFP_KERNEL); | 89 | slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, |
91 | if (slab_name == NULL) | ||
92 | return NULL; | ||
93 | slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0, | ||
94 | SLAB_HWCACHE_ALIGN, NULL); | 90 | SLAB_HWCACHE_ALIGN, NULL); |
95 | if (slab == NULL) | ||
96 | kfree(slab_name); | ||
97 | return slab; | 91 | return slab; |
98 | } | 92 | } |
99 | 93 | ||
100 | static void ccid_kmem_cache_destroy(struct kmem_cache *slab) | 94 | static void ccid_kmem_cache_destroy(struct kmem_cache *slab) |
101 | { | 95 | { |
102 | if (slab != NULL) { | 96 | if (slab != NULL) |
103 | const char *name = kmem_cache_name(slab); | ||
104 | |||
105 | kmem_cache_destroy(slab); | 97 | kmem_cache_destroy(slab); |
106 | kfree(name); | ||
107 | } | ||
108 | } | 98 | } |
109 | 99 | ||
110 | static int ccid_activate(struct ccid_operations *ccid_ops) | 100 | static int ccid_activate(struct ccid_operations *ccid_ops) |
@@ -113,6 +103,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops) | |||
113 | 103 | ||
114 | ccid_ops->ccid_hc_rx_slab = | 104 | ccid_ops->ccid_hc_rx_slab = |
115 | ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, | 105 | ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, |
106 | ccid_ops->ccid_hc_rx_slab_name, | ||
116 | "ccid%u_hc_rx_sock", | 107 | "ccid%u_hc_rx_sock", |
117 | ccid_ops->ccid_id); | 108 | ccid_ops->ccid_id); |
118 | if (ccid_ops->ccid_hc_rx_slab == NULL) | 109 | if (ccid_ops->ccid_hc_rx_slab == NULL) |
@@ -120,6 +111,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops) | |||
120 | 111 | ||
121 | ccid_ops->ccid_hc_tx_slab = | 112 | ccid_ops->ccid_hc_tx_slab = |
122 | ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, | 113 | ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, |
114 | ccid_ops->ccid_hc_tx_slab_name, | ||
123 | "ccid%u_hc_tx_sock", | 115 | "ccid%u_hc_tx_sock", |
124 | ccid_ops->ccid_id); | 116 | ccid_ops->ccid_id); |
125 | if (ccid_ops->ccid_hc_tx_slab == NULL) | 117 | if (ccid_ops->ccid_hc_tx_slab == NULL) |
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index facedd20b531..269958bf7fe9 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -49,6 +49,8 @@ struct ccid_operations { | |||
49 | const char *ccid_name; | 49 | const char *ccid_name; |
50 | struct kmem_cache *ccid_hc_rx_slab, | 50 | struct kmem_cache *ccid_hc_rx_slab, |
51 | *ccid_hc_tx_slab; | 51 | *ccid_hc_tx_slab; |
52 | char ccid_hc_rx_slab_name[32]; | ||
53 | char ccid_hc_tx_slab_name[32]; | ||
52 | __u32 ccid_hc_rx_obj_size, | 54 | __u32 ccid_hc_rx_obj_size, |
53 | ccid_hc_tx_obj_size; | 55 | ccid_hc_tx_obj_size; |
54 | /* Interface Routines */ | 56 | /* Interface Routines */ |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index a1362dc8abb0..bace1d8cbcfd 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -161,7 +161,8 @@ static __init int dccpprobe_init(void) | |||
161 | if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) | 161 | if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) |
162 | goto err0; | 162 | goto err0; |
163 | 163 | ||
164 | ret = register_jprobe(&dccp_send_probe); | 164 | ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), |
165 | "dccp"); | ||
165 | if (ret) | 166 | if (ret) |
166 | goto err1; | 167 | goto err1; |
167 | 168 | ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index bdb78dd180ce..1aaa8110d84b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -368,7 +368,7 @@ static int inet_diag_bc_run(const void *bc, int len, | |||
368 | yes = entry->sport >= op[1].no; | 368 | yes = entry->sport >= op[1].no; |
369 | break; | 369 | break; |
370 | case INET_DIAG_BC_S_LE: | 370 | case INET_DIAG_BC_S_LE: |
371 | yes = entry->dport <= op[1].no; | 371 | yes = entry->sport <= op[1].no; |
372 | break; | 372 | break; |
373 | case INET_DIAG_BC_D_GE: | 373 | case INET_DIAG_BC_D_GE: |
374 | yes = entry->dport >= op[1].no; | 374 | yes = entry->dport >= op[1].no; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e446496f564f..d62b05d33384 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -586,7 +586,9 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net) | |||
586 | { | 586 | { |
587 | remove_proc_entry("rt_cache", net->proc_net_stat); | 587 | remove_proc_entry("rt_cache", net->proc_net_stat); |
588 | remove_proc_entry("rt_cache", net->proc_net); | 588 | remove_proc_entry("rt_cache", net->proc_net); |
589 | #ifdef CONFIG_NET_CLS_ROUTE | ||
589 | remove_proc_entry("rt_acct", net->proc_net); | 590 | remove_proc_entry("rt_acct", net->proc_net); |
591 | #endif | ||
590 | } | 592 | } |
591 | 593 | ||
592 | static struct pernet_operations ip_rt_proc_ops __net_initdata = { | 594 | static struct pernet_operations ip_rt_proc_ops __net_initdata = { |
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index bb110c5ce1d2..9bc805df95d2 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -39,9 +39,9 @@ static int port __read_mostly = 0; | |||
39 | MODULE_PARM_DESC(port, "Port to match (0=all)"); | 39 | MODULE_PARM_DESC(port, "Port to match (0=all)"); |
40 | module_param(port, int, 0); | 40 | module_param(port, int, 0); |
41 | 41 | ||
42 | static int bufsize __read_mostly = 4096; | 42 | static unsigned int bufsize __read_mostly = 4096; |
43 | MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); | 43 | MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); |
44 | module_param(bufsize, int, 0); | 44 | module_param(bufsize, uint, 0); |
45 | 45 | ||
46 | static int full __read_mostly; | 46 | static int full __read_mostly; |
47 | MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); | 47 | MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); |
@@ -75,12 +75,12 @@ static struct { | |||
75 | 75 | ||
76 | static inline int tcp_probe_used(void) | 76 | static inline int tcp_probe_used(void) |
77 | { | 77 | { |
78 | return (tcp_probe.head - tcp_probe.tail) % bufsize; | 78 | return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); |
79 | } | 79 | } |
80 | 80 | ||
81 | static inline int tcp_probe_avail(void) | 81 | static inline int tcp_probe_avail(void) |
82 | { | 82 | { |
83 | return bufsize - tcp_probe_used(); | 83 | return bufsize - tcp_probe_used() - 1; |
84 | } | 84 | } |
85 | 85 | ||
86 | /* | 86 | /* |
@@ -116,7 +116,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
116 | p->ssthresh = tcp_current_ssthresh(sk); | 116 | p->ssthresh = tcp_current_ssthresh(sk); |
117 | p->srtt = tp->srtt >> 3; | 117 | p->srtt = tp->srtt >> 3; |
118 | 118 | ||
119 | tcp_probe.head = (tcp_probe.head + 1) % bufsize; | 119 | tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); |
120 | } | 120 | } |
121 | tcp_probe.lastcwnd = tp->snd_cwnd; | 121 | tcp_probe.lastcwnd = tp->snd_cwnd; |
122 | spin_unlock(&tcp_probe.lock); | 122 | spin_unlock(&tcp_probe.lock); |
@@ -149,7 +149,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file) | |||
149 | static int tcpprobe_sprint(char *tbuf, int n) | 149 | static int tcpprobe_sprint(char *tbuf, int n) |
150 | { | 150 | { |
151 | const struct tcp_log *p | 151 | const struct tcp_log *p |
152 | = tcp_probe.log + tcp_probe.tail % bufsize; | 152 | = tcp_probe.log + tcp_probe.tail; |
153 | struct timespec tv | 153 | struct timespec tv |
154 | = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); | 154 | = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); |
155 | 155 | ||
@@ -192,7 +192,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, | |||
192 | width = tcpprobe_sprint(tbuf, sizeof(tbuf)); | 192 | width = tcpprobe_sprint(tbuf, sizeof(tbuf)); |
193 | 193 | ||
194 | if (cnt + width < len) | 194 | if (cnt + width < len) |
195 | tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; | 195 | tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1); |
196 | 196 | ||
197 | spin_unlock_bh(&tcp_probe.lock); | 197 | spin_unlock_bh(&tcp_probe.lock); |
198 | 198 | ||
@@ -222,9 +222,10 @@ static __init int tcpprobe_init(void) | |||
222 | init_waitqueue_head(&tcp_probe.wait); | 222 | init_waitqueue_head(&tcp_probe.wait); |
223 | spin_lock_init(&tcp_probe.lock); | 223 | spin_lock_init(&tcp_probe.lock); |
224 | 224 | ||
225 | if (bufsize < 0) | 225 | if (bufsize == 0) |
226 | return -EINVAL; | 226 | return -EINVAL; |
227 | 227 | ||
228 | bufsize = roundup_pow_of_two(bufsize); | ||
228 | tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); | 229 | tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); |
229 | if (!tcp_probe.log) | 230 | if (!tcp_probe.log) |
230 | goto err0; | 231 | goto err0; |
@@ -236,7 +237,7 @@ static __init int tcpprobe_init(void) | |||
236 | if (ret) | 237 | if (ret) |
237 | goto err1; | 238 | goto err1; |
238 | 239 | ||
239 | pr_info("TCP probe registered (port=%d)\n", port); | 240 | pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize); |
240 | return 0; | 241 | return 0; |
241 | err1: | 242 | err1: |
242 | proc_net_remove(&init_net, procname); | 243 | proc_net_remove(&init_net, procname); |
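The tcp_probe changes above replace modulo indexing with masking, which is only valid because bufsize is first rounded up to a power of two, and they leave one slot unused so a full ring can be told apart from an empty one. A stand-alone illustration of the index math (plain user-space C, not the module's code):

        #include <assert.h>
        #include <stdio.h>

        #define BUFSIZE 8                    /* must be a power of two for the mask trick */

        static unsigned int head, tail;

        static unsigned int used(void)  { return (head - tail) & (BUFSIZE - 1); }
        static unsigned int avail(void) { return BUFSIZE - used() - 1; } /* one slot kept free */

        int main(void)
        {
                unsigned int i;

                for (i = 0; i < 20; i++) {
                        if (avail())
                                head = (head + 1) & (BUFSIZE - 1);  /* same as % BUFSIZE here */
                        else
                                tail = (tail + 1) & (BUFSIZE - 1);  /* no room: drop the oldest */
                }
                assert(used() <= BUFSIZE - 1);
                printf("used=%u avail=%u\n", used(), avail());
                return 0;
        }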
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 8c08a28d8f83..67107d63c1cd 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <net/xfrm.h> | 15 | #include <net/xfrm.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | 17 | ||
18 | static struct dst_ops xfrm4_dst_ops; | ||
19 | static struct xfrm_policy_afinfo xfrm4_policy_afinfo; | 18 | static struct xfrm_policy_afinfo xfrm4_policy_afinfo; |
20 | 19 | ||
21 | static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, | 20 | static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, |
@@ -190,8 +189,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
190 | 189 | ||
191 | static inline int xfrm4_garbage_collect(struct dst_ops *ops) | 190 | static inline int xfrm4_garbage_collect(struct dst_ops *ops) |
192 | { | 191 | { |
193 | xfrm4_policy_afinfo.garbage_collect(&init_net); | 192 | struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); |
194 | return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); | 193 | |
194 | xfrm4_policy_afinfo.garbage_collect(net); | ||
195 | return (atomic_read(&ops->entries) > ops->gc_thresh * 2); | ||
195 | } | 196 | } |
196 | 197 | ||
197 | static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) | 198 | static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) |
@@ -268,7 +269,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { | |||
268 | static struct ctl_table xfrm4_policy_table[] = { | 269 | static struct ctl_table xfrm4_policy_table[] = { |
269 | { | 270 | { |
270 | .procname = "xfrm4_gc_thresh", | 271 | .procname = "xfrm4_gc_thresh", |
271 | .data = &xfrm4_dst_ops.gc_thresh, | 272 | .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh, |
272 | .maxlen = sizeof(int), | 273 | .maxlen = sizeof(int), |
273 | .mode = 0644, | 274 | .mode = 0644, |
274 | .proc_handler = proc_dointvec, | 275 | .proc_handler = proc_dointvec, |
@@ -295,8 +296,6 @@ static void __exit xfrm4_policy_fini(void) | |||
295 | 296 | ||
296 | void __init xfrm4_init(int rt_max_size) | 297 | void __init xfrm4_init(int rt_max_size) |
297 | { | 298 | { |
298 | xfrm4_state_init(); | ||
299 | xfrm4_policy_init(); | ||
300 | /* | 299 | /* |
301 | * Select a default value for the gc_thresh based on the main route | 300 | * Select a default value for the gc_thresh based on the main route |
302 | * table hash size. It seems to me the worst case scenario is when | 301 | * table hash size. It seems to me the worst case scenario is when |
@@ -308,6 +307,9 @@ void __init xfrm4_init(int rt_max_size) | |||
308 | * and start cleaning when were 1/2 full | 307 | * and start cleaning when were 1/2 full |
309 | */ | 308 | */ |
310 | xfrm4_dst_ops.gc_thresh = rt_max_size/2; | 309 | xfrm4_dst_ops.gc_thresh = rt_max_size/2; |
310 | |||
311 | xfrm4_state_init(); | ||
312 | xfrm4_policy_init(); | ||
311 | #ifdef CONFIG_SYSCTL | 313 | #ifdef CONFIG_SYSCTL |
312 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, | 314 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, |
313 | xfrm4_policy_table); | 315 | xfrm4_policy_table); |
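xfrm4_garbage_collect() above recovers the per-namespace struct net from the dst_ops pointer with container_of(), and the xfrm6 counterpart below does the same. A minimal user-space sketch of that pointer arithmetic (simplified structures, not the kernel's):

        #include <stdio.h>
        #include <stddef.h>

        /* Simplified container_of(): step back from a member to its enclosing struct. */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct dst_ops { int gc_thresh; };

        struct net {
                int id;
                struct dst_ops xfrm4_dst_ops;
        };

        int main(void)
        {
                struct net n = { .id = 7, .xfrm4_dst_ops = { .gc_thresh = 1024 } };
                struct dst_ops *ops = &n.xfrm4_dst_ops;

                /* Same pattern as the garbage-collect hook in the patch. */
                struct net *net = container_of(ops, struct net, xfrm4_dst_ops);

                printf("net id=%d gc_thresh=%d\n", net->id, net->xfrm4_dst_ops.gc_thresh);
                return 0;
        }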
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 7254e3f899a7..dbdc696f5fc5 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <net/mip6.h> | 24 | #include <net/mip6.h> |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | static struct dst_ops xfrm6_dst_ops; | ||
28 | static struct xfrm_policy_afinfo xfrm6_policy_afinfo; | 27 | static struct xfrm_policy_afinfo xfrm6_policy_afinfo; |
29 | 28 | ||
30 | static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, | 29 | static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, |
@@ -224,8 +223,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
224 | 223 | ||
225 | static inline int xfrm6_garbage_collect(struct dst_ops *ops) | 224 | static inline int xfrm6_garbage_collect(struct dst_ops *ops) |
226 | { | 225 | { |
227 | xfrm6_policy_afinfo.garbage_collect(&init_net); | 226 | struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); |
228 | return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2); | 227 | |
228 | xfrm6_policy_afinfo.garbage_collect(net); | ||
229 | return (atomic_read(&ops->entries) > ops->gc_thresh * 2); | ||
229 | } | 230 | } |
230 | 231 | ||
231 | static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) | 232 | static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) |
@@ -310,7 +311,7 @@ static void xfrm6_policy_fini(void) | |||
310 | static struct ctl_table xfrm6_policy_table[] = { | 311 | static struct ctl_table xfrm6_policy_table[] = { |
311 | { | 312 | { |
312 | .procname = "xfrm6_gc_thresh", | 313 | .procname = "xfrm6_gc_thresh", |
313 | .data = &xfrm6_dst_ops.gc_thresh, | 314 | .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, |
314 | .maxlen = sizeof(int), | 315 | .maxlen = sizeof(int), |
315 | .mode = 0644, | 316 | .mode = 0644, |
316 | .proc_handler = proc_dointvec, | 317 | .proc_handler = proc_dointvec, |
@@ -326,13 +327,6 @@ int __init xfrm6_init(void) | |||
326 | int ret; | 327 | int ret; |
327 | unsigned int gc_thresh; | 328 | unsigned int gc_thresh; |
328 | 329 | ||
329 | ret = xfrm6_policy_init(); | ||
330 | if (ret) | ||
331 | goto out; | ||
332 | |||
333 | ret = xfrm6_state_init(); | ||
334 | if (ret) | ||
335 | goto out_policy; | ||
336 | /* | 330 | /* |
337 | * We need a good default value for the xfrm6 gc threshold. | 331 | * We need a good default value for the xfrm6 gc threshold. |
338 | * In ipv4 we set it to the route hash table size * 8, which | 332 | * In ipv4 we set it to the route hash table size * 8, which |
@@ -346,6 +340,15 @@ int __init xfrm6_init(void) | |||
346 | */ | 340 | */ |
347 | gc_thresh = FIB6_TABLE_HASHSZ * 8; | 341 | gc_thresh = FIB6_TABLE_HASHSZ * 8; |
348 | xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; | 342 | xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; |
343 | |||
344 | ret = xfrm6_policy_init(); | ||
345 | if (ret) | ||
346 | goto out; | ||
347 | |||
348 | ret = xfrm6_state_init(); | ||
349 | if (ret) | ||
350 | goto out_policy; | ||
351 | |||
349 | #ifdef CONFIG_SYSCTL | 352 | #ifdef CONFIG_SYSCTL |
350 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, | 353 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, |
351 | xfrm6_policy_table); | 354 | xfrm6_policy_table); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 6dc3579c0ac5..9ae1a4760b58 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1331,6 +1331,9 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
1331 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1331 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1332 | struct ieee80211_conf *conf = &local->hw.conf; | 1332 | struct ieee80211_conf *conf = &local->hw.conf; |
1333 | 1333 | ||
1334 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
1335 | return -EOPNOTSUPP; | ||
1336 | |||
1334 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) | 1337 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) |
1335 | return -EOPNOTSUPP; | 1338 | return -EOPNOTSUPP; |
1336 | 1339 | ||
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index 699d3ed869c4..29bc4c516238 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -190,7 +190,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
190 | rate_control_pid_normalize(pinfo, sband->n_bitrates); | 190 | rate_control_pid_normalize(pinfo, sband->n_bitrates); |
191 | 191 | ||
192 | /* Compute the proportional, integral and derivative errors. */ | 192 | /* Compute the proportional, integral and derivative errors. */ |
193 | err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; | 193 | err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT; |
194 | 194 | ||
195 | err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; | 195 | err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; |
196 | spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop; | 196 | spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop; |
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index aacba76070fc..e2e2d33cafdf 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
@@ -843,12 +843,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
843 | dptr = skb_push(skb, 1); | 843 | dptr = skb_push(skb, 1); |
844 | *dptr = AX25_P_NETROM; | 844 | *dptr = AX25_P_NETROM; |
845 | 845 | ||
846 | ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); | 846 | ax25s = nr_neigh->ax25; |
847 | if (nr_neigh->ax25 && ax25s) { | 847 | nr_neigh->ax25 = ax25_send_frame(skb, 256, |
848 | /* We were already holding this ax25_cb */ | 848 | (ax25_address *)dev->dev_addr, |
849 | &nr_neigh->callsign, | ||
850 | nr_neigh->digipeat, nr_neigh->dev); | ||
851 | if (ax25s) | ||
849 | ax25_cb_put(ax25s); | 852 | ax25_cb_put(ax25s); |
850 | } | ||
851 | nr_neigh->ax25 = ax25s; | ||
852 | 853 | ||
853 | dev_put(dev); | 854 | dev_put(dev); |
854 | ret = (nr_neigh->ax25 != NULL); | 855 | ret = (nr_neigh->ax25 != NULL); |
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index bd86a63960ce..5ef5f6988a2e 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c | |||
@@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param) | |||
101 | static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) | 101 | static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) |
102 | { | 102 | { |
103 | ax25_address *rose_call; | 103 | ax25_address *rose_call; |
104 | ax25_cb *ax25s; | ||
104 | 105 | ||
105 | if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) | 106 | if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) |
106 | rose_call = (ax25_address *)neigh->dev->dev_addr; | 107 | rose_call = (ax25_address *)neigh->dev->dev_addr; |
107 | else | 108 | else |
108 | rose_call = &rose_callsign; | 109 | rose_call = &rose_callsign; |
109 | 110 | ||
111 | ax25s = neigh->ax25; | ||
110 | neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); | 112 | neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); |
113 | if (ax25s) | ||
114 | ax25_cb_put(ax25s); | ||
111 | 115 | ||
112 | return (neigh->ax25 != NULL); | 116 | return (neigh->ax25 != NULL); |
113 | } | 117 | } |
@@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) | |||
120 | static int rose_link_up(struct rose_neigh *neigh) | 124 | static int rose_link_up(struct rose_neigh *neigh) |
121 | { | 125 | { |
122 | ax25_address *rose_call; | 126 | ax25_address *rose_call; |
127 | ax25_cb *ax25s; | ||
123 | 128 | ||
124 | if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) | 129 | if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) |
125 | rose_call = (ax25_address *)neigh->dev->dev_addr; | 130 | rose_call = (ax25_address *)neigh->dev->dev_addr; |
126 | else | 131 | else |
127 | rose_call = &rose_callsign; | 132 | rose_call = &rose_callsign; |
128 | 133 | ||
134 | ax25s = neigh->ax25; | ||
129 | neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); | 135 | neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); |
136 | if (ax25s) | ||
137 | ax25_cb_put(ax25s); | ||
130 | 138 | ||
131 | return (neigh->ax25 != NULL); | 139 | return (neigh->ax25 != NULL); |
132 | } | 140 | } |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 795c4b025e31..70a0b3b4b4d2 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -235,6 +235,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) | |||
235 | 235 | ||
236 | if ((s = rose_neigh_list) == rose_neigh) { | 236 | if ((s = rose_neigh_list) == rose_neigh) { |
237 | rose_neigh_list = rose_neigh->next; | 237 | rose_neigh_list = rose_neigh->next; |
238 | if (rose_neigh->ax25) | ||
239 | ax25_cb_put(rose_neigh->ax25); | ||
238 | kfree(rose_neigh->digipeat); | 240 | kfree(rose_neigh->digipeat); |
239 | kfree(rose_neigh); | 241 | kfree(rose_neigh); |
240 | return; | 242 | return; |
@@ -243,6 +245,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) | |||
243 | while (s != NULL && s->next != NULL) { | 245 | while (s != NULL && s->next != NULL) { |
244 | if (s->next == rose_neigh) { | 246 | if (s->next == rose_neigh) { |
245 | s->next = rose_neigh->next; | 247 | s->next = rose_neigh->next; |
248 | if (rose_neigh->ax25) | ||
249 | ax25_cb_put(rose_neigh->ax25); | ||
246 | kfree(rose_neigh->digipeat); | 250 | kfree(rose_neigh->digipeat); |
247 | kfree(rose_neigh); | 251 | kfree(rose_neigh); |
248 | return; | 252 | return; |
@@ -812,6 +816,7 @@ void rose_link_failed(ax25_cb *ax25, int reason) | |||
812 | 816 | ||
813 | if (rose_neigh != NULL) { | 817 | if (rose_neigh != NULL) { |
814 | rose_neigh->ax25 = NULL; | 818 | rose_neigh->ax25 = NULL; |
819 | ax25_cb_put(ax25); | ||
815 | 820 | ||
816 | rose_del_route_by_neigh(rose_neigh); | 821 | rose_del_route_by_neigh(rose_neigh); |
817 | rose_kill_by_neigh(rose_neigh); | 822 | rose_kill_by_neigh(rose_neigh); |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 2333d78187e4..dc0fc4989d54 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -655,6 +655,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
655 | memset(&wrqu, 0, sizeof(wrqu)); | 655 | memset(&wrqu, 0, sizeof(wrqu)); |
656 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 656 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
657 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 657 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); |
658 | wdev->wext.connect.ssid_len = 0; | ||
658 | #endif | 659 | #endif |
659 | } | 660 | } |
660 | 661 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4725a549ad4d..0ecb16a9a883 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -469,16 +469,16 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total) | |||
469 | return 0; | 469 | return 0; |
470 | } | 470 | } |
471 | 471 | ||
472 | void xfrm_spd_getinfo(struct xfrmk_spdinfo *si) | 472 | void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) |
473 | { | 473 | { |
474 | read_lock_bh(&xfrm_policy_lock); | 474 | read_lock_bh(&xfrm_policy_lock); |
475 | si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN]; | 475 | si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; |
476 | si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT]; | 476 | si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; |
477 | si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD]; | 477 | si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; |
478 | si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; | 478 | si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; |
479 | si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; | 479 | si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; |
480 | si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; | 480 | si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; |
481 | si->spdhcnt = init_net.xfrm.policy_idx_hmask; | 481 | si->spdhcnt = net->xfrm.policy_idx_hmask; |
482 | si->spdhmcnt = xfrm_policy_hashmax; | 482 | si->spdhmcnt = xfrm_policy_hashmax; |
483 | read_unlock_bh(&xfrm_policy_lock); | 483 | read_unlock_bh(&xfrm_policy_lock); |
484 | } | 484 | } |
@@ -1309,15 +1309,28 @@ static inline int xfrm_get_tos(struct flowi *fl, int family) | |||
1309 | return tos; | 1309 | return tos; |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | static inline struct xfrm_dst *xfrm_alloc_dst(int family) | 1312 | static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) |
1313 | { | 1313 | { |
1314 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | 1314 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); |
1315 | struct dst_ops *dst_ops; | ||
1315 | struct xfrm_dst *xdst; | 1316 | struct xfrm_dst *xdst; |
1316 | 1317 | ||
1317 | if (!afinfo) | 1318 | if (!afinfo) |
1318 | return ERR_PTR(-EINVAL); | 1319 | return ERR_PTR(-EINVAL); |
1319 | 1320 | ||
1320 | xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS); | 1321 | switch (family) { |
1322 | case AF_INET: | ||
1323 | dst_ops = &net->xfrm.xfrm4_dst_ops; | ||
1324 | break; | ||
1325 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1326 | case AF_INET6: | ||
1327 | dst_ops = &net->xfrm.xfrm6_dst_ops; | ||
1328 | break; | ||
1329 | #endif | ||
1330 | default: | ||
1331 | BUG(); | ||
1332 | } | ||
1333 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); | ||
1321 | 1334 | ||
1322 | xfrm_policy_put_afinfo(afinfo); | 1335 | xfrm_policy_put_afinfo(afinfo); |
1323 | 1336 | ||
@@ -1366,6 +1379,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1366 | struct flowi *fl, | 1379 | struct flowi *fl, |
1367 | struct dst_entry *dst) | 1380 | struct dst_entry *dst) |
1368 | { | 1381 | { |
1382 | struct net *net = xp_net(policy); | ||
1369 | unsigned long now = jiffies; | 1383 | unsigned long now = jiffies; |
1370 | struct net_device *dev; | 1384 | struct net_device *dev; |
1371 | struct dst_entry *dst_prev = NULL; | 1385 | struct dst_entry *dst_prev = NULL; |
@@ -1389,7 +1403,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1389 | dst_hold(dst); | 1403 | dst_hold(dst); |
1390 | 1404 | ||
1391 | for (; i < nx; i++) { | 1405 | for (; i < nx; i++) { |
1392 | struct xfrm_dst *xdst = xfrm_alloc_dst(family); | 1406 | struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); |
1393 | struct dst_entry *dst1 = &xdst->u.dst; | 1407 | struct dst_entry *dst1 = &xdst->u.dst; |
1394 | 1408 | ||
1395 | err = PTR_ERR(xdst); | 1409 | err = PTR_ERR(xdst); |
@@ -2279,6 +2293,7 @@ EXPORT_SYMBOL(xfrm_bundle_ok); | |||
2279 | 2293 | ||
2280 | int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | 2294 | int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) |
2281 | { | 2295 | { |
2296 | struct net *net; | ||
2282 | int err = 0; | 2297 | int err = 0; |
2283 | if (unlikely(afinfo == NULL)) | 2298 | if (unlikely(afinfo == NULL)) |
2284 | return -EINVAL; | 2299 | return -EINVAL; |
@@ -2302,6 +2317,27 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
2302 | xfrm_policy_afinfo[afinfo->family] = afinfo; | 2317 | xfrm_policy_afinfo[afinfo->family] = afinfo; |
2303 | } | 2318 | } |
2304 | write_unlock_bh(&xfrm_policy_afinfo_lock); | 2319 | write_unlock_bh(&xfrm_policy_afinfo_lock); |
2320 | |||
2321 | rtnl_lock(); | ||
2322 | for_each_net(net) { | ||
2323 | struct dst_ops *xfrm_dst_ops; | ||
2324 | |||
2325 | switch (afinfo->family) { | ||
2326 | case AF_INET: | ||
2327 | xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; | ||
2328 | break; | ||
2329 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
2330 | case AF_INET6: | ||
2331 | xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; | ||
2332 | break; | ||
2333 | #endif | ||
2334 | default: | ||
2335 | BUG(); | ||
2336 | } | ||
2337 | *xfrm_dst_ops = *afinfo->dst_ops; | ||
2338 | } | ||
2339 | rtnl_unlock(); | ||
2340 | |||
2305 | return err; | 2341 | return err; |
2306 | } | 2342 | } |
2307 | EXPORT_SYMBOL(xfrm_policy_register_afinfo); | 2343 | EXPORT_SYMBOL(xfrm_policy_register_afinfo); |
@@ -2332,6 +2368,22 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
2332 | } | 2368 | } |
2333 | EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); | 2369 | EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); |
2334 | 2370 | ||
2371 | static void __net_init xfrm_dst_ops_init(struct net *net) | ||
2372 | { | ||
2373 | struct xfrm_policy_afinfo *afinfo; | ||
2374 | |||
2375 | read_lock_bh(&xfrm_policy_afinfo_lock); | ||
2376 | afinfo = xfrm_policy_afinfo[AF_INET]; | ||
2377 | if (afinfo) | ||
2378 | net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; | ||
2379 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
2380 | afinfo = xfrm_policy_afinfo[AF_INET6]; | ||
2381 | if (afinfo) | ||
2382 | net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; | ||
2383 | #endif | ||
2384 | read_unlock_bh(&xfrm_policy_afinfo_lock); | ||
2385 | } | ||
2386 | |||
2335 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) | 2387 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) |
2336 | { | 2388 | { |
2337 | struct xfrm_policy_afinfo *afinfo; | 2389 | struct xfrm_policy_afinfo *afinfo; |
@@ -2494,6 +2546,7 @@ static int __net_init xfrm_net_init(struct net *net) | |||
2494 | rv = xfrm_policy_init(net); | 2546 | rv = xfrm_policy_init(net); |
2495 | if (rv < 0) | 2547 | if (rv < 0) |
2496 | goto out_policy; | 2548 | goto out_policy; |
2549 | xfrm_dst_ops_init(net); | ||
2497 | rv = xfrm_sysctl_init(net); | 2550 | rv = xfrm_sysctl_init(net); |
2498 | if (rv < 0) | 2551 | if (rv < 0) |
2499 | goto out_sysctl; | 2552 | goto out_sysctl; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index d847f1a52b44..b36cc344474b 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -641,11 +641,11 @@ out: | |||
641 | } | 641 | } |
642 | EXPORT_SYMBOL(xfrm_state_flush); | 642 | EXPORT_SYMBOL(xfrm_state_flush); |
643 | 643 | ||
644 | void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) | 644 | void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) |
645 | { | 645 | { |
646 | spin_lock_bh(&xfrm_state_lock); | 646 | spin_lock_bh(&xfrm_state_lock); |
647 | si->sadcnt = init_net.xfrm.state_num; | 647 | si->sadcnt = net->xfrm.state_num; |
648 | si->sadhcnt = init_net.xfrm.state_hmask; | 648 | si->sadhcnt = net->xfrm.state_hmask; |
649 | si->sadhmcnt = xfrm_state_hashmax; | 649 | si->sadhmcnt = xfrm_state_hashmax; |
650 | spin_unlock_bh(&xfrm_state_lock); | 650 | spin_unlock_bh(&xfrm_state_lock); |
651 | } | 651 | } |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 1ada6186933c..d5a712976004 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -781,7 +781,8 @@ static inline size_t xfrm_spdinfo_msgsize(void) | |||
781 | + nla_total_size(sizeof(struct xfrmu_spdhinfo)); | 781 | + nla_total_size(sizeof(struct xfrmu_spdhinfo)); |
782 | } | 782 | } |
783 | 783 | ||
784 | static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) | 784 | static int build_spdinfo(struct sk_buff *skb, struct net *net, |
785 | u32 pid, u32 seq, u32 flags) | ||
785 | { | 786 | { |
786 | struct xfrmk_spdinfo si; | 787 | struct xfrmk_spdinfo si; |
787 | struct xfrmu_spdinfo spc; | 788 | struct xfrmu_spdinfo spc; |
@@ -795,7 +796,7 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) | |||
795 | 796 | ||
796 | f = nlmsg_data(nlh); | 797 | f = nlmsg_data(nlh); |
797 | *f = flags; | 798 | *f = flags; |
798 | xfrm_spd_getinfo(&si); | 799 | xfrm_spd_getinfo(net, &si); |
799 | spc.incnt = si.incnt; | 800 | spc.incnt = si.incnt; |
800 | spc.outcnt = si.outcnt; | 801 | spc.outcnt = si.outcnt; |
801 | spc.fwdcnt = si.fwdcnt; | 802 | spc.fwdcnt = si.fwdcnt; |
@@ -828,7 +829,7 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
828 | if (r_skb == NULL) | 829 | if (r_skb == NULL) |
829 | return -ENOMEM; | 830 | return -ENOMEM; |
830 | 831 | ||
831 | if (build_spdinfo(r_skb, spid, seq, *flags) < 0) | 832 | if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0) |
832 | BUG(); | 833 | BUG(); |
833 | 834 | ||
834 | return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); | 835 | return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); |
@@ -841,7 +842,8 @@ static inline size_t xfrm_sadinfo_msgsize(void) | |||
841 | + nla_total_size(4); /* XFRMA_SAD_CNT */ | 842 | + nla_total_size(4); /* XFRMA_SAD_CNT */ |
842 | } | 843 | } |
843 | 844 | ||
844 | static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) | 845 | static int build_sadinfo(struct sk_buff *skb, struct net *net, |
846 | u32 pid, u32 seq, u32 flags) | ||
845 | { | 847 | { |
846 | struct xfrmk_sadinfo si; | 848 | struct xfrmk_sadinfo si; |
847 | struct xfrmu_sadhinfo sh; | 849 | struct xfrmu_sadhinfo sh; |
@@ -854,7 +856,7 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) | |||
854 | 856 | ||
855 | f = nlmsg_data(nlh); | 857 | f = nlmsg_data(nlh); |
856 | *f = flags; | 858 | *f = flags; |
857 | xfrm_sad_getinfo(&si); | 859 | xfrm_sad_getinfo(net, &si); |
858 | 860 | ||
859 | sh.sadhmcnt = si.sadhmcnt; | 861 | sh.sadhmcnt = si.sadhmcnt; |
860 | sh.sadhcnt = si.sadhcnt; | 862 | sh.sadhcnt = si.sadhcnt; |
@@ -882,7 +884,7 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
882 | if (r_skb == NULL) | 884 | if (r_skb == NULL) |
883 | return -ENOMEM; | 885 | return -ENOMEM; |
884 | 886 | ||
885 | if (build_sadinfo(r_skb, spid, seq, *flags) < 0) | 887 | if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0) |
886 | BUG(); | 888 | BUG(); |
887 | 889 | ||
888 | return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); | 890 | return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); |
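The xfrm_state.c and xfrm_user.c hunks above thread an explicit struct net through the SAD/SPD info helpers instead of reading the init_net singleton, so the reported counters belong to the caller's namespace. A small, non-kernel sketch of that pass-the-context refactor, assuming a plain struct with a counter stands in for the per-netns state (struct counters and get_info() are invented names):

#include <stdio.h>

struct counters { int entries; };          /* per-namespace state, like the netns xfrm counters */

static struct counters default_ns = { 3 }; /* the old implicit "init_net" singleton */

/* Before: readers consulted the global singleton.  After: they read
 * whichever instance the caller passes, as xfrm_sad_getinfo() now does
 * with its struct net argument. */
static int get_info(const struct counters *ns)
{
	return ns->entries;
}

int main(void)
{
	struct counters other_ns = { 7 };
	printf("default: %d, other: %d\n",
	       get_info(&default_ns), get_info(&other_ns));
	return 0;
}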
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3f92def752fd..da34095c707f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1093,6 +1093,16 @@ static void alc889_coef_init(struct hda_codec *codec) | |||
1093 | snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, tmp|0x2010); | 1093 | snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, tmp|0x2010); |
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | /* turn on/off EAPD control (only if available) */ | ||
1097 | static void set_eapd(struct hda_codec *codec, hda_nid_t nid, int on) | ||
1098 | { | ||
1099 | if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN) | ||
1100 | return; | ||
1101 | if (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_EAPD) | ||
1102 | snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_EAPD_BTLENABLE, | ||
1103 | on ? 2 : 0); | ||
1104 | } | ||
1105 | |||
1096 | static void alc_auto_init_amp(struct hda_codec *codec, int type) | 1106 | static void alc_auto_init_amp(struct hda_codec *codec, int type) |
1097 | { | 1107 | { |
1098 | unsigned int tmp; | 1108 | unsigned int tmp; |
@@ -1110,25 +1120,22 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) | |||
1110 | case ALC_INIT_DEFAULT: | 1120 | case ALC_INIT_DEFAULT: |
1111 | switch (codec->vendor_id) { | 1121 | switch (codec->vendor_id) { |
1112 | case 0x10ec0260: | 1122 | case 0x10ec0260: |
1113 | snd_hda_codec_write(codec, 0x0f, 0, | 1123 | set_eapd(codec, 0x0f, 1); |
1114 | AC_VERB_SET_EAPD_BTLENABLE, 2); | 1124 | set_eapd(codec, 0x10, 1); |
1115 | snd_hda_codec_write(codec, 0x10, 0, | ||
1116 | AC_VERB_SET_EAPD_BTLENABLE, 2); | ||
1117 | break; | 1125 | break; |
1118 | case 0x10ec0262: | 1126 | case 0x10ec0262: |
1119 | case 0x10ec0267: | 1127 | case 0x10ec0267: |
1120 | case 0x10ec0268: | 1128 | case 0x10ec0268: |
1121 | case 0x10ec0269: | 1129 | case 0x10ec0269: |
1130 | case 0x10ec0270: | ||
1122 | case 0x10ec0272: | 1131 | case 0x10ec0272: |
1123 | case 0x10ec0660: | 1132 | case 0x10ec0660: |
1124 | case 0x10ec0662: | 1133 | case 0x10ec0662: |
1125 | case 0x10ec0663: | 1134 | case 0x10ec0663: |
1126 | case 0x10ec0862: | 1135 | case 0x10ec0862: |
1127 | case 0x10ec0889: | 1136 | case 0x10ec0889: |
1128 | snd_hda_codec_write(codec, 0x14, 0, | 1137 | set_eapd(codec, 0x14, 1); |
1129 | AC_VERB_SET_EAPD_BTLENABLE, 2); | 1138 | set_eapd(codec, 0x15, 1); |
1130 | snd_hda_codec_write(codec, 0x15, 0, | ||
1131 | AC_VERB_SET_EAPD_BTLENABLE, 2); | ||
1132 | break; | 1139 | break; |
1133 | } | 1140 | } |
1134 | switch (codec->vendor_id) { | 1141 | switch (codec->vendor_id) { |
@@ -1836,10 +1843,8 @@ static void alc889_acer_aspire_8930g_setup(struct hda_codec *codec) | |||
1836 | #ifdef CONFIG_SND_HDA_POWER_SAVE | 1843 | #ifdef CONFIG_SND_HDA_POWER_SAVE |
1837 | static void alc889_power_eapd(struct hda_codec *codec, int power) | 1844 | static void alc889_power_eapd(struct hda_codec *codec, int power) |
1838 | { | 1845 | { |
1839 | snd_hda_codec_write(codec, 0x14, 0, | 1846 | set_eapd(codec, 0x14, power); |
1840 | AC_VERB_SET_EAPD_BTLENABLE, power ? 2 : 0); | 1847 | set_eapd(codec, 0x15, power); |
1841 | snd_hda_codec_write(codec, 0x15, 0, | ||
1842 | AC_VERB_SET_EAPD_BTLENABLE, power ? 2 : 0); | ||
1843 | } | 1848 | } |
1844 | #endif | 1849 | #endif |
1845 | 1850 | ||
@@ -9473,6 +9478,7 @@ static struct alc_config_preset alc882_presets[] = { | |||
9473 | .num_channel_mode = ARRAY_SIZE(alc883_3ST_6ch_modes), | 9478 | .num_channel_mode = ARRAY_SIZE(alc883_3ST_6ch_modes), |
9474 | .channel_mode = alc883_3ST_6ch_modes, | 9479 | .channel_mode = alc883_3ST_6ch_modes, |
9475 | .need_dac_fix = 1, | 9480 | .need_dac_fix = 1, |
9481 | .const_channel_count = 6, | ||
9476 | .num_mux_defs = | 9482 | .num_mux_defs = |
9477 | ARRAY_SIZE(alc888_2_capture_sources), | 9483 | ARRAY_SIZE(alc888_2_capture_sources), |
9478 | .input_mux = alc888_2_capture_sources, | 9484 | .input_mux = alc888_2_capture_sources, |
@@ -10377,7 +10383,7 @@ static void alc262_hp_t5735_setup(struct hda_codec *codec) | |||
10377 | struct alc_spec *spec = codec->spec; | 10383 | struct alc_spec *spec = codec->spec; |
10378 | 10384 | ||
10379 | spec->autocfg.hp_pins[0] = 0x15; | 10385 | spec->autocfg.hp_pins[0] = 0x15; |
10380 | spec->autocfg.speaker_pins[0] = 0x0c; /* HACK: not actually a pin */ | 10386 | spec->autocfg.speaker_pins[0] = 0x14; |
10381 | } | 10387 | } |
10382 | 10388 | ||
10383 | static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = { | 10389 | static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = { |
@@ -11788,9 +11794,9 @@ static struct alc_config_preset alc262_presets[] = { | |||
11788 | .num_channel_mode = ARRAY_SIZE(alc262_modes), | 11794 | .num_channel_mode = ARRAY_SIZE(alc262_modes), |
11789 | .channel_mode = alc262_modes, | 11795 | .channel_mode = alc262_modes, |
11790 | .input_mux = &alc262_capture_source, | 11796 | .input_mux = &alc262_capture_source, |
11791 | .unsol_event = alc_automute_amp_unsol_event, | 11797 | .unsol_event = alc_sku_unsol_event, |
11792 | .setup = alc262_hp_t5735_setup, | 11798 | .setup = alc262_hp_t5735_setup, |
11793 | .init_hook = alc_automute_amp, | 11799 | .init_hook = alc_inithook, |
11794 | }, | 11800 | }, |
11795 | [ALC262_HP_RP5700] = { | 11801 | [ALC262_HP_RP5700] = { |
11796 | .mixers = { alc262_hp_rp5700_mixer }, | 11802 | .mixers = { alc262_hp_rp5700_mixer }, |
@@ -12541,6 +12547,7 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid, | |||
12541 | dac = 0x02; | 12547 | dac = 0x02; |
12542 | break; | 12548 | break; |
12543 | case 0x15: | 12549 | case 0x15: |
12550 | case 0x21: | ||
12544 | dac = 0x03; | 12551 | dac = 0x03; |
12545 | break; | 12552 | break; |
12546 | default: | 12553 | default: |
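The patch_realtek.c hunks above fold the repeated EAPD verb writes into a set_eapd() helper that first checks that the widget is a pin and that the pin actually advertises EAPD capability. A minimal sketch of that query-capabilities-before-writing guard; struct widget and set_eapd_sim() are invented stand-ins for the HDA widget and pin-capability queries, not driver APIs.

#include <stdio.h>
#include <stdbool.h>

struct widget { bool is_pin; bool has_eapd; int eapd_state; };

/* Only touch the control when the widget is a pin AND advertises EAPD,
 * mirroring the guards added in set_eapd(). */
static void set_eapd_sim(struct widget *w, bool on)
{
	if (!w->is_pin)
		return;
	if (!w->has_eapd)
		return;
	w->eapd_state = on ? 2 : 0;
}

int main(void)
{
	struct widget spk = { .is_pin = true, .has_eapd = true };
	struct widget dac = { .is_pin = false };

	set_eapd_sim(&spk, true);
	set_eapd_sim(&dac, true);   /* silently skipped: not a pin */
	printf("spk eapd=%d dac eapd=%d\n", spk.eapd_state, dac.eapd_state);
	return 0;
}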
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index ce5515e3f2b0..3595bd57c4eb 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -1504,7 +1504,7 @@ static int wm8903_resume(struct platform_device *pdev) | |||
1504 | struct i2c_client *i2c = codec->control_data; | 1504 | struct i2c_client *i2c = codec->control_data; |
1505 | int i; | 1505 | int i; |
1506 | u16 *reg_cache = codec->reg_cache; | 1506 | u16 *reg_cache = codec->reg_cache; |
1507 | u16 *tmp_cache = kmemdup(codec->reg_cache, sizeof(wm8903_reg_defaults), | 1507 | u16 *tmp_cache = kmemdup(reg_cache, sizeof(wm8903_reg_defaults), |
1508 | GFP_KERNEL); | 1508 | GFP_KERNEL); |
1509 | 1509 | ||
1510 | /* Bring the codec back up to standby first to minimise pop/clicks */ | 1510 | /* Bring the codec back up to standby first to minimise pop/clicks */ |
@@ -1516,6 +1516,7 @@ static int wm8903_resume(struct platform_device *pdev) | |||
1516 | for (i = 2; i < ARRAY_SIZE(wm8903_reg_defaults); i++) | 1516 | for (i = 2; i < ARRAY_SIZE(wm8903_reg_defaults); i++) |
1517 | if (tmp_cache[i] != reg_cache[i]) | 1517 | if (tmp_cache[i] != reg_cache[i]) |
1518 | snd_soc_write(codec, i, tmp_cache[i]); | 1518 | snd_soc_write(codec, i, tmp_cache[i]); |
1519 | kfree(tmp_cache); | ||
1519 | } else { | 1520 | } else { |
1520 | dev_err(&i2c->dev, "Failed to allocate temporary cache\n"); | 1521 | dev_err(&i2c->dev, "Failed to allocate temporary cache\n"); |
1521 | } | 1522 | } |
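The wm8903.c hunk above frees the kmemdup()'d register snapshot once the differing registers have been written back, closing a memory leak that previously occurred on every resume. A self-contained sketch of the same allocate/compare/free flow; the array size and contents are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	unsigned short reg_cache[8] = { 0, 0, 5, 6, 7, 8, 9, 10 };

	/* Duplicate the cache so the restore loop can compare old vs. new. */
	unsigned short *tmp_cache = malloc(sizeof(reg_cache));
	if (tmp_cache) {
		memcpy(tmp_cache, reg_cache, sizeof(reg_cache));
		/* ... registers restored, tmp_cache compared against the
		 * live cache here ... */
		free(tmp_cache);        /* the added kfree() in the patch */
	} else {
		fprintf(stderr, "Failed to allocate temporary cache\n");
	}
	return 0;
}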
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 30f70fd511c4..a9d3fc6c681c 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
@@ -72,12 +72,13 @@ static void | |||
72 | irqfd_shutdown(struct work_struct *work) | 72 | irqfd_shutdown(struct work_struct *work) |
73 | { | 73 | { |
74 | struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown); | 74 | struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown); |
75 | u64 cnt; | ||
75 | 76 | ||
76 | /* | 77 | /* |
77 | * Synchronize with the wait-queue and unhook ourselves to prevent | 78 | * Synchronize with the wait-queue and unhook ourselves to prevent |
78 | * further events. | 79 | * further events. |
79 | */ | 80 | */ |
80 | remove_wait_queue(irqfd->wqh, &irqfd->wait); | 81 | eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt); |
81 | 82 | ||
82 | /* | 83 | /* |
83 | * We know no new events will be scheduled at this point, so block | 84 | * We know no new events will be scheduled at this point, so block |
@@ -166,7 +167,7 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, | |||
166 | static int | 167 | static int |
167 | kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) | 168 | kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) |
168 | { | 169 | { |
169 | struct _irqfd *irqfd; | 170 | struct _irqfd *irqfd, *tmp; |
170 | struct file *file = NULL; | 171 | struct file *file = NULL; |
171 | struct eventfd_ctx *eventfd = NULL; | 172 | struct eventfd_ctx *eventfd = NULL; |
172 | int ret; | 173 | int ret; |
@@ -203,9 +204,20 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi) | |||
203 | init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); | 204 | init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); |
204 | init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc); | 205 | init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc); |
205 | 206 | ||
207 | spin_lock_irq(&kvm->irqfds.lock); | ||
208 | |||
209 | ret = 0; | ||
210 | list_for_each_entry(tmp, &kvm->irqfds.items, list) { | ||
211 | if (irqfd->eventfd != tmp->eventfd) | ||
212 | continue; | ||
213 | /* This fd is used for another irq already. */ | ||
214 | ret = -EBUSY; | ||
215 | spin_unlock_irq(&kvm->irqfds.lock); | ||
216 | goto fail; | ||
217 | } | ||
218 | |||
206 | events = file->f_op->poll(file, &irqfd->pt); | 219 | events = file->f_op->poll(file, &irqfd->pt); |
207 | 220 | ||
208 | spin_lock_irq(&kvm->irqfds.lock); | ||
209 | list_add_tail(&irqfd->list, &kvm->irqfds.items); | 221 | list_add_tail(&irqfd->list, &kvm->irqfds.items); |
210 | spin_unlock_irq(&kvm->irqfds.lock); | 222 | spin_unlock_irq(&kvm->irqfds.lock); |
211 | 223 | ||
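The eventfd.c hunks above make kvm_irqfd_assign() walk the existing irqfd list under kvm->irqfds.lock and return -EBUSY when the same eventfd is already bound to another GSI, before the poll callback can fire. A minimal sketch of that reject-duplicates-before-insert check, using a plain array in place of the locked kernel list; struct binding and assign() are invented names.

#include <errno.h>
#include <stdio.h>

struct binding { int fd; int gsi; };

static struct binding items[8];
static int nitems;

static int assign(int fd, int gsi)
{
	/* Refuse to reuse a handle that already drives another irq. */
	for (int i = 0; i < nitems; i++)
		if (items[i].fd == fd)
			return -EBUSY;
	items[nitems].fd = fd;
	items[nitems].gsi = gsi;
	nitems++;
	return 0;
}

int main(void)
{
	printf("%d\n", assign(5, 1));   /* 0       */
	printf("%d\n", assign(5, 2));   /* -EBUSY  */
	return 0;
}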
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index 9b077342ab54..9fd5b3ebc517 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c | |||
@@ -302,6 +302,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, | |||
302 | { | 302 | { |
303 | int r = -EINVAL; | 303 | int r = -EINVAL; |
304 | int delta; | 304 | int delta; |
305 | unsigned max_pin; | ||
305 | struct kvm_kernel_irq_routing_entry *ei; | 306 | struct kvm_kernel_irq_routing_entry *ei; |
306 | struct hlist_node *n; | 307 | struct hlist_node *n; |
307 | 308 | ||
@@ -322,12 +323,15 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, | |||
322 | switch (ue->u.irqchip.irqchip) { | 323 | switch (ue->u.irqchip.irqchip) { |
323 | case KVM_IRQCHIP_PIC_MASTER: | 324 | case KVM_IRQCHIP_PIC_MASTER: |
324 | e->set = kvm_set_pic_irq; | 325 | e->set = kvm_set_pic_irq; |
326 | max_pin = 16; | ||
325 | break; | 327 | break; |
326 | case KVM_IRQCHIP_PIC_SLAVE: | 328 | case KVM_IRQCHIP_PIC_SLAVE: |
327 | e->set = kvm_set_pic_irq; | 329 | e->set = kvm_set_pic_irq; |
330 | max_pin = 16; | ||
328 | delta = 8; | 331 | delta = 8; |
329 | break; | 332 | break; |
330 | case KVM_IRQCHIP_IOAPIC: | 333 | case KVM_IRQCHIP_IOAPIC: |
334 | max_pin = KVM_IOAPIC_NUM_PINS; | ||
331 | e->set = kvm_set_ioapic_irq; | 335 | e->set = kvm_set_ioapic_irq; |
332 | break; | 336 | break; |
333 | default: | 337 | default: |
@@ -335,7 +339,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, | |||
335 | } | 339 | } |
336 | e->irqchip.irqchip = ue->u.irqchip.irqchip; | 340 | e->irqchip.irqchip = ue->u.irqchip.irqchip; |
337 | e->irqchip.pin = ue->u.irqchip.pin + delta; | 341 | e->irqchip.pin = ue->u.irqchip.pin + delta; |
338 | if (e->irqchip.pin >= KVM_IOAPIC_NUM_PINS) | 342 | if (e->irqchip.pin >= max_pin) |
339 | goto out; | 343 | goto out; |
340 | rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi; | 344 | rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi; |
341 | break; | 345 | break; |
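The irq_comm.c hunk above picks a per-chip pin bound (16 for either PIC half, KVM_IOAPIC_NUM_PINS for the IOAPIC) instead of validating every route against the IOAPIC limit, so out-of-range PIC pins are now rejected. A small sketch of the select-the-bound-then-validate pattern; the constants and names below are illustrative only, and the sketch ignores the slave-PIC pin offset handled by delta in the real code.

#include <stdio.h>

enum chip { PIC_MASTER, PIC_SLAVE, IOAPIC };

static int route_ok(enum chip chip, unsigned pin)
{
	unsigned max_pin;

	switch (chip) {
	case PIC_MASTER:
	case PIC_SLAVE:
		max_pin = 16;           /* a PIC half only has 16 pins */
		break;
	case IOAPIC:
		max_pin = 24;           /* stand-in for KVM_IOAPIC_NUM_PINS */
		break;
	default:
		return 0;
	}
	return pin < max_pin;
}

int main(void)
{
	printf("PIC pin 20 ok? %d\n", route_ok(PIC_MASTER, 20));  /* 0: rejected now */
	printf("IOAPIC pin 20 ok? %d\n", route_ok(IOAPIC, 20));   /* 1 */
	return 0;
}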