210 files changed, 1568 insertions(+), 966 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 34f51100f029..dff1f48d252d 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -210,3 +210,15 @@ Users:
 firmware assigned instance number of the PCI
 device that can help in understanding the firmware
 intended order of the PCI device.
+
+What: /sys/bus/pci/devices/.../d3cold_allowed
+Date: July 2012
+Contact: Huang Ying <ying.huang@intel.com>
+Description:
+d3cold_allowed is bit to control whether the corresponding PCI
+device can be put into D3Cold state. If it is cleared, the
+device will never be put into D3Cold state. If it is set, the
+device may be put into D3Cold state if other requirements are
+satisfied too. Reading this attribute will show the current
+value of d3cold_allowed bit. Writing this attribute will set
+the value of d3cold_allowed bit.
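The new d3cold_allowed attribute documented above is a simple boolean sysfs file. As a rough, hypothetical illustration (not part of the patch), a userspace helper could clear the bit to keep a device out of D3cold; the device address below is made up:

    #include <stdio.h>

    /* Hypothetical PCI device path, for illustration only. */
    #define D3COLD_PATH "/sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed"

    /* Write 0 to forbid D3cold, 1 to allow it (subject to other constraints). */
    static int set_d3cold_allowed(int allowed)
    {
            FILE *f = fopen(D3COLD_PATH, "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", allowed ? 1 : 0);
            return fclose(f);
    }

    int main(void)
    {
            return set_d3cold_allowed(0) ? 1 : 0;
    }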
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index afaff312bf41..f4d8c7105fcd 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more
 ----------------------------
 
 What: at91-mci driver ("CONFIG_MMC_AT91")
-When: 3.7
+When: 3.8
 Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support
 was added to atmel-mci as a first step to support more chips.
 Then at91-mci was kept only for old IP versions (on at91rm9200 and
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index 73ff5cc93e05..3da822967ee0 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -31,7 +31,7 @@ static void keep_alive(void)
 * or "-e" to enable the card.
 */
 
-void term(int sig)
+static void term(int sig)
 {
 close(fd);
 fprintf(stderr, "Stopping watchdog ticks...\n");
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c5f9ae5dbd1a..2f88d8d97701 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
 select HAVE_DMA_API_DEBUG
 select HAVE_IDE if PCI || ISA || PCMCIA
 select HAVE_DMA_ATTRS
-select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+select HAVE_DMA_CONTIGUOUS if MMU
 select HAVE_MEMBLOCK
 select RTC_LIB
 select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 7829a4d0cb22..96514c134e54 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -15,7 +15,7 @@
 compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
 
 chosen {
-bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
 };
 
 ahb {
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index 7d8718468e0d..90610c7030f7 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -33,7 +33,7 @@ CONFIG_AEABI=y
 CONFIG_FORCE_MAX_ZONEORDER=13
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
 CONFIG_CMDLINE_FORCE=y
 CONFIG_KEXEC=y
 CONFIG_VFP=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df4551..5c44dcb0987b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 }
 
 /*
+* This can be called during early boot to increase the size of the atomic
+* coherent DMA pool above the default value of 256KiB. It must be called
+* before postcore_initcall.
+*/
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above it's default value of 2MB. It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
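The helper declared above is intended to be called from a machine's early init code, before the postcore_initcall that sets up the atomic pool; the Kirkwood change later in this diff is an in-tree example. A minimal sketch of such a caller, with a made-up board name and size:

    #include <linux/init.h>
    #include <linux/sizes.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical board code: enlarge the atomic coherent pool early. */
    static void __init example_board_init_early(void)
    {
            /* Must run before postcore_initcall, i.e. before the pool is allocated. */
            init_dma_coherent_pool_size(SZ_2M);
    }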
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40d8d18..aaa443b48c91 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
 at91_st_read(AT91_ST_SR);
 
 /* Make IRQs happen for the system timer */
-setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
 
 /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
 * directly for the clocksource and all clockevents, after adjusting
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 7b9c2ba396ed..bce572a530ef 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
 .flags = IORESOURCE_MEM,
 }, {
 .flags = IORESOURCE_MEM,
+}, {
+.flags = IORESOURCE_IRQ,
 },
 };
 
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
 * The second resource is needed:
 * GPBR will serve as the storage for RTC time offset
 */
-at91sam9260_rtt_device.num_resources = 2;
+at91sam9260_rtt_device.num_resources = 3;
 rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 rtt_resources[1].end = rtt_resources[1].start + 3;
+rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 8df5c1bdff92..bc2590d712d0 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
 .flags = IORESOURCE_MEM,
 }, {
 .flags = IORESOURCE_MEM,
+}, {
+.flags = IORESOURCE_IRQ,
 }
 };
 
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
 * The second resource is needed:
 * GPBR will serve as the storage for RTC time offset
 */
-at91sam9261_rtt_device.num_resources = 2;
+at91sam9261_rtt_device.num_resources = 3;
 rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 rtt_resources[1].end = rtt_resources[1].start + 3;
+rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index eb6bbf86fb9f..9b6ca734f1a9 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
 .flags = IORESOURCE_MEM,
 }, {
 .flags = IORESOURCE_MEM,
+}, {
+.flags = IORESOURCE_IRQ,
 }
 };
 
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
 .flags = IORESOURCE_MEM,
 }, {
 .flags = IORESOURCE_MEM,
+}, {
+.flags = IORESOURCE_IRQ,
 }
 };
 
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
 * The second resource is needed only for the chosen RTT:
 * GPBR will serve as the storage for RTC time offset
 */
-at91sam9263_rtt0_device.num_resources = 2;
+at91sam9263_rtt0_device.num_resources = 3;
 at91sam9263_rtt1_device.num_resources = 1;
 pdev = &at91sam9263_rtt0_device;
 r = rtt0_resources;
 break;
 case 1:
 at91sam9263_rtt0_device.num_resources = 1;
-at91sam9263_rtt1_device.num_resources = 2;
+at91sam9263_rtt1_device.num_resources = 3;
 pdev = &at91sam9263_rtt1_device;
 r = rtt1_resources;
 break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
 pdev->name = "rtc-at91sam9";
 r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 r[1].end = r[1].start + 3;
+r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 06073996a382..1b47319ca00b 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
 .flags = IORESOURCE_MEM,
 }, {
 .flags = IORESOURCE_MEM,
+}, {
+.flags = IORESOURCE_IRQ,
 }
 };
 
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
 * The second resource is needed:
 * GPBR will serve as the storage for RTC time offset
 */
-at91sam9g45_rtt_device.num_resources = 2;
+at91sam9g45_rtt_device.num_resources = 3;
 rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 rtt_resources[1].end = rtt_resources[1].start + 3;
+rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index f09fff932172..b3d365dadef5 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
 .flags = IORESOURCE_MEM,
 }, {
 .flags = IORESOURCE_MEM,
+}, {
+.flags = IORESOURCE_IRQ,
 }
 };
 
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
 * The second resource is needed:
 * GPBR will serve as the storage for RTC time offset
 */
-at91sam9rl_rtt_device.num_resources = 2;
+at91sam9rl_rtt_device.num_resources = 3;
 rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 rtt_resources[1].end = rtt_resources[1].start + 3;
+rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index de2ec6b8fea7..188c82971ebd 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
 
 #define cpu_has_300M_plla() (cpu_is_at91sam9g10())
 
+#define cpu_has_240M_plla() (cpu_is_at91sam9261() \
+|| cpu_is_at91sam9263() \
+|| cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla() (cpu_is_at91sam9260())
+
 #define cpu_has_pllb() (!(cpu_is_at91sam9rl() \
 || cpu_is_at91sam9g45() \
 || cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
 } else if (cpu_has_800M_plla()) {
 if (plla.rate_hz > 800000000)
 pll_overclock = true;
+} else if (cpu_has_240M_plla()) {
+if (plla.rate_hz > 240000000)
+pll_overclock = true;
+} else if (cpu_has_210M_plla()) {
+if (plla.rate_hz > 210000000)
+pll_overclock = true;
 } else {
 if (plla.rate_hz > 209000000)
 pll_overclock = true;
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index ca70e5fcc7ac..020852d3bdd8 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
+#include <asm/system_misc.h>
 #include <mach/hardware.h>
 
 #define IRQ_SOURCE(base_addr) (base_addr + 0x00)
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3226077735b1..1201191d7f1b 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
 void __init kirkwood_init_early(void)
 {
 orion_time_set_base(TIMER_VIRT_BASE);
+
+/*
+* Some Kirkwood devices allocate their coherent buffers from atomic
+* context. Increase size of atomic coherent pool to make sure such
+* the allocations won't fail.
+*/
+init_dma_coherent_pool_size(SZ_1M);
 }
 
 int kirkwood_tclk;
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index d93359379598..be90b7d0e10b 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/partitions.h>
 #include <linux/ata_platform.h>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index cf10f92856dc..453a6e50db8b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
 };
 
 /* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+{ .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
 
 static struct gpio_keys_button gpio_buttons[] = {
-GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"),
-GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"),
-GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"),
-GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"),
+GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1),
+GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"),
+GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"),
+GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"),
 };
 
 static struct gpio_keys_platform_data gpio_key_info = {
@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
 &camera_device,
 &ceu0_device,
 &fsi_device,
-&fsi_hdmi_device,
 &fsi_wm8978_device,
+&fsi_hdmi_device,
 };
 
 static void __init eva_clock_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7ea2b31e3199..c129542f6aed 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
 * - J30 "open"
 * - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
 * - add .get_vbus = usbhs_get_vbus in usbhs1_private
+* - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
 */
 #define IRQ8 evt2irq(0x0300)
 #define USB_PHY_MODE (1 << 4)
@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
 &nor_flash_device,
 &smc911x_device,
 &lcdc_device,
-&usbhs1_device,
 &usbhs0_device,
+&usbhs1_device,
 &leds_device,
 &fsi_device,
 &fsi_ak4643_device,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 3a528cf4366c..fcf5a47f4772 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
 
 static struct platform_device eth_device = {
 .name = "smsc911x",
-.id = 0,
+.id = -1,
 .dev = {
 .platform_data = &smsc911x_platdata,
 },
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index ee447404c857..588555a67d9c 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
 return 0; /* always allow wakeup */
 }
 
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
 
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
 #define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE)
 
 INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4e7d1182e8a3..051204fc4617 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 vunmap(cpu_addr);
 }
 
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+
 struct dma_pool {
 size_t size;
 spinlock_t lock;
 unsigned long *bitmap;
 unsigned long nr_pages;
 void *vaddr;
-struct page *page;
+struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
-.size = SZ_256K,
+.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };
 
 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+/*
+* Catch any attempt to set the pool size too late.
+*/
+BUG_ON(atomic_pool.vaddr);
+
+/*
+* Set architecture specific coherent pool size only if
+* it has not been changed by kernel command line parameter.
+*/
+if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+atomic_pool.size = size;
+}
+
 /*
 * Initialise the coherent pool for atomic allocations.
 */
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
 unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 unsigned long *bitmap;
 struct page *page;
+struct page **pages;
 void *ptr;
 int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
 if (!bitmap)
 goto no_bitmap;
 
+pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+if (!pages)
+goto no_pages;
+
 if (IS_ENABLED(CONFIG_CMA))
 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
 else
 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
 &page, NULL);
 if (ptr) {
+int i;
+
+for (i = 0; i < nr_pages; i++)
+pages[i] = page + i;
+
 spin_lock_init(&pool->lock);
 pool->vaddr = ptr;
-pool->page = page;
+pool->pages = pages;
 pool->bitmap = bitmap;
 pool->nr_pages = nr_pages;
 pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
 (unsigned)pool->size / 1024);
 return 0;
 }
+no_pages:
 kfree(bitmap);
 no_bitmap:
 pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 if (pageno < pool->nr_pages) {
 bitmap_set(pool->bitmap, pageno, count);
 ptr = pool->vaddr + PAGE_SIZE * pageno;
-*ret_page = pool->page + pageno;
+*ret_page = pool->pages[pageno];
+} else {
+pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+"Please increase it with coherent_pool= kernel parameter!\n",
+(unsigned)pool->size / 1024);
 }
 spin_unlock_irqrestore(&pool->lock, flags);
 
 return ptr;
 }
 
+static bool __in_atomic_pool(void *start, size_t size)
+{
+struct dma_pool *pool = &atomic_pool;
+void *end = start + size;
+void *pool_start = pool->vaddr;
+void *pool_end = pool->vaddr + pool->size;
+
+if (start < pool_start || start > pool_end)
+return false;
+
+if (end <= pool_end)
+return true;
+
+WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+start, end - 1, pool_start, pool_end - 1);
+
+return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
 struct dma_pool *pool = &atomic_pool;
 unsigned long pageno, count;
 unsigned long flags;
 
-if (start < pool->vaddr || start > pool->vaddr + pool->size)
+if (!__in_atomic_pool(start, size))
 return 0;
 
-if (start + size > pool->vaddr + pool->size) {
-WARN(1, "freeing wrong coherent size from pool\n");
-return 0;
-}
-
 pageno = (start - pool->vaddr) >> PAGE_SHIFT;
 count = size >> PAGE_SHIFT;
 
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 return 0;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+struct dma_pool *pool = &atomic_pool;
+struct page **pages = pool->pages;
+int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
 struct vm_struct *area;
 
+if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+return __atomic_get_pages(cpu_addr);
+
 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 return cpu_addr;
 
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 return NULL;
 }
 
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+dma_addr_t *handle)
+{
+struct page *page;
+void *addr;
+
+addr = __alloc_from_pool(size, &page);
+if (!addr)
+return NULL;
+
+*handle = __iommu_create_mapping(dev, &page, size);
+if (*handle == DMA_ERROR_CODE)
+goto err_mapping;
+
+return addr;
+
+err_mapping:
+__free_from_pool(addr, size);
+return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+dma_addr_t handle, size_t size)
+{
+__iommu_remove_mapping(dev, handle, size);
+__free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 *handle = DMA_ERROR_CODE;
 size = PAGE_ALIGN(size);
 
+if (gfp & GFP_ATOMIC)
+return __iommu_alloc_atomic(dev, size, handle);
+
 pages = __iommu_alloc_buffer(dev, size, gfp);
 if (!pages)
 return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 return;
 }
 
+if (__in_atomic_pool(cpu_addr, size)) {
+__iommu_free_atomic(dev, pages, handle, size);
+return;
+}
+
 if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
 unmap_kernel_range((unsigned long)cpu_addr, size);
 vunmap(cpu_addr);
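For context on the dma-mapping.c changes above: the atomic pool, and the new __iommu_alloc_atomic()/__iommu_free_atomic() paths, back coherent allocations requested from contexts that cannot sleep. A hedged sketch of the kind of driver call that is served from this pool (device and size are placeholders):

    #include <linux/dma-mapping.h>

    /*
     * Illustrative only: a coherent allocation made where sleeping is not
     * allowed (IRQ context, under a spinlock) must pass GFP_ATOMIC, and on
     * ARM it is then carved out of the atomic coherent pool patched above.
     */
    static void *alloc_desc_ring_atomic(struct device *dev, size_t size,
                                        dma_addr_t *dma)
    {
            return dma_alloc_coherent(dev, size, dma, GFP_ATOMIC);
    }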
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 6c6defc24619..af9cf30ed474 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
 
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
 
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
 
 static __inline__ s64
 __atomic64_add_return(s64 i, atomic64_t *v)
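The change above makes ATOMIC_INIT()/ATOMIC64_INIT() expand to a plain brace initializer, as other architectures do, instead of a compound-literal cast. The brace form works in all the ordinary static-initializer contexts the kernel relies on, for example (illustrative only):

    #include <linux/atomic.h>

    /* ATOMIC_INIT() is commonly used as a static or struct-field initializer. */
    static atomic_t example_refcount = ATOMIC_INIT(1);

    struct example_stats {
            atomic_t errors;
    };

    static struct example_stats example_stats = {
            .errors = ATOMIC_INIT(0),
    };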
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b395c16..2c05a9292a81 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 cregs->ksp = (unsigned long)stack
 + (pregs->gr[21] & (THREAD_SIZE - 1));
 cregs->gr[30] = usp;
-if (p->personality == PER_HPUX) {
+if (personality(p->personality) == PER_HPUX) {
 #ifdef CONFIG_HPUX
 cregs->kpc = (unsigned long) &hpux_child_return;
 #else
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b932260f47..7426e40699bd 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
 long err;
 
 if (personality(current->personality) == PER_LINUX32
-&& personality == PER_LINUX)
-personality = PER_LINUX32;
+&& personality(personality) == PER_LINUX)
+personality = (personality & ~PER_MASK) | PER_LINUX32;
 
 err = sys_personality(personality);
-if (err == PER_LINUX32)
-err = PER_LINUX;
+if (personality(err) == PER_LINUX32)
+err = (err & ~PER_MASK) | PER_LINUX;
 
 return err;
 }
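The sys_parisc.c change above works because a personality value carries flag bits (such as ADDR_NO_RANDOMIZE) above PER_MASK: personality() masks them off before comparing, and the replacement value keeps them. A small sketch of the distinction, with illustrative helper names:

    #include <linux/personality.h>

    /*
     * personality(x) is (x & PER_MASK), so the masked comparison still
     * matches when flag bits like ADDR_NO_RANDOMIZE are set, while a raw
     * "== PER_LINUX32" comparison would not.
     */
    static int is_linux32(unsigned long pers)
    {
            return personality(pers) == PER_LINUX32;
    }

    /* Swap the base personality but preserve the flag bits, as the patch does. */
    static unsigned long force_linux32(unsigned long pers)
    {
            return (pers & ~PER_MASK) | PER_LINUX32;
    }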
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 53b6dfa83344..54b73a28c205 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap; /* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
 
 #ifdef CONFIG_PSERIES_IDLE
 extern void update_smt_snooze_delay(int snooze);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 85b05c463fae..e8995727b1c1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
 DEFINE(SIGSEGV, SIGSEGV);
 DEFINE(NMI_MASK, NMI_MASK);
 DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
 #else
 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 5b25c8060fd6..a892680668d8 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
 
 void doorbell_cause_ipi(int cpu, unsigned long data)
 {
+/* Order previous accesses vs. msgsnd, which is treated as a store */
+mb();
 ppc_msgsnd(PPC_DBELL, 0, data);
 }
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 4b01a25e29ef..b40e0b4815b3 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
 li r3,0
 b syscall_exit
 
+.section ".toc","aw"
+DSCR_DEFAULT:
+.tc dscr_default[TC],dscr_default
+
+.section ".text"
+
 /*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 mr r1,r8 /* start using new stack pointer */
 std r7,PACAKSAVE(r13)
 
-ld r6,_CCR(r1)
-mtcrf 0xFF,r6
-
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 ld r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+lwz r6,THREAD_DSCR_INHERIT(r4)
+ld r7,DSCR_DEFAULT@toc(2)
 ld r0,THREAD_DSCR(r4)
-cmpd r0,r25
-beq 1f
+cmpwi r6,0
+bne 1f
+ld r0,0(r7)
+1: cmpd r0,r25
+beq 2f
 mtspr SPRN_DSCR,r0
-1:
+2:
 END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 #endif
 
+ld r6,_CCR(r1)
+mtcrf 0xFF,r6
+
 /* r3-r13 are destroyed -- Cort */
 REST_8GPRS(14, r1)
 REST_10GPRS(22, r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e894515e77bb..39aa97d3ff88 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 
 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
-MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
 STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@ machine_check_common:
 
 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 7140d838339e..e11863f4e595 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
 lwz r4,ADDROFF(powersave_nap)(r3)
 cmpwi 0,r4,0
 beqlr
+/* fall through */
 
+_GLOBAL(power7_nap)
 /* NAP is a state loss, we create a regs frame on the
 * stack, fill it up with the state we care about and
 * stick a pointer to it in PACAR1. We really only
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 710f400476de..1a1f2ddfb581 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif /* CONFIG_PPC_STD_MMU_64 */
 #ifdef CONFIG_PPC64
 if (cpu_has_feature(CPU_FTR_DSCR)) {
-if (current->thread.dscr_inherit) {
-p->thread.dscr_inherit = 1;
-p->thread.dscr = current->thread.dscr;
-} else if (0 != dscr_default) {
-p->thread.dscr_inherit = 1;
-p->thread.dscr = dscr_default;
-} else {
-p->thread.dscr_inherit = 0;
-p->thread.dscr = 0;
-}
+p->thread.dscr_inherit = current->thread.dscr_inherit;
+p->thread.dscr = current->thread.dscr;
 }
 #endif
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0321007086f7..8d4214afc21d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 char *message = (char *)&info->messages;
 
+/*
+* Order previous accesses before accesses in the IPI handler.
+*/
+smp_mb();
 message[msg] = 1;
-mb();
+/*
+* cause_ipi functions are required to include a full barrier
+* before doing whatever causes the IPI.
+*/
 smp_ops->cause_ipi(cpu, info->data);
 }
 
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
 mb(); /* order any irq clear */
 
 do {
-all = xchg_local(&info->messages, 0);
+all = xchg(&info->messages, 0);
 
 #ifdef __BIG_ENDIAN
 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3529446c2abd..8302af649219 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
 return sprintf(buf, "%lx\n", dscr_default);
 }
 
+static void update_dscr(void *dummy)
+{
+if (!current->thread.dscr_inherit) {
+current->thread.dscr = dscr_default;
+mtspr(SPRN_DSCR, dscr_default);
+}
+}
+
 static ssize_t __used store_dscr_default(struct device *dev,
 struct device_attribute *attr, const char *buf,
 size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
 return -EINVAL;
 dscr_default = val;
 
+on_each_cpu(update_dscr, NULL, 1);
+
 return count;
 }
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index be171ee73bf8..e49e93191b69 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
 trace_timer_interrupt_exit(regs);
 }
 
+/*
+* Hypervisor decrementer interrupts shouldn't occur but are sometimes
+* left pending on exit from a KVM guest. We don't need to do anything
+* to clear them, as they are edge-triggered.
+*/
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
 #ifdef CONFIG_SUSPEND
 static void generic_suspend_disable_irqs(void)
 {
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 158972341a2d..ae0843fa7a61 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
 cpu_has_feature(CPU_FTR_DSCR)) {
 PPC_WARN_EMULATED(mtdscr, regs);
 rd = (instword >> 21) & 0x1f;
-mtspr(SPRN_DSCR, regs->gpr[rd]);
+current->thread.dscr = regs->gpr[rd];
 current->thread.dscr_inherit = 1;
+mtspr(SPRN_DSCR, current->thread.dscr);
 return 0;
 }
 #endif
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index dd223b3eb333..17e5b2364312 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 {
 int err;
 
-err = __put_user(instr, addr);
+__put_user_size(instr, addr, 4, err);
 if (err)
 return err;
 asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 39b159751c35..59213cfaeca9 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,
 
 /*
 * Update the node maps and sysfs entries for each cpu whose home node
-* has changed.
+* has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
 int arch_update_cpu_topology(void)
 {
-int cpu, nid, old_nid;
+int cpu, nid, old_nid, changed = 0;
 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 struct device *dev;
 
@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
 dev = get_cpu_device(cpu);
 if (dev)
 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+changed = 1;
 }
 
-return 1;
+return changed;
 }
 
 static void topology_work_fn(struct work_struct *work)
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 3ef46254c35b..7698b6e13c57 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
 {
 unsigned int cpu;
 
-/* If powersave_nap is enabled, use NAP mode, else just
-* spin aimlessly
-*/
-if (!powersave_nap) {
-generic_mach_cpu_die();
-return;
-}
-
 /* Standard hot unplug procedure */
 local_irq_disable();
 idle_task_exit();
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
 */
 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 while (!generic_check_cpu_restart(cpu)) {
-power7_idle();
+power7_nap();
 if (!generic_check_cpu_restart(cpu)) {
 DBG("CPU%d Unexpected exit while offline !\n", cpu);
 /* We may be getting an IPI, so we re-enable
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 14469cf9df68..df0fc5821469 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
 static inline void icp_hv_set_qirr(int n_cpu , u8 value)
 {
 int hw_cpu = get_hard_smp_processor_id(n_cpu);
-long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+long rc;
+
+/* Make sure all previous accesses are ordered before IPI sending */
+mb();
+rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
 if (rc != H_SUCCESS) {
 pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
 "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 32e8449640fa..9b94a160fe7f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,7 +180,8 @@ extern char elf_platform[];
 #define ELF_PLATFORM (elf_platform)
 
 #ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
 #else /* CONFIG_64BIT */
 #define SET_PERSONALITY(ex) \
 do { \
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7bcc14e395f0..bf2a2ad2f800 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -13,6 +13,7 @@
 */
 
 typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
 #define __kernel_size_t __kernel_size_t
 
 typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
 typedef unsigned short __kernel_ipc_pid_t;
 typedef unsigned short __kernel_uid_t;
 typedef unsigned short __kernel_gid_t;
-typedef int __kernel_ssize_t;
 typedef int __kernel_ptrdiff_t;
 
 #else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
 typedef int __kernel_ipc_pid_t;
 typedef unsigned int __kernel_uid_t;
 typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ssize_t;
 typedef long __kernel_ptrdiff_t;
 typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
 
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index a0a8340daafa..ce26ac3cb162 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data) | |||
44 | } | 44 | } |
45 | 45 | ||
46 | static inline int smp_find_processor_id(int address) { return 0; } | 46 | static inline int smp_find_processor_id(int address) { return 0; } |
47 | static inline int smp_store_status(int cpu) { return 0; } | ||
47 | static inline int smp_vcpu_scheduled(int cpu) { return 1; } | 48 | static inline int smp_vcpu_scheduled(int cpu) { return 1; } |
48 | static inline void smp_yield_cpu(int cpu) { } | 49 | static inline void smp_yield_cpu(int cpu) { } |
49 | static inline void smp_yield(void) { } | 50 | static inline void smp_yield(void) { } |
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c index f60238559af3..0748fe0c8a73 100644 --- a/arch/um/os-Linux/time.c +++ b/arch/um/os-Linux/time.c | |||
@@ -114,7 +114,7 @@ static void deliver_alarm(void) | |||
114 | skew += this_tick - last_tick; | 114 | skew += this_tick - last_tick; |
115 | 115 | ||
116 | while (skew >= one_tick) { | 116 | while (skew >= one_tick) { |
117 | alarm_handler(SIGVTALRM, NULL); | 117 | alarm_handler(SIGVTALRM, NULL, NULL); |
118 | skew -= one_tick; | 118 | skew -= one_tick; |
119 | } | 119 | } |
120 | 120 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dce75b760312..148ed666e311 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | |||
2000 | case MSR_KVM_STEAL_TIME: | 2000 | case MSR_KVM_STEAL_TIME: |
2001 | data = vcpu->arch.st.msr_val; | 2001 | data = vcpu->arch.st.msr_val; |
2002 | break; | 2002 | break; |
2003 | case MSR_KVM_PV_EOI_EN: | ||
2004 | data = vcpu->arch.pv_eoi.msr_val; | ||
2005 | break; | ||
2003 | case MSR_IA32_P5_MC_ADDR: | 2006 | case MSR_IA32_P5_MC_ADDR: |
2004 | case MSR_IA32_P5_MC_TYPE: | 2007 | case MSR_IA32_P5_MC_TYPE: |
2005 | case MSR_IA32_MCG_CAP: | 2008 | case MSR_IA32_MCG_CAP: |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index b65a76133f4f..5141d808e751 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, | |||
1283 | cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); | 1283 | cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); |
1284 | 1284 | ||
1285 | args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; | 1285 | args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; |
1286 | if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { | 1286 | if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { |
1287 | args->op.cmd = MMUEXT_INVLPG_MULTI; | 1287 | args->op.cmd = MMUEXT_INVLPG_MULTI; |
1288 | args->op.arg1.linear_addr = start; | 1288 | args->op.arg1.linear_addr = start; |
1289 | } | 1289 | } |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index d4b255463253..76ba0e97e530 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_ | |||
599 | if (p2m_index(set_pfn)) | 599 | if (p2m_index(set_pfn)) |
600 | return false; | 600 | return false; |
601 | 601 | ||
602 | for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { | 602 | for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { |
603 | topidx = p2m_top_index(pfn); | 603 | topidx = p2m_top_index(pfn); |
604 | 604 | ||
605 | if (!p2m_top[topidx]) | 605 | if (!p2m_top[topidx]) |
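The p2m change above is a textbook off-by-one: MAX_DOMAIN_PAGES is an exclusive upper bound, so looping with "<=" walks one P2M_PER_PAGE chunk past the last valid pfn. A self-contained sketch of the same loop-bound bug, with made-up constants:

#include <stdio.h>

#define CHUNK 4
#define MAX   12        /* valid indices are 0 .. MAX - 1 */

int main(void)
{
        int i;

        for (i = 0; i <= MAX; i += CHUNK)       /* buggy: also visits i == 12 */
                printf("buggy visit %d\n", i);

        for (i = 0; i < MAX; i += CHUNK)        /* fixed: stops after i == 8 */
                printf("fixed visit %d\n", i);

        return 0;
}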
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 78efb0306a44..34d94c762a1e 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c | |||
@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size, | |||
250 | return -EINVAL; | 250 | return -EINVAL; |
251 | 251 | ||
252 | /* Sanitise input arguments */ | 252 | /* Sanitise input arguments */ |
253 | alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); | 253 | alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); |
254 | base = ALIGN(base, alignment); | 254 | base = ALIGN(base, alignment); |
255 | size = ALIGN(size, alignment); | 255 | size = ALIGN(size, alignment); |
256 | limit &= ~(alignment - 1); | 256 | limit &= ~(alignment - 1); |
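The dma-contiguous change accounts for the fact that the largest buddy-allocator block is order MAX_ORDER - 1, not MAX_ORDER, so the old expression over-aligned every CMA region by a factor of two. A worked example using typical x86 values (PAGE_SIZE 4 KiB, MAX_ORDER 11, pageblock_order 9); these constants are assumptions for illustration, not taken from the patch:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define MAX_ORDER       11
#define PAGEBLOCK_ORDER 9
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned long old_align = PAGE_SIZE << MAX(MAX_ORDER, PAGEBLOCK_ORDER);
        unsigned long new_align = PAGE_SIZE << MAX(MAX_ORDER - 1, PAGEBLOCK_ORDER);

        /* The buddy allocator's largest block is order MAX_ORDER - 1, so the
         * old expression over-aligned the CMA area by a factor of two. */
        printf("old alignment: %lu KiB\n", old_align >> 10);   /* 8192 KiB */
        printf("new alignment: %lu KiB\n", new_align >> 10);   /* 4096 KiB */
        return 0;
}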
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b16c8a72a2e2..ba7926f5c099 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ | |||
294 | 294 | ||
295 | config GPIO_MC9S08DZ60 | 295 | config GPIO_MC9S08DZ60 |
296 | bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" | 296 | bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" |
297 | depends on I2C && MACH_MX35_3DS | 297 | depends on I2C=y && MACH_MX35_3DS |
298 | help | 298 | help |
299 | Select this to enable the MC9S08DZ60 GPIO driver | 299 | Select this to enable the MC9S08DZ60 GPIO driver |
300 | 300 | ||
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index ae37181798b3..ec48ed512628 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c | |||
@@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) | |||
247 | 247 | ||
248 | p->irq_base = irq_alloc_descs(pdata->irq_base, 0, | 248 | p->irq_base = irq_alloc_descs(pdata->irq_base, 0, |
249 | pdata->number_of_pins, numa_node_id()); | 249 | pdata->number_of_pins, numa_node_id()); |
250 | if (IS_ERR_VALUE(p->irq_base)) { | 250 | if (p->irq_base < 0) { |
251 | dev_err(&pdev->dev, "cannot get irq_desc\n"); | 251 | dev_err(&pdev->dev, "cannot get irq_desc\n"); |
252 | return -ENXIO; | 252 | return p->irq_base; |
253 | } | 253 | } |
254 | pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", | 254 | pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", |
255 | pdata->gpio_base, pdata->number_of_pins, p->irq_base); | 255 | pdata->gpio_base, pdata->number_of_pins, p->irq_base); |
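The gpio-em hunk replaces a blanket -ENXIO with the negative errno returned by irq_alloc_descs(), so callers see the real failure reason. A minimal sketch of that pattern, with an invented helper standing in for irq_alloc_descs():

#include <errno.h>
#include <stdio.h>

static int alloc_descs_demo(int want)
{
        return want > 0 ? want : -ENOMEM;       /* stand-in for irq_alloc_descs() */
}

static int probe_demo(int want)
{
        int base = alloc_descs_demo(want);

        if (base < 0) {
                fprintf(stderr, "cannot get irq_desc\n");
                return base;                    /* was: return -ENXIO; */
        }
        return 0;
}

int main(void)
{
        printf("%d\n", probe_demo(4));          /* 0 */
        printf("%d\n", probe_demo(0));          /* -ENOMEM */
        return 0;
}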
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c index e97016af6443..b62d443e9a59 100644 --- a/drivers/gpio/gpio-rdc321x.c +++ b/drivers/gpio/gpio-rdc321x.c | |||
@@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) | |||
170 | rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; | 170 | rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; |
171 | 171 | ||
172 | rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; | 172 | rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; |
173 | rdc321x_gpio_dev->chip.owner = THIS_MODULE; | ||
173 | rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; | 174 | rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; |
174 | rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; | 175 | rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; |
175 | rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; | 176 | rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a18c4aa68b1e..f1a45997aea8 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname, | |||
82 | gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); | 82 | gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); |
83 | 83 | ||
84 | of_node_put(gg_data.gpiospec.np); | 84 | of_node_put(gg_data.gpiospec.np); |
85 | pr_debug("%s exited with status %d\n", __func__, ret); | 85 | pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio); |
86 | return gg_data.out_gpio; | 86 | return gg_data.out_gpio; |
87 | } | 87 | } |
88 | EXPORT_SYMBOL(of_get_named_gpio_flags); | 88 | EXPORT_SYMBOL(of_get_named_gpio_flags); |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 08a7aa722d6b..6fbfc244748f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, | |||
1981 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 1981 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
1982 | return -EINVAL; | 1982 | return -EINVAL; |
1983 | 1983 | ||
1984 | if (!req->flags) | 1984 | if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) |
1985 | return -EINVAL; | 1985 | return -EINVAL; |
1986 | 1986 | ||
1987 | mutex_lock(&dev->mode_config.mutex); | 1987 | mutex_lock(&dev->mode_config.mutex); |
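The drm_crtc change tightens ioctl validation: a cursor request is rejected both when no flag is set and when any bit outside DRM_MODE_CURSOR_FLAGS is set. A small standalone sketch of that mask check; the flag values and struct-free helper are invented for illustration:

#include <stdio.h>

#define CURSOR_BO    0x01
#define CURSOR_MOVE  0x02
#define KNOWN_FLAGS  (CURSOR_BO | CURSOR_MOVE)

static int check_flags(unsigned int flags)
{
        if (!flags || (~KNOWN_FLAGS & flags))
                return -1;                      /* -EINVAL in the kernel */
        return 0;
}

int main(void)
{
        printf("%d\n", check_flags(0));         /* rejected: no flags set   */
        printf("%d\n", check_flags(CURSOR_MOVE)); /* accepted               */
        printf("%d\n", check_flags(0x10));      /* rejected: unknown bit    */
        return 0;
}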
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a8743c399e83..b7ee230572b7 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -87,6 +87,9 @@ static struct edid_quirk { | |||
87 | int product_id; | 87 | int product_id; |
88 | u32 quirks; | 88 | u32 quirks; |
89 | } edid_quirk_list[] = { | 89 | } edid_quirk_list[] = { |
90 | /* ASUS VW222S */ | ||
91 | { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, | ||
92 | |||
90 | /* Acer AL1706 */ | 93 | /* Acer AL1706 */ |
91 | { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, | 94 | { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, |
92 | /* Acer F51 */ | 95 | /* Acer F51 */ |
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 30dc22a7156c..8033526bb53b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c | |||
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, | |||
1362 | (struct drm_connector **) (psb_intel_crtc + 1); | 1362 | (struct drm_connector **) (psb_intel_crtc + 1); |
1363 | psb_intel_crtc->mode_set.num_connectors = 0; | 1363 | psb_intel_crtc->mode_set.num_connectors = 0; |
1364 | psb_intel_cursor_init(dev, psb_intel_crtc); | 1364 | psb_intel_cursor_init(dev, psb_intel_crtc); |
1365 | |||
1366 | /* Set to true so that the pipe is forced off on initial config. */ | ||
1367 | psb_intel_crtc->active = true; | ||
1365 | } | 1368 | } |
1366 | 1369 | ||
1367 | int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 1370 | int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d9a5372ec56f..60815b861ec2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
72 | /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 | 72 | /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 |
73 | * entries. For aliasing ppgtt support we just steal them at the end for | 73 | * entries. For aliasing ppgtt support we just steal them at the end for |
74 | * now. */ | 74 | * now. */ |
75 | first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; | 75 | first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; |
76 | 76 | ||
77 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); | 77 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); |
78 | if (!ppgtt) | 78 | if (!ppgtt) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a69a3d0d3acf..2dfa6cf4886b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1384,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
1384 | enum pipe pipe, int reg) | 1384 | enum pipe pipe, int reg) |
1385 | { | 1385 | { |
1386 | u32 val = I915_READ(reg); | 1386 | u32 val = I915_READ(reg); |
1387 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), | 1387 | WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
1388 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", | 1388 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
1389 | reg, pipe_name(pipe)); | 1389 | reg, pipe_name(pipe)); |
1390 | 1390 | ||
@@ -1404,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1404 | 1404 | ||
1405 | reg = PCH_ADPA; | 1405 | reg = PCH_ADPA; |
1406 | val = I915_READ(reg); | 1406 | val = I915_READ(reg); |
1407 | WARN(adpa_pipe_enabled(dev_priv, val, pipe), | 1407 | WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1408 | "PCH VGA enabled on transcoder %c, should be disabled\n", | 1408 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1409 | pipe_name(pipe)); | 1409 | pipe_name(pipe)); |
1410 | 1410 | ||
1411 | reg = PCH_LVDS; | 1411 | reg = PCH_LVDS; |
1412 | val = I915_READ(reg); | 1412 | val = I915_READ(reg); |
1413 | WARN(lvds_pipe_enabled(dev_priv, val, pipe), | 1413 | WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1414 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1414 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1415 | pipe_name(pipe)); | 1415 | pipe_name(pipe)); |
1416 | 1416 | ||
@@ -1872,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | |||
1872 | enum pipe pipe, int reg) | 1872 | enum pipe pipe, int reg) |
1873 | { | 1873 | { |
1874 | u32 val = I915_READ(reg); | 1874 | u32 val = I915_READ(reg); |
1875 | if (hdmi_pipe_enabled(dev_priv, val, pipe)) { | 1875 | if (hdmi_pipe_enabled(dev_priv, pipe, val)) { |
1876 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", | 1876 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", |
1877 | reg, pipe); | 1877 | reg, pipe); |
1878 | I915_WRITE(reg, val & ~PORT_ENABLE); | 1878 | I915_WRITE(reg, val & ~PORT_ENABLE); |
@@ -1894,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |||
1894 | 1894 | ||
1895 | reg = PCH_ADPA; | 1895 | reg = PCH_ADPA; |
1896 | val = I915_READ(reg); | 1896 | val = I915_READ(reg); |
1897 | if (adpa_pipe_enabled(dev_priv, val, pipe)) | 1897 | if (adpa_pipe_enabled(dev_priv, pipe, val)) |
1898 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); | 1898 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); |
1899 | 1899 | ||
1900 | reg = PCH_LVDS; | 1900 | reg = PCH_LVDS; |
1901 | val = I915_READ(reg); | 1901 | val = I915_READ(reg); |
1902 | if (lvds_pipe_enabled(dev_priv, val, pipe)) { | 1902 | if (lvds_pipe_enabled(dev_priv, pipe, val)) { |
1903 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); | 1903 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); |
1904 | I915_WRITE(reg, val & ~LVDS_PORT_EN); | 1904 | I915_WRITE(reg, val & ~LVDS_PORT_EN); |
1905 | POSTING_READ(reg); | 1905 | POSTING_READ(reg); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e05c0d3e3440..e9a6f6aaed85 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -780,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
780 | DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), | 780 | DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), |
781 | }, | 781 | }, |
782 | }, | 782 | }, |
783 | { | ||
784 | .callback = intel_no_lvds_dmi_callback, | ||
785 | .ident = "Gigabyte GA-D525TUD", | ||
786 | .matches = { | ||
787 | DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), | ||
788 | DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), | ||
789 | }, | ||
790 | }, | ||
783 | 791 | ||
784 | { } /* terminating entry */ | 792 | { } /* terminating entry */ |
785 | }; | 793 | }; |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index cc8df4de2d92..7644f31a3778 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, | |||
60 | 60 | ||
61 | switch (fb->pixel_format) { | 61 | switch (fb->pixel_format) { |
62 | case DRM_FORMAT_XBGR8888: | 62 | case DRM_FORMAT_XBGR8888: |
63 | sprctl |= SPRITE_FORMAT_RGBX888; | 63 | sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; |
64 | pixel_size = 4; | 64 | pixel_size = 4; |
65 | break; | 65 | break; |
66 | case DRM_FORMAT_XRGB8888: | 66 | case DRM_FORMAT_XRGB8888: |
67 | sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; | 67 | sprctl |= SPRITE_FORMAT_RGBX888; |
68 | pixel_size = 4; | 68 | pixel_size = 4; |
69 | break; | 69 | break; |
70 | case DRM_FORMAT_YUYV: | 70 | case DRM_FORMAT_YUYV: |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 1866dbb49979..c61014442aa9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -736,9 +736,11 @@ nouveau_card_init(struct drm_device *dev) | |||
736 | } | 736 | } |
737 | break; | 737 | break; |
738 | case NV_C0: | 738 | case NV_C0: |
739 | nvc0_copy_create(dev, 1); | 739 | if (!(nv_rd32(dev, 0x022500) & 0x00000200)) |
740 | nvc0_copy_create(dev, 1); | ||
740 | case NV_D0: | 741 | case NV_D0: |
741 | nvc0_copy_create(dev, 0); | 742 | if (!(nv_rd32(dev, 0x022500) & 0x00000100)) |
743 | nvc0_copy_create(dev, 0); | ||
742 | break; | 744 | break; |
743 | default: | 745 | default: |
744 | break; | 746 | break; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index f4d4505fe831..2817101fb167 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -258,7 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
258 | radeon_crtc->enabled = true; | 258 | radeon_crtc->enabled = true; |
259 | /* adjust pm to dpms changes BEFORE enabling crtcs */ | 259 | /* adjust pm to dpms changes BEFORE enabling crtcs */ |
260 | radeon_pm_compute_clocks(rdev); | 260 | radeon_pm_compute_clocks(rdev); |
261 | /* disable crtc pair power gating before programming */ | ||
262 | if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) | 261 | if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) |
263 | atombios_powergate_crtc(crtc, ATOM_DISABLE); | 262 | atombios_powergate_crtc(crtc, ATOM_DISABLE); |
264 | atombios_enable_crtc(crtc, ATOM_ENABLE); | 263 | atombios_enable_crtc(crtc, ATOM_ENABLE); |
@@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
278 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); | 277 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); |
279 | atombios_enable_crtc(crtc, ATOM_DISABLE); | 278 | atombios_enable_crtc(crtc, ATOM_DISABLE); |
280 | radeon_crtc->enabled = false; | 279 | radeon_crtc->enabled = false; |
281 | /* power gating is per-pair */ | 280 | if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) |
282 | if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { | 281 | atombios_powergate_crtc(crtc, ATOM_ENABLE); |
283 | struct drm_crtc *other_crtc; | ||
284 | struct radeon_crtc *other_radeon_crtc; | ||
285 | list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { | ||
286 | other_radeon_crtc = to_radeon_crtc(other_crtc); | ||
287 | if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) || | ||
288 | ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) || | ||
289 | ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) || | ||
290 | ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) || | ||
291 | ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) || | ||
292 | ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) { | ||
293 | /* if both crtcs in the pair are off, enable power gating */ | ||
294 | if (other_radeon_crtc->enabled == false) | ||
295 | atombios_powergate_crtc(crtc, ATOM_ENABLE); | ||
296 | break; | ||
297 | } | ||
298 | } | ||
299 | } | ||
300 | /* adjust pm to dpms changes AFTER disabling crtcs */ | 282 | /* adjust pm to dpms changes AFTER disabling crtcs */ |
301 | radeon_pm_compute_clocks(rdev); | 283 | radeon_pm_compute_clocks(rdev); |
302 | break; | 284 | break; |
@@ -1682,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) | |||
1682 | struct drm_device *dev = crtc->dev; | 1664 | struct drm_device *dev = crtc->dev; |
1683 | struct radeon_device *rdev = dev->dev_private; | 1665 | struct radeon_device *rdev = dev->dev_private; |
1684 | struct radeon_atom_ss ss; | 1666 | struct radeon_atom_ss ss; |
1667 | int i; | ||
1685 | 1668 | ||
1686 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | 1669 | atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); |
1687 | 1670 | ||
1671 | for (i = 0; i < rdev->num_crtc; i++) { | ||
1672 | if (rdev->mode_info.crtcs[i] && | ||
1673 | rdev->mode_info.crtcs[i]->enabled && | ||
1674 | i != radeon_crtc->crtc_id && | ||
1675 | radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) { | ||
1676 | /* one other crtc is using this pll don't turn | ||
1677 | * off the pll | ||
1678 | */ | ||
1679 | goto done; | ||
1680 | } | ||
1681 | } | ||
1682 | |||
1688 | switch (radeon_crtc->pll_id) { | 1683 | switch (radeon_crtc->pll_id) { |
1689 | case ATOM_PPLL1: | 1684 | case ATOM_PPLL1: |
1690 | case ATOM_PPLL2: | 1685 | case ATOM_PPLL2: |
@@ -1701,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) | |||
1701 | default: | 1696 | default: |
1702 | break; | 1697 | break; |
1703 | } | 1698 | } |
1699 | done: | ||
1704 | radeon_crtc->pll_id = -1; | 1700 | radeon_crtc->pll_id = -1; |
1705 | } | 1701 | } |
1706 | 1702 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7712cf5ab33b..3623b98ed3fe 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, | |||
577 | struct radeon_device *rdev = dev->dev_private; | 577 | struct radeon_device *rdev = dev->dev_private; |
578 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 578 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
579 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | 579 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
580 | u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); | ||
581 | u8 tmp; | ||
580 | 582 | ||
581 | if (!ASIC_IS_DCE4(rdev)) | 583 | if (!ASIC_IS_DCE4(rdev)) |
582 | return panel_mode; | 584 | return panel_mode; |
583 | 585 | ||
584 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == | 586 | if (dp_bridge != ENCODER_OBJECT_ID_NONE) { |
585 | ENCODER_OBJECT_ID_NUTMEG) | 587 | /* DP bridge chips */ |
586 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | 588 | tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); |
587 | else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == | 589 | if (tmp & 1) |
588 | ENCODER_OBJECT_ID_TRAVIS) { | 590 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
589 | u8 id[6]; | 591 | else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || |
590 | int i; | 592 | (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) |
591 | for (i = 0; i < 6; i++) | ||
592 | id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i); | ||
593 | if (id[0] == 0x73 && | ||
594 | id[1] == 0x69 && | ||
595 | id[2] == 0x76 && | ||
596 | id[3] == 0x61 && | ||
597 | id[4] == 0x72 && | ||
598 | id[5] == 0x54) | ||
599 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | 593 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; |
600 | else | 594 | else |
601 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | 595 | panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
602 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | 596 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
603 | u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); | 597 | /* eDP */ |
598 | tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); | ||
604 | if (tmp & 1) | 599 | if (tmp & 1) |
605 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | 600 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
606 | } | 601 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index f9bc27fe269a..6e8803a1170c 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1379 | struct drm_device *dev = encoder->dev; | 1379 | struct drm_device *dev = encoder->dev; |
1380 | struct radeon_device *rdev = dev->dev_private; | 1380 | struct radeon_device *rdev = dev->dev_private; |
1381 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1381 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1382 | struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); | ||
1383 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
1382 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1384 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1383 | struct radeon_connector *radeon_connector = NULL; | 1385 | struct radeon_connector *radeon_connector = NULL; |
1384 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; | 1386 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; |
@@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1390 | 1392 | ||
1391 | switch (mode) { | 1393 | switch (mode) { |
1392 | case DRM_MODE_DPMS_ON: | 1394 | case DRM_MODE_DPMS_ON: |
1393 | /* some early dce3.2 boards have a bug in their transmitter control table */ | 1395 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { |
1394 | if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || | 1396 | if (!connector) |
1395 | ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { | 1397 | dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
1396 | if (ASIC_IS_DCE6(rdev)) { | 1398 | else |
1397 | /* It seems we need to call ATOM_ENCODER_CMD_SETUP again | 1399 | dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); |
1398 | * before reenabling encoder on DPMS ON, otherwise we never | 1400 | |
1399 | * get picture | 1401 | /* setup and enable the encoder */ |
1400 | */ | 1402 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); |
1401 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); | 1403 | atombios_dig_encoder_setup(encoder, |
1404 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, | ||
1405 | dig->panel_mode); | ||
1406 | if (ext_encoder) { | ||
1407 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) | ||
1408 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1409 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); | ||
1402 | } | 1410 | } |
1403 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | 1411 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); |
1404 | } else { | 1412 | } else if (ASIC_IS_DCE4(rdev)) { |
1413 | /* setup and enable the encoder */ | ||
1414 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); | ||
1415 | /* enable the transmitter */ | ||
1416 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | ||
1405 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | 1417 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); |
1418 | } else { | ||
1419 | /* setup and enable the encoder and transmitter */ | ||
1420 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); | ||
1421 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); | ||
1422 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | ||
1423 | /* some early dce3.2 boards have a bug in their transmitter control table */ | ||
1424 | if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) | ||
1425 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
1406 | } | 1426 | } |
1407 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { | 1427 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { |
1408 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | 1428 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
@@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1420 | case DRM_MODE_DPMS_STANDBY: | 1440 | case DRM_MODE_DPMS_STANDBY: |
1421 | case DRM_MODE_DPMS_SUSPEND: | 1441 | case DRM_MODE_DPMS_SUSPEND: |
1422 | case DRM_MODE_DPMS_OFF: | 1442 | case DRM_MODE_DPMS_OFF: |
1423 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) | 1443 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { |
1444 | /* disable the transmitter */ | ||
1424 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | 1445 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); |
1425 | else | 1446 | } else if (ASIC_IS_DCE4(rdev)) { |
1447 | /* disable the transmitter */ | ||
1448 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | ||
1449 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
1450 | } else { | ||
1451 | /* disable the encoder and transmitter */ | ||
1426 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | 1452 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); |
1453 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
1454 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); | ||
1455 | } | ||
1427 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { | 1456 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { |
1428 | if (ASIC_IS_DCE4(rdev)) | 1457 | if (ASIC_IS_DCE4(rdev)) |
1429 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); | 1458 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); |
@@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1740 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 1769 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
1741 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1770 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1742 | struct drm_encoder *test_encoder; | 1771 | struct drm_encoder *test_encoder; |
1743 | struct radeon_encoder_atom_dig *dig; | 1772 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
1744 | uint32_t dig_enc_in_use = 0; | 1773 | uint32_t dig_enc_in_use = 0; |
1745 | 1774 | ||
1746 | /* DCE4/5 */ | 1775 | if (ASIC_IS_DCE6(rdev)) { |
1747 | if (ASIC_IS_DCE4(rdev)) { | 1776 | /* DCE6 */ |
1748 | dig = radeon_encoder->enc_priv; | 1777 | switch (radeon_encoder->encoder_id) { |
1749 | if (ASIC_IS_DCE41(rdev)) { | 1778 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1779 | if (dig->linkb) | ||
1780 | return 1; | ||
1781 | else | ||
1782 | return 0; | ||
1783 | break; | ||
1784 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
1785 | if (dig->linkb) | ||
1786 | return 3; | ||
1787 | else | ||
1788 | return 2; | ||
1789 | break; | ||
1790 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
1791 | if (dig->linkb) | ||
1792 | return 5; | ||
1793 | else | ||
1794 | return 4; | ||
1795 | break; | ||
1796 | } | ||
1797 | } else if (ASIC_IS_DCE4(rdev)) { | ||
1798 | /* DCE4/5 */ | ||
1799 | if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { | ||
1750 | /* ontario follows DCE4 */ | 1800 | /* ontario follows DCE4 */ |
1751 | if (rdev->family == CHIP_PALM) { | 1801 | if (rdev->family == CHIP_PALM) { |
1752 | if (dig->linkb) | 1802 | if (dig->linkb) |
@@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1848 | struct drm_device *dev = encoder->dev; | 1898 | struct drm_device *dev = encoder->dev; |
1849 | struct radeon_device *rdev = dev->dev_private; | 1899 | struct radeon_device *rdev = dev->dev_private; |
1850 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1900 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1851 | struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); | ||
1852 | 1901 | ||
1853 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1902 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1854 | 1903 | ||
1904 | /* need to call this here rather than in prepare() since we need some crtc info */ | ||
1905 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
1906 | |||
1855 | if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { | 1907 | if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { |
1856 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) | 1908 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) |
1857 | atombios_yuv_setup(encoder, true); | 1909 | atombios_yuv_setup(encoder, true); |
@@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1870 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1922 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
1871 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 1923 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
1872 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 1924 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
1873 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { | 1925 | /* handled in dpms */ |
1874 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1875 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
1876 | |||
1877 | if (!connector) | ||
1878 | dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | ||
1879 | else | ||
1880 | dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); | ||
1881 | |||
1882 | /* setup and enable the encoder */ | ||
1883 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); | ||
1884 | atombios_dig_encoder_setup(encoder, | ||
1885 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, | ||
1886 | dig->panel_mode); | ||
1887 | } else if (ASIC_IS_DCE4(rdev)) { | ||
1888 | /* disable the transmitter */ | ||
1889 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
1890 | /* setup and enable the encoder */ | ||
1891 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); | ||
1892 | |||
1893 | /* enable the transmitter */ | ||
1894 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | ||
1895 | } else { | ||
1896 | /* disable the encoder and transmitter */ | ||
1897 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
1898 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); | ||
1899 | |||
1900 | /* setup and enable the encoder and transmitter */ | ||
1901 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); | ||
1902 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); | ||
1903 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); | ||
1904 | } | ||
1905 | break; | 1926 | break; |
1906 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1927 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1907 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 1928 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
@@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1922 | break; | 1943 | break; |
1923 | } | 1944 | } |
1924 | 1945 | ||
1925 | if (ext_encoder) { | ||
1926 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) | ||
1927 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1928 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); | ||
1929 | else | ||
1930 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | ||
1931 | } | ||
1932 | |||
1933 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 1946 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
1934 | 1947 | ||
1935 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { | 1948 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
@@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
2116 | } | 2129 | } |
2117 | 2130 | ||
2118 | radeon_atom_output_lock(encoder, true); | 2131 | radeon_atom_output_lock(encoder, true); |
2119 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
2120 | 2132 | ||
2121 | if (connector) { | 2133 | if (connector) { |
2122 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 2134 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
@@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
2137 | 2149 | ||
2138 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) | 2150 | static void radeon_atom_encoder_commit(struct drm_encoder *encoder) |
2139 | { | 2151 | { |
2152 | /* need to call this here as we need the crtc set up */ | ||
2140 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); | 2153 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); |
2141 | radeon_atom_output_lock(encoder, false); | 2154 | radeon_atom_output_lock(encoder, false); |
2142 | } | 2155 | } |
@@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
2177 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 2190 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
2178 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 2191 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
2179 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 2192 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
2180 | if (ASIC_IS_DCE4(rdev)) | 2193 | /* handled in dpms */ |
2181 | /* disable the transmitter */ | ||
2182 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
2183 | else { | ||
2184 | /* disable the encoder and transmitter */ | ||
2185 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
2186 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); | ||
2187 | } | ||
2188 | break; | 2194 | break; |
2189 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 2195 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
2190 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 2196 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index ab74e6b149e7..f37676d7f217 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -63,6 +63,7 @@ struct r600_cs_track { | |||
63 | u32 cb_color_size_idx[8]; /* unused */ | 63 | u32 cb_color_size_idx[8]; /* unused */ |
64 | u32 cb_target_mask; | 64 | u32 cb_target_mask; |
65 | u32 cb_shader_mask; /* unused */ | 65 | u32 cb_shader_mask; /* unused */ |
66 | bool is_resolve; | ||
66 | u32 cb_color_size[8]; | 67 | u32 cb_color_size[8]; |
67 | u32 vgt_strmout_en; | 68 | u32 vgt_strmout_en; |
68 | u32 vgt_strmout_buffer_en; | 69 | u32 vgt_strmout_buffer_en; |
@@ -315,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track) | |||
315 | track->cb_color_bo[i] = NULL; | 316 | track->cb_color_bo[i] = NULL; |
316 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; | 317 | track->cb_color_bo_offset[i] = 0xFFFFFFFF; |
317 | track->cb_color_bo_mc[i] = 0xFFFFFFFF; | 318 | track->cb_color_bo_mc[i] = 0xFFFFFFFF; |
318 | } | 319 | track->cb_color_frag_bo[i] = NULL; |
320 | track->cb_color_frag_offset[i] = 0xFFFFFFFF; | ||
321 | track->cb_color_tile_bo[i] = NULL; | ||
322 | track->cb_color_tile_offset[i] = 0xFFFFFFFF; | ||
323 | track->cb_color_mask[i] = 0xFFFFFFFF; | ||
324 | } | ||
325 | track->is_resolve = false; | ||
326 | track->nsamples = 16; | ||
327 | track->log_nsamples = 4; | ||
319 | track->cb_target_mask = 0xFFFFFFFF; | 328 | track->cb_target_mask = 0xFFFFFFFF; |
320 | track->cb_shader_mask = 0xFFFFFFFF; | 329 | track->cb_shader_mask = 0xFFFFFFFF; |
321 | track->cb_dirty = true; | 330 | track->cb_dirty = true; |
@@ -352,6 +361,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
352 | volatile u32 *ib = p->ib.ptr; | 361 | volatile u32 *ib = p->ib.ptr; |
353 | unsigned array_mode; | 362 | unsigned array_mode; |
354 | u32 format; | 363 | u32 format; |
364 | /* When resolve is used, the second colorbuffer has always 1 sample. */ | ||
365 | unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples; | ||
355 | 366 | ||
356 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; | 367 | size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; |
357 | format = G_0280A0_FORMAT(track->cb_color_info[i]); | 368 | format = G_0280A0_FORMAT(track->cb_color_info[i]); |
@@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
375 | array_check.group_size = track->group_size; | 386 | array_check.group_size = track->group_size; |
376 | array_check.nbanks = track->nbanks; | 387 | array_check.nbanks = track->nbanks; |
377 | array_check.npipes = track->npipes; | 388 | array_check.npipes = track->npipes; |
378 | array_check.nsamples = track->nsamples; | 389 | array_check.nsamples = nsamples; |
379 | array_check.blocksize = r600_fmt_get_blocksize(format); | 390 | array_check.blocksize = r600_fmt_get_blocksize(format); |
380 | if (r600_get_array_mode_alignment(&array_check, | 391 | if (r600_get_array_mode_alignment(&array_check, |
381 | &pitch_align, &height_align, &depth_align, &base_align)) { | 392 | &pitch_align, &height_align, &depth_align, &base_align)) { |
@@ -421,7 +432,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) | |||
421 | 432 | ||
422 | /* check offset */ | 433 | /* check offset */ |
423 | tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * | 434 | tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * |
424 | r600_fmt_get_blocksize(format) * track->nsamples; | 435 | r600_fmt_get_blocksize(format) * nsamples; |
425 | switch (array_mode) { | 436 | switch (array_mode) { |
426 | default: | 437 | default: |
427 | case V_0280A0_ARRAY_LINEAR_GENERAL: | 438 | case V_0280A0_ARRAY_LINEAR_GENERAL: |
@@ -792,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) | |||
792 | */ | 803 | */ |
793 | if (track->cb_dirty) { | 804 | if (track->cb_dirty) { |
794 | tmp = track->cb_target_mask; | 805 | tmp = track->cb_target_mask; |
806 | |||
807 | /* We must check both colorbuffers for RESOLVE. */ | ||
808 | if (track->is_resolve) { | ||
809 | tmp |= 0xff; | ||
810 | } | ||
811 | |||
795 | for (i = 0; i < 8; i++) { | 812 | for (i = 0; i < 8; i++) { |
796 | if ((tmp >> (i * 4)) & 0xF) { | 813 | if ((tmp >> (i * 4)) & 0xF) { |
797 | /* at least one component is enabled */ | 814 | /* at least one component is enabled */ |
@@ -1281,6 +1298,11 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1281 | track->nsamples = 1 << tmp; | 1298 | track->nsamples = 1 << tmp; |
1282 | track->cb_dirty = true; | 1299 | track->cb_dirty = true; |
1283 | break; | 1300 | break; |
1301 | case R_028808_CB_COLOR_CONTROL: | ||
1302 | tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); | ||
1303 | track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; | ||
1304 | track->cb_dirty = true; | ||
1305 | break; | ||
1284 | case R_0280A0_CB_COLOR0_INFO: | 1306 | case R_0280A0_CB_COLOR0_INFO: |
1285 | case R_0280A4_CB_COLOR1_INFO: | 1307 | case R_0280A4_CB_COLOR1_INFO: |
1286 | case R_0280A8_CB_COLOR2_INFO: | 1308 | case R_0280A8_CB_COLOR2_INFO: |
@@ -1416,7 +1438,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) | |||
1416 | case R_028118_CB_COLOR6_MASK: | 1438 | case R_028118_CB_COLOR6_MASK: |
1417 | case R_02811C_CB_COLOR7_MASK: | 1439 | case R_02811C_CB_COLOR7_MASK: |
1418 | tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; | 1440 | tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; |
1419 | track->cb_color_mask[tmp] = ib[idx]; | 1441 | track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); |
1420 | if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { | 1442 | if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { |
1421 | track->cb_dirty = true; | 1443 | track->cb_dirty = true; |
1422 | } | 1444 | } |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index bdb69a63062f..fa6f37099ba9 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -66,6 +66,14 @@ | |||
66 | #define CC_RB_BACKEND_DISABLE 0x98F4 | 66 | #define CC_RB_BACKEND_DISABLE 0x98F4 |
67 | #define BACKEND_DISABLE(x) ((x) << 16) | 67 | #define BACKEND_DISABLE(x) ((x) << 16) |
68 | 68 | ||
69 | #define R_028808_CB_COLOR_CONTROL 0x28808 | ||
70 | #define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4) | ||
71 | #define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7) | ||
72 | #define C_028808_SPECIAL_OP 0xFFFFFF8F | ||
73 | #define V_028808_SPECIAL_NORMAL 0x00 | ||
74 | #define V_028808_SPECIAL_DISABLE 0x01 | ||
75 | #define V_028808_SPECIAL_RESOLVE_BOX 0x07 | ||
76 | |||
69 | #define CB_COLOR0_BASE 0x28040 | 77 | #define CB_COLOR0_BASE 0x28040 |
70 | #define CB_COLOR1_BASE 0x28044 | 78 | #define CB_COLOR1_BASE 0x28044 |
71 | #define CB_COLOR2_BASE 0x28048 | 79 | #define CB_COLOR2_BASE 0x28048 |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index d2e243867ac6..7a3daebd732d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1051,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1051 | if (rdev->flags & RADEON_IS_AGP) | 1051 | if (rdev->flags & RADEON_IS_AGP) |
1052 | rdev->need_dma32 = true; | 1052 | rdev->need_dma32 = true; |
1053 | if ((rdev->flags & RADEON_IS_PCI) && | 1053 | if ((rdev->flags & RADEON_IS_PCI) && |
1054 | (rdev->family < CHIP_RS400)) | 1054 | (rdev->family <= CHIP_RS740)) |
1055 | rdev->need_dma32 = true; | 1055 | rdev->need_dma32 = true; |
1056 | 1056 | ||
1057 | dma_bits = rdev->need_dma32 ? 32 : 40; | 1057 | dma_bits = rdev->need_dma32 ? 32 : 40; |
@@ -1346,12 +1346,15 @@ retry: | |||
1346 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 1346 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
1347 | radeon_ring_restore(rdev, &rdev->ring[i], | 1347 | radeon_ring_restore(rdev, &rdev->ring[i], |
1348 | ring_sizes[i], ring_data[i]); | 1348 | ring_sizes[i], ring_data[i]); |
1349 | ring_sizes[i] = 0; | ||
1350 | ring_data[i] = NULL; | ||
1349 | } | 1351 | } |
1350 | 1352 | ||
1351 | r = radeon_ib_ring_tests(rdev); | 1353 | r = radeon_ib_ring_tests(rdev); |
1352 | if (r) { | 1354 | if (r) { |
1353 | dev_err(rdev->dev, "ib ring test failed (%d).\n", r); | 1355 | dev_err(rdev->dev, "ib ring test failed (%d).\n", r); |
1354 | if (saved) { | 1356 | if (saved) { |
1357 | saved = false; | ||
1355 | radeon_suspend(rdev); | 1358 | radeon_suspend(rdev); |
1356 | goto retry; | 1359 | goto retry; |
1357 | } | 1360 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 27d22d709c90..8c593ea82c41 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -63,9 +63,10 @@ | |||
63 | * 2.19.0 - r600-eg: MSAA textures | 63 | * 2.19.0 - r600-eg: MSAA textures |
64 | * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query | 64 | * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query |
65 | * 2.21.0 - r600-r700: FMASK and CMASK | 65 | * 2.21.0 - r600-r700: FMASK and CMASK |
66 | * 2.22.0 - r600 only: RESOLVE_BOX allowed | ||
66 | */ | 67 | */ |
67 | #define KMS_DRIVER_MAJOR 2 | 68 | #define KMS_DRIVER_MAJOR 2 |
68 | #define KMS_DRIVER_MINOR 21 | 69 | #define KMS_DRIVER_MINOR 22 |
69 | #define KMS_DRIVER_PATCHLEVEL 0 | 70 | #define KMS_DRIVER_PATCHLEVEL 0 |
70 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); | 71 | int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); |
71 | int radeon_driver_unload_kms(struct drm_device *dev); | 72 | int radeon_driver_unload_kms(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index f93e45d869f4..20bfbda7b3f1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 | |||
@@ -744,7 +744,6 @@ r600 0x9400 | |||
744 | 0x00028C38 CB_CLRCMP_DST | 744 | 0x00028C38 CB_CLRCMP_DST |
745 | 0x00028C3C CB_CLRCMP_MSK | 745 | 0x00028C3C CB_CLRCMP_MSK |
746 | 0x00028C34 CB_CLRCMP_SRC | 746 | 0x00028C34 CB_CLRCMP_SRC |
747 | 0x00028808 CB_COLOR_CONTROL | ||
748 | 0x0002842C CB_FOG_BLUE | 747 | 0x0002842C CB_FOG_BLUE |
749 | 0x00028428 CB_FOG_GREEN | 748 | 0x00028428 CB_FOG_GREEN |
750 | 0x00028424 CB_FOG_RED | 749 | 0x00028424 CB_FOG_RED |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 8bf8a64e5115..8bcd168fffae 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field, | |||
996 | struct hid_driver *hdrv = hid->driver; | 996 | struct hid_driver *hdrv = hid->driver; |
997 | int ret; | 997 | int ret; |
998 | 998 | ||
999 | hid_dump_input(hid, usage, value); | 999 | if (!list_empty(&hid->debug_list)) |
1000 | hid_dump_input(hid, usage, value); | ||
1000 | 1001 | ||
1001 | if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { | 1002 | if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { |
1002 | ret = hdrv->event(hid, field, usage, value); | 1003 | ret = hdrv->event(hid, field, usage, value); |
@@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1558 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, | 1559 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, |
1559 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, | 1560 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, |
1560 | { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, | 1561 | { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, |
1561 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, | 1562 | #if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD) |
1563 | { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, | ||
1564 | #endif | ||
1562 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, | 1565 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, |
1563 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, | 1566 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, |
1564 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, | 1567 | { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, |
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 0f9c146fc00d..4d524b5f52f5 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c | |||
@@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) | |||
439 | struct dj_report *dj_report; | 439 | struct dj_report *dj_report; |
440 | int retval; | 440 | int retval; |
441 | 441 | ||
442 | dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); | 442 | dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); |
443 | if (!dj_report) | 443 | if (!dj_report) |
444 | return -ENOMEM; | 444 | return -ENOMEM; |
445 | dj_report->report_id = REPORT_ID_DJ_SHORT; | 445 | dj_report->report_id = REPORT_ID_DJ_SHORT; |
@@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, | |||
456 | struct dj_report *dj_report; | 456 | struct dj_report *dj_report; |
457 | int retval; | 457 | int retval; |
458 | 458 | ||
459 | dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); | 459 | dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); |
460 | if (!dj_report) | 460 | if (!dj_report) |
461 | return -ENOMEM; | 461 | return -ENOMEM; |
462 | dj_report->report_id = REPORT_ID_DJ_SHORT; | 462 | dj_report->report_id = REPORT_ID_DJ_SHORT; |
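Both hid-logitech-dj hunks fix the same bug: kzalloc(sizeof(dj_report), ...) allocated only a pointer's worth of memory, because dj_report is a pointer, not the structure. A short standalone demonstration of the sizeof pitfall; the struct layout below is invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dj_report_demo {
        unsigned char report_id;
        unsigned char device_index;
        unsigned char report_type;
        unsigned char parameters[12];
};

int main(void)
{
        struct dj_report_demo *r = NULL;

        printf("sizeof(r)  = %zu\n", sizeof(r));        /* pointer size only   */
        printf("sizeof(*r) = %zu\n", sizeof(*r));       /* full structure size */

        /* The fixed code allocates the structure, not the pointer: */
        r = calloc(1, sizeof(struct dj_report_demo));
        if (!r)
                return 1;
        memset(r->parameters, 0, sizeof(r->parameters));
        free(r);
        return 0;
}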
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 903eef3d3e10..991e85c7325c 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -70,6 +70,7 @@ static const struct hid_blacklist { | |||
70 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, | 70 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, |
71 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 71 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
72 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, | 72 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
73 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, | ||
73 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, | 74 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, |
74 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, | 75 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, |
75 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, | 76 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 351d1f4593e7..4ee578948723 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = { | |||
34 | .matches = { | 34 | .matches = { |
35 | DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") | 35 | DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") |
36 | } | 36 | } |
37 | }, { | ||
38 | /* Old interface reads the same sensor for fan0 and fan1 */ | ||
39 | .ident = "Asus M5A78L", | ||
40 | .matches = { | ||
41 | DMI_MATCH(DMI_BOARD_NAME, "M5A78L") | ||
42 | } | ||
37 | }, | 43 | }, |
38 | { } | 44 | { } |
39 | }; | 45 | }; |
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c index ff4c0a87a25f..ce68e361558c 100644 --- a/drivers/input/keyboard/imx_keypad.c +++ b/drivers/input/keyboard/imx_keypad.c | |||
@@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad) | |||
358 | /* Inhibit KDI and KRI interrupts. */ | 358 | /* Inhibit KDI and KRI interrupts. */ |
359 | reg_val = readw(keypad->mmio_base + KPSR); | 359 | reg_val = readw(keypad->mmio_base + KPSR); |
360 | reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); | 360 | reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); |
361 | reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; | ||
361 | writew(reg_val, keypad->mmio_base + KPSR); | 362 | writew(reg_val, keypad->mmio_base + KPSR); |
362 | 363 | ||
363 | /* Colums as open drain and disable all rows */ | 364 | /* Colums as open drain and disable all rows */ |
@@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev) | |||
515 | input_set_drvdata(input_dev, keypad); | 516 | input_set_drvdata(input_dev, keypad); |
516 | 517 | ||
517 | /* Ensure that the keypad will stay dormant until opened */ | 518 | /* Ensure that the keypad will stay dormant until opened */ |
519 | clk_enable(keypad->clk); | ||
518 | imx_keypad_inhibit(keypad); | 520 | imx_keypad_inhibit(keypad); |
521 | clk_disable(keypad->clk); | ||
519 | 522 | ||
520 | error = request_irq(irq, imx_keypad_irq_handler, 0, | 523 | error = request_irq(irq, imx_keypad_irq_handler, 0, |
521 | pdev->name, keypad); | 524 | pdev->name, keypad); |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 5ec774d6c82b..6918773ce024 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -177,6 +177,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | |||
177 | }, | 177 | }, |
178 | }, | 178 | }, |
179 | { | 179 | { |
180 | /* Gigabyte T1005 - defines wrong chassis type ("Other") */ | ||
181 | .matches = { | ||
182 | DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), | ||
183 | DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), | ||
184 | }, | ||
185 | }, | ||
186 | { | ||
187 | /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ | ||
188 | .matches = { | ||
189 | DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), | ||
190 | DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), | ||
191 | }, | ||
192 | }, | ||
193 | { | ||
180 | .matches = { | 194 | .matches = { |
181 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | 195 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
182 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), | 196 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 002041975de9..532d067a9e07 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A = | |||
1848 | { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, | 1848 | { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, |
1849 | 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | 1849 | 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; |
1850 | static const struct wacom_features wacom_features_0xF4 = | 1850 | static const struct wacom_features wacom_features_0xF4 = |
1851 | { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, | 1851 | { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, |
1852 | 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | ||
1853 | static const struct wacom_features wacom_features_0xF8 = | ||
1854 | { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, | ||
1852 | 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | 1855 | 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; |
1853 | static const struct wacom_features wacom_features_0x3F = | 1856 | static const struct wacom_features wacom_features_0x3F = |
1854 | { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, | 1857 | { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, |
@@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = { | |||
2091 | { USB_DEVICE_WACOM(0xEF) }, | 2094 | { USB_DEVICE_WACOM(0xEF) }, |
2092 | { USB_DEVICE_WACOM(0x47) }, | 2095 | { USB_DEVICE_WACOM(0x47) }, |
2093 | { USB_DEVICE_WACOM(0xF4) }, | 2096 | { USB_DEVICE_WACOM(0xF4) }, |
2097 | { USB_DEVICE_WACOM(0xF8) }, | ||
2094 | { USB_DEVICE_WACOM(0xFA) }, | 2098 | { USB_DEVICE_WACOM(0xFA) }, |
2095 | { USB_DEVICE_LENOVO(0x6004) }, | 2099 | { USB_DEVICE_LENOVO(0x6004) }, |
2096 | { } | 2100 | { } |
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 9afc777a40a7..b06a5e3a665e 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c | |||
@@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) | |||
602 | { | 602 | { |
603 | if (tsdata->debug_dir) | 603 | if (tsdata->debug_dir) |
604 | debugfs_remove_recursive(tsdata->debug_dir); | 604 | debugfs_remove_recursive(tsdata->debug_dir); |
605 | kfree(tsdata->raw_buffer); | ||
605 | } | 606 | } |
606 | 607 | ||
607 | #else | 608 | #else |
@@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client) | |||
843 | if (gpio_is_valid(pdata->reset_pin)) | 844 | if (gpio_is_valid(pdata->reset_pin)) |
844 | gpio_free(pdata->reset_pin); | 845 | gpio_free(pdata->reset_pin); |
845 | 846 | ||
846 | kfree(tsdata->raw_buffer); | ||
847 | kfree(tsdata); | 847 | kfree(tsdata); |
848 | 848 | ||
849 | return 0; | 849 | return 0; |
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index f1c84decb192..172a768036d8 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1411 | /* complete ongoing async transfer before issuing discard */ | 1411 | /* complete ongoing async transfer before issuing discard */ |
1412 | if (card->host->areq) | 1412 | if (card->host->areq) |
1413 | mmc_blk_issue_rw_rq(mq, NULL); | 1413 | mmc_blk_issue_rw_rq(mq, NULL); |
1414 | if (req->cmd_flags & REQ_SECURE) | 1414 | if (req->cmd_flags & REQ_SECURE && |
1415 | !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) | ||
1415 | ret = mmc_blk_issue_secdiscard_rq(mq, req); | 1416 | ret = mmc_blk_issue_secdiscard_rq(mq, req); |
1416 | else | 1417 | else |
1417 | ret = mmc_blk_issue_discard_rq(mq, req); | 1418 | ret = mmc_blk_issue_discard_rq(mq, req); |
@@ -1716,6 +1717,7 @@ force_ro_fail: | |||
1716 | #define CID_MANFID_SANDISK 0x2 | 1717 | #define CID_MANFID_SANDISK 0x2 |
1717 | #define CID_MANFID_TOSHIBA 0x11 | 1718 | #define CID_MANFID_TOSHIBA 0x11 |
1718 | #define CID_MANFID_MICRON 0x13 | 1719 | #define CID_MANFID_MICRON 0x13 |
1720 | #define CID_MANFID_SAMSUNG 0x15 | ||
1719 | 1721 | ||
1720 | static const struct mmc_fixup blk_fixups[] = | 1722 | static const struct mmc_fixup blk_fixups[] = |
1721 | { | 1723 | { |
@@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] = | |||
1752 | MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, | 1754 | MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, |
1753 | MMC_QUIRK_LONG_READ_TIME), | 1755 | MMC_QUIRK_LONG_READ_TIME), |
1754 | 1756 | ||
1757 | /* | ||
1758 | * On these Samsung MoviNAND parts, performing secure erase or | ||
1759 | * secure trim can result in unrecoverable corruption due to a | ||
1760 | * firmware bug. | ||
1761 | */ | ||
1762 | MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1763 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1764 | MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1765 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1766 | MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1767 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1768 | MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1769 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1770 | MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1771 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1772 | MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1773 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1774 | MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1775 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1776 | MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, | ||
1777 | MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), | ||
1778 | |||
1755 | END_FIXUP | 1779 | END_FIXUP |
1756 | }; | 1780 | }; |
1757 | 1781 | ||
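The new Samsung entries above extend the blk_fixups[] table, which is consulted when a card is set up: entries matching the card's CID product name and manufacturer ID OR their quirk bits into card->quirks, and the mmc_blk_issue_rq() hunk earlier then skips the secure-discard path whenever MMC_QUIRK_SEC_ERASE_TRIM_BROKEN is set. A minimal userspace sketch of that table-walk idea (names and values here are illustrative, not the driver's API):

    #include <stdio.h>
    #include <string.h>

    #define QUIRK_SEC_ERASE_TRIM_BROKEN  (1u << 0)

    struct fixup {                   /* loosely modelled on struct mmc_fixup */
        const char *cid_name;        /* product name from the CID register   */
        unsigned int manfid;
        unsigned int quirks;
    };

    static const struct fixup fixups[] = {
        { "VYL00M", 0x15, QUIRK_SEC_ERASE_TRIM_BROKEN },
        { "M8G2FA", 0x15, QUIRK_SEC_ERASE_TRIM_BROKEN },
        { NULL, 0, 0 }
    };

    static unsigned int card_quirks(const char *name, unsigned int manfid)
    {
        unsigned int q = 0;
        const struct fixup *f;

        for (f = fixups; f->cid_name; f++)
            if (f->manfid == manfid && !strcmp(f->cid_name, name))
                q |= f->quirks;
        return q;
    }

    int main(void)
    {
        if (card_quirks("VYL00M", 0x15) & QUIRK_SEC_ERASE_TRIM_BROKEN)
            puts("secure erase/trim disabled for this card");
        return 0;
    }

Keeping the policy in a data table means each newly affected part only needs one more row, as in the hunk above.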
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 322412cec4ee..a53c7c478e05 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -81,6 +81,7 @@ struct atmel_mci_caps { | |||
81 | bool has_bad_data_ordering; | 81 | bool has_bad_data_ordering; |
82 | bool need_reset_after_xfer; | 82 | bool need_reset_after_xfer; |
83 | bool need_blksz_mul_4; | 83 | bool need_blksz_mul_4; |
84 | bool need_notbusy_for_read_ops; | ||
84 | }; | 85 | }; |
85 | 86 | ||
86 | struct atmel_mci_dma { | 87 | struct atmel_mci_dma { |
@@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv) | |||
1625 | __func__); | 1626 | __func__); |
1626 | atmci_set_completed(host, EVENT_XFER_COMPLETE); | 1627 | atmci_set_completed(host, EVENT_XFER_COMPLETE); |
1627 | 1628 | ||
1628 | if (host->data->flags & MMC_DATA_WRITE) { | 1629 | if (host->caps.need_notbusy_for_read_ops || |
1630 | (host->data->flags & MMC_DATA_WRITE)) { | ||
1629 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); | 1631 | atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); |
1630 | state = STATE_WAITING_NOTBUSY; | 1632 | state = STATE_WAITING_NOTBUSY; |
1631 | } else if (host->mrq->stop) { | 1633 | } else if (host->mrq->stop) { |
@@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) | |||
2218 | host->caps.has_bad_data_ordering = 1; | 2220 | host->caps.has_bad_data_ordering = 1; |
2219 | host->caps.need_reset_after_xfer = 1; | 2221 | host->caps.need_reset_after_xfer = 1; |
2220 | host->caps.need_blksz_mul_4 = 1; | 2222 | host->caps.need_blksz_mul_4 = 1; |
2223 | host->caps.need_notbusy_for_read_ops = 0; | ||
2221 | 2224 | ||
2222 | /* keep only major version number */ | 2225 | /* keep only major version number */ |
2223 | switch (version & 0xf00) { | 2226 | switch (version & 0xf00) { |
@@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) | |||
2238 | case 0x200: | 2241 | case 0x200: |
2239 | host->caps.has_rwproof = 1; | 2242 | host->caps.has_rwproof = 1; |
2240 | host->caps.need_blksz_mul_4 = 0; | 2243 | host->caps.need_blksz_mul_4 = 0; |
2244 | host->caps.need_notbusy_for_read_ops = 1; | ||
2241 | case 0x100: | 2245 | case 0x100: |
2242 | host->caps.has_bad_data_ordering = 0; | 2246 | host->caps.has_bad_data_ordering = 0; |
2243 | host->caps.need_reset_after_xfer = 0; | 2247 | host->caps.need_reset_after_xfer = 0; |
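In atmci_get_cap() above, case 0x200 falls through into case 0x100, so a newer IP revision also picks up every capability adjustment applied to the older revisions beneath it; the new need_notbusy_for_read_ops flag is defaulted to 0 and then set only for v2xx cores. A small sketch of that cumulative fall-through pattern, with field names loosely mirroring the driver:

    #include <stdio.h>

    struct caps {
        int has_rwproof;
        int need_reset_after_xfer;
        int need_notbusy_for_read_ops;
    };

    /* Newer IP revisions fall through so they also inherit every
     * setting applied to the older revisions below them. */
    static void get_caps(unsigned int version, struct caps *c)
    {
        c->has_rwproof = 0;
        c->need_reset_after_xfer = 1;
        c->need_notbusy_for_read_ops = 0;

        switch (version & 0xf00) {
        case 0x200:
            c->has_rwproof = 1;
            c->need_notbusy_for_read_ops = 1;
            /* fall through */
        case 0x100:
            c->need_reset_after_xfer = 0;
            break;
        default:
            break;
        }
    }

    int main(void)
    {
        struct caps c;

        get_caps(0x210, &c);
        printf("rwproof=%d reset_after_xfer=%d notbusy_for_reads=%d\n",
               c.has_rwproof, c.need_reset_after_xfer,
               c.need_notbusy_for_read_ops);
        return 0;
    }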
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index 03666174ca48..a17dd7363ceb 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c | |||
@@ -49,13 +49,6 @@ | |||
49 | #define bfin_write_SDH_CFG bfin_write_RSI_CFG | 49 | #define bfin_write_SDH_CFG bfin_write_RSI_CFG |
50 | #endif | 50 | #endif |
51 | 51 | ||
52 | struct dma_desc_array { | ||
53 | unsigned long start_addr; | ||
54 | unsigned short cfg; | ||
55 | unsigned short x_count; | ||
56 | short x_modify; | ||
57 | } __packed; | ||
58 | |||
59 | struct sdh_host { | 52 | struct sdh_host { |
60 | struct mmc_host *mmc; | 53 | struct mmc_host *mmc; |
61 | spinlock_t lock; | 54 | spinlock_t lock; |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 72dc3cde646d..af40d227bece 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) | |||
627 | { | 627 | { |
628 | struct dw_mci *host = slot->host; | 628 | struct dw_mci *host = slot->host; |
629 | u32 div; | 629 | u32 div; |
630 | u32 clk_en_a; | ||
630 | 631 | ||
631 | if (slot->clock != host->current_speed) { | 632 | if (slot->clock != host->current_speed) { |
632 | div = host->bus_hz / slot->clock; | 633 | div = host->bus_hz / slot->clock; |
@@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) | |||
659 | mci_send_cmd(slot, | 660 | mci_send_cmd(slot, |
660 | SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); | 661 | SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); |
661 | 662 | ||
662 | /* enable clock */ | 663 | /* enable clock; only low power if no SDIO */ |
663 | mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | | 664 | clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; |
664 | SDMMC_CLKEN_LOW_PWR) << slot->id)); | 665 | if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) |
666 | clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; | ||
667 | mci_writel(host, CLKENA, clk_en_a); | ||
665 | 668 | ||
666 | /* inform CIU */ | 669 | /* inform CIU */ |
667 | mci_send_cmd(slot, | 670 | mci_send_cmd(slot, |
@@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc) | |||
862 | return present; | 865 | return present; |
863 | } | 866 | } |
864 | 867 | ||
868 | /* | ||
869 | * Disable lower power mode. | ||
870 | * | ||
871 | * Low power mode will stop the card clock when idle. According to the | ||
872 | * description of the CLKENA register we should disable low power mode | ||
873 | * for SDIO cards if we need SDIO interrupts to work. | ||
874 | * | ||
875 | * This function is fast if low power mode is already disabled. | ||
876 | */ | ||
877 | static void dw_mci_disable_low_power(struct dw_mci_slot *slot) | ||
878 | { | ||
879 | struct dw_mci *host = slot->host; | ||
880 | u32 clk_en_a; | ||
881 | const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; | ||
882 | |||
883 | clk_en_a = mci_readl(host, CLKENA); | ||
884 | |||
885 | if (clk_en_a & clken_low_pwr) { | ||
886 | mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); | ||
887 | mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | | ||
888 | SDMMC_CMD_PRV_DAT_WAIT, 0); | ||
889 | } | ||
890 | } | ||
891 | |||
865 | static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) | 892 | static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) |
866 | { | 893 | { |
867 | struct dw_mci_slot *slot = mmc_priv(mmc); | 894 | struct dw_mci_slot *slot = mmc_priv(mmc); |
@@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) | |||
871 | /* Enable/disable Slot Specific SDIO interrupt */ | 898 | /* Enable/disable Slot Specific SDIO interrupt */ |
872 | int_mask = mci_readl(host, INTMASK); | 899 | int_mask = mci_readl(host, INTMASK); |
873 | if (enb) { | 900 | if (enb) { |
901 | /* | ||
902 | * Turn off low power mode if it was enabled. This is a bit of | ||
903 | * a heavy operation and we disable / enable IRQs a lot, so | ||
904 | * we'll leave low power mode disabled and it will get | ||
905 | * re-enabled again in dw_mci_setup_bus(). | ||
906 | */ | ||
907 | dw_mci_disable_low_power(slot); | ||
908 | |||
874 | mci_writel(host, INTMASK, | 909 | mci_writel(host, INTMASK, |
875 | (int_mask | SDMMC_INT_SDIO(slot->id))); | 910 | (int_mask | SDMMC_INT_SDIO(slot->id))); |
876 | } else { | 911 | } else { |
@@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host) | |||
1429 | nbytes += len; | 1464 | nbytes += len; |
1430 | remain -= len; | 1465 | remain -= len; |
1431 | } while (remain); | 1466 | } while (remain); |
1432 | sg_miter->consumed = offset; | ||
1433 | 1467 | ||
1468 | sg_miter->consumed = offset; | ||
1434 | status = mci_readl(host, MINTSTS); | 1469 | status = mci_readl(host, MINTSTS); |
1435 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); | 1470 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); |
1436 | if (status & DW_MCI_DATA_ERROR_FLAGS) { | ||
1437 | host->data_status = status; | ||
1438 | data->bytes_xfered += nbytes; | ||
1439 | sg_miter_stop(sg_miter); | ||
1440 | host->sg = NULL; | ||
1441 | smp_wmb(); | ||
1442 | |||
1443 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | ||
1444 | |||
1445 | tasklet_schedule(&host->tasklet); | ||
1446 | return; | ||
1447 | } | ||
1448 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ | 1471 | } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ |
1449 | data->bytes_xfered += nbytes; | 1472 | data->bytes_xfered += nbytes; |
1450 | 1473 | ||
@@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host) | |||
1497 | nbytes += len; | 1520 | nbytes += len; |
1498 | remain -= len; | 1521 | remain -= len; |
1499 | } while (remain); | 1522 | } while (remain); |
1500 | sg_miter->consumed = offset; | ||
1501 | 1523 | ||
1524 | sg_miter->consumed = offset; | ||
1502 | status = mci_readl(host, MINTSTS); | 1525 | status = mci_readl(host, MINTSTS); |
1503 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); | 1526 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); |
1504 | if (status & DW_MCI_DATA_ERROR_FLAGS) { | ||
1505 | host->data_status = status; | ||
1506 | data->bytes_xfered += nbytes; | ||
1507 | sg_miter_stop(sg_miter); | ||
1508 | host->sg = NULL; | ||
1509 | |||
1510 | smp_wmb(); | ||
1511 | |||
1512 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | ||
1513 | |||
1514 | tasklet_schedule(&host->tasklet); | ||
1515 | return; | ||
1516 | } | ||
1517 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ | 1527 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ |
1518 | data->bytes_xfered += nbytes; | 1528 | data->bytes_xfered += nbytes; |
1519 | 1529 | ||
@@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) | |||
1547 | static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | 1557 | static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) |
1548 | { | 1558 | { |
1549 | struct dw_mci *host = dev_id; | 1559 | struct dw_mci *host = dev_id; |
1550 | u32 status, pending; | 1560 | u32 pending; |
1551 | unsigned int pass_count = 0; | 1561 | unsigned int pass_count = 0; |
1552 | int i; | 1562 | int i; |
1553 | 1563 | ||
1554 | do { | 1564 | do { |
1555 | status = mci_readl(host, RINTSTS); | ||
1556 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ | 1565 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ |
1557 | 1566 | ||
1558 | /* | 1567 | /* |
@@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1570 | 1579 | ||
1571 | if (pending & DW_MCI_CMD_ERROR_FLAGS) { | 1580 | if (pending & DW_MCI_CMD_ERROR_FLAGS) { |
1572 | mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); | 1581 | mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); |
1573 | host->cmd_status = status; | 1582 | host->cmd_status = pending; |
1574 | smp_wmb(); | 1583 | smp_wmb(); |
1575 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); | 1584 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events); |
1576 | } | 1585 | } |
@@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1578 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { | 1587 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { |
1579 | /* if there is an error report DATA_ERROR */ | 1588 | /* if there is an error report DATA_ERROR */ |
1580 | mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); | 1589 | mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); |
1581 | host->data_status = status; | 1590 | host->data_status = pending; |
1582 | smp_wmb(); | 1591 | smp_wmb(); |
1583 | set_bit(EVENT_DATA_ERROR, &host->pending_events); | 1592 | set_bit(EVENT_DATA_ERROR, &host->pending_events); |
1584 | if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | | 1593 | tasklet_schedule(&host->tasklet); |
1585 | SDMMC_INT_SBE | SDMMC_INT_EBE))) | ||
1586 | tasklet_schedule(&host->tasklet); | ||
1587 | } | 1594 | } |
1588 | 1595 | ||
1589 | if (pending & SDMMC_INT_DATA_OVER) { | 1596 | if (pending & SDMMC_INT_DATA_OVER) { |
1590 | mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); | 1597 | mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); |
1591 | if (!host->data_status) | 1598 | if (!host->data_status) |
1592 | host->data_status = status; | 1599 | host->data_status = pending; |
1593 | smp_wmb(); | 1600 | smp_wmb(); |
1594 | if (host->dir_status == DW_MCI_RECV_STATUS) { | 1601 | if (host->dir_status == DW_MCI_RECV_STATUS) { |
1595 | if (host->sg != NULL) | 1602 | if (host->sg != NULL) |
@@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
1613 | 1620 | ||
1614 | if (pending & SDMMC_INT_CMD_DONE) { | 1621 | if (pending & SDMMC_INT_CMD_DONE) { |
1615 | mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); | 1622 | mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); |
1616 | dw_mci_cmd_interrupt(host, status); | 1623 | dw_mci_cmd_interrupt(host, pending); |
1617 | } | 1624 | } |
1618 | 1625 | ||
1619 | if (pending & SDMMC_INT_CD) { | 1626 | if (pending & SDMMC_INT_CD) { |
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index a51f9309ffbb..ad3fcea1269e 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) | |||
285 | writel(stat & MXS_MMC_IRQ_BITS, | 285 | writel(stat & MXS_MMC_IRQ_BITS, |
286 | host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); | 286 | host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); |
287 | 287 | ||
288 | spin_unlock(&host->lock); | ||
289 | |||
288 | if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) | 290 | if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) |
289 | mmc_signal_sdio_irq(host->mmc); | 291 | mmc_signal_sdio_irq(host->mmc); |
290 | 292 | ||
291 | spin_unlock(&host->lock); | ||
292 | |||
293 | if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) | 293 | if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) |
294 | cmd->error = -ETIMEDOUT; | 294 | cmd->error = -ETIMEDOUT; |
295 | else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) | 295 | else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) |
@@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
644 | host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); | 644 | host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); |
645 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, | 645 | writel(BM_SSP_CTRL1_SDIO_IRQ_EN, |
646 | host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); | 646 | host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); |
647 | |||
648 | if (readl(host->base + HW_SSP_STATUS(host)) & | ||
649 | BM_SSP_STATUS_SDIO_IRQ) | ||
650 | mmc_signal_sdio_irq(host->mmc); | ||
651 | |||
652 | } else { | 647 | } else { |
653 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, | 648 | writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, |
654 | host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); | 649 | host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); |
@@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | |||
657 | } | 652 | } |
658 | 653 | ||
659 | spin_unlock_irqrestore(&host->lock, flags); | 654 | spin_unlock_irqrestore(&host->lock, flags); |
655 | |||
656 | if (enable && readl(host->base + HW_SSP_STATUS(host)) & | ||
657 | BM_SSP_STATUS_SDIO_IRQ) | ||
658 | mmc_signal_sdio_irq(host->mmc); | ||
659 | |||
660 | } | 660 | } |
661 | 661 | ||
662 | static const struct mmc_host_ops mxs_mmc_ops = { | 662 | static const struct mmc_host_ops mxs_mmc_ops = { |
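Both mxs-mmc hunks above move the mmc_signal_sdio_irq() calls outside the host spinlock, so the core callback is never invoked with host->lock held: state is examined while locked, the lock is dropped, and only then is the notification delivered. A minimal pthread sketch of that snapshot-then-notify pattern (illustrative names, not the driver's API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int sdio_irq_pending;

    static void signal_sdio_irq(void)
    {
        /* may take other locks or re-enter the driver,
         * so it must not run with 'lock' held */
        puts("sdio irq delivered");
    }

    static void irq_handler(void)
    {
        int pending;

        pthread_mutex_lock(&lock);
        pending = sdio_irq_pending;      /* snapshot state under the lock */
        sdio_irq_pending = 0;
        pthread_mutex_unlock(&lock);

        if (pending)                     /* notify only after unlocking   */
            signal_sdio_irq();
    }

    int main(void)
    {
        sdio_irq_pending = 1;
        irq_handler();
        return 0;
    }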
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 50e08f03aa65..a5999a74496a 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c | |||
@@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data) | |||
668 | static void | 668 | static void |
669 | mmc_omap_xfer_data(struct mmc_omap_host *host, int write) | 669 | mmc_omap_xfer_data(struct mmc_omap_host *host, int write) |
670 | { | 670 | { |
671 | int n; | 671 | int n, nwords; |
672 | 672 | ||
673 | if (host->buffer_bytes_left == 0) { | 673 | if (host->buffer_bytes_left == 0) { |
674 | host->sg_idx++; | 674 | host->sg_idx++; |
@@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) | |||
678 | n = 64; | 678 | n = 64; |
679 | if (n > host->buffer_bytes_left) | 679 | if (n > host->buffer_bytes_left) |
680 | n = host->buffer_bytes_left; | 680 | n = host->buffer_bytes_left; |
681 | |||
682 | nwords = n / 2; | ||
683 | nwords += n & 1; /* handle odd number of bytes to transfer */ | ||
684 | |||
681 | host->buffer_bytes_left -= n; | 685 | host->buffer_bytes_left -= n; |
682 | host->total_bytes_left -= n; | 686 | host->total_bytes_left -= n; |
683 | host->data->bytes_xfered += n; | 687 | host->data->bytes_xfered += n; |
684 | 688 | ||
685 | if (write) { | 689 | if (write) { |
686 | __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); | 690 | __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), |
691 | host->buffer, nwords); | ||
687 | } else { | 692 | } else { |
688 | __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); | 693 | __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), |
694 | host->buffer, nwords); | ||
689 | } | 695 | } |
696 | |||
697 | host->buffer += nwords; | ||
690 | } | 698 | } |
691 | 699 | ||
692 | static inline void mmc_omap_report_irq(u16 status) | 700 | static inline void mmc_omap_report_irq(u16 status) |
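The omap hunk above fixes PIO transfers with an odd byte count: __raw_writesw()/__raw_readsw() move 16-bit words, so the byte count n is rounded up to a word count and host->buffer is now advanced by words as well. The conversion in isolation:

    #include <stdio.h>

    /* Convert a byte count into the number of 16-bit FIFO words needed,
     * rounding up so a trailing odd byte is not dropped. */
    static unsigned int bytes_to_words(unsigned int n)
    {
        return n / 2 + (n & 1);          /* same as DIV_ROUND_UP(n, 2) */
    }

    int main(void)
    {
        printf("64 bytes -> %u words\n", bytes_to_words(64));   /* 32 */
        printf("33 bytes -> %u words\n", bytes_to_words(33));   /* 17 */
        return 0;
    }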
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index b97b2f5dafdb..d25f9ab9a54d 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h | |||
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) | |||
48 | int div = 1; | 48 | int div = 1; |
49 | u32 temp; | 49 | u32 temp; |
50 | 50 | ||
51 | if (clock == 0) | ||
52 | goto out; | ||
53 | |||
51 | temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); | 54 | temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); |
52 | temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | 55 | temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
53 | | ESDHC_CLOCK_MASK); | 56 | | ESDHC_CLOCK_MASK); |
54 | sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); | 57 | sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); |
55 | 58 | ||
56 | if (clock == 0) | ||
57 | goto out; | ||
58 | |||
59 | while (host->max_clk / pre_div / 16 > clock && pre_div < 256) | 59 | while (host->max_clk / pre_div / 16 > clock && pre_div < 256) |
60 | pre_div *= 2; | 60 | pre_div *= 2; |
61 | 61 | ||
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 437bc193e170..568307cc7caf 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -340,7 +340,7 @@ retry: | |||
340 | * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. | 340 | * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. |
341 | */ | 341 | */ |
342 | err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); | 342 | err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); |
343 | kfree(new_aeb); | 343 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); |
344 | ubi_free_vid_hdr(ubi, vid_hdr); | 344 | ubi_free_vid_hdr(ubi, vid_hdr); |
345 | return err; | 345 | return err; |
346 | 346 | ||
@@ -353,7 +353,7 @@ write_error: | |||
353 | list_add(&new_aeb->u.list, &ai->erase); | 353 | list_add(&new_aeb->u.list, &ai->erase); |
354 | goto retry; | 354 | goto retry; |
355 | } | 355 | } |
356 | kfree(new_aeb); | 356 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); |
357 | out_free: | 357 | out_free: |
358 | ubi_free_vid_hdr(ubi, vid_hdr); | 358 | ubi_free_vid_hdr(ubi, vid_hdr); |
359 | return err; | 359 | return err; |
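In the vtbl.c error paths above, new_aeb was taken from the attach-info slab cache, so it has to be returned with kmem_cache_free() on that same cache rather than kfree(): an object should go back through the allocator that handed it out. A toy userspace pool makes the pairing explicit (entirely illustrative, not the UBI API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy fixed-size object pool, standing in for a kmem_cache. Objects
     * obtained from pool_get() go back through pool_put(), never free(). */
    struct obj { struct obj *next; int data; };

    static struct obj *free_list;

    static struct obj *pool_get(void)
    {
        struct obj *o = free_list;

        if (o)
            free_list = o->next;
        else
            o = malloc(sizeof(*o));
        return o;
    }

    static void pool_put(struct obj *o)
    {
        o->next = free_list;
        free_list = o;
    }

    int main(void)
    {
        struct obj *o = pool_get();

        if (!o)
            return 1;
        o->data = 42;
        pool_put(o);                     /* matching release: pool can reuse it */

        while (free_list) {              /* drain the pool so the demo is leak-free */
            struct obj *n = free_list->next;
            free(free_list);
            free_list = n;
        }
        return 0;
    }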
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 4f50145f6483..662c5f7eb0c5 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c | |||
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev) | |||
109 | priv = netdev_priv(dev); | 109 | priv = netdev_priv(dev); |
110 | 110 | ||
111 | dev->irq = res_irq->start; | 111 | dev->irq = res_irq->start; |
112 | priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); | 112 | priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; |
113 | if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) | ||
114 | priv->irq_flags |= IRQF_SHARED; | ||
113 | priv->reg_base = addr; | 115 | priv->reg_base = addr; |
114 | /* The CAN clock frequency is half the oscillator clock frequency */ | 116 | /* The CAN clock frequency is half the oscillator clock frequency */ |
115 | priv->can.clock.freq = pdata->osc_freq / 2; | 117 | priv->can.clock.freq = pdata->osc_freq / 2; |
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index 310596175676..b595d3422b9f 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c | |||
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card, | |||
150 | const uint8_t *mem, *end, *dat; | 150 | const uint8_t *mem, *end, *dat; |
151 | uint16_t type, len; | 151 | uint16_t type, len; |
152 | uint32_t addr; | 152 | uint32_t addr; |
153 | uint8_t *buf = NULL; | 153 | uint8_t *buf = NULL, *new_buf; |
154 | int buflen = 0; | 154 | int buflen = 0; |
155 | int8_t type_end = 0; | 155 | int8_t type_end = 0; |
156 | 156 | ||
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card, | |||
199 | if (len > buflen) { | 199 | if (len > buflen) { |
200 | /* align buflen */ | 200 | /* align buflen */ |
201 | buflen = (len + (1024-1)) & ~(1024-1); | 201 | buflen = (len + (1024-1)) & ~(1024-1); |
202 | buf = krealloc(buf, buflen, GFP_KERNEL); | 202 | new_buf = krealloc(buf, buflen, GFP_KERNEL); |
203 | if (!buf) { | 203 | if (!new_buf) { |
204 | ret = -ENOMEM; | 204 | ret = -ENOMEM; |
205 | goto failed; | 205 | goto failed; |
206 | } | 206 | } |
207 | buf = new_buf; | ||
207 | } | 208 | } |
208 | /* verify record data */ | 209 | /* verify record data */ |
209 | memcpy_fromio(buf, &dpram[addr + offset], len); | 210 | memcpy_fromio(buf, &dpram[addr + offset], len); |
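The softing_fw change above avoids the classic krealloc() leak: assigning the result straight back to buf drops the only reference to the old buffer when krealloc() fails, so the result is held in new_buf and committed only on success. The same pattern with userspace realloc() (illustrative helper, not the driver code):

    #include <stdio.h>
    #include <stdlib.h>

    static int grow(unsigned char **buf, size_t *buflen, size_t need)
    {
        size_t newlen = (need + 1023) & ~(size_t)1023;   /* align to 1 KiB */
        unsigned char *new_buf;

        if (need <= *buflen)
            return 0;

        new_buf = realloc(*buf, newlen);  /* old block still owned by *buf */
        if (!new_buf)
            return -1;                    /* *buf stays valid; caller frees */

        *buf = new_buf;
        *buflen = newlen;
        return 0;
    }

    int main(void)
    {
        unsigned char *buf = NULL;
        size_t buflen = 0;

        if (grow(&buf, &buflen, 1500) == 0)
            printf("buffer grown to %zu bytes\n", buflen);

        free(buf);
        return 0;
    }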
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 463b9ec57d80..6d1a24acb77e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params { | |||
1708 | continue; \ | 1708 | continue; \ |
1709 | else | 1709 | else |
1710 | 1710 | ||
1711 | #define for_each_napi_rx_queue(bp, var) \ | ||
1712 | for ((var) = 0; (var) < bp->num_napi_queues; (var)++) | ||
1713 | |||
1714 | /* Skip OOO FP */ | 1711 | /* Skip OOO FP */ |
1715 | #define for_each_tx_queue(bp, var) \ | 1712 | #define for_each_tx_queue(bp, var) \ |
1716 | for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ | 1713 | for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e879e19eb0d6..af20c6ee2cd9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2046 | */ | 2046 | */ |
2047 | bnx2x_setup_tc(bp->dev, bp->max_cos); | 2047 | bnx2x_setup_tc(bp->dev, bp->max_cos); |
2048 | 2048 | ||
2049 | /* Add all NAPI objects */ | ||
2050 | bnx2x_add_all_napi(bp); | ||
2049 | bnx2x_napi_enable(bp); | 2051 | bnx2x_napi_enable(bp); |
2050 | 2052 | ||
2051 | /* set pf load just before approaching the MCP */ | 2053 | /* set pf load just before approaching the MCP */ |
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
2408 | 2410 | ||
2409 | /* Disable HW interrupts, NAPI */ | 2411 | /* Disable HW interrupts, NAPI */ |
2410 | bnx2x_netif_stop(bp, 1); | 2412 | bnx2x_netif_stop(bp, 1); |
2413 | /* Delete all NAPI objects */ | ||
2414 | bnx2x_del_all_napi(bp); | ||
2411 | 2415 | ||
2412 | /* Release IRQs */ | 2416 | /* Release IRQs */ |
2413 | bnx2x_free_irq(bp); | 2417 | bnx2x_free_irq(bp); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index dfa757e74296..21b553229ea4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | |||
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) | |||
792 | bp->num_napi_queues = bp->num_queues; | 792 | bp->num_napi_queues = bp->num_queues; |
793 | 793 | ||
794 | /* Add NAPI objects */ | 794 | /* Add NAPI objects */ |
795 | for_each_napi_rx_queue(bp, i) | 795 | for_each_rx_queue(bp, i) |
796 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 796 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
797 | bnx2x_poll, BNX2X_NAPI_WEIGHT); | 797 | bnx2x_poll, BNX2X_NAPI_WEIGHT); |
798 | } | 798 | } |
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) | |||
801 | { | 801 | { |
802 | int i; | 802 | int i; |
803 | 803 | ||
804 | for_each_napi_rx_queue(bp, i) | 804 | for_each_rx_queue(bp, i) |
805 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | 805 | netif_napi_del(&bnx2x_fp(bp, i, napi)); |
806 | } | 806 | } |
807 | 807 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index fc4e0e3885b0..c37a68d68090 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | |||
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev, | |||
2888 | */ | 2888 | */ |
2889 | static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) | 2889 | static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) |
2890 | { | 2890 | { |
2891 | bnx2x_del_all_napi(bp); | ||
2892 | bnx2x_disable_msi(bp); | 2891 | bnx2x_disable_msi(bp); |
2893 | BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; | 2892 | BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; |
2894 | bnx2x_set_int_mode(bp); | 2893 | bnx2x_set_int_mode(bp); |
2895 | bnx2x_add_all_napi(bp); | ||
2896 | } | 2894 | } |
2897 | 2895 | ||
2898 | /** | 2896 | /** |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 02b5a343b195..21054987257a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -8427,6 +8427,8 @@ unload_error: | |||
8427 | 8427 | ||
8428 | /* Disable HW interrupts, NAPI */ | 8428 | /* Disable HW interrupts, NAPI */ |
8429 | bnx2x_netif_stop(bp, 1); | 8429 | bnx2x_netif_stop(bp, 1); |
8430 | /* Delete all NAPI objects */ | ||
8431 | bnx2x_del_all_napi(bp); | ||
8430 | 8432 | ||
8431 | /* Release IRQs */ | 8433 | /* Release IRQs */ |
8432 | bnx2x_free_irq(bp); | 8434 | bnx2x_free_irq(bp); |
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11229 | static void poll_bnx2x(struct net_device *dev) | 11231 | static void poll_bnx2x(struct net_device *dev) |
11230 | { | 11232 | { |
11231 | struct bnx2x *bp = netdev_priv(dev); | 11233 | struct bnx2x *bp = netdev_priv(dev); |
11234 | int i; | ||
11232 | 11235 | ||
11233 | disable_irq(bp->pdev->irq); | 11236 | for_each_eth_queue(bp, i) { |
11234 | bnx2x_interrupt(bp->pdev->irq, dev); | 11237 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
11235 | enable_irq(bp->pdev->irq); | 11238 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); |
11239 | } | ||
11236 | } | 11240 | } |
11237 | #endif | 11241 | #endif |
11238 | 11242 | ||
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
11899 | */ | 11903 | */ |
11900 | bnx2x_set_int_mode(bp); | 11904 | bnx2x_set_int_mode(bp); |
11901 | 11905 | ||
11902 | /* Add all NAPI objects */ | ||
11903 | bnx2x_add_all_napi(bp); | ||
11904 | |||
11905 | rc = register_netdev(dev); | 11906 | rc = register_netdev(dev); |
11906 | if (rc) { | 11907 | if (rc) { |
11907 | dev_err(&pdev->dev, "Cannot register net device\n"); | 11908 | dev_err(&pdev->dev, "Cannot register net device\n"); |
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
11976 | 11977 | ||
11977 | unregister_netdev(dev); | 11978 | unregister_netdev(dev); |
11978 | 11979 | ||
11979 | /* Delete all NAPI objects */ | ||
11980 | bnx2x_del_all_napi(bp); | ||
11981 | |||
11982 | /* Power on: we can't let PCI layer write to us while we are in D3 */ | 11980 | /* Power on: we can't let PCI layer write to us while we are in D3 */ |
11983 | bnx2x_set_power_state(bp, PCI_D0); | 11981 | bnx2x_set_power_state(bp, PCI_D0); |
11984 | 11982 | ||
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
12025 | bnx2x_tx_disable(bp); | 12023 | bnx2x_tx_disable(bp); |
12026 | 12024 | ||
12027 | bnx2x_netif_stop(bp, 0); | 12025 | bnx2x_netif_stop(bp, 0); |
12026 | /* Delete all NAPI objects */ | ||
12027 | bnx2x_del_all_napi(bp); | ||
12028 | 12028 | ||
12029 | del_timer_sync(&bp->timer); | 12029 | del_timer_sync(&bp->timer); |
12030 | 12030 | ||
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 845b2020f291..138446957786 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c | |||
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev) | |||
1243 | { | 1243 | { |
1244 | struct net_local *lp = netdev_priv(dev); | 1244 | struct net_local *lp = netdev_priv(dev); |
1245 | unsigned long flags; | 1245 | unsigned long flags; |
1246 | u16 cfg; | ||
1246 | 1247 | ||
1247 | spin_lock_irqsave(&lp->lock, flags); | 1248 | spin_lock_irqsave(&lp->lock, flags); |
1248 | if (dev->flags & IFF_PROMISC) | 1249 | if (dev->flags & IFF_PROMISC) |
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev) | |||
1260 | /* in promiscuous mode, we accept errored packets, | 1261 | /* in promiscuous mode, we accept errored packets, |
1261 | * so we have to enable interrupts on them also | 1262 | * so we have to enable interrupts on them also |
1262 | */ | 1263 | */ |
1263 | writereg(dev, PP_RxCFG, | 1264 | cfg = lp->curr_rx_cfg; |
1264 | (lp->curr_rx_cfg | | 1265 | if (lp->rx_mode == RX_ALL_ACCEPT) |
1265 | (lp->rx_mode == RX_ALL_ACCEPT) | 1266 | cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL; |
1266 | ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) | 1267 | writereg(dev, PP_RxCFG, cfg); |
1267 | : 0)); | ||
1268 | spin_unlock_irqrestore(&lp->lock, flags); | 1268 | spin_unlock_irqrestore(&lp->lock, flags); |
1269 | } | 1269 | } |
1270 | 1270 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fac97b4bb59..8c63d06ab12b 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter) | |||
259 | int num = 0, status = 0; | 259 | int num = 0, status = 0; |
260 | struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; | 260 | struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; |
261 | 261 | ||
262 | spin_lock_bh(&adapter->mcc_cq_lock); | 262 | spin_lock(&adapter->mcc_cq_lock); |
263 | while ((compl = be_mcc_compl_get(adapter))) { | 263 | while ((compl = be_mcc_compl_get(adapter))) { |
264 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { | 264 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { |
265 | /* Interpret flags as an async trailer */ | 265 | /* Interpret flags as an async trailer */ |
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter) | |||
280 | if (num) | 280 | if (num) |
281 | be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); | 281 | be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); |
282 | 282 | ||
283 | spin_unlock_bh(&adapter->mcc_cq_lock); | 283 | spin_unlock(&adapter->mcc_cq_lock); |
284 | return status; | 284 | return status; |
285 | } | 285 | } |
286 | 286 | ||
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) | |||
295 | if (be_error(adapter)) | 295 | if (be_error(adapter)) |
296 | return -EIO; | 296 | return -EIO; |
297 | 297 | ||
298 | local_bh_disable(); | ||
298 | status = be_process_mcc(adapter); | 299 | status = be_process_mcc(adapter); |
300 | local_bh_enable(); | ||
299 | 301 | ||
300 | if (atomic_read(&mcc_obj->q.used) == 0) | 302 | if (atomic_read(&mcc_obj->q.used) == 0) |
301 | break; | 303 | break; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 90a903d83d87..78b8aa8069f0 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work) | |||
3763 | /* when interrupts are not yet enabled, just reap any pending | 3763 | /* when interrupts are not yet enabled, just reap any pending |
3764 | * mcc completions */ | 3764 | * mcc completions */ |
3765 | if (!netif_running(adapter->netdev)) { | 3765 | if (!netif_running(adapter->netdev)) { |
3766 | local_bh_disable(); | ||
3766 | be_process_mcc(adapter); | 3767 | be_process_mcc(adapter); |
3768 | local_bh_enable(); | ||
3767 | goto reschedule; | 3769 | goto reschedule; |
3768 | } | 3770 | } |
3769 | 3771 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4605f7246687..d3233f59a82e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev) | |||
1041 | 1041 | ||
1042 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { | 1042 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { |
1043 | dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 1043 | dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
1044 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 1044 | dev->features |= NETIF_F_HW_VLAN_RX; |
1045 | } | 1045 | } |
1046 | 1046 | ||
1047 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { | 1047 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { |
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index cd153326c3cf..cb3356c9af80 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
@@ -310,6 +310,7 @@ struct e1000_adapter { | |||
310 | */ | 310 | */ |
311 | struct e1000_ring *tx_ring /* One per active queue */ | 311 | struct e1000_ring *tx_ring /* One per active queue */ |
312 | ____cacheline_aligned_in_smp; | 312 | ____cacheline_aligned_in_smp; |
313 | u32 tx_fifo_limit; | ||
313 | 314 | ||
314 | struct napi_struct napi; | 315 | struct napi_struct napi; |
315 | 316 | ||
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 46c3b1f9ff89..d01a099475a1 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -3517,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3517 | } | 3517 | } |
3518 | 3518 | ||
3519 | /* | 3519 | /* |
3520 | * Alignment of Tx data is on an arbitrary byte boundary with the | ||
3521 | * maximum size per Tx descriptor limited only to the transmit | ||
3522 | * allocation of the packet buffer minus 96 bytes with an upper | ||
3523 | * limit of 24KB due to receive synchronization limitations. | ||
3524 | */ | ||
3525 | adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, | ||
3526 | 24 << 10); | ||
3527 | |||
3528 | /* | ||
3520 | * Disable Adaptive Interrupt Moderation if 2 full packets cannot | 3529 | * Disable Adaptive Interrupt Moderation if 2 full packets cannot |
3521 | * fit in receive buffer. | 3530 | * fit in receive buffer. |
3522 | */ | 3531 | */ |
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
4785 | return 1; | 4794 | return 1; |
4786 | } | 4795 | } |
4787 | 4796 | ||
4788 | #define E1000_MAX_PER_TXD 8192 | ||
4789 | #define E1000_MAX_TXD_PWR 12 | ||
4790 | |||
4791 | static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, | 4797 | static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, |
4792 | unsigned int first, unsigned int max_per_txd, | 4798 | unsigned int first, unsigned int max_per_txd, |
4793 | unsigned int nr_frags, unsigned int mss) | 4799 | unsigned int nr_frags) |
4794 | { | 4800 | { |
4795 | struct e1000_adapter *adapter = tx_ring->adapter; | 4801 | struct e1000_adapter *adapter = tx_ring->adapter; |
4796 | struct pci_dev *pdev = adapter->pdev; | 4802 | struct pci_dev *pdev = adapter->pdev; |
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) | |||
5023 | 5029 | ||
5024 | static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) | 5030 | static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) |
5025 | { | 5031 | { |
5032 | BUG_ON(size > tx_ring->count); | ||
5033 | |||
5026 | if (e1000_desc_unused(tx_ring) >= size) | 5034 | if (e1000_desc_unused(tx_ring) >= size) |
5027 | return 0; | 5035 | return 0; |
5028 | return __e1000_maybe_stop_tx(tx_ring, size); | 5036 | return __e1000_maybe_stop_tx(tx_ring, size); |
5029 | } | 5037 | } |
5030 | 5038 | ||
5031 | #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) | ||
5032 | static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | 5039 | static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, |
5033 | struct net_device *netdev) | 5040 | struct net_device *netdev) |
5034 | { | 5041 | { |
5035 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5042 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5036 | struct e1000_ring *tx_ring = adapter->tx_ring; | 5043 | struct e1000_ring *tx_ring = adapter->tx_ring; |
5037 | unsigned int first; | 5044 | unsigned int first; |
5038 | unsigned int max_per_txd = E1000_MAX_PER_TXD; | ||
5039 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | ||
5040 | unsigned int tx_flags = 0; | 5045 | unsigned int tx_flags = 0; |
5041 | unsigned int len = skb_headlen(skb); | 5046 | unsigned int len = skb_headlen(skb); |
5042 | unsigned int nr_frags; | 5047 | unsigned int nr_frags; |
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5056 | } | 5061 | } |
5057 | 5062 | ||
5058 | mss = skb_shinfo(skb)->gso_size; | 5063 | mss = skb_shinfo(skb)->gso_size; |
5059 | /* | ||
5060 | * The controller does a simple calculation to | ||
5061 | * make sure there is enough room in the FIFO before | ||
5062 | * initiating the DMA for each buffer. The calc is: | ||
5063 | * 4 = ceil(buffer len/mss). To make sure we don't | ||
5064 | * overrun the FIFO, adjust the max buffer len if mss | ||
5065 | * drops. | ||
5066 | */ | ||
5067 | if (mss) { | 5064 | if (mss) { |
5068 | u8 hdr_len; | 5065 | u8 hdr_len; |
5069 | max_per_txd = min(mss << 2, max_per_txd); | ||
5070 | max_txd_pwr = fls(max_per_txd) - 1; | ||
5071 | 5066 | ||
5072 | /* | 5067 | /* |
5073 | * TSO Workaround for 82571/2/3 Controllers -- if skb->data | 5068 | * TSO Workaround for 82571/2/3 Controllers -- if skb->data |
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5097 | count++; | 5092 | count++; |
5098 | count++; | 5093 | count++; |
5099 | 5094 | ||
5100 | count += TXD_USE_COUNT(len, max_txd_pwr); | 5095 | count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); |
5101 | 5096 | ||
5102 | nr_frags = skb_shinfo(skb)->nr_frags; | 5097 | nr_frags = skb_shinfo(skb)->nr_frags; |
5103 | for (f = 0; f < nr_frags; f++) | 5098 | for (f = 0; f < nr_frags; f++) |
5104 | count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), | 5099 | count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), |
5105 | max_txd_pwr); | 5100 | adapter->tx_fifo_limit); |
5106 | 5101 | ||
5107 | if (adapter->hw.mac.tx_pkt_filtering) | 5102 | if (adapter->hw.mac.tx_pkt_filtering) |
5108 | e1000_transfer_dhcp_info(adapter, skb); | 5103 | e1000_transfer_dhcp_info(adapter, skb); |
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5144 | tx_flags |= E1000_TX_FLAGS_NO_FCS; | 5139 | tx_flags |= E1000_TX_FLAGS_NO_FCS; |
5145 | 5140 | ||
5146 | /* if count is 0 then mapping error has occurred */ | 5141 | /* if count is 0 then mapping error has occurred */ |
5147 | count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); | 5142 | count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, |
5143 | nr_frags); | ||
5148 | if (count) { | 5144 | if (count) { |
5149 | skb_tx_timestamp(skb); | 5145 | skb_tx_timestamp(skb); |
5150 | 5146 | ||
5151 | netdev_sent_queue(netdev, skb->len); | 5147 | netdev_sent_queue(netdev, skb->len); |
5152 | e1000_tx_queue(tx_ring, tx_flags, count); | 5148 | e1000_tx_queue(tx_ring, tx_flags, count); |
5153 | /* Make sure there is space in the ring for the next send. */ | 5149 | /* Make sure there is space in the ring for the next send. */ |
5154 | e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); | 5150 | e1000_maybe_stop_tx(tx_ring, |
5155 | 5151 | (MAX_SKB_FRAGS * | |
5152 | DIV_ROUND_UP(PAGE_SIZE, | ||
5153 | adapter->tx_fifo_limit) + 2)); | ||
5156 | } else { | 5154 | } else { |
5157 | dev_kfree_skb_any(skb); | 5155 | dev_kfree_skb_any(skb); |
5158 | tx_ring->buffer_info[first].time_stamp = 0; | 5156 | tx_ring->buffer_info[first].time_stamp = 0; |
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
6327 | adapter->hw.phy.autoneg_advertised = 0x2f; | 6325 | adapter->hw.phy.autoneg_advertised = 0x2f; |
6328 | 6326 | ||
6329 | /* ring size defaults */ | 6327 | /* ring size defaults */ |
6330 | adapter->rx_ring->count = 256; | 6328 | adapter->rx_ring->count = E1000_DEFAULT_RXD; |
6331 | adapter->tx_ring->count = 256; | 6329 | adapter->tx_ring->count = E1000_DEFAULT_TXD; |
6332 | 6330 | ||
6333 | /* | 6331 | /* |
6334 | * Initial Wake on LAN setting - If APM wake is enabled in | 6332 | * Initial Wake on LAN setting - If APM wake is enabled in |
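The e1000e rework above drops the fixed E1000_MAX_PER_TXD/E1000_MAX_TXD_PWR limits: the per-descriptor cap now comes from adapter->tx_fifo_limit, derived at reset from the transmit packet-buffer allocation minus 96 bytes and clamped to 24 KB, and descriptor counts are computed with DIV_ROUND_UP() against that cap. A small sketch of the arithmetic with made-up register values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int pba_tx_kib = 20;                      /* illustrative Tx packet-buffer share */
        unsigned int fifo_limit = pba_tx_kib * 1024 - 96;  /* per-descriptor byte cap             */

        if (fifo_limit > 24 * 1024)
            fifo_limit = 24 * 1024;                        /* upper bound from the hardware       */

        /* descriptors for a 60000-byte linear buffer plus one 4096-byte fragment */
        unsigned int count = DIV_ROUND_UP(60000u, fifo_limit) +
                             DIV_ROUND_UP(4096u, fifo_limit);

        printf("fifo_limit=%u bytes, descriptors=%u\n", fifo_limit, count);
        return 0;
    }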
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 8cba2df82b18..5faedd855b77 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c | |||
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, | |||
863 | &ip_entry->ip4dst, &ip_entry->pdst); | 863 | &ip_entry->ip4dst, &ip_entry->pdst); |
864 | if (rc != 0) { | 864 | if (rc != 0) { |
865 | rc = efx_filter_get_ipv4_full( | 865 | rc = efx_filter_get_ipv4_full( |
866 | &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, | 866 | &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, |
867 | &ip_entry->ip4dst, &ip_entry->pdst); | 867 | &ip_entry->ip4src, &ip_entry->psrc); |
868 | EFX_WARN_ON_PARANOID(rc); | 868 | EFX_WARN_ON_PARANOID(rc); |
869 | ip_mask->ip4src = ~0; | 869 | ip_mask->ip4src = ~0; |
870 | ip_mask->psrc = ~0; | 870 | ip_mask->psrc = ~0; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e2d083228f3a..719be3912aa9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -22,6 +22,9 @@ | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
23 | *******************************************************************************/ | 23 | *******************************************************************************/ |
24 | 24 | ||
25 | #ifndef __COMMON_H__ | ||
26 | #define __COMMON_H__ | ||
27 | |||
25 | #include <linux/etherdevice.h> | 28 | #include <linux/etherdevice.h> |
26 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
27 | #include <linux/phy.h> | 30 | #include <linux/phy.h> |
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); | |||
366 | 369 | ||
367 | extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); | 370 | extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); |
368 | extern const struct stmmac_ring_mode_ops ring_mode_ops; | 371 | extern const struct stmmac_ring_mode_ops ring_mode_ops; |
372 | |||
373 | #endif /* __COMMON_H__ */ | ||
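common.h above gains a standard include guard, and the stmmac headers that follow get the same treatment, so including any of them more than once no longer triggers redefinition errors. The pattern in isolation (illustrative header, not one of the driver's files):

    /* example.h */
    #ifndef __EXAMPLE_H__
    #define __EXAMPLE_H__

    struct example_stats {           /* declarations protected by the guard */
        unsigned long tx_ok;
        unsigned long rx_ok;
    };

    #endif /* __EXAMPLE_H__ */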
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 9820ec842cc0..223adf95fd03 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h | |||
@@ -20,6 +20,10 @@ | |||
20 | 20 | ||
21 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 21 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
22 | *******************************************************************************/ | 22 | *******************************************************************************/ |
23 | |||
24 | #ifndef __DESCS_H__ | ||
25 | #define __DESCS_H__ | ||
26 | |||
23 | struct dma_desc { | 27 | struct dma_desc { |
24 | /* Receive descriptor */ | 28 | /* Receive descriptor */ |
25 | union { | 29 | union { |
@@ -166,3 +170,5 @@ enum tdes_csum_insertion { | |||
166 | * is not calculated */ | 170 | * is not calculated */ |
167 | cic_full = 3, /* IP header and pseudoheader */ | 171 | cic_full = 3, /* IP header and pseudoheader */ |
168 | }; | 172 | }; |
173 | |||
174 | #endif /* __DESCS_H__ */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index dd8d6e19dff6..7ee9499a6e38 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h | |||
@@ -27,6 +27,9 @@ | |||
27 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 27 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
28 | *******************************************************************************/ | 28 | *******************************************************************************/ |
29 | 29 | ||
30 | #ifndef __DESC_COM_H__ | ||
31 | #define __DESC_COM_H__ | ||
32 | |||
30 | #if defined(CONFIG_STMMAC_RING) | 33 | #if defined(CONFIG_STMMAC_RING) |
31 | static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) | 34 | static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) |
32 | { | 35 | { |
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) | |||
124 | p->des01.tx.buffer1_size = len; | 127 | p->des01.tx.buffer1_size = len; |
125 | } | 128 | } |
126 | #endif | 129 | #endif |
130 | |||
131 | #endif /* __DESC_COM_H__ */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 7c6d857a9cc7..2ec6aeae349e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h | |||
@@ -22,6 +22,9 @@ | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
23 | *******************************************************************************/ | 23 | *******************************************************************************/ |
24 | 24 | ||
25 | #ifndef __DWMAC100_H__ | ||
26 | #define __DWMAC100_H__ | ||
27 | |||
25 | #include <linux/phy.h> | 28 | #include <linux/phy.h> |
26 | #include "common.h" | 29 | #include "common.h" |
27 | 30 | ||
@@ -119,3 +122,5 @@ enum ttc_control { | |||
119 | #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ | 122 | #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ |
120 | 123 | ||
121 | extern const struct stmmac_dma_ops dwmac100_dma_ops; | 124 | extern const struct stmmac_dma_ops dwmac100_dma_ops; |
125 | |||
126 | #endif /* __DWMAC100_H__ */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index f90fcb5f9573..0e4cacedc1f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | |||
@@ -19,6 +19,8 @@ | |||
19 | 19 | ||
20 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 20 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
21 | *******************************************************************************/ | 21 | *******************************************************************************/ |
22 | #ifndef __DWMAC1000_H__ | ||
23 | #define __DWMAC1000_H__ | ||
22 | 24 | ||
23 | #include <linux/phy.h> | 25 | #include <linux/phy.h> |
24 | #include "common.h" | 26 | #include "common.h" |
@@ -229,6 +231,7 @@ enum rtc_control { | |||
229 | #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 | 231 | #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 |
230 | 232 | ||
231 | /* Synopsys Core versions */ | 233 | /* Synopsys Core versions */ |
232 | #define DWMAC_CORE_3_40 34 | 234 | #define DWMAC_CORE_3_40 0x34 |
233 | 235 | ||
234 | extern const struct stmmac_dma_ops dwmac1000_dma_ops; | 236 | extern const struct stmmac_dma_ops dwmac1000_dma_ops; |
237 | #endif /* __DWMAC1000_H__ */ | ||
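The DWMAC_CORE_3_40 change above matters because, as the switch to 0x34 indicates, the Synopsys core version read back from the hardware is a hex-coded value (a 3.40 core reads as 0x34), while decimal 34 is 0x22, so the old constant could never match. A two-line check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int synopsys_id = 0x34;   /* value as read from the version register */

        printf("decimal 34 = 0x%02x, 0x34 = %u decimal\n", 34, 0x34);
        printf("old compare: %d, new compare: %d\n",
               synopsys_id == 34, synopsys_id == 0x34);
        return 0;
    }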
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e678ce39d014..e49c9a0fd6ff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | |||
@@ -22,6 +22,9 @@ | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
23 | *******************************************************************************/ | 23 | *******************************************************************************/ |
24 | 24 | ||
25 | #ifndef __DWMAC_DMA_H__ | ||
26 | #define __DWMAC_DMA_H__ | ||
27 | |||
25 | /* DMA CRS Control and Status Register Mapping */ | 28 | /* DMA CRS Control and Status Register Mapping */ |
26 | #define DMA_BUS_MODE 0x00001000 /* Bus Mode */ | 29 | #define DMA_BUS_MODE 0x00001000 /* Bus Mode */ |
27 | #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ | 30 | #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ |
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr); | |||
109 | extern void dwmac_dma_stop_rx(void __iomem *ioaddr); | 112 | extern void dwmac_dma_stop_rx(void __iomem *ioaddr); |
110 | extern int dwmac_dma_interrupt(void __iomem *ioaddr, | 113 | extern int dwmac_dma_interrupt(void __iomem *ioaddr, |
111 | struct stmmac_extra_stats *x); | 114 | struct stmmac_extra_stats *x); |
115 | |||
116 | #endif /* __DWMAC_DMA_H__ */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index a38352024cb8..67995ef25251 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h | |||
@@ -22,6 +22,9 @@ | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
23 | *******************************************************************************/ | 23 | *******************************************************************************/ |
24 | 24 | ||
25 | #ifndef __MMC_H__ | ||
26 | #define __MMC_H__ | ||
27 | |||
25 | /* MMC control register */ | 28 | /* MMC control register */ |
26 | /* When set, all counter are reset */ | 29 | /* When set, all counter are reset */ |
27 | #define MMC_CNTRL_COUNTER_RESET 0x1 | 30 | #define MMC_CNTRL_COUNTER_RESET 0x1 |
@@ -129,3 +132,5 @@ struct stmmac_counters { | |||
129 | extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); | 132 | extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); |
130 | extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); | 133 | extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); |
131 | extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); | 134 | extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); |
135 | |||
136 | #endif /* __MMC_H__ */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index c07cfe989f6e..0c74a702d461 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ | 33 | #define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ |
34 | #define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ | 34 | #define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ |
35 | #define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ | 35 | #define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ |
36 | #define MMC_DEFAUL_MASK 0xffffffff | 36 | #define MMC_DEFAULT_MASK 0xffffffff |
37 | 37 | ||
38 | /* MMC TX counter registers */ | 38 | /* MMC TX counter registers */ |
39 | 39 | ||
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) | |||
147 | /* To mask all all interrupts.*/ | 147 | /* To mask all all interrupts.*/ |
148 | void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) | 148 | void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) |
149 | { | 149 | { |
150 | writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); | 150 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); |
151 | writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); | 151 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); |
152 | } | 152 | } |
153 | 153 | ||
154 | /* This reads the MAC core counters (if actaully supported). | 154 | /* This reads the MAC core counters (if actaully supported). |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f2d3665430ad..e872e1da3137 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -20,6 +20,9 @@ | |||
20 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 20 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
21 | *******************************************************************************/ | 21 | *******************************************************************************/ |
22 | 22 | ||
23 | #ifndef __STMMAC_H__ | ||
24 | #define __STMMAC_H__ | ||
25 | |||
23 | #define STMMAC_RESOURCE_NAME "stmmaceth" | 26 | #define STMMAC_RESOURCE_NAME "stmmaceth" |
24 | #define DRV_MODULE_VERSION "March_2012" | 27 | #define DRV_MODULE_VERSION "March_2012" |
25 | 28 | ||
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void) | |||
166 | { | 169 | { |
167 | } | 170 | } |
168 | #endif /* CONFIG_STMMAC_PCI */ | 171 | #endif /* CONFIG_STMMAC_PCI */ |
172 | |||
173 | #endif /* __STMMAC_H__ */ | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h index 6863590d184b..aea9b14cdfbe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h | |||
@@ -21,6 +21,8 @@ | |||
21 | 21 | ||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
23 | *******************************************************************************/ | 23 | *******************************************************************************/ |
24 | #ifndef __STMMAC_TIMER_H__ | ||
25 | #define __STMMAC_TIMER_H__ | ||
24 | 26 | ||
25 | struct stmmac_timer { | 27 | struct stmmac_timer { |
26 | void (*timer_start) (unsigned int new_freq); | 28 | void (*timer_start) (unsigned int new_freq); |
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev); | |||
40 | extern int tmu2_register_user(void *fnt, void *data); | 42 | extern int tmu2_register_user(void *fnt, void *data); |
41 | extern void tmu2_unregister_user(void); | 43 | extern void tmu2_unregister_user(void); |
42 | #endif | 44 | #endif |
45 | |||
46 | #endif /* __STMMAC_TIMER_H__ */ | ||
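Note on the stmmac header changes above: each header gains a conventional include guard so that double inclusion cannot redefine its macros and declarations. A minimal sketch of the pattern (the file and macro names below are illustrative, not taken from the patch):

    #ifndef __EXAMPLE_H__
    #define __EXAMPLE_H__

    /* declarations live between the guard lines; a repeated
     * #include of this header then expands to nothing. */
    struct example_ops {
            int (*init)(void);
    };

    #endif /* __EXAMPLE_H__ */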
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index cd7ee204e94a..a9ca4a03d31b 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c | |||
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev) | |||
394 | struct device *dev = &pdev->dev; | 394 | struct device *dev = &pdev->dev; |
395 | struct davinci_mdio_data *data = dev_get_drvdata(dev); | 395 | struct davinci_mdio_data *data = dev_get_drvdata(dev); |
396 | 396 | ||
397 | if (data->bus) | 397 | if (data->bus) { |
398 | mdiobus_unregister(data->bus); | ||
398 | mdiobus_free(data->bus); | 399 | mdiobus_free(data->bus); |
400 | } | ||
399 | 401 | ||
400 | if (data->clk) | 402 | if (data->clk) |
401 | clk_put(data->clk); | 403 | clk_put(data->clk); |
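The davinci_mdio remove fix above restores the required teardown order: a bus that was registered with mdiobus_register() must be unregistered before its memory is released. A hedged sketch of that ordering, with an illustrative function name:

    #include <linux/phy.h>

    static void example_mdio_teardown(struct mii_bus *bus)
    {
            if (bus) {
                    mdiobus_unregister(bus);   /* detach PHYs, remove sysfs entries */
                    mdiobus_free(bus);         /* only now is it safe to free */
            }
    }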
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c index 24d8566cfd8b..441b4dc79450 100644 --- a/drivers/net/fddi/skfp/pmf.c +++ b/drivers/net/fddi/skfp/pmf.c | |||
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, | |||
673 | sm_pm_get_ls(smc,port_to_mib(smc,port))) ; | 673 | sm_pm_get_ls(smc,port_to_mib(smc,port))) ; |
674 | break ; | 674 | break ; |
675 | case SMT_P_REASON : | 675 | case SMT_P_REASON : |
676 | * (u_long *) to = 0 ; | 676 | *(u32 *)to = 0 ; |
677 | sp_len = 4 ; | 677 | sp_len = 4 ; |
678 | goto sp_done ; | 678 | goto sp_done ; |
679 | case SMT_P1033 : /* time stamp */ | 679 | case SMT_P1033 : /* time stamp */ |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 328397c66730..adfab3fc5478 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -413,7 +413,9 @@ static const struct usb_device_id products[] = { | |||
413 | 413 | ||
414 | /* 5. Gobi 2000 and 3000 devices */ | 414 | /* 5. Gobi 2000 and 3000 devices */ |
415 | {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ | 415 | {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ |
416 | {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ | ||
416 | {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ | 417 | {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ |
418 | {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ | ||
417 | {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ | 419 | {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ |
418 | {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ | 420 | {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ |
419 | {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ | 421 | {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ |
@@ -441,6 +443,8 @@ static const struct usb_device_id products[] = { | |||
441 | {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ | 443 | {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ |
442 | {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ | 444 | {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ |
443 | {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ | 445 | {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ |
446 | {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */ | ||
447 | {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */ | ||
444 | 448 | ||
445 | { } /* END */ | 449 | { } /* END */ |
446 | }; | 450 | }; |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 8531c1caac28..fd4b26d46fd5 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf) | |||
1573 | netif_device_present(dev->net) && | 1573 | netif_device_present(dev->net) && |
1574 | !timer_pending(&dev->delay) && | 1574 | !timer_pending(&dev->delay) && |
1575 | !test_bit(EVENT_RX_HALT, &dev->flags)) | 1575 | !test_bit(EVENT_RX_HALT, &dev->flags)) |
1576 | rx_alloc_submit(dev, GFP_KERNEL); | 1576 | rx_alloc_submit(dev, GFP_NOIO); |
1577 | 1577 | ||
1578 | if (!(dev->txq.qlen >= TX_QLEN(dev))) | 1578 | if (!(dev->txq.qlen >= TX_QLEN(dev))) |
1579 | netif_tx_wake_all_queues(dev->net); | 1579 | netif_tx_wake_all_queues(dev->net); |
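The usbnet change switches the rx refill in the resume path from GFP_KERNEL to GFP_NOIO: during resume, an allocation that recurses into new I/O can deadlock on devices that are themselves still resuming. A hedged sketch of the idea (the function name and length are illustrative):

    #include <linux/skbuff.h>

    static int example_resume_refill(unsigned int len)
    {
            /* GFP_NOIO: the allocator must not start new I/O to satisfy this */
            struct sk_buff *skb = alloc_skb(len, GFP_NOIO);

            if (!skb)
                    return -ENOMEM;
            kfree_skb(skb);         /* a real driver would queue the skb for rx */
            return 0;
    }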
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index 4026c906cc7b..b7e0258887e7 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c | |||
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) | |||
1482 | case AR5K_EEPROM_MODE_11A: | 1482 | case AR5K_EEPROM_MODE_11A: |
1483 | offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); | 1483 | offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); |
1484 | rate_pcal_info = ee->ee_rate_tpwr_a; | 1484 | rate_pcal_info = ee->ee_rate_tpwr_a; |
1485 | ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; | 1485 | ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN; |
1486 | break; | 1486 | break; |
1487 | case AR5K_EEPROM_MODE_11B: | 1487 | case AR5K_EEPROM_MODE_11B: |
1488 | offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); | 1488 | offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); |
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h index dc2bcfeadeb4..94a9bbea6874 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.h +++ b/drivers/net/wireless/ath/ath5k/eeprom.h | |||
@@ -182,6 +182,7 @@ | |||
182 | #define AR5K_EEPROM_EEP_DELTA 10 | 182 | #define AR5K_EEPROM_EEP_DELTA 10 |
183 | #define AR5K_EEPROM_N_MODES 3 | 183 | #define AR5K_EEPROM_N_MODES 3 |
184 | #define AR5K_EEPROM_N_5GHZ_CHAN 10 | 184 | #define AR5K_EEPROM_N_5GHZ_CHAN 10 |
185 | #define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8 | ||
185 | #define AR5K_EEPROM_N_2GHZ_CHAN 3 | 186 | #define AR5K_EEPROM_N_2GHZ_CHAN 3 |
186 | #define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 | 187 | #define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 |
187 | #define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 | 188 | #define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 192ad5c1fcc8..a5edebeb0b4f 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | |||
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl) | |||
1233 | /* dpc will not be rescheduled */ | 1233 | /* dpc will not be rescheduled */ |
1234 | wl->resched = false; | 1234 | wl->resched = false; |
1235 | 1235 | ||
1236 | /* inform publicly that interface is down */ | ||
1237 | wl->pub->up = false; | ||
1238 | |||
1236 | return 0; | 1239 | return 0; |
1237 | } | 1240 | } |
1238 | 1241 | ||
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 95aa8e1683ec..83324b321652 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) | |||
2042 | return; | 2042 | return; |
2043 | } | 2043 | } |
2044 | len = ETH_ALEN; | 2044 | len = ETH_ALEN; |
2045 | ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); | 2045 | ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid, |
2046 | &len); | ||
2046 | if (ret) { | 2047 | if (ret) { |
2047 | IPW_DEBUG_INFO("failed querying ordinals at line %d\n", | 2048 | IPW_DEBUG_INFO("failed querying ordinals at line %d\n", |
2048 | __LINE__); | 2049 | __LINE__); |
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 46782f1102ac..a47b306b522c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c | |||
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, | |||
124 | const struct fw_img *img; | 124 | const struct fw_img *img; |
125 | size_t bufsz; | 125 | size_t bufsz; |
126 | 126 | ||
127 | if (!iwl_is_ready_rf(priv)) | ||
128 | return -EAGAIN; | ||
129 | |||
127 | /* default is to dump the entire data segment */ | 130 | /* default is to dump the entire data segment */ |
128 | if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { | 131 | if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { |
129 | priv->dbgfs_sram_offset = 0x800000; | 132 | priv->dbgfs_sram_offset = 0x800000; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index d9694c58208c..4ffc18dc3a57 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q); | |||
350 | /***************************************************** | 350 | /***************************************************** |
351 | * Error handling | 351 | * Error handling |
352 | ******************************************************/ | 352 | ******************************************************/ |
353 | int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); | 353 | int iwl_dump_fh(struct iwl_trans *trans, char **buf); |
354 | void iwl_dump_csr(struct iwl_trans *trans); | 354 | void iwl_dump_csr(struct iwl_trans *trans); |
355 | 355 | ||
356 | /***************************************************** | 356 | /***************************************************** |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 39a6ca1f009c..d1a61ba6247a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) | |||
555 | } | 555 | } |
556 | 556 | ||
557 | iwl_dump_csr(trans); | 557 | iwl_dump_csr(trans); |
558 | iwl_dump_fh(trans, NULL, false); | 558 | iwl_dump_fh(trans, NULL); |
559 | 559 | ||
560 | iwl_op_mode_nic_error(trans->op_mode); | 560 | iwl_op_mode_nic_error(trans->op_mode); |
561 | } | 561 | } |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 939c2f78df58..1e86ea2266d4 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd) | |||
1649 | #undef IWL_CMD | 1649 | #undef IWL_CMD |
1650 | } | 1650 | } |
1651 | 1651 | ||
1652 | int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) | 1652 | int iwl_dump_fh(struct iwl_trans *trans, char **buf) |
1653 | { | 1653 | { |
1654 | int i; | 1654 | int i; |
1655 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1656 | int pos = 0; | ||
1657 | size_t bufsz = 0; | ||
1658 | #endif | ||
1659 | static const u32 fh_tbl[] = { | 1655 | static const u32 fh_tbl[] = { |
1660 | FH_RSCSR_CHNL0_STTS_WPTR_REG, | 1656 | FH_RSCSR_CHNL0_STTS_WPTR_REG, |
1661 | FH_RSCSR_CHNL0_RBDCB_BASE_REG, | 1657 | FH_RSCSR_CHNL0_RBDCB_BASE_REG, |
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) | |||
1667 | FH_TSSR_TX_STATUS_REG, | 1663 | FH_TSSR_TX_STATUS_REG, |
1668 | FH_TSSR_TX_ERROR_REG | 1664 | FH_TSSR_TX_ERROR_REG |
1669 | }; | 1665 | }; |
1670 | #ifdef CONFIG_IWLWIFI_DEBUG | 1666 | |
1671 | if (display) { | 1667 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
1672 | bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; | 1668 | if (buf) { |
1669 | int pos = 0; | ||
1670 | size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; | ||
1671 | |||
1673 | *buf = kmalloc(bufsz, GFP_KERNEL); | 1672 | *buf = kmalloc(bufsz, GFP_KERNEL); |
1674 | if (!*buf) | 1673 | if (!*buf) |
1675 | return -ENOMEM; | 1674 | return -ENOMEM; |
1675 | |||
1676 | pos += scnprintf(*buf + pos, bufsz - pos, | 1676 | pos += scnprintf(*buf + pos, bufsz - pos, |
1677 | "FH register values:\n"); | 1677 | "FH register values:\n"); |
1678 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | 1678 | |
1679 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) | ||
1679 | pos += scnprintf(*buf + pos, bufsz - pos, | 1680 | pos += scnprintf(*buf + pos, bufsz - pos, |
1680 | " %34s: 0X%08x\n", | 1681 | " %34s: 0X%08x\n", |
1681 | get_fh_string(fh_tbl[i]), | 1682 | get_fh_string(fh_tbl[i]), |
1682 | iwl_read_direct32(trans, fh_tbl[i])); | 1683 | iwl_read_direct32(trans, fh_tbl[i])); |
1683 | } | 1684 | |
1684 | return pos; | 1685 | return pos; |
1685 | } | 1686 | } |
1686 | #endif | 1687 | #endif |
1688 | |||
1687 | IWL_ERR(trans, "FH register values:\n"); | 1689 | IWL_ERR(trans, "FH register values:\n"); |
1688 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { | 1690 | for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) |
1689 | IWL_ERR(trans, " %34s: 0X%08x\n", | 1691 | IWL_ERR(trans, " %34s: 0X%08x\n", |
1690 | get_fh_string(fh_tbl[i]), | 1692 | get_fh_string(fh_tbl[i]), |
1691 | iwl_read_direct32(trans, fh_tbl[i])); | 1693 | iwl_read_direct32(trans, fh_tbl[i])); |
1692 | } | 1694 | |
1693 | return 0; | 1695 | return 0; |
1694 | } | 1696 | } |
1695 | 1697 | ||
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, | |||
1982 | size_t count, loff_t *ppos) | 1984 | size_t count, loff_t *ppos) |
1983 | { | 1985 | { |
1984 | struct iwl_trans *trans = file->private_data; | 1986 | struct iwl_trans *trans = file->private_data; |
1985 | char *buf; | 1987 | char *buf = NULL; |
1986 | int pos = 0; | 1988 | int pos = 0; |
1987 | ssize_t ret = -EFAULT; | 1989 | ssize_t ret = -EFAULT; |
1988 | 1990 | ||
1989 | ret = pos = iwl_dump_fh(trans, &buf, true); | 1991 | ret = pos = iwl_dump_fh(trans, &buf); |
1990 | if (buf) { | 1992 | if (buf) { |
1991 | ret = simple_read_from_buffer(user_buf, | 1993 | ret = simple_read_from_buffer(user_buf, |
1992 | count, ppos, buf, pos); | 1994 | count, ppos, buf, pos); |
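With the reworked signature above, iwl_dump_fh() no longer takes a display flag: passing a non-NULL char ** asks for a kmalloc'ed text dump (the debugfs case, freed by the caller), while passing NULL logs the registers through IWL_ERR. A hedged caller sketch, assuming the driver's internal.h is already included:

    static ssize_t example_dump_fh_to_text(struct iwl_trans *trans, char **out)
    {
            char *buf = NULL;
            int pos = iwl_dump_fh(trans, &buf);

            if (!buf)
                    return pos;     /* error code, or 0 if it only logged */

            *out = buf;             /* caller kfree()s the dump when done */
            return pos;
    }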
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 30899901aef5..650f79a1f2bd 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -57,8 +57,7 @@ | |||
57 | static const struct ethtool_ops xennet_ethtool_ops; | 57 | static const struct ethtool_ops xennet_ethtool_ops; |
58 | 58 | ||
59 | struct netfront_cb { | 59 | struct netfront_cb { |
60 | struct page *page; | 60 | int pull_to; |
61 | unsigned offset; | ||
62 | }; | 61 | }; |
63 | 62 | ||
64 | #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) | 63 | #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) |
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev, | |||
867 | struct sk_buff *skb; | 866 | struct sk_buff *skb; |
868 | 867 | ||
869 | while ((skb = __skb_dequeue(rxq)) != NULL) { | 868 | while ((skb = __skb_dequeue(rxq)) != NULL) { |
870 | struct page *page = NETFRONT_SKB_CB(skb)->page; | 869 | int pull_to = NETFRONT_SKB_CB(skb)->pull_to; |
871 | void *vaddr = page_address(page); | ||
872 | unsigned offset = NETFRONT_SKB_CB(skb)->offset; | ||
873 | |||
874 | memcpy(skb->data, vaddr + offset, | ||
875 | skb_headlen(skb)); | ||
876 | 870 | ||
877 | if (page != skb_frag_page(&skb_shinfo(skb)->frags[0])) | 871 | __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); |
878 | __free_page(page); | ||
879 | 872 | ||
880 | /* Ethernet work: Delayed to here as it peeks the header. */ | 873 | /* Ethernet work: Delayed to here as it peeks the header. */ |
881 | skb->protocol = eth_type_trans(skb, dev); | 874 | skb->protocol = eth_type_trans(skb, dev); |
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget) | |||
913 | struct sk_buff_head errq; | 906 | struct sk_buff_head errq; |
914 | struct sk_buff_head tmpq; | 907 | struct sk_buff_head tmpq; |
915 | unsigned long flags; | 908 | unsigned long flags; |
916 | unsigned int len; | ||
917 | int err; | 909 | int err; |
918 | 910 | ||
919 | spin_lock(&np->rx_lock); | 911 | spin_lock(&np->rx_lock); |
@@ -955,24 +947,13 @@ err: | |||
955 | } | 947 | } |
956 | } | 948 | } |
957 | 949 | ||
958 | NETFRONT_SKB_CB(skb)->page = | 950 | NETFRONT_SKB_CB(skb)->pull_to = rx->status; |
959 | skb_frag_page(&skb_shinfo(skb)->frags[0]); | 951 | if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) |
960 | NETFRONT_SKB_CB(skb)->offset = rx->offset; | 952 | NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; |
961 | |||
962 | len = rx->status; | ||
963 | if (len > RX_COPY_THRESHOLD) | ||
964 | len = RX_COPY_THRESHOLD; | ||
965 | skb_put(skb, len); | ||
966 | 953 | ||
967 | if (rx->status > len) { | 954 | skb_shinfo(skb)->frags[0].page_offset = rx->offset; |
968 | skb_shinfo(skb)->frags[0].page_offset = | 955 | skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); |
969 | rx->offset + len; | 956 | skb->data_len = rx->status; |
970 | skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len); | ||
971 | skb->data_len = rx->status - len; | ||
972 | } else { | ||
973 | __skb_fill_page_desc(skb, 0, NULL, 0, 0); | ||
974 | skb_shinfo(skb)->nr_frags = 0; | ||
975 | } | ||
976 | 957 | ||
977 | i = xennet_fill_frags(np, skb, &tmpq); | 958 | i = xennet_fill_frags(np, skb, &tmpq); |
978 | 959 | ||
@@ -999,7 +980,7 @@ err: | |||
999 | * receive throughout using the standard receive | 980 | * receive throughout using the standard receive |
1000 | * buffer size was cut by 25%(!!!). | 981 | * buffer size was cut by 25%(!!!). |
1001 | */ | 982 | */ |
1002 | skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); | 983 | skb->truesize += skb->data_len - RX_COPY_THRESHOLD; |
1003 | skb->len += skb->data_len; | 984 | skb->len += skb->data_len; |
1004 | 985 | ||
1005 | if (rx->flags & XEN_NETRXF_csum_blank) | 986 | if (rx->flags & XEN_NETRXF_csum_blank) |
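The xen-netfront rework stops copying the packet head by hand; it records only pull_to in skb->cb and lets __pskb_pull_tail() move that many bytes from the first fragment into the linear area. A hedged sketch of the pulling step in isolation:

    #include <linux/skbuff.h>

    static void example_pull_header(struct sk_buff *skb, int pull_to)
    {
            /* grow the linear area up to pull_to bytes; the rest of the
             * data stays in frags[0] */
            if (pull_to > skb_headlen(skb))
                    __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
    }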
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 5270f1a99328..d6fd6b6d9d4b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi) | |||
280 | { | 280 | { |
281 | struct drv_dev_and_id *ddi = _ddi; | 281 | struct drv_dev_and_id *ddi = _ddi; |
282 | struct device *dev = &ddi->dev->dev; | 282 | struct device *dev = &ddi->dev->dev; |
283 | struct device *parent = dev->parent; | ||
283 | int rc; | 284 | int rc; |
284 | 285 | ||
286 | /* The parent bridge must be in active state when probing */ | ||
287 | if (parent) | ||
288 | pm_runtime_get_sync(parent); | ||
285 | /* Unbound PCI devices are always set to disabled and suspended. | 289 | /* Unbound PCI devices are always set to disabled and suspended. |
286 | * During probe, the device is set to enabled and active and the | 290 | * During probe, the device is set to enabled and active and the |
287 | * usage count is incremented. If the driver supports runtime PM, | 291 | * usage count is incremented. If the driver supports runtime PM, |
@@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi) | |||
298 | pm_runtime_set_suspended(dev); | 302 | pm_runtime_set_suspended(dev); |
299 | pm_runtime_put_noidle(dev); | 303 | pm_runtime_put_noidle(dev); |
300 | } | 304 | } |
305 | if (parent) | ||
306 | pm_runtime_put(parent); | ||
301 | return rc; | 307 | return rc; |
302 | } | 308 | } |
303 | 309 | ||
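local_pci_probe() now holds a runtime PM reference on the parent bridge for the duration of the probe, so the bridge cannot drop into a low-power state while the new device's config space is being accessed. The get/put bracket, reduced to a hedged helper with an illustrative name:

    #include <linux/pm_runtime.h>

    static int example_with_active_parent(struct device *dev,
                                          int (*op)(struct device *))
    {
            struct device *parent = dev->parent;
            int rc;

            if (parent)
                    pm_runtime_get_sync(parent);    /* resume the bridge if needed */
            rc = op(dev);
            if (parent)
                    pm_runtime_put(parent);         /* drop the reference again */
            return rc;
    }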
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 6869009c7393..02d107b15281 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
458 | } | 458 | } |
459 | struct device_attribute vga_attr = __ATTR_RO(boot_vga); | 459 | struct device_attribute vga_attr = __ATTR_RO(boot_vga); |
460 | 460 | ||
461 | static void | ||
462 | pci_config_pm_runtime_get(struct pci_dev *pdev) | ||
463 | { | ||
464 | struct device *dev = &pdev->dev; | ||
465 | struct device *parent = dev->parent; | ||
466 | |||
467 | if (parent) | ||
468 | pm_runtime_get_sync(parent); | ||
469 | pm_runtime_get_noresume(dev); | ||
470 | /* | ||
471 | * pdev->current_state is set to PCI_D3cold during suspending, | ||
472 | * so wait until suspending completes | ||
473 | */ | ||
474 | pm_runtime_barrier(dev); | ||
475 | /* | ||
476 | * Only need to resume devices in D3cold, because config | ||
477 | * registers are still accessible for devices suspended but | ||
478 | * not in D3cold. | ||
479 | */ | ||
480 | if (pdev->current_state == PCI_D3cold) | ||
481 | pm_runtime_resume(dev); | ||
482 | } | ||
483 | |||
484 | static void | ||
485 | pci_config_pm_runtime_put(struct pci_dev *pdev) | ||
486 | { | ||
487 | struct device *dev = &pdev->dev; | ||
488 | struct device *parent = dev->parent; | ||
489 | |||
490 | pm_runtime_put(dev); | ||
491 | if (parent) | ||
492 | pm_runtime_put_sync(parent); | ||
493 | } | ||
494 | |||
461 | static ssize_t | 495 | static ssize_t |
462 | pci_read_config(struct file *filp, struct kobject *kobj, | 496 | pci_read_config(struct file *filp, struct kobject *kobj, |
463 | struct bin_attribute *bin_attr, | 497 | struct bin_attribute *bin_attr, |
@@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, | |||
484 | size = count; | 518 | size = count; |
485 | } | 519 | } |
486 | 520 | ||
521 | pci_config_pm_runtime_get(dev); | ||
522 | |||
487 | if ((off & 1) && size) { | 523 | if ((off & 1) && size) { |
488 | u8 val; | 524 | u8 val; |
489 | pci_user_read_config_byte(dev, off, &val); | 525 | pci_user_read_config_byte(dev, off, &val); |
@@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, | |||
529 | --size; | 565 | --size; |
530 | } | 566 | } |
531 | 567 | ||
568 | pci_config_pm_runtime_put(dev); | ||
569 | |||
532 | return count; | 570 | return count; |
533 | } | 571 | } |
534 | 572 | ||
@@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, | |||
549 | count = size; | 587 | count = size; |
550 | } | 588 | } |
551 | 589 | ||
590 | pci_config_pm_runtime_get(dev); | ||
591 | |||
552 | if ((off & 1) && size) { | 592 | if ((off & 1) && size) { |
553 | pci_user_write_config_byte(dev, off, data[off - init_off]); | 593 | pci_user_write_config_byte(dev, off, data[off - init_off]); |
554 | off++; | 594 | off++; |
@@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, | |||
587 | --size; | 627 | --size; |
588 | } | 628 | } |
589 | 629 | ||
630 | pci_config_pm_runtime_put(dev); | ||
631 | |||
590 | return count; | 632 | return count; |
591 | } | 633 | } |
592 | 634 | ||
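The two helpers added to pci-sysfs.c bracket every user-initiated config-space access so that a device parked in D3cold is resumed before its registers are touched, and its parent stays active for the duration. A hedged usage sketch (the helpers are static to pci-sysfs.c, so this only illustrates the call pattern inside that file):

    static u8 example_user_read_byte(struct pci_dev *pdev, int where)
    {
            u8 val = 0;

            pci_config_pm_runtime_get(pdev);
            pci_user_read_config_byte(pdev, where, &val);
            pci_config_pm_runtime_put(pdev);
            return val;
    }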
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index f3ea977a5b1b..ab4bf5a4c2f1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev) | |||
1941 | dev->pm_cap = pm; | 1941 | dev->pm_cap = pm; |
1942 | dev->d3_delay = PCI_PM_D3_WAIT; | 1942 | dev->d3_delay = PCI_PM_D3_WAIT; |
1943 | dev->d3cold_delay = PCI_PM_D3COLD_WAIT; | 1943 | dev->d3cold_delay = PCI_PM_D3COLD_WAIT; |
1944 | dev->d3cold_allowed = true; | ||
1944 | 1945 | ||
1945 | dev->d1_support = false; | 1946 | dev->d1_support = false; |
1946 | dev->d2_support = false; | 1947 | dev->d2_support = false; |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3a7eefcb270a..e76b44777dbf 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev) | |||
140 | { | 140 | { |
141 | return 0; | 141 | return 0; |
142 | } | 142 | } |
143 | |||
144 | static int pcie_port_runtime_idle(struct device *dev) | ||
145 | { | ||
146 | /* Delay for a short while to prevent too frequent suspend/resume */ | ||
147 | pm_schedule_suspend(dev, 10); | ||
148 | return -EBUSY; | ||
149 | } | ||
143 | #else | 150 | #else |
144 | #define pcie_port_runtime_suspend NULL | 151 | #define pcie_port_runtime_suspend NULL |
145 | #define pcie_port_runtime_resume NULL | 152 | #define pcie_port_runtime_resume NULL |
153 | #define pcie_port_runtime_idle NULL | ||
146 | #endif | 154 | #endif |
147 | 155 | ||
148 | static const struct dev_pm_ops pcie_portdrv_pm_ops = { | 156 | static const struct dev_pm_ops pcie_portdrv_pm_ops = { |
@@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { | |||
155 | .resume_noirq = pcie_port_resume_noirq, | 163 | .resume_noirq = pcie_port_resume_noirq, |
156 | .runtime_suspend = pcie_port_runtime_suspend, | 164 | .runtime_suspend = pcie_port_runtime_suspend, |
157 | .runtime_resume = pcie_port_runtime_resume, | 165 | .runtime_resume = pcie_port_runtime_resume, |
166 | .runtime_idle = pcie_port_runtime_idle, | ||
158 | }; | 167 | }; |
159 | 168 | ||
160 | #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) | 169 | #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) |
@@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, | |||
200 | return status; | 209 | return status; |
201 | 210 | ||
202 | pci_save_state(dev); | 211 | pci_save_state(dev); |
212 | /* | ||
213 | * D3cold may not work properly on some PCIe port, so disable | ||
214 | * it by default. | ||
215 | */ | ||
216 | dev->d3cold_allowed = false; | ||
203 | if (!pci_match_id(port_runtime_pm_black_list, dev)) | 217 | if (!pci_match_id(port_runtime_pm_black_list, dev)) |
204 | pm_runtime_put_noidle(&dev->dev); | 218 | pm_runtime_put_noidle(&dev->dev); |
205 | 219 | ||
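The new runtime_idle callback for PCIe ports does not suspend immediately; it schedules the suspend slightly later and returns -EBUSY so the PM core leaves the device alone for now, which avoids rapid suspend/resume cycling. The callback shape, as a hedged sketch:

    #include <linux/pm_runtime.h>

    static int example_port_runtime_idle(struct device *dev)
    {
            /* postpone the real suspend by 10 ms and report busy for now */
            pm_schedule_suspend(dev, 10);
            return -EBUSY;
    }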
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 6c143b4497ca..9f8a6b79a8ec 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) | |||
144 | case PCI_BASE_ADDRESS_MEM_TYPE_32: | 144 | case PCI_BASE_ADDRESS_MEM_TYPE_32: |
145 | break; | 145 | break; |
146 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: | 146 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: |
147 | dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n"); | 147 | /* 1M mem BAR treated as 32-bit BAR */ |
148 | break; | 148 | break; |
149 | case PCI_BASE_ADDRESS_MEM_TYPE_64: | 149 | case PCI_BASE_ADDRESS_MEM_TYPE_64: |
150 | flags |= IORESOURCE_MEM_64; | 150 | flags |= IORESOURCE_MEM_64; |
151 | break; | 151 | break; |
152 | default: | 152 | default: |
153 | dev_warn(&dev->dev, | 153 | /* mem unknown type treated as 32-bit BAR */ |
154 | "mem unknown type %x treated as 32-bit BAR\n", | ||
155 | mem_type); | ||
156 | break; | 154 | break; |
157 | } | 155 | } |
158 | return flags; | 156 | return flags; |
@@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
173 | u32 l, sz, mask; | 171 | u32 l, sz, mask; |
174 | u16 orig_cmd; | 172 | u16 orig_cmd; |
175 | struct pci_bus_region region; | 173 | struct pci_bus_region region; |
174 | bool bar_too_big = false, bar_disabled = false; | ||
176 | 175 | ||
177 | mask = type ? PCI_ROM_ADDRESS_MASK : ~0; | 176 | mask = type ? PCI_ROM_ADDRESS_MASK : ~0; |
178 | 177 | ||
178 | /* No printks while decoding is disabled! */ | ||
179 | if (!dev->mmio_always_on) { | 179 | if (!dev->mmio_always_on) { |
180 | pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); | 180 | pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); |
181 | pci_write_config_word(dev, PCI_COMMAND, | 181 | pci_write_config_word(dev, PCI_COMMAND, |
@@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
240 | goto fail; | 240 | goto fail; |
241 | 241 | ||
242 | if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { | 242 | if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { |
243 | dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", | 243 | bar_too_big = true; |
244 | pos); | ||
245 | goto fail; | 244 | goto fail; |
246 | } | 245 | } |
247 | 246 | ||
@@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
252 | region.start = 0; | 251 | region.start = 0; |
253 | region.end = sz64; | 252 | region.end = sz64; |
254 | pcibios_bus_to_resource(dev, res, ®ion); | 253 | pcibios_bus_to_resource(dev, res, ®ion); |
254 | bar_disabled = true; | ||
255 | } else { | 255 | } else { |
256 | region.start = l64; | 256 | region.start = l64; |
257 | region.end = l64 + sz64; | 257 | region.end = l64 + sz64; |
258 | pcibios_bus_to_resource(dev, res, ®ion); | 258 | pcibios_bus_to_resource(dev, res, ®ion); |
259 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", | ||
260 | pos, res); | ||
261 | } | 259 | } |
262 | } else { | 260 | } else { |
263 | sz = pci_size(l, sz, mask); | 261 | sz = pci_size(l, sz, mask); |
@@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
268 | region.start = l; | 266 | region.start = l; |
269 | region.end = l + sz; | 267 | region.end = l + sz; |
270 | pcibios_bus_to_resource(dev, res, ®ion); | 268 | pcibios_bus_to_resource(dev, res, ®ion); |
271 | |||
272 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); | ||
273 | } | 269 | } |
274 | 270 | ||
275 | out: | 271 | goto out; |
272 | |||
273 | |||
274 | fail: | ||
275 | res->flags = 0; | ||
276 | out: | ||
276 | if (!dev->mmio_always_on) | 277 | if (!dev->mmio_always_on) |
277 | pci_write_config_word(dev, PCI_COMMAND, orig_cmd); | 278 | pci_write_config_word(dev, PCI_COMMAND, orig_cmd); |
278 | 279 | ||
280 | if (bar_too_big) | ||
281 | dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos); | ||
282 | if (res->flags && !bar_disabled) | ||
283 | dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); | ||
284 | |||
279 | return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; | 285 | return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; |
280 | fail: | ||
281 | res->flags = 0; | ||
282 | goto out; | ||
283 | } | 286 | } |
284 | 287 | ||
285 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | 288 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
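The __pci_read_base() rework above defers all diagnostics: while decode is disabled nothing is printed, conditions such as an unusable 64-bit BAR are only recorded in local flags, and the messages are emitted after the original PCI_COMMAND value has been restored. A hedged sketch of that record-then-report shape:

    static void example_size_bar(struct pci_dev *dev, int pos)
    {
            bool bar_too_big = false;

            /* sizing happens here with decode disabled -- no printk in this window */

            /* decode restored; reporting is safe again */
            if (bar_too_big)
                    dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
    }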
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index 831868904e02..1dd61f402b04 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c | |||
@@ -58,6 +58,7 @@ struct sam9_rtc { | |||
58 | struct rtc_device *rtcdev; | 58 | struct rtc_device *rtcdev; |
59 | u32 imr; | 59 | u32 imr; |
60 | void __iomem *gpbr; | 60 | void __iomem *gpbr; |
61 | int irq; | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | #define rtt_readl(rtc, field) \ | 64 | #define rtt_readl(rtc, field) \ |
@@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) | |||
292 | { | 293 | { |
293 | struct resource *r, *r_gpbr; | 294 | struct resource *r, *r_gpbr; |
294 | struct sam9_rtc *rtc; | 295 | struct sam9_rtc *rtc; |
295 | int ret; | 296 | int ret, irq; |
296 | u32 mr; | 297 | u32 mr; |
297 | 298 | ||
298 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 299 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) | |||
302 | return -ENODEV; | 303 | return -ENODEV; |
303 | } | 304 | } |
304 | 305 | ||
306 | irq = platform_get_irq(pdev, 0); | ||
307 | if (irq < 0) { | ||
308 | dev_err(&pdev->dev, "failed to get interrupt resource\n"); | ||
309 | return irq; | ||
310 | } | ||
311 | |||
305 | rtc = kzalloc(sizeof *rtc, GFP_KERNEL); | 312 | rtc = kzalloc(sizeof *rtc, GFP_KERNEL); |
306 | if (!rtc) | 313 | if (!rtc) |
307 | return -ENOMEM; | 314 | return -ENOMEM; |
308 | 315 | ||
316 | rtc->irq = irq; | ||
317 | |||
309 | /* platform setup code should have handled this; sigh */ | 318 | /* platform setup code should have handled this; sigh */ |
310 | if (!device_can_wakeup(&pdev->dev)) | 319 | if (!device_can_wakeup(&pdev->dev)) |
311 | device_init_wakeup(&pdev->dev, 1); | 320 | device_init_wakeup(&pdev->dev, 1); |
@@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) | |||
345 | } | 354 | } |
346 | 355 | ||
347 | /* register irq handler after we know what name we'll use */ | 356 | /* register irq handler after we know what name we'll use */ |
348 | ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt, | 357 | ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED, |
349 | IRQF_SHARED, | ||
350 | dev_name(&rtc->rtcdev->dev), rtc); | 358 | dev_name(&rtc->rtcdev->dev), rtc); |
351 | if (ret) { | 359 | if (ret) { |
352 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS); | 360 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); |
353 | rtc_device_unregister(rtc->rtcdev); | 361 | rtc_device_unregister(rtc->rtcdev); |
354 | goto fail_register; | 362 | goto fail_register; |
355 | } | 363 | } |
@@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev) | |||
386 | 394 | ||
387 | /* disable all interrupts */ | 395 | /* disable all interrupts */ |
388 | rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); | 396 | rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); |
389 | free_irq(AT91_ID_SYS, rtc); | 397 | free_irq(rtc->irq, rtc); |
390 | 398 | ||
391 | rtc_device_unregister(rtc->rtcdev); | 399 | rtc_device_unregister(rtc->rtcdev); |
392 | 400 | ||
@@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev, | |||
423 | rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); | 431 | rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); |
424 | if (rtc->imr) { | 432 | if (rtc->imr) { |
425 | if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { | 433 | if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { |
426 | enable_irq_wake(AT91_ID_SYS); | 434 | enable_irq_wake(rtc->irq); |
427 | /* don't let RTTINC cause wakeups */ | 435 | /* don't let RTTINC cause wakeups */ |
428 | if (mr & AT91_RTT_RTTINCIEN) | 436 | if (mr & AT91_RTT_RTTINCIEN) |
429 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); | 437 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); |
@@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev) | |||
441 | 449 | ||
442 | if (rtc->imr) { | 450 | if (rtc->imr) { |
443 | if (device_may_wakeup(&pdev->dev)) | 451 | if (device_may_wakeup(&pdev->dev)) |
444 | disable_irq_wake(AT91_ID_SYS); | 452 | disable_irq_wake(rtc->irq); |
445 | mr = rtt_readl(rtc, MR); | 453 | mr = rtt_readl(rtc, MR); |
446 | rtt_writel(rtc, MR, mr | rtc->imr); | 454 | rtt_writel(rtc, MR, mr | rtc->imr); |
447 | } | 455 | } |
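The rtc-at91sam9 change stops hard-coding AT91_ID_SYS and instead asks the platform device for its interrupt, then uses that number consistently for request_irq(), free_irq() and the wake enable/disable calls. A hedged sketch of the lookup-and-request step:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int example_request_rtc_irq(struct platform_device *pdev,
                                       irq_handler_t handler, void *data)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;     /* no interrupt resource provided */

            return request_irq(irq, handler, IRQF_SHARED,
                               dev_name(&pdev->dev), data);
    }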
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 40a826a7295f..2fb2b9ea97ec 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) | |||
3804 | case BIODASDSYMMIO: | 3804 | case BIODASDSYMMIO: |
3805 | return dasd_symm_io(device, argp); | 3805 | return dasd_symm_io(device, argp); |
3806 | default: | 3806 | default: |
3807 | return -ENOIOCTLCMD; | 3807 | return -ENOTTY; |
3808 | } | 3808 | } |
3809 | } | 3809 | } |
3810 | 3810 | ||
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index cceae70279f6..654c6921a6d4 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode, | |||
498 | break; | 498 | break; |
499 | default: | 499 | default: |
500 | /* if the discipline has an ioctl method try it. */ | 500 | /* if the discipline has an ioctl method try it. */ |
501 | if (base->discipline->ioctl) { | 501 | rc = -ENOTTY; |
502 | if (base->discipline->ioctl) | ||
502 | rc = base->discipline->ioctl(block, cmd, argp); | 503 | rc = base->discipline->ioctl(block, cmd, argp); |
503 | if (rc == -ENOIOCTLCMD) | ||
504 | rc = -EINVAL; | ||
505 | } else | ||
506 | rc = -EINVAL; | ||
507 | } | 504 | } |
508 | dasd_put_device(base); | 505 | dasd_put_device(base); |
509 | return rc; | 506 | return rc; |
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c index 77da6a2f43dc..c03ecdd31e4c 100644 --- a/drivers/video/auo_k190x.c +++ b/drivers/video/auo_k190x.c | |||
@@ -987,7 +987,6 @@ err_regfb: | |||
987 | fb_dealloc_cmap(&info->cmap); | 987 | fb_dealloc_cmap(&info->cmap); |
988 | err_cmap: | 988 | err_cmap: |
989 | fb_deferred_io_cleanup(info); | 989 | fb_deferred_io_cleanup(info); |
990 | kfree(info->fbdefio); | ||
991 | err_defio: | 990 | err_defio: |
992 | vfree((void *)info->screen_base); | 991 | vfree((void *)info->screen_base); |
993 | err_irq: | 992 | err_irq: |
@@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev) | |||
1022 | fb_dealloc_cmap(&info->cmap); | 1021 | fb_dealloc_cmap(&info->cmap); |
1023 | 1022 | ||
1024 | fb_deferred_io_cleanup(info); | 1023 | fb_deferred_io_cleanup(info); |
1025 | kfree(info->fbdefio); | ||
1026 | 1024 | ||
1027 | vfree((void *)info->screen_base); | 1025 | vfree((void *)info->screen_base); |
1028 | 1026 | ||
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c index 28b1a834906b..61b182bf32a2 100644 --- a/drivers/video/console/bitblit.c +++ b/drivers/video/console/bitblit.c | |||
@@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, | |||
162 | image.depth = 1; | 162 | image.depth = 1; |
163 | 163 | ||
164 | if (attribute) { | 164 | if (attribute) { |
165 | buf = kmalloc(cellsize, GFP_KERNEL); | 165 | buf = kmalloc(cellsize, GFP_ATOMIC); |
166 | if (!buf) | 166 | if (!buf) |
167 | return; | 167 | return; |
168 | } | 168 | } |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 88e92041d8f0..fdefa8fd72c4 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -449,7 +449,7 @@ static int __init fb_console_setup(char *this_opt) | |||
449 | 449 | ||
450 | while ((options = strsep(&this_opt, ",")) != NULL) { | 450 | while ((options = strsep(&this_opt, ",")) != NULL) { |
451 | if (!strncmp(options, "font:", 5)) | 451 | if (!strncmp(options, "font:", 5)) |
452 | strcpy(fontname, options + 5); | 452 | strlcpy(fontname, options + 5, sizeof(fontname)); |
453 | 453 | ||
454 | if (!strncmp(options, "scrollback:", 11)) { | 454 | if (!strncmp(options, "scrollback:", 11)) { |
455 | options += 11; | 455 | options += 11; |
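The fbcon change replaces strcpy() of the user-supplied "font:" option with strlcpy(), which never writes past the destination and always NUL-terminates. A hedged one-liner illustrating the bounded copy:

    #include <linux/string.h>

    static void example_copy_font_name(char *dst, size_t dst_size, const char *opt)
    {
            /* truncates rather than overruns when opt is longer than dst */
            strlcpy(dst, opt, dst_size);
    }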
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c index 00ce1f34b496..57d940be5f3d 100644 --- a/drivers/video/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/mb862xx/mb862xxfbdrv.c | |||
@@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd, | |||
328 | case MB862XX_L1_SET_CFG: | 328 | case MB862XX_L1_SET_CFG: |
329 | if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) | 329 | if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) |
330 | return -EFAULT; | 330 | return -EFAULT; |
331 | if (l1_cfg->dh == 0 || l1_cfg->dw == 0) | ||
332 | return -EINVAL; | ||
331 | if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { | 333 | if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { |
332 | /* downscaling */ | 334 | /* downscaling */ |
333 | outreg(cap, GC_CAP_CSC, | 335 | outreg(cap, GC_CAP_CSC, |
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 5d31699fbd3c..f43bfe17b3b6 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c | |||
@@ -105,6 +105,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) | |||
105 | 105 | ||
106 | sdi_config_lcd_manager(dssdev); | 106 | sdi_config_lcd_manager(dssdev); |
107 | 107 | ||
108 | /* | ||
109 | * LCLK and PCLK divisors are located in shadow registers, and we | ||
110 | * normally write them to DISPC registers when enabling the output. | ||
111 | * However, SDI uses pck-free as source clock for its PLL, and pck-free | ||
112 | * is affected by the divisors. And as we need the PLL before enabling | ||
113 | * the output, we need to write the divisors early. | ||
114 | * | ||
115 | * It seems just writing to the DISPC register is enough, and we don't | ||
116 | * need to care about the shadow register mechanism for pck-free. The | ||
117 | * exact reason for this is unknown. | ||
118 | */ | ||
119 | dispc_mgr_set_clock_div(dssdev->manager->id, | ||
120 | &sdi.mgr_config.clock_info); | ||
121 | |||
108 | dss_sdi_init(dssdev->phy.sdi.datapairs); | 122 | dss_sdi_init(dssdev->phy.sdi.datapairs); |
109 | r = dss_sdi_enable(); | 123 | r = dss_sdi_enable(); |
110 | if (r) | 124 | if (r) |
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 08ec1a7103f2..fc671d3d8004 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c | |||
@@ -1192,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green, | |||
1192 | break; | 1192 | break; |
1193 | 1193 | ||
1194 | if (regno < 16) { | 1194 | if (regno < 16) { |
1195 | u16 pal; | 1195 | u32 pal; |
1196 | pal = ((red >> (16 - var->red.length)) << | 1196 | pal = ((red >> (16 - var->red.length)) << |
1197 | var->red.offset) | | 1197 | var->red.offset) | |
1198 | ((green >> (16 - var->green.length)) << | 1198 | ((green >> (16 - var->green.length)) << |
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c index 3f75129eb0a9..f7abbaeebcaf 100644 --- a/drivers/watchdog/da9052_wdt.c +++ b/drivers/watchdog/da9052_wdt.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/jiffies.h> | 23 | #include <linux/jiffies.h> |
24 | #include <linux/delay.h> | ||
25 | 24 | ||
26 | #include <linux/mfd/da9052/reg.h> | 25 | #include <linux/mfd/da9052/reg.h> |
27 | #include <linux/mfd/da9052/da9052.h> | 26 | #include <linux/mfd/da9052/da9052.h> |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 1afb4fba11b4..4d519488d304 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
@@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
232 | return ret; | 232 | return ret; |
233 | 233 | ||
234 | if (hwdev && hwdev->coherent_dma_mask) | 234 | if (hwdev && hwdev->coherent_dma_mask) |
235 | dma_mask = hwdev->coherent_dma_mask; | 235 | dma_mask = dma_alloc_coherent_mask(hwdev, flags); |
236 | 236 | ||
237 | phys = virt_to_phys(ret); | 237 | phys = virt_to_phys(ret); |
238 | dev_addr = xen_phys_to_bus(phys); | 238 | dev_addr = xen_phys_to_bus(phys); |
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 097e536e8672..03342728bf23 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c | |||
@@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev) | |||
353 | if (err) | 353 | if (err) |
354 | goto config_release; | 354 | goto config_release; |
355 | 355 | ||
356 | dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); | ||
357 | __pci_reset_function_locked(dev); | ||
358 | |||
359 | /* We need the device active to save the state. */ | 356 | /* We need the device active to save the state. */ |
360 | dev_dbg(&dev->dev, "save state of device\n"); | 357 | dev_dbg(&dev->dev, "save state of device\n"); |
361 | pci_save_state(dev); | 358 | pci_save_state(dev); |
362 | dev_data->pci_saved_state = pci_store_saved_state(dev); | 359 | dev_data->pci_saved_state = pci_store_saved_state(dev); |
363 | if (!dev_data->pci_saved_state) | 360 | if (!dev_data->pci_saved_state) |
364 | dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); | 361 | dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); |
365 | 362 | else { | |
363 | dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); | ||
364 | __pci_reset_function_locked(dev); | ||
365 | } | ||
366 | /* Now disable the device (this also ensures some private device | 366 | /* Now disable the device (this also ensures some private device |
367 | * data is setup before we export) | 367 | * data is setup before we export) |
368 | */ | 368 | */ |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index a256f3b2a845..ff6475f409d6 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, | |||
1438 | ret = extent_from_logical(fs_info, logical, path, | 1438 | ret = extent_from_logical(fs_info, logical, path, |
1439 | &found_key); | 1439 | &found_key); |
1440 | btrfs_release_path(path); | 1440 | btrfs_release_path(path); |
1441 | if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) | ||
1442 | ret = -EINVAL; | ||
1443 | if (ret < 0) | 1441 | if (ret < 0) |
1444 | return ret; | 1442 | return ret; |
1443 | if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) | ||
1444 | return -EINVAL; | ||
1445 | 1445 | ||
1446 | extent_item_pos = logical - found_key.objectid; | 1446 | extent_item_pos = logical - found_key.objectid; |
1447 | ret = iterate_extent_inodes(fs_info, found_key.objectid, | 1447 | ret = iterate_extent_inodes(fs_info, found_key.objectid, |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 86eff48dab78..43d1c5a3a030 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace) | |||
818 | btrfs_compress_op[idx]->free_workspace(workspace); | 818 | btrfs_compress_op[idx]->free_workspace(workspace); |
819 | atomic_dec(alloc_workspace); | 819 | atomic_dec(alloc_workspace); |
820 | wake: | 820 | wake: |
821 | smp_mb(); | ||
821 | if (waitqueue_active(workspace_wait)) | 822 | if (waitqueue_active(workspace_wait)) |
822 | wake_up(workspace_wait); | 823 | wake_up(workspace_wait); |
823 | } | 824 | } |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 9d7621f271ff..6d183f60d63a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -421,12 +421,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, | |||
421 | spin_unlock(&fs_info->tree_mod_seq_lock); | 421 | spin_unlock(&fs_info->tree_mod_seq_lock); |
422 | 422 | ||
423 | /* | 423 | /* |
424 | * we removed the lowest blocker from the blocker list, so there may be | ||
425 | * more processible delayed refs. | ||
426 | */ | ||
427 | wake_up(&fs_info->tree_mod_seq_wait); | ||
428 | |||
429 | /* | ||
430 | * anything that's lower than the lowest existing (read: blocked) | 424 | * anything that's lower than the lowest existing (read: blocked) |
431 | * sequence number can be removed from the tree. | 425 | * sequence number can be removed from the tree. |
432 | */ | 426 | */ |
@@ -631,6 +625,9 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) | |||
631 | u32 nritems; | 625 | u32 nritems; |
632 | int ret; | 626 | int ret; |
633 | 627 | ||
628 | if (btrfs_header_level(eb) == 0) | ||
629 | return; | ||
630 | |||
634 | nritems = btrfs_header_nritems(eb); | 631 | nritems = btrfs_header_nritems(eb); |
635 | for (i = nritems - 1; i >= 0; i--) { | 632 | for (i = nritems - 1; i >= 0; i--) { |
636 | ret = tree_mod_log_insert_key_locked(fs_info, eb, i, | 633 | ret = tree_mod_log_insert_key_locked(fs_info, eb, i, |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4bab807227ad..0d195b507660 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -1252,7 +1252,6 @@ struct btrfs_fs_info { | |||
1252 | atomic_t tree_mod_seq; | 1252 | atomic_t tree_mod_seq; |
1253 | struct list_head tree_mod_seq_list; | 1253 | struct list_head tree_mod_seq_list; |
1254 | struct seq_list tree_mod_seq_elem; | 1254 | struct seq_list tree_mod_seq_elem; |
1255 | wait_queue_head_t tree_mod_seq_wait; | ||
1256 | 1255 | ||
1257 | /* this protects tree_mod_log */ | 1256 | /* this protects tree_mod_log */ |
1258 | rwlock_t tree_mod_log_lock; | 1257 | rwlock_t tree_mod_log_lock; |
@@ -3192,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, | |||
3192 | int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, | 3191 | int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, |
3193 | struct bio *bio, u32 *dst); | 3192 | struct bio *bio, u32 *dst); |
3194 | int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, | 3193 | int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, |
3195 | struct bio *bio, u64 logical_offset, u32 *dst); | 3194 | struct bio *bio, u64 logical_offset); |
3196 | int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, | 3195 | int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, |
3197 | struct btrfs_root *root, | 3196 | struct btrfs_root *root, |
3198 | u64 objectid, u64 pos, | 3197 | u64 objectid, u64 pos, |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 335605c8ceab..07d5eeb1e6f1 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
@@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) | |||
512 | 512 | ||
513 | rb_erase(&delayed_item->rb_node, root); | 513 | rb_erase(&delayed_item->rb_node, root); |
514 | delayed_item->delayed_node->count--; | 514 | delayed_item->delayed_node->count--; |
515 | atomic_dec(&delayed_root->items); | 515 | if (atomic_dec_return(&delayed_root->items) < |
516 | if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && | 516 | BTRFS_DELAYED_BACKGROUND && |
517 | waitqueue_active(&delayed_root->wait)) | 517 | waitqueue_active(&delayed_root->wait)) |
518 | wake_up(&delayed_root->wait); | 518 | wake_up(&delayed_root->wait); |
519 | } | 519 | } |
@@ -1028,9 +1028,10 @@ do_again: | |||
1028 | btrfs_release_delayed_item(prev); | 1028 | btrfs_release_delayed_item(prev); |
1029 | ret = 0; | 1029 | ret = 0; |
1030 | btrfs_release_path(path); | 1030 | btrfs_release_path(path); |
1031 | if (curr) | 1031 | if (curr) { |
1032 | mutex_unlock(&node->mutex); | ||
1032 | goto do_again; | 1033 | goto do_again; |
1033 | else | 1034 | } else |
1034 | goto delete_fail; | 1035 | goto delete_fail; |
1035 | } | 1036 | } |
1036 | 1037 | ||
@@ -1055,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) | |||
1055 | delayed_node->count--; | 1056 | delayed_node->count--; |
1056 | 1057 | ||
1057 | delayed_root = delayed_node->root->fs_info->delayed_root; | 1058 | delayed_root = delayed_node->root->fs_info->delayed_root; |
1058 | atomic_dec(&delayed_root->items); | 1059 | if (atomic_dec_return(&delayed_root->items) < |
1059 | if (atomic_read(&delayed_root->items) < | ||
1060 | BTRFS_DELAYED_BACKGROUND && | 1060 | BTRFS_DELAYED_BACKGROUND && |
1061 | waitqueue_active(&delayed_root->wait)) | 1061 | waitqueue_active(&delayed_root->wait)) |
1062 | wake_up(&delayed_root->wait); | 1062 | wake_up(&delayed_root->wait); |
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index da7419ed01bb..ae9411773397 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c | |||
@@ -38,17 +38,14 @@ | |||
38 | static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, | 38 | static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, |
39 | struct btrfs_delayed_tree_ref *ref1) | 39 | struct btrfs_delayed_tree_ref *ref1) |
40 | { | 40 | { |
41 | if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) { | 41 | if (ref1->root < ref2->root) |
42 | if (ref1->root < ref2->root) | 42 | return -1; |
43 | return -1; | 43 | if (ref1->root > ref2->root) |
44 | if (ref1->root > ref2->root) | 44 | return 1; |
45 | return 1; | 45 | if (ref1->parent < ref2->parent) |
46 | } else { | 46 | return -1; |
47 | if (ref1->parent < ref2->parent) | 47 | if (ref1->parent > ref2->parent) |
48 | return -1; | 48 | return 1; |
49 | if (ref1->parent > ref2->parent) | ||
50 | return 1; | ||
51 | } | ||
52 | return 0; | 49 | return 0; |
53 | } | 50 | } |
54 | 51 | ||
@@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2, | |||
85 | * type of the delayed backrefs and content of delayed backrefs. | 82 | * type of the delayed backrefs and content of delayed backrefs. |
86 | */ | 83 | */ |
87 | static int comp_entry(struct btrfs_delayed_ref_node *ref2, | 84 | static int comp_entry(struct btrfs_delayed_ref_node *ref2, |
88 | struct btrfs_delayed_ref_node *ref1) | 85 | struct btrfs_delayed_ref_node *ref1, |
86 | bool compare_seq) | ||
89 | { | 87 | { |
90 | if (ref1->bytenr < ref2->bytenr) | 88 | if (ref1->bytenr < ref2->bytenr) |
91 | return -1; | 89 | return -1; |
@@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2, | |||
102 | if (ref1->type > ref2->type) | 100 | if (ref1->type > ref2->type) |
103 | return 1; | 101 | return 1; |
104 | /* merging of sequenced refs is not allowed */ | 102 | /* merging of sequenced refs is not allowed */ |
105 | if (ref1->seq < ref2->seq) | 103 | if (compare_seq) { |
106 | return -1; | 104 | if (ref1->seq < ref2->seq) |
107 | if (ref1->seq > ref2->seq) | 105 | return -1; |
108 | return 1; | 106 | if (ref1->seq > ref2->seq) |
107 | return 1; | ||
108 | } | ||
109 | if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || | 109 | if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || |
110 | ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { | 110 | ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { |
111 | return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), | 111 | return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), |
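The new compare_seq parameter lets one comparator serve two callers: the rb-tree insert path still orders refs by seq, while the merge path below passes 0 so that refs differing only in seq compare as equal and can be combined. A simplified userspace comparator showing the idea (the struct layout and field names are illustrative, not the btrfs structures):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct ref {
    uint64_t bytenr;
    uint64_t seq;
};

/* Order by bytenr, and only break ties on seq when the caller asks. */
static int comp_ref(const struct ref *a, const struct ref *b, bool compare_seq)
{
    if (a->bytenr < b->bytenr)
        return -1;
    if (a->bytenr > b->bytenr)
        return 1;
    if (compare_seq) {
        if (a->seq < b->seq)
            return -1;
        if (a->seq > b->seq)
            return 1;
    }
    return 0;
}

int main(void)
{
    struct ref x = { .bytenr = 4096, .seq = 7 };
    struct ref y = { .bytenr = 4096, .seq = 9 };

    printf("insert order: %d, merge check: %d\n",
           comp_ref(&x, &y, true), comp_ref(&x, &y, false)); /* -1, 0 */
    return 0;
}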
@@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root, | |||
139 | entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, | 139 | entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, |
140 | rb_node); | 140 | rb_node); |
141 | 141 | ||
142 | cmp = comp_entry(entry, ins); | 142 | cmp = comp_entry(entry, ins, 1); |
143 | if (cmp < 0) | 143 | if (cmp < 0) |
144 | p = &(*p)->rb_left; | 144 | p = &(*p)->rb_left; |
145 | else if (cmp > 0) | 145 | else if (cmp > 0) |
@@ -233,6 +233,114 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, | |||
233 | return 0; | 233 | return 0; |
234 | } | 234 | } |
235 | 235 | ||
236 | static void inline drop_delayed_ref(struct btrfs_trans_handle *trans, | ||
237 | struct btrfs_delayed_ref_root *delayed_refs, | ||
238 | struct btrfs_delayed_ref_node *ref) | ||
239 | { | ||
240 | rb_erase(&ref->rb_node, &delayed_refs->root); | ||
241 | ref->in_tree = 0; | ||
242 | btrfs_put_delayed_ref(ref); | ||
243 | delayed_refs->num_entries--; | ||
244 | if (trans->delayed_ref_updates) | ||
245 | trans->delayed_ref_updates--; | ||
246 | } | ||
247 | |||
248 | static int merge_ref(struct btrfs_trans_handle *trans, | ||
249 | struct btrfs_delayed_ref_root *delayed_refs, | ||
250 | struct btrfs_delayed_ref_node *ref, u64 seq) | ||
251 | { | ||
252 | struct rb_node *node; | ||
253 | int merged = 0; | ||
254 | int mod = 0; | ||
255 | int done = 0; | ||
256 | |||
257 | node = rb_prev(&ref->rb_node); | ||
258 | while (node) { | ||
259 | struct btrfs_delayed_ref_node *next; | ||
260 | |||
261 | next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | ||
262 | node = rb_prev(node); | ||
263 | if (next->bytenr != ref->bytenr) | ||
264 | break; | ||
265 | if (seq && next->seq >= seq) | ||
266 | break; | ||
267 | if (comp_entry(ref, next, 0)) | ||
268 | continue; | ||
269 | |||
270 | if (ref->action == next->action) { | ||
271 | mod = next->ref_mod; | ||
272 | } else { | ||
273 | if (ref->ref_mod < next->ref_mod) { | ||
274 | struct btrfs_delayed_ref_node *tmp; | ||
275 | |||
276 | tmp = ref; | ||
277 | ref = next; | ||
278 | next = tmp; | ||
279 | done = 1; | ||
280 | } | ||
281 | mod = -next->ref_mod; | ||
282 | } | ||
283 | |||
284 | merged++; | ||
285 | drop_delayed_ref(trans, delayed_refs, next); | ||
286 | ref->ref_mod += mod; | ||
287 | if (ref->ref_mod == 0) { | ||
288 | drop_delayed_ref(trans, delayed_refs, ref); | ||
289 | break; | ||
290 | } else { | ||
291 | /* | ||
292 | * You can't have multiples of the same ref on a tree | ||
293 | * block. | ||
294 | */ | ||
295 | WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY || | ||
296 | ref->type == BTRFS_SHARED_BLOCK_REF_KEY); | ||
297 | } | ||
298 | |||
299 | if (done) | ||
300 | break; | ||
301 | node = rb_prev(&ref->rb_node); | ||
302 | } | ||
303 | |||
304 | return merged; | ||
305 | } | ||
306 | |||
307 | void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, | ||
308 | struct btrfs_fs_info *fs_info, | ||
309 | struct btrfs_delayed_ref_root *delayed_refs, | ||
310 | struct btrfs_delayed_ref_head *head) | ||
311 | { | ||
312 | struct rb_node *node; | ||
313 | u64 seq = 0; | ||
314 | |||
315 | spin_lock(&fs_info->tree_mod_seq_lock); | ||
316 | if (!list_empty(&fs_info->tree_mod_seq_list)) { | ||
317 | struct seq_list *elem; | ||
318 | |||
319 | elem = list_first_entry(&fs_info->tree_mod_seq_list, | ||
320 | struct seq_list, list); | ||
321 | seq = elem->seq; | ||
322 | } | ||
323 | spin_unlock(&fs_info->tree_mod_seq_lock); | ||
324 | |||
325 | node = rb_prev(&head->node.rb_node); | ||
326 | while (node) { | ||
327 | struct btrfs_delayed_ref_node *ref; | ||
328 | |||
329 | ref = rb_entry(node, struct btrfs_delayed_ref_node, | ||
330 | rb_node); | ||
331 | if (ref->bytenr != head->node.bytenr) | ||
332 | break; | ||
333 | |||
334 | /* We can't merge refs that are outside of our seq count */ | ||
335 | if (seq && ref->seq >= seq) | ||
336 | break; | ||
337 | if (merge_ref(trans, delayed_refs, ref, seq)) | ||
338 | node = rb_prev(&head->node.rb_node); | ||
339 | else | ||
340 | node = rb_prev(node); | ||
341 | } | ||
342 | } | ||
343 | |||
236 | int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, | 344 | int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, |
237 | struct btrfs_delayed_ref_root *delayed_refs, | 345 | struct btrfs_delayed_ref_root *delayed_refs, |
238 | u64 seq) | 346 | u64 seq) |
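merge_ref() above walks backwards over refs for the same bytenr and folds them together: refs with the same action add their ref_mod counts, refs with opposite actions cancel against each other, and a ref whose count reaches zero is dropped entirely. A toy userspace model of that arithmetic (not the kernel code; the struct and names are invented for illustration):

#include <stdio.h>

enum action { ADD_REF, DROP_REF };

struct dref {
    enum action action;
    int ref_mod;
    int alive;
};

/* Combine two delayed refs for the same extent: same action -> counts
 * add up; opposite actions -> they cancel, keeping the ref with the
 * larger count; a zero result means both disappear. */
static void merge(struct dref *keep, struct dref *drop)
{
    int mod;

    if (keep->action == drop->action) {
        mod = drop->ref_mod;
    } else {
        if (keep->ref_mod < drop->ref_mod) {
            struct dref *tmp = keep;
            keep = drop;
            drop = tmp;
        }
        mod = -drop->ref_mod;
    }

    drop->alive = 0;
    keep->ref_mod += mod;
    if (keep->ref_mod == 0)
        keep->alive = 0;
}

int main(void)
{
    struct dref add  = { ADD_REF, 1, 1 };
    struct dref drop = { DROP_REF, 1, 1 };

    merge(&add, &drop);
    printf("add alive=%d drop alive=%d\n", add.alive, drop.alive); /* 0 0 */
    return 0;
}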
@@ -336,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans, | |||
336 | * every changing the extent allocation tree. | 444 | * every changing the extent allocation tree. |
337 | */ | 445 | */ |
338 | existing->ref_mod--; | 446 | existing->ref_mod--; |
339 | if (existing->ref_mod == 0) { | 447 | if (existing->ref_mod == 0) |
340 | rb_erase(&existing->rb_node, | 448 | drop_delayed_ref(trans, delayed_refs, existing); |
341 | &delayed_refs->root); | 449 | else |
342 | existing->in_tree = 0; | ||
343 | btrfs_put_delayed_ref(existing); | ||
344 | delayed_refs->num_entries--; | ||
345 | if (trans->delayed_ref_updates) | ||
346 | trans->delayed_ref_updates--; | ||
347 | } else { | ||
348 | WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || | 450 | WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || |
349 | existing->type == BTRFS_SHARED_BLOCK_REF_KEY); | 451 | existing->type == BTRFS_SHARED_BLOCK_REF_KEY); |
350 | } | ||
351 | } else { | 452 | } else { |
352 | WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || | 453 | WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || |
353 | existing->type == BTRFS_SHARED_BLOCK_REF_KEY); | 454 | existing->type == BTRFS_SHARED_BLOCK_REF_KEY); |
@@ -662,9 +763,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, | |||
662 | add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, | 763 | add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, |
663 | num_bytes, parent, ref_root, level, action, | 764 | num_bytes, parent, ref_root, level, action, |
664 | for_cow); | 765 | for_cow); |
665 | if (!need_ref_seq(for_cow, ref_root) && | ||
666 | waitqueue_active(&fs_info->tree_mod_seq_wait)) | ||
667 | wake_up(&fs_info->tree_mod_seq_wait); | ||
668 | spin_unlock(&delayed_refs->lock); | 766 | spin_unlock(&delayed_refs->lock); |
669 | if (need_ref_seq(for_cow, ref_root)) | 767 | if (need_ref_seq(for_cow, ref_root)) |
670 | btrfs_qgroup_record_ref(trans, &ref->node, extent_op); | 768 | btrfs_qgroup_record_ref(trans, &ref->node, extent_op); |
@@ -713,9 +811,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, | |||
713 | add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, | 811 | add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, |
714 | num_bytes, parent, ref_root, owner, offset, | 812 | num_bytes, parent, ref_root, owner, offset, |
715 | action, for_cow); | 813 | action, for_cow); |
716 | if (!need_ref_seq(for_cow, ref_root) && | ||
717 | waitqueue_active(&fs_info->tree_mod_seq_wait)) | ||
718 | wake_up(&fs_info->tree_mod_seq_wait); | ||
719 | spin_unlock(&delayed_refs->lock); | 814 | spin_unlock(&delayed_refs->lock); |
720 | if (need_ref_seq(for_cow, ref_root)) | 815 | if (need_ref_seq(for_cow, ref_root)) |
721 | btrfs_qgroup_record_ref(trans, &ref->node, extent_op); | 816 | btrfs_qgroup_record_ref(trans, &ref->node, extent_op); |
@@ -744,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, | |||
744 | num_bytes, BTRFS_UPDATE_DELAYED_HEAD, | 839 | num_bytes, BTRFS_UPDATE_DELAYED_HEAD, |
745 | extent_op->is_data); | 840 | extent_op->is_data); |
746 | 841 | ||
747 | if (waitqueue_active(&fs_info->tree_mod_seq_wait)) | ||
748 | wake_up(&fs_info->tree_mod_seq_wait); | ||
749 | spin_unlock(&delayed_refs->lock); | 842 | spin_unlock(&delayed_refs->lock); |
750 | return 0; | 843 | return 0; |
751 | } | 844 | } |
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 0d7c90c366b6..ab5300595847 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h | |||
@@ -167,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, | |||
167 | struct btrfs_trans_handle *trans, | 167 | struct btrfs_trans_handle *trans, |
168 | u64 bytenr, u64 num_bytes, | 168 | u64 bytenr, u64 num_bytes, |
169 | struct btrfs_delayed_extent_op *extent_op); | 169 | struct btrfs_delayed_extent_op *extent_op); |
170 | void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, | ||
171 | struct btrfs_fs_info *fs_info, | ||
172 | struct btrfs_delayed_ref_root *delayed_refs, | ||
173 | struct btrfs_delayed_ref_head *head); | ||
170 | 174 | ||
171 | struct btrfs_delayed_ref_head * | 175 | struct btrfs_delayed_ref_head * |
172 | btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); | 176 | btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 62e0cafd6e25..22e98e04c2ea 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, | |||
377 | ret = read_extent_buffer_pages(io_tree, eb, start, | 377 | ret = read_extent_buffer_pages(io_tree, eb, start, |
378 | WAIT_COMPLETE, | 378 | WAIT_COMPLETE, |
379 | btree_get_extent, mirror_num); | 379 | btree_get_extent, mirror_num); |
380 | if (!ret && !verify_parent_transid(io_tree, eb, | 380 | if (!ret) { |
381 | if (!verify_parent_transid(io_tree, eb, | ||
381 | parent_transid, 0)) | 382 | parent_transid, 0)) |
382 | break; | 383 | break; |
384 | else | ||
385 | ret = -EIO; | ||
386 | } | ||
383 | 387 | ||
384 | /* | 388 | /* |
385 | * This buffer's crc is fine, but its contents are corrupted, so | 389 | * This buffer's crc is fine, but its contents are corrupted, so |
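Without the added ret = -EIO, a read whose I/O succeeded but whose parent-transid check failed left ret at 0, so the retry loop could report success even after every mirror turned out to be stale. A toy model of the corrected loop (the helpers are stand-ins, not btrfs functions):

#include <stdio.h>

/* A read can "succeed" at the I/O level yet carry stale data; that case
 * must be turned into an error, or the caller sees success with bad
 * contents once the mirrors are exhausted. */
static int read_mirror(int mirror, int *stale)
{
    *stale = (mirror == 0);   /* pretend mirror 0 is stale, mirror 1 is good */
    return 0;                 /* I/O itself always succeeds in this toy */
}

static int read_block(int num_mirrors)
{
    int ret = -1;

    for (int mirror = 0; mirror < num_mirrors; mirror++) {
        int stale;

        ret = read_mirror(mirror, &stale);
        if (!ret) {
            if (!stale)
                break;        /* good copy found */
            ret = -5;         /* -EIO: I/O fine, wrong generation */
        }
    }
    return ret;
}

int main(void)
{
    printf("two mirrors: %d\n", read_block(2));  /* 0: second mirror is good */
    printf("one mirror:  %d\n", read_block(1));  /* -5: only copy was stale */
    return 0;
}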
@@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work) | |||
754 | limit = btrfs_async_submit_limit(fs_info); | 758 | limit = btrfs_async_submit_limit(fs_info); |
755 | limit = limit * 2 / 3; | 759 | limit = limit * 2 / 3; |
756 | 760 | ||
757 | atomic_dec(&fs_info->nr_async_submits); | 761 | if (atomic_dec_return(&fs_info->nr_async_submits) < limit && |
758 | |||
759 | if (atomic_read(&fs_info->nr_async_submits) < limit && | ||
760 | waitqueue_active(&fs_info->async_submit_wait)) | 762 | waitqueue_active(&fs_info->async_submit_wait)) |
761 | wake_up(&fs_info->async_submit_wait); | 763 | wake_up(&fs_info->async_submit_wait); |
762 | 764 | ||
@@ -2032,8 +2034,6 @@ int open_ctree(struct super_block *sb, | |||
2032 | fs_info->free_chunk_space = 0; | 2034 | fs_info->free_chunk_space = 0; |
2033 | fs_info->tree_mod_log = RB_ROOT; | 2035 | fs_info->tree_mod_log = RB_ROOT; |
2034 | 2036 | ||
2035 | init_waitqueue_head(&fs_info->tree_mod_seq_wait); | ||
2036 | |||
2037 | /* readahead state */ | 2037 | /* readahead state */ |
2038 | INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); | 2038 | INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); |
2039 | spin_lock_init(&fs_info->reada_lock); | 2039 | spin_lock_init(&fs_info->reada_lock); |
@@ -2528,8 +2528,7 @@ retry_root_backup: | |||
2528 | goto fail_trans_kthread; | 2528 | goto fail_trans_kthread; |
2529 | 2529 | ||
2530 | /* do not make disk changes in broken FS */ | 2530 | /* do not make disk changes in broken FS */ |
2531 | if (btrfs_super_log_root(disk_super) != 0 && | 2531 | if (btrfs_super_log_root(disk_super) != 0) { |
2532 | !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { | ||
2533 | u64 bytenr = btrfs_super_log_root(disk_super); | 2532 | u64 bytenr = btrfs_super_log_root(disk_super); |
2534 | 2533 | ||
2535 | if (fs_devices->rw_devices == 0) { | 2534 | if (fs_devices->rw_devices == 0) { |
@@ -3189,30 +3188,14 @@ int close_ctree(struct btrfs_root *root) | |||
3189 | /* clear out the rbtree of defraggable inodes */ | 3188 | /* clear out the rbtree of defraggable inodes */ |
3190 | btrfs_run_defrag_inodes(fs_info); | 3189 | btrfs_run_defrag_inodes(fs_info); |
3191 | 3190 | ||
3192 | /* | ||
3193 | * Here come 2 situations when btrfs is broken to flip readonly: | ||
3194 | * | ||
3195 | * 1. when btrfs flips readonly somewhere else before | ||
3196 | * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, | ||
3197 | * and btrfs will skip to write sb directly to keep | ||
3198 | * ERROR state on disk. | ||
3199 | * | ||
3200 | * 2. when btrfs flips readonly just in btrfs_commit_super, | ||
3201 | * and in such case, btrfs cannot write sb via btrfs_commit_super, | ||
3202 | * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, | ||
3203 | * btrfs will cleanup all FS resources first and write sb then. | ||
3204 | */ | ||
3205 | if (!(fs_info->sb->s_flags & MS_RDONLY)) { | 3191 | if (!(fs_info->sb->s_flags & MS_RDONLY)) { |
3206 | ret = btrfs_commit_super(root); | 3192 | ret = btrfs_commit_super(root); |
3207 | if (ret) | 3193 | if (ret) |
3208 | printk(KERN_ERR "btrfs: commit super ret %d\n", ret); | 3194 | printk(KERN_ERR "btrfs: commit super ret %d\n", ret); |
3209 | } | 3195 | } |
3210 | 3196 | ||
3211 | if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | 3197 | if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) |
3212 | ret = btrfs_error_commit_super(root); | 3198 | btrfs_error_commit_super(root); |
3213 | if (ret) | ||
3214 | printk(KERN_ERR "btrfs: commit super ret %d\n", ret); | ||
3215 | } | ||
3216 | 3199 | ||
3217 | btrfs_put_block_group_cache(fs_info); | 3200 | btrfs_put_block_group_cache(fs_info); |
3218 | 3201 | ||
@@ -3434,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | |||
3434 | if (read_only) | 3417 | if (read_only) |
3435 | return 0; | 3418 | return 0; |
3436 | 3419 | ||
3437 | if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | ||
3438 | printk(KERN_WARNING "warning: mount fs with errors, " | ||
3439 | "running btrfsck is recommended\n"); | ||
3440 | } | ||
3441 | |||
3442 | return 0; | 3420 | return 0; |
3443 | } | 3421 | } |
3444 | 3422 | ||
3445 | int btrfs_error_commit_super(struct btrfs_root *root) | 3423 | void btrfs_error_commit_super(struct btrfs_root *root) |
3446 | { | 3424 | { |
3447 | int ret; | ||
3448 | |||
3449 | mutex_lock(&root->fs_info->cleaner_mutex); | 3425 | mutex_lock(&root->fs_info->cleaner_mutex); |
3450 | btrfs_run_delayed_iputs(root); | 3426 | btrfs_run_delayed_iputs(root); |
3451 | mutex_unlock(&root->fs_info->cleaner_mutex); | 3427 | mutex_unlock(&root->fs_info->cleaner_mutex); |
@@ -3455,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root) | |||
3455 | 3431 | ||
3456 | /* cleanup FS via transaction */ | 3432 | /* cleanup FS via transaction */ |
3457 | btrfs_cleanup_transaction(root); | 3433 | btrfs_cleanup_transaction(root); |
3458 | |||
3459 | ret = write_ctree_super(NULL, root, 0); | ||
3460 | |||
3461 | return ret; | ||
3462 | } | 3434 | } |
3463 | 3435 | ||
3464 | static void btrfs_destroy_ordered_operations(struct btrfs_root *root) | 3436 | static void btrfs_destroy_ordered_operations(struct btrfs_root *root) |
@@ -3782,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) | |||
3782 | /* FIXME: cleanup wait for commit */ | 3754 | /* FIXME: cleanup wait for commit */ |
3783 | t->in_commit = 1; | 3755 | t->in_commit = 1; |
3784 | t->blocked = 1; | 3756 | t->blocked = 1; |
3757 | smp_mb(); | ||
3785 | if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) | 3758 | if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) |
3786 | wake_up(&root->fs_info->transaction_blocked_wait); | 3759 | wake_up(&root->fs_info->transaction_blocked_wait); |
3787 | 3760 | ||
3788 | t->blocked = 0; | 3761 | t->blocked = 0; |
3762 | smp_mb(); | ||
3789 | if (waitqueue_active(&root->fs_info->transaction_wait)) | 3763 | if (waitqueue_active(&root->fs_info->transaction_wait)) |
3790 | wake_up(&root->fs_info->transaction_wait); | 3764 | wake_up(&root->fs_info->transaction_wait); |
3791 | 3765 | ||
3792 | t->commit_done = 1; | 3766 | t->commit_done = 1; |
3767 | smp_mb(); | ||
3793 | if (waitqueue_active(&t->commit_wait)) | 3768 | if (waitqueue_active(&t->commit_wait)) |
3794 | wake_up(&t->commit_wait); | 3769 | wake_up(&t->commit_wait); |
3795 | 3770 | ||
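The added smp_mb() calls follow the usual pairing for waitqueue_active(): the condition must be visibly published before the "is anyone waiting" check, otherwise a task that raced into the wait queue can miss its wakeup. A userspace sketch of the ordering with C11 fences standing in for smp_mb() (printf replaces the real wake_up()/schedule()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool commit_done = false;
static atomic_int waiters = 0;

/* Waker side: publish the condition, full fence, then test for waiters.
 * Without the fence the condition store and the 'waiters' load may be
 * reordered, and a sleeper that raced with us is never woken. */
static void waker(void)
{
    atomic_store_explicit(&commit_done, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);       /* the smp_mb() */
    if (atomic_load_explicit(&waiters, memory_order_relaxed))
        printf("wake_up()\n");
}

/* Waiter side: register first, fence, then re-check the condition. */
static void waiter(void)
{
    atomic_fetch_add_explicit(&waiters, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    if (!atomic_load_explicit(&commit_done, memory_order_relaxed))
        printf("sleep\n");
    atomic_fetch_sub_explicit(&waiters, 1, memory_order_relaxed);
}

int main(void)
{
    waiter();   /* registers, sees commit_done == false, would sleep */
    waker();    /* sees a waiter and issues the wakeup */
    return 0;
}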
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 95e147eea239..c5b00a735fef 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h | |||
@@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans, | |||
54 | struct btrfs_root *root, int max_mirrors); | 54 | struct btrfs_root *root, int max_mirrors); |
55 | struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); | 55 | struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); |
56 | int btrfs_commit_super(struct btrfs_root *root); | 56 | int btrfs_commit_super(struct btrfs_root *root); |
57 | int btrfs_error_commit_super(struct btrfs_root *root); | 57 | void btrfs_error_commit_super(struct btrfs_root *root); |
58 | struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, | 58 | struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, |
59 | u64 bytenr, u32 blocksize); | 59 | u64 bytenr, u32 blocksize); |
60 | struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, | 60 | struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4e1b153b7c47..ba58024d40d3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -2252,6 +2252,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, | |||
2252 | } | 2252 | } |
2253 | 2253 | ||
2254 | /* | 2254 | /* |
2255 | * We need to try and merge add/drops of the same ref since we | ||
2256 | * can run into issues with relocate dropping the implicit ref | ||
2257 | * and then it being added back again before the drop can | ||
2258 | * finish. If we merged anything we need to re-loop so we can | ||
2259 | * get a good ref. | ||
2260 | */ | ||
2261 | btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, | ||
2262 | locked_ref); | ||
2263 | |||
2264 | /* | ||
2255 | * locked_ref is the head node, so we have to go one | 2265 | * locked_ref is the head node, so we have to go one |
2256 | * node back for any delayed ref updates | 2266 | * node back for any delayed ref updates |
2257 | */ | 2267 | */ |
@@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, | |||
2318 | ref->in_tree = 0; | 2328 | ref->in_tree = 0; |
2319 | rb_erase(&ref->rb_node, &delayed_refs->root); | 2329 | rb_erase(&ref->rb_node, &delayed_refs->root); |
2320 | delayed_refs->num_entries--; | 2330 | delayed_refs->num_entries--; |
2321 | /* | 2331 | if (locked_ref) { |
2322 | * we modified num_entries, but as we're currently running | 2332 | /* |
2323 | * delayed refs, skip | 2333 | * when we play the delayed ref, also correct the |
2324 | * wake_up(&delayed_refs->seq_wait); | 2334 | * ref_mod on head |
2325 | * here. | 2335 | */ |
2326 | */ | 2336 | switch (ref->action) { |
2337 | case BTRFS_ADD_DELAYED_REF: | ||
2338 | case BTRFS_ADD_DELAYED_EXTENT: | ||
2339 | locked_ref->node.ref_mod -= ref->ref_mod; | ||
2340 | break; | ||
2341 | case BTRFS_DROP_DELAYED_REF: | ||
2342 | locked_ref->node.ref_mod += ref->ref_mod; | ||
2343 | break; | ||
2344 | default: | ||
2345 | WARN_ON(1); | ||
2346 | } | ||
2347 | } | ||
2327 | spin_unlock(&delayed_refs->lock); | 2348 | spin_unlock(&delayed_refs->lock); |
2328 | 2349 | ||
2329 | ret = run_one_delayed_ref(trans, root, ref, extent_op, | 2350 | ret = run_one_delayed_ref(trans, root, ref, extent_op, |
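When an individual delayed ref is played while its head node is still locked, the head's ref_mod now receives the opposite adjustment, so the head's running total reflects only the refs that remain queued. A small standalone model of that bookkeeping (types and names are invented for illustration):

#include <stdio.h>

enum action { ADD_DELAYED_REF, ADD_DELAYED_EXTENT, DROP_DELAYED_REF };

struct head { int ref_mod; };
struct ref  { enum action action; int ref_mod; };

/* Playing an add removes its contribution from the head's total,
 * playing a drop adds it back, mirroring the switch in the hunk above. */
static void play_ref(struct head *head, const struct ref *ref)
{
    switch (ref->action) {
    case ADD_DELAYED_REF:
    case ADD_DELAYED_EXTENT:
        head->ref_mod -= ref->ref_mod;
        break;
    case DROP_DELAYED_REF:
        head->ref_mod += ref->ref_mod;
        break;
    }
}

int main(void)
{
    struct head h = { .ref_mod = 1 };               /* net effect: one add */
    struct ref add = { ADD_DELAYED_REF, 2 };
    struct ref drop = { DROP_DELAYED_REF, 1 };

    play_ref(&h, &add);   /* head: 1 - 2 = -1 */
    play_ref(&h, &drop);  /* head: -1 + 1 = 0, all refs accounted for */
    printf("head ref_mod = %d\n", h.ref_mod);
    return 0;
}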
@@ -2350,22 +2371,6 @@ next: | |||
2350 | return count; | 2371 | return count; |
2351 | } | 2372 | } |
2352 | 2373 | ||
2353 | static void wait_for_more_refs(struct btrfs_fs_info *fs_info, | ||
2354 | struct btrfs_delayed_ref_root *delayed_refs, | ||
2355 | unsigned long num_refs, | ||
2356 | struct list_head *first_seq) | ||
2357 | { | ||
2358 | spin_unlock(&delayed_refs->lock); | ||
2359 | pr_debug("waiting for more refs (num %ld, first %p)\n", | ||
2360 | num_refs, first_seq); | ||
2361 | wait_event(fs_info->tree_mod_seq_wait, | ||
2362 | num_refs != delayed_refs->num_entries || | ||
2363 | fs_info->tree_mod_seq_list.next != first_seq); | ||
2364 | pr_debug("done waiting for more refs (num %ld, first %p)\n", | ||
2365 | delayed_refs->num_entries, fs_info->tree_mod_seq_list.next); | ||
2366 | spin_lock(&delayed_refs->lock); | ||
2367 | } | ||
2368 | |||
2369 | #ifdef SCRAMBLE_DELAYED_REFS | 2374 | #ifdef SCRAMBLE_DELAYED_REFS |
2370 | /* | 2375 | /* |
2371 | * Normally delayed refs get processed in ascending bytenr order. This | 2376 | * Normally delayed refs get processed in ascending bytenr order. This |
@@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
2460 | struct btrfs_delayed_ref_root *delayed_refs; | 2465 | struct btrfs_delayed_ref_root *delayed_refs; |
2461 | struct btrfs_delayed_ref_node *ref; | 2466 | struct btrfs_delayed_ref_node *ref; |
2462 | struct list_head cluster; | 2467 | struct list_head cluster; |
2463 | struct list_head *first_seq = NULL; | ||
2464 | int ret; | 2468 | int ret; |
2465 | u64 delayed_start; | 2469 | u64 delayed_start; |
2466 | int run_all = count == (unsigned long)-1; | 2470 | int run_all = count == (unsigned long)-1; |
2467 | int run_most = 0; | 2471 | int run_most = 0; |
2468 | unsigned long num_refs = 0; | 2472 | int loops; |
2469 | int consider_waiting; | ||
2470 | 2473 | ||
2471 | /* We'll clean this up in btrfs_cleanup_transaction */ | 2474 | /* We'll clean this up in btrfs_cleanup_transaction */ |
2472 | if (trans->aborted) | 2475 | if (trans->aborted) |
@@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
2484 | delayed_refs = &trans->transaction->delayed_refs; | 2487 | delayed_refs = &trans->transaction->delayed_refs; |
2485 | INIT_LIST_HEAD(&cluster); | 2488 | INIT_LIST_HEAD(&cluster); |
2486 | again: | 2489 | again: |
2487 | consider_waiting = 0; | 2490 | loops = 0; |
2488 | spin_lock(&delayed_refs->lock); | 2491 | spin_lock(&delayed_refs->lock); |
2489 | 2492 | ||
2490 | #ifdef SCRAMBLE_DELAYED_REFS | 2493 | #ifdef SCRAMBLE_DELAYED_REFS |
@@ -2512,31 +2515,6 @@ again: | |||
2512 | if (ret) | 2515 | if (ret) |
2513 | break; | 2516 | break; |
2514 | 2517 | ||
2515 | if (delayed_start >= delayed_refs->run_delayed_start) { | ||
2516 | if (consider_waiting == 0) { | ||
2517 | /* | ||
2518 | * btrfs_find_ref_cluster looped. let's do one | ||
2519 | * more cycle. if we don't run any delayed ref | ||
2520 | * during that cycle (because we can't because | ||
2521 | * all of them are blocked) and if the number of | ||
2522 | * refs doesn't change, we avoid busy waiting. | ||
2523 | */ | ||
2524 | consider_waiting = 1; | ||
2525 | num_refs = delayed_refs->num_entries; | ||
2526 | first_seq = root->fs_info->tree_mod_seq_list.next; | ||
2527 | } else { | ||
2528 | wait_for_more_refs(root->fs_info, delayed_refs, | ||
2529 | num_refs, first_seq); | ||
2530 | /* | ||
2531 | * after waiting, things have changed. we | ||
2532 | * dropped the lock and someone else might have | ||
2533 | * run some refs, built new clusters and so on. | ||
2534 | * therefore, we restart staleness detection. | ||
2535 | */ | ||
2536 | consider_waiting = 0; | ||
2537 | } | ||
2538 | } | ||
2539 | |||
2540 | ret = run_clustered_refs(trans, root, &cluster); | 2518 | ret = run_clustered_refs(trans, root, &cluster); |
2541 | if (ret < 0) { | 2519 | if (ret < 0) { |
2542 | spin_unlock(&delayed_refs->lock); | 2520 | spin_unlock(&delayed_refs->lock); |
@@ -2549,9 +2527,26 @@ again: | |||
2549 | if (count == 0) | 2527 | if (count == 0) |
2550 | break; | 2528 | break; |
2551 | 2529 | ||
2552 | if (ret || delayed_refs->run_delayed_start == 0) { | 2530 | if (delayed_start >= delayed_refs->run_delayed_start) { |
2531 | if (loops == 0) { | ||
2532 | /* | ||
2533 | * btrfs_find_ref_cluster looped. let's do one | ||
2534 | * more cycle. if we don't run any delayed ref | ||
2535 | * during that cycle (because we can't because | ||
2536 | * all of them are blocked), bail out. | ||
2537 | */ | ||
2538 | loops = 1; | ||
2539 | } else { | ||
2540 | /* | ||
2541 | * no runnable refs left, stop trying | ||
2542 | */ | ||
2543 | BUG_ON(run_all); | ||
2544 | break; | ||
2545 | } | ||
2546 | } | ||
2547 | if (ret) { | ||
2553 | /* refs were run, let's reset staleness detection */ | 2548 | /* refs were run, let's reset staleness detection */ |
2554 | consider_waiting = 0; | 2549 | loops = 0; |
2555 | } | 2550 | } |
2556 | } | 2551 | } |
2557 | 2552 | ||
@@ -3007,17 +3002,16 @@ again: | |||
3007 | } | 3002 | } |
3008 | spin_unlock(&block_group->lock); | 3003 | spin_unlock(&block_group->lock); |
3009 | 3004 | ||
3010 | num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); | 3005 | /* |
3006 | * Try to preallocate enough space based on how big the block group is. | ||
3007 | * Keep in mind this has to include any pinned space which could end up | ||
3008 | * taking up quite a bit since it's not folded into the other space | ||
3009 | * cache. | ||
3010 | */ | ||
3011 | num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024); | ||
3011 | if (!num_pages) | 3012 | if (!num_pages) |
3012 | num_pages = 1; | 3013 | num_pages = 1; |
3013 | 3014 | ||
3014 | /* | ||
3015 | * Just to make absolutely sure we have enough space, we're going to | ||
3016 | * preallocate 12 pages worth of space for each block group. In | ||
3017 | * practice we ought to use at most 8, but we need extra space so we can | ||
3018 | * add our header and have a terminator between the extents and the | ||
3019 | * bitmaps. | ||
3020 | */ | ||
3021 | num_pages *= 16; | 3015 | num_pages *= 16; |
3022 | num_pages *= PAGE_CACHE_SIZE; | 3016 | num_pages *= PAGE_CACHE_SIZE; |
3023 | 3017 | ||
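The divisor change means the space cache preallocation now scales at 16 pages per 256 MiB of block group instead of per 1 GiB. Assuming 4 KiB pages, a 1 GiB block group goes from 64 KiB to 256 KiB of preallocated cache space; the arithmetic, as a small standalone program:

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096ULL   /* assumption: 4 KiB pages */

/* Rough model of the sizing logic in the hunk above. */
static uint64_t prealloc_bytes(uint64_t block_group_size, uint64_t divisor)
{
    uint64_t num_pages = block_group_size / divisor;

    if (!num_pages)
        num_pages = 1;
    num_pages *= 16;
    return num_pages * PAGE_CACHE_SIZE;
}

int main(void)
{
    uint64_t bg = 1024ULL * 1024 * 1024;            /* 1 GiB block group */

    printf("old (1 GiB divisor): %llu KiB\n",
           (unsigned long long)(prealloc_bytes(bg, 1024ULL * 1024 * 1024) >> 10));
    printf("new (256 MiB divisor): %llu KiB\n",
           (unsigned long long)(prealloc_bytes(bg, 256ULL * 1024 * 1024) >> 10));
    return 0;   /* prints 64 KiB and 256 KiB respectively */
}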
@@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
4571 | if (root->fs_info->quota_enabled) { | 4565 | if (root->fs_info->quota_enabled) { |
4572 | ret = btrfs_qgroup_reserve(root, num_bytes + | 4566 | ret = btrfs_qgroup_reserve(root, num_bytes + |
4573 | nr_extents * root->leafsize); | 4567 | nr_extents * root->leafsize); |
4574 | if (ret) | 4568 | if (ret) { |
4569 | mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); | ||
4575 | return ret; | 4570 | return ret; |
4571 | } | ||
4576 | } | 4572 | } |
4577 | 4573 | ||
4578 | ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); | 4574 | ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); |
@@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, | |||
5294 | rb_erase(&head->node.rb_node, &delayed_refs->root); | 5290 | rb_erase(&head->node.rb_node, &delayed_refs->root); |
5295 | 5291 | ||
5296 | delayed_refs->num_entries--; | 5292 | delayed_refs->num_entries--; |
5297 | smp_mb(); | ||
5298 | if (waitqueue_active(&root->fs_info->tree_mod_seq_wait)) | ||
5299 | wake_up(&root->fs_info->tree_mod_seq_wait); | ||
5300 | 5293 | ||
5301 | /* | 5294 | /* |
5302 | * we don't take a ref on the node because we're removing it from the | 5295 | * we don't take a ref on the node because we're removing it from the |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 45c81bb4ac82..4c878476bb91 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
2330 | if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { | 2330 | if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { |
2331 | ret = tree->ops->readpage_end_io_hook(page, start, end, | 2331 | ret = tree->ops->readpage_end_io_hook(page, start, end, |
2332 | state, mirror); | 2332 | state, mirror); |
2333 | if (ret) { | 2333 | if (ret) |
2334 | /* no IO indicated but software detected errors | ||
2335 | * in the block, either checksum errors or | ||
2336 | * issues with the contents */ | ||
2337 | struct btrfs_root *root = | ||
2338 | BTRFS_I(page->mapping->host)->root; | ||
2339 | struct btrfs_device *device; | ||
2340 | |||
2341 | uptodate = 0; | 2334 | uptodate = 0; |
2342 | device = btrfs_find_device_for_logical( | 2335 | else |
2343 | root, start, mirror); | ||
2344 | if (device) | ||
2345 | btrfs_dev_stat_inc_and_print(device, | ||
2346 | BTRFS_DEV_STAT_CORRUPTION_ERRS); | ||
2347 | } else { | ||
2348 | clean_io_failure(start, page); | 2336 | clean_io_failure(start, page); |
2349 | } | ||
2350 | } | 2337 | } |
2351 | 2338 | ||
2352 | if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { | 2339 | if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b45b9de0c21d..857d93cd01dc 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
@@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, | |||
272 | } | 272 | } |
273 | 273 | ||
274 | int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, | 274 | int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, |
275 | struct bio *bio, u64 offset, u32 *dst) | 275 | struct bio *bio, u64 offset) |
276 | { | 276 | { |
277 | return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); | 277 | return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1); |
278 | } | 278 | } |
279 | 279 | ||
280 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, | 280 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6e8f416773d4..ec154f954646 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1008,9 +1008,7 @@ static noinline void async_cow_submit(struct btrfs_work *work) | |||
1008 | nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> | 1008 | nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> |
1009 | PAGE_CACHE_SHIFT; | 1009 | PAGE_CACHE_SHIFT; |
1010 | 1010 | ||
1011 | atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); | 1011 | if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < |
1012 | |||
1013 | if (atomic_read(&root->fs_info->async_delalloc_pages) < | ||
1014 | 5 * 1024 * 1024 && | 1012 | 5 * 1024 * 1024 && |
1015 | waitqueue_active(&root->fs_info->async_submit_wait)) | 1013 | waitqueue_active(&root->fs_info->async_submit_wait)) |
1016 | wake_up(&root->fs_info->async_submit_wait); | 1014 | wake_up(&root->fs_info->async_submit_wait); |
@@ -1885,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) | |||
1885 | trans = btrfs_join_transaction_nolock(root); | 1883 | trans = btrfs_join_transaction_nolock(root); |
1886 | else | 1884 | else |
1887 | trans = btrfs_join_transaction(root); | 1885 | trans = btrfs_join_transaction(root); |
1888 | if (IS_ERR(trans)) | 1886 | if (IS_ERR(trans)) { |
1889 | return PTR_ERR(trans); | 1887 | ret = PTR_ERR(trans); |
1888 | trans = NULL; | ||
1889 | goto out; | ||
1890 | } | ||
1890 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 1891 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
1891 | ret = btrfs_update_inode_fallback(trans, root, inode); | 1892 | ret = btrfs_update_inode_fallback(trans, root, inode); |
1892 | if (ret) /* -ENOMEM or corruption */ | 1893 | if (ret) /* -ENOMEM or corruption */ |
@@ -3174,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, | |||
3174 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | 3175 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); |
3175 | inode_inc_iversion(dir); | 3176 | inode_inc_iversion(dir); |
3176 | dir->i_mtime = dir->i_ctime = CURRENT_TIME; | 3177 | dir->i_mtime = dir->i_ctime = CURRENT_TIME; |
3177 | ret = btrfs_update_inode(trans, root, dir); | 3178 | ret = btrfs_update_inode_fallback(trans, root, dir); |
3178 | if (ret) | 3179 | if (ret) |
3179 | btrfs_abort_transaction(trans, root, ret); | 3180 | btrfs_abort_transaction(trans, root, ret); |
3180 | out: | 3181 | out: |
@@ -5774,18 +5775,112 @@ out: | |||
5774 | return ret; | 5775 | return ret; |
5775 | } | 5776 | } |
5776 | 5777 | ||
5778 | static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, | ||
5779 | struct extent_state **cached_state, int writing) | ||
5780 | { | ||
5781 | struct btrfs_ordered_extent *ordered; | ||
5782 | int ret = 0; | ||
5783 | |||
5784 | while (1) { | ||
5785 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
5786 | 0, cached_state); | ||
5787 | /* | ||
5788 | * We're concerned with the entire range that we're going to be | ||
5789 | * doing DIO to, so we need to make sure theres no ordered | ||
5790 | * extents in this range. | ||
5791 | */ | ||
5792 | ordered = btrfs_lookup_ordered_range(inode, lockstart, | ||
5793 | lockend - lockstart + 1); | ||
5794 | |||
5795 | /* | ||
5796 | * We need to make sure there are no buffered pages in this | ||
5797 | * range either, we could have raced between the invalidate in | ||
5798 | * generic_file_direct_write and locking the extent. The | ||
5799 | * invalidate needs to happen so that reads after a write do not | ||
5800 | * get stale data. | ||
5801 | */ | ||
5802 | if (!ordered && (!writing || | ||
5803 | !test_range_bit(&BTRFS_I(inode)->io_tree, | ||
5804 | lockstart, lockend, EXTENT_UPTODATE, 0, | ||
5805 | *cached_state))) | ||
5806 | break; | ||
5807 | |||
5808 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
5809 | cached_state, GFP_NOFS); | ||
5810 | |||
5811 | if (ordered) { | ||
5812 | btrfs_start_ordered_extent(inode, ordered, 1); | ||
5813 | btrfs_put_ordered_extent(ordered); | ||
5814 | } else { | ||
5815 | /* Screw you mmap */ | ||
5816 | ret = filemap_write_and_wait_range(inode->i_mapping, | ||
5817 | lockstart, | ||
5818 | lockend); | ||
5819 | if (ret) | ||
5820 | break; | ||
5821 | |||
5822 | /* | ||
5823 | * If we found a page that couldn't be invalidated just | ||
5824 | * fall back to buffered. | ||
5825 | */ | ||
5826 | ret = invalidate_inode_pages2_range(inode->i_mapping, | ||
5827 | lockstart >> PAGE_CACHE_SHIFT, | ||
5828 | lockend >> PAGE_CACHE_SHIFT); | ||
5829 | if (ret) | ||
5830 | break; | ||
5831 | } | ||
5832 | |||
5833 | cond_resched(); | ||
5834 | } | ||
5835 | |||
5836 | return ret; | ||
5837 | } | ||
5838 | |||
5777 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | 5839 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
5778 | struct buffer_head *bh_result, int create) | 5840 | struct buffer_head *bh_result, int create) |
5779 | { | 5841 | { |
5780 | struct extent_map *em; | 5842 | struct extent_map *em; |
5781 | struct btrfs_root *root = BTRFS_I(inode)->root; | 5843 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5844 | struct extent_state *cached_state = NULL; | ||
5782 | u64 start = iblock << inode->i_blkbits; | 5845 | u64 start = iblock << inode->i_blkbits; |
5846 | u64 lockstart, lockend; | ||
5783 | u64 len = bh_result->b_size; | 5847 | u64 len = bh_result->b_size; |
5784 | struct btrfs_trans_handle *trans; | 5848 | struct btrfs_trans_handle *trans; |
5849 | int unlock_bits = EXTENT_LOCKED; | ||
5850 | int ret; | ||
5851 | |||
5852 | if (create) { | ||
5853 | ret = btrfs_delalloc_reserve_space(inode, len); | ||
5854 | if (ret) | ||
5855 | return ret; | ||
5856 | unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; | ||
5857 | } else { | ||
5858 | len = min_t(u64, len, root->sectorsize); | ||
5859 | } | ||
5860 | |||
5861 | lockstart = start; | ||
5862 | lockend = start + len - 1; | ||
5863 | |||
5864 | /* | ||
5865 | * If this errors out it's because we couldn't invalidate pagecache for | ||
5866 | * this range and we need to fallback to buffered. | ||
5867 | */ | ||
5868 | if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create)) | ||
5869 | return -ENOTBLK; | ||
5870 | |||
5871 | if (create) { | ||
5872 | ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | ||
5873 | lockend, EXTENT_DELALLOC, NULL, | ||
5874 | &cached_state, GFP_NOFS); | ||
5875 | if (ret) | ||
5876 | goto unlock_err; | ||
5877 | } | ||
5785 | 5878 | ||
5786 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | 5879 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); |
5787 | if (IS_ERR(em)) | 5880 | if (IS_ERR(em)) { |
5788 | return PTR_ERR(em); | 5881 | ret = PTR_ERR(em); |
5882 | goto unlock_err; | ||
5883 | } | ||
5789 | 5884 | ||
5790 | /* | 5885 | /* |
5791 | * Ok for INLINE and COMPRESSED extents we need to fallback on buffered | 5886 | * Ok for INLINE and COMPRESSED extents we need to fallback on buffered |
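The new lock_extent_direct() helper above carries the loop that used to live in btrfs_direct_IO(): take the extent lock, look for ordered extents or stale pagecache in the range, and if either is found drop the lock, resolve the conflict, and retry. The skeleton of that pattern, with placeholder helpers (none of these are btrfs functions):

#include <stdio.h>
#include <stdbool.h>

static int attempts;

static bool range_has_conflict(void) { return attempts++ < 2; }
static void lock_range(void)         { printf("lock range\n"); }
static void unlock_range(void)       { printf("unlock range\n"); }
static void resolve_conflict(void)   { printf("wait for ordered extent / flush pages\n"); }

/* Keep retrying until the range is locked with nothing conflicting in it. */
static int lock_range_for_dio(void)
{
    while (1) {
        lock_range();
        if (!range_has_conflict())
            return 0;
        unlock_range();
        resolve_conflict();
    }
}

int main(void)
{
    return lock_range_for_dio();
}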
@@ -5804,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
5804 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || | 5899 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || |
5805 | em->block_start == EXTENT_MAP_INLINE) { | 5900 | em->block_start == EXTENT_MAP_INLINE) { |
5806 | free_extent_map(em); | 5901 | free_extent_map(em); |
5807 | return -ENOTBLK; | 5902 | ret = -ENOTBLK; |
5903 | goto unlock_err; | ||
5808 | } | 5904 | } |
5809 | 5905 | ||
5810 | /* Just a good old fashioned hole, return */ | 5906 | /* Just a good old fashioned hole, return */ |
5811 | if (!create && (em->block_start == EXTENT_MAP_HOLE || | 5907 | if (!create && (em->block_start == EXTENT_MAP_HOLE || |
5812 | test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | 5908 | test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { |
5813 | free_extent_map(em); | 5909 | free_extent_map(em); |
5814 | /* DIO will do one hole at a time, so just unlock a sector */ | 5910 | ret = 0; |
5815 | unlock_extent(&BTRFS_I(inode)->io_tree, start, | 5911 | goto unlock_err; |
5816 | start + root->sectorsize - 1); | ||
5817 | return 0; | ||
5818 | } | 5912 | } |
5819 | 5913 | ||
5820 | /* | 5914 | /* |
@@ -5827,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
5827 | * | 5921 | * |
5828 | */ | 5922 | */ |
5829 | if (!create) { | 5923 | if (!create) { |
5830 | len = em->len - (start - em->start); | 5924 | len = min(len, em->len - (start - em->start)); |
5831 | goto map; | 5925 | lockstart = start + len; |
5926 | goto unlock; | ||
5832 | } | 5927 | } |
5833 | 5928 | ||
5834 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || | 5929 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || |
@@ -5860,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
5860 | btrfs_end_transaction(trans, root); | 5955 | btrfs_end_transaction(trans, root); |
5861 | if (ret) { | 5956 | if (ret) { |
5862 | free_extent_map(em); | 5957 | free_extent_map(em); |
5863 | return ret; | 5958 | goto unlock_err; |
5864 | } | 5959 | } |
5865 | goto unlock; | 5960 | goto unlock; |
5866 | } | 5961 | } |
@@ -5873,14 +5968,12 @@ must_cow: | |||
5873 | */ | 5968 | */ |
5874 | len = bh_result->b_size; | 5969 | len = bh_result->b_size; |
5875 | em = btrfs_new_extent_direct(inode, em, start, len); | 5970 | em = btrfs_new_extent_direct(inode, em, start, len); |
5876 | if (IS_ERR(em)) | 5971 | if (IS_ERR(em)) { |
5877 | return PTR_ERR(em); | 5972 | ret = PTR_ERR(em); |
5973 | goto unlock_err; | ||
5974 | } | ||
5878 | len = min(len, em->len - (start - em->start)); | 5975 | len = min(len, em->len - (start - em->start)); |
5879 | unlock: | 5976 | unlock: |
5880 | clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, | ||
5881 | EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, | ||
5882 | 0, NULL, GFP_NOFS); | ||
5883 | map: | ||
5884 | bh_result->b_blocknr = (em->block_start + (start - em->start)) >> | 5977 | bh_result->b_blocknr = (em->block_start + (start - em->start)) >> |
5885 | inode->i_blkbits; | 5978 | inode->i_blkbits; |
5886 | bh_result->b_size = len; | 5979 | bh_result->b_size = len; |
@@ -5898,9 +5991,44 @@ map: | |||
5898 | i_size_write(inode, start + len); | 5991 | i_size_write(inode, start + len); |
5899 | } | 5992 | } |
5900 | 5993 | ||
5994 | /* | ||
5995 | * In the case of write we need to clear and unlock the entire range, | ||
5996 | * in the case of read we need to unlock only the end area that we | ||
5997 | * aren't using if there is any left over space. | ||
5998 | */ | ||
5999 | if (lockstart < lockend) { | ||
6000 | if (create && len < lockend - lockstart) { | ||
6001 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | ||
6002 | lockstart + len - 1, unlock_bits, 1, 0, | ||
6003 | &cached_state, GFP_NOFS); | ||
6004 | /* | ||
6005 | * Beside unlock, we also need to cleanup reserved space | ||
6006 | * for the left range by attaching EXTENT_DO_ACCOUNTING. | ||
6007 | */ | ||
6008 | clear_extent_bit(&BTRFS_I(inode)->io_tree, | ||
6009 | lockstart + len, lockend, | ||
6010 | unlock_bits | EXTENT_DO_ACCOUNTING, | ||
6011 | 1, 0, NULL, GFP_NOFS); | ||
6012 | } else { | ||
6013 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | ||
6014 | lockend, unlock_bits, 1, 0, | ||
6015 | &cached_state, GFP_NOFS); | ||
6016 | } | ||
6017 | } else { | ||
6018 | free_extent_state(cached_state); | ||
6019 | } | ||
6020 | |||
5901 | free_extent_map(em); | 6021 | free_extent_map(em); |
5902 | 6022 | ||
5903 | return 0; | 6023 | return 0; |
6024 | |||
6025 | unlock_err: | ||
6026 | if (create) | ||
6027 | unlock_bits |= EXTENT_DO_ACCOUNTING; | ||
6028 | |||
6029 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
6030 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | ||
6031 | return ret; | ||
5904 | } | 6032 | } |
5905 | 6033 | ||
5906 | struct btrfs_dio_private { | 6034 | struct btrfs_dio_private { |
@@ -5908,7 +6036,6 @@ struct btrfs_dio_private { | |||
5908 | u64 logical_offset; | 6036 | u64 logical_offset; |
5909 | u64 disk_bytenr; | 6037 | u64 disk_bytenr; |
5910 | u64 bytes; | 6038 | u64 bytes; |
5911 | u32 *csums; | ||
5912 | void *private; | 6039 | void *private; |
5913 | 6040 | ||
5914 | /* number of bios pending for this dio */ | 6041 | /* number of bios pending for this dio */ |
@@ -5928,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) | |||
5928 | struct inode *inode = dip->inode; | 6055 | struct inode *inode = dip->inode; |
5929 | struct btrfs_root *root = BTRFS_I(inode)->root; | 6056 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5930 | u64 start; | 6057 | u64 start; |
5931 | u32 *private = dip->csums; | ||
5932 | 6058 | ||
5933 | start = dip->logical_offset; | 6059 | start = dip->logical_offset; |
5934 | do { | 6060 | do { |
@@ -5936,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) | |||
5936 | struct page *page = bvec->bv_page; | 6062 | struct page *page = bvec->bv_page; |
5937 | char *kaddr; | 6063 | char *kaddr; |
5938 | u32 csum = ~(u32)0; | 6064 | u32 csum = ~(u32)0; |
6065 | u64 private = ~(u32)0; | ||
5939 | unsigned long flags; | 6066 | unsigned long flags; |
5940 | 6067 | ||
6068 | if (get_state_private(&BTRFS_I(inode)->io_tree, | ||
6069 | start, &private)) | ||
6070 | goto failed; | ||
5941 | local_irq_save(flags); | 6071 | local_irq_save(flags); |
5942 | kaddr = kmap_atomic(page); | 6072 | kaddr = kmap_atomic(page); |
5943 | csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, | 6073 | csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, |
@@ -5947,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) | |||
5947 | local_irq_restore(flags); | 6077 | local_irq_restore(flags); |
5948 | 6078 | ||
5949 | flush_dcache_page(bvec->bv_page); | 6079 | flush_dcache_page(bvec->bv_page); |
5950 | if (csum != *private) { | 6080 | if (csum != private) { |
6081 | failed: | ||
5951 | printk(KERN_ERR "btrfs csum failed ino %llu off" | 6082 | printk(KERN_ERR "btrfs csum failed ino %llu off" |
5952 | " %llu csum %u private %u\n", | 6083 | " %llu csum %u private %u\n", |
5953 | (unsigned long long)btrfs_ino(inode), | 6084 | (unsigned long long)btrfs_ino(inode), |
5954 | (unsigned long long)start, | 6085 | (unsigned long long)start, |
5955 | csum, *private); | 6086 | csum, (unsigned)private); |
5956 | err = -EIO; | 6087 | err = -EIO; |
5957 | } | 6088 | } |
5958 | } | 6089 | } |
5959 | 6090 | ||
5960 | start += bvec->bv_len; | 6091 | start += bvec->bv_len; |
5961 | private++; | ||
5962 | bvec++; | 6092 | bvec++; |
5963 | } while (bvec <= bvec_end); | 6093 | } while (bvec <= bvec_end); |
5964 | 6094 | ||
@@ -5966,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) | |||
5966 | dip->logical_offset + dip->bytes - 1); | 6096 | dip->logical_offset + dip->bytes - 1); |
5967 | bio->bi_private = dip->private; | 6097 | bio->bi_private = dip->private; |
5968 | 6098 | ||
5969 | kfree(dip->csums); | ||
5970 | kfree(dip); | 6099 | kfree(dip); |
5971 | 6100 | ||
5972 | /* If we had a csum failure make sure to clear the uptodate flag */ | 6101 | /* If we had a csum failure make sure to clear the uptodate flag */ |
@@ -6072,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, | |||
6072 | 6201 | ||
6073 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | 6202 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, |
6074 | int rw, u64 file_offset, int skip_sum, | 6203 | int rw, u64 file_offset, int skip_sum, |
6075 | u32 *csums, int async_submit) | 6204 | int async_submit) |
6076 | { | 6205 | { |
6077 | int write = rw & REQ_WRITE; | 6206 | int write = rw & REQ_WRITE; |
6078 | struct btrfs_root *root = BTRFS_I(inode)->root; | 6207 | struct btrfs_root *root = BTRFS_I(inode)->root; |
@@ -6105,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |||
6105 | if (ret) | 6234 | if (ret) |
6106 | goto err; | 6235 | goto err; |
6107 | } else if (!skip_sum) { | 6236 | } else if (!skip_sum) { |
6108 | ret = btrfs_lookup_bio_sums_dio(root, inode, bio, | 6237 | ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset); |
6109 | file_offset, csums); | ||
6110 | if (ret) | 6238 | if (ret) |
6111 | goto err; | 6239 | goto err; |
6112 | } | 6240 | } |
@@ -6132,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
6132 | u64 submit_len = 0; | 6260 | u64 submit_len = 0; |
6133 | u64 map_length; | 6261 | u64 map_length; |
6134 | int nr_pages = 0; | 6262 | int nr_pages = 0; |
6135 | u32 *csums = dip->csums; | ||
6136 | int ret = 0; | 6263 | int ret = 0; |
6137 | int async_submit = 0; | 6264 | int async_submit = 0; |
6138 | int write = rw & REQ_WRITE; | ||
6139 | 6265 | ||
6140 | map_length = orig_bio->bi_size; | 6266 | map_length = orig_bio->bi_size; |
6141 | ret = btrfs_map_block(map_tree, READ, start_sector << 9, | 6267 | ret = btrfs_map_block(map_tree, READ, start_sector << 9, |
@@ -6171,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
6171 | atomic_inc(&dip->pending_bios); | 6297 | atomic_inc(&dip->pending_bios); |
6172 | ret = __btrfs_submit_dio_bio(bio, inode, rw, | 6298 | ret = __btrfs_submit_dio_bio(bio, inode, rw, |
6173 | file_offset, skip_sum, | 6299 | file_offset, skip_sum, |
6174 | csums, async_submit); | 6300 | async_submit); |
6175 | if (ret) { | 6301 | if (ret) { |
6176 | bio_put(bio); | 6302 | bio_put(bio); |
6177 | atomic_dec(&dip->pending_bios); | 6303 | atomic_dec(&dip->pending_bios); |
6178 | goto out_err; | 6304 | goto out_err; |
6179 | } | 6305 | } |
6180 | 6306 | ||
6181 | /* Write's use the ordered csums */ | ||
6182 | if (!write && !skip_sum) | ||
6183 | csums = csums + nr_pages; | ||
6184 | start_sector += submit_len >> 9; | 6307 | start_sector += submit_len >> 9; |
6185 | file_offset += submit_len; | 6308 | file_offset += submit_len; |
6186 | 6309 | ||
@@ -6210,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |||
6210 | 6333 | ||
6211 | submit: | 6334 | submit: |
6212 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, | 6335 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, |
6213 | csums, async_submit); | 6336 | async_submit); |
6214 | if (!ret) | 6337 | if (!ret) |
6215 | return 0; | 6338 | return 0; |
6216 | 6339 | ||
@@ -6246,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, | |||
6246 | ret = -ENOMEM; | 6369 | ret = -ENOMEM; |
6247 | goto free_ordered; | 6370 | goto free_ordered; |
6248 | } | 6371 | } |
6249 | dip->csums = NULL; | ||
6250 | |||
6251 | /* Write's use the ordered csum stuff, so we don't need dip->csums */ | ||
6252 | if (!write && !skip_sum) { | ||
6253 | dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); | ||
6254 | if (!dip->csums) { | ||
6255 | kfree(dip); | ||
6256 | ret = -ENOMEM; | ||
6257 | goto free_ordered; | ||
6258 | } | ||
6259 | } | ||
6260 | 6372 | ||
6261 | dip->private = bio->bi_private; | 6373 | dip->private = bio->bi_private; |
6262 | dip->inode = inode; | 6374 | dip->inode = inode; |
@@ -6341,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io | |||
6341 | out: | 6453 | out: |
6342 | return retval; | 6454 | return retval; |
6343 | } | 6455 | } |
6456 | |||
6344 | static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | 6457 | static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, |
6345 | const struct iovec *iov, loff_t offset, | 6458 | const struct iovec *iov, loff_t offset, |
6346 | unsigned long nr_segs) | 6459 | unsigned long nr_segs) |
6347 | { | 6460 | { |
6348 | struct file *file = iocb->ki_filp; | 6461 | struct file *file = iocb->ki_filp; |
6349 | struct inode *inode = file->f_mapping->host; | 6462 | struct inode *inode = file->f_mapping->host; |
6350 | struct btrfs_ordered_extent *ordered; | ||
6351 | struct extent_state *cached_state = NULL; | ||
6352 | u64 lockstart, lockend; | ||
6353 | ssize_t ret; | ||
6354 | int writing = rw & WRITE; | ||
6355 | int write_bits = 0; | ||
6356 | size_t count = iov_length(iov, nr_segs); | ||
6357 | 6463 | ||
6358 | if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, | 6464 | if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, |
6359 | offset, nr_segs)) { | 6465 | offset, nr_segs)) |
6360 | return 0; | 6466 | return 0; |
6361 | } | ||
6362 | |||
6363 | lockstart = offset; | ||
6364 | lockend = offset + count - 1; | ||
6365 | |||
6366 | if (writing) { | ||
6367 | ret = btrfs_delalloc_reserve_space(inode, count); | ||
6368 | if (ret) | ||
6369 | goto out; | ||
6370 | } | ||
6371 | |||
6372 | while (1) { | ||
6373 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
6374 | 0, &cached_state); | ||
6375 | /* | ||
6376 | * We're concerned with the entire range that we're going to be | ||
6377 | * doing DIO to, so we need to make sure theres no ordered | ||
6378 | * extents in this range. | ||
6379 | */ | ||
6380 | ordered = btrfs_lookup_ordered_range(inode, lockstart, | ||
6381 | lockend - lockstart + 1); | ||
6382 | |||
6383 | /* | ||
6384 | * We need to make sure there are no buffered pages in this | ||
6385 | * range either, we could have raced between the invalidate in | ||
6386 | * generic_file_direct_write and locking the extent. The | ||
6387 | * invalidate needs to happen so that reads after a write do not | ||
6388 | * get stale data. | ||
6389 | */ | ||
6390 | if (!ordered && (!writing || | ||
6391 | !test_range_bit(&BTRFS_I(inode)->io_tree, | ||
6392 | lockstart, lockend, EXTENT_UPTODATE, 0, | ||
6393 | cached_state))) | ||
6394 | break; | ||
6395 | |||
6396 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
6397 | &cached_state, GFP_NOFS); | ||
6398 | |||
6399 | if (ordered) { | ||
6400 | btrfs_start_ordered_extent(inode, ordered, 1); | ||
6401 | btrfs_put_ordered_extent(ordered); | ||
6402 | } else { | ||
6403 | /* Screw you mmap */ | ||
6404 | ret = filemap_write_and_wait_range(file->f_mapping, | ||
6405 | lockstart, | ||
6406 | lockend); | ||
6407 | if (ret) | ||
6408 | goto out; | ||
6409 | |||
6410 | /* | ||
6411 | * If we found a page that couldn't be invalidated just | ||
6412 | * fall back to buffered. | ||
6413 | */ | ||
6414 | ret = invalidate_inode_pages2_range(file->f_mapping, | ||
6415 | lockstart >> PAGE_CACHE_SHIFT, | ||
6416 | lockend >> PAGE_CACHE_SHIFT); | ||
6417 | if (ret) { | ||
6418 | if (ret == -EBUSY) | ||
6419 | ret = 0; | ||
6420 | goto out; | ||
6421 | } | ||
6422 | } | ||
6423 | |||
6424 | cond_resched(); | ||
6425 | } | ||
6426 | 6467 | ||
6427 | /* | 6468 | return __blockdev_direct_IO(rw, iocb, inode, |
6428 | * we don't use btrfs_set_extent_delalloc because we don't want | ||
6429 | * the dirty or uptodate bits | ||
6430 | */ | ||
6431 | if (writing) { | ||
6432 | write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; | ||
6433 | ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | ||
6434 | EXTENT_DELALLOC, NULL, &cached_state, | ||
6435 | GFP_NOFS); | ||
6436 | if (ret) { | ||
6437 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | ||
6438 | lockend, EXTENT_LOCKED | write_bits, | ||
6439 | 1, 0, &cached_state, GFP_NOFS); | ||
6440 | goto out; | ||
6441 | } | ||
6442 | } | ||
6443 | |||
6444 | free_extent_state(cached_state); | ||
6445 | cached_state = NULL; | ||
6446 | |||
6447 | ret = __blockdev_direct_IO(rw, iocb, inode, | ||
6448 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, | 6469 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, |
6449 | iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, | 6470 | iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, |
6450 | btrfs_submit_direct, 0); | 6471 | btrfs_submit_direct, 0); |
6451 | |||
6452 | if (ret < 0 && ret != -EIOCBQUEUED) { | ||
6453 | clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, | ||
6454 | offset + iov_length(iov, nr_segs) - 1, | ||
6455 | EXTENT_LOCKED | write_bits, 1, 0, | ||
6456 | &cached_state, GFP_NOFS); | ||
6457 | } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { | ||
6458 | /* | ||
6459 | * We're falling back to buffered, unlock the section we didn't | ||
6460 | * do IO on. | ||
6461 | */ | ||
6462 | clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, | ||
6463 | offset + iov_length(iov, nr_segs) - 1, | ||
6464 | EXTENT_LOCKED | write_bits, 1, 0, | ||
6465 | &cached_state, GFP_NOFS); | ||
6466 | } | ||
6467 | out: | ||
6468 | free_extent_state(cached_state); | ||
6469 | return ret; | ||
6470 | } | 6472 | } |
6471 | 6473 | ||
6472 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 6474 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
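The direct-I/O setup in the btrfs/inode.c hunk above flushes and invalidates any buffered pages over the locked range and treats -EBUSY from invalidate_inode_pages2_range() as "fall back to buffered I/O" rather than a hard error. A condensed, hedged sketch of just that step follows; the helper name and its return convention are made up for illustration and are not btrfs API:

        #include <linux/fs.h>
        #include <linux/pagemap.h>

        /*
         * Flush and drop the page cache over [start, end] before direct I/O.
         * Returns 0 if DIO can proceed, 1 if the caller should fall back to
         * buffered I/O, or a negative error.
         */
        static int dio_prepare_range(struct address_space *mapping,
                                     loff_t start, loff_t end)
        {
                int ret;

                ret = filemap_write_and_wait_range(mapping, start, end);
                if (ret)
                        return ret;

                ret = invalidate_inode_pages2_range(mapping,
                                                    start >> PAGE_CACHE_SHIFT,
                                                    end >> PAGE_CACHE_SHIFT);
                if (ret == -EBUSY)      /* a page could not be dropped */
                        return 1;       /* caller falls back to buffered I/O */
                return ret;
        }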
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7bb755677a22..9df50fa8a078 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
424 | uuid_le_gen(&new_uuid); | 424 | uuid_le_gen(&new_uuid); |
425 | memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); | 425 | memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); |
426 | root_item.otime.sec = cpu_to_le64(cur_time.tv_sec); | 426 | root_item.otime.sec = cpu_to_le64(cur_time.tv_sec); |
427 | root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec); | 427 | root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec); |
428 | root_item.ctime = root_item.otime; | 428 | root_item.ctime = root_item.otime; |
429 | btrfs_set_root_ctransid(&root_item, trans->transid); | 429 | btrfs_set_root_ctransid(&root_item, trans->transid); |
430 | btrfs_set_root_otransid(&root_item, trans->transid); | 430 | btrfs_set_root_otransid(&root_item, trans->transid); |
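The ioctl.c hunk narrows the conversion from cpu_to_le64() to cpu_to_le32() because the on-disk nanoseconds field is only 32 bits wide; the root-tree.c and transaction.c hunks further down make the same change. A minimal sketch of the pattern, using an illustrative struct rather than the real btrfs_timespec:

        #include <linux/types.h>
        #include <linux/time.h>
        #include <asm/byteorder.h>

        /* Illustrative on-disk timestamp: 64-bit seconds, 32-bit nanoseconds. */
        struct disk_timespec {
                __le64 sec;
                __le32 nsec;
        } __attribute__((packed));

        static void fill_disk_time(struct disk_timespec *dt,
                                   const struct timespec *ts)
        {
                dt->sec  = cpu_to_le64(ts->tv_sec);   /* 64-bit field: 64-bit helper */
                dt->nsec = cpu_to_le32(ts->tv_nsec);  /* 32-bit field: 32-bit helper */
        }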
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index a44eff074805..2a1762c66041 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c | |||
@@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) | |||
67 | { | 67 | { |
68 | if (eb->lock_nested) { | 68 | if (eb->lock_nested) { |
69 | read_lock(&eb->lock); | 69 | read_lock(&eb->lock); |
70 | if (&eb->lock_nested && current->pid == eb->lock_owner) { | 70 | if (eb->lock_nested && current->pid == eb->lock_owner) { |
71 | read_unlock(&eb->lock); | 71 | read_unlock(&eb->lock); |
72 | return; | 72 | return; |
73 | } | 73 | } |
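The locking.c fix replaces &eb->lock_nested with eb->lock_nested: the address of a struct member is always non-NULL, so the old condition was unconditionally true and only the pid comparison mattered. A tiny standalone illustration with a hypothetical struct, not the btrfs extent_buffer:

        #include <stdio.h>

        struct buf { int lock_nested; };

        int main(void)
        {
                struct buf b = { .lock_nested = 0 };

                if (&b.lock_nested)     /* address of a member: always true */
                        printf("always taken\n");
                if (b.lock_nested)      /* value of the member: only when set */
                        printf("never taken here\n");
                return 0;
        }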
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index bc424ae5a81a..38b42e7bc91d 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -1364,13 +1364,17 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, | |||
1364 | spin_lock(&fs_info->qgroup_lock); | 1364 | spin_lock(&fs_info->qgroup_lock); |
1365 | 1365 | ||
1366 | dstgroup = add_qgroup_rb(fs_info, objectid); | 1366 | dstgroup = add_qgroup_rb(fs_info, objectid); |
1367 | if (!dstgroup) | 1367 | if (IS_ERR(dstgroup)) { |
1368 | ret = PTR_ERR(dstgroup); | ||
1368 | goto unlock; | 1369 | goto unlock; |
1370 | } | ||
1369 | 1371 | ||
1370 | if (srcid) { | 1372 | if (srcid) { |
1371 | srcgroup = find_qgroup_rb(fs_info, srcid); | 1373 | srcgroup = find_qgroup_rb(fs_info, srcid); |
1372 | if (!srcgroup) | 1374 | if (!srcgroup) { |
1375 | ret = -EINVAL; | ||
1373 | goto unlock; | 1376 | goto unlock; |
1377 | } | ||
1374 | dstgroup->rfer = srcgroup->rfer - level_size; | 1378 | dstgroup->rfer = srcgroup->rfer - level_size; |
1375 | dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size; | 1379 | dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size; |
1376 | srcgroup->excl = level_size; | 1380 | srcgroup->excl = level_size; |
@@ -1379,8 +1383,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, | |||
1379 | qgroup_dirty(fs_info, srcgroup); | 1383 | qgroup_dirty(fs_info, srcgroup); |
1380 | } | 1384 | } |
1381 | 1385 | ||
1382 | if (!inherit) | 1386 | if (!inherit) { |
1387 | ret = -EINVAL; | ||
1383 | goto unlock; | 1388 | goto unlock; |
1389 | } | ||
1384 | 1390 | ||
1385 | i_qgroups = (u64 *)(inherit + 1); | 1391 | i_qgroups = (u64 *)(inherit + 1); |
1386 | for (i = 0; i < inherit->num_qgroups; ++i) { | 1392 | for (i = 0; i < inherit->num_qgroups; ++i) { |
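The qgroup.c hunks switch the caller to the IS_ERR()/PTR_ERR() convention for add_qgroup_rb() and make each early "goto unlock" set a concrete error instead of falling through with whatever value ret happened to hold. A hedged sketch of that convention; the widget names are invented for illustration:

        #include <linux/err.h>

        struct widget;
        static struct widget *widget_create(int id); /* assumed: ERR_PTR() on failure */

        static int widget_setup(int id)
        {
                struct widget *w;
                int ret = 0;

                w = widget_create(id);
                if (IS_ERR(w)) {
                        ret = PTR_ERR(w);       /* propagate -ENOMEM, -EEXIST, ... */
                        goto out;
                }
                /* ... use w ... */
        out:
                return ret;
        }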
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6bb465cca20f..10d8e4d88071 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c | |||
@@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans, | |||
544 | struct timespec ct = CURRENT_TIME; | 544 | struct timespec ct = CURRENT_TIME; |
545 | 545 | ||
546 | spin_lock(&root->root_times_lock); | 546 | spin_lock(&root->root_times_lock); |
547 | item->ctransid = trans->transid; | 547 | item->ctransid = cpu_to_le64(trans->transid); |
548 | item->ctime.sec = cpu_to_le64(ct.tv_sec); | 548 | item->ctime.sec = cpu_to_le64(ct.tv_sec); |
549 | item->ctime.nsec = cpu_to_le64(ct.tv_nsec); | 549 | item->ctime.nsec = cpu_to_le32(ct.tv_nsec); |
550 | spin_unlock(&root->root_times_lock); | 550 | spin_unlock(&root->root_times_lock); |
551 | } | 551 | } |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f2eb24c477a3..83d6f9f9c220 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -838,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait) | |||
838 | struct btrfs_trans_handle *trans; | 838 | struct btrfs_trans_handle *trans; |
839 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); | 839 | struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
840 | struct btrfs_root *root = fs_info->tree_root; | 840 | struct btrfs_root *root = fs_info->tree_root; |
841 | int ret; | ||
842 | 841 | ||
843 | trace_btrfs_sync_fs(wait); | 842 | trace_btrfs_sync_fs(wait); |
844 | 843 | ||
@@ -849,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait) | |||
849 | 848 | ||
850 | btrfs_wait_ordered_extents(root, 0, 0); | 849 | btrfs_wait_ordered_extents(root, 0, 0); |
851 | 850 | ||
852 | trans = btrfs_start_transaction(root, 0); | 851 | spin_lock(&fs_info->trans_lock); |
852 | if (!fs_info->running_transaction) { | ||
853 | spin_unlock(&fs_info->trans_lock); | ||
854 | return 0; | ||
855 | } | ||
856 | spin_unlock(&fs_info->trans_lock); | ||
857 | |||
858 | trans = btrfs_join_transaction(root); | ||
853 | if (IS_ERR(trans)) | 859 | if (IS_ERR(trans)) |
854 | return PTR_ERR(trans); | 860 | return PTR_ERR(trans); |
855 | ret = btrfs_commit_transaction(trans, root); | 861 | return btrfs_commit_transaction(trans, root); |
856 | return ret; | ||
857 | } | 862 | } |
858 | 863 | ||
859 | static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) | 864 | static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) |
@@ -1530,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) | |||
1530 | while (cur_devices) { | 1535 | while (cur_devices) { |
1531 | head = &cur_devices->devices; | 1536 | head = &cur_devices->devices; |
1532 | list_for_each_entry(dev, head, dev_list) { | 1537 | list_for_each_entry(dev, head, dev_list) { |
1538 | if (dev->missing) | ||
1539 | continue; | ||
1533 | if (!first_dev || dev->devid < first_dev->devid) | 1540 | if (!first_dev || dev->devid < first_dev->devid) |
1534 | first_dev = dev; | 1541 | first_dev = dev; |
1535 | } | 1542 | } |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 17be3dedacba..27c26004e050 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -1031,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
1031 | 1031 | ||
1032 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | 1032 | btrfs_i_size_write(parent_inode, parent_inode->i_size + |
1033 | dentry->d_name.len * 2); | 1033 | dentry->d_name.len * 2); |
1034 | parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; | ||
1034 | ret = btrfs_update_inode(trans, parent_root, parent_inode); | 1035 | ret = btrfs_update_inode(trans, parent_root, parent_inode); |
1035 | if (ret) | 1036 | if (ret) |
1036 | goto abort_trans_dput; | 1037 | goto abort_trans_dput; |
@@ -1066,7 +1067,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
1066 | memcpy(new_root_item->parent_uuid, root->root_item.uuid, | 1067 | memcpy(new_root_item->parent_uuid, root->root_item.uuid, |
1067 | BTRFS_UUID_SIZE); | 1068 | BTRFS_UUID_SIZE); |
1068 | new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec); | 1069 | new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec); |
1069 | new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec); | 1070 | new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec); |
1070 | btrfs_set_root_otransid(new_root_item, trans->transid); | 1071 | btrfs_set_root_otransid(new_root_item, trans->transid); |
1071 | memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); | 1072 | memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); |
1072 | memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); | 1073 | memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e86ae04abe6a..88b969aeeb71 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -227,9 +227,8 @@ loop_lock: | |||
227 | cur = pending; | 227 | cur = pending; |
228 | pending = pending->bi_next; | 228 | pending = pending->bi_next; |
229 | cur->bi_next = NULL; | 229 | cur->bi_next = NULL; |
230 | atomic_dec(&fs_info->nr_async_bios); | ||
231 | 230 | ||
232 | if (atomic_read(&fs_info->nr_async_bios) < limit && | 231 | if (atomic_dec_return(&fs_info->nr_async_bios) < limit && |
233 | waitqueue_active(&fs_info->async_submit_wait)) | 232 | waitqueue_active(&fs_info->async_submit_wait)) |
234 | wake_up(&fs_info->async_submit_wait); | 233 | wake_up(&fs_info->async_submit_wait); |
235 | 234 | ||
@@ -569,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
569 | memcpy(new_device, device, sizeof(*new_device)); | 568 | memcpy(new_device, device, sizeof(*new_device)); |
570 | 569 | ||
571 | /* Safe because we are under uuid_mutex */ | 570 | /* Safe because we are under uuid_mutex */ |
572 | name = rcu_string_strdup(device->name->str, GFP_NOFS); | 571 | if (device->name) { |
573 | BUG_ON(device->name && !name); /* -ENOMEM */ | 572 | name = rcu_string_strdup(device->name->str, GFP_NOFS); |
574 | rcu_assign_pointer(new_device->name, name); | 573 | BUG_ON(device->name && !name); /* -ENOMEM */ |
574 | rcu_assign_pointer(new_device->name, name); | ||
575 | } | ||
575 | new_device->bdev = NULL; | 576 | new_device->bdev = NULL; |
576 | new_device->writeable = 0; | 577 | new_device->writeable = 0; |
577 | new_device->in_fs_metadata = 0; | 578 | new_device->in_fs_metadata = 0; |
@@ -4605,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
4605 | return ret; | 4606 | return ret; |
4606 | } | 4607 | } |
4607 | 4608 | ||
4608 | struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root, | ||
4609 | u64 logical, int mirror_num) | ||
4610 | { | ||
4611 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | ||
4612 | int ret; | ||
4613 | u64 map_length = 0; | ||
4614 | struct btrfs_bio *bbio = NULL; | ||
4615 | struct btrfs_device *device; | ||
4616 | |||
4617 | BUG_ON(mirror_num == 0); | ||
4618 | ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio, | ||
4619 | mirror_num); | ||
4620 | if (ret) { | ||
4621 | BUG_ON(bbio != NULL); | ||
4622 | return NULL; | ||
4623 | } | ||
4624 | BUG_ON(mirror_num != bbio->mirror_num); | ||
4625 | device = bbio->stripes[mirror_num - 1].dev; | ||
4626 | kfree(bbio); | ||
4627 | return device; | ||
4628 | } | ||
4629 | |||
4630 | int btrfs_read_chunk_tree(struct btrfs_root *root) | 4609 | int btrfs_read_chunk_tree(struct btrfs_root *root) |
4631 | { | 4610 | { |
4632 | struct btrfs_path *path; | 4611 | struct btrfs_path *path; |
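The volumes.c change folds a separate atomic_dec() plus atomic_read() into one atomic_dec_return(), so the wake-up decision is based on the value this CPU actually produced rather than on a second, racy read. A rough sketch of the two forms; the counter, limit, and flag are illustrative only:

        #include <linux/atomic.h>
        #include <linux/types.h>

        static atomic_t nr_inflight = ATOMIC_INIT(0);
        static bool need_wakeup;
        #define INFLIGHT_LIMIT 64

        static void finish_one_racy(void)
        {
                atomic_dec(&nr_inflight);
                /* another CPU can change the counter between the dec and this read */
                if (atomic_read(&nr_inflight) < INFLIGHT_LIMIT)
                        need_wakeup = true;
        }

        static void finish_one_atomic(void)
        {
                /* test the exact value produced by this CPU's decrement */
                if (atomic_dec_return(&nr_inflight) < INFLIGHT_LIMIT)
                        need_wakeup = true;
        }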
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 5479325987b3..53c06af92e8d 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -289,8 +289,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info); | |||
289 | int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); | 289 | int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); |
290 | int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, | 290 | int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, |
291 | u64 *start, u64 *max_avail); | 291 | u64 *start, u64 *max_avail); |
292 | struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root, | ||
293 | u64 logical, int mirror_num); | ||
294 | void btrfs_dev_stat_print_on_error(struct btrfs_device *device); | 292 | void btrfs_dev_stat_print_on_error(struct btrfs_device *device); |
295 | void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); | 293 | void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); |
296 | int btrfs_get_dev_stats(struct btrfs_root *root, | 294 | int btrfs_get_dev_stats(struct btrfs_root *root, |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 074923ce593d..f0cf934ba877 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid) | |||
1576 | /* result already set, check signature */ | 1576 | /* result already set, check signature */ |
1577 | if (server->sec_mode & | 1577 | if (server->sec_mode & |
1578 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { | 1578 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { |
1579 | if (cifs_verify_signature(rdata->iov, rdata->nr_iov, | 1579 | int rc = 0; |
1580 | server, mid->sequence_number + 1)) | 1580 | |
1581 | cERROR(1, "Unexpected SMB signature"); | 1581 | rc = cifs_verify_signature(rdata->iov, rdata->nr_iov, |
1582 | server, | ||
1583 | mid->sequence_number + 1); | ||
1584 | if (rc) | ||
1585 | cERROR(1, "SMB signature verification returned " | ||
1586 | "error = %d", rc); | ||
1582 | } | 1587 | } |
1583 | /* FIXME: should this be counted toward the initiating task? */ | 1588 | /* FIXME: should this be counted toward the initiating task? */ |
1584 | task_io_account_read(rdata->bytes); | 1589 | task_io_account_read(rdata->bytes); |
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index cbe709ad6663..781025be48bc 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -356,19 +356,12 @@ cifs_create_get_file_info: | |||
356 | cifs_create_set_dentry: | 356 | cifs_create_set_dentry: |
357 | if (rc != 0) { | 357 | if (rc != 0) { |
358 | cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); | 358 | cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); |
359 | CIFSSMBClose(xid, tcon, *fileHandle); | ||
359 | goto out; | 360 | goto out; |
360 | } | 361 | } |
361 | d_drop(direntry); | 362 | d_drop(direntry); |
362 | d_add(direntry, newinode); | 363 | d_add(direntry, newinode); |
363 | 364 | ||
364 | /* ENOENT for create? How weird... */ | ||
365 | rc = -ENOENT; | ||
366 | if (!newinode) { | ||
367 | CIFSSMBClose(xid, tcon, *fileHandle); | ||
368 | goto out; | ||
369 | } | ||
370 | rc = 0; | ||
371 | |||
372 | out: | 365 | out: |
373 | kfree(buf); | 366 | kfree(buf); |
374 | kfree(full_path); | 367 | kfree(full_path); |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 7354877fa3bd..cb79c7edecb0 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) | |||
124 | { | 124 | { |
125 | struct cifsInodeInfo *cifs_i = CIFS_I(inode); | 125 | struct cifsInodeInfo *cifs_i = CIFS_I(inode); |
126 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 126 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
127 | unsigned long oldtime = cifs_i->time; | ||
128 | 127 | ||
129 | cifs_revalidate_cache(inode, fattr); | 128 | cifs_revalidate_cache(inode, fattr); |
130 | 129 | ||
130 | spin_lock(&inode->i_lock); | ||
131 | inode->i_atime = fattr->cf_atime; | 131 | inode->i_atime = fattr->cf_atime; |
132 | inode->i_mtime = fattr->cf_mtime; | 132 | inode->i_mtime = fattr->cf_mtime; |
133 | inode->i_ctime = fattr->cf_ctime; | 133 | inode->i_ctime = fattr->cf_ctime; |
@@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) | |||
148 | else | 148 | else |
149 | cifs_i->time = jiffies; | 149 | cifs_i->time = jiffies; |
150 | 150 | ||
151 | cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode, | ||
152 | oldtime, cifs_i->time); | ||
153 | |||
154 | cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; | 151 | cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; |
155 | 152 | ||
156 | cifs_i->server_eof = fattr->cf_eof; | 153 | cifs_i->server_eof = fattr->cf_eof; |
@@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) | |||
158 | * Can't safely change the file size here if the client is writing to | 155 | * Can't safely change the file size here if the client is writing to |
159 | * it due to potential races. | 156 | * it due to potential races. |
160 | */ | 157 | */ |
161 | spin_lock(&inode->i_lock); | ||
162 | if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { | 158 | if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { |
163 | i_size_write(inode, fattr->cf_eof); | 159 | i_size_write(inode, fattr->cf_eof); |
164 | 160 | ||
@@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb) | |||
859 | 855 | ||
860 | if (rc && tcon->ipc) { | 856 | if (rc && tcon->ipc) { |
861 | cFYI(1, "ipc connection - fake read inode"); | 857 | cFYI(1, "ipc connection - fake read inode"); |
858 | spin_lock(&inode->i_lock); | ||
862 | inode->i_mode |= S_IFDIR; | 859 | inode->i_mode |= S_IFDIR; |
863 | set_nlink(inode, 2); | 860 | set_nlink(inode, 2); |
864 | inode->i_op = &cifs_ipc_inode_ops; | 861 | inode->i_op = &cifs_ipc_inode_ops; |
865 | inode->i_fop = &simple_dir_operations; | 862 | inode->i_fop = &simple_dir_operations; |
866 | inode->i_uid = cifs_sb->mnt_uid; | 863 | inode->i_uid = cifs_sb->mnt_uid; |
867 | inode->i_gid = cifs_sb->mnt_gid; | 864 | inode->i_gid = cifs_sb->mnt_gid; |
865 | spin_unlock(&inode->i_lock); | ||
868 | } else if (rc) { | 866 | } else if (rc) { |
869 | iget_failed(inode); | 867 | iget_failed(inode); |
870 | inode = ERR_PTR(rc); | 868 | inode = ERR_PTR(rc); |
@@ -1110,6 +1108,15 @@ undo_setattr: | |||
1110 | goto out_close; | 1108 | goto out_close; |
1111 | } | 1109 | } |
1112 | 1110 | ||
1111 | /* copied from fs/nfs/dir.c with small changes */ | ||
1112 | static void | ||
1113 | cifs_drop_nlink(struct inode *inode) | ||
1114 | { | ||
1115 | spin_lock(&inode->i_lock); | ||
1116 | if (inode->i_nlink > 0) | ||
1117 | drop_nlink(inode); | ||
1118 | spin_unlock(&inode->i_lock); | ||
1119 | } | ||
1113 | 1120 | ||
1114 | /* | 1121 | /* |
1115 | * If dentry->d_inode is null (usually meaning the cached dentry | 1122 | * If dentry->d_inode is null (usually meaning the cached dentry |
@@ -1166,13 +1173,13 @@ retry_std_delete: | |||
1166 | psx_del_no_retry: | 1173 | psx_del_no_retry: |
1167 | if (!rc) { | 1174 | if (!rc) { |
1168 | if (inode) | 1175 | if (inode) |
1169 | drop_nlink(inode); | 1176 | cifs_drop_nlink(inode); |
1170 | } else if (rc == -ENOENT) { | 1177 | } else if (rc == -ENOENT) { |
1171 | d_drop(dentry); | 1178 | d_drop(dentry); |
1172 | } else if (rc == -ETXTBSY) { | 1179 | } else if (rc == -ETXTBSY) { |
1173 | rc = cifs_rename_pending_delete(full_path, dentry, xid); | 1180 | rc = cifs_rename_pending_delete(full_path, dentry, xid); |
1174 | if (rc == 0) | 1181 | if (rc == 0) |
1175 | drop_nlink(inode); | 1182 | cifs_drop_nlink(inode); |
1176 | } else if ((rc == -EACCES) && (dosattr == 0) && inode) { | 1183 | } else if ((rc == -EACCES) && (dosattr == 0) && inode) { |
1177 | attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); | 1184 | attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); |
1178 | if (attrs == NULL) { | 1185 | if (attrs == NULL) { |
@@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode, | |||
1241 | * setting nlink not necessary except in cases where we failed to get it | 1248 | * setting nlink not necessary except in cases where we failed to get it |
1242 | * from the server or was set bogus | 1249 | * from the server or was set bogus |
1243 | */ | 1250 | */ |
1251 | spin_lock(&dentry->d_inode->i_lock); | ||
1244 | if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2)) | 1252 | if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2)) |
1245 | set_nlink(dentry->d_inode, 2); | 1253 | set_nlink(dentry->d_inode, 2); |
1246 | 1254 | spin_unlock(&dentry->d_inode->i_lock); | |
1247 | mode &= ~current_umask(); | 1255 | mode &= ~current_umask(); |
1248 | /* must turn on setgid bit if parent dir has it */ | 1256 | /* must turn on setgid bit if parent dir has it */ |
1249 | if (inode->i_mode & S_ISGID) | 1257 | if (inode->i_mode & S_ISGID) |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 09e4b3ae4564..e6ce3b112875 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, | |||
433 | if (old_file->d_inode) { | 433 | if (old_file->d_inode) { |
434 | cifsInode = CIFS_I(old_file->d_inode); | 434 | cifsInode = CIFS_I(old_file->d_inode); |
435 | if (rc == 0) { | 435 | if (rc == 0) { |
436 | spin_lock(&old_file->d_inode->i_lock); | ||
436 | inc_nlink(old_file->d_inode); | 437 | inc_nlink(old_file->d_inode); |
438 | spin_unlock(&old_file->d_inode->i_lock); | ||
437 | /* BB should we make this contingent on superblock flag NOATIME? */ | 439 | /* BB should we make this contingent on superblock flag NOATIME? */ |
438 | /* old_file->d_inode->i_ctime = CURRENT_TIME;*/ | 440 | /* old_file->d_inode->i_ctime = CURRENT_TIME;*/ |
439 | /* parent dir timestamps will update from srv | 441 | /* parent dir timestamps will update from srv |
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index a4ff5d547554..e4d3b9964167 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
@@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid) | |||
52 | cERROR(1, "Bad protocol string signature header %x", | 52 | cERROR(1, "Bad protocol string signature header %x", |
53 | *(unsigned int *) hdr->ProtocolId); | 53 | *(unsigned int *) hdr->ProtocolId); |
54 | if (mid != hdr->MessageId) | 54 | if (mid != hdr->MessageId) |
55 | cERROR(1, "Mids do not match"); | 55 | cERROR(1, "Mids do not match: %llu and %llu", mid, |
56 | hdr->MessageId); | ||
56 | } | 57 | } |
57 | cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId); | 58 | cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId); |
58 | return 1; | 59 | return 1; |
@@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length) | |||
107 | * ie Validate the wct via smb2_struct_sizes table above | 108 | * ie Validate the wct via smb2_struct_sizes table above |
108 | */ | 109 | */ |
109 | 110 | ||
110 | if (length < 2 + sizeof(struct smb2_hdr)) { | 111 | if (length < sizeof(struct smb2_pdu)) { |
111 | if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) { | 112 | if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) { |
112 | pdu->StructureSize2 = 0; | 113 | pdu->StructureSize2 = 0; |
113 | /* | 114 | /* |
@@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length) | |||
121 | return 1; | 122 | return 1; |
122 | } | 123 | } |
123 | if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) { | 124 | if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) { |
124 | cERROR(1, "SMB length greater than maximum, mid=%lld", mid); | 125 | cERROR(1, "SMB length greater than maximum, mid=%llu", mid); |
125 | return 1; | 126 | return 1; |
126 | } | 127 | } |
127 | 128 | ||
128 | if (check_smb2_hdr(hdr, mid)) | 129 | if (check_smb2_hdr(hdr, mid)) |
129 | return 1; | 130 | return 1; |
130 | 131 | ||
131 | if (hdr->StructureSize != SMB2_HEADER_SIZE) { | 132 | if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { |
132 | cERROR(1, "Illegal structure size %d", | 133 | cERROR(1, "Illegal structure size %u", |
133 | le16_to_cpu(hdr->StructureSize)); | 134 | le16_to_cpu(hdr->StructureSize)); |
134 | return 1; | 135 | return 1; |
135 | } | 136 | } |
@@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length) | |||
161 | if (4 + len != clc_len) { | 162 | if (4 + len != clc_len) { |
162 | cFYI(1, "Calculated size %u length %u mismatch mid %llu", | 163 | cFYI(1, "Calculated size %u length %u mismatch mid %llu", |
163 | clc_len, 4 + len, mid); | 164 | clc_len, 4 + len, mid); |
164 | if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */ | 165 | /* server can return one byte more */ |
165 | return 0; /* BB workaround Samba 3 bug SessSetup rsp */ | 166 | if (clc_len == 4 + len + 1) |
167 | return 0; | ||
166 | return 1; | 168 | return 1; |
167 | } | 169 | } |
168 | return 0; | 170 | return 0; |
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index f37a1b41b402..c5fbfac5d576 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h | |||
@@ -87,10 +87,6 @@ | |||
87 | 87 | ||
88 | #define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe) | 88 | #define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe) |
89 | 89 | ||
90 | #define SMB2_HEADER_SIZE __constant_le16_to_cpu(64) | ||
91 | |||
92 | #define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9) | ||
93 | |||
94 | /* | 90 | /* |
95 | * SMB2 Header Definition | 91 | * SMB2 Header Definition |
96 | * | 92 | * |
@@ -99,6 +95,9 @@ | |||
99 | * "PDU" : "Protocol Data Unit" (ie a network "frame") | 95 | * "PDU" : "Protocol Data Unit" (ie a network "frame") |
100 | * | 96 | * |
101 | */ | 97 | */ |
98 | |||
99 | #define SMB2_HEADER_STRUCTURE_SIZE __constant_le16_to_cpu(64) | ||
100 | |||
102 | struct smb2_hdr { | 101 | struct smb2_hdr { |
103 | __be32 smb2_buf_length; /* big endian on wire */ | 102 | __be32 smb2_buf_length; /* big endian on wire */ |
104 | /* length is only two or three bytes - with | 103 | /* length is only two or three bytes - with |
@@ -140,6 +139,9 @@ struct smb2_pdu { | |||
140 | * command code name for the struct. Note that structures must be packed. | 139 | * command code name for the struct. Note that structures must be packed. |
141 | * | 140 | * |
142 | */ | 141 | */ |
142 | |||
143 | #define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9) | ||
144 | |||
143 | struct smb2_err_rsp { | 145 | struct smb2_err_rsp { |
144 | struct smb2_hdr hdr; | 146 | struct smb2_hdr hdr; |
145 | __le16 StructureSize; | 147 | __le16 StructureSize; |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 83867ef348df..d9b639b95fa8 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, | |||
503 | /* convert the length into a more usable form */ | 503 | /* convert the length into a more usable form */ |
504 | if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { | 504 | if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { |
505 | struct kvec iov; | 505 | struct kvec iov; |
506 | int rc = 0; | ||
506 | 507 | ||
507 | iov.iov_base = mid->resp_buf; | 508 | iov.iov_base = mid->resp_buf; |
508 | iov.iov_len = len; | 509 | iov.iov_len = len; |
509 | /* FIXME: add code to kill session */ | 510 | /* FIXME: add code to kill session */ |
510 | if (cifs_verify_signature(&iov, 1, server, | 511 | rc = cifs_verify_signature(&iov, 1, server, |
511 | mid->sequence_number + 1) != 0) | 512 | mid->sequence_number + 1); |
512 | cERROR(1, "Unexpected SMB signature"); | 513 | if (rc) |
514 | cERROR(1, "SMB signature verification returned error = " | ||
515 | "%d", rc); | ||
513 | } | 516 | } |
514 | 517 | ||
515 | /* BB special case reconnect tid and uid here? */ | 518 | /* BB special case reconnect tid and uid here? */ |
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index df0de27c2733..e784a217b500 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c | |||
@@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) | |||
26 | struct completion complete; | 26 | struct completion complete; |
27 | 27 | ||
28 | bio_init(&bio); | 28 | bio_init(&bio); |
29 | bio.bi_max_vecs = 1; | ||
29 | bio.bi_io_vec = &bio_vec; | 30 | bio.bi_io_vec = &bio_vec; |
30 | bio_vec.bv_page = page; | 31 | bio_vec.bv_page = page; |
31 | bio_vec.bv_len = PAGE_SIZE; | 32 | bio_vec.bv_len = PAGE_SIZE; |
@@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
95 | struct address_space *mapping = super->s_mapping_inode->i_mapping; | 96 | struct address_space *mapping = super->s_mapping_inode->i_mapping; |
96 | struct bio *bio; | 97 | struct bio *bio; |
97 | struct page *page; | 98 | struct page *page; |
98 | struct request_queue *q = bdev_get_queue(sb->s_bdev); | 99 | unsigned int max_pages; |
99 | unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); | ||
100 | int i; | 100 | int i; |
101 | 101 | ||
102 | if (max_pages > BIO_MAX_PAGES) | 102 | max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); |
103 | max_pages = BIO_MAX_PAGES; | 103 | |
104 | bio = bio_alloc(GFP_NOFS, max_pages); | 104 | bio = bio_alloc(GFP_NOFS, max_pages); |
105 | BUG_ON(!bio); | 105 | BUG_ON(!bio); |
106 | 106 | ||
@@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
190 | { | 190 | { |
191 | struct logfs_super *super = logfs_super(sb); | 191 | struct logfs_super *super = logfs_super(sb); |
192 | struct bio *bio; | 192 | struct bio *bio; |
193 | struct request_queue *q = bdev_get_queue(sb->s_bdev); | 193 | unsigned int max_pages; |
194 | unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); | ||
195 | int i; | 194 | int i; |
196 | 195 | ||
197 | if (max_pages > BIO_MAX_PAGES) | 196 | max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); |
198 | max_pages = BIO_MAX_PAGES; | 197 | |
199 | bio = bio_alloc(GFP_NOFS, max_pages); | 198 | bio = bio_alloc(GFP_NOFS, max_pages); |
200 | BUG_ON(!bio); | 199 | BUG_ON(!bio); |
201 | 200 | ||
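Both logfs hunks stop deriving the vector count from queue_max_hw_sectors() and instead clamp the request to what the device accepts in a single bio via bio_get_nr_vecs(). A rough sketch of the allocation pattern as it looks on kernels of this vintage, with error handling trimmed:

        #include <linux/kernel.h>
        #include <linux/bio.h>
        #include <linux/blkdev.h>

        static struct bio *alloc_sized_bio(struct block_device *bdev, size_t nr_pages)
        {
                unsigned int max_pages;

                /* never ask for more vectors than the device takes per bio */
                max_pages = min(nr_pages, (size_t)bio_get_nr_vecs(bdev));
                return bio_alloc(GFP_NOFS, max_pages);
        }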
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c index a422f42238b2..6984562738d3 100644 --- a/fs/logfs/inode.c +++ b/fs/logfs/inode.c | |||
@@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode) | |||
156 | call_rcu(&inode->i_rcu, logfs_i_callback); | 156 | call_rcu(&inode->i_rcu, logfs_i_callback); |
157 | } | 157 | } |
158 | 158 | ||
159 | static void __logfs_destroy_meta_inode(struct inode *inode) | ||
160 | { | ||
161 | struct logfs_inode *li = logfs_inode(inode); | ||
162 | BUG_ON(li->li_block); | ||
163 | call_rcu(&inode->i_rcu, logfs_i_callback); | ||
164 | } | ||
165 | |||
159 | static void logfs_destroy_inode(struct inode *inode) | 166 | static void logfs_destroy_inode(struct inode *inode) |
160 | { | 167 | { |
161 | struct logfs_inode *li = logfs_inode(inode); | 168 | struct logfs_inode *li = logfs_inode(inode); |
162 | 169 | ||
170 | if (inode->i_ino < LOGFS_RESERVED_INOS) { | ||
171 | /* | ||
172 | * The reserved inodes are never destroyed unless we are in | ||
173 | * the unmount path. | ||
174 | */ | ||
175 | __logfs_destroy_meta_inode(inode); | ||
176 | return; | ||
177 | } | ||
178 | |||
163 | BUG_ON(list_empty(&li->li_freeing_list)); | 179 | BUG_ON(list_empty(&li->li_freeing_list)); |
164 | spin_lock(&logfs_inode_lock); | 180 | spin_lock(&logfs_inode_lock); |
165 | li->li_refcount--; | 181 | li->li_refcount--; |
@@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb) | |||
373 | { | 389 | { |
374 | struct logfs_super *super = logfs_super(sb); | 390 | struct logfs_super *super = logfs_super(sb); |
375 | /* kill the meta-inodes */ | 391 | /* kill the meta-inodes */ |
376 | iput(super->s_master_inode); | ||
377 | iput(super->s_segfile_inode); | 392 | iput(super->s_segfile_inode); |
393 | iput(super->s_master_inode); | ||
378 | iput(super->s_mapping_inode); | 394 | iput(super->s_mapping_inode); |
379 | } | 395 | } |
380 | 396 | ||
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 1e1c369df22b..2a09b8d73989 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c | |||
@@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area, | |||
565 | index = ofs >> PAGE_SHIFT; | 565 | index = ofs >> PAGE_SHIFT; |
566 | page_ofs = ofs & (PAGE_SIZE - 1); | 566 | page_ofs = ofs & (PAGE_SIZE - 1); |
567 | 567 | ||
568 | page = find_lock_page(mapping, index); | 568 | page = find_or_create_page(mapping, index, GFP_NOFS); |
569 | BUG_ON(!page); | 569 | BUG_ON(!page); |
570 | memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); | 570 | memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); |
571 | unlock_page(page); | 571 | unlock_page(page); |
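The journal.c fix matters because find_lock_page() only returns a page that is already in the page cache, while find_or_create_page() will allocate and insert one, so the BUG_ON(!page) that follows can no longer trip merely because the page had been reclaimed. A short sketch of the distinction; mapping and index are placeholders:

        #include <linux/pagemap.h>

        /*
         * find_lock_page()      - returns the page only if it is already cached,
         *                         NULL otherwise.
         * find_or_create_page() - additionally allocates and inserts a fresh page,
         *                         so it fails only on allocation failure.
         * Both return the page locked.
         */
        static struct page *get_wbuf_page(struct address_space *mapping, pgoff_t index)
        {
                return find_or_create_page(mapping, index, GFP_NOFS);
        }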
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index f1cb512c5019..5be0abef603d 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c | |||
@@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode) | |||
2189 | return; | 2189 | return; |
2190 | } | 2190 | } |
2191 | 2191 | ||
2192 | BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS); | ||
2193 | page = inode_to_page(inode); | 2192 | page = inode_to_page(inode); |
2194 | BUG_ON(!page); /* FIXME: Use emergency page */ | 2193 | BUG_ON(!page); /* FIXME: Use emergency page */ |
2195 | logfs_put_write_page(page); | 2194 | logfs_put_write_page(page); |
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index e28d090c98d6..038da0991794 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c | |||
@@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb) | |||
886 | 886 | ||
887 | static void map_invalidatepage(struct page *page, unsigned long l) | 887 | static void map_invalidatepage(struct page *page, unsigned long l) |
888 | { | 888 | { |
889 | BUG(); | 889 | return; |
890 | } | 890 | } |
891 | 891 | ||
892 | static int map_releasepage(struct page *page, gfp_t g) | 892 | static int map_releasepage(struct page *page, gfp_t g) |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index ced362533e3c..bfacf0d5a225 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -118,7 +118,8 @@ enum drm_mode_status { | |||
118 | .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ | 118 | .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ |
119 | .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ | 119 | .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ |
120 | .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ | 120 | .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ |
121 | .vscan = (vs), .flags = (f), .vrefresh = 0 | 121 | .vscan = (vs), .flags = (f), .vrefresh = 0, \ |
122 | .base.type = DRM_MODE_OBJECT_MODE | ||
122 | 123 | ||
123 | #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ | 124 | #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ |
124 | 125 | ||
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 5581980b14f6..3d6301b6ec16 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h | |||
@@ -359,8 +359,9 @@ struct drm_mode_mode_cmd { | |||
359 | struct drm_mode_modeinfo mode; | 359 | struct drm_mode_modeinfo mode; |
360 | }; | 360 | }; |
361 | 361 | ||
362 | #define DRM_MODE_CURSOR_BO (1<<0) | 362 | #define DRM_MODE_CURSOR_BO 0x01 |
363 | #define DRM_MODE_CURSOR_MOVE (1<<1) | 363 | #define DRM_MODE_CURSOR_MOVE 0x02 |
364 | #define DRM_MODE_CURSOR_FLAGS 0x03 | ||
364 | 365 | ||
365 | /* | 366 | /* |
366 | * depending on the value in flags different members are used. | 367 | * depending on the value in flags different members are used. |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 604382143bcf..594b419b7d20 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -82,10 +82,18 @@ | |||
82 | __x - (__x % (y)); \ | 82 | __x - (__x % (y)); \ |
83 | } \ | 83 | } \ |
84 | ) | 84 | ) |
85 | |||
86 | /* | ||
87 | * Divide positive or negative dividend by positive divisor and round | ||
88 | * to closest integer. Result is undefined for negative divisors. | ||
89 | */ | ||
85 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 90 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
86 | { \ | 91 | { \ |
87 | typeof(divisor) __divisor = divisor; \ | 92 | typeof(x) __x = x; \ |
88 | (((x) + ((__divisor) / 2)) / (__divisor)); \ | 93 | typeof(divisor) __d = divisor; \ |
94 | (((typeof(x))-1) >= 0 || (__x) >= 0) ? \ | ||
95 | (((__x) + ((__d) / 2)) / (__d)) : \ | ||
96 | (((__x) - ((__d) / 2)) / (__d)); \ | ||
89 | } \ | 97 | } \ |
90 | ) | 98 | ) |
91 | 99 | ||
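C integer division truncates toward zero, so adding half the divisor rounds correctly only for non-negative dividends; for negative ones, half the divisor has to be subtracted, which is exactly what the reworked DIV_ROUND_CLOSEST() does. A small userspace check of the two forms, simplified to plain ints:

        #include <stdio.h>

        #define DIV_ROUND_CLOSEST_OLD(x, d)  (((x) + (d) / 2) / (d))
        #define DIV_ROUND_CLOSEST_NEW(x, d) \
                ((x) >= 0 ? ((x) + (d) / 2) / (d) : ((x) - (d) / 2) / (d))

        int main(void)
        {
                /* -5 / 4 = -1.25, so the closest integer is -1 */
                printf("old: %d\n", DIV_ROUND_CLOSEST_OLD(-5, 4)); /* prints 0  (wrong) */
                printf("new: %d\n", DIV_ROUND_CLOSEST_NEW(-5, 4)); /* prints -1 (right) */
                return 0;
        }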
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 111aca5e97f3..4b27f9f503e4 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -239,6 +239,7 @@ struct mmc_card { | |||
239 | #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ | 239 | #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ |
240 | #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ | 240 | #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ |
241 | #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ | 241 | #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ |
242 | #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ | ||
242 | /* byte mode */ | 243 | /* byte mode */ |
243 | unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ | 244 | unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ |
244 | #define MMC_NO_POWER_NOTIFICATION 0 | 245 | #define MMC_NO_POWER_NOTIFICATION 0 |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index fc3526077348..6b4565c440c8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2149,7 +2149,7 @@ | |||
2149 | #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 | 2149 | #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 |
2150 | #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 | 2150 | #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 |
2151 | #define PCI_DEVICE_ID_NX2_5706S 0x16aa | 2151 | #define PCI_DEVICE_ID_NX2_5706S 0x16aa |
2152 | #define PCI_DEVICE_ID_NX2_57840_MF 0x16ab | 2152 | #define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 |
2153 | #define PCI_DEVICE_ID_NX2_5708S 0x16ac | 2153 | #define PCI_DEVICE_ID_NX2_5708S 0x16ac |
2154 | #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad | 2154 | #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad |
2155 | #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae | 2155 | #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae |
diff --git a/include/linux/time.h b/include/linux/time.h index b0bbd8f0130d..b51e664c83e7 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -125,6 +125,13 @@ static inline bool timespec_valid(const struct timespec *ts) | |||
125 | /* Can't have more nanoseconds than a second */ | 125 | /* Can't have more nanoseconds than a second */ |
126 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) | 126 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
127 | return false; | 127 | return false; |
128 | return true; | ||
129 | } | ||
130 | |||
131 | static inline bool timespec_valid_strict(const struct timespec *ts) | ||
132 | { | ||
133 | if (!timespec_valid(ts)) | ||
134 | return false; | ||
128 | /* Disallow values that could overflow ktime_t */ | 135 | /* Disallow values that could overflow ktime_t */ |
129 | if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) | 136 | if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) |
130 | return false; | 137 | return false; |
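timespec_valid_strict() layers the ktime_t overflow check on top of the basic validity test, so a timespec whose tv_sec is at or past KTIME_SEC_MAX is now rejected by the settimeofday and sleep-time injection paths in the timekeeping.c hunks further down instead of corrupting the timekeeper. A sketch of a value that passes the relaxed check but fails the strict one; the constants are assumed to come from the kernel headers referenced above:

        #include <linux/time.h>
        #include <linux/ktime.h>

        static bool would_be_rejected(void)
        {
                struct timespec ts = {
                        .tv_sec  = KTIME_SEC_MAX,  /* would overflow ktime_t */
                        .tv_nsec = 0,              /* nanoseconds alone are fine */
                };

                /* the relaxed check looks at each field in isolation */
                return timespec_valid(&ts) && !timespec_valid_strict(&ts);
        }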
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 802de56c41e8..2f322c38bd4d 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -136,6 +136,22 @@ static inline void tracepoint_synchronize_unregister(void) | |||
136 | postrcu; \ | 136 | postrcu; \ |
137 | } while (0) | 137 | } while (0) |
138 | 138 | ||
139 | #ifndef MODULE | ||
140 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ | ||
141 | static inline void trace_##name##_rcuidle(proto) \ | ||
142 | { \ | ||
143 | if (static_key_false(&__tracepoint_##name.key)) \ | ||
144 | __DO_TRACE(&__tracepoint_##name, \ | ||
145 | TP_PROTO(data_proto), \ | ||
146 | TP_ARGS(data_args), \ | ||
147 | TP_CONDITION(cond), \ | ||
148 | rcu_idle_exit(), \ | ||
149 | rcu_idle_enter()); \ | ||
150 | } | ||
151 | #else | ||
152 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) | ||
153 | #endif | ||
154 | |||
139 | /* | 155 | /* |
140 | * Make sure the alignment of the structure in the __tracepoints section will | 156 | * Make sure the alignment of the structure in the __tracepoints section will |
141 | * not add unwanted padding between the beginning of the section and the | 157 | * not add unwanted padding between the beginning of the section and the |
@@ -151,16 +167,8 @@ static inline void tracepoint_synchronize_unregister(void) | |||
151 | TP_ARGS(data_args), \ | 167 | TP_ARGS(data_args), \ |
152 | TP_CONDITION(cond),,); \ | 168 | TP_CONDITION(cond),,); \ |
153 | } \ | 169 | } \ |
154 | static inline void trace_##name##_rcuidle(proto) \ | 170 | __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ |
155 | { \ | 171 | PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ |
156 | if (static_key_false(&__tracepoint_##name.key)) \ | ||
157 | __DO_TRACE(&__tracepoint_##name, \ | ||
158 | TP_PROTO(data_proto), \ | ||
159 | TP_ARGS(data_args), \ | ||
160 | TP_CONDITION(cond), \ | ||
161 | rcu_idle_exit(), \ | ||
162 | rcu_idle_enter()); \ | ||
163 | } \ | ||
164 | static inline int \ | 172 | static inline int \ |
165 | register_trace_##name(void (*probe)(data_proto), void *data) \ | 173 | register_trace_##name(void (*probe)(data_proto), void *data) \ |
166 | { \ | 174 | { \ |
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index e1ce1048fe5f..4a045cda9c60 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h | |||
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache { | |||
18 | u16 ctmask; /* bitmask of ct events to be delivered */ | 18 | u16 ctmask; /* bitmask of ct events to be delivered */ |
19 | u16 expmask; /* bitmask of expect events to be delivered */ | 19 | u16 expmask; /* bitmask of expect events to be delivered */ |
20 | u32 pid; /* netlink pid of destroyer */ | 20 | u32 pid; /* netlink pid of destroyer */ |
21 | struct timer_list timeout; | ||
21 | }; | 22 | }; |
22 | 23 | ||
23 | static inline struct nf_conntrack_ecache * | 24 | static inline struct nf_conntrack_ecache * |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 0c1485e42be6..34e5eac81424 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -428,7 +428,7 @@ int do_settimeofday(const struct timespec *tv) | |||
428 | struct timespec ts_delta, xt; | 428 | struct timespec ts_delta, xt; |
429 | unsigned long flags; | 429 | unsigned long flags; |
430 | 430 | ||
431 | if (!timespec_valid(tv)) | 431 | if (!timespec_valid_strict(tv)) |
432 | return -EINVAL; | 432 | return -EINVAL; |
433 | 433 | ||
434 | write_seqlock_irqsave(&tk->lock, flags); | 434 | write_seqlock_irqsave(&tk->lock, flags); |
@@ -476,7 +476,7 @@ int timekeeping_inject_offset(struct timespec *ts) | |||
476 | 476 | ||
477 | /* Make sure the proposed value is valid */ | 477 | /* Make sure the proposed value is valid */ |
478 | tmp = timespec_add(tk_xtime(tk), *ts); | 478 | tmp = timespec_add(tk_xtime(tk), *ts); |
479 | if (!timespec_valid(&tmp)) { | 479 | if (!timespec_valid_strict(&tmp)) { |
480 | ret = -EINVAL; | 480 | ret = -EINVAL; |
481 | goto error; | 481 | goto error; |
482 | } | 482 | } |
@@ -659,7 +659,7 @@ void __init timekeeping_init(void) | |||
659 | struct timespec now, boot, tmp; | 659 | struct timespec now, boot, tmp; |
660 | 660 | ||
661 | read_persistent_clock(&now); | 661 | read_persistent_clock(&now); |
662 | if (!timespec_valid(&now)) { | 662 | if (!timespec_valid_strict(&now)) { |
663 | pr_warn("WARNING: Persistent clock returned invalid value!\n" | 663 | pr_warn("WARNING: Persistent clock returned invalid value!\n" |
664 | " Check your CMOS/BIOS settings.\n"); | 664 | " Check your CMOS/BIOS settings.\n"); |
665 | now.tv_sec = 0; | 665 | now.tv_sec = 0; |
@@ -667,7 +667,7 @@ void __init timekeeping_init(void) | |||
667 | } | 667 | } |
668 | 668 | ||
669 | read_boot_clock(&boot); | 669 | read_boot_clock(&boot); |
670 | if (!timespec_valid(&boot)) { | 670 | if (!timespec_valid_strict(&boot)) { |
671 | pr_warn("WARNING: Boot clock returned invalid value!\n" | 671 | pr_warn("WARNING: Boot clock returned invalid value!\n" |
672 | " Check your CMOS/BIOS settings.\n"); | 672 | " Check your CMOS/BIOS settings.\n"); |
673 | boot.tv_sec = 0; | 673 | boot.tv_sec = 0; |
@@ -713,7 +713,7 @@ static struct timespec timekeeping_suspend_time; | |||
713 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, | 713 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, |
714 | struct timespec *delta) | 714 | struct timespec *delta) |
715 | { | 715 | { |
716 | if (!timespec_valid(delta)) { | 716 | if (!timespec_valid_strict(delta)) { |
717 | printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " | 717 | printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " |
718 | "sleep delta value!\n"); | 718 | "sleep delta value!\n"); |
719 | return; | 719 | return; |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bd92431d4c49..4ada3be6e252 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -2562,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) | |||
2562 | break; | 2562 | break; |
2563 | 2563 | ||
2564 | default: | 2564 | default: |
2565 | BUG(); | 2565 | return -EINVAL; |
2566 | } | 2566 | } |
2567 | 2567 | ||
2568 | l = strlen(policy_modes[mode]); | 2568 | l = strlen(policy_modes[mode]); |
@@ -3260,6 +3260,7 @@ force_grow: | |||
3260 | 3260 | ||
3261 | /* cache_grow can reenable interrupts, then ac could change. */ | 3261 | /* cache_grow can reenable interrupts, then ac could change. */ |
3262 | ac = cpu_cache_get(cachep); | 3262 | ac = cpu_cache_get(cachep); |
3263 | node = numa_mem_id(); | ||
3263 | 3264 | ||
3264 | /* no objects in sight? abort */ | 3265 | /* no objects in sight? abort */ |
3265 | if (!x && (ac->avail == 0 || force_refill)) | 3266 | if (!x && (ac->avail == 0 || force_refill)) |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 346b1eb83a1f..e4ba3e70c174 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev) | |||
168 | struct napi_struct *napi; | 168 | struct napi_struct *napi; |
169 | int budget = 16; | 169 | int budget = 16; |
170 | 170 | ||
171 | WARN_ON_ONCE(!irqs_disabled()); | ||
172 | |||
173 | list_for_each_entry(napi, &dev->napi_list, dev_list) { | 171 | list_for_each_entry(napi, &dev->napi_list, dev_list) { |
174 | local_irq_enable(); | ||
175 | if (napi->poll_owner != smp_processor_id() && | 172 | if (napi->poll_owner != smp_processor_id() && |
176 | spin_trylock(&napi->poll_lock)) { | 173 | spin_trylock(&napi->poll_lock)) { |
177 | rcu_read_lock_bh(); | ||
178 | budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), | 174 | budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), |
179 | napi, budget); | 175 | napi, budget); |
180 | rcu_read_unlock_bh(); | ||
181 | spin_unlock(&napi->poll_lock); | 176 | spin_unlock(&napi->poll_lock); |
182 | 177 | ||
183 | if (!budget) { | 178 | if (!budget) |
184 | local_irq_disable(); | ||
185 | break; | 179 | break; |
186 | } | ||
187 | } | 180 | } |
188 | local_irq_disable(); | ||
189 | } | 181 | } |
190 | } | 182 | } |
191 | 183 | ||
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 8eec8f4a0536..ebdf06f938bf 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
124 | static struct kmem_cache *mrt_cachep __read_mostly; | 124 | static struct kmem_cache *mrt_cachep __read_mostly; |
125 | 125 | ||
126 | static struct mr_table *ipmr_new_table(struct net *net, u32 id); | 126 | static struct mr_table *ipmr_new_table(struct net *net, u32 id); |
127 | static void ipmr_free_table(struct mr_table *mrt); | ||
128 | |||
127 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, | 129 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, |
128 | struct sk_buff *skb, struct mfc_cache *cache, | 130 | struct sk_buff *skb, struct mfc_cache *cache, |
129 | int local); | 131 | int local); |
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt, | |||
131 | struct sk_buff *pkt, vifi_t vifi, int assert); | 133 | struct sk_buff *pkt, vifi_t vifi, int assert); |
132 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | 134 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
133 | struct mfc_cache *c, struct rtmsg *rtm); | 135 | struct mfc_cache *c, struct rtmsg *rtm); |
136 | static void mroute_clean_tables(struct mr_table *mrt); | ||
134 | static void ipmr_expire_process(unsigned long arg); | 137 | static void ipmr_expire_process(unsigned long arg); |
135 | 138 | ||
136 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | 139 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES |
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
271 | 274 | ||
272 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { | 275 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
273 | list_del(&mrt->list); | 276 | list_del(&mrt->list); |
274 | kfree(mrt); | 277 | ipmr_free_table(mrt); |
275 | } | 278 | } |
276 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 279 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
277 | } | 280 | } |
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
299 | 302 | ||
300 | static void __net_exit ipmr_rules_exit(struct net *net) | 303 | static void __net_exit ipmr_rules_exit(struct net *net) |
301 | { | 304 | { |
302 | kfree(net->ipv4.mrt); | 305 | ipmr_free_table(net->ipv4.mrt); |
303 | } | 306 | } |
304 | #endif | 307 | #endif |
305 | 308 | ||
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) | |||
336 | return mrt; | 339 | return mrt; |
337 | } | 340 | } |
338 | 341 | ||
342 | static void ipmr_free_table(struct mr_table *mrt) | ||
343 | { | ||
344 | del_timer_sync(&mrt->ipmr_expire_timer); | ||
345 | mroute_clean_tables(mrt); | ||
346 | kfree(mrt); | ||
347 | } | ||
348 | |||
339 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ | 349 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ |
340 | 350 | ||
341 | static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) | 351 | static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) |
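ipmr_free_table() exists so the per-table expire timer is stopped synchronously and pending cache entries are flushed before the table memory is released; kfree()ing a structure that still has an armed timer is a classic use-after-free. A generic sketch of that tear-down order, with an illustrative struct and a hypothetical cleanup helper:

        #include <linux/timer.h>
        #include <linux/slab.h>

        struct table {
                struct timer_list expire_timer;
                /* ... cached entries ... */
        };

        static void table_free(struct table *t)
        {
                /* wait for a running handler and keep it from re-arming */
                del_timer_sync(&t->expire_timer);
                /* drop remaining entries while the structure is still valid,
                 * e.g. clean_entries(t); -- hypothetical */
                kfree(t);
        }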
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 4ad9cf173992..9c87cde28ff8 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, | |||
502 | ret = nf_ct_expect_related(rtcp_exp); | 502 | ret = nf_ct_expect_related(rtcp_exp); |
503 | if (ret == 0) | 503 | if (ret == 0) |
504 | break; | 504 | break; |
505 | else if (ret != -EBUSY) { | 505 | else if (ret == -EBUSY) { |
506 | nf_ct_unexpect_related(rtp_exp); | ||
507 | continue; | ||
508 | } else if (ret < 0) { | ||
506 | nf_ct_unexpect_related(rtp_exp); | 509 | nf_ct_unexpect_related(rtp_exp); |
507 | port = 0; | 510 | port = 0; |
508 | break; | 511 | break; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index fd9ecb52c66b..82cf2a722b23 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | |||
934 | if (mtu < ip_rt_min_pmtu) | 934 | if (mtu < ip_rt_min_pmtu) |
935 | mtu = ip_rt_min_pmtu; | 935 | mtu = ip_rt_min_pmtu; |
936 | 936 | ||
937 | rcu_read_lock(); | ||
937 | if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { | 938 | if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { |
938 | struct fib_nh *nh = &FIB_RES_NH(res); | 939 | struct fib_nh *nh = &FIB_RES_NH(res); |
939 | 940 | ||
940 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, | 941 | update_or_create_fnhe(nh, fl4->daddr, 0, mtu, |
941 | jiffies + ip_rt_mtu_expires); | 942 | jiffies + ip_rt_mtu_expires); |
942 | } | 943 | } |
944 | rcu_read_unlock(); | ||
943 | return mtu; | 945 | return mtu; |
944 | } | 946 | } |
945 | 947 | ||
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | |||
956 | dst->obsolete = DST_OBSOLETE_KILL; | 958 | dst->obsolete = DST_OBSOLETE_KILL; |
957 | } else { | 959 | } else { |
958 | rt->rt_pmtu = mtu; | 960 | rt->rt_pmtu = mtu; |
959 | dst_set_expires(&rt->dst, ip_rt_mtu_expires); | 961 | rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires); |
960 | } | 962 | } |
961 | } | 963 | } |
962 | 964 | ||
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst) | |||
1263 | { | 1265 | { |
1264 | struct rtable *rt = (struct rtable *) dst; | 1266 | struct rtable *rt = (struct rtable *) dst; |
1265 | 1267 | ||
1266 | if (dst->flags & DST_NOCACHE) { | 1268 | if (!list_empty(&rt->rt_uncached)) { |
1267 | spin_lock_bh(&rt_uncached_lock); | 1269 | spin_lock_bh(&rt_uncached_lock); |
1268 | list_del(&rt->rt_uncached); | 1270 | list_del(&rt->rt_uncached); |
1269 | spin_unlock_bh(&rt_uncached_lock); | 1271 | spin_unlock_bh(&rt_uncached_lock); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 85308b90df80..6e38c6c23caa 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) | |||
2926 | * tcp_xmit_retransmit_queue(). | 2926 | * tcp_xmit_retransmit_queue(). |
2927 | */ | 2927 | */ |
2928 | static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, | 2928 | static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, |
2929 | int newly_acked_sacked, bool is_dupack, | 2929 | int prior_sacked, bool is_dupack, |
2930 | int flag) | 2930 | int flag) |
2931 | { | 2931 | { |
2932 | struct inet_connection_sock *icsk = inet_csk(sk); | 2932 | struct inet_connection_sock *icsk = inet_csk(sk); |
2933 | struct tcp_sock *tp = tcp_sk(sk); | 2933 | struct tcp_sock *tp = tcp_sk(sk); |
2934 | int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && | 2934 | int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && |
2935 | (tcp_fackets_out(tp) > tp->reordering)); | 2935 | (tcp_fackets_out(tp) > tp->reordering)); |
2936 | int newly_acked_sacked = 0; | ||
2936 | int fast_rexmit = 0; | 2937 | int fast_rexmit = 0; |
2937 | 2938 | ||
2938 | if (WARN_ON(!tp->packets_out && tp->sacked_out)) | 2939 | if (WARN_ON(!tp->packets_out && tp->sacked_out)) |
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, | |||
2992 | tcp_add_reno_sack(sk); | 2993 | tcp_add_reno_sack(sk); |
2993 | } else | 2994 | } else |
2994 | do_lost = tcp_try_undo_partial(sk, pkts_acked); | 2995 | do_lost = tcp_try_undo_partial(sk, pkts_acked); |
2996 | newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; | ||
2995 | break; | 2997 | break; |
2996 | case TCP_CA_Loss: | 2998 | case TCP_CA_Loss: |
2997 | if (flag & FLAG_DATA_ACKED) | 2999 | if (flag & FLAG_DATA_ACKED) |
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, | |||
3013 | if (is_dupack) | 3015 | if (is_dupack) |
3014 | tcp_add_reno_sack(sk); | 3016 | tcp_add_reno_sack(sk); |
3015 | } | 3017 | } |
3018 | newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; | ||
3016 | 3019 | ||
3017 | if (icsk->icsk_ca_state <= TCP_CA_Disorder) | 3020 | if (icsk->icsk_ca_state <= TCP_CA_Disorder) |
3018 | tcp_try_undo_dsack(sk); | 3021 | tcp_try_undo_dsack(sk); |
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3590 | int prior_packets; | 3593 | int prior_packets; |
3591 | int prior_sacked = tp->sacked_out; | 3594 | int prior_sacked = tp->sacked_out; |
3592 | int pkts_acked = 0; | 3595 | int pkts_acked = 0; |
3593 | int newly_acked_sacked = 0; | ||
3594 | bool frto_cwnd = false; | 3596 | bool frto_cwnd = false; |
3595 | 3597 | ||
3596 | /* If the ack is older than previous acks | 3598 | /* If the ack is older than previous acks |
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3666 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); | 3668 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); |
3667 | 3669 | ||
3668 | pkts_acked = prior_packets - tp->packets_out; | 3670 | pkts_acked = prior_packets - tp->packets_out; |
3669 | newly_acked_sacked = (prior_packets - prior_sacked) - | ||
3670 | (tp->packets_out - tp->sacked_out); | ||
3671 | 3671 | ||
3672 | if (tp->frto_counter) | 3672 | if (tp->frto_counter) |
3673 | frto_cwnd = tcp_process_frto(sk, flag); | 3673 | frto_cwnd = tcp_process_frto(sk, flag); |
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3681 | tcp_may_raise_cwnd(sk, flag)) | 3681 | tcp_may_raise_cwnd(sk, flag)) |
3682 | tcp_cong_avoid(sk, ack, prior_in_flight); | 3682 | tcp_cong_avoid(sk, ack, prior_in_flight); |
3683 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); | 3683 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); |
3684 | tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, | 3684 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
3685 | is_dupack, flag); | 3685 | is_dupack, flag); |
3686 | } else { | 3686 | } else { |
3687 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) | 3687 | if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) |
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3698 | no_queue: | 3698 | no_queue: |
3699 | /* If data was DSACKed, see if we can undo a cwnd reduction. */ | 3699 | /* If data was DSACKed, see if we can undo a cwnd reduction. */ |
3700 | if (flag & FLAG_DSACKING_ACK) | 3700 | if (flag & FLAG_DSACKING_ACK) |
3701 | tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, | 3701 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
3702 | is_dupack, flag); | 3702 | is_dupack, flag); |
3703 | /* If this ack opens up a zero window, clear backoff. It was | 3703 | /* If this ack opens up a zero window, clear backoff. It was |
3704 | * being used to time the probes, and is probably far higher than | 3704 | * being used to time the probes, and is probably far higher than |
@@ -3718,8 +3718,7 @@ old_ack: | |||
3718 | */ | 3718 | */ |
3719 | if (TCP_SKB_CB(skb)->sacked) { | 3719 | if (TCP_SKB_CB(skb)->sacked) { |
3720 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); | 3720 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); |
3721 | newly_acked_sacked = tp->sacked_out - prior_sacked; | 3721 | tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, |
3722 | tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, | ||
3723 | is_dupack, flag); | 3722 | is_dupack, flag); |
3724 | } | 3723 | } |
3725 | 3724 | ||
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 6dc7fd353ef5..282f3723ee19 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
167 | struct esp_data *esp = x->data; | 167 | struct esp_data *esp = x->data; |
168 | 168 | ||
169 | /* skb is pure payload to encrypt */ | 169 | /* skb is pure payload to encrypt */ |
170 | err = -ENOMEM; | ||
171 | |||
172 | aead = esp->aead; | 170 | aead = esp->aead; |
173 | alen = crypto_aead_authsize(aead); | 171 | alen = crypto_aead_authsize(aead); |
174 | 172 | ||
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
203 | } | 201 | } |
204 | 202 | ||
205 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 203 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); |
206 | if (!tmp) | 204 | if (!tmp) { |
205 | err = -ENOMEM; | ||
207 | goto error; | 206 | goto error; |
207 | } | ||
208 | 208 | ||
209 | seqhi = esp_tmp_seqhi(tmp); | 209 | seqhi = esp_tmp_seqhi(tmp); |
210 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 210 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 393355d37b47..513cab08a986 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | |||
1347 | /* Remove from tunnel list */ | 1347 | /* Remove from tunnel list */ |
1348 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | 1348 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); |
1349 | list_del_rcu(&tunnel->list); | 1349 | list_del_rcu(&tunnel->list); |
1350 | kfree_rcu(tunnel, rcu); | ||
1350 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | 1351 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); |
1351 | synchronize_rcu(); | ||
1352 | 1352 | ||
1353 | atomic_dec(&l2tp_tunnel_count); | 1353 | atomic_dec(&l2tp_tunnel_count); |
1354 | kfree(tunnel); | ||
1355 | } | 1354 | } |
1356 | 1355 | ||
1357 | /* Create a socket for the tunnel, if one isn't set up by | 1356 | /* Create a socket for the tunnel, if one isn't set up by |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a38ec6cdeee1..56d583e083a7 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg { | |||
163 | 163 | ||
164 | struct l2tp_tunnel { | 164 | struct l2tp_tunnel { |
165 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ | 165 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ |
166 | struct rcu_head rcu; | ||
166 | rwlock_t hlist_lock; /* protect session_hlist */ | 167 | rwlock_t hlist_lock; /* protect session_hlist */ |
167 | struct hlist_head session_hlist[L2TP_HASH_SIZE]; | 168 | struct hlist_head session_hlist[L2TP_HASH_SIZE]; |
168 | /* hashed list of sessions, | 169 | /* hashed list of sessions, |
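The two l2tp hunks above replace an explicit synchronize_rcu()/kfree() pair with kfree_rcu(), which requires embedding a struct rcu_head in the freed object. A minimal standalone sketch of that pattern, using a hypothetical struct and names rather than the l2tp code itself:

```c
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	struct rcu_head rcu;	/* lets kfree_rcu() chain its callback */
	int data;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

/* Unlink an item and free it after a grace period, without blocking.
 * Readers walking item_list under rcu_read_lock() never see freed memory.
 */
static void item_del(struct item *it)
{
	spin_lock_bh(&item_lock);
	list_del_rcu(&it->list);
	spin_unlock_bh(&item_lock);

	/* Defers the kfree() until pre-existing RCU readers finish;
	 * unlike synchronize_rcu() + kfree(), this path does not sleep,
	 * so it is safe to call with BH disabled or under other locks.
	 */
	kfree_rcu(it, rcu);
}
```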
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index acf712ffb5e6..c5e8c9c31f76 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1811 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, | 1811 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, |
1812 | sdata, NULL, NULL); | 1812 | sdata, NULL, NULL); |
1813 | } else { | 1813 | } else { |
1814 | int is_mesh_mcast = 1; | 1814 | /* DS -> MBSS (802.11-2012 13.11.3.3). |
1815 | const u8 *mesh_da; | 1815 | * For unicast with unknown forwarding information, |
1816 | * destination might be in the MBSS or if that fails | ||
1817 | * forwarded to another mesh gate. In either case | ||
1818 | * resolution will be handled in ieee80211_xmit(), so | ||
1819 | * leave the original DA. This also works for mcast */ | ||
1820 | const u8 *mesh_da = skb->data; | ||
1821 | |||
1822 | if (mppath) | ||
1823 | mesh_da = mppath->mpp; | ||
1824 | else if (mpath) | ||
1825 | mesh_da = mpath->dst; | ||
1826 | rcu_read_unlock(); | ||
1816 | 1827 | ||
1817 | if (is_multicast_ether_addr(skb->data)) | ||
1818 | /* DA TA mSA AE:SA */ | ||
1819 | mesh_da = skb->data; | ||
1820 | else { | ||
1821 | static const u8 bcast[ETH_ALEN] = | ||
1822 | { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | ||
1823 | if (mppath) { | ||
1824 | /* RA TA mDA mSA AE:DA SA */ | ||
1825 | mesh_da = mppath->mpp; | ||
1826 | is_mesh_mcast = 0; | ||
1827 | } else if (mpath) { | ||
1828 | mesh_da = mpath->dst; | ||
1829 | is_mesh_mcast = 0; | ||
1830 | } else { | ||
1831 | /* DA TA mSA AE:SA */ | ||
1832 | mesh_da = bcast; | ||
1833 | } | ||
1834 | } | ||
1835 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, | 1828 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, |
1836 | mesh_da, sdata->vif.addr); | 1829 | mesh_da, sdata->vif.addr); |
1837 | rcu_read_unlock(); | 1830 | if (is_multicast_ether_addr(mesh_da)) |
1838 | if (is_mesh_mcast) | 1831 | /* DA TA mSA AE:SA */ |
1839 | meshhdrlen = | 1832 | meshhdrlen = |
1840 | ieee80211_new_mesh_header(&mesh_hdr, | 1833 | ieee80211_new_mesh_header(&mesh_hdr, |
1841 | sdata, | 1834 | sdata, |
1842 | skb->data + ETH_ALEN, | 1835 | skb->data + ETH_ALEN, |
1843 | NULL); | 1836 | NULL); |
1844 | else | 1837 | else |
1838 | /* RA TA mDA mSA AE:DA SA */ | ||
1845 | meshhdrlen = | 1839 | meshhdrlen = |
1846 | ieee80211_new_mesh_header(&mesh_hdr, | 1840 | ieee80211_new_mesh_header(&mesh_hdr, |
1847 | sdata, | 1841 | sdata, |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 72bf32a84874..f51013c07b9f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, | |||
1171 | goto out_err; | 1171 | goto out_err; |
1172 | } | 1172 | } |
1173 | svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); | 1173 | svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); |
1174 | if (!svc->stats.cpustats) | 1174 | if (!svc->stats.cpustats) { |
1175 | ret = -ENOMEM; | ||
1175 | goto out_err; | 1176 | goto out_err; |
1177 | } | ||
1176 | 1178 | ||
1177 | /* I'm the first user of the service */ | 1179 | /* I'm the first user of the service */ |
1178 | atomic_set(&svc->usecnt, 0); | 1180 | atomic_set(&svc->usecnt, 0); |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index cf4875565d67..2ceec64b19f9 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack) | |||
249 | { | 249 | { |
250 | struct nf_conn *ct = (void *)ul_conntrack; | 250 | struct nf_conn *ct = (void *)ul_conntrack; |
251 | struct net *net = nf_ct_net(ct); | 251 | struct net *net = nf_ct_net(ct); |
252 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); | ||
253 | |||
254 | BUG_ON(ecache == NULL); | ||
252 | 255 | ||
253 | if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { | 256 | if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { |
254 | /* bad luck, let's retry again */ | 257 | /* bad luck, let's retry again */ |
255 | ct->timeout.expires = jiffies + | 258 | ecache->timeout.expires = jiffies + |
256 | (random32() % net->ct.sysctl_events_retry_timeout); | 259 | (random32() % net->ct.sysctl_events_retry_timeout); |
257 | add_timer(&ct->timeout); | 260 | add_timer(&ecache->timeout); |
258 | return; | 261 | return; |
259 | } | 262 | } |
260 | /* we've got the event delivered, now it's dying */ | 263 | /* we've got the event delivered, now it's dying */ |
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack) | |||
268 | void nf_ct_insert_dying_list(struct nf_conn *ct) | 271 | void nf_ct_insert_dying_list(struct nf_conn *ct) |
269 | { | 272 | { |
270 | struct net *net = nf_ct_net(ct); | 273 | struct net *net = nf_ct_net(ct); |
274 | struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); | ||
275 | |||
276 | BUG_ON(ecache == NULL); | ||
271 | 277 | ||
272 | /* add this conntrack to the dying list */ | 278 | /* add this conntrack to the dying list */ |
273 | spin_lock_bh(&nf_conntrack_lock); | 279 | spin_lock_bh(&nf_conntrack_lock); |
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct) | |||
275 | &net->ct.dying); | 281 | &net->ct.dying); |
276 | spin_unlock_bh(&nf_conntrack_lock); | 282 | spin_unlock_bh(&nf_conntrack_lock); |
277 | /* set a new timer to retry event delivery */ | 283 | /* set a new timer to retry event delivery */ |
278 | setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); | 284 | setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); |
279 | ct->timeout.expires = jiffies + | 285 | ecache->timeout.expires = jiffies + |
280 | (random32() % net->ct.sysctl_events_retry_timeout); | 286 | (random32() % net->ct.sysctl_events_retry_timeout); |
281 | add_timer(&ct->timeout); | 287 | add_timer(&ecache->timeout); |
282 | } | 288 | } |
283 | EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); | 289 | EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); |
284 | 290 | ||
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index da4fc37a8578..9807f3278fcb 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void) | |||
2790 | goto err_unreg_subsys; | 2790 | goto err_unreg_subsys; |
2791 | } | 2791 | } |
2792 | 2792 | ||
2793 | if (register_pernet_subsys(&ctnetlink_net_ops)) { | 2793 | ret = register_pernet_subsys(&ctnetlink_net_ops); |
2794 | if (ret < 0) { | ||
2794 | pr_err("ctnetlink_init: cannot register pernet operations\n"); | 2795 | pr_err("ctnetlink_init: cannot register pernet operations\n"); |
2795 | goto err_unreg_exp_subsys; | 2796 | goto err_unreg_exp_subsys; |
2796 | } | 2797 | } |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 169ab59ed9d4..14e2f3903142 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
480 | } | 480 | } |
481 | 481 | ||
482 | if (indev && skb_mac_header_was_set(skb)) { | 482 | if (indev && skb_mac_header_was_set(skb)) { |
483 | if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || | 483 | if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || |
484 | nla_put_be16(inst->skb, NFULA_HWLEN, | 484 | nla_put_be16(inst->skb, NFULA_HWLEN, |
485 | htons(skb->dev->hard_header_len)) || | 485 | htons(skb->dev->hard_header_len)) || |
486 | nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, | 486 | nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, |
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void) | |||
996 | 996 | ||
997 | #ifdef CONFIG_PROC_FS | 997 | #ifdef CONFIG_PROC_FS |
998 | if (!proc_create("nfnetlink_log", 0440, | 998 | if (!proc_create("nfnetlink_log", 0440, |
999 | proc_net_netfilter, &nful_file_ops)) | 999 | proc_net_netfilter, &nful_file_ops)) { |
1000 | status = -ENOMEM; | ||
1000 | goto cleanup_logger; | 1001 | goto cleanup_logger; |
1002 | } | ||
1001 | #endif | 1003 | #endif |
1002 | return status; | 1004 | return status; |
1003 | 1005 | ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1445d73533ed..527023823b5c 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1373 | dst_pid = addr->nl_pid; | 1373 | dst_pid = addr->nl_pid; |
1374 | dst_group = ffs(addr->nl_groups); | 1374 | dst_group = ffs(addr->nl_groups); |
1375 | err = -EPERM; | 1375 | err = -EPERM; |
1376 | if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) | 1376 | if ((dst_group || dst_pid) && |
1377 | !netlink_capable(sock, NL_NONROOT_SEND)) | ||
1377 | goto out; | 1378 | goto out; |
1378 | } else { | 1379 | } else { |
1379 | dst_pid = nlk->dst_pid; | 1380 | dst_pid = nlk->dst_pid; |
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void) | |||
2147 | rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); | 2148 | rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); |
2148 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; | 2149 | nl_table[NETLINK_USERSOCK].module = THIS_MODULE; |
2149 | nl_table[NETLINK_USERSOCK].registered = 1; | 2150 | nl_table[NETLINK_USERSOCK].registered = 1; |
2151 | nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND; | ||
2150 | 2152 | ||
2151 | netlink_table_ungrab(); | 2153 | netlink_table_ungrab(); |
2152 | } | 2154 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index aee7196aac36..c5c9e2a54218 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) | |||
1273 | spin_unlock(&f->lock); | 1273 | spin_unlock(&f->lock); |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | bool match_fanout_group(struct packet_type *ptype, struct sock * sk) | 1276 | static bool match_fanout_group(struct packet_type *ptype, struct sock * sk) |
1277 | { | 1277 | { |
1278 | if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) | 1278 | if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) |
1279 | return true; | 1279 | return true; |
diff --git a/net/socket.c b/net/socket.c index a5471f804d99..edc3c4af9085 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, | |||
2604 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); | 2604 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); |
2605 | set_fs(old_fs); | 2605 | set_fs(old_fs); |
2606 | if (!err) | 2606 | if (!err) |
2607 | err = compat_put_timeval(up, &ktv); | 2607 | err = compat_put_timeval(&ktv, up); |
2608 | 2608 | ||
2609 | return err; | 2609 | return err; |
2610 | } | 2610 | } |
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, | |||
2620 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); | 2620 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); |
2621 | set_fs(old_fs); | 2621 | set_fs(old_fs); |
2622 | if (!err) | 2622 | if (!err) |
2623 | err = compat_put_timespec(up, &kts); | 2623 | err = compat_put_timespec(&kts, up); |
2624 | 2624 | ||
2625 | return err; | 2625 | return err; |
2626 | } | 2626 | } |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 87cd0e4d4282..210be48d8ae3 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay) | |||
1994 | goto error; | 1994 | goto error; |
1995 | 1995 | ||
1996 | x->outer_mode = xfrm_get_mode(x->props.mode, family); | 1996 | x->outer_mode = xfrm_get_mode(x->props.mode, family); |
1997 | if (x->outer_mode == NULL) | 1997 | if (x->outer_mode == NULL) { |
1998 | err = -EPROTONOSUPPORT; | ||
1998 | goto error; | 1999 | goto error; |
2000 | } | ||
1999 | 2001 | ||
2000 | if (init_replay) { | 2002 | if (init_replay) { |
2001 | err = xfrm_init_replay(x); | 2003 | err = xfrm_init_replay(x); |
diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst index 6bf8e87f1dcf..c3f69ae275d1 100644 --- a/scripts/Makefile.fwinst +++ b/scripts/Makefile.fwinst | |||
@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@) | |||
42 | $(installed-fw-dirs): | 42 | $(installed-fw-dirs): |
43 | $(call cmd,mkdir) | 43 | $(call cmd,mkdir) |
44 | 44 | ||
45 | $(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %) | 45 | $(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%) |
46 | $(call cmd,install) | 46 | $(call cmd,install) |
47 | 47 | ||
48 | PHONY += __fw_install __fw_modinst FORCE | 48 | PHONY += __fw_install __fw_modinst FORCE |
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 4629038c9e5a..4235a6361fec 100644 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh | |||
@@ -211,7 +211,7 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then | |||
211 | 211 | ||
212 | if ! cmp -s System.map .tmp_System.map; then | 212 | if ! cmp -s System.map .tmp_System.map; then |
213 | echo >&2 Inconsistent kallsyms data | 213 | echo >&2 Inconsistent kallsyms data |
214 | echo >&2 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround | 214 | echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround |
215 | cleanup | 215 | cleanup |
216 | exit 1 | 216 | exit 1 |
217 | fi | 217 | fi |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index f560051a949e..f25c24c743f9 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec) | |||
1209 | kfree(codec); | 1209 | kfree(codec); |
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, | ||
1213 | hda_nid_t fg, unsigned int power_state); | ||
1214 | |||
1212 | static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, | 1215 | static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, |
1213 | unsigned int power_state); | 1216 | unsigned int power_state); |
1214 | 1217 | ||
@@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, | |||
1317 | AC_VERB_GET_SUBSYSTEM_ID, 0); | 1320 | AC_VERB_GET_SUBSYSTEM_ID, 0); |
1318 | } | 1321 | } |
1319 | 1322 | ||
1323 | codec->epss = snd_hda_codec_get_supported_ps(codec, | ||
1324 | codec->afg ? codec->afg : codec->mfg, | ||
1325 | AC_PWRST_EPSS); | ||
1326 | |||
1320 | /* power-up all before initialization */ | 1327 | /* power-up all before initialization */ |
1321 | hda_set_power_state(codec, | 1328 | hda_set_power_state(codec, |
1322 | codec->afg ? codec->afg : codec->mfg, | 1329 | codec->afg ? codec->afg : codec->mfg, |
@@ -3543,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, | |||
3543 | /* this delay seems necessary to avoid click noise at power-down */ | 3550 | /* this delay seems necessary to avoid click noise at power-down */ |
3544 | if (power_state == AC_PWRST_D3) { | 3551 | if (power_state == AC_PWRST_D3) { |
3545 | /* transition time less than 10ms for power down */ | 3552 | /* transition time less than 10ms for power down */ |
3546 | bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS); | 3553 | msleep(codec->epss ? 10 : 100); |
3547 | msleep(epss ? 10 : 100); | ||
3548 | } | 3554 | } |
3549 | 3555 | ||
3550 | /* repeat power states setting at most 10 times*/ | 3556 | /* repeat power states setting at most 10 times*/ |
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 7fbc1bcaf1a9..e5a7e19a8071 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h | |||
@@ -862,6 +862,7 @@ struct hda_codec { | |||
862 | unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ | 862 | unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ |
863 | unsigned int no_jack_detect:1; /* Machine has no jack-detection */ | 863 | unsigned int no_jack_detect:1; /* Machine has no jack-detection */ |
864 | unsigned int pcm_format_first:1; /* PCM format must be set first */ | 864 | unsigned int pcm_format_first:1; /* PCM format must be set first */ |
865 | unsigned int epss:1; /* supporting EPSS? */ | ||
865 | #ifdef CONFIG_SND_HDA_POWER_SAVE | 866 | #ifdef CONFIG_SND_HDA_POWER_SAVE |
866 | unsigned int power_on :1; /* current (global) power-state */ | 867 | unsigned int power_on :1; /* current (global) power-state */ |
867 | int power_transition; /* power-state in transition */ | 868 | int power_transition; /* power-state in transition */ |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index ea5775a1a7db..6f806d3e56bb 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -4543,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec, | |||
4543 | struct auto_pin_cfg *cfg = &spec->autocfg; | 4543 | struct auto_pin_cfg *cfg = &spec->autocfg; |
4544 | int i; | 4544 | int i; |
4545 | 4545 | ||
4546 | if (cfg->speaker_outs == 0) | ||
4547 | return; | ||
4548 | |||
4546 | for (i = 0; i < cfg->line_outs; i++) { | 4549 | for (i = 0; i < cfg->line_outs; i++) { |
4547 | if (presence) | 4550 | if (presence) |
4548 | break; | 4551 | break; |
@@ -5531,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) | |||
5531 | snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); | 5534 | snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); |
5532 | } | 5535 | } |
5533 | 5536 | ||
5537 | codec->epss = 0; /* longer delay needed for D3 */ | ||
5534 | codec->no_trigger_sense = 1; | 5538 | codec->no_trigger_sense = 1; |
5535 | codec->spec = spec; | 5539 | codec->spec = spec; |
5536 | 5540 | ||
diff --git a/sound/usb/card.c b/sound/usb/card.c index d5b5c3388e28..4a469f0cb6d4 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, | |||
553 | struct snd_usb_audio *chip) | 553 | struct snd_usb_audio *chip) |
554 | { | 554 | { |
555 | struct snd_card *card; | 555 | struct snd_card *card; |
556 | struct list_head *p; | 556 | struct list_head *p, *n; |
557 | 557 | ||
558 | if (chip == (void *)-1L) | 558 | if (chip == (void *)-1L) |
559 | return; | 559 | return; |
@@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, | |||
570 | snd_usb_stream_disconnect(p); | 570 | snd_usb_stream_disconnect(p); |
571 | } | 571 | } |
572 | /* release the endpoint resources */ | 572 | /* release the endpoint resources */ |
573 | list_for_each(p, &chip->ep_list) { | 573 | list_for_each_safe(p, n, &chip->ep_list) { |
574 | snd_usb_endpoint_free(p); | 574 | snd_usb_endpoint_free(p); |
575 | } | 575 | } |
576 | /* release the midi resources */ | 576 | /* release the midi resources */ |
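The card.c hunk switches from list_for_each() to list_for_each_safe() because the loop body frees the entries it visits. A short illustrative sketch of why the _safe variant is needed (hypothetical entry type, not the snd_usb code):

```c
#include <linux/list.h>
#include <linux/slab.h>

struct ep_entry {
	struct list_head list;
	/* ... per-endpoint state ... */
};

/* Freeing entries while walking the list: plain list_for_each() would
 * dereference p->next after p has been freed; list_for_each_safe()
 * caches the next pointer in 'n' before the body runs.
 */
static void free_all(struct list_head *head)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, head) {
		struct ep_entry *e = list_entry(p, struct ep_entry, list);

		list_del(p);
		kfree(e);
	}
}
```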
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index c41181202688..d6e2bb49c59c 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c | |||
@@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep) | |||
141 | * | 141 | * |
142 | * For implicit feedback, next_packet_size() is unused. | 142 | * For implicit feedback, next_packet_size() is unused. |
143 | */ | 143 | */ |
144 | static int next_packet_size(struct snd_usb_endpoint *ep) | 144 | int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) |
145 | { | 145 | { |
146 | unsigned long flags; | 146 | unsigned long flags; |
147 | int ret; | 147 | int ret; |
@@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep, | |||
177 | ep->retire_data_urb(ep->data_subs, urb); | 177 | ep->retire_data_urb(ep->data_subs, urb); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep, | ||
181 | struct snd_urb_ctx *ctx) | ||
182 | { | ||
183 | int i; | ||
184 | |||
185 | for (i = 0; i < ctx->packets; ++i) | ||
186 | ctx->packet_size[i] = next_packet_size(ep); | ||
187 | } | ||
188 | |||
189 | /* | 180 | /* |
190 | * Prepare a PLAYBACK urb for submission to the bus. | 181 | * Prepare a PLAYBACK urb for submission to the bus. |
191 | */ | 182 | */ |
@@ -370,7 +361,6 @@ static void snd_complete_urb(struct urb *urb) | |||
370 | goto exit_clear; | 361 | goto exit_clear; |
371 | } | 362 | } |
372 | 363 | ||
373 | prepare_outbound_urb_sizes(ep, ctx); | ||
374 | prepare_outbound_urb(ep, ctx); | 364 | prepare_outbound_urb(ep, ctx); |
375 | } else { | 365 | } else { |
376 | retire_inbound_urb(ep, ctx); | 366 | retire_inbound_urb(ep, ctx); |
@@ -799,7 +789,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, | |||
799 | /** | 789 | /** |
800 | * snd_usb_endpoint_start: start an snd_usb_endpoint | 790 | * snd_usb_endpoint_start: start an snd_usb_endpoint |
801 | * | 791 | * |
802 | * @ep: the endpoint to start | 792 | * @ep: the endpoint to start |
793 | * @can_sleep: flag indicating whether the operation is executed in | ||
794 | * non-atomic context | ||
803 | * | 795 | * |
804 | * A call to this function will increment the use count of the endpoint. | 796 | * A call to this function will increment the use count of the endpoint. |
805 | * In case it is not already running, the URBs for this endpoint will be | 797 | * In case it is not already running, the URBs for this endpoint will be |
@@ -809,7 +801,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, | |||
809 | * | 801 | * |
810 | * Returns an error if the URB submission failed, 0 in all other cases. | 802 | * Returns an error if the URB submission failed, 0 in all other cases. |
811 | */ | 803 | */ |
812 | int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) | 804 | int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep) |
813 | { | 805 | { |
814 | int err; | 806 | int err; |
815 | unsigned int i; | 807 | unsigned int i; |
@@ -821,6 +813,11 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) | |||
821 | if (++ep->use_count != 1) | 813 | if (++ep->use_count != 1) |
822 | return 0; | 814 | return 0; |
823 | 815 | ||
816 | /* just to be sure */ | ||
817 | deactivate_urbs(ep, 0, can_sleep); | ||
818 | if (can_sleep) | ||
819 | wait_clear_urbs(ep); | ||
820 | |||
824 | ep->active_mask = 0; | 821 | ep->active_mask = 0; |
825 | ep->unlink_mask = 0; | 822 | ep->unlink_mask = 0; |
826 | ep->phase = 0; | 823 | ep->phase = 0; |
@@ -850,7 +847,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) | |||
850 | goto __error; | 847 | goto __error; |
851 | 848 | ||
852 | if (usb_pipeout(ep->pipe)) { | 849 | if (usb_pipeout(ep->pipe)) { |
853 | prepare_outbound_urb_sizes(ep, urb->context); | ||
854 | prepare_outbound_urb(ep, urb->context); | 850 | prepare_outbound_urb(ep, urb->context); |
855 | } else { | 851 | } else { |
856 | prepare_inbound_urb(ep, urb->context); | 852 | prepare_inbound_urb(ep, urb->context); |
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h index ee2723fb174f..cbbbdf226d66 100644 --- a/sound/usb/endpoint.h +++ b/sound/usb/endpoint.h | |||
@@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, | |||
13 | struct audioformat *fmt, | 13 | struct audioformat *fmt, |
14 | struct snd_usb_endpoint *sync_ep); | 14 | struct snd_usb_endpoint *sync_ep); |
15 | 15 | ||
16 | int snd_usb_endpoint_start(struct snd_usb_endpoint *ep); | 16 | int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep); |
17 | void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, | 17 | void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, |
18 | int force, int can_sleep, int wait); | 18 | int force, int can_sleep, int wait); |
19 | int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); | 19 | int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); |
@@ -21,6 +21,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); | |||
21 | void snd_usb_endpoint_free(struct list_head *head); | 21 | void snd_usb_endpoint_free(struct list_head *head); |
22 | 22 | ||
23 | int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep); | 23 | int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep); |
24 | int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); | ||
24 | 25 | ||
25 | void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, | 26 | void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, |
26 | struct snd_usb_endpoint *sender, | 27 | struct snd_usb_endpoint *sender, |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 62ec808ed792..fd5e982fc98c 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, | |||
212 | } | 212 | } |
213 | } | 213 | } |
214 | 214 | ||
215 | static int start_endpoints(struct snd_usb_substream *subs) | 215 | static int start_endpoints(struct snd_usb_substream *subs, int can_sleep) |
216 | { | 216 | { |
217 | int err; | 217 | int err; |
218 | 218 | ||
@@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs) | |||
225 | snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); | 225 | snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); |
226 | 226 | ||
227 | ep->data_subs = subs; | 227 | ep->data_subs = subs; |
228 | err = snd_usb_endpoint_start(ep); | 228 | err = snd_usb_endpoint_start(ep, can_sleep); |
229 | if (err < 0) { | 229 | if (err < 0) { |
230 | clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); | 230 | clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); |
231 | return err; | 231 | return err; |
@@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs) | |||
236 | !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { | 236 | !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { |
237 | struct snd_usb_endpoint *ep = subs->sync_endpoint; | 237 | struct snd_usb_endpoint *ep = subs->sync_endpoint; |
238 | 238 | ||
239 | if (subs->data_endpoint->iface != subs->sync_endpoint->iface || | ||
240 | subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) { | ||
241 | err = usb_set_interface(subs->dev, | ||
242 | subs->sync_endpoint->iface, | ||
243 | subs->sync_endpoint->alt_idx); | ||
244 | if (err < 0) { | ||
245 | snd_printk(KERN_ERR | ||
246 | "%d:%d:%d: cannot set interface (%d)\n", | ||
247 | subs->dev->devnum, | ||
248 | subs->sync_endpoint->iface, | ||
249 | subs->sync_endpoint->alt_idx, err); | ||
250 | return -EIO; | ||
251 | } | ||
252 | } | ||
253 | |||
239 | snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); | 254 | snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); |
240 | 255 | ||
241 | ep->sync_slave = subs->data_endpoint; | 256 | ep->sync_slave = subs->data_endpoint; |
242 | err = snd_usb_endpoint_start(ep); | 257 | err = snd_usb_endpoint_start(ep, can_sleep); |
243 | if (err < 0) { | 258 | if (err < 0) { |
244 | clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); | 259 | clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); |
245 | return err; | 260 | return err; |
@@ -544,13 +559,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) | |||
544 | subs->last_frame_number = 0; | 559 | subs->last_frame_number = 0; |
545 | runtime->delay = 0; | 560 | runtime->delay = 0; |
546 | 561 | ||
547 | /* clear the pending deactivation on the target EPs */ | ||
548 | deactivate_endpoints(subs); | ||
549 | |||
550 | /* for playback, submit the URBs now; otherwise, the first hwptr_done | 562 | /* for playback, submit the URBs now; otherwise, the first hwptr_done |
551 | * updates for all URBs would happen at the same time when starting */ | 563 | * updates for all URBs would happen at the same time when starting */ |
552 | if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) | 564 | if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) |
553 | return start_endpoints(subs); | 565 | return start_endpoints(subs, 1); |
554 | 566 | ||
555 | return 0; | 567 | return 0; |
556 | } | 568 | } |
@@ -1032,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, | |||
1032 | struct urb *urb) | 1044 | struct urb *urb) |
1033 | { | 1045 | { |
1034 | struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; | 1046 | struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; |
1047 | struct snd_usb_endpoint *ep = subs->data_endpoint; | ||
1035 | struct snd_urb_ctx *ctx = urb->context; | 1048 | struct snd_urb_ctx *ctx = urb->context; |
1036 | unsigned int counts, frames, bytes; | 1049 | unsigned int counts, frames, bytes; |
1037 | int i, stride, period_elapsed = 0; | 1050 | int i, stride, period_elapsed = 0; |
@@ -1043,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, | |||
1043 | urb->number_of_packets = 0; | 1056 | urb->number_of_packets = 0; |
1044 | spin_lock_irqsave(&subs->lock, flags); | 1057 | spin_lock_irqsave(&subs->lock, flags); |
1045 | for (i = 0; i < ctx->packets; i++) { | 1058 | for (i = 0; i < ctx->packets; i++) { |
1046 | counts = ctx->packet_size[i]; | 1059 | if (ctx->packet_size[i]) |
1060 | counts = ctx->packet_size[i]; | ||
1061 | else | ||
1062 | counts = snd_usb_endpoint_next_packet_size(ep); | ||
1063 | |||
1047 | /* set up descriptor */ | 1064 | /* set up descriptor */ |
1048 | urb->iso_frame_desc[i].offset = frames * stride; | 1065 | urb->iso_frame_desc[i].offset = frames * stride; |
1049 | urb->iso_frame_desc[i].length = counts * stride; | 1066 | urb->iso_frame_desc[i].length = counts * stride; |
@@ -1094,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, | |||
1094 | subs->hwptr_done += bytes; | 1111 | subs->hwptr_done += bytes; |
1095 | if (subs->hwptr_done >= runtime->buffer_size * stride) | 1112 | if (subs->hwptr_done >= runtime->buffer_size * stride) |
1096 | subs->hwptr_done -= runtime->buffer_size * stride; | 1113 | subs->hwptr_done -= runtime->buffer_size * stride; |
1114 | |||
1115 | /* update delay with exact number of samples queued */ | ||
1116 | runtime->delay = subs->last_delay; | ||
1097 | runtime->delay += frames; | 1117 | runtime->delay += frames; |
1118 | subs->last_delay = runtime->delay; | ||
1119 | |||
1120 | /* realign last_frame_number */ | ||
1121 | subs->last_frame_number = usb_get_current_frame_number(subs->dev); | ||
1122 | subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ | ||
1123 | |||
1098 | spin_unlock_irqrestore(&subs->lock, flags); | 1124 | spin_unlock_irqrestore(&subs->lock, flags); |
1099 | urb->transfer_buffer_length = bytes; | 1125 | urb->transfer_buffer_length = bytes; |
1100 | if (period_elapsed) | 1126 | if (period_elapsed) |
@@ -1112,12 +1138,26 @@ static void retire_playback_urb(struct snd_usb_substream *subs, | |||
1112 | struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; | 1138 | struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; |
1113 | int stride = runtime->frame_bits >> 3; | 1139 | int stride = runtime->frame_bits >> 3; |
1114 | int processed = urb->transfer_buffer_length / stride; | 1140 | int processed = urb->transfer_buffer_length / stride; |
1141 | int est_delay; | ||
1115 | 1142 | ||
1116 | spin_lock_irqsave(&subs->lock, flags); | 1143 | spin_lock_irqsave(&subs->lock, flags); |
1117 | if (processed > runtime->delay) | 1144 | est_delay = snd_usb_pcm_delay(subs, runtime->rate); |
1118 | runtime->delay = 0; | 1145 | /* update delay with exact number of samples played */ |
1146 | if (processed > subs->last_delay) | ||
1147 | subs->last_delay = 0; | ||
1119 | else | 1148 | else |
1120 | runtime->delay -= processed; | 1149 | subs->last_delay -= processed; |
1150 | runtime->delay = subs->last_delay; | ||
1151 | |||
1152 | /* | ||
1153 | * Report when delay estimate is off by more than 2ms. | ||
1154 | * The error should be lower than 2ms since the estimate relies | ||
1155 | * on two reads of a counter updated every ms. | ||
1156 | */ | ||
1157 | if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) | ||
1158 | snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n", | ||
1159 | est_delay, subs->last_delay); | ||
1160 | |||
1121 | spin_unlock_irqrestore(&subs->lock, flags); | 1161 | spin_unlock_irqrestore(&subs->lock, flags); |
1122 | } | 1162 | } |
1123 | 1163 | ||
@@ -1175,7 +1215,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream | |||
1175 | 1215 | ||
1176 | switch (cmd) { | 1216 | switch (cmd) { |
1177 | case SNDRV_PCM_TRIGGER_START: | 1217 | case SNDRV_PCM_TRIGGER_START: |
1178 | err = start_endpoints(subs); | 1218 | err = start_endpoints(subs, 0); |
1179 | if (err < 0) | 1219 | if (err < 0) |
1180 | return err; | 1220 | return err; |
1181 | 1221 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 246852397e30..d617f69131d7 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp, | |||
1976 | if (copy_from_user(&csigset, sigmask_arg->sigset, | 1976 | if (copy_from_user(&csigset, sigmask_arg->sigset, |
1977 | sizeof csigset)) | 1977 | sizeof csigset)) |
1978 | goto out; | 1978 | goto out; |
1979 | } | 1979 | sigset_from_compat(&sigset, &csigset); |
1980 | sigset_from_compat(&sigset, &csigset); | 1980 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); |
1981 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); | 1981 | } else |
1982 | r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); | ||
1982 | break; | 1983 | break; |
1983 | } | 1984 | } |
1984 | default: | 1985 | default: |