author		Ingo Molnar <mingo@elte.hu>	2010-07-05 02:30:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-07-05 02:30:58 -0400
commit		08f8ba07998ab1b9efcdd3f28dadf6866a605ddb (patch)
tree		8e09855446f7b0312bb729d4b539090756e57927
parent		0879b100f3c187257729f36cba33d96ec2875766 (diff)
parent		815c4163b6c8ebf8152f42b0a5fd015cfdcedc78 (diff)
Merge commit 'v2.6.35-rc4' into perf/core
Merge reason: Pick up the latest perf fixes
Signed-off-by: Ingo Molnar <mingo@elte.hu>
179 files changed, 1866 insertions, 1451 deletions
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 41c95cc1dc1f..17ddd822b456 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -125,6 +125,11 @@ ibmasr:
 nowayout: Watchdog cannot be stopped once started
 	(default=kernel config parameter)
 -------------------------------------------------
+imx2_wdt:
+timeout: Watchdog timeout in seconds (default 60 s)
+nowayout: Watchdog cannot be stopped once started
+	(default=kernel config parameter)
+-------------------------------------------------
 indydog:
 nowayout: Watchdog cannot be stopped once started
 	(default=kernel config parameter)
diff --git a/MAINTAINERS b/MAINTAINERS
index f9730fa0b24c..9f90de2e00f8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -896,11 +896,13 @@ S:	Maintained
 
 ARM/SAMSUNG ARM ARCHITECTURES
 M:	Ben Dooks <ben-linux@fluff.org>
+M:	Kukjin Kim <kgene.kim@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.fluff.org/ben/linux/
 S:	Maintained
-F:	arch/arm/plat-s3c/
+F:	arch/arm/plat-samsung/
 F:	arch/arm/plat-s3c24xx/
+F:	arch/arm/plat-s5p/
 
 ARM/S3C2410 ARM ARCHITECTURE
 M:	Ben Dooks <ben-linux@fluff.org>
@@ -1148,7 +1150,7 @@ F:	drivers/mmc/host/atmel-mci.c
 F:	drivers/mmc/host/atmel-mci-regs.h
 
 ATMEL AT91 / AT32 SERIAL DRIVER
-M:	Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 S:	Supported
 F:	drivers/serial/atmel_serial.c
 
@@ -1160,18 +1162,18 @@ F:	drivers/video/atmel_lcdfb.c
 F:	include/video/atmel_lcdc.h
 
 ATMEL MACB ETHERNET DRIVER
-M:	Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 S:	Supported
 F:	drivers/net/macb.*
 
 ATMEL SPI DRIVER
-M:	Haavard Skinnemoen <hskinnemoen@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 S:	Supported
 F:	drivers/spi/atmel_spi.*
 
 ATMEL USBA UDC DRIVER
-M:	Haavard Skinnemoen <hskinnemoen@atmel.com>
-L:	kernel@avr32linux.org
+M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://avr32linux.org/twiki/bin/view/Main/AtmelUsbDeviceDriver
 S:	Supported
 F:	drivers/usb/gadget/atmel_usba_udc.*
@@ -2109,11 +2111,18 @@ F:	drivers/edac/i5000_edac.c
 
 EDAC-I5400
 M:	Mauro Carvalho Chehab <mchehab@redhat.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i5400_edac.c
 
+EDAC-I7CORE
+M:	Mauro Carvalho Chehab <mchehab@redhat.com>
+L:	linux-edac@vger.kernel.org
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+F:	drivers/edac/i7core_edac.c linux/edac_mce.h drivers/edac/edac_mce.c
+
 EDAC-I82975X
 M:	Ranganathan Desikan <ravi@jetztechnologies.com>
 M:	"Arvind R." <arvind@jetztechnologies.com>
@@ -3373,7 +3382,7 @@ KPROBES
 M:	Ananth N Mavinakayanahalli <ananth@in.ibm.com>
 M:	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 M:	"David S. Miller" <davem@davemloft.net>
-M:	Masami Hiramatsu <mhiramat@redhat.com>
+M:	Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
 S:	Maintained
 F:	Documentation/kprobes.txt
 F:	include/linux/kprobes.h
@@ -4621,6 +4630,12 @@ M:	Robert Jarzmik <robert.jarzmik@free.fr>
 L:	rtc-linux@googlegroups.com
 S:	Maintained
 
+QLOGIC QLA1280 SCSI DRIVER
+M:	Michael Reed <mdr@sgi.com>
+L:	linux-scsi@vger.kernel.org
+S:	Maintained
+F:	drivers/scsi/qla1280.[ch]
+
 QLOGIC QLA2XXX FC-SCSI DRIVER
 M:	Andrew Vasquez <andrew.vasquez@qlogic.com>
 M:	linux-driver@qlogic.com
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 35
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -883,80 +883,10 @@ PHONY += $(vmlinux-dirs)
 $(vmlinux-dirs): prepare scripts
 	$(Q)$(MAKE) $(build)=$@
 
-# Build the kernel release string
-#
-# The KERNELRELEASE value built here is stored in the file
-# include/config/kernel.release, and is used when executing several
-# make targets, such as "make install" or "make modules_install."
-#
-# The eventual kernel release string consists of the following fields,
-# shown in a hierarchical format to show how smaller parts are concatenated
-# to form the larger and final value, with values coming from places like
-# the Makefile, kernel config options, make command line options and/or
-# SCM tag information.
-#
-# $(KERNELVERSION)
-#   $(VERSION)		eg, 2
-#   $(PATCHLEVEL)	eg, 6
-#   $(SUBLEVEL)		eg, 18
-#   $(EXTRAVERSION)	eg, -rc6
-# $(localver-full)
-#   $(localver)
-#     localversion*		(files without backups, containing '~')
-#     $(CONFIG_LOCALVERSION)	(from kernel config setting)
-#   $(LOCALVERSION)		(from make command line, if provided)
-#   $(localver-extra)
-#     $(scm-identifier)		(unique SCM tag, if one exists)
-#       ./scripts/setlocalversion	(only with CONFIG_LOCALVERSION_AUTO)
-#       .scmversion			(only with CONFIG_LOCALVERSION_AUTO)
-#     +				(only without CONFIG_LOCALVERSION_AUTO
-#				 and without LOCALVERSION= and
-#				 repository is at non-tagged commit)
-#
-# For kernels without CONFIG_LOCALVERSION_AUTO compiled from an SCM that has
-# been revised beyond a tagged commit, `+' is appended to the version string
-# when not overridden by using "make LOCALVERSION=". This indicates that the
-# kernel is not a vanilla release version and has been modified.
-
-pattern = ".*/localversion[^~]*"
-string = $(shell cat /dev/null \
-	`find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort -u`)
-
-localver = $(subst $(space),, $(string) \
-	$(patsubst "%",%,$(CONFIG_LOCALVERSION)))
-
-# scripts/setlocalversion is called to create a unique identifier if the source
-# is managed by a known SCM and the repository has been revised since the last
-# tagged (release) commit. The format of the identifier is determined by the
-# SCM's implementation.
-#
-# .scmversion is used when generating rpm packages so we do not loose
-# the version information from the SCM when we do the build of the kernel
-# from the copied source
-ifeq ($(wildcard .scmversion),)
-scm-identifier = $(shell $(CONFIG_SHELL) \
-	$(srctree)/scripts/setlocalversion $(srctree))
-else
-scm-identifier = $(shell cat .scmversion 2> /dev/null)
-endif
-
-ifdef CONFIG_LOCALVERSION_AUTO
-localver-extra = $(scm-identifier)
-else
-ifneq ($(scm-identifier),)
-ifeq ("$(origin LOCALVERSION)", "undefined")
-localver-extra = +
-endif
-endif
-endif
-
-localver-full = $(localver)$(LOCALVERSION)$(localver-extra)
-
 # Store (new) KERNELRELASE string in include/config/kernel.release
-kernelrelease = $(KERNELVERSION)$(localver-full)
 include/config/kernel.release: include/config/auto.conf FORCE
 	$(Q)rm -f $@
-	$(Q)echo $(kernelrelease) > $@
+	$(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) scripts/setlocalversion $(srctree))" > $@
 
 
 # Things we need to do before we recursively start building the kernel
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1f254bd6c937..98922f7d2d12 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -955,7 +955,8 @@ config XSCALE_PMU
 	default y
 
 config CPU_HAS_PMU
-	depends on CPU_V6 || CPU_V7 || XSCALE_PMU
+	depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
+		   (!ARCH_OMAP3 || OMAP3_EMU)
 	default y
 	bool
 
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
index f3eabf1ecec3..833306ee9e7f 100644
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ b/arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -21,8 +21,8 @@ struct pxa2xx_udc_mach_info {
 	 * here.  Note that sometimes the signals go through inverters...
 	 */
 	bool	gpio_vbus_inverted;
-	u16	gpio_vbus;		/* high == vbus present */
+	int	gpio_vbus;		/* high == vbus present */
 	bool	gpio_pullup_inverted;
-	u16	gpio_pullup;		/* high == pullup activated */
+	int	gpio_pullup;		/* high == pullup activated */
 };
 
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 6a89567ffc5b..7bed3daf83b8 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -91,7 +91,11 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+#if __LINUX_ARM_ARCH__ == 6
+#define cpu_relax()	smp_mb()
+#else
 #define cpu_relax()	barrier()
+#endif
 
 /*
  * Create a new kernel thread
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5b7cfafc0720..417c392ddf1c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -201,7 +201,7 @@ armpmu_event_update(struct perf_event *event,
 {
 	int shift = 64 - 32;
 	s64 prev_raw_count, new_raw_count;
-	s64 delta;
+	u64 delta;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
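The only change in this hunk is the type of delta. Reading it against the surrounding function rather than anything stated in the merge: the 32-bit counter value is shifted into the upper half of a 64-bit word before the subtraction, and an arithmetic right shift of a signed delta drags sign bits back in whenever the counter's top bit is set. A stand-alone sketch of the arithmetic, assuming the same shift = 64 - 32 scheme and the two's-complement behaviour typical of these targets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two successive 32-bit PMU counter readings; the new value has its
	 * MSB set and the counter has NOT wrapped. */
	uint64_t prev = 0x00000010, cur = 0x90000000;
	int shift = 64 - 32;

	/* Difference of the values shifted into the top 32 bits. */
	uint64_t diff = (cur << shift) - (prev << shift);

	uint64_t u_delta = diff >> shift;		/* logical shift: zeros shifted in */
	int64_t  s_delta = (int64_t)diff >> shift;	/* arithmetic shift: sign bits shifted in */

	/* u_delta is 0x8ffffff0 (the correct delta); s_delta is a large negative number. */
	printf("u64 delta: 0x%llx\n", (unsigned long long)u_delta);
	printf("s64 delta: %lld\n", (long long)s_delta);
	return 0;
}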
diff --git a/arch/arm/mach-mx3/mach-mx31lilly.c b/arch/arm/mach-mx3/mach-mx31lilly.c
index d3d5877c750e..b2c7f512070f 100644
--- a/arch/arm/mach-mx3/mach-mx31lilly.c
+++ b/arch/arm/mach-mx3/mach-mx31lilly.c
@@ -115,6 +115,8 @@ static struct platform_device physmap_flash_device = {
 
 /* USB */
 
+#if defined(CONFIG_USB_ULPI)
+
 #define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
 		     PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
 
@@ -244,10 +246,20 @@ static struct mxc_usbh_platform_data usbh2_pdata = {
 	.flags	= MXC_EHCI_POWER_PINS_ENABLED,
 };
 
-static struct platform_device *devices[] __initdata = {
-	&smsc91x_device,
-	&physmap_flash_device,
-};
+static void lilly1131_usb_init(void)
+{
+	usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+				USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+	usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+				USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+
+	mxc_register_device(&mxc_usbh1, &usbh1_pdata);
+	mxc_register_device(&mxc_usbh2, &usbh2_pdata);
+}
+
+#else
+static inline void lilly1131_usb_init(void) {}
+#endif /* CONFIG_USB_ULPI */
 
 /* SPI */
 
@@ -279,6 +291,11 @@ static struct spi_board_info mc13783_dev __initdata = {
 	.platform_data	= &mc13783_pdata,
 };
 
+static struct platform_device *devices[] __initdata = {
+	&smsc91x_device,
+	&physmap_flash_device,
+};
+
 static int mx31lilly_baseboard;
 core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444);
 
@@ -321,13 +338,7 @@ static void __init mx31lilly_board_init(void)
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 
 	/* USB */
-	usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-				USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
-	usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-				USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
-
-	mxc_register_device(&mxc_usbh1, &usbh1_pdata);
-	mxc_register_device(&mxc_usbh2, &usbh2_pdata);
+	lilly1131_usb_init();
 }
 
 static void __init mx31lilly_timer_init(void)
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index f848ba8dbc16..a04cffd691c5 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -538,9 +538,7 @@ static void ads7846_dev_init(void)
 		printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
 
 	gpio_direction_input(OMAP3_STALKER_TS_GPIO);
-
-	omap_set_gpio_debounce(OMAP3_STALKER_TS_GPIO, 1);
-	omap_set_gpio_debounce_time(OMAP3_STALKER_TS_GPIO, 0xa);
+	gpio_set_debounce(OMAP3_STALKER_TS_GPIO, 310);
 }
 
 static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 02804224517b..e10db7a90cb2 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -1369,6 +1369,7 @@ static struct clk emif1_ick = {
 	.ops		= &clkops_omap2_dflt,
 	.enable_reg	= OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
+	.flags		= ENABLE_ON_INIT,
 	.clkdm_name	= "l3_emif_clkdm",
 	.parent		= &ddrphy_ck,
 	.recalc		= &followparent_recalc,
@@ -1379,6 +1380,7 @@ static struct clk emif2_ick = {
 	.ops		= &clkops_omap2_dflt,
 	.enable_reg	= OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
+	.flags		= ENABLE_ON_INIT,
 	.clkdm_name	= "l3_emif_clkdm",
 	.parent		= &ddrphy_ck,
 	.recalc		= &followparent_recalc,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 95c9a5f774e1..b7a4133267d8 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -409,10 +409,11 @@ static int _init_main_clk(struct omap_hwmod *oh)
 		return 0;
 
 	oh->_clk = omap_clk_get_by_name(oh->main_clk);
-	if (!oh->_clk)
+	if (!oh->_clk) {
 		pr_warning("omap_hwmod: %s: cannot clk_get main_clk %s\n",
 			   oh->name, oh->main_clk);
 		return -EINVAL;
+	}
 
 	if (!oh->_clk->clkdm)
 		pr_warning("omap_hwmod: %s: missing clockdomain for %s.\n",
@@ -444,10 +445,11 @@ static int _init_interface_clks(struct omap_hwmod *oh)
 			continue;
 
 		c = omap_clk_get_by_name(os->clk);
-		if (!c)
+		if (!c) {
 			pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
 				   oh->name, os->clk);
 			ret = -EINVAL;
+		}
 		os->_clk = c;
 	}
 
@@ -470,10 +472,11 @@ static int _init_opt_clks(struct omap_hwmod *oh)
 
 	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) {
 		c = omap_clk_get_by_name(oc->clk);
-		if (!c)
+		if (!c) {
 			pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
 				   oh->name, oc->clk);
 			ret = -EINVAL;
+		}
 		oc->_clk = c;
 	}
 
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 2e967716cc3f..b88737fd6cfe 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -99,7 +99,7 @@ static void omap3_enable_io_chain(void)
 		/* Do a readback to assure write has been done */
 		prm_read_mod_reg(WKUP_MOD, PM_WKEN);
 
-		while (!(prm_read_mod_reg(WKUP_MOD, PM_WKST) &
+		while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
 			 OMAP3430_ST_IO_CHAIN_MASK)) {
 			timeout++;
 			if (timeout > 1000) {
@@ -108,7 +108,7 @@ static void omap3_enable_io_chain(void)
 				return;
 			}
 			prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
-					     WKUP_MOD, PM_WKST);
+					     WKUP_MOD, PM_WKEN);
 		}
 	}
 }
diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-ehci.c
index c68f799e83c5..d72d1ac30333 100644
--- a/arch/arm/mach-omap2/usb-ehci.c
+++ b/arch/arm/mach-omap2/usb-ehci.c
@@ -20,6 +20,8 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
+
 #include <asm/io.h>
 #include <plat/mux.h>
 
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index d60db87dde08..fa6a708b4099 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -697,7 +697,7 @@ static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
 };
 
 /* Board I2C devices. */
-static struct i2c_board_info __initdata mioa701_i2c_devices[] = {
+static struct i2c_board_info mioa701_i2c_devices[] = {
 	{
 		I2C_BOARD_INFO("mt9m111", 0x5d),
 	},
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index f5d1ae3db3a4..d303c6929d32 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -3,8 +3,9 @@
  *
  * Support for the Zipit Z2 Handheld device.
  *
- * Author: Ken McGuire
- * Created: Jan 25, 2009
+ * Copyright (C) 2009-2010 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on research and code by: Ken McGuire
  * Based on mainstone.c as modified for the Zipit Z2.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -157,21 +158,14 @@ static struct mtd_partition z2_flash_parts[] = {
 	{
 		.name	= "U-Boot Bootloader",
 		.offset	= 0x0,
-		.size	= 0x20000,
-	},
-	{
-		.name	= "Linux Kernel",
-		.offset	= 0x20000,
-		.size	= 0x220000,
-	},
-	{
-		.name	= "Filesystem",
-		.offset	= 0x240000,
-		.size	= 0x5b0000,
-	},
-	{
+		.size	= 0x40000,
+	}, {
 		.name	= "U-Boot Environment",
-		.offset	= 0x7f0000,
+		.offset	= 0x40000,
+		.size	= 0x60000,
+	}, {
+		.name	= "Flash",
+		.offset	= 0x60000,
 		.size	= MTDPART_SIZ_FULL,
 	},
 };
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index ee5e392430e8..b4575ae9648e 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -18,6 +18,7 @@ config REALVIEW_EB_ARM11MP
 	bool "Support ARM11MPCore tile"
 	depends on MACH_REALVIEW_EB
 	select CPU_V6
+	select ARCH_HAS_BARRIERS if SMP
 	help
 	  Enable support for the ARM11MPCore tile on the Realview platform.
 
@@ -35,6 +36,7 @@ config MACH_REALVIEW_PB11MP
 	select CPU_V6
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
+	select ARCH_HAS_BARRIERS if SMP
 	help
 	  Include support for the ARM(R) RealView MPCore Platform Baseboard.
 	  PB11MPCore is a platform with an on-board ARM11MPCore and has
diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h
new file mode 100644
index 000000000000..0c5d749d7b5f
--- /dev/null
+++ b/arch/arm/mach-realview/include/mach/barriers.h
@@ -0,0 +1,8 @@
+/*
+ * Barriers redefined for RealView ARM11MPCore platforms with L220 cache
+ * controller to work around hardware errata causing the outer_sync()
+ * operation to deadlock the system.
+ */
+#define mb()		dsb()
+#define rmb()		dmb()
+#define wmb()		mb()
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 422ccd70d5f5..4425018fab82 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -32,6 +32,7 @@
 #include <asm/leds.h>
 #include <asm/mach-types.h>
 #include <asm/pmu.h>
+#include <asm/pgtable.h>
 #include <asm/hardware/gic.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/localtimer.h>
@@ -457,7 +458,7 @@ static void __init realview_eb_init(void)
 
 MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
 	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
-	.phys_io	= REALVIEW_EB_UART0_BASE,
+	.phys_io	= REALVIEW_EB_UART0_BASE & SECTION_MASK,
 	.io_pg_offst	= (IO_ADDRESS(REALVIEW_EB_UART0_BASE) >> 18) & 0xfffc,
 	.boot_params	= PHYS_OFFSET + 0x00000100,
 	.fixup		= realview_fixup,
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index 96568ebfa2bb..099a1f125cf8 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -32,6 +32,7 @@
 #include <asm/leds.h>
 #include <asm/mach-types.h>
 #include <asm/pmu.h>
+#include <asm/pgtable.h>
 #include <asm/hardware/gic.h>
 #include <asm/hardware/cache-l2x0.h>
 
@@ -351,7 +352,7 @@ static void __init realview_pb1176_init(void)
 
 MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
 	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
-	.phys_io	= REALVIEW_PB1176_UART0_BASE,
+	.phys_io	= REALVIEW_PB1176_UART0_BASE & SECTION_MASK,
 	.io_pg_offst	= (IO_ADDRESS(REALVIEW_PB1176_UART0_BASE) >> 18) & 0xfffc,
 	.boot_params	= PHYS_OFFSET + 0x00000100,
 	.fixup		= realview_pb1176_fixup,
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 7fbefbbebaf0..0e07a5ccb75f 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -32,6 +32,7 @@
 #include <asm/leds.h>
 #include <asm/mach-types.h>
 #include <asm/pmu.h>
+#include <asm/pgtable.h>
 #include <asm/hardware/gic.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/localtimer.h>
@@ -373,7 +374,7 @@ static void __init realview_pb11mp_init(void)
 
 MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
 	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
-	.phys_io	= REALVIEW_PB11MP_UART0_BASE,
+	.phys_io	= REALVIEW_PB11MP_UART0_BASE & SECTION_MASK,
 	.io_pg_offst	= (IO_ADDRESS(REALVIEW_PB11MP_UART0_BASE) >> 18) & 0xfffc,
 	.boot_params	= PHYS_OFFSET + 0x00000100,
 	.fixup		= realview_fixup,
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index d3c113b3dfce..ac2f06f1ca50 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -31,6 +31,7 @@
 #include <asm/leds.h>
 #include <asm/mach-types.h>
 #include <asm/pmu.h>
+#include <asm/pgtable.h>
 #include <asm/hardware/gic.h>
 
 #include <asm/mach/arch.h>
@@ -323,7 +324,7 @@ static void __init realview_pba8_init(void)
 
 MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
 	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
-	.phys_io	= REALVIEW_PBA8_UART0_BASE,
+	.phys_io	= REALVIEW_PBA8_UART0_BASE & SECTION_MASK,
 	.io_pg_offst	= (IO_ADDRESS(REALVIEW_PBA8_UART0_BASE) >> 18) & 0xfffc,
 	.boot_params	= PHYS_OFFSET + 0x00000100,
 	.fixup		= realview_fixup,
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index a235ba30996b..08fd683adc4c 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -31,6 +31,7 @@
 #include <asm/mach-types.h>
 #include <asm/pmu.h>
 #include <asm/smp_twd.h>
+#include <asm/pgtable.h>
 #include <asm/hardware/gic.h>
 #include <asm/hardware/cache-l2x0.h>
 
@@ -409,7 +410,7 @@ static void __init realview_pbx_init(void)
 
 MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
 	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
-	.phys_io	= REALVIEW_PBX_UART0_BASE,
+	.phys_io	= REALVIEW_PBX_UART0_BASE & SECTION_MASK,
 	.io_pg_offst	= (IO_ADDRESS(REALVIEW_PBX_UART0_BASE) >> 18) & 0xfffc,
 	.boot_params	= PHYS_OFFSET + 0x00000100,
 	.fixup		= realview_pbx_fixup,
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 9b11eedba65f..6353459bb567 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -10,6 +10,7 @@
 #include <linux/amba/clcd.h>
 
 #include <asm/clkdev.h>
+#include <asm/pgtable.h>
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/hardware/gic.h>
@@ -236,7 +237,7 @@ static void ct_ca9x4_init(void)
 }
 
 MACHINE_START(VEXPRESS, "ARM-Versatile Express CA9x4")
-	.phys_io	= V2M_UART0,
+	.phys_io	= V2M_UART0 & SECTION_MASK,
 	.io_pg_offst	= (__MMIO_P2V(V2M_UART0) >> 18) & 0xfffc,
 	.boot_params	= PHYS_OFFSET + 0x00000100,
 	.map_io		= ct_ca9x4_map_io,
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 346ae14824a5..101105e52610 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -735,6 +735,25 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config DMA_CACHE_RWFO
+	bool "Enable read/write for ownership DMA cache maintenance"
+	depends on CPU_V6 && SMP
+	default y
+	help
+	  The Snoop Control Unit on ARM11MPCore does not detect the
+	  cache maintenance operations and the dma_{map,unmap}_area()
+	  functions may leave stale cache entries on other CPUs. By
+	  enabling this option, Read or Write For Ownership in the ARMv6
+	  DMA cache maintenance functions is performed. These LDR/STR
+	  instructions change the cache line state to shared or modified
+	  so that the cache operation has the desired effect.
+
+	  Note that the workaround is only valid on processors that do
+	  not perform speculative loads into the D-cache. For such
+	  processors, if cache maintenance operations are not broadcast
+	  in hardware, other workarounds are needed (e.g. cache
+	  maintenance broadcasting in software via FIQ).
+
 config OUTER_CACHE
 	bool
 
@@ -794,6 +813,8 @@ config ARM_L1_CACHE_SHIFT
 
 config ARM_DMA_MEM_BUFFERABLE
 	bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
+		     MACH_REALVIEW_PB11MP)
 	default y if CPU_V6 || CPU_V7
 	help
 	  Historically, the kernel has used strongly ordered mappings to
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index e46ecd847138..86aa689ef1aa 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,8 +211,9 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
 1:
-#ifdef CONFIG_SMP
-	str	r0, [r0]			@ write for ownership
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldr	r2, [r0]			@ read for ownership
+	str	r2, [r0]			@ write for ownership
 #endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
@@ -234,7 +235,7 @@ v6_dma_inv_range:
 v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 #endif
 #ifdef HARVARD_CACHE
@@ -257,7 +258,7 @@ v6_dma_clean_range:
 ENTRY(v6_dma_flush_range)
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 	str	r2, [r0]			@ write for ownership
 #endif
@@ -283,9 +284,13 @@ ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_FROM_DEVICE
 	beq	v6_dma_inv_range
+#ifndef CONFIG_DMA_CACHE_RWFO
+	b	v6_dma_clean_range
+#else
 	teq	r2, #DMA_TO_DEVICE
 	beq	v6_dma_clean_range
 	b	v6_dma_flush_range
+#endif
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -295,6 +300,11 @@ ENDPROC(v6_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+#ifndef CONFIG_DMA_CACHE_RWFO
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
+#endif
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 13fa536d82e6..9e7742f0a102 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -24,15 +24,6 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
-/* Sanity check size */
-#if (CONSISTENT_DMA_SIZE % SZ_2M)
-#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
-#endif
-
-#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
-
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = ISA_DMA_THRESHOLD;
@@ -123,6 +114,15 @@ static void __dma_free_buffer(struct page *page, size_t size)
 }
 
 #ifdef CONFIG_MMU
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
+
+#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index c64875f11fac..44bafdab2dce 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -541,11 +541,11 @@ void omap_dm_timer_stop(struct omap_dm_timer *timer)
 		 * timer is stopped
 		 */
		udelay(3500000 / clk_get_rate(timer->fclk) + 1);
-		/* Ack possibly pending interrupt */
-		omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
-				OMAP_TIMER_INT_OVERFLOW);
 #endif
 	}
+	/* Ack possibly pending interrupt */
+	omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
+			OMAP_TIMER_INT_OVERFLOW);
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
 
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 393e9219a5b6..9b7e3545f325 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -673,6 +673,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
 		if (cpu_is_omap34xx() || cpu_is_omap44xx())
 			clk_disable(bank->dbck);
 	}
+	bank->dbck_enable_mask = val;
 
 	__raw_writel(val, reg);
 }
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index e43983ba59c5..8ce0de247c71 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -140,8 +140,10 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
 		return ERR_PTR(-ENOMEM);
 
 	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
-	if (err)
+	if (err) {
+		kfree(sgt);
 		return ERR_PTR(err);
+	}
 
 	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
 
diff --git a/arch/arm/plat-pxa/Makefile b/arch/arm/plat-pxa/Makefile
index 6187edfbcb77..a17cc0c6a6b0 100644
--- a/arch/arm/plat-pxa/Makefile
+++ b/arch/arm/plat-pxa/Makefile
@@ -2,8 +2,9 @@
 # Makefile for code common across different PXA processor families
 #
 
-obj-y	:= dma.o pmu.o
+obj-y	:= dma.o
 
+obj-$(CONFIG_ARCH_PXA)		+= pmu.o
 obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
 obj-$(CONFIG_PXA3xx)		+= mfp.o
 obj-$(CONFIG_ARCH_MMP)		+= mfp.o
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 5dfd916e9ea6..7b3cdc6c6d91 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -121,7 +121,7 @@ static inline void down_spin(struct spinaphore *ss)
 	ia64_invala();
 
 	for (;;) {
-		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
 		if (time_before(t, serve))
 			return;
 		cpu_relax();
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 93a11d7edfa0..e696144d2be3 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -10,6 +10,7 @@
 #include <errno.h>
 #include <fcntl.h>
 #include <string.h>
+#include <sys/stat.h>
 #include <sys/mman.h>
 #include <sys/param.h>
 #include "init.h"
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 611df11ba15e..c2897b7b4a3b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -102,8 +102,8 @@ static const u64 amd_perfmon_event_map[] =
   [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
   [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
   [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
 };
 
 static u64 amd_pmu_event_map(int hw_event)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 142d70c74b02..725ef4d17cd5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -526,6 +526,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 {
 	struct task_struct *tsk = current;
+	int user_icebp = 0;
 	unsigned long dr6;
 	int si_code;
 
@@ -534,6 +535,14 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	/* Filter out all the reserved bits which are preset to 1 */
 	dr6 &= ~DR6_RESERVED;
 
+	/*
+	 * If dr6 has no reason to give us about the origin of this trap,
+	 * then it's very likely the result of an icebp/int01 trap.
+	 * User wants a sigtrap for that.
+	 */
+	if (!dr6 && user_mode(regs))
+		user_icebp = 1;
+
 	/* Catch kmemcheck conditions first of all! */
 	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
 		return;
@@ -575,7 +584,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 		regs->flags &= ~X86_EFLAGS_TF;
 	}
 	si_code = get_si_code(tsk->thread.debugreg6);
-	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS))
+	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
 		send_sigtrap(tsk, regs, error_code, si_code);
 	preempt_conditional_cli(regs);
 
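The net effect of this hunk: when DR6 carries no cause bits at all and the trap came from user mode, the kernel now assumes an icebp/int01 instruction and still delivers SIGTRAP. A small user-space sketch of the behaviour this enables on x86 Linux (0xf1 is the undocumented ICEBP opcode; the program and its names are illustrative, not part of the patch):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_sigtrap(int sig)
{
	printf("got SIGTRAP (%d) from icebp\n", sig);	/* fine for a demo */
	exit(0);
}

int main(void)
{
	signal(SIGTRAP, on_sigtrap);
	asm volatile(".byte 0xf1");	/* icebp / int01 */
	printf("icebp did not trap\n");
	return 1;
}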
diff --git a/block/blk-core.c b/block/blk-core.c
index f84cce42fc58..f0640d7f800f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1149,13 +1149,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
 		req->cmd_flags |= REQ_DISCARD;
 	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-			req->cmd_flags |= REQ_SOFTBARRIER;
-	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
 		req->cmd_flags |= REQ_HARDBARRIER;
-
 	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
 	if (bio_rw_flagged(bio, BIO_RW_META))
@@ -1586,7 +1583,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio)) {
+	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5ff4f4850e71..7982b830db58 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -14,7 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
-#include "blk-cgroup.h"
+#include "cfq.h"
 
 /*
  * tunables
@@ -879,7 +879,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
-	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -939,8 +939,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
-	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
-	blkiocg_set_start_empty_time(&cfqg->blkg);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -995,7 +995,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 
 	/* Add group onto cgroup list */
 	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
@@ -1079,7 +1079,7 @@ static void cfq_release_cfq_groups(struct cfq_data *cfqd)
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!blkiocg_del_blkio_group(&cfqg->blkg))
+		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
 			cfq_destroy_cfqg(cfqd, cfqg);
 	}
 }
@@ -1421,10 +1421,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 }
@@ -1482,8 +1482,8 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1518,8 +1518,8 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 				struct bio *bio)
 {
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
-					cfq_bio_sync(bio));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+					bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
 static void
@@ -1539,8 +1539,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
-					rq_is_sync(next));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(next), rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1571,7 +1571,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1580,7 +1580,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1911,7 +1911,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1931,7 +1931,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-	blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
 
@@ -1986,6 +1986,15 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	int process_refs, new_process_refs;
1987 | struct cfq_queue *__cfqq; | 1987 | struct cfq_queue *__cfqq; |
1988 | 1988 | ||
1989 | /* | ||
1990 | * If there are no process references on the new_cfqq, then it is | ||
1991 | * unsafe to follow the ->new_cfqq chain as other cfqq's in the | ||
1992 | * chain may have dropped their last reference (not just their | ||
1993 | * last process reference). | ||
1994 | */ | ||
1995 | if (!cfqq_process_refs(new_cfqq)) | ||
1996 | return; | ||
1997 | |||
1989 | /* Avoid a circular list and skip interim queue merges */ | 1998 | /* Avoid a circular list and skip interim queue merges */ |
1990 | while ((__cfqq = new_cfqq->new_cfqq)) { | 1999 | while ((__cfqq = new_cfqq->new_cfqq)) { |
1991 | if (__cfqq == cfqq) | 2000 | if (__cfqq == cfqq) |
@@ -1994,17 +2003,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) | |||
1994 | } | 2003 | } |
1995 | 2004 | ||
1996 | process_refs = cfqq_process_refs(cfqq); | 2005 | process_refs = cfqq_process_refs(cfqq); |
2006 | new_process_refs = cfqq_process_refs(new_cfqq); | ||
1997 | /* | 2007 | /* |
1998 | * If the process for the cfqq has gone away, there is no | 2008 | * If the process for the cfqq has gone away, there is no |
1999 | * sense in merging the queues. | 2009 | * sense in merging the queues. |
2000 | */ | 2010 | */ |
2001 | if (process_refs == 0) | 2011 | if (process_refs == 0 || new_process_refs == 0) |
2002 | return; | 2012 | return; |
2003 | 2013 | ||
2004 | /* | 2014 | /* |
2005 | * Merge in the direction of the lesser amount of work. | 2015 | * Merge in the direction of the lesser amount of work. |
2006 | */ | 2016 | */ |
2007 | new_process_refs = cfqq_process_refs(new_cfqq); | ||
2008 | if (new_process_refs >= process_refs) { | 2017 | if (new_process_refs >= process_refs) { |
2009 | cfqq->new_cfqq = new_cfqq; | 2018 | cfqq->new_cfqq = new_cfqq; |
2010 | atomic_add(process_refs, &new_cfqq->ref); | 2019 | atomic_add(process_refs, &new_cfqq->ref); |
@@ -3248,7 +3257,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
3248 | cfq_clear_cfqq_wait_request(cfqq); | 3257 | cfq_clear_cfqq_wait_request(cfqq); |
3249 | __blk_run_queue(cfqd->queue); | 3258 | __blk_run_queue(cfqd->queue); |
3250 | } else { | 3259 | } else { |
3251 | blkiocg_update_idle_time_stats( | 3260 | cfq_blkiocg_update_idle_time_stats( |
3252 | &cfqq->cfqg->blkg); | 3261 | &cfqq->cfqg->blkg); |
3253 | cfq_mark_cfqq_must_dispatch(cfqq); | 3262 | cfq_mark_cfqq_must_dispatch(cfqq); |
3254 | } | 3263 | } |
@@ -3276,7 +3285,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) | |||
3276 | rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); | 3285 | rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); |
3277 | list_add_tail(&rq->queuelist, &cfqq->fifo); | 3286 | list_add_tail(&rq->queuelist, &cfqq->fifo); |
3278 | cfq_add_rq_rb(rq); | 3287 | cfq_add_rq_rb(rq); |
3279 | blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg, | 3288 | cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg, |
3280 | &cfqd->serving_group->blkg, rq_data_dir(rq), | 3289 | &cfqd->serving_group->blkg, rq_data_dir(rq), |
3281 | rq_is_sync(rq)); | 3290 | rq_is_sync(rq)); |
3282 | cfq_rq_enqueued(cfqd, cfqq, rq); | 3291 | cfq_rq_enqueued(cfqd, cfqq, rq); |
@@ -3364,9 +3373,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
3364 | WARN_ON(!cfqq->dispatched); | 3373 | WARN_ON(!cfqq->dispatched); |
3365 | cfqd->rq_in_driver--; | 3374 | cfqd->rq_in_driver--; |
3366 | cfqq->dispatched--; | 3375 | cfqq->dispatched--; |
3367 | blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq), | 3376 | cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, |
3368 | rq_io_start_time_ns(rq), rq_data_dir(rq), | 3377 | rq_start_time_ns(rq), rq_io_start_time_ns(rq), |
3369 | rq_is_sync(rq)); | 3378 | rq_data_dir(rq), rq_is_sync(rq)); |
3370 | 3379 | ||
3371 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; | 3380 | cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; |
3372 | 3381 | ||
@@ -3730,7 +3739,7 @@ static void cfq_exit_queue(struct elevator_queue *e) | |||
3730 | 3739 | ||
3731 | cfq_put_async_queues(cfqd); | 3740 | cfq_put_async_queues(cfqd); |
3732 | cfq_release_cfq_groups(cfqd); | 3741 | cfq_release_cfq_groups(cfqd); |
3733 | blkiocg_del_blkio_group(&cfqd->root_group.blkg); | 3742 | cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg); |
3734 | 3743 | ||
3735 | spin_unlock_irq(q->queue_lock); | 3744 | spin_unlock_irq(q->queue_lock); |
3736 | 3745 | ||
@@ -3798,8 +3807,8 @@ static void *cfq_init_queue(struct request_queue *q) | |||
3798 | */ | 3807 | */ |
3799 | atomic_set(&cfqg->ref, 1); | 3808 | atomic_set(&cfqg->ref, 1); |
3800 | rcu_read_lock(); | 3809 | rcu_read_lock(); |
3801 | blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd, | 3810 | cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, |
3802 | 0); | 3811 | (void *)cfqd, 0); |
3803 | rcu_read_unlock(); | 3812 | rcu_read_unlock(); |
3804 | #endif | 3813 | #endif |
3805 | /* | 3814 | /* |
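
One detail of the cfq_setup_merge() hunk above is worth spelling out: new_process_refs is now read before the early-return check, and the merge is abandoned when either queue has dropped to zero process references, since following the ->new_cfqq chain through a dead queue could touch freed memory. The sketch below mirrors that corrected ordering only; the queue struct and refs() helper are illustrative stand-ins, not kernel code.

/* Minimal sketch of the corrected ordering in cfq_setup_merge(): read both
 * reference counts first, bail if either is zero, then pick the direction. */
#include <stdio.h>

struct queue { int process_refs; struct queue *merge_target; };

static int refs(const struct queue *q) { return q->process_refs; }

static void setup_merge(struct queue *q, struct queue *new_q)
{
        int pr  = refs(q);
        int npr = refs(new_q);          /* read *before* deciding anything */

        if (pr == 0 || npr == 0)        /* either side already dead: no merge */
                return;

        if (npr >= pr)                  /* merge toward the lesser amount of work */
                q->merge_target = new_q;
        else
                new_q->merge_target = q;
}

int main(void)
{
        struct queue a = { 2, NULL }, b = { 0, NULL };

        setup_merge(&a, &b);
        printf("a merges into b? %s\n", a.merge_target ? "yes" : "no");  /* no */
        return 0;
}
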
diff --git a/block/cfq.h b/block/cfq.h new file mode 100644 index 000000000000..93448e5a2e41 --- /dev/null +++ b/block/cfq.h | |||
@@ -0,0 +1,115 @@ | |||
1 | #ifndef _CFQ_H | ||
2 | #define _CFQ_H | ||
3 | #include "blk-cgroup.h" | ||
4 | |||
5 | #ifdef CONFIG_CFQ_GROUP_IOSCHED | ||
6 | static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg, | ||
7 | struct blkio_group *curr_blkg, bool direction, bool sync) | ||
8 | { | ||
9 | blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync); | ||
10 | } | ||
11 | |||
12 | static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg, | ||
13 | unsigned long dequeue) | ||
14 | { | ||
15 | blkiocg_update_dequeue_stats(blkg, dequeue); | ||
16 | } | ||
17 | |||
18 | static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, | ||
19 | unsigned long time) | ||
20 | { | ||
21 | blkiocg_update_timeslice_used(blkg, time); | ||
22 | } | ||
23 | |||
24 | static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) | ||
25 | { | ||
26 | blkiocg_set_start_empty_time(blkg); | ||
27 | } | ||
28 | |||
29 | static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, | ||
30 | bool direction, bool sync) | ||
31 | { | ||
32 | blkiocg_update_io_remove_stats(blkg, direction, sync); | ||
33 | } | ||
34 | |||
35 | static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg, | ||
36 | bool direction, bool sync) | ||
37 | { | ||
38 | blkiocg_update_io_merged_stats(blkg, direction, sync); | ||
39 | } | ||
40 | |||
41 | static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) | ||
42 | { | ||
43 | blkiocg_update_idle_time_stats(blkg); | ||
44 | } | ||
45 | |||
46 | static inline void | ||
47 | cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) | ||
48 | { | ||
49 | blkiocg_update_avg_queue_size_stats(blkg); | ||
50 | } | ||
51 | |||
52 | static inline void | ||
53 | cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) | ||
54 | { | ||
55 | blkiocg_update_set_idle_time_stats(blkg); | ||
56 | } | ||
57 | |||
58 | static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg, | ||
59 | uint64_t bytes, bool direction, bool sync) | ||
60 | { | ||
61 | blkiocg_update_dispatch_stats(blkg, bytes, direction, sync); | ||
62 | } | ||
63 | |||
64 | static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) | ||
65 | { | ||
66 | blkiocg_update_completion_stats(blkg, start_time, io_start_time, | ||
67 | direction, sync); | ||
68 | } | ||
69 | |||
70 | static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, | ||
71 | struct blkio_group *blkg, void *key, dev_t dev) { | ||
72 | blkiocg_add_blkio_group(blkcg, blkg, key, dev); | ||
73 | } | ||
74 | |||
75 | static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg) | ||
76 | { | ||
77 | return blkiocg_del_blkio_group(blkg); | ||
78 | } | ||
79 | |||
80 | #else /* CFQ_GROUP_IOSCHED */ | ||
81 | static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg, | ||
82 | struct blkio_group *curr_blkg, bool direction, bool sync) {} | ||
83 | |||
84 | static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg, | ||
85 | unsigned long dequeue) {} | ||
86 | |||
87 | static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, | ||
88 | unsigned long time) {} | ||
89 | static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {} | ||
90 | static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, | ||
91 | bool direction, bool sync) {} | ||
92 | static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg, | ||
93 | bool direction, bool sync) {} | ||
94 | static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) | ||
95 | { | ||
96 | } | ||
97 | static inline void | ||
98 | cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {} | ||
99 | |||
100 | static inline void | ||
101 | cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {} | ||
102 | |||
103 | static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg, | ||
104 | uint64_t bytes, bool direction, bool sync) {} | ||
105 | static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {} | ||
106 | |||
107 | static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, | ||
108 | struct blkio_group *blkg, void *key, dev_t dev) {} | ||
109 | static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg) | ||
110 | { | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | #endif /* CFQ_GROUP_IOSCHED */ | ||
115 | #endif | ||
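
The new block/cfq.h centralizes the blkio-cgroup statistics hooks behind cfq_blkiocg_* inline wrappers: with CONFIG_CFQ_GROUP_IOSCHED enabled they forward to the real blkiocg_* functions, and without it they are empty stubs, so the call sites in cfq-iosched.c stay identical either way. Below is a minimal standalone sketch of that wrapper pattern; GROUP_STATS and the demo_* names are invented for illustration and are not kernel symbols.

/* Build with "cc -DGROUP_STATS demo.c" to get the real hook, or without the
 * define to get the no-op stub; main() needs no #ifdef in either case. */
#include <stdio.h>

struct demo_group { unsigned long dispatched; };

#ifdef GROUP_STATS
static inline void demo_update_dispatch_stats(struct demo_group *g,
                                              unsigned long bytes)
{
        g->dispatched += bytes;          /* forwards to the real accounting */
}
#else
static inline void demo_update_dispatch_stats(struct demo_group *g,
                                              unsigned long bytes)
{
        /* compiles away entirely when group stats are not configured */
}
#endif

int main(void)
{
        struct demo_group g = { 0 };

        demo_update_dispatch_stats(&g, 4096);    /* caller never needs #ifdef */
        printf("dispatched: %lu\n", g.dispatched);
        return 0;
}
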
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 2ebc39115507..864dd46c346f 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c | |||
@@ -781,7 +781,7 @@ static int __init erst_init(void) | |||
781 | status = acpi_get_table(ACPI_SIG_ERST, 0, | 781 | status = acpi_get_table(ACPI_SIG_ERST, 0, |
782 | (struct acpi_table_header **)&erst_tab); | 782 | (struct acpi_table_header **)&erst_tab); |
783 | if (status == AE_NOT_FOUND) { | 783 | if (status == AE_NOT_FOUND) { |
784 | pr_err(ERST_PFX "Table is not found!\n"); | 784 | pr_info(ERST_PFX "Table is not found!\n"); |
785 | goto err; | 785 | goto err; |
786 | } else if (ACPI_FAILURE(status)) { | 786 | } else if (ACPI_FAILURE(status)) { |
787 | const char *msg = acpi_format_exception(status); | 787 | const char *msg = acpi_format_exception(status); |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 8ca16f54e1ed..f2522534ae63 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -1053,6 +1053,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1053 | if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) | 1053 | if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) |
1054 | return -ENODEV; | 1054 | return -ENODEV; |
1055 | 1055 | ||
1056 | /* | ||
1057 | * For some reason, MCP89 on MacBook 7,1 doesn't work with | ||
1058 | * ahci, use ata_generic instead. | ||
1059 | */ | ||
1060 | if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && | ||
1061 | pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && | ||
1062 | pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && | ||
1063 | pdev->subsystem_device == 0xcb89) | ||
1064 | return -ENODEV; | ||
1065 | |||
1056 | /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. | 1066 | /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. |
1057 | * At the moment, we can only use the AHCI mode. Let the users know | 1067 | * At the moment, we can only use the AHCI mode. Let the users know |
1058 | * that for SAS drives they're out of luck. | 1068 | * that for SAS drives they're out of luck. |
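
The ahci hunk above declines the MCP89 SATA controller on the MacBook 7,1, identified by the NVIDIA vendor/device pair plus the Apple subsystem IDs, so the ata_generic entry added in the next file can bind it instead. A rough sketch of that subsystem-ID quirk test follows; the pci_id struct and the MCP89 device ID used here are placeholders for illustration, not kernel definitions.

#include <stdbool.h>
#include <stdio.h>

struct pci_id {
        unsigned short vendor, device;
        unsigned short sub_vendor, sub_device;
};

static bool is_macbook71_mcp89(const struct pci_id *id)
{
        return id->vendor     == 0x10de &&   /* NVIDIA */
               id->device     == 0x0d85 &&   /* MCP89 SATA (placeholder value) */
               id->sub_vendor == 0x106b &&   /* Apple */
               id->sub_device == 0xcb89;
}

int main(void)
{
        struct pci_id dev = { 0x10de, 0x0d85, 0x106b, 0xcb89 };

        /* Returning an -ENODEV-style refusal lets another driver bind later. */
        printf("%s\n", is_macbook71_mcp89(&dev) ? "decline (use ata_generic)"
                                                : "claim with ahci");
        return 0;
}
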
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 573158a9668d..7107a6929deb 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c | |||
@@ -32,6 +32,11 @@ | |||
32 | * A generic parallel ATA driver using libata | 32 | * A generic parallel ATA driver using libata |
33 | */ | 33 | */ |
34 | 34 | ||
35 | enum { | ||
36 | ATA_GEN_CLASS_MATCH = (1 << 0), | ||
37 | ATA_GEN_FORCE_DMA = (1 << 1), | ||
38 | }; | ||
39 | |||
35 | /** | 40 | /** |
36 | * generic_set_mode - mode setting | 41 | * generic_set_mode - mode setting |
37 | * @link: link to set up | 42 | * @link: link to set up |
@@ -46,13 +51,17 @@ | |||
46 | static int generic_set_mode(struct ata_link *link, struct ata_device **unused) | 51 | static int generic_set_mode(struct ata_link *link, struct ata_device **unused) |
47 | { | 52 | { |
48 | struct ata_port *ap = link->ap; | 53 | struct ata_port *ap = link->ap; |
54 | const struct pci_device_id *id = ap->host->private_data; | ||
49 | int dma_enabled = 0; | 55 | int dma_enabled = 0; |
50 | struct ata_device *dev; | 56 | struct ata_device *dev; |
51 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 57 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
52 | 58 | ||
53 | /* Bits 5 and 6 indicate if DMA is active on master/slave */ | 59 | if (id->driver_data & ATA_GEN_FORCE_DMA) { |
54 | if (ap->ioaddr.bmdma_addr) | 60 | dma_enabled = 0xff; |
61 | } else if (ap->ioaddr.bmdma_addr) { | ||
62 | /* Bits 5 and 6 indicate if DMA is active on master/slave */ | ||
55 | dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | 63 | dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
64 | } | ||
56 | 65 | ||
57 | if (pdev->vendor == PCI_VENDOR_ID_CENATEK) | 66 | if (pdev->vendor == PCI_VENDOR_ID_CENATEK) |
58 | dma_enabled = 0xFF; | 67 | dma_enabled = 0xFF; |
@@ -126,7 +135,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id | |||
126 | const struct ata_port_info *ppi[] = { &info, NULL }; | 135 | const struct ata_port_info *ppi[] = { &info, NULL }; |
127 | 136 | ||
128 | /* Don't use the generic entry unless instructed to do so */ | 137 | /* Don't use the generic entry unless instructed to do so */ |
129 | if (id->driver_data == 1 && all_generic_ide == 0) | 138 | if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) |
130 | return -ENODEV; | 139 | return -ENODEV; |
131 | 140 | ||
132 | /* Devices that need care */ | 141 | /* Devices that need care */ |
@@ -155,7 +164,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id | |||
155 | return rc; | 164 | return rc; |
156 | pcim_pin_device(dev); | 165 | pcim_pin_device(dev); |
157 | } | 166 | } |
158 | return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0); | 167 | return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0); |
159 | } | 168 | } |
160 | 169 | ||
161 | static struct pci_device_id ata_generic[] = { | 170 | static struct pci_device_id ata_generic[] = { |
@@ -167,7 +176,15 @@ static struct pci_device_id ata_generic[] = { | |||
167 | { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, | 176 | { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, |
168 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, | 177 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, |
169 | { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, | 178 | { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, |
170 | { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, | 179 | { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), |
180 | .driver_data = ATA_GEN_FORCE_DMA }, | ||
181 | /* | ||
182 | * For some reason, MCP89 on MacBook 7,1 doesn't work with | ||
183 | * ahci, use ata_generic instead. | ||
184 | */ | ||
185 | { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA, | ||
186 | PCI_VENDOR_ID_APPLE, 0xcb89, | ||
187 | .driver_data = ATA_GEN_FORCE_DMA }, | ||
171 | #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) | 188 | #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) |
172 | { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, | 189 | { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, |
173 | { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, | 190 | { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, |
@@ -175,7 +192,8 @@ static struct pci_device_id ata_generic[] = { | |||
175 | { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, | 192 | { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, |
176 | #endif | 193 | #endif |
177 | /* Must come last. If you add entries adjust this table appropriately */ | 194 | /* Must come last. If you add entries adjust this table appropriately */ |
178 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1}, | 195 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), |
196 | .driver_data = ATA_GEN_CLASS_MATCH }, | ||
179 | { 0, }, | 197 | { 0, }, |
180 | }; | 198 | }; |
181 | 199 | ||
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 261f86d102e8..81e772a94d59 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -324,6 +324,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev, | |||
324 | struct ahci_host_priv *hpriv = ap->host->private_data; | 324 | struct ahci_host_priv *hpriv = ap->host->private_data; |
325 | void __iomem *mmio = hpriv->mmio; | 325 | void __iomem *mmio = hpriv->mmio; |
326 | void __iomem *em_mmio = mmio + hpriv->em_loc; | 326 | void __iomem *em_mmio = mmio + hpriv->em_loc; |
327 | const unsigned char *msg_buf = buf; | ||
327 | u32 em_ctl, msg; | 328 | u32 em_ctl, msg; |
328 | unsigned long flags; | 329 | unsigned long flags; |
329 | int i; | 330 | int i; |
@@ -343,8 +344,8 @@ static ssize_t ahci_store_em_buffer(struct device *dev, | |||
343 | } | 344 | } |
344 | 345 | ||
345 | for (i = 0; i < size; i += 4) { | 346 | for (i = 0; i < size; i += 4) { |
346 | msg = buf[i] | buf[i + 1] << 8 | | 347 | msg = msg_buf[i] | msg_buf[i + 1] << 8 | |
347 | buf[i + 2] << 16 | buf[i + 3] << 24; | 348 | msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24; |
348 | writel(msg, em_mmio + i); | 349 | writel(msg, em_mmio + i); |
349 | } | 350 | } |
350 | 351 | ||
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 3381505c8a6c..72dae92f3cab 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -861,6 +861,7 @@ cciss_scsi_detect(int ctlr) | |||
861 | sh->n_io_port = 0; // I don't think we use these two... | 861 | sh->n_io_port = 0; // I don't think we use these two... |
862 | sh->this_id = SELF_SCSI_ID; | 862 | sh->this_id = SELF_SCSI_ID; |
863 | sh->sg_tablesize = hba[ctlr]->maxsgentries; | 863 | sh->sg_tablesize = hba[ctlr]->maxsgentries; |
864 | sh->max_cmd_len = MAX_COMMAND_SIZE; | ||
864 | 865 | ||
865 | ((struct cciss_scsi_adapter_data_t *) | 866 | ((struct cciss_scsi_adapter_data_t *) |
866 | hba[ctlr]->scsi_ctlr)->scsi_host = sh; | 867 | hba[ctlr]->scsi_ctlr)->scsi_host = sh; |
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 91d11631cec9..abb4ec6690fc 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
@@ -386,7 +386,7 @@ static void __devexit cpqarray_remove_one_eisa (int i) | |||
386 | } | 386 | } |
387 | 387 | ||
388 | /* pdev is NULL for eisa */ | 388 | /* pdev is NULL for eisa */ |
389 | static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) | 389 | static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev) |
390 | { | 390 | { |
391 | struct request_queue *q; | 391 | struct request_queue *q; |
392 | int j; | 392 | int j; |
@@ -503,7 +503,7 @@ Enomem4: | |||
503 | return -1; | 503 | return -1; |
504 | } | 504 | } |
505 | 505 | ||
506 | static int __init cpqarray_init_one( struct pci_dev *pdev, | 506 | static int __devinit cpqarray_init_one( struct pci_dev *pdev, |
507 | const struct pci_device_id *ent) | 507 | const struct pci_device_id *ent) |
508 | { | 508 | { |
509 | int i; | 509 | int i; |
@@ -740,7 +740,7 @@ __setup("smart2=", cpqarray_setup); | |||
740 | /* | 740 | /* |
741 | * Find an EISA controller's signature. Set up an hba if we find it. | 741 | * Find an EISA controller's signature. Set up an hba if we find it. |
742 | */ | 742 | */ |
743 | static int __init cpqarray_eisa_detect(void) | 743 | static int __devinit cpqarray_eisa_detect(void) |
744 | { | 744 | { |
745 | int i=0, j; | 745 | int i=0, j; |
746 | __u32 board_id; | 746 | __u32 board_id; |
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 6b077f93acc6..7258c95e895e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -1236,8 +1236,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
1236 | /* Last part of the attaching process ... */ | 1236 | /* Last part of the attaching process ... */ |
1237 | if (ns.conn >= C_CONNECTED && | 1237 | if (ns.conn >= C_CONNECTED && |
1238 | os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { | 1238 | os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { |
1239 | kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */ | ||
1240 | mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */ | ||
1241 | drbd_send_sizes(mdev, 0, 0); /* to start sync... */ | 1239 | drbd_send_sizes(mdev, 0, 0); /* to start sync... */ |
1242 | drbd_send_uuids(mdev); | 1240 | drbd_send_uuids(mdev); |
1243 | drbd_send_state(mdev); | 1241 | drbd_send_state(mdev); |
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 632e3245d1bb..2151f18b21de 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -1114,6 +1114,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1114 | mdev->new_state_tmp.i = ns.i; | 1114 | mdev->new_state_tmp.i = ns.i; |
1115 | ns.i = os.i; | 1115 | ns.i = os.i; |
1116 | ns.disk = D_NEGOTIATING; | 1116 | ns.disk = D_NEGOTIATING; |
1117 | |||
1118 | /* We expect to receive up-to-date UUIDs soon. | ||
1119 | To avoid a race in receive_state, free p_uuid while | ||
1120 | holding req_lock. I.e. atomic with the state change */ | ||
1121 | kfree(mdev->p_uuid); | ||
1122 | mdev->p_uuid = NULL; | ||
1117 | } | 1123 | } |
1118 | 1124 | ||
1119 | rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); | 1125 | rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); |
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 4b51982fd23a..d2abf5143983 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c | |||
@@ -97,20 +97,18 @@ EXPORT_SYMBOL(agp_flush_chipset); | |||
97 | void agp_alloc_page_array(size_t size, struct agp_memory *mem) | 97 | void agp_alloc_page_array(size_t size, struct agp_memory *mem) |
98 | { | 98 | { |
99 | mem->pages = NULL; | 99 | mem->pages = NULL; |
100 | mem->vmalloc_flag = false; | ||
101 | 100 | ||
102 | if (size <= 2*PAGE_SIZE) | 101 | if (size <= 2*PAGE_SIZE) |
103 | mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); | 102 | mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN); |
104 | if (mem->pages == NULL) { | 103 | if (mem->pages == NULL) { |
105 | mem->pages = vmalloc(size); | 104 | mem->pages = vmalloc(size); |
106 | mem->vmalloc_flag = true; | ||
107 | } | 105 | } |
108 | } | 106 | } |
109 | EXPORT_SYMBOL(agp_alloc_page_array); | 107 | EXPORT_SYMBOL(agp_alloc_page_array); |
110 | 108 | ||
111 | void agp_free_page_array(struct agp_memory *mem) | 109 | void agp_free_page_array(struct agp_memory *mem) |
112 | { | 110 | { |
113 | if (mem->vmalloc_flag) { | 111 | if (is_vmalloc_addr(mem->pages)) { |
114 | vfree(mem->pages); | 112 | vfree(mem->pages); |
115 | } else { | 113 | } else { |
116 | kfree(mem->pages); | 114 | kfree(mem->pages); |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 35603dd4e6c5..094bdc355b1f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -302,6 +302,12 @@ struct smi_info { | |||
302 | 302 | ||
303 | static int force_kipmid[SI_MAX_PARMS]; | 303 | static int force_kipmid[SI_MAX_PARMS]; |
304 | static int num_force_kipmid; | 304 | static int num_force_kipmid; |
305 | #ifdef CONFIG_PCI | ||
306 | static int pci_registered; | ||
307 | #endif | ||
308 | #ifdef CONFIG_PPC_OF | ||
309 | static int of_registered; | ||
310 | #endif | ||
305 | 311 | ||
306 | static unsigned int kipmid_max_busy_us[SI_MAX_PARMS]; | 312 | static unsigned int kipmid_max_busy_us[SI_MAX_PARMS]; |
307 | static int num_max_busy_us; | 313 | static int num_max_busy_us; |
@@ -1018,7 +1024,7 @@ static int ipmi_thread(void *data) | |||
1018 | else if (smi_result == SI_SM_IDLE) | 1024 | else if (smi_result == SI_SM_IDLE) |
1019 | schedule_timeout_interruptible(100); | 1025 | schedule_timeout_interruptible(100); |
1020 | else | 1026 | else |
1021 | schedule_timeout_interruptible(0); | 1027 | schedule_timeout_interruptible(1); |
1022 | } | 1028 | } |
1023 | return 0; | 1029 | return 0; |
1024 | } | 1030 | } |
@@ -3314,6 +3320,8 @@ static __devinit int init_ipmi_si(void) | |||
3314 | rv = pci_register_driver(&ipmi_pci_driver); | 3320 | rv = pci_register_driver(&ipmi_pci_driver); |
3315 | if (rv) | 3321 | if (rv) |
3316 | printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); | 3322 | printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); |
3323 | else | ||
3324 | pci_registered = 1; | ||
3317 | #endif | 3325 | #endif |
3318 | 3326 | ||
3319 | #ifdef CONFIG_ACPI | 3327 | #ifdef CONFIG_ACPI |
@@ -3330,6 +3338,7 @@ static __devinit int init_ipmi_si(void) | |||
3330 | 3338 | ||
3331 | #ifdef CONFIG_PPC_OF | 3339 | #ifdef CONFIG_PPC_OF |
3332 | of_register_platform_driver(&ipmi_of_platform_driver); | 3340 | of_register_platform_driver(&ipmi_of_platform_driver); |
3341 | of_registered = 1; | ||
3333 | #endif | 3342 | #endif |
3334 | 3343 | ||
3335 | /* We prefer devices with interrupts, but in the case of a machine | 3344 | /* We prefer devices with interrupts, but in the case of a machine |
@@ -3383,11 +3392,13 @@ static __devinit int init_ipmi_si(void) | |||
3383 | if (unload_when_empty && list_empty(&smi_infos)) { | 3392 | if (unload_when_empty && list_empty(&smi_infos)) { |
3384 | mutex_unlock(&smi_infos_lock); | 3393 | mutex_unlock(&smi_infos_lock); |
3385 | #ifdef CONFIG_PCI | 3394 | #ifdef CONFIG_PCI |
3386 | pci_unregister_driver(&ipmi_pci_driver); | 3395 | if (pci_registered) |
3396 | pci_unregister_driver(&ipmi_pci_driver); | ||
3387 | #endif | 3397 | #endif |
3388 | 3398 | ||
3389 | #ifdef CONFIG_PPC_OF | 3399 | #ifdef CONFIG_PPC_OF |
3390 | of_unregister_platform_driver(&ipmi_of_platform_driver); | 3400 | if (of_registered) |
3401 | of_unregister_platform_driver(&ipmi_of_platform_driver); | ||
3391 | #endif | 3402 | #endif |
3392 | driver_unregister(&ipmi_driver.driver); | 3403 | driver_unregister(&ipmi_driver.driver); |
3393 | printk(KERN_WARNING PFX | 3404 | printk(KERN_WARNING PFX |
@@ -3478,14 +3489,16 @@ static __exit void cleanup_ipmi_si(void) | |||
3478 | return; | 3489 | return; |
3479 | 3490 | ||
3480 | #ifdef CONFIG_PCI | 3491 | #ifdef CONFIG_PCI |
3481 | pci_unregister_driver(&ipmi_pci_driver); | 3492 | if (pci_registered) |
3493 | pci_unregister_driver(&ipmi_pci_driver); | ||
3482 | #endif | 3494 | #endif |
3483 | #ifdef CONFIG_ACPI | 3495 | #ifdef CONFIG_ACPI |
3484 | pnp_unregister_driver(&ipmi_pnp_driver); | 3496 | pnp_unregister_driver(&ipmi_pnp_driver); |
3485 | #endif | 3497 | #endif |
3486 | 3498 | ||
3487 | #ifdef CONFIG_PPC_OF | 3499 | #ifdef CONFIG_PPC_OF |
3488 | of_unregister_platform_driver(&ipmi_of_platform_driver); | 3500 | if (of_registered) |
3501 | of_unregister_platform_driver(&ipmi_of_platform_driver); | ||
3489 | #endif | 3502 | #endif |
3490 | 3503 | ||
3491 | mutex_lock(&smi_infos_lock); | 3504 | mutex_lock(&smi_infos_lock); |
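
The ipmi_si changes record whether the PCI and OF driver registrations actually succeeded (pci_registered, of_registered) and only unregister in that case, so the early-unload and cleanup paths never try to tear down a driver that was never registered. The sketch below mirrors that pattern with stand-in register/unregister functions; they are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

static bool pci_registered;

static int fake_register_pci_driver(void)
{
        return -1;                       /* pretend registration failed */
}

static void fake_unregister_pci_driver(void)
{
        printf("pci driver unregistered\n");
}

static void init(void)
{
        if (fake_register_pci_driver() == 0)
                pci_registered = true;   /* remember success, nothing else */
        else
                fprintf(stderr, "pci registration failed, continuing\n");
}

static void cleanup(void)
{
        if (pci_registered)              /* never undo what never happened */
                fake_unregister_pci_driver();
}

int main(void)
{
        init();
        cleanup();                       /* safe even after a failed init */
        return 0;
}
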
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 52ff8aa63f84..1b128702d300 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration) | |||
143 | * This allows us to calculate | 143 | * This allows us to calculate |
144 | * E(duration)|iowait | 144 | * E(duration)|iowait |
145 | */ | 145 | */ |
146 | if (nr_iowait_cpu()) | 146 | if (nr_iowait_cpu(smp_processor_id())) |
147 | bucket = BUCKETS/2; | 147 | bucket = BUCKETS/2; |
148 | 148 | ||
149 | if (duration < 10) | 149 | if (duration < 10) |
@@ -175,7 +175,7 @@ static inline int performance_multiplier(void) | |||
175 | mult += 2 * get_loadavg(); | 175 | mult += 2 * get_loadavg(); |
176 | 176 | ||
177 | /* for IO wait tasks (per cpu!) we add 5x each */ | 177 | /* for IO wait tasks (per cpu!) we add 5x each */ |
178 | mult += 10 * nr_iowait_cpu(); | 178 | mult += 10 * nr_iowait_cpu(smp_processor_id()); |
179 | 179 | ||
180 | return mult; | 180 | return mult; |
181 | } | 181 | } |
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 5a22ca6927e5..7c3747902a37 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -4257,10 +4257,12 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev, | |||
4257 | struct ppc440spe_adma_chan *chan, | 4257 | struct ppc440spe_adma_chan *chan, |
4258 | int *initcode) | 4258 | int *initcode) |
4259 | { | 4259 | { |
4260 | struct of_device *ofdev; | ||
4260 | struct device_node *np; | 4261 | struct device_node *np; |
4261 | int ret; | 4262 | int ret; |
4262 | 4263 | ||
4263 | np = container_of(adev->dev, struct of_device, dev)->node; | 4264 | ofdev = container_of(adev->dev, struct of_device, dev); |
4265 | np = ofdev->dev.of_node; | ||
4264 | if (adev->id != PPC440SPE_XOR_ID) { | 4266 | if (adev->id != PPC440SPE_XOR_ID) { |
4265 | adev->err_irq = irq_of_parse_and_map(np, 1); | 4267 | adev->err_irq = irq_of_parse_and_map(np, 1); |
4266 | if (adev->err_irq == NO_IRQ) { | 4268 | if (adev->err_irq == NO_IRQ) { |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index cf17dbb8014f..ac9f7985096d 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -1958,20 +1958,20 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |||
1958 | u32 value = 0; | 1958 | u32 value = 0; |
1959 | int err_sym = 0; | 1959 | int err_sym = 0; |
1960 | 1960 | ||
1961 | amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); | 1961 | if (boot_cpu_data.x86 == 0x10) { |
1962 | 1962 | ||
1963 | /* F3x180[EccSymbolSize]=1, x8 symbols */ | 1963 | amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); |
1964 | if (boot_cpu_data.x86 == 0x10 && | 1964 | |
1965 | boot_cpu_data.x86_model > 7 && | 1965 | /* F3x180[EccSymbolSize]=1 => x8 symbols */ |
1966 | value & BIT(25)) { | 1966 | if (boot_cpu_data.x86_model > 7 && |
1967 | err_sym = decode_syndrome(syndrome, x8_vectors, | 1967 | value & BIT(25)) { |
1968 | ARRAY_SIZE(x8_vectors), 8); | 1968 | err_sym = decode_syndrome(syndrome, x8_vectors, |
1969 | return map_err_sym_to_channel(err_sym, 8); | 1969 | ARRAY_SIZE(x8_vectors), 8); |
1970 | } else { | 1970 | return map_err_sym_to_channel(err_sym, 8); |
1971 | err_sym = decode_syndrome(syndrome, x4_vectors, | 1971 | } |
1972 | ARRAY_SIZE(x4_vectors), 4); | ||
1973 | return map_err_sym_to_channel(err_sym, 4); | ||
1974 | } | 1972 | } |
1973 | err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4); | ||
1974 | return map_err_sym_to_channel(err_sym, 4); | ||
1975 | } | 1975 | } |
1976 | 1976 | ||
1977 | /* | 1977 | /* |
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 6b8b7b41ec5f..cc9357da0e34 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1233,10 +1233,28 @@ static void __init i7core_xeon_pci_fixup(struct pci_id_table *table) | |||
1233 | for (i = 0; i < MAX_SOCKET_BUSES; i++) | 1233 | for (i = 0; i < MAX_SOCKET_BUSES; i++) |
1234 | pcibios_scan_specific_bus(255-i); | 1234 | pcibios_scan_specific_bus(255-i); |
1235 | } | 1235 | } |
1236 | pci_dev_put(pdev); | ||
1236 | table++; | 1237 | table++; |
1237 | } | 1238 | } |
1238 | } | 1239 | } |
1239 | 1240 | ||
1241 | static unsigned i7core_pci_lastbus(void) | ||
1242 | { | ||
1243 | int last_bus = 0, bus; | ||
1244 | struct pci_bus *b = NULL; | ||
1245 | |||
1246 | while ((b = pci_find_next_bus(b)) != NULL) { | ||
1247 | bus = b->number; | ||
1248 | debugf0("Found bus %d\n", bus); | ||
1249 | if (bus > last_bus) | ||
1250 | last_bus = bus; | ||
1251 | } | ||
1252 | |||
1253 | debugf0("Last bus %d\n", last_bus); | ||
1254 | |||
1255 | return last_bus; | ||
1256 | } | ||
1257 | |||
1240 | /* | 1258 | /* |
1241 | * i7core_get_devices Find and perform 'get' operation on the MCH's | 1259 | * i7core_get_devices Find and perform 'get' operation on the MCH's |
1242 | * device/functions we want to reference for this driver | 1260 | * device/functions we want to reference for this driver |
@@ -1244,7 +1262,8 @@ static void __init i7core_xeon_pci_fixup(struct pci_id_table *table) | |||
1244 | * Need to 'get' device 16 func 1 and func 2 | 1262 | * Need to 'get' device 16 func 1 and func 2 |
1245 | */ | 1263 | */ |
1246 | int i7core_get_onedevice(struct pci_dev **prev, int devno, | 1264 | int i7core_get_onedevice(struct pci_dev **prev, int devno, |
1247 | struct pci_id_descr *dev_descr, unsigned n_devs) | 1265 | struct pci_id_descr *dev_descr, unsigned n_devs, |
1266 | unsigned last_bus) | ||
1248 | { | 1267 | { |
1249 | struct i7core_dev *i7core_dev; | 1268 | struct i7core_dev *i7core_dev; |
1250 | 1269 | ||
@@ -1291,10 +1310,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, | |||
1291 | } | 1310 | } |
1292 | bus = pdev->bus->number; | 1311 | bus = pdev->bus->number; |
1293 | 1312 | ||
1294 | if (bus == 0x3f) | 1313 | socket = last_bus - bus; |
1295 | socket = 0; | ||
1296 | else | ||
1297 | socket = 255 - bus; | ||
1298 | 1314 | ||
1299 | i7core_dev = get_i7core_dev(socket); | 1315 | i7core_dev = get_i7core_dev(socket); |
1300 | if (!i7core_dev) { | 1316 | if (!i7core_dev) { |
@@ -1358,17 +1374,21 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, | |||
1358 | 1374 | ||
1359 | static int i7core_get_devices(struct pci_id_table *table) | 1375 | static int i7core_get_devices(struct pci_id_table *table) |
1360 | { | 1376 | { |
1361 | int i, rc; | 1377 | int i, rc, last_bus; |
1362 | struct pci_dev *pdev = NULL; | 1378 | struct pci_dev *pdev = NULL; |
1363 | struct pci_id_descr *dev_descr; | 1379 | struct pci_id_descr *dev_descr; |
1364 | 1380 | ||
1381 | last_bus = i7core_pci_lastbus(); | ||
1382 | |||
1365 | while (table && table->descr) { | 1383 | while (table && table->descr) { |
1366 | dev_descr = table->descr; | 1384 | dev_descr = table->descr; |
1367 | for (i = 0; i < table->n_devs; i++) { | 1385 | for (i = 0; i < table->n_devs; i++) { |
1368 | pdev = NULL; | 1386 | pdev = NULL; |
1369 | do { | 1387 | do { |
1370 | rc = i7core_get_onedevice(&pdev, i, &dev_descr[i], | 1388 | rc = i7core_get_onedevice(&pdev, i, |
1371 | table->n_devs); | 1389 | &dev_descr[i], |
1390 | table->n_devs, | ||
1391 | last_bus); | ||
1372 | if (rc < 0) { | 1392 | if (rc < 0) { |
1373 | if (i == 0) { | 1393 | if (i == 0) { |
1374 | i = table->n_devs; | 1394 | i = table->n_devs; |
@@ -1927,21 +1947,26 @@ fail: | |||
1927 | * 0 for FOUND a device | 1947 | * 0 for FOUND a device |
1928 | * < 0 for error code | 1948 | * < 0 for error code |
1929 | */ | 1949 | */ |
1950 | |||
1951 | static int probed = 0; | ||
1952 | |||
1930 | static int __devinit i7core_probe(struct pci_dev *pdev, | 1953 | static int __devinit i7core_probe(struct pci_dev *pdev, |
1931 | const struct pci_device_id *id) | 1954 | const struct pci_device_id *id) |
1932 | { | 1955 | { |
1933 | int dev_idx = id->driver_data; | ||
1934 | int rc; | 1956 | int rc; |
1935 | struct i7core_dev *i7core_dev; | 1957 | struct i7core_dev *i7core_dev; |
1936 | 1958 | ||
1959 | /* get the pci devices we want to reserve for our use */ | ||
1960 | mutex_lock(&i7core_edac_lock); | ||
1961 | |||
1937 | /* | 1962 | /* |
1938 | * All memory controllers are allocated at the first pass. | 1963 | * All memory controllers are allocated at the first pass. |
1939 | */ | 1964 | */ |
1940 | if (unlikely(dev_idx >= 1)) | 1965 | if (unlikely(probed >= 1)) { |
1966 | mutex_unlock(&i7core_edac_lock); | ||
1941 | return -EINVAL; | 1967 | return -EINVAL; |
1942 | 1968 | } | |
1943 | /* get the pci devices we want to reserve for our use */ | 1969 | probed++; |
1944 | mutex_lock(&i7core_edac_lock); | ||
1945 | 1970 | ||
1946 | rc = i7core_get_devices(pci_dev_table); | 1971 | rc = i7core_get_devices(pci_dev_table); |
1947 | if (unlikely(rc < 0)) | 1972 | if (unlikely(rc < 0)) |
@@ -2013,6 +2038,8 @@ static void __devexit i7core_remove(struct pci_dev *pdev) | |||
2013 | i7core_dev->socket); | 2038 | i7core_dev->socket); |
2014 | } | 2039 | } |
2015 | } | 2040 | } |
2041 | probed--; | ||
2042 | |||
2016 | mutex_unlock(&i7core_edac_lock); | 2043 | mutex_unlock(&i7core_edac_lock); |
2017 | } | 2044 | } |
2018 | 2045 | ||
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 724038dab4ca..7face915b963 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # GPIO infrastructure and expanders | 2 | # platform-neutral GPIO infrastructure and expanders |
3 | # | 3 | # |
4 | 4 | ||
5 | config ARCH_WANT_OPTIONAL_GPIOLIB | 5 | config ARCH_WANT_OPTIONAL_GPIOLIB |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 51c3cdd41b5a..e53dcff49b4f 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -1,4 +1,8 @@ | |||
1 | # gpio support: dedicated expander chips, etc | 1 | # generic gpio support: dedicated expander chips, etc |
2 | # | ||
3 | # NOTE: platform-specific GPIO drivers don't belong in the | ||
4 | # drivers/gpio directory; put them with other platform setup | ||
5 | # code, IRQ controllers, board init, etc. | ||
2 | 6 | ||
3 | ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG | 7 | ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG |
4 | 8 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 08c4c926e65f..1f2cc6b09623 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -146,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn | |||
146 | cvt = 1; | 146 | cvt = 1; |
147 | break; | 147 | break; |
148 | case 'R': | 148 | case 'R': |
149 | if (!cvt) | 149 | if (cvt) |
150 | rb = 1; | 150 | rb = 1; |
151 | break; | 151 | break; |
152 | case 'm': | 152 | case 'm': |
@@ -1024,11 +1024,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne | |||
1024 | } | 1024 | } |
1025 | 1025 | ||
1026 | create_mode: | 1026 | create_mode: |
1027 | mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, | 1027 | if (cmdline_mode->cvt) |
1028 | cmdline_mode->yres, | 1028 | mode = drm_cvt_mode(fb_helper_conn->connector->dev, |
1029 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, | 1029 | cmdline_mode->xres, cmdline_mode->yres, |
1030 | cmdline_mode->rb, cmdline_mode->interlace, | 1030 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, |
1031 | cmdline_mode->margins); | 1031 | cmdline_mode->rb, cmdline_mode->interlace, |
1032 | cmdline_mode->margins); | ||
1033 | else | ||
1034 | mode = drm_gtf_mode(fb_helper_conn->connector->dev, | ||
1035 | cmdline_mode->xres, cmdline_mode->yres, | ||
1036 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, | ||
1037 | cmdline_mode->interlace, | ||
1038 | cmdline_mode->margins); | ||
1032 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | 1039 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
1033 | list_add(&mode->head, &fb_helper_conn->connector->modes); | 1040 | list_add(&mode->head, &fb_helper_conn->connector->modes); |
1034 | return mode; | 1041 | return mode; |
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 66c697bc9b22..56f66426207f 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c | |||
@@ -208,7 +208,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) | |||
208 | uint8_t ctl2; | 208 | uint8_t ctl2; |
209 | 209 | ||
210 | if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { | 210 | if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { |
211 | if (ctl2 & TFP410_CTL_2_HTPLG) | 211 | if (ctl2 & TFP410_CTL_2_RSEN) |
212 | ret = connector_status_connected; | 212 | ret = connector_status_connected; |
213 | else | 213 | else |
214 | ret = connector_status_disconnected; | 214 | ret = connector_status_disconnected; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 52510ad8b25d..aee83fa178f6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -620,7 +620,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) | |||
620 | drm_i915_private_t *dev_priv = dev->dev_private; | 620 | drm_i915_private_t *dev_priv = dev->dev_private; |
621 | bool sr_enabled = false; | 621 | bool sr_enabled = false; |
622 | 622 | ||
623 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev)) | 623 | if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) |
624 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; | 624 | sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; |
625 | else if (IS_I915GM(dev)) | 625 | else if (IS_I915GM(dev)) |
626 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; | 626 | sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 59a2bf8592ec..f00c5ae9556c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -128,9 +128,11 @@ static int i915_dma_cleanup(struct drm_device * dev) | |||
128 | if (dev->irq_enabled) | 128 | if (dev->irq_enabled) |
129 | drm_irq_uninstall(dev); | 129 | drm_irq_uninstall(dev); |
130 | 130 | ||
131 | mutex_lock(&dev->struct_mutex); | ||
131 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 132 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); |
132 | if (HAS_BSD(dev)) | 133 | if (HAS_BSD(dev)) |
133 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | 134 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); |
135 | mutex_unlock(&dev->struct_mutex); | ||
134 | 136 | ||
135 | /* Clear the HWS virtual address at teardown */ | 137 | /* Clear the HWS virtual address at teardown */ |
136 | if (I915_NEED_GFX_HWS(dev)) | 138 | if (I915_NEED_GFX_HWS(dev)) |
@@ -1229,7 +1231,7 @@ static void i915_warn_stolen(struct drm_device *dev) | |||
1229 | static void i915_setup_compression(struct drm_device *dev, int size) | 1231 | static void i915_setup_compression(struct drm_device *dev, int size) |
1230 | { | 1232 | { |
1231 | struct drm_i915_private *dev_priv = dev->dev_private; | 1233 | struct drm_i915_private *dev_priv = dev->dev_private; |
1232 | struct drm_mm_node *compressed_fb, *compressed_llb; | 1234 | struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); |
1233 | unsigned long cfb_base; | 1235 | unsigned long cfb_base; |
1234 | unsigned long ll_base = 0; | 1236 | unsigned long ll_base = 0; |
1235 | 1237 | ||
@@ -1410,6 +1412,10 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1410 | if (ret) | 1412 | if (ret) |
1411 | goto cleanup_vga_client; | 1413 | goto cleanup_vga_client; |
1412 | 1414 | ||
1415 | /* IIR "flip pending" bit means done if this bit is set */ | ||
1416 | if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) | ||
1417 | dev_priv->flip_pending_is_done = true; | ||
1418 | |||
1413 | intel_modeset_init(dev); | 1419 | intel_modeset_init(dev); |
1414 | 1420 | ||
1415 | ret = drm_irq_install(dev); | 1421 | ret = drm_irq_install(dev); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 276583159847..d147ab2f5bfc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -596,6 +596,7 @@ typedef struct drm_i915_private { | |||
596 | struct drm_crtc *plane_to_crtc_mapping[2]; | 596 | struct drm_crtc *plane_to_crtc_mapping[2]; |
597 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 597 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
598 | wait_queue_head_t pending_flip_queue; | 598 | wait_queue_head_t pending_flip_queue; |
599 | bool flip_pending_is_done; | ||
599 | 600 | ||
600 | /* Reclocking support */ | 601 | /* Reclocking support */ |
601 | bool render_reclock_avail; | 602 | bool render_reclock_avail; |
@@ -1076,7 +1077,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | |||
1076 | drm_i915_private_t *dev_priv = dev->dev_private; \ | 1077 | drm_i915_private_t *dev_priv = dev->dev_private; \ |
1077 | if (I915_VERBOSE) \ | 1078 | if (I915_VERBOSE) \ |
1078 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ | 1079 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ |
1079 | intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \ | 1080 | intel_ring_begin(dev, &dev_priv->render_ring, (n)); \ |
1080 | } while (0) | 1081 | } while (0) |
1081 | 1082 | ||
1082 | 1083 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9ded3dae6c87..074385882ccf 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2239,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | |||
2239 | mapping = inode->i_mapping; | 2239 | mapping = inode->i_mapping; |
2240 | for (i = 0; i < page_count; i++) { | 2240 | for (i = 0; i < page_count; i++) { |
2241 | page = read_cache_page_gfp(mapping, i, | 2241 | page = read_cache_page_gfp(mapping, i, |
2242 | mapping_gfp_mask (mapping) | | 2242 | GFP_HIGHUSER | |
2243 | __GFP_COLD | | 2243 | __GFP_COLD | |
2244 | gfpmask); | 2244 | gfpmask); |
2245 | if (IS_ERR(page)) | 2245 | if (IS_ERR(page)) |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2479be001e40..dba53d4b9fb3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -940,22 +940,30 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
940 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) | 940 | if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) |
941 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | 941 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); |
942 | 942 | ||
943 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) | 943 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { |
944 | intel_prepare_page_flip(dev, 0); | 944 | intel_prepare_page_flip(dev, 0); |
945 | if (dev_priv->flip_pending_is_done) | ||
946 | intel_finish_page_flip_plane(dev, 0); | ||
947 | } | ||
945 | 948 | ||
946 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) | 949 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { |
947 | intel_prepare_page_flip(dev, 1); | 950 | intel_prepare_page_flip(dev, 1); |
951 | if (dev_priv->flip_pending_is_done) | ||
952 | intel_finish_page_flip_plane(dev, 1); | ||
953 | } | ||
948 | 954 | ||
949 | if (pipea_stats & vblank_status) { | 955 | if (pipea_stats & vblank_status) { |
950 | vblank++; | 956 | vblank++; |
951 | drm_handle_vblank(dev, 0); | 957 | drm_handle_vblank(dev, 0); |
952 | intel_finish_page_flip(dev, 0); | 958 | if (!dev_priv->flip_pending_is_done) |
959 | intel_finish_page_flip(dev, 0); | ||
953 | } | 960 | } |
954 | 961 | ||
955 | if (pipeb_stats & vblank_status) { | 962 | if (pipeb_stats & vblank_status) { |
956 | vblank++; | 963 | vblank++; |
957 | drm_handle_vblank(dev, 1); | 964 | drm_handle_vblank(dev, 1); |
958 | intel_finish_page_flip(dev, 1); | 965 | if (!dev_priv->flip_pending_is_done) |
966 | intel_finish_page_flip(dev, 1); | ||
959 | } | 967 | } |
960 | 968 | ||
961 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || | 969 | if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || |
@@ -1387,29 +1395,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1387 | dev_priv->pipestat[1] = 0; | 1395 | dev_priv->pipestat[1] = 0; |
1388 | 1396 | ||
1389 | if (I915_HAS_HOTPLUG(dev)) { | 1397 | if (I915_HAS_HOTPLUG(dev)) { |
1390 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||
1391 | |||
1392 | /* Note HDMI and DP share bits */ | ||
1393 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||
1394 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
1395 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
1396 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
1397 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
1398 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
1399 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
1400 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
1401 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
1402 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
1403 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
1404 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
1405 | /* Ignore TV since it's buggy */ | ||
1406 | |||
1407 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
1408 | |||
1409 | /* Enable in IER... */ | 1398 | /* Enable in IER... */ |
1410 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1399 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1411 | /* and unmask in IMR */ | 1400 | /* and unmask in IMR */ |
1412 | i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); | 1401 | dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; |
1413 | } | 1402 | } |
1414 | 1403 | ||
1415 | /* | 1404 | /* |
@@ -1427,16 +1416,41 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1427 | } | 1416 | } |
1428 | I915_WRITE(EMR, error_mask); | 1417 | I915_WRITE(EMR, error_mask); |
1429 | 1418 | ||
1430 | /* Disable pipe interrupt enables, clear pending pipe status */ | ||
1431 | I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); | ||
1432 | I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); | ||
1433 | /* Clear pending interrupt status */ | ||
1434 | I915_WRITE(IIR, I915_READ(IIR)); | ||
1435 | |||
1436 | I915_WRITE(IER, enable_mask); | ||
1437 | I915_WRITE(IMR, dev_priv->irq_mask_reg); | 1419 | I915_WRITE(IMR, dev_priv->irq_mask_reg); |
1420 | I915_WRITE(IER, enable_mask); | ||
1438 | (void) I915_READ(IER); | 1421 | (void) I915_READ(IER); |
1439 | 1422 | ||
1423 | if (I915_HAS_HOTPLUG(dev)) { | ||
1424 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||
1425 | |||
1426 | /* Note HDMI and DP share bits */ | ||
1427 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||
1428 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
1429 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
1430 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
1431 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
1432 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
1433 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
1434 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
1435 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
1436 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
1437 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | ||
1438 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
1439 | |||
1440 | /* Programming the CRT detection parameters tends | ||
1441 | to generate a spurious hotplug event about three | ||
1442 | seconds later. So just do it once. | ||
1443 | */ | ||
1444 | if (IS_G4X(dev)) | ||
1445 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
1446 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
1447 | } | ||
1448 | |||
1449 | /* Ignore TV since it's buggy */ | ||
1450 | |||
1451 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
1452 | } | ||
1453 | |||
1440 | opregion_enable_asle(dev); | 1454 | opregion_enable_asle(dev); |
1441 | 1455 | ||
1442 | return 0; | 1456 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 64b0a3afd92b..150400f40534 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -178,6 +178,7 @@ | |||
178 | #define MI_OVERLAY_OFF (0x2<<21) | 178 | #define MI_OVERLAY_OFF (0x2<<21) |
179 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) | 179 | #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) |
180 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | 180 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) |
181 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) | ||
181 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | 182 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) |
182 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 183 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
183 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 184 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
@@ -368,6 +369,9 @@ | |||
368 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) | 369 | #define CM0_RC_OP_FLUSH_DISABLE (1<<0) |
369 | #define BB_ADDR 0x02140 /* 8 bytes */ | 370 | #define BB_ADDR 0x02140 /* 8 bytes */ |
370 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ | 371 | #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ |
372 | #define ECOSKPD 0x021d0 | ||
373 | #define ECO_GATING_CX_ONLY (1<<3) | ||
374 | #define ECO_FLIP_DONE (1<<0) | ||
371 | 375 | ||
372 | /* GEN6 interrupt control */ | 376 | /* GEN6 interrupt control */ |
373 | #define GEN6_RENDER_HWSTAM 0x2098 | 377 | #define GEN6_RENDER_HWSTAM 0x2098 |
@@ -1130,7 +1134,6 @@ | |||
1130 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) | 1134 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) |
1131 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) | 1135 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) |
1132 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 1136 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
1133 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | ||
1134 | 1137 | ||
1135 | #define PORT_HOTPLUG_STAT 0x61114 | 1138 | #define PORT_HOTPLUG_STAT 0x61114 |
1136 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 1139 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 22ff38455731..ee0732b222a1 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -234,14 +234,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
234 | else | 234 | else |
235 | tries = 1; | 235 | tries = 1; |
236 | hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); | 236 | hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); |
237 | hotplug_en &= CRT_HOTPLUG_MASK; | ||
238 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; | 237 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; |
239 | 238 | ||
240 | if (IS_G4X(dev)) | ||
241 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
242 | |||
243 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
244 | |||
245 | for (i = 0; i < tries ; i++) { | 239 | for (i = 0; i < tries ; i++) { |
246 | unsigned long timeout; | 240 | unsigned long timeout; |
247 | /* turn on the FORCE_DETECT */ | 241 | /* turn on the FORCE_DETECT */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index cc8131ff319f..68dcf36e2793 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2970,11 +2970,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2970 | if (srwm < 0) | 2970 | if (srwm < 0) |
2971 | srwm = 1; | 2971 | srwm = 1; |
2972 | srwm &= 0x3f; | 2972 | srwm &= 0x3f; |
2973 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2973 | if (IS_I965GM(dev)) |
2974 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
2974 | } else { | 2975 | } else { |
2975 | /* Turn off self refresh if both pipes are enabled */ | 2976 | /* Turn off self refresh if both pipes are enabled */ |
2976 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | 2977 | if (IS_I965GM(dev)) |
2977 | & ~FW_BLC_SELF_EN); | 2978 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) |
2979 | & ~FW_BLC_SELF_EN); | ||
2978 | } | 2980 | } |
2979 | 2981 | ||
2980 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | 2982 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
@@ -4483,6 +4485,7 @@ static void intel_idle_update(struct work_struct *work) | |||
4483 | struct drm_device *dev = dev_priv->dev; | 4485 | struct drm_device *dev = dev_priv->dev; |
4484 | struct drm_crtc *crtc; | 4486 | struct drm_crtc *crtc; |
4485 | struct intel_crtc *intel_crtc; | 4487 | struct intel_crtc *intel_crtc; |
4488 | int enabled = 0; | ||
4486 | 4489 | ||
4487 | if (!i915_powersave) | 4490 | if (!i915_powersave) |
4488 | return; | 4491 | return; |
@@ -4491,21 +4494,22 @@ static void intel_idle_update(struct work_struct *work) | |||
4491 | 4494 | ||
4492 | i915_update_gfx_val(dev_priv); | 4495 | i915_update_gfx_val(dev_priv); |
4493 | 4496 | ||
4494 | if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
4495 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
4496 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
4497 | } | ||
4498 | |||
4499 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4497 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
4500 | /* Skip inactive CRTCs */ | 4498 | /* Skip inactive CRTCs */ |
4501 | if (!crtc->fb) | 4499 | if (!crtc->fb) |
4502 | continue; | 4500 | continue; |
4503 | 4501 | ||
4502 | enabled++; | ||
4504 | intel_crtc = to_intel_crtc(crtc); | 4503 | intel_crtc = to_intel_crtc(crtc); |
4505 | if (!intel_crtc->busy) | 4504 | if (!intel_crtc->busy) |
4506 | intel_decrease_pllclock(crtc); | 4505 | intel_decrease_pllclock(crtc); |
4507 | } | 4506 | } |
4508 | 4507 | ||
4508 | if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { | ||
4509 | DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); | ||
4510 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | ||
4511 | } | ||
4512 | |||
4509 | mutex_unlock(&dev->struct_mutex); | 4513 | mutex_unlock(&dev->struct_mutex); |
4510 | } | 4514 | } |
4511 | 4515 | ||
@@ -4601,10 +4605,10 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4601 | kfree(work); | 4605 | kfree(work); |
4602 | } | 4606 | } |
4603 | 4607 | ||
4604 | void intel_finish_page_flip(struct drm_device *dev, int pipe) | 4608 | static void do_intel_finish_page_flip(struct drm_device *dev, |
4609 | struct drm_crtc *crtc) | ||
4605 | { | 4610 | { |
4606 | drm_i915_private_t *dev_priv = dev->dev_private; | 4611 | drm_i915_private_t *dev_priv = dev->dev_private; |
4607 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
4608 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4612 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4609 | struct intel_unpin_work *work; | 4613 | struct intel_unpin_work *work; |
4610 | struct drm_i915_gem_object *obj_priv; | 4614 | struct drm_i915_gem_object *obj_priv; |
@@ -4648,6 +4652,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4648 | schedule_work(&work->work); | 4652 | schedule_work(&work->work); |
4649 | } | 4653 | } |
4650 | 4654 | ||
4655 | void intel_finish_page_flip(struct drm_device *dev, int pipe) | ||
4656 | { | ||
4657 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4658 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
4659 | |||
4660 | do_intel_finish_page_flip(dev, crtc); | ||
4661 | } | ||
4662 | |||
4663 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane) | ||
4664 | { | ||
4665 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4666 | struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; | ||
4667 | |||
4668 | do_intel_finish_page_flip(dev, crtc); | ||
4669 | } | ||
4670 | |||
4651 | void intel_prepare_page_flip(struct drm_device *dev, int plane) | 4671 | void intel_prepare_page_flip(struct drm_device *dev, int plane) |
4652 | { | 4672 | { |
4653 | drm_i915_private_t *dev_priv = dev->dev_private; | 4673 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -4678,6 +4698,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4678 | unsigned long flags; | 4698 | unsigned long flags; |
4679 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; | 4699 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
4680 | int ret, pipesrc; | 4700 | int ret, pipesrc; |
4701 | u32 flip_mask; | ||
4681 | 4702 | ||
4682 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4703 | work = kzalloc(sizeof *work, GFP_KERNEL); |
4683 | if (work == NULL) | 4704 | if (work == NULL) |
@@ -4731,15 +4752,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4731 | atomic_inc(&obj_priv->pending_flip); | 4752 | atomic_inc(&obj_priv->pending_flip); |
4732 | work->pending_flip_obj = obj; | 4753 | work->pending_flip_obj = obj; |
4733 | 4754 | ||
4755 | if (intel_crtc->plane) | ||
4756 | flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | ||
4757 | else | ||
4758 | flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; | ||
4759 | |||
4760 | /* Wait for any previous flip to finish */ | ||
4761 | if (IS_GEN3(dev)) | ||
4762 | while (I915_READ(ISR) & flip_mask) | ||
4763 | ; | ||
4764 | |||
4734 | BEGIN_LP_RING(4); | 4765 | BEGIN_LP_RING(4); |
4735 | OUT_RING(MI_DISPLAY_FLIP | | ||
4736 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
4737 | OUT_RING(fb->pitch); | ||
4738 | if (IS_I965G(dev)) { | 4766 | if (IS_I965G(dev)) { |
4767 | OUT_RING(MI_DISPLAY_FLIP | | ||
4768 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
4769 | OUT_RING(fb->pitch); | ||
4739 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 4770 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); |
4740 | pipesrc = I915_READ(pipesrc_reg); | 4771 | pipesrc = I915_READ(pipesrc_reg); |
4741 | OUT_RING(pipesrc & 0x0fff0fff); | 4772 | OUT_RING(pipesrc & 0x0fff0fff); |
4742 | } else { | 4773 | } else { |
4774 | OUT_RING(MI_DISPLAY_FLIP_I915 | | ||
4775 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
4776 | OUT_RING(fb->pitch); | ||
4743 | OUT_RING(obj_priv->gtt_offset); | 4777 | OUT_RING(obj_priv->gtt_offset); |
4744 | OUT_RING(MI_NOOP); | 4778 | OUT_RING(MI_NOOP); |
4745 | } | 4779 | } |
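Note on the page-flip changes above: pre-965 parts emit the MI_DISPLAY_FLIP_I915 command and must not queue a new flip while one is still pending, so on gen3 the driver selects the per-plane flip-pending bit and busy-waits on ISR until it clears; the completion side is likewise split into per-pipe and per-plane entry points. A standalone sketch of that wait-then-queue ordering, with a simulated status register and invented bit positions (not the driver's definitions):

/* Illustrative sketch, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define PLANE_A_FLIP_PENDING (1u << 2)	/* invented bit positions */
#define PLANE_B_FLIP_PENDING (1u << 6)

static uint32_t isr = PLANE_B_FLIP_PENDING;	/* a flip is still in flight */

static uint32_t read_isr(void)
{
	uint32_t v = isr;
	isr &= ~PLANE_B_FLIP_PENDING;	/* pretend the pending flip retires */
	return v;
}

static void queue_flip(int plane)
{
	uint32_t flip_mask = plane ? PLANE_B_FLIP_PENDING
				   : PLANE_A_FLIP_PENDING;

	while (read_isr() & flip_mask)	/* gen3-style busy wait */
		;
	printf("flip emitted for plane %d\n", plane);
}

int main(void)
{
	queue_flip(1);
	return 0;
}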
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 49b54f05d3cf..1aac59e83bff 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -136,6 +136,12 @@ intel_dp_link_required(struct drm_device *dev, | |||
136 | } | 136 | } |
137 | 137 | ||
138 | static int | 138 | static int |
139 | intel_dp_max_data_rate(int max_link_clock, int max_lanes) | ||
140 | { | ||
141 | return (max_link_clock * max_lanes * 8) / 10; | ||
142 | } | ||
143 | |||
144 | static int | ||
139 | intel_dp_mode_valid(struct drm_connector *connector, | 145 | intel_dp_mode_valid(struct drm_connector *connector, |
140 | struct drm_display_mode *mode) | 146 | struct drm_display_mode *mode) |
141 | { | 147 | { |
@@ -144,8 +150,11 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
144 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); | 150 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); |
145 | int max_lanes = intel_dp_max_lane_count(intel_encoder); | 151 | int max_lanes = intel_dp_max_lane_count(intel_encoder); |
146 | 152 | ||
147 | if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) | 153 | /* only refuse the mode on non-eDP since we have seen some weird eDP panels |
148 | > max_link_clock * max_lanes) | 154 | which are outside spec tolerances but somehow work by magic */ |
155 | if (!IS_eDP(intel_encoder) && | ||
156 | (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) | ||
157 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | ||
149 | return MODE_CLOCK_HIGH; | 158 | return MODE_CLOCK_HIGH; |
150 | 159 | ||
151 | if (mode->clock < 10000) | 160 | if (mode->clock < 10000) |
@@ -506,7 +515,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
506 | 515 | ||
507 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 516 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
508 | for (clock = 0; clock <= max_clock; clock++) { | 517 | for (clock = 0; clock <= max_clock; clock++) { |
509 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 518 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
510 | 519 | ||
511 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) | 520 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) |
512 | <= link_avail) { | 521 | <= link_avail) { |
@@ -521,6 +530,18 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
521 | } | 530 | } |
522 | } | 531 | } |
523 | } | 532 | } |
533 | |||
534 | if (IS_eDP(intel_encoder)) { | ||
535 | /* okay, we failed, just pick the highest */ | ||
536 | dp_priv->lane_count = max_lane_count; | ||
537 | dp_priv->link_bw = bws[max_clock]; | ||
538 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | ||
539 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
540 | "count %d clock %d\n", | ||
541 | dp_priv->link_bw, dp_priv->lane_count, | ||
542 | adjusted_mode->clock); | ||
543 | return true; | ||
544 | } | ||
524 | return false; | 545 | return false; |
525 | } | 546 | } |
526 | 547 | ||
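Note on the intel_dp.c hunks above: the new intel_dp_max_data_rate() helper folds in DisplayPort's 8b/10b line coding, where only 8 of every 10 bits on the wire carry payload, so the usable rate is link clock x lanes x 8/10; mode validation and link configuration now compare the required bandwidth against that figure, and eDP panels are exempted and forced to the highest link configuration instead of being rejected. A self-contained illustration of the arithmetic (the inputs are just example numbers in the driver's units):

/* Standalone illustration of the 8b/10b overhead applied by
 * intel_dp_max_data_rate(). */
#include <stdio.h>

static int dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* of every 10 symbols on the link, only 8 carry payload */
	return (max_link_clock * max_lanes * 8) / 10;
}

int main(void)
{
	/* e.g. a 2.7 GHz-class link (270000 in driver units) across 4 lanes */
	printf("%d\n", dp_max_data_rate(270000, 4));	/* prints 864000 */
	return 0;
}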
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index df931f787665..72206f37c4fb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -224,6 +224,7 @@ extern void intel_fbdev_fini(struct drm_device *dev); | |||
224 | 224 | ||
225 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); | 225 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
226 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); | 226 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); |
227 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | ||
227 | 228 | ||
228 | extern void intel_setup_overlay(struct drm_device *dev); | 229 | extern void intel_setup_overlay(struct drm_device *dev); |
229 | extern void intel_cleanup_overlay(struct drm_device *dev); | 230 | extern void intel_cleanup_overlay(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6a1accd83aec..31df55f0a0a7 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -983,8 +983,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
983 | 983 | ||
984 | drm_connector_attach_property(&intel_connector->base, | 984 | drm_connector_attach_property(&intel_connector->base, |
985 | dev->mode_config.scaling_mode_property, | 985 | dev->mode_config.scaling_mode_property, |
986 | DRM_MODE_SCALE_FULLSCREEN); | 986 | DRM_MODE_SCALE_ASPECT); |
987 | lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; | 987 | lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; |
988 | /* | 988 | /* |
989 | * LVDS discovery: | 989 | * LVDS discovery: |
990 | * 1) check for EDID on DDC | 990 | * 1) check for EDID on DDC |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cea4f1a8709e..26362f8495a8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -94,7 +94,7 @@ render_ring_flush(struct drm_device *dev, | |||
94 | #if WATCH_EXEC | 94 | #if WATCH_EXEC |
95 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | 95 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); |
96 | #endif | 96 | #endif |
97 | intel_ring_begin(dev, ring, 8); | 97 | intel_ring_begin(dev, ring, 2); |
98 | intel_ring_emit(dev, ring, cmd); | 98 | intel_ring_emit(dev, ring, cmd); |
99 | intel_ring_emit(dev, ring, MI_NOOP); | 99 | intel_ring_emit(dev, ring, MI_NOOP); |
100 | intel_ring_advance(dev, ring); | 100 | intel_ring_advance(dev, ring); |
@@ -358,7 +358,7 @@ bsd_ring_flush(struct drm_device *dev, | |||
358 | u32 invalidate_domains, | 358 | u32 invalidate_domains, |
359 | u32 flush_domains) | 359 | u32 flush_domains) |
360 | { | 360 | { |
361 | intel_ring_begin(dev, ring, 8); | 361 | intel_ring_begin(dev, ring, 2); |
362 | intel_ring_emit(dev, ring, MI_FLUSH); | 362 | intel_ring_emit(dev, ring, MI_FLUSH); |
363 | intel_ring_emit(dev, ring, MI_NOOP); | 363 | intel_ring_emit(dev, ring, MI_NOOP); |
364 | intel_ring_advance(dev, ring); | 364 | intel_ring_advance(dev, ring); |
@@ -687,6 +687,7 @@ int intel_wrap_ring_buffer(struct drm_device *dev, | |||
687 | *virt++ = MI_NOOP; | 687 | *virt++ = MI_NOOP; |
688 | 688 | ||
689 | ring->tail = 0; | 689 | ring->tail = 0; |
690 | ring->space = ring->head - 8; | ||
690 | 691 | ||
691 | return 0; | 692 | return 0; |
692 | } | 693 | } |
@@ -721,8 +722,9 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
721 | } | 722 | } |
722 | 723 | ||
723 | void intel_ring_begin(struct drm_device *dev, | 724 | void intel_ring_begin(struct drm_device *dev, |
724 | struct intel_ring_buffer *ring, int n) | 725 | struct intel_ring_buffer *ring, int num_dwords) |
725 | { | 726 | { |
727 | int n = 4*num_dwords; | ||
726 | if (unlikely(ring->tail + n > ring->size)) | 728 | if (unlikely(ring->tail + n > ring->size)) |
727 | intel_wrap_ring_buffer(dev, ring); | 729 | intel_wrap_ring_buffer(dev, ring); |
728 | if (unlikely(ring->space < n)) | 730 | if (unlikely(ring->space < n)) |
@@ -752,7 +754,7 @@ void intel_fill_struct(struct drm_device *dev, | |||
752 | { | 754 | { |
753 | unsigned int *virt = ring->virtual_start + ring->tail; | 755 | unsigned int *virt = ring->virtual_start + ring->tail; |
754 | BUG_ON((len&~(4-1)) != 0); | 756 | BUG_ON((len&~(4-1)) != 0); |
755 | intel_ring_begin(dev, ring, len); | 757 | intel_ring_begin(dev, ring, len/4); |
756 | memcpy(virt, data, len); | 758 | memcpy(virt, data, len); |
757 | ring->tail += len; | 759 | ring->tail += len; |
758 | ring->tail &= ring->size - 1; | 760 | ring->tail &= ring->size - 1; |
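Note on the intel_ringbuffer.c hunks above: intel_ring_begin() now takes a count of dwords instead of bytes and converts internally with n = 4*num_dwords, so a two-command emit such as MI_FLUSH + MI_NOOP passes 2 rather than 8, and intel_fill_struct() divides its byte length by 4; the wrap path also recomputes ring->space after resetting the tail. A small standalone sketch of the dword-to-byte bookkeeping (the struct below is invented, not the driver's ring type):

/* Sketch of the dword-count convention, not the real driver structures. */
#include <stdio.h>

struct ring {
	int size;	/* bytes */
	int head;	/* bytes */
	int tail;	/* bytes */
	int space;	/* bytes */
};

static void ring_begin(struct ring *ring, int num_dwords)
{
	int n = 4 * num_dwords;		/* each dword is 4 bytes */

	if (ring->tail + n > ring->size)
		printf("would wrap: reset tail, space = head - 8\n");
	if (ring->space < n)
		printf("would wait for %d bytes of space\n", n);
}

int main(void)
{
	struct ring r = { .size = 4096, .head = 0, .tail = 4092, .space = 64 };

	ring_begin(&r, 2);	/* a flush is MI_FLUSH + MI_NOOP: two dwords */
	return 0;
}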
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index f3f2827017ef..8c2d6478a221 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -498,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
498 | if ((rdev->family == CHIP_RS600) || | 498 | if ((rdev->family == CHIP_RS600) || |
499 | (rdev->family == CHIP_RS690) || | 499 | (rdev->family == CHIP_RS690) || |
500 | (rdev->family == CHIP_RS740)) | 500 | (rdev->family == CHIP_RS740)) |
501 | pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | 501 | pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ |
502 | RADEON_PLL_PREFER_CLOSEST_LOWER); | 502 | RADEON_PLL_PREFER_CLOSEST_LOWER); |
503 | 503 | ||
504 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 504 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4b6623df3b96..1caf625e472b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -607,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev) | |||
607 | WREG32(MC_VM_FB_LOCATION, tmp); | 607 | WREG32(MC_VM_FB_LOCATION, tmp); |
608 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 608 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
609 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 609 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
610 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 610 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
611 | if (rdev->flags & RADEON_IS_AGP) { | 611 | if (rdev->flags & RADEON_IS_AGP) { |
612 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 612 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
613 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 613 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
@@ -1222,11 +1222,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1222 | ps_thread_count = 128; | 1222 | ps_thread_count = 128; |
1223 | 1223 | ||
1224 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); | 1224 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); |
1225 | sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1225 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
1226 | sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1226 | sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
1227 | sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1227 | sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
1228 | sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1228 | sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
1229 | sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; | 1229 | sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
1230 | 1230 | ||
1231 | sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | 1231 | sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); |
1232 | sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | 1232 | sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); |
@@ -1260,6 +1260,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1260 | WREG32(VGT_GS_VERTEX_REUSE, 16); | 1260 | WREG32(VGT_GS_VERTEX_REUSE, 16); |
1261 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | 1261 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); |
1262 | 1262 | ||
1263 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); | ||
1264 | WREG32(VGT_OUT_DEALLOC_CNTL, 16); | ||
1265 | |||
1263 | WREG32(CB_PERF_CTR0_SEL_0, 0); | 1266 | WREG32(CB_PERF_CTR0_SEL_0, 0); |
1264 | WREG32(CB_PERF_CTR0_SEL_1, 0); | 1267 | WREG32(CB_PERF_CTR0_SEL_1, 0); |
1265 | WREG32(CB_PERF_CTR1_SEL_0, 0); | 1268 | WREG32(CB_PERF_CTR1_SEL_0, 0); |
@@ -1269,6 +1272,26 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1269 | WREG32(CB_PERF_CTR3_SEL_0, 0); | 1272 | WREG32(CB_PERF_CTR3_SEL_0, 0); |
1270 | WREG32(CB_PERF_CTR3_SEL_1, 0); | 1273 | WREG32(CB_PERF_CTR3_SEL_1, 0); |
1271 | 1274 | ||
1275 | /* clear render buffer base addresses */ | ||
1276 | WREG32(CB_COLOR0_BASE, 0); | ||
1277 | WREG32(CB_COLOR1_BASE, 0); | ||
1278 | WREG32(CB_COLOR2_BASE, 0); | ||
1279 | WREG32(CB_COLOR3_BASE, 0); | ||
1280 | WREG32(CB_COLOR4_BASE, 0); | ||
1281 | WREG32(CB_COLOR5_BASE, 0); | ||
1282 | WREG32(CB_COLOR6_BASE, 0); | ||
1283 | WREG32(CB_COLOR7_BASE, 0); | ||
1284 | WREG32(CB_COLOR8_BASE, 0); | ||
1285 | WREG32(CB_COLOR9_BASE, 0); | ||
1286 | WREG32(CB_COLOR10_BASE, 0); | ||
1287 | WREG32(CB_COLOR11_BASE, 0); | ||
1288 | |||
1289 | /* set the shader const cache sizes to 0 */ | ||
1290 | for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) | ||
1291 | WREG32(i, 0); | ||
1292 | for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) | ||
1293 | WREG32(i, 0); | ||
1294 | |||
1272 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); | 1295 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); |
1273 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); | 1296 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); |
1274 | 1297 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 64516b950891..010963d4570f 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -1197,7 +1197,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1197 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | 1197 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); |
1198 | return -EINVAL; | 1198 | return -EINVAL; |
1199 | } | 1199 | } |
1200 | ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1200 | ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1201 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | 1201 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
1202 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); | 1202 | ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); |
1203 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | 1203 | else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
@@ -1209,7 +1209,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, | |||
1209 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); | 1209 | DRM_ERROR("bad SET_RESOURCE (tex)\n"); |
1210 | return -EINVAL; | 1210 | return -EINVAL; |
1211 | } | 1211 | } |
1212 | ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | 1212 | ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); |
1213 | mipmap = reloc->robj; | 1213 | mipmap = reloc->robj; |
1214 | r = evergreen_check_texture_resource(p, idx+1+(i*8), | 1214 | r = evergreen_check_texture_resource(p, idx+1+(i*8), |
1215 | texture, mipmap); | 1215 | texture, mipmap); |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 79683f6b4452..a1cd621780e2 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -713,6 +713,9 @@ | |||
713 | #define SQ_GSVS_RING_OFFSET_2 0x28930 | 713 | #define SQ_GSVS_RING_OFFSET_2 0x28930 |
714 | #define SQ_GSVS_RING_OFFSET_3 0x28934 | 714 | #define SQ_GSVS_RING_OFFSET_3 0x28934 |
715 | 715 | ||
716 | #define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140 | ||
717 | #define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80 | ||
718 | |||
716 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 | 719 | #define SQ_ALU_CONST_CACHE_PS_0 0x28940 |
717 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 | 720 | #define SQ_ALU_CONST_CACHE_PS_1 0x28944 |
718 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 | 721 | #define SQ_ALU_CONST_CACHE_PS_2 0x28948 |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index cf89aa2eb28c..3970e62eaab8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -1628,6 +1628,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1628 | case RADEON_TXFORMAT_RGB332: | 1628 | case RADEON_TXFORMAT_RGB332: |
1629 | case RADEON_TXFORMAT_Y8: | 1629 | case RADEON_TXFORMAT_Y8: |
1630 | track->textures[i].cpp = 1; | 1630 | track->textures[i].cpp = 1; |
1631 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
1631 | break; | 1632 | break; |
1632 | case RADEON_TXFORMAT_AI88: | 1633 | case RADEON_TXFORMAT_AI88: |
1633 | case RADEON_TXFORMAT_ARGB1555: | 1634 | case RADEON_TXFORMAT_ARGB1555: |
@@ -1639,12 +1640,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1639 | case RADEON_TXFORMAT_LDUDV655: | 1640 | case RADEON_TXFORMAT_LDUDV655: |
1640 | case RADEON_TXFORMAT_DUDV88: | 1641 | case RADEON_TXFORMAT_DUDV88: |
1641 | track->textures[i].cpp = 2; | 1642 | track->textures[i].cpp = 2; |
1643 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
1642 | break; | 1644 | break; |
1643 | case RADEON_TXFORMAT_ARGB8888: | 1645 | case RADEON_TXFORMAT_ARGB8888: |
1644 | case RADEON_TXFORMAT_RGBA8888: | 1646 | case RADEON_TXFORMAT_RGBA8888: |
1645 | case RADEON_TXFORMAT_SHADOW32: | 1647 | case RADEON_TXFORMAT_SHADOW32: |
1646 | case RADEON_TXFORMAT_LDUDUV8888: | 1648 | case RADEON_TXFORMAT_LDUDUV8888: |
1647 | track->textures[i].cpp = 4; | 1649 | track->textures[i].cpp = 4; |
1650 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
1648 | break; | 1651 | break; |
1649 | case RADEON_TXFORMAT_DXT1: | 1652 | case RADEON_TXFORMAT_DXT1: |
1650 | track->textures[i].cpp = 1; | 1653 | track->textures[i].cpp = 1; |
@@ -2604,12 +2607,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
2604 | int surf_index = reg * 16; | 2607 | int surf_index = reg * 16; |
2605 | int flags = 0; | 2608 | int flags = 0; |
2606 | 2609 | ||
2607 | /* r100/r200 divide by 16 */ | ||
2608 | if (rdev->family < CHIP_R300) | ||
2609 | flags = pitch / 16; | ||
2610 | else | ||
2611 | flags = pitch / 8; | ||
2612 | |||
2613 | if (rdev->family <= CHIP_RS200) { | 2610 | if (rdev->family <= CHIP_RS200) { |
2614 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | 2611 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
2615 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | 2612 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) |
@@ -2633,6 +2630,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
2633 | if (tiling_flags & RADEON_TILING_SWAP_32BIT) | 2630 | if (tiling_flags & RADEON_TILING_SWAP_32BIT) |
2634 | flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; | 2631 | flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; |
2635 | 2632 | ||
2633 | /* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */ | ||
2634 | if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { | ||
2635 | if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) | ||
2636 | if (ASIC_IS_RN50(rdev)) | ||
2637 | pitch /= 16; | ||
2638 | } | ||
2639 | |||
2640 | /* r100/r200 divide by 16 */ | ||
2641 | if (rdev->family < CHIP_R300) | ||
2642 | flags |= pitch / 16; | ||
2643 | else | ||
2644 | flags |= pitch / 8; | ||
2645 | |||
2646 | |||
2636 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); | 2647 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); |
2637 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); | 2648 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); |
2638 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); | 2649 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); |
@@ -3147,33 +3158,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t) | |||
3147 | DRM_ERROR("compress format %d\n", t->compress_format); | 3158 | DRM_ERROR("compress format %d\n", t->compress_format); |
3148 | } | 3159 | } |
3149 | 3160 | ||
3150 | static int r100_cs_track_cube(struct radeon_device *rdev, | ||
3151 | struct r100_cs_track *track, unsigned idx) | ||
3152 | { | ||
3153 | unsigned face, w, h; | ||
3154 | struct radeon_bo *cube_robj; | ||
3155 | unsigned long size; | ||
3156 | |||
3157 | for (face = 0; face < 5; face++) { | ||
3158 | cube_robj = track->textures[idx].cube_info[face].robj; | ||
3159 | w = track->textures[idx].cube_info[face].width; | ||
3160 | h = track->textures[idx].cube_info[face].height; | ||
3161 | |||
3162 | size = w * h; | ||
3163 | size *= track->textures[idx].cpp; | ||
3164 | |||
3165 | size += track->textures[idx].cube_info[face].offset; | ||
3166 | |||
3167 | if (size > radeon_bo_size(cube_robj)) { | ||
3168 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | ||
3169 | size, radeon_bo_size(cube_robj)); | ||
3170 | r100_cs_track_texture_print(&track->textures[idx]); | ||
3171 | return -1; | ||
3172 | } | ||
3173 | } | ||
3174 | return 0; | ||
3175 | } | ||
3176 | |||
3177 | static int r100_track_compress_size(int compress_format, int w, int h) | 3161 | static int r100_track_compress_size(int compress_format, int w, int h) |
3178 | { | 3162 | { |
3179 | int block_width, block_height, block_bytes; | 3163 | int block_width, block_height, block_bytes; |
@@ -3204,6 +3188,37 @@ static int r100_track_compress_size(int compress_format, int w, int h) | |||
3204 | return sz; | 3188 | return sz; |
3205 | } | 3189 | } |
3206 | 3190 | ||
3191 | static int r100_cs_track_cube(struct radeon_device *rdev, | ||
3192 | struct r100_cs_track *track, unsigned idx) | ||
3193 | { | ||
3194 | unsigned face, w, h; | ||
3195 | struct radeon_bo *cube_robj; | ||
3196 | unsigned long size; | ||
3197 | unsigned compress_format = track->textures[idx].compress_format; | ||
3198 | |||
3199 | for (face = 0; face < 5; face++) { | ||
3200 | cube_robj = track->textures[idx].cube_info[face].robj; | ||
3201 | w = track->textures[idx].cube_info[face].width; | ||
3202 | h = track->textures[idx].cube_info[face].height; | ||
3203 | |||
3204 | if (compress_format) { | ||
3205 | size = r100_track_compress_size(compress_format, w, h); | ||
3206 | } else | ||
3207 | size = w * h; | ||
3208 | size *= track->textures[idx].cpp; | ||
3209 | |||
3210 | size += track->textures[idx].cube_info[face].offset; | ||
3211 | |||
3212 | if (size > radeon_bo_size(cube_robj)) { | ||
3213 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | ||
3214 | size, radeon_bo_size(cube_robj)); | ||
3215 | r100_cs_track_texture_print(&track->textures[idx]); | ||
3216 | return -1; | ||
3217 | } | ||
3218 | } | ||
3219 | return 0; | ||
3220 | } | ||
3221 | |||
3207 | static int r100_cs_track_texture_check(struct radeon_device *rdev, | 3222 | static int r100_cs_track_texture_check(struct radeon_device *rdev, |
3208 | struct r100_cs_track *track) | 3223 | struct r100_cs_track *track) |
3209 | { | 3224 | { |
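Note on the r100.c hunks above: the cube-map size check now goes through r100_track_compress_size() whenever a compressed format is bound, since block-compressed textures are sized per 4x4 block rather than width x height x cpp (the cube checker is moved after that helper so it can call it). A standalone sketch of the general block-compression rule, assuming the usual DXT1 figure of 8 bytes per block and 16 for DXT3/5; this mirrors the idea but is not copied from the driver:

/* General block-compression size rule; illustrative only. */
#include <stdio.h>

static long compressed_size(int w, int h, int block_bytes)
{
	int bw = (w + 3) / 4;	/* blocks are 4x4, round partial blocks up */
	int bh = (h + 3) / 4;

	return (long)bw * bh * block_bytes;
}

int main(void)
{
	printf("128x128 DXT1: %ld bytes\n", compressed_size(128, 128, 8));
	/* 32 * 32 * 8 = 8192 */
	return 0;
}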
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 85617c311212..0266d72e0a4c 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
415 | /* 2D, 3D, CUBE */ | 415 | /* 2D, 3D, CUBE */ |
416 | switch (tmp) { | 416 | switch (tmp) { |
417 | case 0: | 417 | case 0: |
418 | case 3: | ||
419 | case 4: | ||
418 | case 5: | 420 | case 5: |
419 | case 6: | 421 | case 6: |
420 | case 7: | 422 | case 7: |
@@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
450 | case R200_TXFORMAT_RGB332: | 452 | case R200_TXFORMAT_RGB332: |
451 | case R200_TXFORMAT_Y8: | 453 | case R200_TXFORMAT_Y8: |
452 | track->textures[i].cpp = 1; | 454 | track->textures[i].cpp = 1; |
455 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
453 | break; | 456 | break; |
454 | case R200_TXFORMAT_AI88: | 457 | case R200_TXFORMAT_AI88: |
455 | case R200_TXFORMAT_ARGB1555: | 458 | case R200_TXFORMAT_ARGB1555: |
@@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
461 | case R200_TXFORMAT_DVDU88: | 464 | case R200_TXFORMAT_DVDU88: |
462 | case R200_TXFORMAT_AVYU4444: | 465 | case R200_TXFORMAT_AVYU4444: |
463 | track->textures[i].cpp = 2; | 466 | track->textures[i].cpp = 2; |
467 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
464 | break; | 468 | break; |
465 | case R200_TXFORMAT_ARGB8888: | 469 | case R200_TXFORMAT_ARGB8888: |
466 | case R200_TXFORMAT_RGBA8888: | 470 | case R200_TXFORMAT_RGBA8888: |
@@ -468,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, | |||
468 | case R200_TXFORMAT_BGR111110: | 472 | case R200_TXFORMAT_BGR111110: |
469 | case R200_TXFORMAT_LDVDU8888: | 473 | case R200_TXFORMAT_LDVDU8888: |
470 | track->textures[i].cpp = 4; | 474 | track->textures[i].cpp = 4; |
475 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
471 | break; | 476 | break; |
472 | case R200_TXFORMAT_DXT1: | 477 | case R200_TXFORMAT_DXT1: |
473 | track->textures[i].cpp = 1; | 478 | track->textures[i].cpp = 1; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index b2f9efe2897c..7e81db5eb804 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -881,6 +881,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
881 | case R300_TX_FORMAT_Y4X4: | 881 | case R300_TX_FORMAT_Y4X4: |
882 | case R300_TX_FORMAT_Z3Y3X2: | 882 | case R300_TX_FORMAT_Z3Y3X2: |
883 | track->textures[i].cpp = 1; | 883 | track->textures[i].cpp = 1; |
884 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
884 | break; | 885 | break; |
885 | case R300_TX_FORMAT_X16: | 886 | case R300_TX_FORMAT_X16: |
886 | case R300_TX_FORMAT_Y8X8: | 887 | case R300_TX_FORMAT_Y8X8: |
@@ -892,6 +893,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
892 | case R300_TX_FORMAT_B8G8_B8G8: | 893 | case R300_TX_FORMAT_B8G8_B8G8: |
893 | case R300_TX_FORMAT_G8R8_G8B8: | 894 | case R300_TX_FORMAT_G8R8_G8B8: |
894 | track->textures[i].cpp = 2; | 895 | track->textures[i].cpp = 2; |
896 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
895 | break; | 897 | break; |
896 | case R300_TX_FORMAT_Y16X16: | 898 | case R300_TX_FORMAT_Y16X16: |
897 | case R300_TX_FORMAT_Z11Y11X10: | 899 | case R300_TX_FORMAT_Z11Y11X10: |
@@ -902,14 +904,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
902 | case R300_TX_FORMAT_FL_I32: | 904 | case R300_TX_FORMAT_FL_I32: |
903 | case 0x1e: | 905 | case 0x1e: |
904 | track->textures[i].cpp = 4; | 906 | track->textures[i].cpp = 4; |
907 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
905 | break; | 908 | break; |
906 | case R300_TX_FORMAT_W16Z16Y16X16: | 909 | case R300_TX_FORMAT_W16Z16Y16X16: |
907 | case R300_TX_FORMAT_FL_R16G16B16A16: | 910 | case R300_TX_FORMAT_FL_R16G16B16A16: |
908 | case R300_TX_FORMAT_FL_I32A32: | 911 | case R300_TX_FORMAT_FL_I32A32: |
909 | track->textures[i].cpp = 8; | 912 | track->textures[i].cpp = 8; |
913 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
910 | break; | 914 | break; |
911 | case R300_TX_FORMAT_FL_R32G32B32A32: | 915 | case R300_TX_FORMAT_FL_R32G32B32A32: |
912 | track->textures[i].cpp = 16; | 916 | track->textures[i].cpp = 16; |
917 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | ||
913 | break; | 918 | break; |
914 | case R300_TX_FORMAT_DXT1: | 919 | case R300_TX_FORMAT_DXT1: |
915 | track->textures[i].cpp = 1; | 920 | track->textures[i].cpp = 1; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0e91871f45be..3d6645ce2151 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -130,9 +130,14 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) | |||
130 | break; | 130 | break; |
131 | } | 131 | } |
132 | } | 132 | } |
133 | } else | 133 | } else { |
134 | rdev->pm.requested_power_state_index = | 134 | if (rdev->pm.current_power_state_index == 0) |
135 | rdev->pm.current_power_state_index - 1; | 135 | rdev->pm.requested_power_state_index = |
136 | rdev->pm.num_power_states - 1; | ||
137 | else | ||
138 | rdev->pm.requested_power_state_index = | ||
139 | rdev->pm.current_power_state_index - 1; | ||
140 | } | ||
136 | } | 141 | } |
137 | rdev->pm.requested_clock_mode_index = 0; | 142 | rdev->pm.requested_clock_mode_index = 0; |
138 | /* don't use the power state if crtcs are active and no display flag is set */ | 143 | /* don't use the power state if crtcs are active and no display flag is set */ |
@@ -1097,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev) | |||
1097 | WREG32(MC_VM_FB_LOCATION, tmp); | 1102 | WREG32(MC_VM_FB_LOCATION, tmp); |
1098 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 1103 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
1099 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 1104 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
1100 | WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); | 1105 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
1101 | if (rdev->flags & RADEON_IS_AGP) { | 1106 | if (rdev->flags & RADEON_IS_AGP) { |
1102 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); | 1107 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); |
1103 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); | 1108 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); |
@@ -1219,8 +1224,10 @@ int r600_mc_init(struct radeon_device *rdev) | |||
1219 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1224 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1220 | r600_vram_gtt_location(rdev, &rdev->mc); | 1225 | r600_vram_gtt_location(rdev, &rdev->mc); |
1221 | 1226 | ||
1222 | if (rdev->flags & RADEON_IS_IGP) | 1227 | if (rdev->flags & RADEON_IS_IGP) { |
1228 | rs690_pm_info(rdev); | ||
1223 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 1229 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
1230 | } | ||
1224 | radeon_update_bandwidth_info(rdev); | 1231 | radeon_update_bandwidth_info(rdev); |
1225 | return 0; | 1232 | return 0; |
1226 | } | 1233 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8e1d44ca26ec..ab61aaa887bb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -177,6 +177,7 @@ void radeon_pm_resume(struct radeon_device *rdev); | |||
177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 177 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 178 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | 179 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); |
180 | void rs690_pm_info(struct radeon_device *rdev); | ||
180 | 181 | ||
181 | /* | 182 | /* |
182 | * Fences. | 183 | * Fences. |
@@ -619,7 +620,8 @@ enum radeon_dynpm_state { | |||
619 | DYNPM_STATE_DISABLED, | 620 | DYNPM_STATE_DISABLED, |
620 | DYNPM_STATE_MINIMUM, | 621 | DYNPM_STATE_MINIMUM, |
621 | DYNPM_STATE_PAUSED, | 622 | DYNPM_STATE_PAUSED, |
622 | DYNPM_STATE_ACTIVE | 623 | DYNPM_STATE_ACTIVE, |
624 | DYNPM_STATE_SUSPENDED, | ||
623 | }; | 625 | }; |
624 | enum radeon_dynpm_action { | 626 | enum radeon_dynpm_action { |
625 | DYNPM_ACTION_NONE, | 627 | DYNPM_ACTION_NONE, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 87f7e2cc52d4..646f96f97c77 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -780,6 +780,13 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
780 | case CHIP_R423: | 780 | case CHIP_R423: |
781 | case CHIP_RV410: | 781 | case CHIP_RV410: |
782 | rdev->asic = &r420_asic; | 782 | rdev->asic = &r420_asic; |
783 | /* handle macs */ | ||
784 | if (rdev->bios == NULL) { | ||
785 | rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock; | ||
786 | rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock; | ||
787 | rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock; | ||
788 | rdev->asic->set_memory_clock = NULL; | ||
789 | } | ||
783 | break; | 790 | break; |
784 | case CHIP_RS400: | 791 | case CHIP_RS400: |
785 | case CHIP_RS480: | 792 | case CHIP_RS480: |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index fbba938f8048..2c9213739999 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -48,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) | |||
48 | resource_size_t vram_base; | 48 | resource_size_t vram_base; |
49 | resource_size_t size = 256 * 1024; /* ??? */ | 49 | resource_size_t size = 256 * 1024; /* ??? */ |
50 | 50 | ||
51 | if (!(rdev->flags & RADEON_IS_IGP)) | ||
52 | if (!radeon_card_posted(rdev)) | ||
53 | return false; | ||
54 | |||
51 | rdev->bios = NULL; | 55 | rdev->bios = NULL; |
52 | vram_base = drm_get_resource_start(rdev->ddev, 0); | 56 | vram_base = drm_get_resource_start(rdev->ddev, 0); |
53 | bios = ioremap(vram_base, size); | 57 | bios = ioremap(vram_base, size); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 1bee2f9e24a5..d1c1d8dd93ce 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -1411,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1411 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; | 1411 | rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; |
1412 | } else | 1412 | } else |
1413 | #endif /* CONFIG_PPC_PMAC */ | 1413 | #endif /* CONFIG_PPC_PMAC */ |
1414 | #ifdef CONFIG_PPC64 | ||
1415 | if (ASIC_IS_RN50(rdev)) | ||
1416 | rdev->mode_info.connector_table = CT_RN50_POWER; | ||
1417 | else | ||
1418 | #endif | ||
1414 | rdev->mode_info.connector_table = CT_GENERIC; | 1419 | rdev->mode_info.connector_table = CT_GENERIC; |
1415 | } | 1420 | } |
1416 | 1421 | ||
@@ -1853,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1853 | CONNECTOR_OBJECT_ID_SVIDEO, | 1858 | CONNECTOR_OBJECT_ID_SVIDEO, |
1854 | &hpd); | 1859 | &hpd); |
1855 | break; | 1860 | break; |
1861 | case CT_RN50_POWER: | ||
1862 | DRM_INFO("Connector Table: %d (rn50-power)\n", | ||
1863 | rdev->mode_info.connector_table); | ||
1864 | /* VGA - primary dac */ | ||
1865 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); | ||
1866 | hpd.hpd = RADEON_HPD_NONE; | ||
1867 | radeon_add_legacy_encoder(dev, | ||
1868 | radeon_get_encoder_id(dev, | ||
1869 | ATOM_DEVICE_CRT1_SUPPORT, | ||
1870 | 1), | ||
1871 | ATOM_DEVICE_CRT1_SUPPORT); | ||
1872 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, | ||
1873 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | ||
1874 | CONNECTOR_OBJECT_ID_VGA, | ||
1875 | &hpd); | ||
1876 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); | ||
1877 | hpd.hpd = RADEON_HPD_NONE; | ||
1878 | radeon_add_legacy_encoder(dev, | ||
1879 | radeon_get_encoder_id(dev, | ||
1880 | ATOM_DEVICE_CRT2_SUPPORT, | ||
1881 | 2), | ||
1882 | ATOM_DEVICE_CRT2_SUPPORT); | ||
1883 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | ||
1884 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | ||
1885 | CONNECTOR_OBJECT_ID_VGA, | ||
1886 | &hpd); | ||
1887 | break; | ||
1856 | default: | 1888 | default: |
1857 | DRM_INFO("Connector table: %d (invalid)\n", | 1889 | DRM_INFO("Connector table: %d (invalid)\n", |
1858 | rdev->mode_info.connector_table); | 1890 | rdev->mode_info.connector_table); |
@@ -1906,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, | |||
1906 | return false; | 1938 | return false; |
1907 | } | 1939 | } |
1908 | 1940 | ||
1909 | /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */ | ||
1910 | if (dev->pdev->device == 0x5159 && | ||
1911 | dev->pdev->subsystem_vendor == 0x1002 && | ||
1912 | dev->pdev->subsystem_device == 0x013a) { | ||
1913 | if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) | ||
1914 | *legacy_connector = CONNECTOR_CRT_LEGACY; | ||
1915 | |||
1916 | } | ||
1917 | |||
1918 | /* X300 card with extra non-existent DVI port */ | 1941 | /* X300 card with extra non-existent DVI port */ |
1919 | if (dev->pdev->device == 0x5B60 && | 1942 | if (dev->pdev->device == 0x5B60 && |
1920 | dev->pdev->subsystem_vendor == 0x17af && | 1943 | dev->pdev->subsystem_vendor == 0x17af && |
@@ -3019,6 +3042,14 @@ void radeon_combios_asic_init(struct drm_device *dev) | |||
3019 | combios_write_ram_size(dev); | 3042 | combios_write_ram_size(dev); |
3020 | } | 3043 | } |
3021 | 3044 | ||
3045 | /* quirk for rs4xx HP nx6125 laptop to make it resume | ||
3046 | * - it hangs on resume inside the dynclk 1 table. | ||
3047 | */ | ||
3048 | if (rdev->family == CHIP_RS480 && | ||
3049 | rdev->pdev->subsystem_vendor == 0x103c && | ||
3050 | rdev->pdev->subsystem_device == 0x308b) | ||
3051 | return; | ||
3052 | |||
3022 | /* DYN CLK 1 */ | 3053 | /* DYN CLK 1 */ |
3023 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); | 3054 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
3024 | if (table) | 3055 | if (table) |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index b7023fff89eb..4eb67c0e0996 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -194,7 +194,7 @@ unpin: | |||
194 | fail: | 194 | fail: |
195 | drm_gem_object_unreference_unlocked(obj); | 195 | drm_gem_object_unreference_unlocked(obj); |
196 | 196 | ||
197 | return 0; | 197 | return ret; |
198 | } | 198 | } |
199 | 199 | ||
200 | int radeon_crtc_cursor_move(struct drm_crtc *crtc, | 200 | int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f10faed21567..5f317317aba2 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -779,6 +779,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
779 | 779 | ||
780 | int radeon_resume_kms(struct drm_device *dev) | 780 | int radeon_resume_kms(struct drm_device *dev) |
781 | { | 781 | { |
782 | struct drm_connector *connector; | ||
782 | struct radeon_device *rdev = dev->dev_private; | 783 | struct radeon_device *rdev = dev->dev_private; |
783 | 784 | ||
784 | if (rdev->powered_down) | 785 | if (rdev->powered_down) |
@@ -797,6 +798,12 @@ int radeon_resume_kms(struct drm_device *dev) | |||
797 | radeon_resume(rdev); | 798 | radeon_resume(rdev); |
798 | radeon_pm_resume(rdev); | 799 | radeon_pm_resume(rdev); |
799 | radeon_restore_bios_scratch_regs(rdev); | 800 | radeon_restore_bios_scratch_regs(rdev); |
801 | |||
802 | /* turn on display hw */ | ||
803 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
804 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
805 | } | ||
806 | |||
800 | radeon_fbdev_set_suspend(rdev, 0); | 807 | radeon_fbdev_set_suspend(rdev, 0); |
801 | release_console_sem(); | 808 | release_console_sem(); |
802 | 809 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 1ebb100015b7..e0b30b264c28 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -1072,6 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1072 | if (is_dig) { | 1072 | if (is_dig) { |
1073 | switch (mode) { | 1073 | switch (mode) { |
1074 | case DRM_MODE_DPMS_ON: | 1074 | case DRM_MODE_DPMS_ON: |
1075 | if (!ASIC_IS_DCE4(rdev)) | ||
1076 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
1075 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1077 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1076 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1078 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1077 | 1079 | ||
@@ -1079,8 +1081,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1079 | if (ASIC_IS_DCE4(rdev)) | 1081 | if (ASIC_IS_DCE4(rdev)) |
1080 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); | 1082 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); |
1081 | } | 1083 | } |
1082 | if (!ASIC_IS_DCE4(rdev)) | ||
1083 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
1084 | break; | 1084 | break; |
1085 | case DRM_MODE_DPMS_STANDBY: | 1085 | case DRM_MODE_DPMS_STANDBY: |
1086 | case DRM_MODE_DPMS_SUSPEND: | 1086 | case DRM_MODE_DPMS_SUSPEND: |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5b07b8848e09..bad77f40a9da 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -928,16 +928,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
928 | if (ASIC_IS_R300(rdev)) { | 928 | if (ASIC_IS_R300(rdev)) { |
929 | gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; | 929 | gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; |
930 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); | 930 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); |
931 | } | 931 | } else if (rdev->family != CHIP_R200) |
932 | |||
933 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) | ||
934 | disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); | ||
935 | else | ||
936 | disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); | 932 | disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); |
937 | 933 | else if (rdev->family == CHIP_R200) | |
938 | if (rdev->family == CHIP_R200) | ||
939 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); | 934 | fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
940 | 935 | ||
936 | if (rdev->family >= CHIP_R200) | ||
937 | disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); | ||
938 | |||
941 | if (is_tv) { | 939 | if (is_tv) { |
942 | uint32_t dac_cntl; | 940 | uint32_t dac_cntl; |
943 | 941 | ||
@@ -1002,15 +1000,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
1002 | if (ASIC_IS_R300(rdev)) { | 1000 | if (ASIC_IS_R300(rdev)) { |
1003 | WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); | 1001 | WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); |
1004 | WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); | 1002 | WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); |
1005 | } | 1003 | } else if (rdev->family != CHIP_R200) |
1004 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | ||
1005 | else if (rdev->family == CHIP_R200) | ||
1006 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | ||
1006 | 1007 | ||
1007 | if (rdev->family >= CHIP_R200) | 1008 | if (rdev->family >= CHIP_R200) |
1008 | WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); | 1009 | WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); |
1009 | else | ||
1010 | WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); | ||
1011 | |||
1012 | if (rdev->family == CHIP_R200) | ||
1013 | WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); | ||
1014 | 1010 | ||
1015 | if (is_tv) | 1011 | if (is_tv) |
1016 | radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); | 1012 | radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 67358baf28b2..95696aa57ac8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -206,6 +206,7 @@ enum radeon_connector_table { | |||
206 | CT_MINI_INTERNAL, | 206 | CT_MINI_INTERNAL, |
207 | CT_IMAC_G5_ISIGHT, | 207 | CT_IMAC_G5_ISIGHT, |
208 | CT_EMAC, | 208 | CT_EMAC, |
209 | CT_RN50_POWER, | ||
209 | }; | 210 | }; |
210 | 211 | ||
211 | enum radeon_dvo_chip { | 212 | enum radeon_dvo_chip { |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 63f679a04b25..115d26b762cc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -397,13 +397,20 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
397 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 397 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
398 | mutex_unlock(&rdev->pm.mutex); | 398 | mutex_unlock(&rdev->pm.mutex); |
399 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { | 399 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { |
400 | bool flush_wq = false; | ||
401 | |||
400 | mutex_lock(&rdev->pm.mutex); | 402 | mutex_lock(&rdev->pm.mutex); |
401 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 403 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
404 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
405 | flush_wq = true; | ||
406 | } | ||
402 | /* disable dynpm */ | 407 | /* disable dynpm */ |
403 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 408 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
404 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 409 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
405 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | 410 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
406 | mutex_unlock(&rdev->pm.mutex); | 411 | mutex_unlock(&rdev->pm.mutex); |
412 | if (flush_wq) | ||
413 | flush_workqueue(rdev->wq); | ||
407 | } else { | 414 | } else { |
408 | DRM_ERROR("invalid power method!\n"); | 415 | DRM_ERROR("invalid power method!\n"); |
409 | goto fail; | 416 | goto fail; |
@@ -418,9 +425,18 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon | |||
418 | 425 | ||
419 | void radeon_pm_suspend(struct radeon_device *rdev) | 426 | void radeon_pm_suspend(struct radeon_device *rdev) |
420 | { | 427 | { |
428 | bool flush_wq = false; | ||
429 | |||
421 | mutex_lock(&rdev->pm.mutex); | 430 | mutex_lock(&rdev->pm.mutex); |
422 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | 431 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
432 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
433 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) | ||
434 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; | ||
435 | flush_wq = true; | ||
436 | } | ||
423 | mutex_unlock(&rdev->pm.mutex); | 437 | mutex_unlock(&rdev->pm.mutex); |
438 | if (flush_wq) | ||
439 | flush_workqueue(rdev->wq); | ||
424 | } | 440 | } |
425 | 441 | ||
426 | void radeon_pm_resume(struct radeon_device *rdev) | 442 | void radeon_pm_resume(struct radeon_device *rdev) |
@@ -432,6 +448,12 @@ void radeon_pm_resume(struct radeon_device *rdev) | |||
432 | rdev->pm.current_sclk = rdev->clock.default_sclk; | 448 | rdev->pm.current_sclk = rdev->clock.default_sclk; |
433 | rdev->pm.current_mclk = rdev->clock.default_mclk; | 449 | rdev->pm.current_mclk = rdev->clock.default_mclk; |
434 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 450 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
451 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | ||
452 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | ||
453 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | ||
454 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
455 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
456 | } | ||
435 | mutex_unlock(&rdev->pm.mutex); | 457 | mutex_unlock(&rdev->pm.mutex); |
436 | radeon_pm_compute_clocks(rdev); | 458 | radeon_pm_compute_clocks(rdev); |
437 | } | 459 | } |
@@ -486,6 +508,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
486 | void radeon_pm_fini(struct radeon_device *rdev) | 508 | void radeon_pm_fini(struct radeon_device *rdev) |
487 | { | 509 | { |
488 | if (rdev->pm.num_power_states > 1) { | 510 | if (rdev->pm.num_power_states > 1) { |
511 | bool flush_wq = false; | ||
512 | |||
489 | mutex_lock(&rdev->pm.mutex); | 513 | mutex_lock(&rdev->pm.mutex); |
490 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 514 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
491 | rdev->pm.profile = PM_PROFILE_DEFAULT; | 515 | rdev->pm.profile = PM_PROFILE_DEFAULT; |
@@ -493,13 +517,16 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
493 | radeon_pm_set_clocks(rdev); | 517 | radeon_pm_set_clocks(rdev); |
494 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 518 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
495 | /* cancel work */ | 519 | /* cancel work */ |
496 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); | 520 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
521 | flush_wq = true; | ||
497 | /* reset default clocks */ | 522 | /* reset default clocks */ |
498 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 523 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
499 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 524 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
500 | radeon_pm_set_clocks(rdev); | 525 | radeon_pm_set_clocks(rdev); |
501 | } | 526 | } |
502 | mutex_unlock(&rdev->pm.mutex); | 527 | mutex_unlock(&rdev->pm.mutex); |
528 | if (flush_wq) | ||
529 | flush_workqueue(rdev->wq); | ||
503 | 530 | ||
504 | device_remove_file(rdev->dev, &dev_attr_power_profile); | 531 | device_remove_file(rdev->dev, &dev_attr_power_profile); |
505 | device_remove_file(rdev->dev, &dev_attr_power_method); | 532 | device_remove_file(rdev->dev, &dev_attr_power_method); |
@@ -720,12 +747,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) | |||
720 | radeon_pm_get_dynpm_state(rdev); | 747 | radeon_pm_get_dynpm_state(rdev); |
721 | radeon_pm_set_clocks(rdev); | 748 | radeon_pm_set_clocks(rdev); |
722 | } | 749 | } |
750 | |||
751 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
752 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
723 | } | 753 | } |
724 | mutex_unlock(&rdev->pm.mutex); | 754 | mutex_unlock(&rdev->pm.mutex); |
725 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 755 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
726 | |||
727 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | ||
728 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | ||
729 | } | 756 | } |
730 | 757 | ||
731 | /* | 758 | /* |
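The radeon_pm.c hunks above all follow one pattern: the delayed dynpm work is cancelled with cancel_delayed_work() while pm.mutex is held, and the workqueue is flushed with flush_workqueue() only after the mutex is dropped, instead of calling cancel_delayed_work_sync() under the lock. That ordering avoids a deadlock when the idle work handler itself takes pm.mutex. A minimal sketch of the pattern, assuming an invented struct my_drv with a private workqueue (this is not radeon code):

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_drv {
        struct mutex lock;
        struct workqueue_struct *wq;
        struct delayed_work idle_work;
        bool idle_work_armed;
};

static void my_drv_stop_idle_work(struct my_drv *drv)
{
        bool flush_wq = false;

        mutex_lock(&drv->lock);
        if (drv->idle_work_armed) {
                /* does not wait for a handler that is already running */
                cancel_delayed_work(&drv->idle_work);
                drv->idle_work_armed = false;
                flush_wq = true;
        }
        mutex_unlock(&drv->lock);

        /*
         * Wait for any in-flight handler only after dropping the lock;
         * cancel_delayed_work_sync() here could deadlock if the handler
         * takes drv->lock itself.
         */
        if (flush_wq)
                flush_workqueue(drv->wq);
}

For the same reason the handler's self-requeue in radeon_dynpm_idle_work_handler() moves inside the mutex-protected region, so rearming and cancelling are serialized by the same lock.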
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index b5c757f68d3c..f78fd592544d 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen | |||
@@ -80,8 +80,8 @@ evergreen 0x9400 | |||
80 | 0x00028010 DB_RENDER_OVERRIDE2 | 80 | 0x00028010 DB_RENDER_OVERRIDE2 |
81 | 0x00028028 DB_STENCIL_CLEAR | 81 | 0x00028028 DB_STENCIL_CLEAR |
82 | 0x0002802C DB_DEPTH_CLEAR | 82 | 0x0002802C DB_DEPTH_CLEAR |
83 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | ||
84 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL | 83 | 0x00028030 PA_SC_SCREEN_SCISSOR_TL |
84 | 0x00028034 PA_SC_SCREEN_SCISSOR_BR | ||
85 | 0x0002805C DB_DEPTH_SLICE | 85 | 0x0002805C DB_DEPTH_SLICE |
86 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 | 86 | 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 |
87 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 | 87 | 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 |
@@ -460,8 +460,8 @@ evergreen 0x9400 | |||
460 | 0x00028844 SQ_PGM_RESOURCES_PS | 460 | 0x00028844 SQ_PGM_RESOURCES_PS |
461 | 0x00028848 SQ_PGM_RESOURCES_2_PS | 461 | 0x00028848 SQ_PGM_RESOURCES_2_PS |
462 | 0x0002884C SQ_PGM_EXPORTS_PS | 462 | 0x0002884C SQ_PGM_EXPORTS_PS |
463 | 0x0002885C SQ_PGM_RESOURCES_VS | 463 | 0x00028860 SQ_PGM_RESOURCES_VS |
464 | 0x00028860 SQ_PGM_RESOURCES_2_VS | 464 | 0x00028864 SQ_PGM_RESOURCES_2_VS |
465 | 0x00028878 SQ_PGM_RESOURCES_GS | 465 | 0x00028878 SQ_PGM_RESOURCES_GS |
466 | 0x0002887C SQ_PGM_RESOURCES_2_GS | 466 | 0x0002887C SQ_PGM_RESOURCES_2_GS |
467 | 0x00028890 SQ_PGM_RESOURCES_ES | 467 | 0x00028890 SQ_PGM_RESOURCES_ES |
@@ -469,8 +469,8 @@ evergreen 0x9400 | |||
469 | 0x000288A8 SQ_PGM_RESOURCES_FS | 469 | 0x000288A8 SQ_PGM_RESOURCES_FS |
470 | 0x000288BC SQ_PGM_RESOURCES_HS | 470 | 0x000288BC SQ_PGM_RESOURCES_HS |
471 | 0x000288C0 SQ_PGM_RESOURCES_2_HS | 471 | 0x000288C0 SQ_PGM_RESOURCES_2_HS |
472 | 0x000288D0 SQ_PGM_RESOURCES_LS | 472 | 0x000288D4 SQ_PGM_RESOURCES_LS |
473 | 0x000288D4 SQ_PGM_RESOURCES_2_LS | 473 | 0x000288D8 SQ_PGM_RESOURCES_2_LS |
474 | 0x000288E8 SQ_LDS_ALLOC | 474 | 0x000288E8 SQ_LDS_ALLOC |
475 | 0x000288EC SQ_LDS_ALLOC_PS | 475 | 0x000288EC SQ_LDS_ALLOC_PS |
476 | 0x000288F0 SQ_VTX_SEMANTIC_CLEAR | 476 | 0x000288F0 SQ_VTX_SEMANTIC_CLEAR |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index bcc33195ebc2..f4f0a61bcdce 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -79,7 +79,13 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
79 | tmp.full = dfixed_const(100); | 79 | tmp.full = dfixed_const(100); |
80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); | 80 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); |
81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 81 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
82 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | 82 | if (info->info.usK8MemoryClock) |
83 | rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); | ||
84 | else if (rdev->clock.default_mclk) { | ||
85 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | ||
86 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
87 | } else | ||
88 | rdev->pm.igp_system_mclk.full = dfixed_const(400); | ||
83 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); | 89 | rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); |
84 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); | 90 | rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); |
85 | break; | 91 | break; |
@@ -87,34 +93,31 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
87 | tmp.full = dfixed_const(100); | 93 | tmp.full = dfixed_const(100); |
88 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); | 94 | rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); |
89 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); | 95 | rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
90 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | 96 | if (info->info_v2.ulBootUpUMAClock) |
97 | rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); | ||
98 | else if (rdev->clock.default_mclk) | ||
99 | rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); | ||
100 | else | ||
101 | rdev->pm.igp_system_mclk.full = dfixed_const(66700); | ||
91 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | 102 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); |
92 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); | 103 | rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); |
93 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); | 104 | rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
94 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); | 105 | rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); |
95 | break; | 106 | break; |
96 | default: | 107 | default: |
97 | tmp.full = dfixed_const(100); | ||
98 | /* We assume the slowest possible clock, i.e. worst case */ | 108 | /* We assume the slowest possible clock, i.e. worst case */ |
99 | /* DDR 333Mhz */ | 109 | rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
100 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); | 110 | rdev->pm.igp_system_mclk.full = dfixed_const(200); |
101 | /* FIXME: system clock ? */ | 111 | rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
102 | rdev->pm.igp_system_mclk.full = dfixed_const(100); | ||
103 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
104 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); | ||
105 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); | 112 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
106 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 113 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
107 | break; | 114 | break; |
108 | } | 115 | } |
109 | } else { | 116 | } else { |
110 | tmp.full = dfixed_const(100); | ||
111 | /* We assume the slowest possible clock, i.e. worst case */ | 117 | /* We assume the slowest possible clock, i.e. worst case */ |
112 | /* DDR 333Mhz */ | 118 | rdev->pm.igp_sideport_mclk.full = dfixed_const(200); |
113 | rdev->pm.igp_sideport_mclk.full = dfixed_const(333); | 119 | rdev->pm.igp_system_mclk.full = dfixed_const(200); |
114 | /* FIXME: system clock ? */ | 120 | rdev->pm.igp_ht_link_clk.full = dfixed_const(1000); |
115 | rdev->pm.igp_system_mclk.full = dfixed_const(100); | ||
116 | rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
117 | rdev->pm.igp_ht_link_clk.full = dfixed_const(200); | ||
118 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); | 121 | rdev->pm.igp_ht_link_width.full = dfixed_const(8); |
119 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | 122 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
120 | } | 123 | } |
@@ -228,10 +231,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
228 | fixed20_12 a, b, c; | 231 | fixed20_12 a, b, c; |
229 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | 232 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; |
230 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | 233 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; |
231 | /* FIXME: detect IGP with sideport memory, i don't think there is any | ||
232 | * such product available | ||
233 | */ | ||
234 | bool sideport = false; | ||
235 | 234 | ||
236 | if (!crtc->base.enabled) { | 235 | if (!crtc->base.enabled) { |
237 | /* FIXME: wouldn't it better to set priority mark to maximum */ | 236 | /* FIXME: wouldn't it better to set priority mark to maximum */ |
@@ -300,7 +299,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | |||
300 | 299 | ||
301 | /* Maximum bandwidth is the minimum bandwidth of all components */ | 300 | /* Maximum bandwidth is the minimum bandwidth of all components */ |
302 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | 301 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; |
303 | if (sideport) { | 302 | if (rdev->mc.igp_sideport_enabled) { |
304 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | 303 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && |
305 | rdev->pm.sideport_bandwidth.full) | 304 | rdev->pm.sideport_bandwidth.full) |
306 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | 305 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; |
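The rs690_pm_info() hunks replace a blind use of the firmware-reported system memory clock with a fallback chain: take the ATOM table value when it is non-zero, otherwise the BIOS default mclk, and only then a conservative constant; the bandwidth hunk likewise swaps the hard-coded sideport = false for the detected rdev->mc.igp_sideport_enabled flag. The fallback chain reduces to plain C (names and units below are illustrative, not the radeon values):

/* Prefer the firmware value, then the BIOS default, then a safe constant. */
static unsigned int pick_system_mclk(unsigned int fw_clk,
                                     unsigned int bios_default_clk,
                                     unsigned int safe_default)
{
        if (fw_clk)
                return fw_clk;
        if (bios_default_clk)
                return bios_default_clk;
        return safe_default;
}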
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index cec536c222c5..b7fd82064922 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -224,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
224 | WREG32(MC_VM_FB_LOCATION, tmp); | 224 | WREG32(MC_VM_FB_LOCATION, tmp); |
225 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); | 225 | WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); |
226 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); | 226 | WREG32(HDP_NONSURFACE_INFO, (2 << 7)); |
227 | WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); | 227 | WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); |
228 | if (rdev->flags & RADEON_IS_AGP) { | 228 | if (rdev->flags & RADEON_IS_AGP) { |
229 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); | 229 | WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); |
230 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); | 230 | WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ef910694bd63..2f047577b1e3 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -667,7 +667,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
667 | { | 667 | { |
668 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 668 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
669 | struct page *p = NULL; | 669 | struct page *p = NULL; |
670 | int gfp_flags = 0; | 670 | int gfp_flags = GFP_USER; |
671 | int r; | 671 | int r; |
672 | 672 | ||
673 | /* set zero flag for page allocation if required */ | 673 | /* set zero flag for page allocation if required */ |
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c index 9cca465436e3..85064a9f649e 100644 --- a/drivers/power/z2_battery.c +++ b/drivers/power/z2_battery.c | |||
@@ -9,19 +9,13 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | 12 | #include <linux/module.h> |
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/power_supply.h> | ||
17 | #include <linux/i2c.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/gpio.h> | 13 | #include <linux/gpio.h> |
14 | #include <linux/i2c.h> | ||
21 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
22 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
23 | #include <asm/irq.h> | 17 | #include <linux/power_supply.h> |
24 | #include <asm/mach/irq.h> | 18 | #include <linux/slab.h> |
25 | #include <linux/z2_battery.h> | 19 | #include <linux/z2_battery.h> |
26 | 20 | ||
27 | #define Z2_DEFAULT_NAME "Z2" | 21 | #define Z2_DEFAULT_NAME "Z2" |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index de033b7ac21f..d827ce570a8c 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
@@ -777,7 +777,7 @@ static int __devinit ds1307_probe(struct i2c_client *client, | |||
777 | 777 | ||
778 | read_rtc: | 778 | read_rtc: |
779 | /* read RTC registers */ | 779 | /* read RTC registers */ |
780 | tmp = ds1307->read_block_data(ds1307->client, 0, 8, buf); | 780 | tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf); |
781 | if (tmp != 8) { | 781 | if (tmp != 8) { |
782 | pr_debug("read error %d\n", tmp); | 782 | pr_debug("read error %d\n", tmp); |
783 | err = -EIO; | 783 | err = -EIO; |
@@ -862,7 +862,7 @@ read_rtc: | |||
862 | if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM) | 862 | if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM) |
863 | tmp += 12; | 863 | tmp += 12; |
864 | i2c_smbus_write_byte_data(client, | 864 | i2c_smbus_write_byte_data(client, |
865 | DS1307_REG_HOUR, | 865 | ds1307->offset + DS1307_REG_HOUR, |
866 | bin2bcd(tmp)); | 866 | bin2bcd(tmp)); |
867 | } | 867 | } |
868 | 868 | ||
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c index 9eb62a256e9a..cd6cf575902e 100644 --- a/drivers/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/serial/cpm_uart/cpm_uart_core.c | |||
@@ -930,6 +930,83 @@ static void cpm_uart_config_port(struct uart_port *port, int flags) | |||
930 | } | 930 | } |
931 | } | 931 | } |
932 | 932 | ||
933 | #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE) | ||
934 | /* | ||
935 | * Write a string to the serial port | ||
936 | * Note that this is called with interrupts already disabled | ||
937 | */ | ||
938 | static void cpm_uart_early_write(struct uart_cpm_port *pinfo, | ||
939 | const char *string, u_int count) | ||
940 | { | ||
941 | unsigned int i; | ||
942 | cbd_t __iomem *bdp, *bdbase; | ||
943 | unsigned char *cpm_outp_addr; | ||
944 | |||
945 | /* Get the address of the host memory buffer. | ||
946 | */ | ||
947 | bdp = pinfo->tx_cur; | ||
948 | bdbase = pinfo->tx_bd_base; | ||
949 | |||
950 | /* | ||
951 | * Now, do each character. This is not as bad as it looks | ||
952 | * since this is a holding FIFO and not a transmitting FIFO. | ||
953 | * We could add the complexity of filling the entire transmit | ||
954 | * buffer, but we would just wait longer between accesses...... | ||
955 | */ | ||
956 | for (i = 0; i < count; i++, string++) { | ||
957 | /* Wait for transmitter fifo to empty. | ||
958 | * Ready indicates output is ready, and xmt is doing | ||
959 | * that, not that it is ready for us to send. | ||
960 | */ | ||
961 | while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) | ||
962 | ; | ||
963 | |||
964 | /* Send the character out. | ||
965 | * If the buffer address is in the CPM DPRAM, don't | ||
966 | * convert it. | ||
967 | */ | ||
968 | cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), | ||
969 | pinfo); | ||
970 | *cpm_outp_addr = *string; | ||
971 | |||
972 | out_be16(&bdp->cbd_datlen, 1); | ||
973 | setbits16(&bdp->cbd_sc, BD_SC_READY); | ||
974 | |||
975 | if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) | ||
976 | bdp = bdbase; | ||
977 | else | ||
978 | bdp++; | ||
979 | |||
980 | /* if a LF, also do CR... */ | ||
981 | if (*string == 10) { | ||
982 | while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) | ||
983 | ; | ||
984 | |||
985 | cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), | ||
986 | pinfo); | ||
987 | *cpm_outp_addr = 13; | ||
988 | |||
989 | out_be16(&bdp->cbd_datlen, 1); | ||
990 | setbits16(&bdp->cbd_sc, BD_SC_READY); | ||
991 | |||
992 | if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) | ||
993 | bdp = bdbase; | ||
994 | else | ||
995 | bdp++; | ||
996 | } | ||
997 | } | ||
998 | |||
999 | /* | ||
1000 | * Finally, Wait for transmitter & holding register to empty | ||
1001 | * and restore the IER | ||
1002 | */ | ||
1003 | while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) | ||
1004 | ; | ||
1005 | |||
1006 | pinfo->tx_cur = bdp; | ||
1007 | } | ||
1008 | #endif | ||
1009 | |||
933 | #ifdef CONFIG_CONSOLE_POLL | 1010 | #ifdef CONFIG_CONSOLE_POLL |
934 | /* Serial polling routines for writing and reading from the uart while | 1011 | /* Serial polling routines for writing and reading from the uart while |
935 | * in an interrupt or debug context. | 1012 | * in an interrupt or debug context. |
@@ -999,7 +1076,7 @@ static void cpm_put_poll_char(struct uart_port *port, | |||
999 | static char ch[2]; | 1076 | static char ch[2]; |
1000 | 1077 | ||
1001 | ch[0] = (char)c; | 1078 | ch[0] = (char)c; |
1002 | cpm_uart_early_write(pinfo->port.line, ch, 1); | 1079 | cpm_uart_early_write(pinfo, ch, 1); |
1003 | } | 1080 | } |
1004 | #endif /* CONFIG_CONSOLE_POLL */ | 1081 | #endif /* CONFIG_CONSOLE_POLL */ |
1005 | 1082 | ||
@@ -1130,9 +1207,6 @@ static void cpm_uart_console_write(struct console *co, const char *s, | |||
1130 | u_int count) | 1207 | u_int count) |
1131 | { | 1208 | { |
1132 | struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; | 1209 | struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; |
1133 | unsigned int i; | ||
1134 | cbd_t __iomem *bdp, *bdbase; | ||
1135 | unsigned char *cp; | ||
1136 | unsigned long flags; | 1210 | unsigned long flags; |
1137 | int nolock = oops_in_progress; | 1211 | int nolock = oops_in_progress; |
1138 | 1212 | ||
@@ -1142,66 +1216,7 @@ static void cpm_uart_console_write(struct console *co, const char *s, | |||
1142 | spin_lock_irqsave(&pinfo->port.lock, flags); | 1216 | spin_lock_irqsave(&pinfo->port.lock, flags); |
1143 | } | 1217 | } |
1144 | 1218 | ||
1145 | /* Get the address of the host memory buffer. | 1219 | cpm_uart_early_write(pinfo, s, count); |
1146 | */ | ||
1147 | bdp = pinfo->tx_cur; | ||
1148 | bdbase = pinfo->tx_bd_base; | ||
1149 | |||
1150 | /* | ||
1151 | * Now, do each character. This is not as bad as it looks | ||
1152 | * since this is a holding FIFO and not a transmitting FIFO. | ||
1153 | * We could add the complexity of filling the entire transmit | ||
1154 | * buffer, but we would just wait longer between accesses...... | ||
1155 | */ | ||
1156 | for (i = 0; i < count; i++, s++) { | ||
1157 | /* Wait for transmitter fifo to empty. | ||
1158 | * Ready indicates output is ready, and xmt is doing | ||
1159 | * that, not that it is ready for us to send. | ||
1160 | */ | ||
1161 | while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) | ||
1162 | ; | ||
1163 | |||
1164 | /* Send the character out. | ||
1165 | * If the buffer address is in the CPM DPRAM, don't | ||
1166 | * convert it. | ||
1167 | */ | ||
1168 | cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); | ||
1169 | *cp = *s; | ||
1170 | |||
1171 | out_be16(&bdp->cbd_datlen, 1); | ||
1172 | setbits16(&bdp->cbd_sc, BD_SC_READY); | ||
1173 | |||
1174 | if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) | ||
1175 | bdp = bdbase; | ||
1176 | else | ||
1177 | bdp++; | ||
1178 | |||
1179 | /* if a LF, also do CR... */ | ||
1180 | if (*s == 10) { | ||
1181 | while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) | ||
1182 | ; | ||
1183 | |||
1184 | cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); | ||
1185 | *cp = 13; | ||
1186 | |||
1187 | out_be16(&bdp->cbd_datlen, 1); | ||
1188 | setbits16(&bdp->cbd_sc, BD_SC_READY); | ||
1189 | |||
1190 | if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) | ||
1191 | bdp = bdbase; | ||
1192 | else | ||
1193 | bdp++; | ||
1194 | } | ||
1195 | } | ||
1196 | |||
1197 | /* | ||
1198 | * Finally, Wait for transmitter & holding register to empty | ||
1199 | * and restore the IER | ||
1200 | */ | ||
1201 | while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) | ||
1202 | ; | ||
1203 | |||
1204 | pinfo->tx_cur = bdp; | ||
1205 | 1220 | ||
1206 | if (unlikely(nolock)) { | 1221 | if (unlikely(nolock)) { |
1207 | local_irq_restore(flags); | 1222 | local_irq_restore(flags); |
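The cpm_uart_core.c change lifts the byte-at-a-time transmit loop out of cpm_uart_console_write() into the new cpm_uart_early_write() helper, so the console path and the CONFIG_CONSOLE_POLL put_char hook share a single implementation. The shape of the refactor, with a made-up port type and a my_putc_busywait() primitive standing in for the buffer-descriptor handling:

struct my_port;                                         /* opaque hardware handle */
void my_putc_busywait(struct my_port *port, char c);    /* assumed primitive */

#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_MY_CONSOLE)
/* Called with interrupts already disabled, like the helper above. */
static void my_uart_early_write(struct my_port *port,
                                const char *s, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++, s++) {
                my_putc_busywait(port, *s);
                if (*s == '\n')         /* a LF also gets a CR */
                        my_putc_busywait(port, '\r');
        }
}
#endif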
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c index e2c000b80ca0..212bc21e6d68 100644 --- a/drivers/staging/batman-adv/bat_sysfs.c +++ b/drivers/staging/batman-adv/bat_sysfs.c | |||
@@ -225,9 +225,9 @@ static struct bat_attribute *mesh_attrs[] = { | |||
225 | NULL, | 225 | NULL, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | static ssize_t transtable_local_read(struct kobject *kobj, | 228 | static ssize_t transtable_local_read(struct file *filp, struct kobject *kobj, |
229 | struct bin_attribute *bin_attr, | 229 | struct bin_attribute *bin_attr, |
230 | char *buff, loff_t off, size_t count) | 230 | char *buff, loff_t off, size_t count) |
231 | { | 231 | { |
232 | struct device *dev = to_dev(kobj->parent); | 232 | struct device *dev = to_dev(kobj->parent); |
233 | struct net_device *net_dev = to_net_dev(dev); | 233 | struct net_device *net_dev = to_net_dev(dev); |
@@ -235,9 +235,9 @@ static ssize_t transtable_local_read(struct kobject *kobj, | |||
235 | return hna_local_fill_buffer_text(net_dev, buff, count, off); | 235 | return hna_local_fill_buffer_text(net_dev, buff, count, off); |
236 | } | 236 | } |
237 | 237 | ||
238 | static ssize_t transtable_global_read(struct kobject *kobj, | 238 | static ssize_t transtable_global_read(struct file *filp, struct kobject *kobj, |
239 | struct bin_attribute *bin_attr, | 239 | struct bin_attribute *bin_attr, |
240 | char *buff, loff_t off, size_t count) | 240 | char *buff, loff_t off, size_t count) |
241 | { | 241 | { |
242 | struct device *dev = to_dev(kobj->parent); | 242 | struct device *dev = to_dev(kobj->parent); |
243 | struct net_device *net_dev = to_net_dev(dev); | 243 | struct net_device *net_dev = to_net_dev(dev); |
@@ -245,9 +245,9 @@ static ssize_t transtable_global_read(struct kobject *kobj, | |||
245 | return hna_global_fill_buffer_text(net_dev, buff, count, off); | 245 | return hna_global_fill_buffer_text(net_dev, buff, count, off); |
246 | } | 246 | } |
247 | 247 | ||
248 | static ssize_t originators_read(struct kobject *kobj, | 248 | static ssize_t originators_read(struct file *filp, struct kobject *kobj, |
249 | struct bin_attribute *bin_attr, | 249 | struct bin_attribute *bin_attr, |
250 | char *buff, loff_t off, size_t count) | 250 | char *buff, loff_t off, size_t count) |
251 | { | 251 | { |
252 | struct device *dev = to_dev(kobj->parent); | 252 | struct device *dev = to_dev(kobj->parent); |
253 | struct net_device *net_dev = to_net_dev(dev); | 253 | struct net_device *net_dev = to_net_dev(dev); |
@@ -255,9 +255,9 @@ static ssize_t originators_read(struct kobject *kobj, | |||
255 | return orig_fill_buffer_text(net_dev, buff, count, off); | 255 | return orig_fill_buffer_text(net_dev, buff, count, off); |
256 | } | 256 | } |
257 | 257 | ||
258 | static ssize_t vis_data_read(struct kobject *kobj, | 258 | static ssize_t vis_data_read(struct file *filp, struct kobject *kobj, |
259 | struct bin_attribute *bin_attr, | 259 | struct bin_attribute *bin_attr, |
260 | char *buff, loff_t off, size_t count) | 260 | char *buff, loff_t off, size_t count) |
261 | { | 261 | { |
262 | struct device *dev = to_dev(kobj->parent); | 262 | struct device *dev = to_dev(kobj->parent); |
263 | struct net_device *net_dev = to_net_dev(dev); | 263 | struct net_device *net_dev = to_net_dev(dev); |
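The batman-adv sysfs hunks only adapt the binary-attribute read callbacks to the prototype that now passes the opening struct file * as the first argument. A minimal sketch of such a callback and its bin_attribute (the attribute name and the empty body are invented for illustration):

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t example_read(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *bin_attr,
                            char *buff, loff_t off, size_t count)
{
        /* copy at most count bytes of attribute data, starting at off */
        return 0;
}

static struct bin_attribute example_attr = {
        .attr   = { .name = "example", .mode = S_IRUGO },
        .read   = example_read,
};

/* registered elsewhere with sysfs_create_bin_file(kobj, &example_attr) */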
diff --git a/drivers/staging/batman-adv/device.c b/drivers/staging/batman-adv/device.c index 7eb6559e0315..32204b5572d0 100644 --- a/drivers/staging/batman-adv/device.c +++ b/drivers/staging/batman-adv/device.c | |||
@@ -196,7 +196,7 @@ ssize_t bat_device_read(struct file *file, char __user *buf, size_t count, | |||
196 | kfree(device_packet); | 196 | kfree(device_packet); |
197 | 197 | ||
198 | if (error) | 198 | if (error) |
199 | return error; | 199 | return -EFAULT; |
200 | 200 | ||
201 | return sizeof(struct icmp_packet); | 201 | return sizeof(struct icmp_packet); |
202 | } | 202 | } |
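The one-line device.c fix restores the usual convention: copy_to_user() returns the number of bytes it could not copy, and a read() handler has to turn any non-zero result into -EFAULT instead of returning that leftover count to user space. A generic example of the convention (unrelated to batman-adv):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        static const char msg[] = "hello\n";
        size_t len = min_t(size_t, count, sizeof(msg));

        if (copy_to_user(buf, msg, len))
                return -EFAULT;         /* never the copy_to_user() remainder */

        return len;
}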
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c index 36a254cd4413..39d112b708e3 100644 --- a/drivers/staging/comedi/drivers/adl_pci9111.c +++ b/drivers/staging/comedi/drivers/adl_pci9111.c | |||
@@ -824,9 +824,12 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev, | |||
824 | plx9050_interrupt_control(dev_private->lcr_io_base, true, true, | 824 | plx9050_interrupt_control(dev_private->lcr_io_base, true, true, |
825 | false, true, true); | 825 | false, true, true); |
826 | 826 | ||
827 | dev_private->scan_delay = | 827 | if (async_cmd->scan_begin_src == TRIG_TIMER) { |
828 | (async_cmd->scan_begin_arg / (async_cmd->convert_arg * | 828 | dev_private->scan_delay = |
829 | async_cmd->chanlist_len)) - 1; | 829 | (async_cmd->scan_begin_arg / |
830 | (async_cmd->convert_arg * | ||
831 | async_cmd->chanlist_len)) - 1; | ||
832 | } | ||
830 | 833 | ||
831 | break; | 834 | break; |
832 | 835 | ||
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c index 81829d6fd287..c374bee25068 100644 --- a/drivers/staging/comedi/drivers/cb_pcidda.c +++ b/drivers/staging/comedi/drivers/cb_pcidda.c | |||
@@ -52,7 +52,6 @@ Please report success/failure with other different cards to | |||
52 | #include "8255.h" | 52 | #include "8255.h" |
53 | 53 | ||
54 | #define PCI_VENDOR_ID_CB 0x1307 /* PCI vendor number of ComputerBoards */ | 54 | #define PCI_VENDOR_ID_CB 0x1307 /* PCI vendor number of ComputerBoards */ |
55 | #define N_BOARDS 10 /* Number of boards in cb_pcidda_boards */ | ||
56 | #define EEPROM_SIZE 128 /* number of entries in eeprom */ | 55 | #define EEPROM_SIZE 128 /* number of entries in eeprom */ |
57 | #define MAX_AO_CHANNELS 8 /* maximum number of ao channels for supported boards */ | 56 | #define MAX_AO_CHANNELS 8 /* maximum number of ao channels for supported boards */ |
58 | 57 | ||
@@ -307,7 +306,7 @@ static int cb_pcidda_attach(struct comedi_device *dev, | |||
307 | continue; | 306 | continue; |
308 | } | 307 | } |
309 | } | 308 | } |
310 | for (index = 0; index < N_BOARDS; index++) { | 309 | for (index = 0; index < ARRAY_SIZE(cb_pcidda_boards); index++) { |
311 | if (cb_pcidda_boards[index].device_id == | 310 | if (cb_pcidda_boards[index].device_id == |
312 | pcidev->device) { | 311 | pcidev->device) { |
313 | goto found; | 312 | goto found; |
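The cb_pcidda.c change drops the hand-maintained N_BOARDS constant in favour of ARRAY_SIZE(), which cannot drift out of sync when board entries are added or removed. The idiom in isolation (example_boards is a made-up table):

#include <linux/kernel.h>

struct example_board {
        unsigned short device_id;
        const char *name;
};

static const struct example_board example_boards[] = {
        { 0x0020, "example-dda02/12" },
        { 0x0021, "example-dda04/12" },
};

static int find_board(unsigned short device_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(example_boards); i++)
                if (example_boards[i].device_id == device_id)
                        return i;
        return -1;
}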
diff --git a/drivers/staging/hv/channel_mgmt.c b/drivers/staging/hv/channel_mgmt.c index 3f53b4d1e4cf..12db555a3a5d 100644 --- a/drivers/staging/hv/channel_mgmt.c +++ b/drivers/staging/hv/channel_mgmt.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/completion.h> | ||
26 | #include "osd.h" | 27 | #include "osd.h" |
27 | #include "logging.h" | 28 | #include "logging.h" |
28 | #include "vmbus_private.h" | 29 | #include "vmbus_private.h" |
@@ -293,6 +294,25 @@ void FreeVmbusChannel(struct vmbus_channel *Channel) | |||
293 | Channel); | 294 | Channel); |
294 | } | 295 | } |
295 | 296 | ||
297 | |||
298 | DECLARE_COMPLETION(hv_channel_ready); | ||
299 | |||
300 | /* | ||
301 | * Count initialized channels, and ensure all channels are ready when hv_vmbus | ||
302 | * module loading completes. | ||
303 | */ | ||
304 | static void count_hv_channel(void) | ||
305 | { | ||
306 | static int counter; | ||
307 | unsigned long flags; | ||
308 | |||
309 | spin_lock_irqsave(&gVmbusConnection.channel_lock, flags); | ||
310 | if (++counter == MAX_MSG_TYPES) | ||
311 | complete(&hv_channel_ready); | ||
312 | spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags); | ||
313 | } | ||
314 | |||
315 | |||
296 | /* | 316 | /* |
297 | * VmbusChannelProcessOffer - Process the offer by creating a channel/device | 317 | * VmbusChannelProcessOffer - Process the offer by creating a channel/device |
298 | * associated with this offer | 318 | * associated with this offer |
@@ -373,22 +393,21 @@ static void VmbusChannelProcessOffer(void *context) | |||
373 | * can cleanup properly | 393 | * can cleanup properly |
374 | */ | 394 | */ |
375 | newChannel->State = CHANNEL_OPEN_STATE; | 395 | newChannel->State = CHANNEL_OPEN_STATE; |
376 | cnt = 0; | ||
377 | 396 | ||
378 | while (cnt != MAX_MSG_TYPES) { | 397 | /* Open IC channels */ |
398 | for (cnt = 0; cnt < MAX_MSG_TYPES; cnt++) { | ||
379 | if (memcmp(&newChannel->OfferMsg.Offer.InterfaceType, | 399 | if (memcmp(&newChannel->OfferMsg.Offer.InterfaceType, |
380 | &hv_cb_utils[cnt].data, | 400 | &hv_cb_utils[cnt].data, |
381 | sizeof(struct hv_guid)) == 0) { | 401 | sizeof(struct hv_guid)) == 0 && |
402 | VmbusChannelOpen(newChannel, 2 * PAGE_SIZE, | ||
403 | 2 * PAGE_SIZE, NULL, 0, | ||
404 | hv_cb_utils[cnt].callback, | ||
405 | newChannel) == 0) { | ||
406 | hv_cb_utils[cnt].channel = newChannel; | ||
382 | DPRINT_INFO(VMBUS, "%s", | 407 | DPRINT_INFO(VMBUS, "%s", |
383 | hv_cb_utils[cnt].log_msg); | 408 | hv_cb_utils[cnt].log_msg); |
384 | 409 | count_hv_channel(); | |
385 | if (VmbusChannelOpen(newChannel, 2 * PAGE_SIZE, | ||
386 | 2 * PAGE_SIZE, NULL, 0, | ||
387 | hv_cb_utils[cnt].callback, | ||
388 | newChannel) == 0) | ||
389 | hv_cb_utils[cnt].channel = newChannel; | ||
390 | } | 410 | } |
391 | cnt++; | ||
392 | } | 411 | } |
393 | } | 412 | } |
394 | DPRINT_EXIT(VMBUS); | 413 | DPRINT_EXIT(VMBUS); |
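The channel_mgmt.c hunk counts successfully opened utility channels under the connection spinlock and completes hv_channel_ready once all MAX_MSG_TYPES of them are up; vmbus_bus_init() further down then blocks on that completion before returning. The same pattern in a self-contained form, with illustrative names and a fixed channel count:

#include <linux/completion.h>
#include <linux/spinlock.h>

#define EXAMPLE_MAX_CHANNELS 4

static DECLARE_COMPLETION(all_channels_ready);
static DEFINE_SPINLOCK(channel_lock);

static void note_channel_ready(void)
{
        static int counter;
        unsigned long flags;

        spin_lock_irqsave(&channel_lock, flags);
        if (++counter == EXAMPLE_MAX_CHANNELS)
                complete(&all_channels_ready);
        spin_unlock_irqrestore(&channel_lock, flags);
}

/* e.g. in the bus init path, after requesting the channel offers: */
static void wait_for_channels(void)
{
        wait_for_completion(&all_channels_ready);
}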
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c index 8a49aafea37a..2adc9b48ca9c 100644 --- a/drivers/staging/hv/hv_utils.c +++ b/drivers/staging/hv/hv_utils.c | |||
@@ -24,6 +24,8 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/sysctl.h> | 25 | #include <linux/sysctl.h> |
26 | #include <linux/reboot.h> | 26 | #include <linux/reboot.h> |
27 | #include <linux/dmi.h> | ||
28 | #include <linux/pci.h> | ||
27 | 29 | ||
28 | #include "logging.h" | 30 | #include "logging.h" |
29 | #include "osd.h" | 31 | #include "osd.h" |
@@ -251,10 +253,36 @@ static void heartbeat_onchannelcallback(void *context) | |||
251 | DPRINT_EXIT(VMBUS); | 253 | DPRINT_EXIT(VMBUS); |
252 | } | 254 | } |
253 | 255 | ||
256 | static const struct pci_device_id __initconst | ||
257 | hv_utils_pci_table[] __maybe_unused = { | ||
258 | { PCI_DEVICE(0x1414, 0x5353) }, /* Hyper-V emulated VGA controller */ | ||
259 | { 0 } | ||
260 | }; | ||
261 | MODULE_DEVICE_TABLE(pci, hv_utils_pci_table); | ||
262 | |||
263 | |||
264 | static const struct dmi_system_id __initconst | ||
265 | hv_utils_dmi_table[] __maybe_unused = { | ||
266 | { | ||
267 | .ident = "Hyper-V", | ||
268 | .matches = { | ||
269 | DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), | ||
270 | DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), | ||
271 | DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"), | ||
272 | }, | ||
273 | }, | ||
274 | { }, | ||
275 | }; | ||
276 | MODULE_DEVICE_TABLE(dmi, hv_utils_dmi_table); | ||
277 | |||
278 | |||
254 | static int __init init_hyperv_utils(void) | 279 | static int __init init_hyperv_utils(void) |
255 | { | 280 | { |
256 | printk(KERN_INFO "Registering HyperV Utility Driver\n"); | 281 | printk(KERN_INFO "Registering HyperV Utility Driver\n"); |
257 | 282 | ||
283 | if (!dmi_check_system(hv_utils_dmi_table)) | ||
284 | return -ENODEV; | ||
285 | |||
258 | hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback = | 286 | hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback = |
259 | &shutdown_onchannelcallback; | 287 | &shutdown_onchannelcallback; |
260 | hv_cb_utils[HV_SHUTDOWN_MSG].callback = &shutdown_onchannelcallback; | 288 | hv_cb_utils[HV_SHUTDOWN_MSG].callback = &shutdown_onchannelcallback; |
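hv_utils.c gains both a PCI and a DMI match table, so udev can autoload the module on Hyper-V guests, and init_hyperv_utils() now also refuses to load when the DMI strings do not match. A stripped-down version of the DMI half, with placeholder vendor and product strings:

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

static const struct dmi_system_id __initconst example_dmi_table[] = {
        {
                .ident = "Example hypervisor",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Corp"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
                },
        },
        { }     /* terminator */
};
MODULE_DEVICE_TABLE(dmi, example_dmi_table);

static int __init example_init(void)
{
        if (!dmi_check_system(example_dmi_table))
                return -ENODEV;         /* bail out on other machines */
        return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");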
diff --git a/drivers/staging/hv/vmbus.h b/drivers/staging/hv/vmbus.h index 0c6ee0f487f3..3c14b2926e00 100644 --- a/drivers/staging/hv/vmbus.h +++ b/drivers/staging/hv/vmbus.h | |||
@@ -74,4 +74,6 @@ int vmbus_child_driver_register(struct driver_context *driver_ctx); | |||
74 | void vmbus_child_driver_unregister(struct driver_context *driver_ctx); | 74 | void vmbus_child_driver_unregister(struct driver_context *driver_ctx); |
75 | void vmbus_get_interface(struct vmbus_channel_interface *interface); | 75 | void vmbus_get_interface(struct vmbus_channel_interface *interface); |
76 | 76 | ||
77 | extern struct completion hv_channel_ready; | ||
78 | |||
77 | #endif /* _VMBUS_H_ */ | 79 | #endif /* _VMBUS_H_ */ |
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c index c21731a12ca7..22c80ece6388 100644 --- a/drivers/staging/hv/vmbus_drv.c +++ b/drivers/staging/hv/vmbus_drv.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/dmi.h> | 28 | #include <linux/dmi.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/completion.h> | ||
30 | #include "version_info.h" | 31 | #include "version_info.h" |
31 | #include "osd.h" | 32 | #include "osd.h" |
32 | #include "logging.h" | 33 | #include "logging.h" |
@@ -356,6 +357,8 @@ static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv)) | |||
356 | 357 | ||
357 | vmbus_drv_obj->GetChannelOffers(); | 358 | vmbus_drv_obj->GetChannelOffers(); |
358 | 359 | ||
360 | wait_for_completion(&hv_channel_ready); | ||
361 | |||
359 | cleanup: | 362 | cleanup: |
360 | DPRINT_EXIT(VMBUS_DRV); | 363 | DPRINT_EXIT(VMBUS_DRV); |
361 | 364 | ||
diff --git a/drivers/staging/mrst-touchscreen/intel-mid-touch.c b/drivers/staging/mrst-touchscreen/intel-mid-touch.c index 1db00975a594..abba22f921be 100644 --- a/drivers/staging/mrst-touchscreen/intel-mid-touch.c +++ b/drivers/staging/mrst-touchscreen/intel-mid-touch.c | |||
@@ -817,9 +817,9 @@ static int mrstouch_remove(struct spi_device *spi) | |||
817 | free_irq(mrstouchdevp->irq, mrstouchdevp); | 817 | free_irq(mrstouchdevp->irq, mrstouchdevp); |
818 | input_unregister_device(mrstouchdevp->input); | 818 | input_unregister_device(mrstouchdevp->input); |
819 | input_free_device(mrstouchdevp->input); | 819 | input_free_device(mrstouchdevp->input); |
820 | kfree(mrstouchdevp); | ||
821 | if (mrstouchdevp->pendet_thrd) | 820 | if (mrstouchdevp->pendet_thrd) |
822 | kthread_stop(mrstouchdevp->pendet_thrd); | 821 | kthread_stop(mrstouchdevp->pendet_thrd); |
822 | kfree(mrstouchdevp); | ||
823 | return 0; | 823 | return 0; |
824 | } | 824 | } |
825 | 825 | ||
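The intel-mid-touch fix is purely an ordering change: kthread_stop() may let the pen-detect thread run once more and dereference the device structure, so the kfree() has to come after the thread is stopped, not before. In general terms:

#include <linux/kthread.h>
#include <linux/slab.h>

struct example_dev {
        struct task_struct *thread;
};

static void example_teardown(struct example_dev *dev)
{
        if (dev->thread)
                kthread_stop(dev->thread);      /* may still touch *dev */
        kfree(dev);                             /* only free afterwards */
}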
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index b740662d095a..674769d2b59b 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c | |||
@@ -77,6 +77,7 @@ struct usb_device_id rtusb_usb_id[] = { | |||
77 | {USB_DEVICE(0x083A, 0x7522)}, /* Arcadyan */ | 77 | {USB_DEVICE(0x083A, 0x7522)}, /* Arcadyan */ |
78 | {USB_DEVICE(0x0CDE, 0x0022)}, /* ZCOM */ | 78 | {USB_DEVICE(0x0CDE, 0x0022)}, /* ZCOM */ |
79 | {USB_DEVICE(0x0586, 0x3416)}, /* Zyxel */ | 79 | {USB_DEVICE(0x0586, 0x3416)}, /* Zyxel */ |
80 | {USB_DEVICE(0x0586, 0x341a)}, /* Zyxel NWD-270N */ | ||
80 | {USB_DEVICE(0x0CDE, 0x0025)}, /* Zyxel */ | 81 | {USB_DEVICE(0x0CDE, 0x0025)}, /* Zyxel */ |
81 | {USB_DEVICE(0x1740, 0x9701)}, /* EnGenius */ | 82 | {USB_DEVICE(0x1740, 0x9701)}, /* EnGenius */ |
82 | {USB_DEVICE(0x1740, 0x9702)}, /* EnGenius */ | 83 | {USB_DEVICE(0x1740, 0x9702)}, /* EnGenius */ |
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c index dacefea78113..49ab9fa9ffa7 100644 --- a/drivers/staging/rtl8187se/r8180_core.c +++ b/drivers/staging/rtl8187se/r8180_core.c | |||
@@ -66,8 +66,6 @@ static int hwseqnum = 0; | |||
66 | static int hwwep = 0; | 66 | static int hwwep = 0; |
67 | static int channels = 0x3fff; | 67 | static int channels = 0x3fff; |
68 | 68 | ||
69 | #define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0) | ||
70 | #define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5]) | ||
71 | MODULE_LICENSE("GPL"); | 69 | MODULE_LICENSE("GPL"); |
72 | MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl); | 70 | MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl); |
73 | MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>"); | 71 | MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>"); |
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c index 447d6474a70c..1b6890611fb6 100644 --- a/drivers/staging/rtl8192su/r8192U_core.c +++ b/drivers/staging/rtl8192su/r8192U_core.c | |||
@@ -112,28 +112,29 @@ u32 rt_global_debug_component = \ | |||
112 | #define CAM_CONTENT_COUNT 8 | 112 | #define CAM_CONTENT_COUNT 8 |
113 | 113 | ||
114 | static const struct usb_device_id rtl8192_usb_id_tbl[] = { | 114 | static const struct usb_device_id rtl8192_usb_id_tbl[] = { |
115 | /* Realtek */ | 115 | {USB_DEVICE(0x0bda, 0x8171)}, /* Realtek */ |
116 | {USB_DEVICE(0x0bda, 0x8171)}, | ||
117 | {USB_DEVICE(0x0bda, 0x8192)}, | ||
118 | {USB_DEVICE(0x0bda, 0x8709)}, | ||
119 | /* Corega */ | ||
120 | {USB_DEVICE(0x07aa, 0x0043)}, | ||
121 | /* Belkin */ | ||
122 | {USB_DEVICE(0x050d, 0x805E)}, | ||
123 | {USB_DEVICE(0x050d, 0x815F)}, /* Belkin F5D8053 v6 */ | ||
124 | /* Sitecom */ | ||
125 | {USB_DEVICE(0x0df6, 0x0031)}, | ||
126 | {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */ | ||
127 | /* EnGenius */ | ||
128 | {USB_DEVICE(0x1740, 0x9201)}, | ||
129 | /* Dlink */ | ||
130 | {USB_DEVICE(0x2001, 0x3301)}, | ||
131 | /* Zinwell */ | ||
132 | {USB_DEVICE(0x5a57, 0x0290)}, | ||
133 | /* Guillemot */ | ||
134 | {USB_DEVICE(0x06f8, 0xe031)}, | ||
135 | //92SU | ||
136 | {USB_DEVICE(0x0bda, 0x8172)}, | 116 | {USB_DEVICE(0x0bda, 0x8172)}, |
117 | {USB_DEVICE(0x0bda, 0x8173)}, | ||
118 | {USB_DEVICE(0x0bda, 0x8174)}, | ||
119 | {USB_DEVICE(0x0bda, 0x8712)}, | ||
120 | {USB_DEVICE(0x0bda, 0x8713)}, | ||
121 | {USB_DEVICE(0x07aa, 0x0047)}, | ||
122 | {USB_DEVICE(0x07d1, 0x3303)}, | ||
123 | {USB_DEVICE(0x07d1, 0x3302)}, | ||
124 | {USB_DEVICE(0x07d1, 0x3300)}, | ||
125 | {USB_DEVICE(0x1740, 0x9603)}, | ||
126 | {USB_DEVICE(0x1740, 0x9605)}, | ||
127 | {USB_DEVICE(0x050d, 0x815F)}, | ||
128 | {USB_DEVICE(0x06f8, 0xe031)}, | ||
129 | {USB_DEVICE(0x7392, 0x7611)}, | ||
130 | {USB_DEVICE(0x7392, 0x7612)}, | ||
131 | {USB_DEVICE(0x7392, 0x7622)}, | ||
132 | {USB_DEVICE(0x0DF6, 0x0045)}, | ||
133 | {USB_DEVICE(0x0E66, 0x0015)}, | ||
134 | {USB_DEVICE(0x0E66, 0x0016)}, | ||
135 | {USB_DEVICE(0x0b05, 0x1786)}, | ||
136 | /* these are not in the official list */ | ||
137 | {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */ | ||
137 | {} | 138 | {} |
138 | }; | 139 | }; |
139 | 140 | ||
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 2bede271a2f0..f38472c2e75c 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c | |||
@@ -121,6 +121,8 @@ static const struct usb_device_id rtl8192_usb_id_tbl[] = { | |||
121 | {USB_DEVICE(0x2001, 0x3301)}, | 121 | {USB_DEVICE(0x2001, 0x3301)}, |
122 | /* Zinwell */ | 122 | /* Zinwell */ |
123 | {USB_DEVICE(0x5a57, 0x0290)}, | 123 | {USB_DEVICE(0x5a57, 0x0290)}, |
124 | /* LG */ | ||
125 | {USB_DEVICE(0x043e, 0x7a01)}, | ||
124 | {} | 126 | {} |
125 | }; | 127 | }; |
126 | 128 | ||
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c index 52408164036f..6a499f0eb594 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/staging/usbip/usbip_common.c | |||
@@ -378,47 +378,67 @@ int usbip_thread(void *param) | |||
378 | complete_and_exit(&ut->thread_done, 0); | 378 | complete_and_exit(&ut->thread_done, 0); |
379 | } | 379 | } |
380 | 380 | ||
381 | static void stop_rx_thread(struct usbip_device *ud) | ||
382 | { | ||
383 | if (ud->tcp_rx.thread != NULL) { | ||
384 | send_sig(SIGKILL, ud->tcp_rx.thread, 1); | ||
385 | wait_for_completion(&ud->tcp_rx.thread_done); | ||
386 | usbip_udbg("rx_thread for ud %p has finished\n", ud); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | static void stop_tx_thread(struct usbip_device *ud) | ||
391 | { | ||
392 | if (ud->tcp_tx.thread != NULL) { | ||
393 | send_sig(SIGKILL, ud->tcp_tx.thread, 1); | ||
394 | wait_for_completion(&ud->tcp_tx.thread_done); | ||
395 | usbip_udbg("tx_thread for ud %p has finished\n", ud); | ||
396 | } | ||
397 | } | ||
398 | |||
381 | int usbip_start_threads(struct usbip_device *ud) | 399 | int usbip_start_threads(struct usbip_device *ud) |
382 | { | 400 | { |
383 | /* | 401 | /* |
384 | * threads are invoked per one device (per one connection). | 402 | * threads are invoked per one device (per one connection). |
385 | */ | 403 | */ |
386 | struct task_struct *th; | 404 | struct task_struct *th; |
405 | int err = 0; | ||
387 | 406 | ||
388 | th = kthread_run(usbip_thread, (void *)&ud->tcp_rx, "usbip"); | 407 | th = kthread_run(usbip_thread, (void *)&ud->tcp_rx, "usbip"); |
389 | if (IS_ERR(th)) { | 408 | if (IS_ERR(th)) { |
390 | printk(KERN_WARNING | 409 | printk(KERN_WARNING |
391 | "Unable to start control thread\n"); | 410 | "Unable to start control thread\n"); |
392 | return PTR_ERR(th); | 411 | err = PTR_ERR(th); |
412 | goto ust_exit; | ||
393 | } | 413 | } |
414 | |||
394 | th = kthread_run(usbip_thread, (void *)&ud->tcp_tx, "usbip"); | 415 | th = kthread_run(usbip_thread, (void *)&ud->tcp_tx, "usbip"); |
395 | if (IS_ERR(th)) { | 416 | if (IS_ERR(th)) { |
396 | printk(KERN_WARNING | 417 | printk(KERN_WARNING |
397 | "Unable to start control thread\n"); | 418 | "Unable to start control thread\n"); |
398 | return PTR_ERR(th); | 419 | err = PTR_ERR(th); |
420 | goto tx_thread_err; | ||
399 | } | 421 | } |
400 | 422 | ||
401 | /* confirm threads are starting */ | 423 | /* confirm threads are starting */ |
402 | wait_for_completion(&ud->tcp_rx.thread_done); | 424 | wait_for_completion(&ud->tcp_rx.thread_done); |
403 | wait_for_completion(&ud->tcp_tx.thread_done); | 425 | wait_for_completion(&ud->tcp_tx.thread_done); |
426 | |||
404 | return 0; | 427 | return 0; |
428 | |||
429 | tx_thread_err: | ||
430 | stop_rx_thread(ud); | ||
431 | |||
432 | ust_exit: | ||
433 | return err; | ||
405 | } | 434 | } |
406 | EXPORT_SYMBOL_GPL(usbip_start_threads); | 435 | EXPORT_SYMBOL_GPL(usbip_start_threads); |
407 | 436 | ||
408 | void usbip_stop_threads(struct usbip_device *ud) | 437 | void usbip_stop_threads(struct usbip_device *ud) |
409 | { | 438 | { |
410 | /* kill threads related to this sdev, if v.c. exists */ | 439 | /* kill threads related to this sdev, if v.c. exists */ |
411 | if (ud->tcp_rx.thread != NULL) { | 440 | stop_rx_thread(ud); |
412 | send_sig(SIGKILL, ud->tcp_rx.thread, 1); | 441 | stop_tx_thread(ud); |
413 | wait_for_completion(&ud->tcp_rx.thread_done); | ||
414 | usbip_udbg("rx_thread for ud %p has finished\n", ud); | ||
415 | } | ||
416 | |||
417 | if (ud->tcp_tx.thread != NULL) { | ||
418 | send_sig(SIGKILL, ud->tcp_tx.thread, 1); | ||
419 | wait_for_completion(&ud->tcp_tx.thread_done); | ||
420 | usbip_udbg("tx_thread for ud %p has finished\n", ud); | ||
421 | } | ||
422 | } | 442 | } |
423 | EXPORT_SYMBOL_GPL(usbip_stop_threads); | 443 | EXPORT_SYMBOL_GPL(usbip_stop_threads); |
424 | 444 | ||
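usbip_start_threads() now unwinds on failure: if the tx thread cannot be started, the already-running rx thread is stopped through the new stop_rx_thread() helper instead of being leaked, and usbip_stop_threads() reuses the same helpers. The underlying goto-unwind shape, with placeholder start/stop primitives:

struct example_dev;                             /* opaque device, for illustration */
int start_rx(struct example_dev *dev);          /* assumed primitives */
int start_tx(struct example_dev *dev);
void stop_rx(struct example_dev *dev);

static int example_start_threads(struct example_dev *dev)
{
        int err;

        err = start_rx(dev);
        if (err)
                goto out;

        err = start_tx(dev);
        if (err)
                goto err_stop_rx;

        return 0;

err_stop_rx:
        stop_rx(dev);                           /* undo only what already succeeded */
out:
        return err;
}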
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c index 48c44c8fdb28..26cf5486edd6 100644 --- a/drivers/staging/wlags49_h2/wl_enc.c +++ b/drivers/staging/wlags49_h2/wl_enc.c | |||
@@ -62,6 +62,7 @@ | |||
62 | /******************************************************************************* | 62 | /******************************************************************************* |
63 | * include files | 63 | * include files |
64 | ******************************************************************************/ | 64 | ******************************************************************************/ |
65 | #include <linux/string.h> | ||
65 | #include <wl_version.h> | 66 | #include <wl_version.h> |
66 | 67 | ||
67 | #include <debug.h> | 68 | #include <debug.h> |
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.h b/drivers/staging/wlags49_h2/wl_sysfs.h index 6d96d03cf490..fa658c38001e 100644 --- a/drivers/staging/wlags49_h2/wl_sysfs.h +++ b/drivers/staging/wlags49_h2/wl_sysfs.h | |||
@@ -2,6 +2,6 @@ | |||
2 | extern void register_wlags_sysfs(struct net_device *); | 2 | extern void register_wlags_sysfs(struct net_device *); |
3 | extern void unregister_wlags_sysfs(struct net_device *); | 3 | extern void unregister_wlags_sysfs(struct net_device *); |
4 | #else | 4 | #else |
5 | static void register_wlags_sysfs(struct net_device *) { return; }; | 5 | static inline void register_wlags_sysfs(struct net_device *net) { } |
6 | static void unregister_wlags_sysfs(struct net_device *) { return; }; | 6 | static inline void unregister_wlags_sysfs(struct net_device *net) { } |
7 | #endif | 7 | #endif |
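The wl_sysfs.h fix is the standard header idiom: when the feature is compiled out, the stubs must be static inline and must name their parameter, so every file that includes the header still compiles without "defined but not used" warnings or multiple-definition errors. Schematically, for a hypothetical CONFIG_EXAMPLE_SYSFS option:

struct net_device;                      /* a forward declaration is enough here */

#ifdef CONFIG_EXAMPLE_SYSFS
extern void register_example_sysfs(struct net_device *net);
extern void unregister_example_sysfs(struct net_device *net);
#else
static inline void register_example_sysfs(struct net_device *net) { }
static inline void unregister_example_sysfs(struct net_device *net) { }
#endif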
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index de98a94d1853..a6bd53ace035 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -1272,8 +1272,7 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) | |||
1272 | 1272 | ||
1273 | static void choose_wakeup(struct usb_device *udev, pm_message_t msg) | 1273 | static void choose_wakeup(struct usb_device *udev, pm_message_t msg) |
1274 | { | 1274 | { |
1275 | int w, i; | 1275 | int w; |
1276 | struct usb_interface *intf; | ||
1277 | 1276 | ||
1278 | /* Remote wakeup is needed only when we actually go to sleep. | 1277 | /* Remote wakeup is needed only when we actually go to sleep. |
1279 | * For things like FREEZE and QUIESCE, if the device is already | 1278 | * For things like FREEZE and QUIESCE, if the device is already |
@@ -1285,16 +1284,10 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg) | |||
1285 | return; | 1284 | return; |
1286 | } | 1285 | } |
1287 | 1286 | ||
1288 | /* If remote wakeup is permitted, see whether any interface drivers | 1287 | /* Enable remote wakeup if it is allowed, even if no interface drivers |
1289 | * actually want it. | 1288 | * actually want it. |
1290 | */ | 1289 | */ |
1291 | w = 0; | 1290 | w = device_may_wakeup(&udev->dev); |
1292 | if (device_may_wakeup(&udev->dev) && udev->actconfig) { | ||
1293 | for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { | ||
1294 | intf = udev->actconfig->interface[i]; | ||
1295 | w |= intf->needs_remote_wakeup; | ||
1296 | } | ||
1297 | } | ||
1298 | 1291 | ||
1299 | /* If the device is autosuspended with the wrong wakeup setting, | 1292 | /* If the device is autosuspended with the wrong wakeup setting, |
1300 | * autoresume now so the setting can be changed. | 1293 | * autoresume now so the setting can be changed. |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index a73e08fdab36..fd4c36ea5e46 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -416,8 +416,11 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, | |||
416 | /* A length of zero means transfer the whole sg list */ | 416 | /* A length of zero means transfer the whole sg list */ |
417 | len = length; | 417 | len = length; |
418 | if (len == 0) { | 418 | if (len == 0) { |
419 | for_each_sg(sg, sg, nents, i) | 419 | struct scatterlist *sg2; |
420 | len += sg->length; | 420 | int j; |
421 | |||
422 | for_each_sg(sg, sg2, nents, j) | ||
423 | len += sg2->length; | ||
421 | } | 424 | } |
422 | } else { | 425 | } else { |
423 | /* | 426 | /* |
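The usb_sg_init() fix addresses a subtle misuse: for_each_sg() advances its second argument on every iteration, so passing the caller's sg both as the list head and as the cursor clobbers a pointer that is still needed afterwards. A correct length-summing loop over a scatterlist, as a stand-alone helper:

#include <linux/scatterlist.h>

static size_t total_sg_length(struct scatterlist *sg, int nents)
{
        struct scatterlist *cursor;
        size_t len = 0;
        int i;

        for_each_sg(sg, cursor, nents, i)
                len += cursor->length;

        return len;
}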
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c index 38226e9a371d..95dd4662d6a8 100644 --- a/drivers/usb/gadget/f_eem.c +++ b/drivers/usb/gadget/f_eem.c | |||
@@ -469,8 +469,7 @@ static int eem_unwrap(struct gether *port, | |||
469 | crc = get_unaligned_le32(skb->data + len | 469 | crc = get_unaligned_le32(skb->data + len |
470 | - ETH_FCS_LEN); | 470 | - ETH_FCS_LEN); |
471 | crc2 = ~crc32_le(~0, | 471 | crc2 = ~crc32_le(~0, |
472 | skb->data, | 472 | skb->data, len - ETH_FCS_LEN); |
473 | skb->len - ETH_FCS_LEN); | ||
474 | } else { | 473 | } else { |
475 | crc = get_unaligned_be32(skb->data + len | 474 | crc = get_unaligned_be32(skb->data + len |
476 | - ETH_FCS_LEN); | 475 | - ETH_FCS_LEN); |
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 7d05a0be5c60..4ce899c9b165 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c | |||
@@ -321,8 +321,8 @@ struct fsg_dev; | |||
321 | /* Data shared by all the FSG instances. */ | 321 | /* Data shared by all the FSG instances. */ |
322 | struct fsg_common { | 322 | struct fsg_common { |
323 | struct usb_gadget *gadget; | 323 | struct usb_gadget *gadget; |
324 | struct fsg_dev *fsg; | 324 | struct fsg_dev *fsg, *new_fsg; |
325 | struct fsg_dev *prev_fsg; | 325 | wait_queue_head_t fsg_wait; |
326 | 326 | ||
327 | /* filesem protects: backing files in use */ | 327 | /* filesem protects: backing files in use */ |
328 | struct rw_semaphore filesem; | 328 | struct rw_semaphore filesem; |
@@ -351,7 +351,6 @@ struct fsg_common { | |||
351 | enum fsg_state state; /* For exception handling */ | 351 | enum fsg_state state; /* For exception handling */ |
352 | unsigned int exception_req_tag; | 352 | unsigned int exception_req_tag; |
353 | 353 | ||
354 | u8 config, new_config; | ||
355 | enum data_direction data_dir; | 354 | enum data_direction data_dir; |
356 | u32 data_size; | 355 | u32 data_size; |
357 | u32 data_size_from_cmnd; | 356 | u32 data_size_from_cmnd; |
@@ -595,7 +594,7 @@ static int fsg_setup(struct usb_function *f, | |||
595 | u16 w_value = le16_to_cpu(ctrl->wValue); | 594 | u16 w_value = le16_to_cpu(ctrl->wValue); |
596 | u16 w_length = le16_to_cpu(ctrl->wLength); | 595 | u16 w_length = le16_to_cpu(ctrl->wLength); |
597 | 596 | ||
598 | if (!fsg->common->config) | 597 | if (!fsg_is_set(fsg->common)) |
599 | return -EOPNOTSUPP; | 598 | return -EOPNOTSUPP; |
600 | 599 | ||
601 | switch (ctrl->bRequest) { | 600 | switch (ctrl->bRequest) { |
@@ -2303,24 +2302,20 @@ static int alloc_request(struct fsg_common *common, struct usb_ep *ep, | |||
2303 | return -ENOMEM; | 2302 | return -ENOMEM; |
2304 | } | 2303 | } |
2305 | 2304 | ||
2306 | /* | 2305 | /* Reset interface setting and re-init endpoint state (toggle etc). */ |
2307 | * Reset interface setting and re-init endpoint state (toggle etc). | 2306 | static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) |
2308 | * Call with altsetting < 0 to disable the interface. The only other | ||
2309 | * available altsetting is 0, which enables the interface. | ||
2310 | */ | ||
2311 | static int do_set_interface(struct fsg_common *common, int altsetting) | ||
2312 | { | 2307 | { |
2313 | int rc = 0; | 2308 | const struct usb_endpoint_descriptor *d; |
2314 | int i; | 2309 | struct fsg_dev *fsg; |
2315 | const struct usb_endpoint_descriptor *d; | 2310 | int i, rc = 0; |
2316 | 2311 | ||
2317 | if (common->running) | 2312 | if (common->running) |
2318 | DBG(common, "reset interface\n"); | 2313 | DBG(common, "reset interface\n"); |
2319 | 2314 | ||
2320 | reset: | 2315 | reset: |
2321 | /* Deallocate the requests */ | 2316 | /* Deallocate the requests */ |
2322 | if (common->prev_fsg) { | 2317 | if (common->fsg) { |
2323 | struct fsg_dev *fsg = common->prev_fsg; | 2318 | fsg = common->fsg; |
2324 | 2319 | ||
2325 | for (i = 0; i < FSG_NUM_BUFFERS; ++i) { | 2320 | for (i = 0; i < FSG_NUM_BUFFERS; ++i) { |
2326 | struct fsg_buffhd *bh = &common->buffhds[i]; | 2321 | struct fsg_buffhd *bh = &common->buffhds[i]; |
@@ -2345,88 +2340,53 @@ reset: | |||
2345 | fsg->bulk_out_enabled = 0; | 2340 | fsg->bulk_out_enabled = 0; |
2346 | } | 2341 | } |
2347 | 2342 | ||
2348 | common->prev_fsg = 0; | 2343 | common->fsg = NULL; |
2344 | wake_up(&common->fsg_wait); | ||
2349 | } | 2345 | } |
2350 | 2346 | ||
2351 | common->running = 0; | 2347 | common->running = 0; |
2352 | if (altsetting < 0 || rc != 0) | 2348 | if (!new_fsg || rc) |
2353 | return rc; | 2349 | return rc; |
2354 | 2350 | ||
2355 | DBG(common, "set interface %d\n", altsetting); | 2351 | common->fsg = new_fsg; |
2352 | fsg = common->fsg; | ||
2356 | 2353 | ||
2357 | if (fsg_is_set(common)) { | 2354 | /* Enable the endpoints */ |
2358 | struct fsg_dev *fsg = common->fsg; | 2355 | d = fsg_ep_desc(common->gadget, |
2359 | common->prev_fsg = common->fsg; | 2356 | &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc); |
2357 | rc = enable_endpoint(common, fsg->bulk_in, d); | ||
2358 | if (rc) | ||
2359 | goto reset; | ||
2360 | fsg->bulk_in_enabled = 1; | ||
2361 | |||
2362 | d = fsg_ep_desc(common->gadget, | ||
2363 | &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc); | ||
2364 | rc = enable_endpoint(common, fsg->bulk_out, d); | ||
2365 | if (rc) | ||
2366 | goto reset; | ||
2367 | fsg->bulk_out_enabled = 1; | ||
2368 | common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize); | ||
2369 | clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); | ||
2360 | 2370 | ||
2361 | /* Enable the endpoints */ | 2371 | /* Allocate the requests */ |
2362 | d = fsg_ep_desc(common->gadget, | 2372 | for (i = 0; i < FSG_NUM_BUFFERS; ++i) { |
2363 | &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc); | 2373 | struct fsg_buffhd *bh = &common->buffhds[i]; |
2364 | rc = enable_endpoint(common, fsg->bulk_in, d); | 2374 | |
2375 | rc = alloc_request(common, fsg->bulk_in, &bh->inreq); | ||
2365 | if (rc) | 2376 | if (rc) |
2366 | goto reset; | 2377 | goto reset; |
2367 | fsg->bulk_in_enabled = 1; | 2378 | rc = alloc_request(common, fsg->bulk_out, &bh->outreq); |
2368 | |||
2369 | d = fsg_ep_desc(common->gadget, | ||
2370 | &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc); | ||
2371 | rc = enable_endpoint(common, fsg->bulk_out, d); | ||
2372 | if (rc) | 2379 | if (rc) |
2373 | goto reset; | 2380 | goto reset; |
2374 | fsg->bulk_out_enabled = 1; | 2381 | bh->inreq->buf = bh->outreq->buf = bh->buf; |
2375 | common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize); | 2382 | bh->inreq->context = bh->outreq->context = bh; |
2376 | clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); | 2383 | bh->inreq->complete = bulk_in_complete; |
2377 | 2384 | bh->outreq->complete = bulk_out_complete; | |
2378 | /* Allocate the requests */ | ||
2379 | for (i = 0; i < FSG_NUM_BUFFERS; ++i) { | ||
2380 | struct fsg_buffhd *bh = &common->buffhds[i]; | ||
2381 | |||
2382 | rc = alloc_request(common, fsg->bulk_in, &bh->inreq); | ||
2383 | if (rc) | ||
2384 | goto reset; | ||
2385 | rc = alloc_request(common, fsg->bulk_out, &bh->outreq); | ||
2386 | if (rc) | ||
2387 | goto reset; | ||
2388 | bh->inreq->buf = bh->outreq->buf = bh->buf; | ||
2389 | bh->inreq->context = bh->outreq->context = bh; | ||
2390 | bh->inreq->complete = bulk_in_complete; | ||
2391 | bh->outreq->complete = bulk_out_complete; | ||
2392 | } | ||
2393 | |||
2394 | common->running = 1; | ||
2395 | for (i = 0; i < common->nluns; ++i) | ||
2396 | common->luns[i].unit_attention_data = SS_RESET_OCCURRED; | ||
2397 | return rc; | ||
2398 | } else { | ||
2399 | return -EIO; | ||
2400 | } | ||
2401 | } | ||
2402 | |||
2403 | |||
2404 | /* | ||
2405 | * Change our operational configuration. This code must agree with the code | ||
2406 | * that returns config descriptors, and with interface altsetting code. | ||
2407 | * | ||
2408 | * It's also responsible for power management interactions. Some | ||
2409 | * configurations might not work with our current power sources. | ||
2410 | * For now we just assume the gadget is always self-powered. | ||
2411 | */ | ||
2412 | static int do_set_config(struct fsg_common *common, u8 new_config) | ||
2413 | { | ||
2414 | int rc = 0; | ||
2415 | |||
2416 | /* Disable the single interface */ | ||
2417 | if (common->config != 0) { | ||
2418 | DBG(common, "reset config\n"); | ||
2419 | common->config = 0; | ||
2420 | rc = do_set_interface(common, -1); | ||
2421 | } | 2385 | } |
2422 | 2386 | ||
2423 | /* Enable the interface */ | 2387 | common->running = 1; |
2424 | if (new_config != 0) { | 2388 | for (i = 0; i < common->nluns; ++i) |
2425 | common->config = new_config; | 2389 | common->luns[i].unit_attention_data = SS_RESET_OCCURRED; |
2426 | rc = do_set_interface(common, 0); | ||
2427 | if (rc != 0) | ||
2428 | common->config = 0; /* Reset on errors */ | ||
2429 | } | ||
2430 | return rc; | 2390 | return rc; |
2431 | } | 2391 | } |
2432 | 2392 | ||
@@ -2437,9 +2397,7 @@ static int do_set_config(struct fsg_common *common, u8 new_config) | |||
2437 | static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | 2397 | static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) |
2438 | { | 2398 | { |
2439 | struct fsg_dev *fsg = fsg_from_func(f); | 2399 | struct fsg_dev *fsg = fsg_from_func(f); |
2440 | fsg->common->prev_fsg = fsg->common->fsg; | 2400 | fsg->common->new_fsg = fsg; |
2441 | fsg->common->fsg = fsg; | ||
2442 | fsg->common->new_config = 1; | ||
2443 | raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); | 2401 | raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); |
2444 | return 0; | 2402 | return 0; |
2445 | } | 2403 | } |
@@ -2447,9 +2405,7 @@ static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | |||
2447 | static void fsg_disable(struct usb_function *f) | 2405 | static void fsg_disable(struct usb_function *f) |
2448 | { | 2406 | { |
2449 | struct fsg_dev *fsg = fsg_from_func(f); | 2407 | struct fsg_dev *fsg = fsg_from_func(f); |
2450 | fsg->common->prev_fsg = fsg->common->fsg; | 2408 | fsg->common->new_fsg = NULL; |
2451 | fsg->common->fsg = fsg; | ||
2452 | fsg->common->new_config = 0; | ||
2453 | raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); | 2409 | raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); |
2454 | } | 2410 | } |
2455 | 2411 | ||
@@ -2459,19 +2415,17 @@ static void fsg_disable(struct usb_function *f) | |||
2459 | static void handle_exception(struct fsg_common *common) | 2415 | static void handle_exception(struct fsg_common *common) |
2460 | { | 2416 | { |
2461 | siginfo_t info; | 2417 | siginfo_t info; |
2462 | int sig; | ||
2463 | int i; | 2418 | int i; |
2464 | struct fsg_buffhd *bh; | 2419 | struct fsg_buffhd *bh; |
2465 | enum fsg_state old_state; | 2420 | enum fsg_state old_state; |
2466 | u8 new_config; | ||
2467 | struct fsg_lun *curlun; | 2421 | struct fsg_lun *curlun; |
2468 | unsigned int exception_req_tag; | 2422 | unsigned int exception_req_tag; |
2469 | int rc; | ||
2470 | 2423 | ||
2471 | /* Clear the existing signals. Anything but SIGUSR1 is converted | 2424 | /* Clear the existing signals. Anything but SIGUSR1 is converted |
2472 | * into a high-priority EXIT exception. */ | 2425 | * into a high-priority EXIT exception. */ |
2473 | for (;;) { | 2426 | for (;;) { |
2474 | sig = dequeue_signal_lock(current, ¤t->blocked, &info); | 2427 | int sig = |
2428 | dequeue_signal_lock(current, ¤t->blocked, &info); | ||
2475 | if (!sig) | 2429 | if (!sig) |
2476 | break; | 2430 | break; |
2477 | if (sig != SIGUSR1) { | 2431 | if (sig != SIGUSR1) { |
@@ -2482,7 +2436,7 @@ static void handle_exception(struct fsg_common *common) | |||
2482 | } | 2436 | } |
2483 | 2437 | ||
2484 | /* Cancel all the pending transfers */ | 2438 | /* Cancel all the pending transfers */ |
2485 | if (fsg_is_set(common)) { | 2439 | if (likely(common->fsg)) { |
2486 | for (i = 0; i < FSG_NUM_BUFFERS; ++i) { | 2440 | for (i = 0; i < FSG_NUM_BUFFERS; ++i) { |
2487 | bh = &common->buffhds[i]; | 2441 | bh = &common->buffhds[i]; |
2488 | if (bh->inreq_busy) | 2442 | if (bh->inreq_busy) |
@@ -2523,7 +2477,6 @@ static void handle_exception(struct fsg_common *common) | |||
2523 | common->next_buffhd_to_fill = &common->buffhds[0]; | 2477 | common->next_buffhd_to_fill = &common->buffhds[0]; |
2524 | common->next_buffhd_to_drain = &common->buffhds[0]; | 2478 | common->next_buffhd_to_drain = &common->buffhds[0]; |
2525 | exception_req_tag = common->exception_req_tag; | 2479 | exception_req_tag = common->exception_req_tag; |
2526 | new_config = common->new_config; | ||
2527 | old_state = common->state; | 2480 | old_state = common->state; |
2528 | 2481 | ||
2529 | if (old_state == FSG_STATE_ABORT_BULK_OUT) | 2482 | if (old_state == FSG_STATE_ABORT_BULK_OUT) |
@@ -2573,12 +2526,12 @@ static void handle_exception(struct fsg_common *common) | |||
2573 | break; | 2526 | break; |
2574 | 2527 | ||
2575 | case FSG_STATE_CONFIG_CHANGE: | 2528 | case FSG_STATE_CONFIG_CHANGE: |
2576 | rc = do_set_config(common, new_config); | 2529 | do_set_interface(common, common->new_fsg); |
2577 | break; | 2530 | break; |
2578 | 2531 | ||
2579 | case FSG_STATE_EXIT: | 2532 | case FSG_STATE_EXIT: |
2580 | case FSG_STATE_TERMINATED: | 2533 | case FSG_STATE_TERMINATED: |
2581 | do_set_config(common, 0); /* Free resources */ | 2534 | do_set_interface(common, NULL); /* Free resources */ |
2582 | spin_lock_irq(&common->lock); | 2535 | spin_lock_irq(&common->lock); |
2583 | common->state = FSG_STATE_TERMINATED; /* Stop the thread */ | 2536 | common->state = FSG_STATE_TERMINATED; /* Stop the thread */ |
2584 | spin_unlock_irq(&common->lock); | 2537 | spin_unlock_irq(&common->lock); |
@@ -2863,6 +2816,7 @@ buffhds_first_it: | |||
2863 | goto error_release; | 2816 | goto error_release; |
2864 | } | 2817 | } |
2865 | init_completion(&common->thread_notifier); | 2818 | init_completion(&common->thread_notifier); |
2819 | init_waitqueue_head(&common->fsg_wait); | ||
2866 | #undef OR | 2820 | #undef OR |
2867 | 2821 | ||
2868 | 2822 | ||
@@ -2957,9 +2911,17 @@ static void fsg_common_release(struct kref *ref) | |||
2957 | static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) | 2911 | static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) |
2958 | { | 2912 | { |
2959 | struct fsg_dev *fsg = fsg_from_func(f); | 2913 | struct fsg_dev *fsg = fsg_from_func(f); |
2914 | struct fsg_common *common = fsg->common; | ||
2960 | 2915 | ||
2961 | DBG(fsg, "unbind\n"); | 2916 | DBG(fsg, "unbind\n"); |
2962 | fsg_common_put(fsg->common); | 2917 | if (fsg->common->fsg == fsg) { |
2918 | fsg->common->new_fsg = NULL; | ||
2919 | raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); | ||
2920 | /* FIXME: make interruptible or killable somehow? */ | ||
2921 | wait_event(common->fsg_wait, common->fsg != fsg); | ||
2922 | } | ||
2923 | |||
2924 | fsg_common_put(common); | ||
2963 | usb_free_descriptors(fsg->function.descriptors); | 2925 | usb_free_descriptors(fsg->function.descriptors); |
2964 | usb_free_descriptors(fsg->function.hs_descriptors); | 2926 | usb_free_descriptors(fsg->function.hs_descriptors); |
2965 | kfree(fsg); | 2927 | kfree(fsg); |
@@ -2970,7 +2932,6 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) | |||
2970 | { | 2932 | { |
2971 | struct fsg_dev *fsg = fsg_from_func(f); | 2933 | struct fsg_dev *fsg = fsg_from_func(f); |
2972 | struct usb_gadget *gadget = c->cdev->gadget; | 2934 | struct usb_gadget *gadget = c->cdev->gadget; |
2973 | int rc; | ||
2974 | int i; | 2935 | int i; |
2975 | struct usb_ep *ep; | 2936 | struct usb_ep *ep; |
2976 | 2937 | ||
@@ -2996,6 +2957,11 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) | |||
2996 | ep->driver_data = fsg->common; /* claim the endpoint */ | 2957 | ep->driver_data = fsg->common; /* claim the endpoint */ |
2997 | fsg->bulk_out = ep; | 2958 | fsg->bulk_out = ep; |
2998 | 2959 | ||
2960 | /* Copy descriptors */ | ||
2961 | f->descriptors = usb_copy_descriptors(fsg_fs_function); | ||
2962 | if (unlikely(!f->descriptors)) | ||
2963 | return -ENOMEM; | ||
2964 | |||
2999 | if (gadget_is_dualspeed(gadget)) { | 2965 | if (gadget_is_dualspeed(gadget)) { |
3000 | /* Assume endpoint addresses are the same for both speeds */ | 2966 | /* Assume endpoint addresses are the same for both speeds */ |
3001 | fsg_hs_bulk_in_desc.bEndpointAddress = | 2967 | fsg_hs_bulk_in_desc.bEndpointAddress = |
@@ -3003,16 +2969,17 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) | |||
3003 | fsg_hs_bulk_out_desc.bEndpointAddress = | 2969 | fsg_hs_bulk_out_desc.bEndpointAddress = |
3004 | fsg_fs_bulk_out_desc.bEndpointAddress; | 2970 | fsg_fs_bulk_out_desc.bEndpointAddress; |
3005 | f->hs_descriptors = usb_copy_descriptors(fsg_hs_function); | 2971 | f->hs_descriptors = usb_copy_descriptors(fsg_hs_function); |
3006 | if (unlikely(!f->hs_descriptors)) | 2972 | if (unlikely(!f->hs_descriptors)) { |
2973 | usb_free_descriptors(f->descriptors); | ||
3007 | return -ENOMEM; | 2974 | return -ENOMEM; |
2975 | } | ||
3008 | } | 2976 | } |
3009 | 2977 | ||
3010 | return 0; | 2978 | return 0; |
3011 | 2979 | ||
3012 | autoconf_fail: | 2980 | autoconf_fail: |
3013 | ERROR(fsg, "unable to autoconfigure all endpoints\n"); | 2981 | ERROR(fsg, "unable to autoconfigure all endpoints\n"); |
3014 | rc = -ENOTSUPP; | 2982 | return -ENOTSUPP; |
3015 | return rc; | ||
3016 | } | 2983 | } |
3017 | 2984 | ||
3018 | 2985 | ||
@@ -3036,11 +3003,6 @@ static int fsg_add(struct usb_composite_dev *cdev, | |||
3036 | 3003 | ||
3037 | fsg->function.name = FSG_DRIVER_DESC; | 3004 | fsg->function.name = FSG_DRIVER_DESC; |
3038 | fsg->function.strings = fsg_strings_array; | 3005 | fsg->function.strings = fsg_strings_array; |
3039 | fsg->function.descriptors = usb_copy_descriptors(fsg_fs_function); | ||
3040 | if (unlikely(!fsg->function.descriptors)) { | ||
3041 | rc = -ENOMEM; | ||
3042 | goto error_free_fsg; | ||
3043 | } | ||
3044 | fsg->function.bind = fsg_bind; | 3006 | fsg->function.bind = fsg_bind; |
3045 | fsg->function.unbind = fsg_unbind; | 3007 | fsg->function.unbind = fsg_unbind; |
3046 | fsg->function.setup = fsg_setup; | 3008 | fsg->function.setup = fsg_setup; |
@@ -3056,19 +3018,9 @@ static int fsg_add(struct usb_composite_dev *cdev, | |||
3056 | 3018 | ||
3057 | rc = usb_add_function(c, &fsg->function); | 3019 | rc = usb_add_function(c, &fsg->function); |
3058 | if (unlikely(rc)) | 3020 | if (unlikely(rc)) |
3059 | goto error_free_all; | 3021 | kfree(fsg); |
3060 | 3022 | else | |
3061 | fsg_common_get(fsg->common); | 3023 | fsg_common_get(fsg->common); |
3062 | return 0; | ||
3063 | |||
3064 | error_free_all: | ||
3065 | usb_free_descriptors(fsg->function.descriptors); | ||
3066 | /* fsg_bind() might have copied those; or maybe not? who cares | ||
3067 | * -- free it just in case. */ | ||
3068 | usb_free_descriptors(fsg->function.hs_descriptors); | ||
3069 | error_free_fsg: | ||
3070 | kfree(fsg); | ||
3071 | |||
3072 | return rc; | 3024 | return rc; |
3073 | } | 3025 | } |
3074 | 3026 | ||
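The f_mass_storage hunks above replace the old config/altsetting bookkeeping with a single new_fsg pointer plus a fsg_wait waitqueue, so fsg_unbind() can sleep until the worker thread has really stopped using the interface. Below is a minimal sketch of that handoff pattern only; the demo_* names are invented, locking is omitted for brevity, and this is not the driver's code (the real driver raises an exception to the worker before waiting).

#include <linux/wait.h>
#include <linux/module.h>

/* Sketch only: one "owner" pointer plus a waitqueue, mirroring common->fsg
 * and common->fsg_wait.  All demo_* identifiers are made up. */
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static void *demo_owner;

/* Worker side (cf. do_set_interface()): drop the old owner and wake any
 * unbind path waiting for us to let go of it. */
static void demo_release_owner(void)
{
	demo_owner = NULL;
	wake_up(&demo_wait);
}

/* Unbind side (cf. fsg_unbind()): if we are still the active owner, wait
 * until the worker has switched away before tearing anything down. */
static void demo_unbind(void *me)
{
	if (demo_owner == me)
		wait_event(demo_wait, demo_owner != me);
}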
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c index 4b0e4a040d6f..d1af253a9105 100644 --- a/drivers/usb/gadget/g_ffs.c +++ b/drivers/usb/gadget/g_ffs.c | |||
@@ -392,6 +392,17 @@ static int __gfs_do_config(struct usb_configuration *c, | |||
392 | if (unlikely(ret < 0)) | 392 | if (unlikely(ret < 0)) |
393 | return ret; | 393 | return ret; |
394 | 394 | ||
395 | /* After previous do_configs there may be some invalid | ||
396 | * pointers in the c->interface array. This happens every | ||
397 | * time a user space function with fewer interfaces is run | ||
398 | * after one that registered more of them. The composite | ||
399 | * layer's set_config() assumes that if a configuration has | ||
400 | * fewer than MAX_CONFIG_INTERFACES interfaces, a NULL | ||
401 | * pointer follows the last interface in the c->interface | ||
402 | * array. We need to make sure this is true. */ | ||
403 | if (c->next_interface_id < ARRAY_SIZE(c->interface)) | ||
404 | c->interface[c->next_interface_id] = NULL; | ||
405 | |||
395 | return 0; | 406 | return 0; |
396 | } | 407 | } |
397 | 408 | ||
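A stand-alone demo of the g_ffs fix above (array size and names invented): the table still holds stale pointers from a previous, larger configuration, so after building a shorter one it must be NULL-terminated, exactly as the added c->interface[c->next_interface_id] = NULL does.

#include <stdio.h>

#define MAX_IFS 8

int main(void)
{
	const char *interface[MAX_IFS];
	unsigned int next_interface_id;
	unsigned int i;

	/* a previous run used every slot */
	for (i = 0; i < MAX_IFS; i++)
		interface[i] = "stale";

	/* the new run registers only three interfaces */
	interface[0] = "if0";
	interface[1] = "if1";
	interface[2] = "if2";
	next_interface_id = 3;

	if (next_interface_id < MAX_IFS)
		interface[next_interface_id] = NULL;	/* the added guard */

	/* walkers that stop at the first NULL now stop in the right place */
	for (i = 0; i < MAX_IFS && interface[i]; i++)
		printf("%s\n", interface[i]);
	return 0;
}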
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 43abf55d8c60..4c3ac5c42237 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c | |||
@@ -82,7 +82,7 @@ static struct class *usb_gadget_class; | |||
82 | struct printer_dev { | 82 | struct printer_dev { |
83 | spinlock_t lock; /* lock this structure */ | 83 | spinlock_t lock; /* lock this structure */ |
84 | /* lock buffer lists during read/write calls */ | 84 | /* lock buffer lists during read/write calls */ |
85 | spinlock_t lock_printer_io; | 85 | struct mutex lock_printer_io; |
86 | struct usb_gadget *gadget; | 86 | struct usb_gadget *gadget; |
87 | struct usb_request *req; /* for control responses */ | 87 | struct usb_request *req; /* for control responses */ |
88 | u8 config; | 88 | u8 config; |
@@ -567,7 +567,7 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
567 | 567 | ||
568 | DBG(dev, "printer_read trying to read %d bytes\n", (int)len); | 568 | DBG(dev, "printer_read trying to read %d bytes\n", (int)len); |
569 | 569 | ||
570 | spin_lock(&dev->lock_printer_io); | 570 | mutex_lock(&dev->lock_printer_io); |
571 | spin_lock_irqsave(&dev->lock, flags); | 571 | spin_lock_irqsave(&dev->lock, flags); |
572 | 572 | ||
573 | /* We will use this flag later to check if a printer reset happened | 573 | /* We will use this flag later to check if a printer reset happened |
@@ -601,7 +601,7 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
601 | * call or not. | 601 | * call or not. |
602 | */ | 602 | */ |
603 | if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { | 603 | if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { |
604 | spin_unlock(&dev->lock_printer_io); | 604 | mutex_unlock(&dev->lock_printer_io); |
605 | return -EAGAIN; | 605 | return -EAGAIN; |
606 | } | 606 | } |
607 | 607 | ||
@@ -648,7 +648,7 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
648 | if (dev->reset_printer) { | 648 | if (dev->reset_printer) { |
649 | list_add(¤t_rx_req->list, &dev->rx_reqs); | 649 | list_add(¤t_rx_req->list, &dev->rx_reqs); |
650 | spin_unlock_irqrestore(&dev->lock, flags); | 650 | spin_unlock_irqrestore(&dev->lock, flags); |
651 | spin_unlock(&dev->lock_printer_io); | 651 | mutex_unlock(&dev->lock_printer_io); |
652 | return -EAGAIN; | 652 | return -EAGAIN; |
653 | } | 653 | } |
654 | 654 | ||
@@ -673,7 +673,7 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
673 | dev->current_rx_buf = current_rx_buf; | 673 | dev->current_rx_buf = current_rx_buf; |
674 | 674 | ||
675 | spin_unlock_irqrestore(&dev->lock, flags); | 675 | spin_unlock_irqrestore(&dev->lock, flags); |
676 | spin_unlock(&dev->lock_printer_io); | 676 | mutex_unlock(&dev->lock_printer_io); |
677 | 677 | ||
678 | DBG(dev, "printer_read returned %d bytes\n", (int)bytes_copied); | 678 | DBG(dev, "printer_read returned %d bytes\n", (int)bytes_copied); |
679 | 679 | ||
@@ -697,7 +697,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
697 | if (len == 0) | 697 | if (len == 0) |
698 | return -EINVAL; | 698 | return -EINVAL; |
699 | 699 | ||
700 | spin_lock(&dev->lock_printer_io); | 700 | mutex_lock(&dev->lock_printer_io); |
701 | spin_lock_irqsave(&dev->lock, flags); | 701 | spin_lock_irqsave(&dev->lock, flags); |
702 | 702 | ||
703 | /* Check if a printer reset happens while we have interrupts on */ | 703 | /* Check if a printer reset happens while we have interrupts on */ |
@@ -713,7 +713,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
713 | * a NON-Blocking call or not. | 713 | * a NON-Blocking call or not. |
714 | */ | 714 | */ |
715 | if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { | 715 | if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { |
716 | spin_unlock(&dev->lock_printer_io); | 716 | mutex_unlock(&dev->lock_printer_io); |
717 | return -EAGAIN; | 717 | return -EAGAIN; |
718 | } | 718 | } |
719 | 719 | ||
@@ -752,7 +752,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
752 | 752 | ||
753 | if (copy_from_user(req->buf, buf, size)) { | 753 | if (copy_from_user(req->buf, buf, size)) { |
754 | list_add(&req->list, &dev->tx_reqs); | 754 | list_add(&req->list, &dev->tx_reqs); |
755 | spin_unlock(&dev->lock_printer_io); | 755 | mutex_unlock(&dev->lock_printer_io); |
756 | return bytes_copied; | 756 | return bytes_copied; |
757 | } | 757 | } |
758 | 758 | ||
@@ -766,14 +766,14 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
766 | if (dev->reset_printer) { | 766 | if (dev->reset_printer) { |
767 | list_add(&req->list, &dev->tx_reqs); | 767 | list_add(&req->list, &dev->tx_reqs); |
768 | spin_unlock_irqrestore(&dev->lock, flags); | 768 | spin_unlock_irqrestore(&dev->lock, flags); |
769 | spin_unlock(&dev->lock_printer_io); | 769 | mutex_unlock(&dev->lock_printer_io); |
770 | return -EAGAIN; | 770 | return -EAGAIN; |
771 | } | 771 | } |
772 | 772 | ||
773 | if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { | 773 | if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { |
774 | list_add(&req->list, &dev->tx_reqs); | 774 | list_add(&req->list, &dev->tx_reqs); |
775 | spin_unlock_irqrestore(&dev->lock, flags); | 775 | spin_unlock_irqrestore(&dev->lock, flags); |
776 | spin_unlock(&dev->lock_printer_io); | 776 | mutex_unlock(&dev->lock_printer_io); |
777 | return -EAGAIN; | 777 | return -EAGAIN; |
778 | } | 778 | } |
779 | 779 | ||
@@ -782,7 +782,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
782 | } | 782 | } |
783 | 783 | ||
784 | spin_unlock_irqrestore(&dev->lock, flags); | 784 | spin_unlock_irqrestore(&dev->lock, flags); |
785 | spin_unlock(&dev->lock_printer_io); | 785 | mutex_unlock(&dev->lock_printer_io); |
786 | 786 | ||
787 | DBG(dev, "printer_write sent %d bytes\n", (int)bytes_copied); | 787 | DBG(dev, "printer_write sent %d bytes\n", (int)bytes_copied); |
788 | 788 | ||
@@ -820,11 +820,11 @@ printer_poll(struct file *fd, poll_table *wait) | |||
820 | unsigned long flags; | 820 | unsigned long flags; |
821 | int status = 0; | 821 | int status = 0; |
822 | 822 | ||
823 | spin_lock(&dev->lock_printer_io); | 823 | mutex_lock(&dev->lock_printer_io); |
824 | spin_lock_irqsave(&dev->lock, flags); | 824 | spin_lock_irqsave(&dev->lock, flags); |
825 | setup_rx_reqs(dev); | 825 | setup_rx_reqs(dev); |
826 | spin_unlock_irqrestore(&dev->lock, flags); | 826 | spin_unlock_irqrestore(&dev->lock, flags); |
827 | spin_unlock(&dev->lock_printer_io); | 827 | mutex_unlock(&dev->lock_printer_io); |
828 | 828 | ||
829 | poll_wait(fd, &dev->rx_wait, wait); | 829 | poll_wait(fd, &dev->rx_wait, wait); |
830 | poll_wait(fd, &dev->tx_wait, wait); | 830 | poll_wait(fd, &dev->tx_wait, wait); |
@@ -1461,7 +1461,7 @@ autoconf_fail: | |||
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | spin_lock_init(&dev->lock); | 1463 | spin_lock_init(&dev->lock); |
1464 | spin_lock_init(&dev->lock_printer_io); | 1464 | mutex_init(&dev->lock_printer_io); |
1465 | INIT_LIST_HEAD(&dev->tx_reqs); | 1465 | INIT_LIST_HEAD(&dev->tx_reqs); |
1466 | INIT_LIST_HEAD(&dev->tx_reqs_active); | 1466 | INIT_LIST_HEAD(&dev->tx_reqs_active); |
1467 | INIT_LIST_HEAD(&dev->rx_reqs); | 1467 | INIT_LIST_HEAD(&dev->rx_reqs); |
@@ -1594,7 +1594,7 @@ cleanup(void) | |||
1594 | { | 1594 | { |
1595 | int status; | 1595 | int status; |
1596 | 1596 | ||
1597 | spin_lock(&usb_printer_gadget.lock_printer_io); | 1597 | mutex_lock(&usb_printer_gadget.lock_printer_io); |
1598 | class_destroy(usb_gadget_class); | 1598 | class_destroy(usb_gadget_class); |
1599 | unregister_chrdev_region(g_printer_devno, 2); | 1599 | unregister_chrdev_region(g_printer_devno, 2); |
1600 | 1600 | ||
@@ -1602,6 +1602,6 @@ cleanup(void) | |||
1602 | if (status) | 1602 | if (status) |
1603 | ERROR(dev, "usb_gadget_unregister_driver %x\n", status); | 1603 | ERROR(dev, "usb_gadget_unregister_driver %x\n", status); |
1604 | 1604 | ||
1605 | spin_unlock(&usb_printer_gadget.lock_printer_io); | 1605 | mutex_unlock(&usb_printer_gadget.lock_printer_io); |
1606 | } | 1606 | } |
1607 | module_exit(cleanup); | 1607 | module_exit(cleanup); |
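The printer.c hunks convert lock_printer_io from a spinlock to a mutex because the read/write paths can sleep (copy_from_user(), blocking waits), which is not allowed under a spinlock. A sketch of the resulting locking split follows; the demo_* names and structure are invented, not the gadget's code.

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

struct demo_dev {
	struct mutex io_lock;	/* was a spinlock; like lock_printer_io */
	spinlock_t lock;	/* still a spinlock; protects request lists */
};

static ssize_t demo_write(struct demo_dev *dev, void *kbuf,
			  const char __user *ubuf, size_t len)
{
	unsigned long flags;
	ssize_t ret = len;

	mutex_lock(&dev->io_lock);		/* sleeping is fine here */

	if (copy_from_user(kbuf, ubuf, len)) {	/* may fault and sleep */
		ret = -EFAULT;
		goto out;
	}

	spin_lock_irqsave(&dev->lock, flags);	/* short non-sleeping section */
	/* ... move the request onto the active list ... */
	spin_unlock_irqrestore(&dev->lock, flags);
out:
	mutex_unlock(&dev->io_lock);
	return ret;
}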
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index d5f4c1d45c97..e724a051bfdd 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c | |||
@@ -1700,9 +1700,13 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1700 | if (!driver || driver != udc->driver || !driver->unbind) | 1700 | if (!driver || driver != udc->driver || !driver->unbind) |
1701 | return -EINVAL; | 1701 | return -EINVAL; |
1702 | 1702 | ||
1703 | dprintk(DEBUG_NORMAL,"usb_gadget_register_driver() '%s'\n", | 1703 | dprintk(DEBUG_NORMAL, "usb_gadget_unregister_driver() '%s'\n", |
1704 | driver->driver.name); | 1704 | driver->driver.name); |
1705 | 1705 | ||
1706 | /* report disconnect */ | ||
1707 | if (driver->disconnect) | ||
1708 | driver->disconnect(&udc->gadget); | ||
1709 | |||
1706 | driver->unbind(&udc->gadget); | 1710 | driver->unbind(&udc->gadget); |
1707 | 1711 | ||
1708 | device_del(&udc->gadget.dev); | 1712 | device_del(&udc->gadget.dev); |
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index 16bdf77f582a..3e8dcb5455e3 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c | |||
@@ -536,17 +536,11 @@ recycle: | |||
536 | list_move(&req->list, &port->read_pool); | 536 | list_move(&req->list, &port->read_pool); |
537 | } | 537 | } |
538 | 538 | ||
539 | /* Push from tty to ldisc; this is immediate with low_latency, and | 539 | /* Push from tty to ldisc; without low_latency set this is handled by |
540 | * may trigger callbacks to this driver ... so drop the spinlock. | 540 | * a workqueue, so we won't get callbacks and can hold port_lock |
541 | */ | 541 | */ |
542 | if (tty && do_push) { | 542 | if (tty && do_push) { |
543 | spin_unlock_irq(&port->port_lock); | ||
544 | tty_flip_buffer_push(tty); | 543 | tty_flip_buffer_push(tty); |
545 | wake_up_interruptible(&tty->read_wait); | ||
546 | spin_lock_irq(&port->port_lock); | ||
547 | |||
548 | /* tty may have been closed */ | ||
549 | tty = port->port_tty; | ||
550 | } | 544 | } |
551 | 545 | ||
552 | 546 | ||
@@ -784,11 +778,6 @@ static int gs_open(struct tty_struct *tty, struct file *file) | |||
784 | port->open_count = 1; | 778 | port->open_count = 1; |
785 | port->openclose = false; | 779 | port->openclose = false; |
786 | 780 | ||
787 | /* low_latency means ldiscs work in tasklet context, without | ||
788 | * needing a workqueue schedule ... easier to keep up. | ||
789 | */ | ||
790 | tty->low_latency = 1; | ||
791 | |||
792 | /* if connected, start the I/O stream */ | 781 | /* if connected, start the I/O stream */ |
793 | if (port->port_usb) { | 782 | if (port->port_usb) { |
794 | struct gserial *gser = port->port_usb; | 783 | struct gserial *gser = port->port_usb; |
@@ -1195,6 +1184,7 @@ void gserial_cleanup(void) | |||
1195 | n_ports = 0; | 1184 | n_ports = 0; |
1196 | 1185 | ||
1197 | tty_unregister_driver(gs_tty_driver); | 1186 | tty_unregister_driver(gs_tty_driver); |
1187 | put_tty_driver(gs_tty_driver); | ||
1198 | gs_tty_driver = NULL; | 1188 | gs_tty_driver = NULL; |
1199 | 1189 | ||
1200 | pr_debug("%s: cleaned up ttyGS* support\n", __func__); | 1190 | pr_debug("%s: cleaned up ttyGS* support\n", __func__); |
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index 544ccfd7056e..bd4027745aa7 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c | |||
@@ -207,10 +207,17 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) | |||
207 | /* Initialize the transceiver */ | 207 | /* Initialize the transceiver */ |
208 | if (pdata->otg) { | 208 | if (pdata->otg) { |
209 | pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; | 209 | pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; |
210 | if (otg_init(pdata->otg) != 0) | 210 | ret = otg_init(pdata->otg); |
211 | dev_err(dev, "unable to init transceiver\n"); | 211 | if (ret) { |
212 | else if (otg_set_vbus(pdata->otg, 1) != 0) | 212 | dev_err(dev, "unable to init transceiver, probably missing\n"); |
213 | ret = -ENODEV; | ||
214 | goto err_add; | ||
215 | } | ||
216 | ret = otg_set_vbus(pdata->otg, 1); | ||
217 | if (ret) { | ||
213 | dev_err(dev, "unable to enable vbus on transceiver\n"); | 218 | dev_err(dev, "unable to enable vbus on transceiver\n"); |
219 | goto err_add; | ||
220 | } | ||
214 | } | 221 | } |
215 | 222 | ||
216 | priv->hcd = hcd; | 223 | priv->hcd = hcd; |
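The ehci-mxc change stops ignoring transceiver failures: each step's return value is checked and failure jumps to the common unwind label (err_add). A generic user-space illustration of that style, with invented step names, is below.

#include <stdio.h>

static int init_transceiver(void) { return -1; }   /* pretend this fails */
static int enable_vbus(void)      { return 0; }
static void undo_setup(void)      { puts("rolling back earlier setup"); }

static int demo_probe(void)
{
	int ret;

	ret = init_transceiver();
	if (ret) {
		fprintf(stderr, "unable to init transceiver\n");
		goto err;
	}

	ret = enable_vbus();
	if (ret) {
		fprintf(stderr, "unable to enable vbus\n");
		goto err;
	}

	return 0;

err:
	undo_setup();	/* stands in for the driver's err_add cleanup */
	return ret;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}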
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 20a0dfe0fe36..0587ad4ce5c2 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c | |||
@@ -2224,12 +2224,9 @@ static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) | |||
2224 | 2224 | ||
2225 | /*-------------------------------------------------------------------------*/ | 2225 | /*-------------------------------------------------------------------------*/ |
2226 | 2226 | ||
2227 | static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) | 2227 | static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) |
2228 | { | 2228 | { |
2229 | int tmp = 20; | 2229 | int tmp = 20; |
2230 | unsigned long flags; | ||
2231 | |||
2232 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
2233 | 2230 | ||
2234 | isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); | 2231 | isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); |
2235 | isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); | 2232 | isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); |
@@ -2240,6 +2237,14 @@ static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) | |||
2240 | } | 2237 | } |
2241 | if (!tmp) | 2238 | if (!tmp) |
2242 | pr_err("Software reset timeout\n"); | 2239 | pr_err("Software reset timeout\n"); |
2240 | } | ||
2241 | |||
2242 | static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) | ||
2243 | { | ||
2244 | unsigned long flags; | ||
2245 | |||
2246 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
2247 | __isp1362_sw_reset(isp1362_hcd); | ||
2243 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | 2248 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); |
2244 | } | 2249 | } |
2245 | 2250 | ||
@@ -2418,7 +2423,7 @@ static void isp1362_hc_stop(struct usb_hcd *hcd) | |||
2418 | if (isp1362_hcd->board && isp1362_hcd->board->reset) | 2423 | if (isp1362_hcd->board && isp1362_hcd->board->reset) |
2419 | isp1362_hcd->board->reset(hcd->self.controller, 1); | 2424 | isp1362_hcd->board->reset(hcd->self.controller, 1); |
2420 | else | 2425 | else |
2421 | isp1362_sw_reset(isp1362_hcd); | 2426 | __isp1362_sw_reset(isp1362_hcd); |
2422 | 2427 | ||
2423 | if (isp1362_hcd->board && isp1362_hcd->board->clock) | 2428 | if (isp1362_hcd->board && isp1362_hcd->board->clock) |
2424 | isp1362_hcd->board->clock(hcd->self.controller, 0); | 2429 | isp1362_hcd->board->clock(hcd->self.controller, 0); |
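The isp1362 hunk splits the reset into a lockless __isp1362_sw_reset() plus a locking wrapper, so isp1362_hc_stop(), which already holds the lock, can call the inner helper without self-deadlocking. A generic sketch of that convention, with invented demo_* names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Double-underscore variant: assumes the caller already holds demo_lock. */
static void __demo_sw_reset(void)
{
	/* poke the reset registers; demo_lock is held by the caller */
}

/* Plain-named wrapper: takes the lock itself for callers that do not. */
static void demo_sw_reset(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	__demo_sw_reset();
	spin_unlock_irqrestore(&demo_lock, flags);
}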
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 1a2bb4ce638f..77be3c24a427 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
@@ -1065,7 +1065,7 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port) | |||
1065 | else if (speed == LSMODE) | 1065 | else if (speed == LSMODE) |
1066 | rh->port |= USB_PORT_STAT_LOW_SPEED; | 1066 | rh->port |= USB_PORT_STAT_LOW_SPEED; |
1067 | 1067 | ||
1068 | rh->port &= USB_PORT_STAT_RESET; | 1068 | rh->port &= ~USB_PORT_STAT_RESET; |
1069 | rh->port |= USB_PORT_STAT_ENABLE; | 1069 | rh->port |= USB_PORT_STAT_ENABLE; |
1070 | } | 1070 | } |
1071 | 1071 | ||
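A stand-alone demo of the one-character r8a66597 fix (flag values invented here): "port &= FLAG" keeps only that flag and wipes every other bit, while "port &= ~FLAG" clears just that flag and preserves the rest, which is what the root-hub status update needs.

#include <stdio.h>

#define FLAG_RESET	0x0010u
#define FLAG_LOW_SPEED	0x0200u

int main(void)
{
	unsigned int port = FLAG_LOW_SPEED | FLAG_RESET;

	unsigned int buggy = port & FLAG_RESET;	  /* loses FLAG_LOW_SPEED */
	unsigned int fixed = port & ~FLAG_RESET;  /* clears only the reset bit */

	printf("buggy=0x%04x fixed=0x%04x\n", buggy, fixed);
	return 0;
}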
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9012098add6b..94e6934edb09 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -182,8 +182,12 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
182 | * set, but other sections talk about dealing with the chain bit set. This was | 182 | * set, but other sections talk about dealing with the chain bit set. This was |
183 | * fixed in the 0.96 specification errata, but we have to assume that all 0.95 | 183 | * fixed in the 0.96 specification errata, but we have to assume that all 0.95 |
184 | * xHCI hardware can't handle the chain bit being cleared on a link TRB. | 184 | * xHCI hardware can't handle the chain bit being cleared on a link TRB. |
185 | * | ||
186 | * @more_trbs_coming: Will you enqueue more TRBs before calling | ||
187 | * prepare_transfer()? | ||
185 | */ | 188 | */ |
186 | static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) | 189 | static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, |
190 | bool consumer, bool more_trbs_coming) | ||
187 | { | 191 | { |
188 | u32 chain; | 192 | u32 chain; |
189 | union xhci_trb *next; | 193 | union xhci_trb *next; |
@@ -199,15 +203,28 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
199 | while (last_trb(xhci, ring, ring->enq_seg, next)) { | 203 | while (last_trb(xhci, ring, ring->enq_seg, next)) { |
200 | if (!consumer) { | 204 | if (!consumer) { |
201 | if (ring != xhci->event_ring) { | 205 | if (ring != xhci->event_ring) { |
202 | if (chain) { | 206 | /* |
203 | next->link.control |= TRB_CHAIN; | 207 | * If the caller doesn't plan on enqueueing more |
204 | 208 | * TDs before ringing the doorbell, then we | |
205 | /* Give this link TRB to the hardware */ | 209 | * don't want to give the link TRB to the |
206 | wmb(); | 210 | * hardware just yet. We'll give the link TRB |
207 | next->link.control ^= TRB_CYCLE; | 211 | * back in prepare_ring() just before we enqueue |
208 | } else { | 212 | * the TD at the top of the ring. |
213 | */ | ||
214 | if (!chain && !more_trbs_coming) | ||
209 | break; | 215 | break; |
216 | |||
217 | /* If we're not dealing with 0.95 hardware, | ||
218 | * carry over the chain bit of the previous TRB | ||
219 | * (which may mean the chain bit is cleared). | ||
220 | */ | ||
221 | if (!xhci_link_trb_quirk(xhci)) { | ||
222 | next->link.control &= ~TRB_CHAIN; | ||
223 | next->link.control |= chain; | ||
210 | } | 224 | } |
225 | /* Give this link TRB to the hardware */ | ||
226 | wmb(); | ||
227 | next->link.control ^= TRB_CYCLE; | ||
211 | } | 228 | } |
212 | /* Toggle the cycle bit after the last ring segment. */ | 229 | /* Toggle the cycle bit after the last ring segment. */ |
213 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { | 230 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { |
@@ -1707,9 +1724,12 @@ void xhci_handle_event(struct xhci_hcd *xhci) | |||
1707 | /* | 1724 | /* |
1708 | * Generic function for queueing a TRB on a ring. | 1725 | * Generic function for queueing a TRB on a ring. |
1709 | * The caller must have checked to make sure there's room on the ring. | 1726 | * The caller must have checked to make sure there's room on the ring. |
1727 | * | ||
1728 | * @more_trbs_coming: Will you enqueue more TRBs before calling | ||
1729 | * prepare_transfer()? | ||
1710 | */ | 1730 | */ |
1711 | static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | 1731 | static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, |
1712 | bool consumer, | 1732 | bool consumer, bool more_trbs_coming, |
1713 | u32 field1, u32 field2, u32 field3, u32 field4) | 1733 | u32 field1, u32 field2, u32 field3, u32 field4) |
1714 | { | 1734 | { |
1715 | struct xhci_generic_trb *trb; | 1735 | struct xhci_generic_trb *trb; |
@@ -1719,7 +1739,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
1719 | trb->field[1] = field2; | 1739 | trb->field[1] = field2; |
1720 | trb->field[2] = field3; | 1740 | trb->field[2] = field3; |
1721 | trb->field[3] = field4; | 1741 | trb->field[3] = field4; |
1722 | inc_enq(xhci, ring, consumer); | 1742 | inc_enq(xhci, ring, consumer, more_trbs_coming); |
1723 | } | 1743 | } |
1724 | 1744 | ||
1725 | /* | 1745 | /* |
@@ -1988,6 +2008,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1988 | int trb_buff_len, this_sg_len, running_total; | 2008 | int trb_buff_len, this_sg_len, running_total; |
1989 | bool first_trb; | 2009 | bool first_trb; |
1990 | u64 addr; | 2010 | u64 addr; |
2011 | bool more_trbs_coming; | ||
1991 | 2012 | ||
1992 | struct xhci_generic_trb *start_trb; | 2013 | struct xhci_generic_trb *start_trb; |
1993 | int start_cycle; | 2014 | int start_cycle; |
@@ -2073,7 +2094,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2073 | length_field = TRB_LEN(trb_buff_len) | | 2094 | length_field = TRB_LEN(trb_buff_len) | |
2074 | remainder | | 2095 | remainder | |
2075 | TRB_INTR_TARGET(0); | 2096 | TRB_INTR_TARGET(0); |
2076 | queue_trb(xhci, ep_ring, false, | 2097 | if (num_trbs > 1) |
2098 | more_trbs_coming = true; | ||
2099 | else | ||
2100 | more_trbs_coming = false; | ||
2101 | queue_trb(xhci, ep_ring, false, more_trbs_coming, | ||
2077 | lower_32_bits(addr), | 2102 | lower_32_bits(addr), |
2078 | upper_32_bits(addr), | 2103 | upper_32_bits(addr), |
2079 | length_field, | 2104 | length_field, |
@@ -2124,6 +2149,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2124 | int num_trbs; | 2149 | int num_trbs; |
2125 | struct xhci_generic_trb *start_trb; | 2150 | struct xhci_generic_trb *start_trb; |
2126 | bool first_trb; | 2151 | bool first_trb; |
2152 | bool more_trbs_coming; | ||
2127 | int start_cycle; | 2153 | int start_cycle; |
2128 | u32 field, length_field; | 2154 | u32 field, length_field; |
2129 | 2155 | ||
@@ -2212,7 +2238,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2212 | length_field = TRB_LEN(trb_buff_len) | | 2238 | length_field = TRB_LEN(trb_buff_len) | |
2213 | remainder | | 2239 | remainder | |
2214 | TRB_INTR_TARGET(0); | 2240 | TRB_INTR_TARGET(0); |
2215 | queue_trb(xhci, ep_ring, false, | 2241 | if (num_trbs > 1) |
2242 | more_trbs_coming = true; | ||
2243 | else | ||
2244 | more_trbs_coming = false; | ||
2245 | queue_trb(xhci, ep_ring, false, more_trbs_coming, | ||
2216 | lower_32_bits(addr), | 2246 | lower_32_bits(addr), |
2217 | upper_32_bits(addr), | 2247 | upper_32_bits(addr), |
2218 | length_field, | 2248 | length_field, |
@@ -2291,7 +2321,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2291 | /* Queue setup TRB - see section 6.4.1.2.1 */ | 2321 | /* Queue setup TRB - see section 6.4.1.2.1 */ |
2292 | /* FIXME better way to translate setup_packet into two u32 fields? */ | 2322 | /* FIXME better way to translate setup_packet into two u32 fields? */ |
2293 | setup = (struct usb_ctrlrequest *) urb->setup_packet; | 2323 | setup = (struct usb_ctrlrequest *) urb->setup_packet; |
2294 | queue_trb(xhci, ep_ring, false, | 2324 | queue_trb(xhci, ep_ring, false, true, |
2295 | /* FIXME endianness is probably going to bite my ass here. */ | 2325 | /* FIXME endianness is probably going to bite my ass here. */ |
2296 | setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, | 2326 | setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, |
2297 | setup->wIndex | setup->wLength << 16, | 2327 | setup->wIndex | setup->wLength << 16, |
@@ -2307,7 +2337,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2307 | if (urb->transfer_buffer_length > 0) { | 2337 | if (urb->transfer_buffer_length > 0) { |
2308 | if (setup->bRequestType & USB_DIR_IN) | 2338 | if (setup->bRequestType & USB_DIR_IN) |
2309 | field |= TRB_DIR_IN; | 2339 | field |= TRB_DIR_IN; |
2310 | queue_trb(xhci, ep_ring, false, | 2340 | queue_trb(xhci, ep_ring, false, true, |
2311 | lower_32_bits(urb->transfer_dma), | 2341 | lower_32_bits(urb->transfer_dma), |
2312 | upper_32_bits(urb->transfer_dma), | 2342 | upper_32_bits(urb->transfer_dma), |
2313 | length_field, | 2343 | length_field, |
@@ -2324,7 +2354,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2324 | field = 0; | 2354 | field = 0; |
2325 | else | 2355 | else |
2326 | field = TRB_DIR_IN; | 2356 | field = TRB_DIR_IN; |
2327 | queue_trb(xhci, ep_ring, false, | 2357 | queue_trb(xhci, ep_ring, false, false, |
2328 | 0, | 2358 | 0, |
2329 | 0, | 2359 | 0, |
2330 | TRB_INTR_TARGET(0), | 2360 | TRB_INTR_TARGET(0), |
@@ -2361,7 +2391,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, | |||
2361 | "unfailable commands failed.\n"); | 2391 | "unfailable commands failed.\n"); |
2362 | return -ENOMEM; | 2392 | return -ENOMEM; |
2363 | } | 2393 | } |
2364 | queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, | 2394 | queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, |
2365 | field4 | xhci->cmd_ring->cycle_state); | 2395 | field4 | xhci->cmd_ring->cycle_state); |
2366 | return 0; | 2396 | return 0; |
2367 | } | 2397 | } |
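The xhci-ring hunks thread a more_trbs_coming flag through queue_trb()/inc_enq() so the ring code knows whether it may already hand a link TRB to the hardware or must hold it back until prepare_ring(). A sketch of the caller-side logic the flag encodes, with invented demo_* names (not the xHCI code itself):

#include <stdbool.h>
#include <stdio.h>

static void demo_queue_trb(bool more_trbs_coming)
{
	/* stands in for queue_trb(xhci, ring, false, more_trbs_coming, ...) */
	printf("queue TRB, more coming: %s\n", more_trbs_coming ? "yes" : "no");
}

static void demo_queue_td(int num_trbs)
{
	while (num_trbs > 0) {
		/* same test the patch adds before each queue_trb() call */
		bool more_trbs_coming = num_trbs > 1;

		demo_queue_trb(more_trbs_coming);
		num_trbs--;
	}
}

int main(void)
{
	demo_queue_td(3);
	return 0;
}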
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index fad70bc83555..3b795c56221f 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -219,8 +219,8 @@ static int musb_ulpi_write(struct otg_transceiver *otg, | |||
219 | return 0; | 219 | return 0; |
220 | } | 220 | } |
221 | #else | 221 | #else |
222 | #define musb_ulpi_read(a, b) NULL | 222 | #define musb_ulpi_read NULL |
223 | #define musb_ulpi_write(a, b, c) NULL | 223 | #define musb_ulpi_write NULL |
224 | #endif | 224 | #endif |
225 | 225 | ||
226 | static struct otg_io_access_ops musb_ulpi_access = { | 226 | static struct otg_io_access_ops musb_ulpi_access = { |
@@ -451,10 +451,6 @@ void musb_hnp_stop(struct musb *musb) | |||
451 | * @param power | 451 | * @param power |
452 | */ | 452 | */ |
453 | 453 | ||
454 | #define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \ | ||
455 | | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \ | ||
456 | | MUSB_INTR_RESET) | ||
457 | |||
458 | static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | 454 | static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, |
459 | u8 devctl, u8 power) | 455 | u8 devctl, u8 power) |
460 | { | 456 | { |
@@ -642,7 +638,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
642 | handled = IRQ_HANDLED; | 638 | handled = IRQ_HANDLED; |
643 | } | 639 | } |
644 | 640 | ||
645 | 641 | #endif | |
646 | if (int_usb & MUSB_INTR_SUSPEND) { | 642 | if (int_usb & MUSB_INTR_SUSPEND) { |
647 | DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", | 643 | DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", |
648 | otg_state_string(musb), devctl, power); | 644 | otg_state_string(musb), devctl, power); |
@@ -705,6 +701,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, | |||
705 | } | 701 | } |
706 | } | 702 | } |
707 | 703 | ||
704 | #ifdef CONFIG_USB_MUSB_HDRC_HCD | ||
708 | if (int_usb & MUSB_INTR_CONNECT) { | 705 | if (int_usb & MUSB_INTR_CONNECT) { |
709 | struct usb_hcd *hcd = musb_to_hcd(musb); | 706 | struct usb_hcd *hcd = musb_to_hcd(musb); |
710 | void __iomem *mbase = musb->mregs; | 707 | void __iomem *mbase = musb->mregs; |
@@ -1597,7 +1594,7 @@ irqreturn_t musb_interrupt(struct musb *musb) | |||
1597 | /* the core can interrupt us for multiple reasons; docs have | 1594 | /* the core can interrupt us for multiple reasons; docs have |
1598 | * a generic interrupt flowchart to follow | 1595 | * a generic interrupt flowchart to follow |
1599 | */ | 1596 | */ |
1600 | if (musb->int_usb & STAGE0_MASK) | 1597 | if (musb->int_usb) |
1601 | retval |= musb_stage0_irq(musb, musb->int_usb, | 1598 | retval |= musb_stage0_irq(musb, musb->int_usb, |
1602 | devctl, power); | 1599 | devctl, power); |
1603 | 1600 | ||
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index b22d02dea7d3..91d67794e350 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -470,7 +470,8 @@ struct musb_csr_regs { | |||
470 | 470 | ||
471 | struct musb_context_registers { | 471 | struct musb_context_registers { |
472 | 472 | ||
473 | #ifdef CONFIG_PM | 473 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ |
474 | defined(CONFIG_ARCH_OMAP4) | ||
474 | u32 otg_sysconfig, otg_forcestandby; | 475 | u32 otg_sysconfig, otg_forcestandby; |
475 | #endif | 476 | #endif |
476 | u8 power; | 477 | u8 power; |
@@ -484,7 +485,8 @@ struct musb_context_registers { | |||
484 | struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; | 485 | struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; |
485 | }; | 486 | }; |
486 | 487 | ||
487 | #ifdef CONFIG_PM | 488 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ |
489 | defined(CONFIG_ARCH_OMAP4) | ||
488 | extern void musb_platform_save_context(struct musb *musb, | 490 | extern void musb_platform_save_context(struct musb *musb, |
489 | struct musb_context_registers *musb_context); | 491 | struct musb_context_registers *musb_context); |
490 | extern void musb_platform_restore_context(struct musb *musb, | 492 | extern void musb_platform_restore_context(struct musb *musb, |
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 1008044a3bbc..dc66e4376d49 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c | |||
@@ -132,18 +132,9 @@ static void configure_channel(struct dma_channel *channel, | |||
132 | if (mode) { | 132 | if (mode) { |
133 | csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; | 133 | csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; |
134 | BUG_ON(len < packet_sz); | 134 | BUG_ON(len < packet_sz); |
135 | |||
136 | if (packet_sz >= 64) { | ||
137 | csr |= MUSB_HSDMA_BURSTMODE_INCR16 | ||
138 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
139 | } else if (packet_sz >= 32) { | ||
140 | csr |= MUSB_HSDMA_BURSTMODE_INCR8 | ||
141 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
142 | } else if (packet_sz >= 16) { | ||
143 | csr |= MUSB_HSDMA_BURSTMODE_INCR4 | ||
144 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
145 | } | ||
146 | } | 135 | } |
136 | csr |= MUSB_HSDMA_BURSTMODE_INCR16 | ||
137 | << MUSB_HSDMA_BURSTMODE_SHIFT; | ||
147 | 138 | ||
148 | csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) | 139 | csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) |
149 | | (1 << MUSB_HSDMA_ENABLE_SHIFT) | 140 | | (1 << MUSB_HSDMA_ENABLE_SHIFT) |
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c index b1b346932946..d331b222ad21 100644 --- a/drivers/usb/otg/ulpi.c +++ b/drivers/usb/otg/ulpi.c | |||
@@ -59,12 +59,17 @@ static int ulpi_set_flags(struct otg_transceiver *otg) | |||
59 | 59 | ||
60 | static int ulpi_init(struct otg_transceiver *otg) | 60 | static int ulpi_init(struct otg_transceiver *otg) |
61 | { | 61 | { |
62 | int i, vid, pid; | 62 | int i, vid, pid, ret; |
63 | 63 | u32 ulpi_id = 0; | |
64 | vid = (otg_io_read(otg, ULPI_VENDOR_ID_HIGH) << 8) | | 64 | |
65 | otg_io_read(otg, ULPI_VENDOR_ID_LOW); | 65 | for (i = 0; i < 4; i++) { |
66 | pid = (otg_io_read(otg, ULPI_PRODUCT_ID_HIGH) << 8) | | 66 | ret = otg_io_read(otg, ULPI_PRODUCT_ID_HIGH - i); |
67 | otg_io_read(otg, ULPI_PRODUCT_ID_LOW); | 67 | if (ret < 0) |
68 | return ret; | ||
69 | ulpi_id = (ulpi_id << 8) | ret; | ||
70 | } | ||
71 | vid = ulpi_id & 0xffff; | ||
72 | pid = ulpi_id >> 16; | ||
68 | 73 | ||
69 | pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid); | 74 | pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid); |
70 | 75 | ||
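A stand-alone demo of how the rewritten ulpi_init() assembles the ID (register layout mimicked, contents invented): four single-byte reads from the highest ID register down are shifted into one 32-bit word, then split into the vendor ID (low 16 bits) and product ID (high 16 bits); unlike the old code, a negative read result can now be propagated as an error.

#include <stdio.h>

static int fake_read(int reg)
{
	/* regs 0..3: VID low, VID high, PID low, PID high (made-up values) */
	static const unsigned char regs[4] = { 0x24, 0x04, 0x04, 0x00 };
	return regs[reg];
}

int main(void)
{
	unsigned int ulpi_id = 0;
	int i, ret;

	for (i = 0; i < 4; i++) {
		ret = fake_read(3 - i);		/* highest register first */
		if (ret < 0)
			return 1;		/* bubble the error up */
		ulpi_id = (ulpi_id << 8) | (unsigned int)ret;
	}

	printf("vid=0x%04x pid=0x%04x\n", ulpi_id & 0xffff, ulpi_id >> 16);
	return 0;
}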
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 79dd1ae195e5..da7e334b0407 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -653,7 +653,6 @@ static struct usb_device_id id_table_combined [] = { | |||
653 | { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, | 653 | { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, |
654 | { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, | 654 | { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, |
655 | { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, | 655 | { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, |
656 | { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, | ||
657 | { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, | 656 | { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, |
658 | { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, | 657 | { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, |
659 | { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) }, | 658 | { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 94d86c3febcb..bbc159a1df45 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -501,13 +501,6 @@ | |||
501 | #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ | 501 | #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ |
502 | 502 | ||
503 | /* | 503 | /* |
504 | * Contec products (http://www.contec.com) | ||
505 | * Submitted by Daniel Sangorrin | ||
506 | */ | ||
507 | #define CONTEC_VID 0x06CE /* Vendor ID */ | ||
508 | #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ | ||
509 | |||
510 | /* | ||
511 | * Definitions for B&B Electronics products. | 504 | * Definitions for B&B Electronics products. |
512 | */ | 505 | */ |
513 | #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ | 506 | #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 04bb759536bb..93d72eb8cafc 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -139,6 +139,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
139 | "Could not set interface, error %d\n", | 139 | "Could not set interface, error %d\n", |
140 | retval); | 140 | retval); |
141 | retval = -ENODEV; | 141 | retval = -ENODEV; |
142 | kfree(data); | ||
142 | } | 143 | } |
143 | return retval; | 144 | return retval; |
144 | } | 145 | } |
@@ -155,6 +156,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
155 | "Could not set interface, error %d\n", | 156 | "Could not set interface, error %d\n", |
156 | retval); | 157 | retval); |
157 | retval = -ENODEV; | 158 | retval = -ENODEV; |
159 | kfree(data); | ||
158 | } | 160 | } |
159 | return retval; | 161 | return retval; |
160 | } | 162 | } |
@@ -163,6 +165,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
163 | default: | 165 | default: |
164 | dev_err(&serial->dev->dev, | 166 | dev_err(&serial->dev->dev, |
165 | "unknown number of interfaces: %d\n", nintf); | 167 | "unknown number of interfaces: %d\n", nintf); |
168 | kfree(data); | ||
166 | return -ENODEV; | 169 | return -ENODEV; |
167 | } | 170 | } |
168 | 171 | ||
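A minimal illustration of the leak the three added kfree() calls in qcserial close (names and interface counts invented): once the private buffer has been allocated, every early error return must free it, not only the teardown that runs after a successful probe.

#include <stdio.h>
#include <stdlib.h>

static int demo_probe(int nintf)
{
	char *data = malloc(64);

	if (!data)
		return -1;

	if (nintf != 1 && nintf != 3 && nintf != 4) {
		fprintf(stderr, "unknown number of interfaces: %d\n", nintf);
		free(data);		/* the added cleanup on the error path */
		return -1;
	}

	/* success: in the real driver the buffer is handed to the USB core;
	 * here we just free it so the demo itself does not leak */
	free(data);
	return 0;
}

int main(void)
{
	return demo_probe(2) == -1 ? 0 : 1;
}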
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c index 76e7dac6f259..70b1d9d51c96 100644 --- a/drivers/video/geode/gxfb_core.c +++ b/drivers/video/geode/gxfb_core.c | |||
@@ -40,7 +40,7 @@ static int vram; | |||
40 | static int vt_switch; | 40 | static int vt_switch; |
41 | 41 | ||
42 | /* Modes relevant to the GX (taken from modedb.c) */ | 42 | /* Modes relevant to the GX (taken from modedb.c) */ |
43 | static struct fb_videomode gx_modedb[] __initdata = { | 43 | static struct fb_videomode gx_modedb[] __devinitdata = { |
44 | /* 640x480-60 VESA */ | 44 | /* 640x480-60 VESA */ |
45 | { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, | 45 | { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, |
46 | 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, | 46 | 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, |
@@ -110,14 +110,15 @@ static struct fb_videomode gx_modedb[] __initdata = { | |||
110 | #ifdef CONFIG_OLPC | 110 | #ifdef CONFIG_OLPC |
111 | #include <asm/olpc.h> | 111 | #include <asm/olpc.h> |
112 | 112 | ||
113 | static struct fb_videomode gx_dcon_modedb[] __initdata = { | 113 | static struct fb_videomode gx_dcon_modedb[] __devinitdata = { |
114 | /* The only mode the DCON has is 1200x900 */ | 114 | /* The only mode the DCON has is 1200x900 */ |
115 | { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, | 115 | { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, |
116 | FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, | 116 | FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, |
117 | FB_VMODE_NONINTERLACED, 0 } | 117 | FB_VMODE_NONINTERLACED, 0 } |
118 | }; | 118 | }; |
119 | 119 | ||
120 | static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) | 120 | static void __devinit get_modedb(struct fb_videomode **modedb, |
121 | unsigned int *size) | ||
121 | { | 122 | { |
122 | if (olpc_has_dcon()) { | 123 | if (olpc_has_dcon()) { |
123 | *modedb = (struct fb_videomode *) gx_dcon_modedb; | 124 | *modedb = (struct fb_videomode *) gx_dcon_modedb; |
@@ -129,7 +130,8 @@ static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) | |||
129 | } | 130 | } |
130 | 131 | ||
131 | #else | 132 | #else |
132 | static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) | 133 | static void __devinit get_modedb(struct fb_videomode **modedb, |
134 | unsigned int *size) | ||
133 | { | 135 | { |
134 | *modedb = (struct fb_videomode *) gx_modedb; | 136 | *modedb = (struct fb_videomode *) gx_modedb; |
135 | *size = ARRAY_SIZE(gx_modedb); | 137 | *size = ARRAY_SIZE(gx_modedb); |
@@ -226,7 +228,8 @@ static int gxfb_blank(int blank_mode, struct fb_info *info) | |||
226 | return gx_blank_display(info, blank_mode); | 228 | return gx_blank_display(info, blank_mode); |
227 | } | 229 | } |
228 | 230 | ||
229 | static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev) | 231 | static int __devinit gxfb_map_video_memory(struct fb_info *info, |
232 | struct pci_dev *dev) | ||
230 | { | 233 | { |
231 | struct gxfb_par *par = info->par; | 234 | struct gxfb_par *par = info->par; |
232 | int ret; | 235 | int ret; |
@@ -290,7 +293,7 @@ static struct fb_ops gxfb_ops = { | |||
290 | .fb_imageblit = cfb_imageblit, | 293 | .fb_imageblit = cfb_imageblit, |
291 | }; | 294 | }; |
292 | 295 | ||
293 | static struct fb_info * __init gxfb_init_fbinfo(struct device *dev) | 296 | static struct fb_info *__devinit gxfb_init_fbinfo(struct device *dev) |
294 | { | 297 | { |
295 | struct gxfb_par *par; | 298 | struct gxfb_par *par; |
296 | struct fb_info *info; | 299 | struct fb_info *info; |
@@ -371,7 +374,8 @@ static int gxfb_resume(struct pci_dev *pdev) | |||
371 | } | 374 | } |
372 | #endif | 375 | #endif |
373 | 376 | ||
374 | static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 377 | static int __devinit gxfb_probe(struct pci_dev *pdev, |
378 | const struct pci_device_id *id) | ||
375 | { | 379 | { |
376 | struct gxfb_par *par; | 380 | struct gxfb_par *par; |
377 | struct fb_info *info; | 381 | struct fb_info *info; |
@@ -451,7 +455,7 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i | |||
451 | return ret; | 455 | return ret; |
452 | } | 456 | } |
453 | 457 | ||
454 | static void gxfb_remove(struct pci_dev *pdev) | 458 | static void __devexit gxfb_remove(struct pci_dev *pdev) |
455 | { | 459 | { |
456 | struct fb_info *info = pci_get_drvdata(pdev); | 460 | struct fb_info *info = pci_get_drvdata(pdev); |
457 | struct gxfb_par *par = info->par; | 461 | struct gxfb_par *par = info->par; |
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c index 1a18da86d3fa..39bdbedf43b4 100644 --- a/drivers/video/geode/lxfb_core.c +++ b/drivers/video/geode/lxfb_core.c | |||
@@ -35,7 +35,7 @@ static int vt_switch; | |||
35 | * we try to make it something sane - 640x480-60 is sane | 35 | * we try to make it something sane - 640x480-60 is sane |
36 | */ | 36 | */ |
37 | 37 | ||
38 | static struct fb_videomode geode_modedb[] __initdata = { | 38 | static struct fb_videomode geode_modedb[] __devinitdata = { |
39 | /* 640x480-60 */ | 39 | /* 640x480-60 */ |
40 | { NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2, | 40 | { NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2, |
41 | FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, | 41 | FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, |
@@ -219,14 +219,15 @@ static struct fb_videomode geode_modedb[] __initdata = { | |||
219 | #ifdef CONFIG_OLPC | 219 | #ifdef CONFIG_OLPC |
220 | #include <asm/olpc.h> | 220 | #include <asm/olpc.h> |
221 | 221 | ||
222 | static struct fb_videomode olpc_dcon_modedb[] __initdata = { | 222 | static struct fb_videomode olpc_dcon_modedb[] __devinitdata = { |
223 | /* The only mode the DCON has is 1200x900 */ | 223 | /* The only mode the DCON has is 1200x900 */ |
224 | { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, | 224 | { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, |
225 | FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, | 225 | FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, |
226 | FB_VMODE_NONINTERLACED, 0 } | 226 | FB_VMODE_NONINTERLACED, 0 } |
227 | }; | 227 | }; |
228 | 228 | ||
229 | static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) | 229 | static void __devinit get_modedb(struct fb_videomode **modedb, |
230 | unsigned int *size) | ||
230 | { | 231 | { |
231 | if (olpc_has_dcon()) { | 232 | if (olpc_has_dcon()) { |
232 | *modedb = (struct fb_videomode *) olpc_dcon_modedb; | 233 | *modedb = (struct fb_videomode *) olpc_dcon_modedb; |
@@ -238,7 +239,8 @@ static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) | |||
238 | } | 239 | } |
239 | 240 | ||
240 | #else | 241 | #else |
241 | static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size) | 242 | static void __devinit get_modedb(struct fb_videomode **modedb, |
243 | unsigned int *size) | ||
242 | { | 244 | { |
243 | *modedb = (struct fb_videomode *) geode_modedb; | 245 | *modedb = (struct fb_videomode *) geode_modedb; |
244 | *size = ARRAY_SIZE(geode_modedb); | 246 | *size = ARRAY_SIZE(geode_modedb); |
@@ -334,7 +336,7 @@ static int lxfb_blank(int blank_mode, struct fb_info *info) | |||
334 | } | 336 | } |
335 | 337 | ||
336 | 338 | ||
337 | static int __init lxfb_map_video_memory(struct fb_info *info, | 339 | static int __devinit lxfb_map_video_memory(struct fb_info *info, |
338 | struct pci_dev *dev) | 340 | struct pci_dev *dev) |
339 | { | 341 | { |
340 | struct lxfb_par *par = info->par; | 342 | struct lxfb_par *par = info->par; |
@@ -412,7 +414,7 @@ static struct fb_ops lxfb_ops = { | |||
412 | .fb_imageblit = cfb_imageblit, | 414 | .fb_imageblit = cfb_imageblit, |
413 | }; | 415 | }; |
414 | 416 | ||
415 | static struct fb_info * __init lxfb_init_fbinfo(struct device *dev) | 417 | static struct fb_info * __devinit lxfb_init_fbinfo(struct device *dev) |
416 | { | 418 | { |
417 | struct lxfb_par *par; | 419 | struct lxfb_par *par; |
418 | struct fb_info *info; | 420 | struct fb_info *info; |
@@ -496,7 +498,7 @@ static int lxfb_resume(struct pci_dev *pdev) | |||
496 | #define lxfb_resume NULL | 498 | #define lxfb_resume NULL |
497 | #endif | 499 | #endif |
498 | 500 | ||
499 | static int __init lxfb_probe(struct pci_dev *pdev, | 501 | static int __devinit lxfb_probe(struct pci_dev *pdev, |
500 | const struct pci_device_id *id) | 502 | const struct pci_device_id *id) |
501 | { | 503 | { |
502 | struct lxfb_par *par; | 504 | struct lxfb_par *par; |
@@ -588,7 +590,7 @@ err: | |||
588 | return ret; | 590 | return ret; |
589 | } | 591 | } |
590 | 592 | ||
591 | static void lxfb_remove(struct pci_dev *pdev) | 593 | static void __devexit lxfb_remove(struct pci_dev *pdev) |
592 | { | 594 | { |
593 | struct fb_info *info = pci_get_drvdata(pdev); | 595 | struct fb_info *info = pci_get_drvdata(pdev); |
594 | struct lxfb_par *par = info->par; | 596 | struct lxfb_par *par = info->par; |
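Editor's note: the geode hunks above all make the same substitution — code reachable only from the PCI probe/remove path moves from __init/__initdata (discarded after boot) to the hotplug-aware __devinit/__devinitdata/__devexit annotations, so the driver can also be bound after boot. A minimal sketch of the idiom these annotations pair with, assuming the usual pci_driver registration (field values illustrative, other fields trimmed):

static int  __devinit lxfb_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id);
static void __devexit lxfb_remove(struct pci_dev *pdev);

static struct pci_driver lxfb_driver = {
	.name	= "lxfb",
	.probe	= lxfb_probe,
	/* __devexit_p() compiles to NULL when hotplug support is off,
	 * matching the discard of the __devexit section. */
	.remove	= __devexit_p(lxfb_remove),
};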
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c index d4cde79ea15e..81687ed26ba9 100644 --- a/drivers/video/nuc900fb.c +++ b/drivers/video/nuc900fb.c | |||
@@ -596,8 +596,6 @@ static int __devinit nuc900fb_probe(struct platform_device *pdev) | |||
596 | goto release_regs; | 596 | goto release_regs; |
597 | } | 597 | } |
598 | 598 | ||
599 | nuc900_driver_clksrc_div(&pdev->dev, "ext", 0x2); | ||
600 | |||
601 | fbi->clk = clk_get(&pdev->dev, NULL); | 599 | fbi->clk = clk_get(&pdev->dev, NULL); |
602 | if (!fbi->clk || IS_ERR(fbi->clk)) { | 600 | if (!fbi->clk || IS_ERR(fbi->clk)) { |
603 | printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n"); | 601 | printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n"); |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index b6ab27ccf214..811384bec8de 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -68,11 +68,7 @@ | |||
68 | * Here we can be a bit looser than the data sections since this | 68 | * Here we can be a bit looser than the data sections since this |
69 | * needs to only meet arch ABI requirements. | 69 | * needs to only meet arch ABI requirements. |
70 | */ | 70 | */ |
71 | #ifdef ARCH_SLAB_MINALIGN | 71 | #define FLAT_STACK_ALIGN max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN) |
72 | #define FLAT_STACK_ALIGN (ARCH_SLAB_MINALIGN) | ||
73 | #else | ||
74 | #define FLAT_STACK_ALIGN (sizeof(void *)) | ||
75 | #endif | ||
76 | 72 | ||
77 | #define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */ | 73 | #define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */ |
78 | #define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */ | 74 | #define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */ |
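Editor's note: the FLAT_STACK_ALIGN change folds the old #ifdef into a single expression. It only compiles because ARCH_SLAB_MINALIGN is always defined once the slab headers are included (a fallback is provided when the arch does not override it), so max_t() of that value and sizeof(void *) reproduces both former branches — a zero fallback simply yields sizeof(void *). A standalone userspace sketch of the behaviour, with an assumed example value for ARCH_SLAB_MINALIGN and a simplified max_t():

#include <stdio.h>

/* Assumed example value; the kernel gets this from the slab headers
 * or an arch override. */
#define ARCH_SLAB_MINALIGN	16UL

/* Simplified max_t(): cast both operands to the given type, pick the larger. */
#define max_t(type, a, b)	((type)(a) > (type)(b) ? (type)(a) : (type)(b))

#define FLAT_STACK_ALIGN \
	max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN)

int main(void)
{
	/* Prints 16 with the example value above: the stricter of the
	 * pointer size and the slab minimum alignment wins. */
	printf("FLAT_STACK_ALIGN = %lu\n", (unsigned long)FLAT_STACK_ALIGN);
	return 0;
}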
diff --git a/fs/dcache.c b/fs/dcache.c index d96047b4a633..c8c78ba07827 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -590,6 +590,8 @@ static void prune_dcache(int count) | |||
590 | up_read(&sb->s_umount); | 590 | up_read(&sb->s_umount); |
591 | } | 591 | } |
592 | spin_lock(&sb_lock); | 592 | spin_lock(&sb_lock); |
593 | /* lock was dropped, must reset next */ | ||
594 | list_safe_reset_next(sb, n, s_list); | ||
593 | count -= pruned; | 595 | count -= pruned; |
594 | __put_super(sb); | 596 | __put_super(sb); |
595 | /* more work left to do? */ | 597 | /* more work left to do? */ |
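Editor's note: this hunk (and the matching ones in fs/super.c further down) plugs a superblock-iteration race. prune_dcache() walks the list with list_for_each_entry_safe(), drops sb_lock inside the loop, and the prefetched next pointer can go stale while the lock is dropped; list_safe_reset_next() re-derives it from the still-pinned current entry once the lock is retaken. A sketch of the helper and of the pattern used above (illustrative skeleton, not the full function):

/* Roughly what the helper expands to (see include/linux/list.h):
 * recompute the cached 'next' cursor from the still-valid 'pos'. */
#define list_safe_reset_next(pos, n, member) \
	n = list_entry(pos->member.next, typeof(*pos), member)

list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
	sb->s_count++;			/* keeps 'sb' itself alive */
	spin_unlock(&sb_lock);		/* 'n' may now be freed or moved */

	/* ... work on 'sb' without sb_lock ... */

	spin_lock(&sb_lock);
	list_safe_reset_next(sb, n, s_list);	/* revalidate the cursor */
	__put_super(sb);
}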
diff --git a/fs/fcntl.c b/fs/fcntl.c index 51e11bf5708f..9d175d623aab 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -733,12 +733,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) | |||
733 | { | 733 | { |
734 | while (fa) { | 734 | while (fa) { |
735 | struct fown_struct *fown; | 735 | struct fown_struct *fown; |
736 | unsigned long flags; | ||
737 | |||
736 | if (fa->magic != FASYNC_MAGIC) { | 738 | if (fa->magic != FASYNC_MAGIC) { |
737 | printk(KERN_ERR "kill_fasync: bad magic number in " | 739 | printk(KERN_ERR "kill_fasync: bad magic number in " |
738 | "fasync_struct!\n"); | 740 | "fasync_struct!\n"); |
739 | return; | 741 | return; |
740 | } | 742 | } |
741 | spin_lock(&fa->fa_lock); | 743 | spin_lock_irqsave(&fa->fa_lock, flags); |
742 | if (fa->fa_file) { | 744 | if (fa->fa_file) { |
743 | fown = &fa->fa_file->f_owner; | 745 | fown = &fa->fa_file->f_owner; |
744 | /* Don't send SIGURG to processes which have not set a | 746 | /* Don't send SIGURG to processes which have not set a |
@@ -747,7 +749,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) | |||
747 | if (!(sig == SIGURG && fown->signum == 0)) | 749 | if (!(sig == SIGURG && fown->signum == 0)) |
748 | send_sigio(fown, fa->fa_fd, band); | 750 | send_sigio(fown, fa->fa_fd, band); |
749 | } | 751 | } |
750 | spin_unlock(&fa->fa_lock); | 752 | spin_unlock_irqrestore(&fa->fa_lock, flags); |
751 | fa = rcu_dereference(fa->fa_next); | 753 | fa = rcu_dereference(fa->fa_next); |
752 | } | 754 | } |
753 | } | 755 | } |
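Editor's note: the switch from spin_lock() to spin_lock_irqsave() in kill_fasync_rcu() reflects that kill_fasync() may be called from interrupt context while fa_lock is also taken elsewhere; every acquisition of the lock therefore has to be IRQ-safe, or an interrupt arriving on a CPU that already holds it could re-enter and spin forever. A compressed sketch of the resulting pattern (surrounding code elided):

/* fa_lock can be contended between process-context fasync list updates
 * and kill_fasync() running from an interrupt handler.  Disabling
 * interrupts across the critical section closes that window; the
 * save/restore form is used because the caller's IRQ state is unknown. */
unsigned long flags;

spin_lock_irqsave(&fa->fa_lock, flags);
/* ... deliver SIGIO/SIGURG to fa->fa_file's owner ... */
spin_unlock_irqrestore(&fa->fa_lock, flags);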
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 1d1088f48bc2..0609607d3955 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -63,24 +63,16 @@ struct bdi_work { | |||
63 | }; | 63 | }; |
64 | 64 | ||
65 | enum { | 65 | enum { |
66 | WS_USED_B = 0, | 66 | WS_INPROGRESS = 0, |
67 | WS_ONSTACK_B, | 67 | WS_ONSTACK, |
68 | }; | 68 | }; |
69 | 69 | ||
70 | #define WS_USED (1 << WS_USED_B) | ||
71 | #define WS_ONSTACK (1 << WS_ONSTACK_B) | ||
72 | |||
73 | static inline bool bdi_work_on_stack(struct bdi_work *work) | ||
74 | { | ||
75 | return test_bit(WS_ONSTACK_B, &work->state); | ||
76 | } | ||
77 | |||
78 | static inline void bdi_work_init(struct bdi_work *work, | 70 | static inline void bdi_work_init(struct bdi_work *work, |
79 | struct wb_writeback_args *args) | 71 | struct wb_writeback_args *args) |
80 | { | 72 | { |
81 | INIT_RCU_HEAD(&work->rcu_head); | 73 | INIT_RCU_HEAD(&work->rcu_head); |
82 | work->args = *args; | 74 | work->args = *args; |
83 | work->state = WS_USED; | 75 | __set_bit(WS_INPROGRESS, &work->state); |
84 | } | 76 | } |
85 | 77 | ||
86 | /** | 78 | /** |
@@ -95,43 +87,16 @@ int writeback_in_progress(struct backing_dev_info *bdi) | |||
95 | return !list_empty(&bdi->work_list); | 87 | return !list_empty(&bdi->work_list); |
96 | } | 88 | } |
97 | 89 | ||
98 | static void bdi_work_clear(struct bdi_work *work) | ||
99 | { | ||
100 | clear_bit(WS_USED_B, &work->state); | ||
101 | smp_mb__after_clear_bit(); | ||
102 | /* | ||
103 | * work can have disappeared at this point. bit waitq functions | ||
104 | * should be able to tolerate this, provided bdi_sched_wait does | ||
105 | * not dereference it's pointer argument. | ||
106 | */ | ||
107 | wake_up_bit(&work->state, WS_USED_B); | ||
108 | } | ||
109 | |||
110 | static void bdi_work_free(struct rcu_head *head) | 90 | static void bdi_work_free(struct rcu_head *head) |
111 | { | 91 | { |
112 | struct bdi_work *work = container_of(head, struct bdi_work, rcu_head); | 92 | struct bdi_work *work = container_of(head, struct bdi_work, rcu_head); |
113 | 93 | ||
114 | if (!bdi_work_on_stack(work)) | 94 | clear_bit(WS_INPROGRESS, &work->state); |
115 | kfree(work); | 95 | smp_mb__after_clear_bit(); |
116 | else | 96 | wake_up_bit(&work->state, WS_INPROGRESS); |
117 | bdi_work_clear(work); | ||
118 | } | ||
119 | |||
120 | static void wb_work_complete(struct bdi_work *work) | ||
121 | { | ||
122 | const enum writeback_sync_modes sync_mode = work->args.sync_mode; | ||
123 | int onstack = bdi_work_on_stack(work); | ||
124 | 97 | ||
125 | /* | 98 | if (!test_bit(WS_ONSTACK, &work->state)) |
126 | * For allocated work, we can clear the done/seen bit right here. | 99 | kfree(work); |
127 | * For on-stack work, we need to postpone both the clear and free | ||
128 | * to after the RCU grace period, since the stack could be invalidated | ||
129 | * as soon as bdi_work_clear() has done the wakeup. | ||
130 | */ | ||
131 | if (!onstack) | ||
132 | bdi_work_clear(work); | ||
133 | if (sync_mode == WB_SYNC_NONE || onstack) | ||
134 | call_rcu(&work->rcu_head, bdi_work_free); | ||
135 | } | 100 | } |
136 | 101 | ||
137 | static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) | 102 | static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) |
@@ -147,7 +112,7 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) | |||
147 | list_del_rcu(&work->list); | 112 | list_del_rcu(&work->list); |
148 | spin_unlock(&bdi->wb_lock); | 113 | spin_unlock(&bdi->wb_lock); |
149 | 114 | ||
150 | wb_work_complete(work); | 115 | call_rcu(&work->rcu_head, bdi_work_free); |
151 | } | 116 | } |
152 | } | 117 | } |
153 | 118 | ||
@@ -185,9 +150,9 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) | |||
185 | * Used for on-stack allocated work items. The caller needs to wait until | 150 | * Used for on-stack allocated work items. The caller needs to wait until |
186 | * the wb threads have acked the work before it's safe to continue. | 151 | * the wb threads have acked the work before it's safe to continue. |
187 | */ | 152 | */ |
188 | static void bdi_wait_on_work_clear(struct bdi_work *work) | 153 | static void bdi_wait_on_work_done(struct bdi_work *work) |
189 | { | 154 | { |
190 | wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait, | 155 | wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait, |
191 | TASK_UNINTERRUPTIBLE); | 156 | TASK_UNINTERRUPTIBLE); |
192 | } | 157 | } |
193 | 158 | ||
@@ -213,37 +178,28 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi, | |||
213 | } | 178 | } |
214 | 179 | ||
215 | /** | 180 | /** |
216 | * bdi_sync_writeback - start and wait for writeback | 181 | * bdi_queue_work_onstack - start and wait for writeback |
217 | * @bdi: the backing device to write from | ||
218 | * @sb: write inodes from this super_block | 182 | * @sb: write inodes from this super_block |
219 | * | 183 | * |
220 | * Description: | 184 | * Description: |
221 | * This does WB_SYNC_ALL data integrity writeback and waits for the | 185 | * This function initiates writeback and waits for the operation to |
222 | * IO to complete. Callers must hold the sb s_umount semaphore for | 186 | * complete. Callers must hold the sb s_umount semaphore for |
223 | * reading, to avoid having the super disappear before we are done. | 187 | * reading, to avoid having the super disappear before we are done. |
224 | */ | 188 | */ |
225 | static void bdi_sync_writeback(struct backing_dev_info *bdi, | 189 | static void bdi_queue_work_onstack(struct wb_writeback_args *args) |
226 | struct super_block *sb) | ||
227 | { | 190 | { |
228 | struct wb_writeback_args args = { | ||
229 | .sb = sb, | ||
230 | .sync_mode = WB_SYNC_ALL, | ||
231 | .nr_pages = LONG_MAX, | ||
232 | .range_cyclic = 0, | ||
233 | }; | ||
234 | struct bdi_work work; | 191 | struct bdi_work work; |
235 | 192 | ||
236 | bdi_work_init(&work, &args); | 193 | bdi_work_init(&work, args); |
237 | work.state |= WS_ONSTACK; | 194 | __set_bit(WS_ONSTACK, &work.state); |
238 | 195 | ||
239 | bdi_queue_work(bdi, &work); | 196 | bdi_queue_work(args->sb->s_bdi, &work); |
240 | bdi_wait_on_work_clear(&work); | 197 | bdi_wait_on_work_done(&work); |
241 | } | 198 | } |
242 | 199 | ||
243 | /** | 200 | /** |
244 | * bdi_start_writeback - start writeback | 201 | * bdi_start_writeback - start writeback |
245 | * @bdi: the backing device to write from | 202 | * @bdi: the backing device to write from |
246 | * @sb: write inodes from this super_block | ||
247 | * @nr_pages: the number of pages to write | 203 | * @nr_pages: the number of pages to write |
248 | * | 204 | * |
249 | * Description: | 205 | * Description: |
@@ -252,25 +208,34 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi, | |||
252 | * completion. Caller need not hold sb s_umount semaphore. | 208 | * completion. Caller need not hold sb s_umount semaphore. |
253 | * | 209 | * |
254 | */ | 210 | */ |
255 | void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, | 211 | void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) |
256 | long nr_pages) | ||
257 | { | 212 | { |
258 | struct wb_writeback_args args = { | 213 | struct wb_writeback_args args = { |
259 | .sb = sb, | ||
260 | .sync_mode = WB_SYNC_NONE, | 214 | .sync_mode = WB_SYNC_NONE, |
261 | .nr_pages = nr_pages, | 215 | .nr_pages = nr_pages, |
262 | .range_cyclic = 1, | 216 | .range_cyclic = 1, |
263 | }; | 217 | }; |
264 | 218 | ||
265 | /* | 219 | bdi_alloc_queue_work(bdi, &args); |
266 | * We treat @nr_pages=0 as the special case to do background writeback, | 220 | } |
267 | * ie. to sync pages until the background dirty threshold is reached. | ||
268 | */ | ||
269 | if (!nr_pages) { | ||
270 | args.nr_pages = LONG_MAX; | ||
271 | args.for_background = 1; | ||
272 | } | ||
273 | 221 | ||
222 | /** | ||
223 | * bdi_start_background_writeback - start background writeback | ||
224 | * @bdi: the backing device to write from | ||
225 | * | ||
226 | * Description: | ||
227 | * This does WB_SYNC_NONE background writeback. The IO is only | ||
228 | * started when this function returns, we make no guarantees on | ||
229 | * completion. Caller need not hold sb s_umount semaphore. | ||
230 | */ | ||
231 | void bdi_start_background_writeback(struct backing_dev_info *bdi) | ||
232 | { | ||
233 | struct wb_writeback_args args = { | ||
234 | .sync_mode = WB_SYNC_NONE, | ||
235 | .nr_pages = LONG_MAX, | ||
236 | .for_background = 1, | ||
237 | .range_cyclic = 1, | ||
238 | }; | ||
274 | bdi_alloc_queue_work(bdi, &args); | 239 | bdi_alloc_queue_work(bdi, &args); |
275 | } | 240 | } |
276 | 241 | ||
@@ -561,48 +526,30 @@ select_queue: | |||
561 | return ret; | 526 | return ret; |
562 | } | 527 | } |
563 | 528 | ||
564 | static void unpin_sb_for_writeback(struct super_block *sb) | ||
565 | { | ||
566 | up_read(&sb->s_umount); | ||
567 | put_super(sb); | ||
568 | } | ||
569 | |||
570 | enum sb_pin_state { | ||
571 | SB_PINNED, | ||
572 | SB_NOT_PINNED, | ||
573 | SB_PIN_FAILED | ||
574 | }; | ||
575 | |||
576 | /* | 529 | /* |
577 | * For WB_SYNC_NONE writeback, the caller does not have the sb pinned | 530 | * For background writeback the caller does not have the sb pinned |
578 | * before calling writeback. So make sure that we do pin it, so it doesn't | 531 | * before calling writeback. So make sure that we do pin it, so it doesn't |
579 | * go away while we are writing inodes from it. | 532 | * go away while we are writing inodes from it. |
580 | */ | 533 | */ |
581 | static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc, | 534 | static bool pin_sb_for_writeback(struct super_block *sb) |
582 | struct super_block *sb) | ||
583 | { | 535 | { |
584 | /* | ||
585 | * Caller must already hold the ref for this | ||
586 | */ | ||
587 | if (wbc->sync_mode == WB_SYNC_ALL) { | ||
588 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | ||
589 | return SB_NOT_PINNED; | ||
590 | } | ||
591 | spin_lock(&sb_lock); | 536 | spin_lock(&sb_lock); |
537 | if (list_empty(&sb->s_instances)) { | ||
538 | spin_unlock(&sb_lock); | ||
539 | return false; | ||
540 | } | ||
541 | |||
592 | sb->s_count++; | 542 | sb->s_count++; |
543 | spin_unlock(&sb_lock); | ||
544 | |||
593 | if (down_read_trylock(&sb->s_umount)) { | 545 | if (down_read_trylock(&sb->s_umount)) { |
594 | if (sb->s_root) { | 546 | if (sb->s_root) |
595 | spin_unlock(&sb_lock); | 547 | return true; |
596 | return SB_PINNED; | ||
597 | } | ||
598 | /* | ||
599 | * umounted, drop rwsem again and fall through to failure | ||
600 | */ | ||
601 | up_read(&sb->s_umount); | 548 | up_read(&sb->s_umount); |
602 | } | 549 | } |
603 | sb->s_count--; | 550 | |
604 | spin_unlock(&sb_lock); | 551 | put_super(sb); |
605 | return SB_PIN_FAILED; | 552 | return false; |
606 | } | 553 | } |
607 | 554 | ||
608 | /* | 555 | /* |
@@ -681,24 +628,31 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, | |||
681 | struct inode *inode = list_entry(wb->b_io.prev, | 628 | struct inode *inode = list_entry(wb->b_io.prev, |
682 | struct inode, i_list); | 629 | struct inode, i_list); |
683 | struct super_block *sb = inode->i_sb; | 630 | struct super_block *sb = inode->i_sb; |
684 | enum sb_pin_state state; | ||
685 | 631 | ||
686 | if (wbc->sb && sb != wbc->sb) { | 632 | if (wbc->sb) { |
687 | /* super block given and doesn't | 633 | /* |
688 | match, skip this inode */ | 634 | * We are requested to write out inodes for a specific |
689 | redirty_tail(inode); | 635 | * superblock. This means we already have s_umount |
690 | continue; | 636 | * taken by the caller which also waits for us to |
691 | } | 637 | * complete the writeout. |
692 | state = pin_sb_for_writeback(wbc, sb); | 638 | */ |
639 | if (sb != wbc->sb) { | ||
640 | redirty_tail(inode); | ||
641 | continue; | ||
642 | } | ||
693 | 643 | ||
694 | if (state == SB_PIN_FAILED) { | 644 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
695 | requeue_io(inode); | 645 | |
696 | continue; | 646 | ret = writeback_sb_inodes(sb, wb, wbc); |
647 | } else { | ||
648 | if (!pin_sb_for_writeback(sb)) { | ||
649 | requeue_io(inode); | ||
650 | continue; | ||
651 | } | ||
652 | ret = writeback_sb_inodes(sb, wb, wbc); | ||
653 | drop_super(sb); | ||
697 | } | 654 | } |
698 | ret = writeback_sb_inodes(sb, wb, wbc); | ||
699 | 655 | ||
700 | if (state == SB_PINNED) | ||
701 | unpin_sb_for_writeback(sb); | ||
702 | if (ret) | 656 | if (ret) |
703 | break; | 657 | break; |
704 | } | 658 | } |
@@ -911,7 +865,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait) | |||
911 | * If this isn't a data integrity operation, just notify | 865 | * If this isn't a data integrity operation, just notify |
912 | * that we have seen this work and we are now starting it. | 866 | * that we have seen this work and we are now starting it. |
913 | */ | 867 | */ |
914 | if (args.sync_mode == WB_SYNC_NONE) | 868 | if (!test_bit(WS_ONSTACK, &work->state)) |
915 | wb_clear_pending(wb, work); | 869 | wb_clear_pending(wb, work); |
916 | 870 | ||
917 | wrote += wb_writeback(wb, &args); | 871 | wrote += wb_writeback(wb, &args); |
@@ -920,7 +874,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait) | |||
920 | * This is a data integrity writeback, so only do the | 874 | * This is a data integrity writeback, so only do the |
921 | * notification when we have completed the work. | 875 | * notification when we have completed the work. |
922 | */ | 876 | */ |
923 | if (args.sync_mode == WB_SYNC_ALL) | 877 | if (test_bit(WS_ONSTACK, &work->state)) |
924 | wb_clear_pending(wb, work); | 878 | wb_clear_pending(wb, work); |
925 | } | 879 | } |
926 | 880 | ||
@@ -978,42 +932,32 @@ int bdi_writeback_task(struct bdi_writeback *wb) | |||
978 | } | 932 | } |
979 | 933 | ||
980 | /* | 934 | /* |
981 | * Schedule writeback for all backing devices. This does WB_SYNC_NONE | 935 | * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back |
982 | * writeback, for integrity writeback see bdi_sync_writeback(). | 936 | * the whole world. |
983 | */ | 937 | */ |
984 | static void bdi_writeback_all(struct super_block *sb, long nr_pages) | 938 | void wakeup_flusher_threads(long nr_pages) |
985 | { | 939 | { |
940 | struct backing_dev_info *bdi; | ||
986 | struct wb_writeback_args args = { | 941 | struct wb_writeback_args args = { |
987 | .sb = sb, | ||
988 | .nr_pages = nr_pages, | ||
989 | .sync_mode = WB_SYNC_NONE, | 942 | .sync_mode = WB_SYNC_NONE, |
990 | }; | 943 | }; |
991 | struct backing_dev_info *bdi; | ||
992 | 944 | ||
993 | rcu_read_lock(); | 945 | if (nr_pages) { |
946 | args.nr_pages = nr_pages; | ||
947 | } else { | ||
948 | args.nr_pages = global_page_state(NR_FILE_DIRTY) + | ||
949 | global_page_state(NR_UNSTABLE_NFS); | ||
950 | } | ||
994 | 951 | ||
952 | rcu_read_lock(); | ||
995 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { | 953 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { |
996 | if (!bdi_has_dirty_io(bdi)) | 954 | if (!bdi_has_dirty_io(bdi)) |
997 | continue; | 955 | continue; |
998 | |||
999 | bdi_alloc_queue_work(bdi, &args); | 956 | bdi_alloc_queue_work(bdi, &args); |
1000 | } | 957 | } |
1001 | |||
1002 | rcu_read_unlock(); | 958 | rcu_read_unlock(); |
1003 | } | 959 | } |
1004 | 960 | ||
1005 | /* | ||
1006 | * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back | ||
1007 | * the whole world. | ||
1008 | */ | ||
1009 | void wakeup_flusher_threads(long nr_pages) | ||
1010 | { | ||
1011 | if (nr_pages == 0) | ||
1012 | nr_pages = global_page_state(NR_FILE_DIRTY) + | ||
1013 | global_page_state(NR_UNSTABLE_NFS); | ||
1014 | bdi_writeback_all(NULL, nr_pages); | ||
1015 | } | ||
1016 | |||
1017 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) | 961 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) |
1018 | { | 962 | { |
1019 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { | 963 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { |
@@ -1218,12 +1162,17 @@ void writeback_inodes_sb(struct super_block *sb) | |||
1218 | { | 1162 | { |
1219 | unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); | 1163 | unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); |
1220 | unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); | 1164 | unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); |
1221 | long nr_to_write; | 1165 | struct wb_writeback_args args = { |
1166 | .sb = sb, | ||
1167 | .sync_mode = WB_SYNC_NONE, | ||
1168 | }; | ||
1169 | |||
1170 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | ||
1222 | 1171 | ||
1223 | nr_to_write = nr_dirty + nr_unstable + | 1172 | args.nr_pages = nr_dirty + nr_unstable + |
1224 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); | 1173 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); |
1225 | 1174 | ||
1226 | bdi_start_writeback(sb->s_bdi, sb, nr_to_write); | 1175 | bdi_queue_work_onstack(&args); |
1227 | } | 1176 | } |
1228 | EXPORT_SYMBOL(writeback_inodes_sb); | 1177 | EXPORT_SYMBOL(writeback_inodes_sb); |
1229 | 1178 | ||
@@ -1237,7 +1186,9 @@ EXPORT_SYMBOL(writeback_inodes_sb); | |||
1237 | int writeback_inodes_sb_if_idle(struct super_block *sb) | 1186 | int writeback_inodes_sb_if_idle(struct super_block *sb) |
1238 | { | 1187 | { |
1239 | if (!writeback_in_progress(sb->s_bdi)) { | 1188 | if (!writeback_in_progress(sb->s_bdi)) { |
1189 | down_read(&sb->s_umount); | ||
1240 | writeback_inodes_sb(sb); | 1190 | writeback_inodes_sb(sb); |
1191 | up_read(&sb->s_umount); | ||
1241 | return 1; | 1192 | return 1; |
1242 | } else | 1193 | } else |
1243 | return 0; | 1194 | return 0; |
@@ -1253,7 +1204,16 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle); | |||
1253 | */ | 1204 | */ |
1254 | void sync_inodes_sb(struct super_block *sb) | 1205 | void sync_inodes_sb(struct super_block *sb) |
1255 | { | 1206 | { |
1256 | bdi_sync_writeback(sb->s_bdi, sb); | 1207 | struct wb_writeback_args args = { |
1208 | .sb = sb, | ||
1209 | .sync_mode = WB_SYNC_ALL, | ||
1210 | .nr_pages = LONG_MAX, | ||
1211 | .range_cyclic = 0, | ||
1212 | }; | ||
1213 | |||
1214 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | ||
1215 | |||
1216 | bdi_queue_work_onstack(&args); | ||
1257 | wait_sb_inodes(sb); | 1217 | wait_sb_inodes(sb); |
1258 | } | 1218 | } |
1259 | EXPORT_SYMBOL(sync_inodes_sb); | 1219 | EXPORT_SYMBOL(sync_inodes_sb); |
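Editor's note: taken together, the fs-writeback.c hunks split the old bdi_start_writeback(bdi, sb, nr_pages) into explicit entry points — bdi_start_writeback() for a plain WB_SYNC_NONE page count, bdi_start_background_writeback() for the former nr_pages == 0 special case, and bdi_queue_work_onstack() for per-sb work whose caller holds s_umount and waits for completion (writeback_inodes_sb()/sync_inodes_sb() now WARN if it doesn't). A hedged sketch of what call sites look like after the split; the caller names and the threshold predicate are assumptions based on the usual users, not shown in this diff:

/* mm side (assumed caller such as balance_dirty_pages()): kick background
 * writeback explicitly instead of passing the old magic nr_pages == 0. */
if (bdi_over_bground_thresh(bdi))		/* hypothetical predicate */
	bdi_start_background_writeback(bdi);
else
	bdi_start_writeback(bdi, nr_pages);	/* sb argument is gone */

/* Filesystem side: the sb-targeted helpers now require s_umount held for
 * reading, as the ubifs hunk further down does. */
down_read(&sb->s_umount);
writeback_inodes_sb(sb);	/* WB_SYNC_NONE; waits via the on-stack work item */
up_read(&sb->s_umount);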
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 46d4b5d72bd3..cb6306e63843 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -122,11 +122,20 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, | |||
122 | return size; | 122 | return size; |
123 | } | 123 | } |
124 | 124 | ||
125 | static void pad_len_spaces(struct seq_file *m, int len) | ||
126 | { | ||
127 | len = 25 + sizeof(void*) * 6 - len; | ||
128 | if (len < 1) | ||
129 | len = 1; | ||
130 | seq_printf(m, "%*c", len, ' '); | ||
131 | } | ||
132 | |||
125 | /* | 133 | /* |
126 | * display a single VMA to a sequenced file | 134 | * display a single VMA to a sequenced file |
127 | */ | 135 | */ |
128 | static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | 136 | static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) |
129 | { | 137 | { |
138 | struct mm_struct *mm = vma->vm_mm; | ||
130 | unsigned long ino = 0; | 139 | unsigned long ino = 0; |
131 | struct file *file; | 140 | struct file *file; |
132 | dev_t dev = 0; | 141 | dev_t dev = 0; |
@@ -155,11 +164,14 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | |||
155 | MAJOR(dev), MINOR(dev), ino, &len); | 164 | MAJOR(dev), MINOR(dev), ino, &len); |
156 | 165 | ||
157 | if (file) { | 166 | if (file) { |
158 | len = 25 + sizeof(void *) * 6 - len; | 167 | pad_len_spaces(m, len); |
159 | if (len < 1) | ||
160 | len = 1; | ||
161 | seq_printf(m, "%*c", len, ' '); | ||
162 | seq_path(m, &file->f_path, ""); | 168 | seq_path(m, &file->f_path, ""); |
169 | } else if (mm) { | ||
170 | if (vma->vm_start <= mm->start_stack && | ||
171 | vma->vm_end >= mm->start_stack) { | ||
172 | pad_len_spaces(m, len); | ||
173 | seq_puts(m, "[stack]"); | ||
174 | } | ||
163 | } | 175 | } |
164 | 176 | ||
165 | seq_putc(m, '\n'); | 177 | seq_putc(m, '\n'); |
diff --git a/fs/super.c b/fs/super.c index 5c35bc7a499e..938119ab8dcb 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -374,6 +374,8 @@ void sync_supers(void) | |||
374 | up_read(&sb->s_umount); | 374 | up_read(&sb->s_umount); |
375 | 375 | ||
376 | spin_lock(&sb_lock); | 376 | spin_lock(&sb_lock); |
377 | /* lock was dropped, must reset next */ | ||
378 | list_safe_reset_next(sb, n, s_list); | ||
377 | __put_super(sb); | 379 | __put_super(sb); |
378 | } | 380 | } |
379 | } | 381 | } |
@@ -405,6 +407,8 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg) | |||
405 | up_read(&sb->s_umount); | 407 | up_read(&sb->s_umount); |
406 | 408 | ||
407 | spin_lock(&sb_lock); | 409 | spin_lock(&sb_lock); |
410 | /* lock was dropped, must reset next */ | ||
411 | list_safe_reset_next(sb, n, s_list); | ||
408 | __put_super(sb); | 412 | __put_super(sb); |
409 | } | 413 | } |
410 | spin_unlock(&sb_lock); | 414 | spin_unlock(&sb_lock); |
@@ -585,6 +589,8 @@ static void do_emergency_remount(struct work_struct *work) | |||
585 | } | 589 | } |
586 | up_write(&sb->s_umount); | 590 | up_write(&sb->s_umount); |
587 | spin_lock(&sb_lock); | 591 | spin_lock(&sb_lock); |
592 | /* lock was dropped, must reset next */ | ||
593 | list_safe_reset_next(sb, n, s_list); | ||
588 | __put_super(sb); | 594 | __put_super(sb); |
589 | } | 595 | } |
590 | spin_unlock(&sb_lock); | 596 | spin_unlock(&sb_lock); |
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c index bbd69bdb0fa8..fcc498ec9b33 100644 --- a/fs/sysv/ialloc.c +++ b/fs/sysv/ialloc.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/stat.h> | 25 | #include <linux/stat.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/buffer_head.h> | 27 | #include <linux/buffer_head.h> |
28 | #include <linux/writeback.h> | ||
28 | #include "sysv.h" | 29 | #include "sysv.h" |
29 | 30 | ||
30 | /* We don't trust the value of | 31 | /* We don't trust the value of |
@@ -139,6 +140,9 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) | |||
139 | struct inode *inode; | 140 | struct inode *inode; |
140 | sysv_ino_t ino; | 141 | sysv_ino_t ino; |
141 | unsigned count; | 142 | unsigned count; |
143 | struct writeback_control wbc = { | ||
144 | .sync_mode = WB_SYNC_NONE | ||
145 | }; | ||
142 | 146 | ||
143 | inode = new_inode(sb); | 147 | inode = new_inode(sb); |
144 | if (!inode) | 148 | if (!inode) |
@@ -168,7 +172,7 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) | |||
168 | insert_inode_hash(inode); | 172 | insert_inode_hash(inode); |
169 | mark_inode_dirty(inode); | 173 | mark_inode_dirty(inode); |
170 | 174 | ||
171 | sysv_write_inode(inode, 0); /* ensure inode not allocated again */ | 175 | sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */ |
172 | mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ | 176 | mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ |
173 | /* That's it. */ | 177 | /* That's it. */ |
174 | unlock_super(sb); | 178 | unlock_super(sb); |
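Editor's note: the sysv hunk adapts a direct call to sysv_write_inode(), whose second parameter is no longer a bare int flag but the standard writeback control used by ->write_inode(); the stack-allocated WB_SYNC_NONE wbc preserves the old "best effort, don't wait" behaviour of passing 0. Sketch of the signature the call now targets (the declaration is expected to live in fs/sysv/sysv.h):

#include <linux/writeback.h>

/* ->write_inode()-style prototype; WB_SYNC_NONE matches the old sync = 0. */
extern int sysv_write_inode(struct inode *inode, struct writeback_control *wbc);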
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 076ca50e9933..c8ff0d1ae5d3 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c | |||
@@ -62,7 +62,9 @@ | |||
62 | */ | 62 | */ |
63 | static void shrink_liability(struct ubifs_info *c, int nr_to_write) | 63 | static void shrink_liability(struct ubifs_info *c, int nr_to_write) |
64 | { | 64 | { |
65 | down_read(&c->vfs_sb->s_umount); | ||
65 | writeback_inodes_sb(c->vfs_sb); | 66 | writeback_inodes_sb(c->vfs_sb); |
67 | up_read(&c->vfs_sb->s_umount); | ||
66 | } | 68 | } |
67 | 69 | ||
68 | /** | 70 | /** |
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 846b75aeb2ab..e7839ee49e43 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c | |||
@@ -128,13 +128,12 @@ xfs_nfs_get_inode( | |||
128 | return ERR_PTR(-ESTALE); | 128 | return ERR_PTR(-ESTALE); |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * The XFS_IGET_BULKSTAT means that an invalid inode number is just | 131 | * The XFS_IGET_UNTRUSTED means that an invalid inode number is just |
132 | * fine and not an indication of a corrupted filesystem. Because | 132 | * fine and not an indication of a corrupted filesystem as clients can |
133 | * clients can send any kind of invalid file handle, e.g. after | 133 | * send invalid file handles and we have to handle it gracefully.. |
134 | * a restore on the server we have to deal with this case gracefully. | ||
135 | */ | 134 | */ |
136 | error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT, | 135 | error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, |
137 | XFS_ILOCK_SHARED, &ip, 0); | 136 | XFS_ILOCK_SHARED, &ip); |
138 | if (error) { | 137 | if (error) { |
139 | /* | 138 | /* |
140 | * EINVAL means the inode cluster doesn't exist anymore. | 139 | * EINVAL means the inode cluster doesn't exist anymore. |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 699b60cbab9c..e59a81062830 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -679,10 +679,9 @@ xfs_ioc_bulkstat( | |||
679 | error = xfs_bulkstat_single(mp, &inlast, | 679 | error = xfs_bulkstat_single(mp, &inlast, |
680 | bulkreq.ubuffer, &done); | 680 | bulkreq.ubuffer, &done); |
681 | else /* XFS_IOC_FSBULKSTAT */ | 681 | else /* XFS_IOC_FSBULKSTAT */ |
682 | error = xfs_bulkstat(mp, &inlast, &count, | 682 | error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, |
683 | (bulkstat_one_pf)xfs_bulkstat_one, NULL, | 683 | sizeof(xfs_bstat_t), bulkreq.ubuffer, |
684 | sizeof(xfs_bstat_t), bulkreq.ubuffer, | 684 | &done); |
685 | BULKSTAT_FG_QUICK, &done); | ||
686 | 685 | ||
687 | if (error) | 686 | if (error) |
688 | return -error; | 687 | return -error; |
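Editor's note: this and the following XFS hunks all track one interface change — xfs_bulkstat() loses its private-data pointer, the per-inode block number, the on-disk inode buffer argument and the BULKSTAT_FG_* selection flags, leaving only the formatter callback. A sketch of the resulting prototypes as the call sites above imply them (the authoritative declarations live in fs/xfs/xfs_itable.h):

/* Per-inode callback: no more private_data/bno/dibuff parameters. */
typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, xfs_ino_t ino,
			       void __user *buffer, int ubsize,
			       int *ubused, int *stat);

/* Iterator: every inode now goes through xfs_iget(), so the old
 * FG_QUICK/FG_IGET distinction disappears along with the flags argument. */
int xfs_bulkstat(struct xfs_mount *mp, xfs_ino_t *lastinop, int *ubcountp,
		 bulkstat_one_pf formatter, size_t statstruct_size,
		 char __user *ubuffer, int *done);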
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 9287135e9bfc..52ed49e6465c 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c | |||
@@ -237,15 +237,12 @@ xfs_bulkstat_one_compat( | |||
237 | xfs_ino_t ino, /* inode number to get data for */ | 237 | xfs_ino_t ino, /* inode number to get data for */ |
238 | void __user *buffer, /* buffer to place output in */ | 238 | void __user *buffer, /* buffer to place output in */ |
239 | int ubsize, /* size of buffer */ | 239 | int ubsize, /* size of buffer */ |
240 | void *private_data, /* my private data */ | ||
241 | xfs_daddr_t bno, /* starting bno of inode cluster */ | ||
242 | int *ubused, /* bytes used by me */ | 240 | int *ubused, /* bytes used by me */ |
243 | void *dibuff, /* on-disk inode buffer */ | ||
244 | int *stat) /* BULKSTAT_RV_... */ | 241 | int *stat) /* BULKSTAT_RV_... */ |
245 | { | 242 | { |
246 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, | 243 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, |
247 | xfs_bulkstat_one_fmt_compat, bno, | 244 | xfs_bulkstat_one_fmt_compat, |
248 | ubused, dibuff, stat); | 245 | ubused, stat); |
249 | } | 246 | } |
250 | 247 | ||
251 | /* copied from xfs_ioctl.c */ | 248 | /* copied from xfs_ioctl.c */ |
@@ -298,13 +295,11 @@ xfs_compat_ioc_bulkstat( | |||
298 | int res; | 295 | int res; |
299 | 296 | ||
300 | error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, | 297 | error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, |
301 | sizeof(compat_xfs_bstat_t), | 298 | sizeof(compat_xfs_bstat_t), 0, &res); |
302 | NULL, 0, NULL, NULL, &res); | ||
303 | } else if (cmd == XFS_IOC_FSBULKSTAT_32) { | 299 | } else if (cmd == XFS_IOC_FSBULKSTAT_32) { |
304 | error = xfs_bulkstat(mp, &inlast, &count, | 300 | error = xfs_bulkstat(mp, &inlast, &count, |
305 | xfs_bulkstat_one_compat, NULL, | 301 | xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), |
306 | sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, | 302 | bulkreq.ubuffer, &done); |
307 | BULKSTAT_FG_QUICK, &done); | ||
308 | } else | 303 | } else |
309 | error = XFS_ERROR(EINVAL); | 304 | error = XFS_ERROR(EINVAL); |
310 | if (error) | 305 | if (error) |
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 2d8b7bc792c9..8c117ff2e3ab 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -1632,10 +1632,7 @@ xfs_qm_dqusage_adjust( | |||
1632 | xfs_ino_t ino, /* inode number to get data for */ | 1632 | xfs_ino_t ino, /* inode number to get data for */ |
1633 | void __user *buffer, /* not used */ | 1633 | void __user *buffer, /* not used */ |
1634 | int ubsize, /* not used */ | 1634 | int ubsize, /* not used */ |
1635 | void *private_data, /* not used */ | ||
1636 | xfs_daddr_t bno, /* starting block of inode cluster */ | ||
1637 | int *ubused, /* not used */ | 1635 | int *ubused, /* not used */ |
1638 | void *dip, /* on-disk inode pointer (not used) */ | ||
1639 | int *res) /* result code value */ | 1636 | int *res) /* result code value */ |
1640 | { | 1637 | { |
1641 | xfs_inode_t *ip; | 1638 | xfs_inode_t *ip; |
@@ -1660,7 +1657,7 @@ xfs_qm_dqusage_adjust( | |||
1660 | * the case in all other instances. It's OK that we do this because | 1657 | * the case in all other instances. It's OK that we do this because |
1661 | * quotacheck is done only at mount time. | 1658 | * quotacheck is done only at mount time. |
1662 | */ | 1659 | */ |
1663 | if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) { | 1660 | if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) { |
1664 | *res = BULKSTAT_RV_NOTHING; | 1661 | *res = BULKSTAT_RV_NOTHING; |
1665 | return error; | 1662 | return error; |
1666 | } | 1663 | } |
@@ -1796,12 +1793,13 @@ xfs_qm_quotacheck( | |||
1796 | * Iterate thru all the inodes in the file system, | 1793 | * Iterate thru all the inodes in the file system, |
1797 | * adjusting the corresponding dquot counters in core. | 1794 | * adjusting the corresponding dquot counters in core. |
1798 | */ | 1795 | */ |
1799 | if ((error = xfs_bulkstat(mp, &lastino, &count, | 1796 | error = xfs_bulkstat(mp, &lastino, &count, |
1800 | xfs_qm_dqusage_adjust, NULL, | 1797 | xfs_qm_dqusage_adjust, |
1801 | structsz, NULL, BULKSTAT_FG_IGET, &done))) | 1798 | structsz, NULL, &done); |
1799 | if (error) | ||
1802 | break; | 1800 | break; |
1803 | 1801 | ||
1804 | } while (! done); | 1802 | } while (!done); |
1805 | 1803 | ||
1806 | /* | 1804 | /* |
1807 | * We've made all the changes that we need to make incore. | 1805 | * We've made all the changes that we need to make incore. |
@@ -1889,14 +1887,14 @@ xfs_qm_init_quotainos( | |||
1889 | mp->m_sb.sb_uquotino != NULLFSINO) { | 1887 | mp->m_sb.sb_uquotino != NULLFSINO) { |
1890 | ASSERT(mp->m_sb.sb_uquotino > 0); | 1888 | ASSERT(mp->m_sb.sb_uquotino > 0); |
1891 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, | 1889 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
1892 | 0, 0, &uip, 0))) | 1890 | 0, 0, &uip))) |
1893 | return XFS_ERROR(error); | 1891 | return XFS_ERROR(error); |
1894 | } | 1892 | } |
1895 | if (XFS_IS_OQUOTA_ON(mp) && | 1893 | if (XFS_IS_OQUOTA_ON(mp) && |
1896 | mp->m_sb.sb_gquotino != NULLFSINO) { | 1894 | mp->m_sb.sb_gquotino != NULLFSINO) { |
1897 | ASSERT(mp->m_sb.sb_gquotino > 0); | 1895 | ASSERT(mp->m_sb.sb_gquotino > 0); |
1898 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, | 1896 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
1899 | 0, 0, &gip, 0))) { | 1897 | 0, 0, &gip))) { |
1900 | if (uip) | 1898 | if (uip) |
1901 | IRELE(uip); | 1899 | IRELE(uip); |
1902 | return XFS_ERROR(error); | 1900 | return XFS_ERROR(error); |
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 92b002f1805f..b4487764e923 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
@@ -262,7 +262,7 @@ xfs_qm_scall_trunc_qfiles( | |||
262 | } | 262 | } |
263 | 263 | ||
264 | if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { | 264 | if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { |
265 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); | 265 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip); |
266 | if (!error) { | 266 | if (!error) { |
267 | error = xfs_truncate_file(mp, qip); | 267 | error = xfs_truncate_file(mp, qip); |
268 | IRELE(qip); | 268 | IRELE(qip); |
@@ -271,7 +271,7 @@ xfs_qm_scall_trunc_qfiles( | |||
271 | 271 | ||
272 | if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && | 272 | if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && |
273 | mp->m_sb.sb_gquotino != NULLFSINO) { | 273 | mp->m_sb.sb_gquotino != NULLFSINO) { |
274 | error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0); | 274 | error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip); |
275 | if (!error2) { | 275 | if (!error2) { |
276 | error2 = xfs_truncate_file(mp, qip); | 276 | error2 = xfs_truncate_file(mp, qip); |
277 | IRELE(qip); | 277 | IRELE(qip); |
@@ -417,12 +417,12 @@ xfs_qm_scall_getqstat( | |||
417 | } | 417 | } |
418 | if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { | 418 | if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { |
419 | if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, | 419 | if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
420 | 0, 0, &uip, 0) == 0) | 420 | 0, 0, &uip) == 0) |
421 | tempuqip = B_TRUE; | 421 | tempuqip = B_TRUE; |
422 | } | 422 | } |
423 | if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { | 423 | if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { |
424 | if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, | 424 | if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
425 | 0, 0, &gip, 0) == 0) | 425 | 0, 0, &gip) == 0) |
426 | tempgqip = B_TRUE; | 426 | tempgqip = B_TRUE; |
427 | } | 427 | } |
428 | if (uip) { | 428 | if (uip) { |
@@ -1109,10 +1109,7 @@ xfs_qm_internalqcheck_adjust( | |||
1109 | xfs_ino_t ino, /* inode number to get data for */ | 1109 | xfs_ino_t ino, /* inode number to get data for */ |
1110 | void __user *buffer, /* not used */ | 1110 | void __user *buffer, /* not used */ |
1111 | int ubsize, /* not used */ | 1111 | int ubsize, /* not used */ |
1112 | void *private_data, /* not used */ | ||
1113 | xfs_daddr_t bno, /* starting block of inode cluster */ | ||
1114 | int *ubused, /* not used */ | 1112 | int *ubused, /* not used */ |
1115 | void *dip, /* not used */ | ||
1116 | int *res) /* bulkstat result code */ | 1113 | int *res) /* bulkstat result code */ |
1117 | { | 1114 | { |
1118 | xfs_inode_t *ip; | 1115 | xfs_inode_t *ip; |
@@ -1134,7 +1131,7 @@ xfs_qm_internalqcheck_adjust( | |||
1134 | ipreleased = B_FALSE; | 1131 | ipreleased = B_FALSE; |
1135 | again: | 1132 | again: |
1136 | lock_flags = XFS_ILOCK_SHARED; | 1133 | lock_flags = XFS_ILOCK_SHARED; |
1137 | if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) { | 1134 | if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) { |
1138 | *res = BULKSTAT_RV_NOTHING; | 1135 | *res = BULKSTAT_RV_NOTHING; |
1139 | return (error); | 1136 | return (error); |
1140 | } | 1137 | } |
@@ -1205,15 +1202,15 @@ xfs_qm_internalqcheck( | |||
1205 | * Iterate thru all the inodes in the file system, | 1202 | * Iterate thru all the inodes in the file system, |
1206 | * adjusting the corresponding dquot counters | 1203 | * adjusting the corresponding dquot counters |
1207 | */ | 1204 | */ |
1208 | if ((error = xfs_bulkstat(mp, &lastino, &count, | 1205 | error = xfs_bulkstat(mp, &lastino, &count, |
1209 | xfs_qm_internalqcheck_adjust, NULL, | 1206 | xfs_qm_internalqcheck_adjust, |
1210 | 0, NULL, BULKSTAT_FG_IGET, &done))) { | 1207 | 0, NULL, &done); |
1208 | if (error) { | ||
1209 | cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); | ||
1211 | break; | 1210 | break; |
1212 | } | 1211 | } |
1213 | } while (! done); | 1212 | } while (!done); |
1214 | if (error) { | 1213 | |
1215 | cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); | ||
1216 | } | ||
1217 | cmn_err(CE_DEBUG, "Checking results against system dquots"); | 1214 | cmn_err(CE_DEBUG, "Checking results against system dquots"); |
1218 | for (i = 0; i < qmtest_hashmask; i++) { | 1215 | for (i = 0; i < qmtest_hashmask; i++) { |
1219 | xfs_dqtest_t *d, *n; | 1216 | xfs_dqtest_t *d, *n; |
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index 5bba29a07812..7f159d2a429a 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c | |||
@@ -69,7 +69,9 @@ xfs_swapext( | |||
69 | goto out; | 69 | goto out; |
70 | } | 70 | } |
71 | 71 | ||
72 | if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { | 72 | if (!(file->f_mode & FMODE_WRITE) || |
73 | !(file->f_mode & FMODE_READ) || | ||
74 | (file->f_flags & O_APPEND)) { | ||
73 | error = XFS_ERROR(EBADF); | 75 | error = XFS_ERROR(EBADF); |
74 | goto out_put_file; | 76 | goto out_put_file; |
75 | } | 77 | } |
@@ -81,6 +83,7 @@ xfs_swapext( | |||
81 | } | 83 | } |
82 | 84 | ||
83 | if (!(tmp_file->f_mode & FMODE_WRITE) || | 85 | if (!(tmp_file->f_mode & FMODE_WRITE) || |
86 | !(tmp_file->f_mode & FMODE_READ) || | ||
84 | (tmp_file->f_flags & O_APPEND)) { | 87 | (tmp_file->f_flags & O_APPEND)) { |
85 | error = XFS_ERROR(EBADF); | 88 | error = XFS_ERROR(EBADF); |
86 | goto out_put_tmp_file; | 89 | goto out_put_tmp_file; |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 9d884c127bb9..c7142a064c48 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -1203,6 +1203,63 @@ error0: | |||
1203 | return error; | 1203 | return error; |
1204 | } | 1204 | } |
1205 | 1205 | ||
1206 | STATIC int | ||
1207 | xfs_imap_lookup( | ||
1208 | struct xfs_mount *mp, | ||
1209 | struct xfs_trans *tp, | ||
1210 | xfs_agnumber_t agno, | ||
1211 | xfs_agino_t agino, | ||
1212 | xfs_agblock_t agbno, | ||
1213 | xfs_agblock_t *chunk_agbno, | ||
1214 | xfs_agblock_t *offset_agbno, | ||
1215 | int flags) | ||
1216 | { | ||
1217 | struct xfs_inobt_rec_incore rec; | ||
1218 | struct xfs_btree_cur *cur; | ||
1219 | struct xfs_buf *agbp; | ||
1220 | xfs_agino_t startino; | ||
1221 | int error; | ||
1222 | int i; | ||
1223 | |||
1224 | error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); | ||
1225 | if (error) { | ||
1226 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1227 | "xfs_ialloc_read_agi() returned " | ||
1228 | "error %d, agno %d", | ||
1229 | error, agno); | ||
1230 | return error; | ||
1231 | } | ||
1232 | |||
1233 | /* | ||
1234 | * derive and lookup the exact inode record for the given agino. If the | ||
1235 | * record cannot be found, then it's an invalid inode number and we | ||
1236 | * should abort. | ||
1237 | */ | ||
1238 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); | ||
1239 | startino = agino & ~(XFS_IALLOC_INODES(mp) - 1); | ||
1240 | error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i); | ||
1241 | if (!error) { | ||
1242 | if (i) | ||
1243 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
1244 | if (!error && i == 0) | ||
1245 | error = EINVAL; | ||
1246 | } | ||
1247 | |||
1248 | xfs_trans_brelse(tp, agbp); | ||
1249 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1250 | if (error) | ||
1251 | return error; | ||
1252 | |||
1253 | /* for untrusted inodes check it is allocated first */ | ||
1254 | if ((flags & XFS_IGET_UNTRUSTED) && | ||
1255 | (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) | ||
1256 | return EINVAL; | ||
1257 | |||
1258 | *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); | ||
1259 | *offset_agbno = agbno - *chunk_agbno; | ||
1260 | return 0; | ||
1261 | } | ||
1262 | |||
1206 | /* | 1263 | /* |
1207 | * Return the location of the inode in imap, for mapping it into a buffer. | 1264 | * Return the location of the inode in imap, for mapping it into a buffer. |
1208 | */ | 1265 | */ |
@@ -1235,8 +1292,11 @@ xfs_imap( | |||
1235 | if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || | 1292 | if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || |
1236 | ino != XFS_AGINO_TO_INO(mp, agno, agino)) { | 1293 | ino != XFS_AGINO_TO_INO(mp, agno, agino)) { |
1237 | #ifdef DEBUG | 1294 | #ifdef DEBUG |
1238 | /* no diagnostics for bulkstat, ino comes from userspace */ | 1295 | /* |
1239 | if (flags & XFS_IGET_BULKSTAT) | 1296 | * Don't output diagnostic information for untrusted inodes |
1297 | * as they can be invalid without implying corruption. | ||
1298 | */ | ||
1299 | if (flags & XFS_IGET_UNTRUSTED) | ||
1240 | return XFS_ERROR(EINVAL); | 1300 | return XFS_ERROR(EINVAL); |
1241 | if (agno >= mp->m_sb.sb_agcount) { | 1301 | if (agno >= mp->m_sb.sb_agcount) { |
1242 | xfs_fs_cmn_err(CE_ALERT, mp, | 1302 | xfs_fs_cmn_err(CE_ALERT, mp, |
@@ -1263,6 +1323,23 @@ xfs_imap( | |||
1263 | return XFS_ERROR(EINVAL); | 1323 | return XFS_ERROR(EINVAL); |
1264 | } | 1324 | } |
1265 | 1325 | ||
1326 | blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; | ||
1327 | |||
1328 | /* | ||
1329 | * For bulkstat and handle lookups, we have an untrusted inode number | ||
1330 | * that we have to verify is valid. We cannot do this just by reading | ||
1331 | * the inode buffer as it may have been unlinked and removed leaving | ||
1332 | * inodes in stale state on disk. Hence we have to do a btree lookup | ||
1333 | * in all cases where an untrusted inode number is passed. | ||
1334 | */ | ||
1335 | if (flags & XFS_IGET_UNTRUSTED) { | ||
1336 | error = xfs_imap_lookup(mp, tp, agno, agino, agbno, | ||
1337 | &chunk_agbno, &offset_agbno, flags); | ||
1338 | if (error) | ||
1339 | return error; | ||
1340 | goto out_map; | ||
1341 | } | ||
1342 | |||
1266 | /* | 1343 | /* |
1267 | * If the inode cluster size is the same as the blocksize or | 1344 | * If the inode cluster size is the same as the blocksize or |
1268 | * smaller we get to the buffer by simple arithmetics. | 1345 | * smaller we get to the buffer by simple arithmetics. |
@@ -1277,24 +1354,6 @@ xfs_imap( | |||
1277 | return 0; | 1354 | return 0; |
1278 | } | 1355 | } |
1279 | 1356 | ||
1280 | blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; | ||
1281 | |||
1282 | /* | ||
1283 | * If we get a block number passed from bulkstat we can use it to | ||
1284 | * find the buffer easily. | ||
1285 | */ | ||
1286 | if (imap->im_blkno) { | ||
1287 | offset = XFS_INO_TO_OFFSET(mp, ino); | ||
1288 | ASSERT(offset < mp->m_sb.sb_inopblock); | ||
1289 | |||
1290 | cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno); | ||
1291 | offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock; | ||
1292 | |||
1293 | imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); | ||
1294 | imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); | ||
1295 | return 0; | ||
1296 | } | ||
1297 | |||
1298 | /* | 1357 | /* |
1299 | * If the inode chunks are aligned then use simple maths to | 1358 | * If the inode chunks are aligned then use simple maths to |
1300 | * find the location. Otherwise we have to do a btree | 1359 | * find the location. Otherwise we have to do a btree |
@@ -1304,50 +1363,13 @@ xfs_imap( | |||
1304 | offset_agbno = agbno & mp->m_inoalign_mask; | 1363 | offset_agbno = agbno & mp->m_inoalign_mask; |
1305 | chunk_agbno = agbno - offset_agbno; | 1364 | chunk_agbno = agbno - offset_agbno; |
1306 | } else { | 1365 | } else { |
1307 | xfs_btree_cur_t *cur; /* inode btree cursor */ | 1366 | error = xfs_imap_lookup(mp, tp, agno, agino, agbno, |
1308 | xfs_inobt_rec_incore_t chunk_rec; | 1367 | &chunk_agbno, &offset_agbno, flags); |
1309 | xfs_buf_t *agbp; /* agi buffer */ | ||
1310 | int i; /* temp state */ | ||
1311 | |||
1312 | error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); | ||
1313 | if (error) { | ||
1314 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1315 | "xfs_ialloc_read_agi() returned " | ||
1316 | "error %d, agno %d", | ||
1317 | error, agno); | ||
1318 | return error; | ||
1319 | } | ||
1320 | |||
1321 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); | ||
1322 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); | ||
1323 | if (error) { | ||
1324 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1325 | "xfs_inobt_lookup() failed"); | ||
1326 | goto error0; | ||
1327 | } | ||
1328 | |||
1329 | error = xfs_inobt_get_rec(cur, &chunk_rec, &i); | ||
1330 | if (error) { | ||
1331 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1332 | "xfs_inobt_get_rec() failed"); | ||
1333 | goto error0; | ||
1334 | } | ||
1335 | if (i == 0) { | ||
1336 | #ifdef DEBUG | ||
1337 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1338 | "xfs_inobt_get_rec() failed"); | ||
1339 | #endif /* DEBUG */ | ||
1340 | error = XFS_ERROR(EINVAL); | ||
1341 | } | ||
1342 | error0: | ||
1343 | xfs_trans_brelse(tp, agbp); | ||
1344 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1345 | if (error) | 1368 | if (error) |
1346 | return error; | 1369 | return error; |
1347 | chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino); | ||
1348 | offset_agbno = agbno - chunk_agbno; | ||
1349 | } | 1370 | } |
1350 | 1371 | ||
1372 | out_map: | ||
1351 | ASSERT(agbno >= chunk_agbno); | 1373 | ASSERT(agbno >= chunk_agbno); |
1352 | cluster_agbno = chunk_agbno + | 1374 | cluster_agbno = chunk_agbno + |
1353 | ((offset_agbno / blks_per_cluster) * blks_per_cluster); | 1375 | ((offset_agbno / blks_per_cluster) * blks_per_cluster); |
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 75df75f43d48..8f8b91be2c99 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -259,7 +259,6 @@ xfs_iget_cache_miss( | |||
259 | xfs_trans_t *tp, | 259 | xfs_trans_t *tp, |
260 | xfs_ino_t ino, | 260 | xfs_ino_t ino, |
261 | struct xfs_inode **ipp, | 261 | struct xfs_inode **ipp, |
262 | xfs_daddr_t bno, | ||
263 | int flags, | 262 | int flags, |
264 | int lock_flags) | 263 | int lock_flags) |
265 | { | 264 | { |
@@ -272,7 +271,7 @@ xfs_iget_cache_miss( | |||
272 | if (!ip) | 271 | if (!ip) |
273 | return ENOMEM; | 272 | return ENOMEM; |
274 | 273 | ||
275 | error = xfs_iread(mp, tp, ip, bno, flags); | 274 | error = xfs_iread(mp, tp, ip, flags); |
276 | if (error) | 275 | if (error) |
277 | goto out_destroy; | 276 | goto out_destroy; |
278 | 277 | ||
@@ -358,8 +357,6 @@ out_destroy: | |||
358 | * within the file system for the inode being requested. | 357 | * within the file system for the inode being requested. |
359 | * lock_flags -- flags indicating how to lock the inode. See the comment | 358 | * lock_flags -- flags indicating how to lock the inode. See the comment |
360 | * for xfs_ilock() for a list of valid values. | 359 | * for xfs_ilock() for a list of valid values. |
361 | * bno -- the block number starting the buffer containing the inode, | ||
362 | * if known (as by bulkstat), else 0. | ||
363 | */ | 360 | */ |
364 | int | 361 | int |
365 | xfs_iget( | 362 | xfs_iget( |
@@ -368,8 +365,7 @@ xfs_iget( | |||
368 | xfs_ino_t ino, | 365 | xfs_ino_t ino, |
369 | uint flags, | 366 | uint flags, |
370 | uint lock_flags, | 367 | uint lock_flags, |
371 | xfs_inode_t **ipp, | 368 | xfs_inode_t **ipp) |
372 | xfs_daddr_t bno) | ||
373 | { | 369 | { |
374 | xfs_inode_t *ip; | 370 | xfs_inode_t *ip; |
375 | int error; | 371 | int error; |
@@ -397,7 +393,7 @@ again: | |||
397 | read_unlock(&pag->pag_ici_lock); | 393 | read_unlock(&pag->pag_ici_lock); |
398 | XFS_STATS_INC(xs_ig_missed); | 394 | XFS_STATS_INC(xs_ig_missed); |
399 | 395 | ||
400 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno, | 396 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, |
401 | flags, lock_flags); | 397 | flags, lock_flags); |
402 | if (error) | 398 | if (error) |
403 | goto out_error_or_again; | 399 | goto out_error_or_again; |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d53c39de7d05..b76a829d7e20 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -177,7 +177,7 @@ xfs_imap_to_bp( | |||
177 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, | 177 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, |
178 | XFS_ERRTAG_ITOBP_INOTOBP, | 178 | XFS_ERRTAG_ITOBP_INOTOBP, |
179 | XFS_RANDOM_ITOBP_INOTOBP))) { | 179 | XFS_RANDOM_ITOBP_INOTOBP))) { |
180 | if (iget_flags & XFS_IGET_BULKSTAT) { | 180 | if (iget_flags & XFS_IGET_UNTRUSTED) { |
181 | xfs_trans_brelse(tp, bp); | 181 | xfs_trans_brelse(tp, bp); |
182 | return XFS_ERROR(EINVAL); | 182 | return XFS_ERROR(EINVAL); |
183 | } | 183 | } |
@@ -787,7 +787,6 @@ xfs_iread( | |||
787 | xfs_mount_t *mp, | 787 | xfs_mount_t *mp, |
788 | xfs_trans_t *tp, | 788 | xfs_trans_t *tp, |
789 | xfs_inode_t *ip, | 789 | xfs_inode_t *ip, |
790 | xfs_daddr_t bno, | ||
791 | uint iget_flags) | 790 | uint iget_flags) |
792 | { | 791 | { |
793 | xfs_buf_t *bp; | 792 | xfs_buf_t *bp; |
@@ -797,11 +796,9 @@ xfs_iread( | |||
797 | /* | 796 | /* |
798 | * Fill in the location information in the in-core inode. | 797 | * Fill in the location information in the in-core inode. |
799 | */ | 798 | */ |
800 | ip->i_imap.im_blkno = bno; | ||
801 | error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); | 799 | error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); |
802 | if (error) | 800 | if (error) |
803 | return error; | 801 | return error; |
804 | ASSERT(bno == 0 || bno == ip->i_imap.im_blkno); | ||
805 | 802 | ||
806 | /* | 803 | /* |
807 | * Get pointers to the on-disk inode and the buffer containing it. | 804 | * Get pointers to the on-disk inode and the buffer containing it. |
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 9965e40a4615..78550df13cd6 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -442,7 +442,7 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) | |||
442 | * xfs_iget.c prototypes. | 442 | * xfs_iget.c prototypes. |
443 | */ | 443 | */ |
444 | int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, | 444 | int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, |
445 | uint, uint, xfs_inode_t **, xfs_daddr_t); | 445 | uint, uint, xfs_inode_t **); |
446 | void xfs_iput(xfs_inode_t *, uint); | 446 | void xfs_iput(xfs_inode_t *, uint); |
447 | void xfs_iput_new(xfs_inode_t *, uint); | 447 | void xfs_iput_new(xfs_inode_t *, uint); |
448 | void xfs_ilock(xfs_inode_t *, uint); | 448 | void xfs_ilock(xfs_inode_t *, uint); |
@@ -500,7 +500,7 @@ do { \ | |||
500 | * Flags for xfs_iget() | 500 | * Flags for xfs_iget() |
501 | */ | 501 | */ |
502 | #define XFS_IGET_CREATE 0x1 | 502 | #define XFS_IGET_CREATE 0x1 |
503 | #define XFS_IGET_BULKSTAT 0x2 | 503 | #define XFS_IGET_UNTRUSTED 0x2 |
504 | 504 | ||
505 | int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, | 505 | int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, |
506 | xfs_ino_t, struct xfs_dinode **, | 506 | xfs_ino_t, struct xfs_dinode **, |
@@ -509,7 +509,7 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *, | |||
509 | struct xfs_inode *, struct xfs_dinode **, | 509 | struct xfs_inode *, struct xfs_dinode **, |
510 | struct xfs_buf **, uint); | 510 | struct xfs_buf **, uint); |
511 | int xfs_iread(struct xfs_mount *, struct xfs_trans *, | 511 | int xfs_iread(struct xfs_mount *, struct xfs_trans *, |
512 | struct xfs_inode *, xfs_daddr_t, uint); | 512 | struct xfs_inode *, uint); |
513 | void xfs_dinode_to_disk(struct xfs_dinode *, | 513 | void xfs_dinode_to_disk(struct xfs_dinode *, |
514 | struct xfs_icdinode *); | 514 | struct xfs_icdinode *); |
515 | void xfs_idestroy_fork(struct xfs_inode *, int); | 515 | void xfs_idestroy_fork(struct xfs_inode *, int); |
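Editor's note: the other recurring XFS change is the XFS_IGET_BULKSTAT to XFS_IGET_UNTRUSTED rename plus the removal of the trailing block-number argument from xfs_iget()/xfs_iread(). Callers can no longer hand in a cluster location remembered from an earlier bulkstat pass; instead xfs_imap_lookup() validates every untrusted inode number against the inode btree, including the free-mask check shown in the xfs_ialloc.c hunk. A sketch of the call-site pattern after the change, consistent with the hunks above:

/*
 * Inode numbers that come from userspace (bulkstat continuation cookies,
 * NFS / open-by-handle file handles) are looked up with the UNTRUSTED
 * flag and without any caller-supplied location hint.
 */
error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
if (error)
	return error;	/* e.g. EINVAL for a stale or fabricated inode number */

/* ... use 'ip' ... */
xfs_iput(ip, XFS_ILOCK_SHARED);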
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index b1b801e4a28e..2b86f8610512 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -49,24 +49,40 @@ xfs_internal_inum( | |||
49 | (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); | 49 | (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); |
50 | } | 50 | } |
51 | 51 | ||
52 | STATIC int | 52 | /* |
53 | xfs_bulkstat_one_iget( | 53 | * Return stat information for one inode. |
54 | xfs_mount_t *mp, /* mount point for filesystem */ | 54 | * Return 0 if ok, else errno. |
55 | xfs_ino_t ino, /* inode number to get data for */ | 55 | */ |
56 | xfs_daddr_t bno, /* starting bno of inode cluster */ | 56 | int |
57 | xfs_bstat_t *buf, /* return buffer */ | 57 | xfs_bulkstat_one_int( |
58 | int *stat) /* BULKSTAT_RV_... */ | 58 | struct xfs_mount *mp, /* mount point for filesystem */ |
59 | xfs_ino_t ino, /* inode to get data for */ | ||
60 | void __user *buffer, /* buffer to place output in */ | ||
61 | int ubsize, /* size of buffer */ | ||
62 | bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ | ||
63 | int *ubused, /* bytes used by me */ | ||
64 | int *stat) /* BULKSTAT_RV_... */ | ||
59 | { | 65 | { |
60 | xfs_icdinode_t *dic; /* dinode core info pointer */ | 66 | struct xfs_icdinode *dic; /* dinode core info pointer */ |
61 | xfs_inode_t *ip; /* incore inode pointer */ | 67 | struct xfs_inode *ip; /* incore inode pointer */ |
62 | struct inode *inode; | 68 | struct inode *inode; |
63 | int error; | 69 | struct xfs_bstat *buf; /* return buffer */ |
70 | int error = 0; /* error value */ | ||
71 | |||
72 | *stat = BULKSTAT_RV_NOTHING; | ||
73 | |||
74 | if (!buffer || xfs_internal_inum(mp, ino)) | ||
75 | return XFS_ERROR(EINVAL); | ||
76 | |||
77 | buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL); | ||
78 | if (!buf) | ||
79 | return XFS_ERROR(ENOMEM); | ||
64 | 80 | ||
65 | error = xfs_iget(mp, NULL, ino, | 81 | error = xfs_iget(mp, NULL, ino, |
66 | XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno); | 82 | XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip); |
67 | if (error) { | 83 | if (error) { |
68 | *stat = BULKSTAT_RV_NOTHING; | 84 | *stat = BULKSTAT_RV_NOTHING; |
69 | return error; | 85 | goto out_free; |
70 | } | 86 | } |
71 | 87 | ||
72 | ASSERT(ip != NULL); | 88 | ASSERT(ip != NULL); |
@@ -127,77 +143,16 @@ xfs_bulkstat_one_iget( | |||
127 | buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; | 143 | buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; |
128 | break; | 144 | break; |
129 | } | 145 | } |
130 | |||
131 | xfs_iput(ip, XFS_ILOCK_SHARED); | 146 | xfs_iput(ip, XFS_ILOCK_SHARED); |
132 | return error; | ||
133 | } | ||
134 | 147 | ||
135 | STATIC void | 148 | error = formatter(buffer, ubsize, ubused, buf); |
136 | xfs_bulkstat_one_dinode( | ||
137 | xfs_mount_t *mp, /* mount point for filesystem */ | ||
138 | xfs_ino_t ino, /* inode number to get data for */ | ||
139 | xfs_dinode_t *dic, /* dinode inode pointer */ | ||
140 | xfs_bstat_t *buf) /* return buffer */ | ||
141 | { | ||
142 | /* | ||
143 | * The inode format changed when we moved the link count and | ||
144 | * made it 32 bits long. If this is an old format inode, | ||
145 | * convert it in memory to look like a new one. If it gets | ||
146 | * flushed to disk we will convert back before flushing or | ||
147 | * logging it. We zero out the new projid field and the old link | ||
148 | * count field. We'll handle clearing the pad field (the remains | ||
149 | * of the old uuid field) when we actually convert the inode to | ||
150 | * the new format. We don't change the version number so that we | ||
151 | * can distinguish this from a real new format inode. | ||
152 | */ | ||
153 | if (dic->di_version == 1) { | ||
154 | buf->bs_nlink = be16_to_cpu(dic->di_onlink); | ||
155 | buf->bs_projid = 0; | ||
156 | } else { | ||
157 | buf->bs_nlink = be32_to_cpu(dic->di_nlink); | ||
158 | buf->bs_projid = be16_to_cpu(dic->di_projid); | ||
159 | } | ||
160 | 149 | ||
161 | buf->bs_ino = ino; | 150 | if (!error) |
162 | buf->bs_mode = be16_to_cpu(dic->di_mode); | 151 | *stat = BULKSTAT_RV_DIDONE; |
163 | buf->bs_uid = be32_to_cpu(dic->di_uid); | ||
164 | buf->bs_gid = be32_to_cpu(dic->di_gid); | ||
165 | buf->bs_size = be64_to_cpu(dic->di_size); | ||
166 | buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec); | ||
167 | buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec); | ||
168 | buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec); | ||
169 | buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec); | ||
170 | buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec); | ||
171 | buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec); | ||
172 | buf->bs_xflags = xfs_dic2xflags(dic); | ||
173 | buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; | ||
174 | buf->bs_extents = be32_to_cpu(dic->di_nextents); | ||
175 | buf->bs_gen = be32_to_cpu(dic->di_gen); | ||
176 | memset(buf->bs_pad, 0, sizeof(buf->bs_pad)); | ||
177 | buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask); | ||
178 | buf->bs_dmstate = be16_to_cpu(dic->di_dmstate); | ||
179 | buf->bs_aextents = be16_to_cpu(dic->di_anextents); | ||
180 | buf->bs_forkoff = XFS_DFORK_BOFF(dic); | ||
181 | 152 | ||
182 | switch (dic->di_format) { | 153 | out_free: |
183 | case XFS_DINODE_FMT_DEV: | 154 | kmem_free(buf); |
184 | buf->bs_rdev = xfs_dinode_get_rdev(dic); | 155 | return error; |
185 | buf->bs_blksize = BLKDEV_IOSIZE; | ||
186 | buf->bs_blocks = 0; | ||
187 | break; | ||
188 | case XFS_DINODE_FMT_LOCAL: | ||
189 | case XFS_DINODE_FMT_UUID: | ||
190 | buf->bs_rdev = 0; | ||
191 | buf->bs_blksize = mp->m_sb.sb_blocksize; | ||
192 | buf->bs_blocks = 0; | ||
193 | break; | ||
194 | case XFS_DINODE_FMT_EXTENTS: | ||
195 | case XFS_DINODE_FMT_BTREE: | ||
196 | buf->bs_rdev = 0; | ||
197 | buf->bs_blksize = mp->m_sb.sb_blocksize; | ||
198 | buf->bs_blocks = be64_to_cpu(dic->di_nblocks); | ||
199 | break; | ||
200 | } | ||
201 | } | 156 | } |
202 | 157 | ||
203 | /* Return 0 on success or positive error */ | 158 | /* Return 0 on success or positive error */ |
@@ -217,118 +172,17 @@ xfs_bulkstat_one_fmt( | |||
217 | return 0; | 172 | return 0; |
218 | } | 173 | } |
219 | 174 | ||
220 | /* | ||
221 | * Return stat information for one inode. | ||
222 | * Return 0 if ok, else errno. | ||
223 | */ | ||
224 | int /* error status */ | ||
225 | xfs_bulkstat_one_int( | ||
226 | xfs_mount_t *mp, /* mount point for filesystem */ | ||
227 | xfs_ino_t ino, /* inode number to get data for */ | ||
228 | void __user *buffer, /* buffer to place output in */ | ||
229 | int ubsize, /* size of buffer */ | ||
230 | bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ | ||
231 | xfs_daddr_t bno, /* starting bno of inode cluster */ | ||
232 | int *ubused, /* bytes used by me */ | ||
233 | void *dibuff, /* on-disk inode buffer */ | ||
234 | int *stat) /* BULKSTAT_RV_... */ | ||
235 | { | ||
236 | xfs_bstat_t *buf; /* return buffer */ | ||
237 | int error = 0; /* error value */ | ||
238 | xfs_dinode_t *dip; /* dinode inode pointer */ | ||
239 | |||
240 | dip = (xfs_dinode_t *)dibuff; | ||
241 | *stat = BULKSTAT_RV_NOTHING; | ||
242 | |||
243 | if (!buffer || xfs_internal_inum(mp, ino)) | ||
244 | return XFS_ERROR(EINVAL); | ||
245 | |||
246 | buf = kmem_alloc(sizeof(*buf), KM_SLEEP); | ||
247 | |||
248 | if (dip == NULL) { | ||
249 | /* We're not being passed a pointer to a dinode. This happens | ||
250 | * if BULKSTAT_FG_IGET is selected. Do the iget. | ||
251 | */ | ||
252 | error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat); | ||
253 | if (error) | ||
254 | goto out_free; | ||
255 | } else { | ||
256 | xfs_bulkstat_one_dinode(mp, ino, dip, buf); | ||
257 | } | ||
258 | |||
259 | error = formatter(buffer, ubsize, ubused, buf); | ||
260 | if (error) | ||
261 | goto out_free; | ||
262 | |||
263 | *stat = BULKSTAT_RV_DIDONE; | ||
264 | |||
265 | out_free: | ||
266 | kmem_free(buf); | ||
267 | return error; | ||
268 | } | ||
269 | |||
270 | int | 175 | int |
271 | xfs_bulkstat_one( | 176 | xfs_bulkstat_one( |
272 | xfs_mount_t *mp, /* mount point for filesystem */ | 177 | xfs_mount_t *mp, /* mount point for filesystem */ |
273 | xfs_ino_t ino, /* inode number to get data for */ | 178 | xfs_ino_t ino, /* inode number to get data for */ |
274 | void __user *buffer, /* buffer to place output in */ | 179 | void __user *buffer, /* buffer to place output in */ |
275 | int ubsize, /* size of buffer */ | 180 | int ubsize, /* size of buffer */ |
276 | void *private_data, /* my private data */ | ||
277 | xfs_daddr_t bno, /* starting bno of inode cluster */ | ||
278 | int *ubused, /* bytes used by me */ | 181 | int *ubused, /* bytes used by me */ |
279 | void *dibuff, /* on-disk inode buffer */ | ||
280 | int *stat) /* BULKSTAT_RV_... */ | 182 | int *stat) /* BULKSTAT_RV_... */ |
281 | { | 183 | { |
282 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, | 184 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, |
283 | xfs_bulkstat_one_fmt, bno, | 185 | xfs_bulkstat_one_fmt, ubused, stat); |
284 | ubused, dibuff, stat); | ||
285 | } | ||
286 | |||
287 | /* | ||
288 | * Test to see whether we can use the ondisk inode directly, based | ||
289 | * on the given bulkstat flags, filling in dipp accordingly. | ||
290 | * Returns zero if the inode is dodgey. | ||
291 | */ | ||
292 | STATIC int | ||
293 | xfs_bulkstat_use_dinode( | ||
294 | xfs_mount_t *mp, | ||
295 | int flags, | ||
296 | xfs_buf_t *bp, | ||
297 | int clustidx, | ||
298 | xfs_dinode_t **dipp) | ||
299 | { | ||
300 | xfs_dinode_t *dip; | ||
301 | unsigned int aformat; | ||
302 | |||
303 | *dipp = NULL; | ||
304 | if (!bp || (flags & BULKSTAT_FG_IGET)) | ||
305 | return 1; | ||
306 | dip = (xfs_dinode_t *) | ||
307 | xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog); | ||
308 | /* | ||
309 | * Check the buffer containing the on-disk inode for di_mode == 0. | ||
310 | * This is to prevent xfs_bulkstat from picking up just reclaimed | ||
311 | * inodes that have their in-core state initialized but not flushed | ||
312 | * to disk yet. This is a temporary hack that would require a proper | ||
313 | * fix in the future. | ||
314 | */ | ||
315 | if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || | ||
316 | !XFS_DINODE_GOOD_VERSION(dip->di_version) || | ||
317 | !dip->di_mode) | ||
318 | return 0; | ||
319 | if (flags & BULKSTAT_FG_QUICK) { | ||
320 | *dipp = dip; | ||
321 | return 1; | ||
322 | } | ||
323 | /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */ | ||
324 | aformat = dip->di_aformat; | ||
325 | if ((XFS_DFORK_Q(dip) == 0) || | ||
326 | (aformat == XFS_DINODE_FMT_LOCAL) || | ||
327 | (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) { | ||
328 | *dipp = dip; | ||
329 | return 1; | ||
330 | } | ||
331 | return 1; | ||
332 | } | 186 | } |
333 | 187 | ||
334 | #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) | 188 | #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) |
@@ -342,10 +196,8 @@ xfs_bulkstat( | |||
342 | xfs_ino_t *lastinop, /* last inode returned */ | 196 | xfs_ino_t *lastinop, /* last inode returned */ |
343 | int *ubcountp, /* size of buffer/count returned */ | 197 | int *ubcountp, /* size of buffer/count returned */ |
344 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ | 198 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ |
345 | void *private_data,/* private data for formatter */ | ||
346 | size_t statstruct_size, /* sizeof struct filling */ | 199 | size_t statstruct_size, /* sizeof struct filling */ |
347 | char __user *ubuffer, /* buffer with inode stats */ | 200 | char __user *ubuffer, /* buffer with inode stats */ |
348 | int flags, /* defined in xfs_itable.h */ | ||
349 | int *done) /* 1 if there are more stats to get */ | 201 | int *done) /* 1 if there are more stats to get */ |
350 | { | 202 | { |
351 | xfs_agblock_t agbno=0;/* allocation group block number */ | 203 | xfs_agblock_t agbno=0;/* allocation group block number */ |
@@ -380,14 +232,12 @@ xfs_bulkstat( | |||
380 | int ubelem; /* spaces used in user's buffer */ | 232 | int ubelem; /* spaces used in user's buffer */ |
381 | int ubused; /* bytes used by formatter */ | 233 | int ubused; /* bytes used by formatter */ |
382 | xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ | 234 | xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ |
383 | xfs_dinode_t *dip; /* ptr into bp for specific inode */ | ||
384 | 235 | ||
385 | /* | 236 | /* |
386 | * Get the last inode value, see if there's nothing to do. | 237 | * Get the last inode value, see if there's nothing to do. |
387 | */ | 238 | */ |
388 | ino = (xfs_ino_t)*lastinop; | 239 | ino = (xfs_ino_t)*lastinop; |
389 | lastino = ino; | 240 | lastino = ino; |
390 | dip = NULL; | ||
391 | agno = XFS_INO_TO_AGNO(mp, ino); | 241 | agno = XFS_INO_TO_AGNO(mp, ino); |
392 | agino = XFS_INO_TO_AGINO(mp, ino); | 242 | agino = XFS_INO_TO_AGINO(mp, ino); |
393 | if (agno >= mp->m_sb.sb_agcount || | 243 | if (agno >= mp->m_sb.sb_agcount || |
@@ -612,37 +462,6 @@ xfs_bulkstat( | |||
612 | irbp->ir_startino) + | 462 | irbp->ir_startino) + |
613 | ((chunkidx & nimask) >> | 463 | ((chunkidx & nimask) >> |
614 | mp->m_sb.sb_inopblog); | 464 | mp->m_sb.sb_inopblog); |
615 | |||
616 | if (flags & (BULKSTAT_FG_QUICK | | ||
617 | BULKSTAT_FG_INLINE)) { | ||
618 | int offset; | ||
619 | |||
620 | ino = XFS_AGINO_TO_INO(mp, agno, | ||
621 | agino); | ||
622 | bno = XFS_AGB_TO_DADDR(mp, agno, | ||
623 | agbno); | ||
624 | |||
625 | /* | ||
626 | * Get the inode cluster buffer | ||
627 | */ | ||
628 | if (bp) | ||
629 | xfs_buf_relse(bp); | ||
630 | |||
631 | error = xfs_inotobp(mp, NULL, ino, &dip, | ||
632 | &bp, &offset, | ||
633 | XFS_IGET_BULKSTAT); | ||
634 | |||
635 | if (!error) | ||
636 | clustidx = offset / mp->m_sb.sb_inodesize; | ||
637 | if (XFS_TEST_ERROR(error != 0, | ||
638 | mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, | ||
639 | XFS_RANDOM_BULKSTAT_READ_CHUNK)) { | ||
640 | bp = NULL; | ||
641 | ubleft = 0; | ||
642 | rval = error; | ||
643 | break; | ||
644 | } | ||
645 | } | ||
646 | } | 465 | } |
647 | ino = XFS_AGINO_TO_INO(mp, agno, agino); | 466 | ino = XFS_AGINO_TO_INO(mp, agno, agino); |
648 | bno = XFS_AGB_TO_DADDR(mp, agno, agbno); | 467 | bno = XFS_AGB_TO_DADDR(mp, agno, agbno); |
@@ -658,35 +477,13 @@ xfs_bulkstat( | |||
658 | * when the chunk is used up. | 477 | * when the chunk is used up. |
659 | */ | 478 | */ |
660 | irbp->ir_freecount++; | 479 | irbp->ir_freecount++; |
661 | if (!xfs_bulkstat_use_dinode(mp, flags, bp, | ||
662 | clustidx, &dip)) { | ||
663 | lastino = ino; | ||
664 | continue; | ||
665 | } | ||
666 | /* | ||
667 | * If we need to do an iget, cannot hold bp. | ||
668 | * Drop it, until starting the next cluster. | ||
669 | */ | ||
670 | if ((flags & BULKSTAT_FG_INLINE) && !dip) { | ||
671 | if (bp) | ||
672 | xfs_buf_relse(bp); | ||
673 | bp = NULL; | ||
674 | } | ||
675 | 480 | ||
676 | /* | 481 | /* |
677 | * Get the inode and fill in a single buffer. | 482 | * Get the inode and fill in a single buffer. |
678 | * BULKSTAT_FG_QUICK uses dip to fill it in. | ||
679 | * BULKSTAT_FG_IGET uses igets. | ||
680 | * BULKSTAT_FG_INLINE uses dip if we have an | ||
681 | * inline attr fork, else igets. | ||
682 | * See: xfs_bulkstat_one & xfs_dm_bulkstat_one. | ||
683 | * This is also used to count inodes/blks, etc | ||
684 | * in xfs_qm_quotacheck. | ||
685 | */ | 483 | */ |
686 | ubused = statstruct_size; | 484 | ubused = statstruct_size; |
687 | error = formatter(mp, ino, ubufp, | 485 | error = formatter(mp, ino, ubufp, ubleft, |
688 | ubleft, private_data, | 486 | &ubused, &fmterror); |
689 | bno, &ubused, dip, &fmterror); | ||
690 | if (fmterror == BULKSTAT_RV_NOTHING) { | 487 | if (fmterror == BULKSTAT_RV_NOTHING) { |
691 | if (error && error != ENOENT && | 488 | if (error && error != ENOENT && |
692 | error != EINVAL) { | 489 | error != EINVAL) { |
@@ -778,8 +575,7 @@ xfs_bulkstat_single( | |||
778 | */ | 575 | */ |
779 | 576 | ||
780 | ino = (xfs_ino_t)*lastinop; | 577 | ino = (xfs_ino_t)*lastinop; |
781 | error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), | 578 | error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res); |
782 | NULL, 0, NULL, NULL, &res); | ||
783 | if (error) { | 579 | if (error) { |
784 | /* | 580 | /* |
785 | * Special case way failed, do it the "long" way | 581 | * Special case way failed, do it the "long" way |
@@ -788,8 +584,7 @@ xfs_bulkstat_single( | |||
788 | (*lastinop)--; | 584 | (*lastinop)--; |
789 | count = 1; | 585 | count = 1; |
790 | if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, | 586 | if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, |
791 | NULL, sizeof(xfs_bstat_t), buffer, | 587 | sizeof(xfs_bstat_t), buffer, done)) |
792 | BULKSTAT_FG_IGET, done)) | ||
793 | return error; | 588 | return error; |
794 | if (count == 0 || (xfs_ino_t)*lastinop != ino) | 589 | if (count == 0 || (xfs_ino_t)*lastinop != ino) |
795 | return error == EFSCORRUPTED ? | 590 | return error == EFSCORRUPTED ? |
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index 20792bf45946..97295d91d170 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h | |||
@@ -27,10 +27,7 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, | |||
27 | xfs_ino_t ino, | 27 | xfs_ino_t ino, |
28 | void __user *buffer, | 28 | void __user *buffer, |
29 | int ubsize, | 29 | int ubsize, |
30 | void *private_data, | ||
31 | xfs_daddr_t bno, | ||
32 | int *ubused, | 30 | int *ubused, |
33 | void *dip, | ||
34 | int *stat); | 31 | int *stat); |
35 | 32 | ||
36 | /* | 33 | /* |
@@ -41,13 +38,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, | |||
41 | #define BULKSTAT_RV_GIVEUP 2 | 38 | #define BULKSTAT_RV_GIVEUP 2 |
42 | 39 | ||
43 | /* | 40 | /* |
44 | * Values for bulkstat flag argument. | ||
45 | */ | ||
46 | #define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */ | ||
47 | #define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */ | ||
48 | #define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */ | ||
49 | |||
50 | /* | ||
51 | * Return stat information in bulk (by-inode) for the filesystem. | 41 | * Return stat information in bulk (by-inode) for the filesystem. |
52 | */ | 42 | */ |
53 | int /* error status */ | 43 | int /* error status */ |
@@ -56,10 +46,8 @@ xfs_bulkstat( | |||
56 | xfs_ino_t *lastino, /* last inode returned */ | 46 | xfs_ino_t *lastino, /* last inode returned */ |
57 | int *count, /* size of buffer/count returned */ | 47 | int *count, /* size of buffer/count returned */ |
58 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ | 48 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ |
59 | void *private_data, /* private data for formatter */ | ||
60 | size_t statstruct_size,/* sizeof struct that we're filling */ | 49 | size_t statstruct_size,/* sizeof struct that we're filling */ |
61 | char __user *ubuffer,/* buffer with inode stats */ | 50 | char __user *ubuffer,/* buffer with inode stats */ |
62 | int flags, /* flag to control access method */ | ||
63 | int *done); /* 1 if there are more stats to get */ | 51 | int *done); /* 1 if there are more stats to get */ |
64 | 52 | ||
65 | int | 53 | int |
@@ -82,9 +70,7 @@ xfs_bulkstat_one_int( | |||
82 | void __user *buffer, | 70 | void __user *buffer, |
83 | int ubsize, | 71 | int ubsize, |
84 | bulkstat_one_fmt_pf formatter, | 72 | bulkstat_one_fmt_pf formatter, |
85 | xfs_daddr_t bno, | ||
86 | int *ubused, | 73 | int *ubused, |
87 | void *dibuff, | ||
88 | int *stat); | 74 | int *stat); |
89 | 75 | ||
90 | int | 76 | int |
@@ -93,10 +79,7 @@ xfs_bulkstat_one( | |||
93 | xfs_ino_t ino, | 79 | xfs_ino_t ino, |
94 | void __user *buffer, | 80 | void __user *buffer, |
95 | int ubsize, | 81 | int ubsize, |
96 | void *private_data, | ||
97 | xfs_daddr_t bno, | ||
98 | int *ubused, | 82 | int *ubused, |
99 | void *dibuff, | ||
100 | int *stat); | 83 | int *stat); |
101 | 84 | ||
102 | typedef int (*inumbers_fmt_pf)( | 85 | typedef int (*inumbers_fmt_pf)( |
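Note: with private_data, bno, dibuff and the access-method flags gone, the bulkstat prototypes reduce to the formatter, the per-record size and the user buffer. A hedged sketch of a caller against the new prototypes (nbufs and ubuffer stand in for caller-supplied values):

	xfs_ino_t	lastino = 0;	/* resume cookie, updated by the call */
	int		count = nbufs;	/* in: buffer slots; out: records filled */
	int		done;

	error = xfs_bulkstat(mp, &lastino, &count, xfs_bulkstat_one,
			     sizeof(xfs_bstat_t), ubuffer, &done);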
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index ed0684cc50ee..9ac5cfab27b9 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -3198,7 +3198,7 @@ xlog_recover_process_one_iunlink( | |||
3198 | int error; | 3198 | int error; |
3199 | 3199 | ||
3200 | ino = XFS_AGINO_TO_INO(mp, agno, agino); | 3200 | ino = XFS_AGINO_TO_INO(mp, agno, agino); |
3201 | error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0); | 3201 | error = xfs_iget(mp, NULL, ino, 0, 0, &ip); |
3202 | if (error) | 3202 | if (error) |
3203 | goto fail; | 3203 | goto fail; |
3204 | 3204 | ||
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index d59f4e8bedcf..69f62d8b2816 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -1300,7 +1300,7 @@ xfs_mountfs( | |||
1300 | * Get and sanity-check the root inode. | 1300 | * Get and sanity-check the root inode. |
1301 | * Save the pointer to it in the mount structure. | 1301 | * Save the pointer to it in the mount structure. |
1302 | */ | 1302 | */ |
1303 | error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0); | 1303 | error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip); |
1304 | if (error) { | 1304 | if (error) { |
1305 | cmn_err(CE_WARN, "XFS: failed to read root inode"); | 1305 | cmn_err(CE_WARN, "XFS: failed to read root inode"); |
1306 | goto out_log_dealloc; | 1306 | goto out_log_dealloc; |
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 16445518506d..a2d32ce335aa 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -2277,12 +2277,12 @@ xfs_rtmount_inodes( | |||
2277 | sbp = &mp->m_sb; | 2277 | sbp = &mp->m_sb; |
2278 | if (sbp->sb_rbmino == NULLFSINO) | 2278 | if (sbp->sb_rbmino == NULLFSINO) |
2279 | return 0; | 2279 | return 0; |
2280 | error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0); | 2280 | error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip); |
2281 | if (error) | 2281 | if (error) |
2282 | return error; | 2282 | return error; |
2283 | ASSERT(mp->m_rbmip != NULL); | 2283 | ASSERT(mp->m_rbmip != NULL); |
2284 | ASSERT(sbp->sb_rsumino != NULLFSINO); | 2284 | ASSERT(sbp->sb_rsumino != NULLFSINO); |
2285 | error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0); | 2285 | error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip); |
2286 | if (error) { | 2286 | if (error) { |
2287 | IRELE(mp->m_rbmip); | 2287 | IRELE(mp->m_rbmip); |
2288 | return error; | 2288 | return error; |
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 785ff101da0a..2559dfec946b 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c | |||
@@ -62,7 +62,7 @@ xfs_trans_iget( | |||
62 | { | 62 | { |
63 | int error; | 63 | int error; |
64 | 64 | ||
65 | error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0); | 65 | error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp); |
66 | if (!error && tp) | 66 | if (!error && tp) |
67 | xfs_trans_ijoin(tp, *ipp, lock_flags); | 67 | xfs_trans_ijoin(tp, *ipp, lock_flags); |
68 | return error; | 68 | return error; |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index a06bd62504fc..c1646838898f 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -1269,7 +1269,7 @@ xfs_lookup( | |||
1269 | if (error) | 1269 | if (error) |
1270 | goto out; | 1270 | goto out; |
1271 | 1271 | ||
1272 | error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); | 1272 | error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp); |
1273 | if (error) | 1273 | if (error) |
1274 | goto out_free_name; | 1274 | goto out_free_name; |
1275 | 1275 | ||
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 9101ed64f803..09ea4a1e9505 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h | |||
@@ -79,7 +79,6 @@ struct agp_memory { | |||
79 | u32 physical; | 79 | u32 physical; |
80 | bool is_bound; | 80 | bool is_bound; |
81 | bool is_flushed; | 81 | bool is_flushed; |
82 | bool vmalloc_flag; | ||
83 | /* list of agp_memory mapped to the aperture */ | 82 | /* list of agp_memory mapped to the aperture */ |
84 | struct list_head mapped_list; | 83 | struct list_head mapped_list; |
85 | /* DMA-mapped addresses */ | 84 | /* DMA-mapped addresses */ |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index aee5f6ce166e..9ae2889096b6 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -105,8 +105,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, | |||
105 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); | 105 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); |
106 | void bdi_unregister(struct backing_dev_info *bdi); | 106 | void bdi_unregister(struct backing_dev_info *bdi); |
107 | int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); | 107 | int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); |
108 | void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, | 108 | void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); |
109 | long nr_pages); | 109 | void bdi_start_background_writeback(struct backing_dev_info *bdi); |
110 | int bdi_writeback_task(struct bdi_writeback *wb); | 110 | int bdi_writeback_task(struct bdi_writeback *wb); |
111 | int bdi_has_dirty_io(struct backing_dev_info *bdi); | 111 | int bdi_has_dirty_io(struct backing_dev_info *bdi); |
112 | void bdi_arm_supers_timer(void); | 112 | void bdi_arm_supers_timer(void); |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 73dcf804bc94..0da5b187f124 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -58,8 +58,12 @@ | |||
58 | * naked functions because then mcount is called without stack and frame pointer | 58 | * naked functions because then mcount is called without stack and frame pointer |
59 | * being set up and there is no chance to restore the lr register to the value | 59 | * being set up and there is no chance to restore the lr register to the value |
60 | * before mcount was called. | 60 | * before mcount was called. |
61 | * | ||
62 | * The asm() bodies of naked functions often depend on standard calling conventions, | ||
63 | * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce | ||
64 | * this, so we must do so ourselves. See GCC PR44290. | ||
61 | */ | 65 | */ |
62 | #define __naked __attribute__((naked)) notrace | 66 | #define __naked __attribute__((naked)) noinline __noclone notrace |
63 | 67 | ||
64 | #define __noreturn __attribute__((noreturn)) | 68 | #define __noreturn __attribute__((noreturn)) |
65 | 69 | ||
@@ -85,3 +89,7 @@ | |||
85 | #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) | 89 | #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) |
86 | #define gcc_header(x) _gcc_header(x) | 90 | #define gcc_header(x) _gcc_header(x) |
87 | #include gcc_header(__GNUC__) | 91 | #include gcc_header(__GNUC__) |
92 | |||
93 | #if !defined(__noclone) | ||
94 | #define __noclone /* not needed */ | ||
95 | #endif | ||
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 94dea3ffbfa1..fcfa5b9a4317 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h | |||
@@ -48,6 +48,10 @@ | |||
48 | * unreleased. Really, we need to have autoconf for the kernel. | 48 | * unreleased. Really, we need to have autoconf for the kernel. |
49 | */ | 49 | */ |
50 | #define unreachable() __builtin_unreachable() | 50 | #define unreachable() __builtin_unreachable() |
51 | |||
52 | /* Mark a function definition as prohibited from being cloned. */ | ||
53 | #define __noclone __attribute__((__noclone__)) | ||
54 | |||
51 | #endif | 55 | #endif |
52 | 56 | ||
53 | #endif | 57 | #endif |
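Note: with __noclone available on GCC 4 and stubbed out elsewhere, the widened __naked definition keeps the compiler from inlining or cloning functions whose bodies are hand-written asm and therefore rely on the standard calling convention. A minimal, hypothetical ARM-flavoured illustration (not taken from the patch):

	/* Body is asm only; inlining or cloning it would break the assumed
	 * register/return conventions, which noinline + __noclone now rule out. */
	static void __naked return_thunk(void)
	{
		asm volatile("bx lr");
	}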
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 30da4ae48972..b8d2516668aa 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
@@ -53,7 +53,7 @@ | |||
53 | 53 | ||
54 | 54 | ||
55 | extern const char *drbd_buildtag(void); | 55 | extern const char *drbd_buildtag(void); |
56 | #define REL_VERSION "8.3.8rc2" | 56 | #define REL_VERSION "8.3.8" |
57 | #define API_VERSION 88 | 57 | #define API_VERSION 88 |
58 | #define PRO_VERSION_MIN 86 | 58 | #define PRO_VERSION_MIN 86 |
59 | #define PRO_VERSION_MAX 94 | 59 | #define PRO_VERSION_MAX 94 |
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index b3cd4de9432b..52c0da4bdd18 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -40,7 +40,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, | |||
40 | const char *modname); | 40 | const char *modname); |
41 | 41 | ||
42 | #if defined(CONFIG_DYNAMIC_DEBUG) | 42 | #if defined(CONFIG_DYNAMIC_DEBUG) |
43 | extern int ddebug_remove_module(char *mod_name); | 43 | extern int ddebug_remove_module(const char *mod_name); |
44 | 44 | ||
45 | #define __dynamic_dbg_enabled(dd) ({ \ | 45 | #define __dynamic_dbg_enabled(dd) ({ \ |
46 | int __ret = 0; \ | 46 | int __ret = 0; \ |
@@ -73,7 +73,7 @@ extern int ddebug_remove_module(char *mod_name); | |||
73 | 73 | ||
74 | #else | 74 | #else |
75 | 75 | ||
76 | static inline int ddebug_remove_module(char *mod) | 76 | static inline int ddebug_remove_module(const char *mod) |
77 | { | 77 | { |
78 | return 0; | 78 | return 0; |
79 | } | 79 | } |
diff --git a/include/linux/fb.h b/include/linux/fb.h index 907ace3a64c8..8e5a9dfb76bf 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -786,8 +786,6 @@ struct fb_tile_ops { | |||
786 | #define FBINFO_MISC_USEREVENT 0x10000 /* event request | 786 | #define FBINFO_MISC_USEREVENT 0x10000 /* event request |
787 | from userspace */ | 787 | from userspace */ |
788 | #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ | 788 | #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ |
789 | #define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware | ||
790 | inited framebuffer */ | ||
791 | 789 | ||
792 | /* A driver may set this flag to indicate that it does want a set_par to be | 790 | /* A driver may set this flag to indicate that it does want a set_par to be |
793 | * called every time when fbcon_switch is executed. The advantage is that with | 791 | * called every time when fbcon_switch is executed. The advantage is that with |
@@ -801,6 +799,8 @@ struct fb_tile_ops { | |||
801 | */ | 799 | */ |
802 | #define FBINFO_MISC_ALWAYS_SETPAR 0x40000 | 800 | #define FBINFO_MISC_ALWAYS_SETPAR 0x40000 |
803 | 801 | ||
802 | /* where the fb is a firmware driver, and can be replaced with a proper one */ | ||
803 | #define FBINFO_MISC_FIRMWARE 0x80000 | ||
804 | /* | 804 | /* |
805 | * Host and GPU endianness differ. | 805 | * Host and GPU endianness differ. |
806 | */ | 806 | */ |
diff --git a/include/linux/list.h b/include/linux/list.h index 8392884a2977..5d57a3a1fa1b 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -544,6 +544,21 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
544 | &pos->member != (head); \ | 544 | &pos->member != (head); \ |
545 | pos = n, n = list_entry(n->member.prev, typeof(*n), member)) | 545 | pos = n, n = list_entry(n->member.prev, typeof(*n), member)) |
546 | 546 | ||
547 | /** | ||
548 | * list_safe_reset_next - reset a stale list_for_each_entry_safe loop | ||
549 | * @pos: the loop cursor used in the list_for_each_entry_safe loop | ||
550 | * @n: temporary storage used in list_for_each_entry_safe | ||
551 | * @member: the name of the list_struct within the struct. | ||
552 | * | ||
553 | * list_safe_reset_next is not safe to use in general if the list may be | ||
554 | * modified concurrently (eg. the lock is dropped in the loop body). An | ||
555 | * exception to this is if the cursor element (pos) is pinned in the list, | ||
556 | * and list_safe_reset_next is called after re-taking the lock and before | ||
557 | * completing the current iteration of the loop body. | ||
558 | */ | ||
559 | #define list_safe_reset_next(pos, n, member) \ | ||
560 | n = list_entry(pos->member.next, typeof(*pos), member) | ||
561 | |||
547 | /* | 562 | /* |
548 | * Double linked lists with a single pointer list head. | 563 | * Double linked lists with a single pointer list head. |
549 | * Mostly useful for hash tables where the two pointer list head is | 564 | * Mostly useful for hash tables where the two pointer list head is |
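Note: the new list_safe_reset_next() covers exactly the case its kerneldoc describes — the lock protecting the list is dropped inside a list_for_each_entry_safe() body while the cursor element stays pinned, so the cached next pointer may be stale once the lock is re-taken. A sketch of that pattern (the item type, lock and helpers are illustrative, not from the patch):

	struct item {
		struct list_head	list;
		/* ... */
	};

	struct item *pos, *n;

	spin_lock(&head_lock);
	list_for_each_entry_safe(pos, n, &head, list) {
		if (needs_slow_work(pos)) {
			get_item(pos);			/* pin pos */
			spin_unlock(&head_lock);
			do_slow_work(pos);		/* list may change here */
			spin_lock(&head_lock);
			list_safe_reset_next(pos, n, list);	/* refresh stale n */
			put_item(pos);
		}
	}
	spin_unlock(&head_lock);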
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4eb467910a45..3bedcc149c84 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1261,6 +1261,7 @@ | |||
1261 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 | 1261 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 |
1262 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 | 1262 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 |
1263 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 | 1263 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 |
1264 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 | ||
1264 | 1265 | ||
1265 | #define PCI_VENDOR_ID_IMS 0x10e0 | 1266 | #define PCI_VENDOR_ID_IMS 0x10e0 |
1266 | #define PCI_DEVICE_ID_IMS_TT128 0x9128 | 1267 | #define PCI_DEVICE_ID_IMS_TT128 0x9128 |
diff --git a/include/linux/sched.h b/include/linux/sched.h index f118809c953f..747fcaedddb7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -139,7 +139,7 @@ extern int nr_processes(void); | |||
139 | extern unsigned long nr_running(void); | 139 | extern unsigned long nr_running(void); |
140 | extern unsigned long nr_uninterruptible(void); | 140 | extern unsigned long nr_uninterruptible(void); |
141 | extern unsigned long nr_iowait(void); | 141 | extern unsigned long nr_iowait(void); |
142 | extern unsigned long nr_iowait_cpu(void); | 142 | extern unsigned long nr_iowait_cpu(int cpu); |
143 | extern unsigned long this_cpu_load(void); | 143 | extern unsigned long this_cpu_load(void); |
144 | 144 | ||
145 | 145 | ||
diff --git a/init/main.c b/init/main.c index e407a05adc29..4ab5124a2952 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -422,18 +422,26 @@ static void __init setup_command_line(char *command_line) | |||
422 | * gcc-3.4 accidentally inlines this function, so use noinline. | 422 | * gcc-3.4 accidentally inlines this function, so use noinline. |
423 | */ | 423 | */ |
424 | 424 | ||
425 | static __initdata DECLARE_COMPLETION(kthreadd_done); | ||
426 | |||
425 | static noinline void __init_refok rest_init(void) | 427 | static noinline void __init_refok rest_init(void) |
426 | __releases(kernel_lock) | 428 | __releases(kernel_lock) |
427 | { | 429 | { |
428 | int pid; | 430 | int pid; |
429 | 431 | ||
430 | rcu_scheduler_starting(); | 432 | rcu_scheduler_starting(); |
433 | /* | ||
434 | * We need to spawn init first so that it obtains pid 1, however | ||
435 | * the init task will end up wanting to create kthreads, which, if | ||
436 | * we schedule it before we create kthreadd, will OOPS. | ||
437 | */ | ||
431 | kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); | 438 | kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); |
432 | numa_default_policy(); | 439 | numa_default_policy(); |
433 | pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); | 440 | pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); |
434 | rcu_read_lock(); | 441 | rcu_read_lock(); |
435 | kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); | 442 | kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); |
436 | rcu_read_unlock(); | 443 | rcu_read_unlock(); |
444 | complete(&kthreadd_done); | ||
437 | unlock_kernel(); | 445 | unlock_kernel(); |
438 | 446 | ||
439 | /* | 447 | /* |
@@ -849,6 +857,10 @@ static noinline int init_post(void) | |||
849 | 857 | ||
850 | static int __init kernel_init(void * unused) | 858 | static int __init kernel_init(void * unused) |
851 | { | 859 | { |
860 | /* | ||
861 | * Wait until kthreadd is all set-up. | ||
862 | */ | ||
863 | wait_for_completion(&kthreadd_done); | ||
852 | lock_kernel(); | 864 | lock_kernel(); |
853 | 865 | ||
854 | /* | 866 | /* |
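Note: the init/main.c change is plain completion-based ordering — rest_init() signals kthreadd_done once kthreadd exists, and kernel_init() waits on it before doing anything that could create kthreads. The same shape in isolation (names below are illustrative, not the init/main.c symbols):

	static DECLARE_COMPLETION(producer_ready);

	static int consumer_thread(void *unused)
	{
		wait_for_completion(&producer_ready);	/* block until signalled */
		/* ... safe to rely on the producer's setup from here on ... */
		return 0;
	}

	static void producer_setup(void)
	{
		/* ... finish the setup the consumer depends on ... */
		complete(&producer_ready);		/* wake any waiter */
	}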
diff --git a/kernel/futex.c b/kernel/futex.c index e7a35f1039e7..6a3a5fa1526d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -429,20 +429,11 @@ static void free_pi_state(struct futex_pi_state *pi_state) | |||
429 | static struct task_struct * futex_find_get_task(pid_t pid) | 429 | static struct task_struct * futex_find_get_task(pid_t pid) |
430 | { | 430 | { |
431 | struct task_struct *p; | 431 | struct task_struct *p; |
432 | const struct cred *cred = current_cred(), *pcred; | ||
433 | 432 | ||
434 | rcu_read_lock(); | 433 | rcu_read_lock(); |
435 | p = find_task_by_vpid(pid); | 434 | p = find_task_by_vpid(pid); |
436 | if (!p) { | 435 | if (p) |
437 | p = ERR_PTR(-ESRCH); | 436 | get_task_struct(p); |
438 | } else { | ||
439 | pcred = __task_cred(p); | ||
440 | if (cred->euid != pcred->euid && | ||
441 | cred->euid != pcred->uid) | ||
442 | p = ERR_PTR(-ESRCH); | ||
443 | else | ||
444 | get_task_struct(p); | ||
445 | } | ||
446 | 437 | ||
447 | rcu_read_unlock(); | 438 | rcu_read_unlock(); |
448 | 439 | ||
@@ -564,8 +555,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | |||
564 | if (!pid) | 555 | if (!pid) |
565 | return -ESRCH; | 556 | return -ESRCH; |
566 | p = futex_find_get_task(pid); | 557 | p = futex_find_get_task(pid); |
567 | if (IS_ERR(p)) | 558 | if (!p) |
568 | return PTR_ERR(p); | 559 | return -ESRCH; |
569 | 560 | ||
570 | /* | 561 | /* |
571 | * We need to look at the task state flags to figure out, | 562 | * We need to look at the task state flags to figure out, |
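Note: dropping the euid check also simplifies the return convention — futex_find_get_task() now hands back either a referenced task or NULL, so lookup_pi_state() tests the pointer directly instead of unpacking an ERR_PTR. At a call site the contract looks roughly like this:

	struct task_struct *p;

	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;		/* no such task */
	/* ... p carries a reference taken by the lookup ... */
	put_task_struct(p);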
diff --git a/kernel/kexec.c b/kernel/kexec.c index 474a84715eac..131b1703936f 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -1089,9 +1089,10 @@ void crash_kexec(struct pt_regs *regs) | |||
1089 | 1089 | ||
1090 | size_t crash_get_memory_size(void) | 1090 | size_t crash_get_memory_size(void) |
1091 | { | 1091 | { |
1092 | size_t size; | 1092 | size_t size = 0; |
1093 | mutex_lock(&kexec_mutex); | 1093 | mutex_lock(&kexec_mutex); |
1094 | size = crashk_res.end - crashk_res.start + 1; | 1094 | if (crashk_res.end != crashk_res.start) |
1095 | size = crashk_res.end - crashk_res.start + 1; | ||
1095 | mutex_unlock(&kexec_mutex); | 1096 | mutex_unlock(&kexec_mutex); |
1096 | return size; | 1097 | return size; |
1097 | } | 1098 | } |
@@ -1134,7 +1135,7 @@ int crash_shrink_memory(unsigned long new_size) | |||
1134 | 1135 | ||
1135 | free_reserved_phys_range(end, crashk_res.end); | 1136 | free_reserved_phys_range(end, crashk_res.end); |
1136 | 1137 | ||
1137 | if (start == end) | 1138 | if ((start == end) && (crashk_res.parent != NULL)) |
1138 | release_resource(&crashk_res); | 1139 | release_resource(&crashk_res); |
1139 | crashk_res.end = end - 1; | 1140 | crashk_res.end = end - 1; |
1140 | 1141 | ||
diff --git a/kernel/module.c b/kernel/module.c index 8c6b42840dd1..5d2d28197c82 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2062,6 +2062,12 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num) | |||
2062 | #endif | 2062 | #endif |
2063 | } | 2063 | } |
2064 | 2064 | ||
2065 | static void dynamic_debug_remove(struct _ddebug *debug) | ||
2066 | { | ||
2067 | if (debug) | ||
2068 | ddebug_remove_module(debug->modname); | ||
2069 | } | ||
2070 | |||
2065 | static void *module_alloc_update_bounds(unsigned long size) | 2071 | static void *module_alloc_update_bounds(unsigned long size) |
2066 | { | 2072 | { |
2067 | void *ret = module_alloc(size); | 2073 | void *ret = module_alloc(size); |
@@ -2124,6 +2130,8 @@ static noinline struct module *load_module(void __user *umod, | |||
2124 | void *ptr = NULL; /* Stops spurious gcc warning */ | 2130 | void *ptr = NULL; /* Stops spurious gcc warning */ |
2125 | unsigned long symoffs, stroffs, *strmap; | 2131 | unsigned long symoffs, stroffs, *strmap; |
2126 | void __percpu *percpu; | 2132 | void __percpu *percpu; |
2133 | struct _ddebug *debug = NULL; | ||
2134 | unsigned int num_debug = 0; | ||
2127 | 2135 | ||
2128 | mm_segment_t old_fs; | 2136 | mm_segment_t old_fs; |
2129 | 2137 | ||
@@ -2476,15 +2484,9 @@ static noinline struct module *load_module(void __user *umod, | |||
2476 | kfree(strmap); | 2484 | kfree(strmap); |
2477 | strmap = NULL; | 2485 | strmap = NULL; |
2478 | 2486 | ||
2479 | if (!mod->taints) { | 2487 | if (!mod->taints) |
2480 | struct _ddebug *debug; | ||
2481 | unsigned int num_debug; | ||
2482 | |||
2483 | debug = section_objs(hdr, sechdrs, secstrings, "__verbose", | 2488 | debug = section_objs(hdr, sechdrs, secstrings, "__verbose", |
2484 | sizeof(*debug), &num_debug); | 2489 | sizeof(*debug), &num_debug); |
2485 | if (debug) | ||
2486 | dynamic_debug_setup(debug, num_debug); | ||
2487 | } | ||
2488 | 2490 | ||
2489 | err = module_finalize(hdr, sechdrs, mod); | 2491 | err = module_finalize(hdr, sechdrs, mod); |
2490 | if (err < 0) | 2492 | if (err < 0) |
@@ -2526,10 +2528,13 @@ static noinline struct module *load_module(void __user *umod, | |||
2526 | goto unlock; | 2528 | goto unlock; |
2527 | } | 2529 | } |
2528 | 2530 | ||
2531 | if (debug) | ||
2532 | dynamic_debug_setup(debug, num_debug); | ||
2533 | |||
2529 | /* Find duplicate symbols */ | 2534 | /* Find duplicate symbols */ |
2530 | err = verify_export_symbols(mod); | 2535 | err = verify_export_symbols(mod); |
2531 | if (err < 0) | 2536 | if (err < 0) |
2532 | goto unlock; | 2537 | goto ddebug; |
2533 | 2538 | ||
2534 | list_add_rcu(&mod->list, &modules); | 2539 | list_add_rcu(&mod->list, &modules); |
2535 | mutex_unlock(&module_mutex); | 2540 | mutex_unlock(&module_mutex); |
@@ -2557,6 +2562,8 @@ static noinline struct module *load_module(void __user *umod, | |||
2557 | mutex_lock(&module_mutex); | 2562 | mutex_lock(&module_mutex); |
2558 | /* Unlink carefully: kallsyms could be walking list. */ | 2563 | /* Unlink carefully: kallsyms could be walking list. */ |
2559 | list_del_rcu(&mod->list); | 2564 | list_del_rcu(&mod->list); |
2565 | ddebug: | ||
2566 | dynamic_debug_remove(debug); | ||
2560 | unlock: | 2567 | unlock: |
2561 | mutex_unlock(&module_mutex); | 2568 | mutex_unlock(&module_mutex); |
2562 | synchronize_sched(); | 2569 | synchronize_sched(); |
diff --git a/kernel/sched.c b/kernel/sched.c index 8c473adbf223..265cf3a2b5d8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -2873,9 +2873,9 @@ unsigned long nr_iowait(void) | |||
2873 | return sum; | 2873 | return sum; |
2874 | } | 2874 | } |
2875 | 2875 | ||
2876 | unsigned long nr_iowait_cpu(void) | 2876 | unsigned long nr_iowait_cpu(int cpu) |
2877 | { | 2877 | { |
2878 | struct rq *this = this_rq(); | 2878 | struct rq *this = cpu_rq(cpu); |
2879 | return atomic_read(&this->nr_iowait); | 2879 | return atomic_read(&this->nr_iowait); |
2880 | } | 2880 | } |
2881 | 2881 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 783fbadf2202..813993b5fb61 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now) | |||
154 | * Updates the per cpu time idle statistics counters | 154 | * Updates the per cpu time idle statistics counters |
155 | */ | 155 | */ |
156 | static void | 156 | static void |
157 | update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time) | 157 | update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) |
158 | { | 158 | { |
159 | ktime_t delta; | 159 | ktime_t delta; |
160 | 160 | ||
161 | if (ts->idle_active) { | 161 | if (ts->idle_active) { |
162 | delta = ktime_sub(now, ts->idle_entrytime); | 162 | delta = ktime_sub(now, ts->idle_entrytime); |
163 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | 163 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); |
164 | if (nr_iowait_cpu() > 0) | 164 | if (nr_iowait_cpu(cpu) > 0) |
165 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); | 165 | ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); |
166 | ts->idle_entrytime = now; | 166 | ts->idle_entrytime = now; |
167 | } | 167 | } |
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now) | |||
175 | { | 175 | { |
176 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 176 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
177 | 177 | ||
178 | update_ts_time_stats(ts, now, NULL); | 178 | update_ts_time_stats(cpu, ts, now, NULL); |
179 | ts->idle_active = 0; | 179 | ts->idle_active = 0; |
180 | 180 | ||
181 | sched_clock_idle_wakeup_event(0); | 181 | sched_clock_idle_wakeup_event(0); |
182 | } | 182 | } |
183 | 183 | ||
184 | static ktime_t tick_nohz_start_idle(struct tick_sched *ts) | 184 | static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) |
185 | { | 185 | { |
186 | ktime_t now; | 186 | ktime_t now; |
187 | 187 | ||
188 | now = ktime_get(); | 188 | now = ktime_get(); |
189 | 189 | ||
190 | update_ts_time_stats(ts, now, NULL); | 190 | update_ts_time_stats(cpu, ts, now, NULL); |
191 | 191 | ||
192 | ts->idle_entrytime = now; | 192 | ts->idle_entrytime = now; |
193 | ts->idle_active = 1; | 193 | ts->idle_active = 1; |
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) | |||
216 | if (!tick_nohz_enabled) | 216 | if (!tick_nohz_enabled) |
217 | return -1; | 217 | return -1; |
218 | 218 | ||
219 | update_ts_time_stats(ts, ktime_get(), last_update_time); | 219 | update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); |
220 | 220 | ||
221 | return ktime_to_us(ts->idle_sleeptime); | 221 | return ktime_to_us(ts->idle_sleeptime); |
222 | } | 222 | } |
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) | |||
242 | if (!tick_nohz_enabled) | 242 | if (!tick_nohz_enabled) |
243 | return -1; | 243 | return -1; |
244 | 244 | ||
245 | update_ts_time_stats(ts, ktime_get(), last_update_time); | 245 | update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); |
246 | 246 | ||
247 | return ktime_to_us(ts->iowait_sleeptime); | 247 | return ktime_to_us(ts->iowait_sleeptime); |
248 | } | 248 | } |
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
284 | */ | 284 | */ |
285 | ts->inidle = 1; | 285 | ts->inidle = 1; |
286 | 286 | ||
287 | now = tick_nohz_start_idle(ts); | 287 | now = tick_nohz_start_idle(cpu, ts); |
288 | 288 | ||
289 | /* | 289 | /* |
290 | * If this cpu is offline and it is the one which updates | 290 | * If this cpu is offline and it is the one which updates |
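Note: passing the CPU into nr_iowait_cpu() lets update_ts_time_stats() charge iowait sleep time to the CPU whose tick_sched is being updated, not whichever CPU happens to run the update. In sketch form (account_iowait() is a hypothetical helper, not part of the patch):

	int cpu = smp_processor_id();

	if (nr_iowait_cpu(cpu) > 0)
		account_iowait(cpu);	/* a task on this CPU is blocked on I/O */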
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 3df8eb17a607..02afc2533728 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
@@ -692,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt) | |||
692 | * Called in response to a module being unloaded. Removes | 692 | * Called in response to a module being unloaded. Removes |
693 | * any ddebug_table's which point at the module. | 693 | * any ddebug_table's which point at the module. |
694 | */ | 694 | */ |
695 | int ddebug_remove_module(char *mod_name) | 695 | int ddebug_remove_module(const char *mod_name) |
696 | { | 696 | { |
697 | struct ddebug_table *dt, *nextdt; | 697 | struct ddebug_table *dt, *nextdt; |
698 | int ret = -ENOENT; | 698 | int ret = -ENOENT; |
diff --git a/lib/genalloc.c b/lib/genalloc.c index 736c3b06398e..1923f1490e72 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
@@ -128,7 +128,6 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | |||
128 | chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); | 128 | chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); |
129 | 129 | ||
130 | end_bit = (chunk->end_addr - chunk->start_addr) >> order; | 130 | end_bit = (chunk->end_addr - chunk->start_addr) >> order; |
131 | end_bit -= nbits + 1; | ||
132 | 131 | ||
133 | spin_lock_irqsave(&chunk->lock, flags); | 132 | spin_lock_irqsave(&chunk->lock, flags); |
134 | start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, | 133 | start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c6ece0a57595..20a8193a7af8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1370,7 +1370,7 @@ static void memcg_wakeup_oom(struct mem_cgroup *mem) | |||
1370 | 1370 | ||
1371 | static void memcg_oom_recover(struct mem_cgroup *mem) | 1371 | static void memcg_oom_recover(struct mem_cgroup *mem) |
1372 | { | 1372 | { |
1373 | if (mem->oom_kill_disable && atomic_read(&mem->oom_lock)) | 1373 | if (atomic_read(&mem->oom_lock)) |
1374 | memcg_wakeup_oom(mem); | 1374 | memcg_wakeup_oom(mem); |
1375 | } | 1375 | } |
1376 | 1376 | ||
@@ -3781,6 +3781,8 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp, | |||
3781 | return -EINVAL; | 3781 | return -EINVAL; |
3782 | } | 3782 | } |
3783 | mem->oom_kill_disable = val; | 3783 | mem->oom_kill_disable = val; |
3784 | if (!val) | ||
3785 | memcg_oom_recover(mem); | ||
3784 | cgroup_unlock(); | 3786 | cgroup_unlock(); |
3785 | return 0; | 3787 | return 0; |
3786 | } | 3788 | } |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5d6fb339de03..5bc0a96beb51 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -2094,7 +2094,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) | |||
2094 | NODEMASK_SCRATCH(scratch); | 2094 | NODEMASK_SCRATCH(scratch); |
2095 | 2095 | ||
2096 | if (!scratch) | 2096 | if (!scratch) |
2097 | return; | 2097 | goto put_mpol; |
2098 | /* contextualize the tmpfs mount point mempolicy */ | 2098 | /* contextualize the tmpfs mount point mempolicy */ |
2099 | new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); | 2099 | new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); |
2100 | if (IS_ERR(new)) | 2100 | if (IS_ERR(new)) |
@@ -2103,19 +2103,20 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) | |||
2103 | task_lock(current); | 2103 | task_lock(current); |
2104 | ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); | 2104 | ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); |
2105 | task_unlock(current); | 2105 | task_unlock(current); |
2106 | mpol_put(mpol); /* drop our ref on sb mpol */ | ||
2107 | if (ret) | 2106 | if (ret) |
2108 | goto put_free; | 2107 | goto put_new; |
2109 | 2108 | ||
2110 | /* Create pseudo-vma that contains just the policy */ | 2109 | /* Create pseudo-vma that contains just the policy */ |
2111 | memset(&pvma, 0, sizeof(struct vm_area_struct)); | 2110 | memset(&pvma, 0, sizeof(struct vm_area_struct)); |
2112 | pvma.vm_end = TASK_SIZE; /* policy covers entire file */ | 2111 | pvma.vm_end = TASK_SIZE; /* policy covers entire file */ |
2113 | mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ | 2112 | mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ |
2114 | 2113 | ||
2115 | put_free: | 2114 | put_new: |
2116 | mpol_put(new); /* drop initial ref */ | 2115 | mpol_put(new); /* drop initial ref */ |
2117 | free_scratch: | 2116 | free_scratch: |
2118 | NODEMASK_SCRATCH_FREE(scratch); | 2117 | NODEMASK_SCRATCH_FREE(scratch); |
2118 | put_mpol: | ||
2119 | mpol_put(mpol); /* drop our incoming ref on sb mpol */ | ||
2119 | } | 2120 | } |
2120 | } | 2121 | } |
2121 | 2122 | ||
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index bbd396ac9546..54f28bd493d3 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
597 | (!laptop_mode && ((global_page_state(NR_FILE_DIRTY) | 597 | (!laptop_mode && ((global_page_state(NR_FILE_DIRTY) |
598 | + global_page_state(NR_UNSTABLE_NFS)) | 598 | + global_page_state(NR_UNSTABLE_NFS)) |
599 | > background_thresh))) | 599 | > background_thresh))) |
600 | bdi_start_writeback(bdi, NULL, 0); | 600 | bdi_start_background_writeback(bdi); |
601 | } | 601 | } |
602 | 602 | ||
603 | void set_page_dirty_balance(struct page *page, int page_mkwrite) | 603 | void set_page_dirty_balance(struct page *page, int page_mkwrite) |
@@ -705,9 +705,8 @@ void laptop_mode_timer_fn(unsigned long data) | |||
705 | * We want to write everything out, not just down to the dirty | 705 | * We want to write everything out, not just down to the dirty |
706 | * threshold | 706 | * threshold |
707 | */ | 707 | */ |
708 | |||
709 | if (bdi_has_dirty_io(&q->backing_dev_info)) | 708 | if (bdi_has_dirty_io(&q->backing_dev_info)) |
710 | bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages); | 709 | bdi_start_writeback(&q->backing_dev_info, nr_pages); |
711 | } | 710 | } |
712 | 711 | ||
713 | /* | 712 | /* |
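Note: these call sites match the earlier backing-dev.h split — explicit page counts go through bdi_start_writeback(), while the dirty-background case uses the new bdi_start_background_writeback() helper. Roughly (the threshold check is shorthand for the logic in balance_dirty_pages()):

	/* Background writeback once dirty pages pass the threshold. */
	if (dirty_exceeds_background_thresh())		/* shorthand check */
		bdi_start_background_writeback(bdi);

	/* Laptop mode: flush a known number of pages. */
	if (bdi_has_dirty_io(bdi))
		bdi_start_writeback(bdi, nr_pages);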
diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 18513b0191db..cea12744a671 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile | |||
@@ -44,7 +44,7 @@ rpm-pkg rpm: $(objtree)/kernel.spec FORCE | |||
44 | fi | 44 | fi |
45 | $(MAKE) clean | 45 | $(MAKE) clean |
46 | $(PREV) ln -sf $(srctree) $(KERNELPATH) | 46 | $(PREV) ln -sf $(srctree) $(KERNELPATH) |
47 | $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion > $(objtree)/.scmversion | 47 | $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --scm-only > $(objtree)/.scmversion |
48 | $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/. | 48 | $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/. |
49 | $(PREV) rm $(KERNELPATH) | 49 | $(PREV) rm $(KERNELPATH) |
50 | rm -f $(objtree)/.scmversion | 50 | rm -f $(objtree)/.scmversion |
diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 46989b88d734..d6a866ed1835 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion | |||
@@ -10,73 +10,158 @@ | |||
10 | # | 10 | # |
11 | 11 | ||
12 | usage() { | 12 | usage() { |
13 | echo "Usage: $0 [srctree]" >&2 | 13 | echo "Usage: $0 [--scm-only] [srctree]" >&2 |
14 | exit 1 | 14 | exit 1 |
15 | } | 15 | } |
16 | 16 | ||
17 | cd "${1:-.}" || usage | 17 | scm_only=false |
18 | srctree=. | ||
19 | if test "$1" = "--scm-only"; then | ||
20 | scm_only=true | ||
21 | shift | ||
22 | fi | ||
23 | if test $# -gt 0; then | ||
24 | srctree=$1 | ||
25 | shift | ||
26 | fi | ||
27 | if test $# -gt 0 -o ! -d "$srctree"; then | ||
28 | usage | ||
29 | fi | ||
18 | 30 | ||
19 | # Check for git and a git repo. | 31 | scm_version() |
20 | if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then | 32 | { |
33 | local short=false | ||
21 | 34 | ||
22 | # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore it, | 35 | cd "$srctree" |
23 | # because this version is defined in the top level Makefile. | 36 | if test -e .scmversion; then |
24 | if [ -z "`git describe --exact-match 2>/dev/null`" ]; then | 37 | cat "$_" |
38 | return | ||
39 | fi | ||
40 | if test "$1" = "--short"; then | ||
41 | short=true | ||
42 | fi | ||
25 | 43 | ||
26 | # If we are past a tagged commit (like "v2.6.30-rc5-302-g72357d5"), | 44 | # Check for git and a git repo. |
27 | # we pretty print it. | 45 | if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then |
28 | if atag="`git describe 2>/dev/null`"; then | 46 | |
29 | echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' | 47 | # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore |
48 | # it, because this version is defined in the top level Makefile. | ||
49 | if [ -z "`git describe --exact-match 2>/dev/null`" ]; then | ||
50 | |||
51 | # If only the short version is requested, don't bother | ||
52 | # running further git commands | ||
53 | if $short; then | ||
54 | echo "+" | ||
55 | return | ||
56 | fi | ||
57 | # If we are past a tagged commit (like | ||
58 | # "v2.6.30-rc5-302-g72357d5"), we pretty print it. | ||
59 | if atag="`git describe 2>/dev/null`"; then | ||
60 | echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' | ||
61 | |||
62 | # If we don't have a tag at all we print -g{commitish}. | ||
63 | else | ||
64 | printf '%s%s' -g $head | ||
65 | fi | ||
66 | fi | ||
30 | 67 | ||
31 | # If we don't have a tag at all we print -g{commitish}. | 68 | # Is this git on svn? |
32 | else | 69 | if git config --get svn-remote.svn.url >/dev/null; then |
33 | printf '%s%s' -g $head | 70 | printf -- '-svn%s' "`git svn find-rev $head`" |
34 | fi | 71 | fi |
35 | fi | ||
36 | 72 | ||
37 | # Is this git on svn? | 73 | # Update index only on r/w media |
38 | if git config --get svn-remote.svn.url >/dev/null; then | 74 | [ -w . ] && git update-index --refresh --unmerged > /dev/null |
39 | printf -- '-svn%s' "`git svn find-rev $head`" | ||
40 | fi | ||
41 | 75 | ||
42 | # Update index only on r/w media | 76 | # Check for uncommitted changes |
43 | [ -w . ] && git update-index --refresh --unmerged > /dev/null | 77 | if git diff-index --name-only HEAD | grep -v "^scripts/package" \ |
78 | | read dummy; then | ||
79 | printf '%s' -dirty | ||
80 | fi | ||
44 | 81 | ||
45 | # Check for uncommitted changes | 82 | # All done with git |
46 | if git diff-index --name-only HEAD | grep -v "^scripts/package" \ | 83 | return |
47 | | read dummy; then | ||
48 | printf '%s' -dirty | ||
49 | fi | 84 | fi |
50 | 85 | ||
51 | # All done with git | 86 | # Check for mercurial and a mercurial repo. |
52 | exit | 87 | if hgid=`hg id 2>/dev/null`; then |
53 | fi | 88 | tag=`printf '%s' "$hgid" | cut -d' ' -f2` |
89 | |||
90 | # Do we have an untagged version? | ||
91 | if [ -z "$tag" -o "$tag" = tip ]; then | ||
92 | id=`printf '%s' "$hgid" | sed 's/[+ ].*//'` | ||
93 | printf '%s%s' -hg "$id" | ||
94 | fi | ||
54 | 95 | ||
55 | # Check for mercurial and a mercurial repo. | 96 | # Are there uncommitted changes? |
56 | if hgid=`hg id 2>/dev/null`; then | 97 | # These are represented by + after the changeset id. |
57 | tag=`printf '%s' "$hgid" | cut -d' ' -f2` | 98 | case "$hgid" in |
99 | *+|*+\ *) printf '%s' -dirty ;; | ||
100 | esac | ||
58 | 101 | ||
59 | # Do we have an untagged version? | 102 | # All done with mercurial |
60 | if [ -z "$tag" -o "$tag" = tip ]; then | 103 | return |
61 | id=`printf '%s' "$hgid" | sed 's/[+ ].*//'` | ||
62 | printf '%s%s' -hg "$id" | ||
63 | fi | 104 | fi |
64 | 105 | ||
65 | # Are there uncommitted changes? | 106 | # Check for svn and a svn repo. |
66 | # These are represented by + after the changeset id. | 107 | if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then |
67 | case "$hgid" in | 108 | rev=`echo $rev | awk '{print $NF}'` |
68 | *+|*+\ *) printf '%s' -dirty ;; | 109 | printf -- '-svn%s' "$rev" |
69 | esac | ||
70 | 110 | ||
71 | # All done with mercurial | 111 | # All done with svn |
112 | return | ||
113 | fi | ||
114 | } | ||
115 | |||
116 | collect_files() | ||
117 | { | ||
118 | local file res | ||
119 | |||
120 | for file; do | ||
121 | case "$file" in | ||
122 | *\~*) | ||
123 | continue | ||
124 | ;; | ||
125 | esac | ||
126 | if test -e "$file"; then | ||
127 | res="$res$(cat "$file")" | ||
128 | fi | ||
129 | done | ||
130 | echo "$res" | ||
131 | } | ||
132 | |||
133 | if $scm_only; then | ||
134 | scm_version | ||
72 | exit | 135 | exit |
73 | fi | 136 | fi |
74 | 137 | ||
75 | # Check for svn and a svn repo. | 138 | if test -e include/config/auto.conf; then |
76 | if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then | 139 | source "$_" |
77 | rev=`echo $rev | awk '{print $NF}'` | 140 | else |
78 | printf -- '-svn%s' "$rev" | 141 | echo "Error: kernelrelease not valid - run 'make prepare' to update it" |
142 | exit 1 | ||
143 | fi | ||
79 | 144 | ||
80 | # All done with svn | 145 | # localversion* files in the build and source directory |
81 | exit | 146 | res="$(collect_files localversion*)" |
147 | if test ! "$srctree" -ef .; then | ||
148 | res="$res$(collect_files "$srctree"/localversion*)" | ||
149 | fi | ||
150 | |||
151 | # CONFIG_LOCALVERSION and LOCALVERSION (if set) | ||
152 | res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}" | ||
153 | |||
154 | # scm version string if not at a tagged commit | ||
155 | if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then | ||
156 | # full scm version string | ||
157 | res="$res$(scm_version)" | ||
158 | else | ||
159 | # append a plus sign if the repository is not in a clean tagged | ||
160 | # state and LOCALVERSION= is not specified | ||
161 | if test "${LOCALVERSION+set}" != "set"; then | ||
162 | scm=$(scm_version --short) | ||
163 | res="$res${scm:++}" | ||
164 | fi | ||
82 | fi | 165 | fi |
166 | |||
167 | echo "$res" | ||
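
The new logic above composes the local version from, in order: any localversion* files in the build and source directories, CONFIG_LOCALVERSION plus the LOCALVERSION environment variable, and finally either the full SCM suffix (when CONFIG_LOCALVERSION_AUTO=y) or a bare "+" when the tree is not at a clean tagged commit. That "+" comes from the ${scm:++} expansion near the end of the script. A minimal, self-contained sketch of the idiom (the surrounding values are illustrative, not output of the real script):

    #!/bin/sh
    # ${var:+word} expands to "word" only when $var is set and non-empty,
    # so any non-empty result of "scm_version --short" becomes one "+".
    scm="+"                          # tree is past the tag, or dirty
    printf 'suffix=%s\n' "${scm:++}" # prints "suffix=+"
    scm=""                           # exactly at a clean tagged commit
    printf 'suffix=%s\n' "${scm:++}" # prints "suffix="

Note also that the --scm-only path returns before the include/config/auto.conf check, so it can run in an unconfigured tree and prints only the SCM-derived part of the version string.
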
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 1f7ecd47f499..9a448b47400c 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -7,6 +7,15 @@ | |||
7 | #include "util.h" | 7 | #include "util.h" |
8 | #include "debug.h" | 8 | #include "debug.h" |
9 | 9 | ||
10 | /* Skip "." and ".." directories */ | ||
11 | static int filter(const struct dirent *dir) | ||
12 | { | ||
13 | if (dir->d_name[0] == '.') | ||
14 | return 0; | ||
15 | else | ||
16 | return 1; | ||
17 | } | ||
18 | |||
10 | int find_all_tid(int pid, pid_t ** all_tid) | 19 | int find_all_tid(int pid, pid_t ** all_tid) |
11 | { | 20 | { |
12 | char name[256]; | 21 | char name[256]; |
@@ -16,7 +25,7 @@ int find_all_tid(int pid, pid_t ** all_tid) | |||
16 | int i; | 25 | int i; |
17 | 26 | ||
18 | sprintf(name, "/proc/%d/task", pid); | 27 | sprintf(name, "/proc/%d/task", pid); |
19 | items = scandir(name, &namelist, NULL, NULL); | 28 | items = scandir(name, &namelist, filter, NULL); |
20 | if (items <= 0) | 29 | if (items <= 0) |
21 | return -ENOENT; | 30 | return -ENOENT; |
22 | *all_tid = malloc(sizeof(pid_t) * items); | 31 | *all_tid = malloc(sizeof(pid_t) * items); |
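
The perf change plugs a filter into scandir() so that "." and ".." never reach the result list; without it, items over-counts the entries in /proc/<pid>/task by two. As a rough shell analogue of what find_all_tid() enumerates (an illustration, not part of the patch):

    #!/bin/sh
    # Thread IDs of a process are the entries under /proc/<pid>/task.
    # The glob skips "." and ".." implicitly, mirroring the new C filter().
    pid=${1:-$$}
    for tid in /proc/"$pid"/task/*; do
        printf '%s\n' "${tid##*/}"
    done
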