548 files changed, 8156 insertions, 4572 deletions
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index cbbd3e069945..5f3bedaf8e35 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -94,28 +94,37 @@ What: /sys/block/<disk>/queue/physical_block_size | |||
94 | Date: May 2009 | 94 | Date: May 2009 |
95 | Contact: Martin K. Petersen <martin.petersen@oracle.com> | 95 | Contact: Martin K. Petersen <martin.petersen@oracle.com> |
96 | Description: | 96 | Description: |
97 | This is the smallest unit the storage device can write | 97 | This is the smallest unit a physical storage device can |
98 | without resorting to read-modify-write operation. It is | 98 | write atomically. It is usually the same as the logical |
99 | usually the same as the logical block size but may be | 99 | block size but may be bigger. One example is SATA |
100 | bigger. One example is SATA drives with 4KB sectors | 100 | drives with 4KB sectors that expose a 512-byte logical |
101 | that expose a 512-byte logical block size to the | 101 | block size to the operating system. For stacked block |
102 | operating system. | 102 | devices the physical_block_size variable contains the |
103 | maximum physical_block_size of the component devices. | ||
103 | 104 | ||
104 | What: /sys/block/<disk>/queue/minimum_io_size | 105 | What: /sys/block/<disk>/queue/minimum_io_size |
105 | Date: April 2009 | 106 | Date: April 2009 |
106 | Contact: Martin K. Petersen <martin.petersen@oracle.com> | 107 | Contact: Martin K. Petersen <martin.petersen@oracle.com> |
107 | Description: | 108 | Description: |
108 | Storage devices may report a preferred minimum I/O size, | 109 | Storage devices may report a granularity or preferred |
109 | which is the smallest request the device can perform | 110 | minimum I/O size which is the smallest request the |
110 | without incurring a read-modify-write penalty. For disk | 111 | device can perform without incurring a performance |
111 | drives this is often the physical block size. For RAID | 112 | penalty. For disk drives this is often the physical |
112 | arrays it is often the stripe chunk size. | 113 | block size. For RAID arrays it is often the stripe |
114 | chunk size. A properly aligned multiple of | ||
115 | minimum_io_size is the preferred request size for | ||
116 | workloads where a high number of I/O operations is | ||
117 | desired. | ||
113 | 118 | ||
114 | What: /sys/block/<disk>/queue/optimal_io_size | 119 | What: /sys/block/<disk>/queue/optimal_io_size |
115 | Date: April 2009 | 120 | Date: April 2009 |
116 | Contact: Martin K. Petersen <martin.petersen@oracle.com> | 121 | Contact: Martin K. Petersen <martin.petersen@oracle.com> |
117 | Description: | 122 | Description: |
118 | Storage devices may report an optimal I/O size, which is | 123 | Storage devices may report an optimal I/O size, which is |
119 | the device's preferred unit of receiving I/O. This is | 124 | the device's preferred unit for sustained I/O. This is |
120 | rarely reported for disk drives. For RAID devices it is | 125 | rarely reported for disk drives. For RAID arrays it is |
121 | usually the stripe width or the internal block size. | 126 | usually the stripe width or the internal track size. A |
127 | properly aligned multiple of optimal_io_size is the | ||
128 | preferred request size for workloads where sustained | ||
129 | throughput is desired. If no optimal I/O size is | ||
130 | reported this file contains 0. | ||
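As an aside on how the three queue limits documented above are consumed: they are plain decimal sysfs attributes, so an application can read them and round its request sizes up to a multiple of minimum_io_size or optimal_io_size. A minimal userspace sketch follows; the disk name "sda" and the terse error handling are assumptions made here for illustration, not anything prescribed by the patch.

	#include <stdio.h>

	static unsigned long read_queue_limit(const char *disk, const char *attr)
	{
		char path[128];
		FILE *f;
		unsigned long val = 0;

		/* e.g. /sys/block/sda/queue/optimal_io_size */
		snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
		f = fopen(path, "r");
		if (!f)
			return 0;
		if (fscanf(f, "%lu", &val) != 1)
			val = 0;
		fclose(f);
		return val;
	}

	int main(void)
	{
		const char *disk = "sda";	/* illustrative disk name */

		printf("physical_block_size: %lu\n",
		       read_queue_limit(disk, "physical_block_size"));
		printf("minimum_io_size:     %lu\n",
		       read_queue_limit(disk, "minimum_io_size"));
		printf("optimal_io_size:     %lu\n",
		       read_queue_limit(disk, "optimal_io_size"));
		return 0;
	}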
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index a50d6cd58573..992e67e6be7f 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -449,8 +449,8 @@ printk(KERN_INFO "i = %u\n", i); | |||
449 | </para> | 449 | </para> |
450 | 450 | ||
451 | <programlisting> | 451 | <programlisting> |
452 | __u32 ipaddress; | 452 | __be32 ipaddress; |
453 | printk(KERN_INFO "my ip: %d.%d.%d.%d\n", NIPQUAD(ipaddress)); | 453 | printk(KERN_INFO "my ip: %pI4\n", &ipaddress); |
454 | </programlisting> | 454 | </programlisting> |
455 | 455 | ||
456 | <para> | 456 | <para> |
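The hunk above swaps the old NIPQUAD() print for the %pI4 extension, which takes a pointer to a big-endian 32-bit address. A self-contained module sketch of the new form (the module name and the sample address are made up for illustration):

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static int __init ipprint_init(void)
	{
		__be32 ipaddress = cpu_to_be32(0xc0a80001);	/* 192.168.0.1 */

		/* %pI4 dereferences its argument, so pass the address by pointer */
		printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
		return 0;
	}

	static void __exit ipprint_exit(void)
	{
	}

	module_init(ipprint_init);
	module_exit(ipprint_exit);
	MODULE_LICENSE("GPL");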
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 43cb1004d35f..9d58c7c5eddd 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -21,6 +21,8 @@ ffff8000 ffffffff copy_user_page / clear_user_page use. | |||
21 | For SA11xx and Xscale, this is used to | 21 | For SA11xx and Xscale, this is used to |
22 | setup a minicache mapping. | 22 | setup a minicache mapping. |
23 | 23 | ||
24 | ffff4000 ffffffff cache aliasing on ARMv6 and later CPUs. | ||
25 | |||
24 | ffff1000 ffff7fff Reserved. | 26 | ffff1000 ffff7fff Reserved. |
25 | Platforms must not use this address range. | 27 | Platforms must not use this address range. |
26 | 28 | ||
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 7bb0d934b6d8..dbea4f95fc85 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -139,6 +139,7 @@ Code Seq# Include File Comments | |||
139 | 'm' all linux/synclink.h conflict! | 139 | 'm' all linux/synclink.h conflict! |
140 | 'm' 00-1F net/irda/irmod.h conflict! | 140 | 'm' 00-1F net/irda/irmod.h conflict! |
141 | 'n' 00-7F linux/ncp_fs.h | 141 | 'n' 00-7F linux/ncp_fs.h |
142 | 'n' 80-8F linux/nilfs2_fs.h NILFS2 | ||
142 | 'n' E0-FF video/matrox.h matroxfb | 143 | 'n' E0-FF video/matrox.h matroxfb |
143 | 'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2 | 144 | 'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2 |
144 | 'o' 00-03 include/mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps) | 145 | 'o' 00-03 include/mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps) |
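The new 'n' 80-8F row reserves sixteen sequence numbers for NILFS2. As a reminder of how such a reservation is normally turned into userspace-visible ioctls, here is a sketch; the structure and macro names below are hypothetical and are not the real linux/nilfs2_fs.h contents.

	#include <linux/ioctl.h>
	#include <linux/types.h>

	/* hypothetical payload, for illustration only */
	struct example_range {
		__u64 start;
		__u64 len;
	};

	/* code 'n', sequence numbers taken from the 80-8F block reserved above */
	#define EXAMPLE_GET_RANGE	_IOR('n', 0x80, struct example_range)
	#define EXAMPLE_SET_RANGE	_IOW('n', 0x81, struct example_range)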
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index dd1a6d4bb747..7936b801fe6a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1115,6 +1115,10 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1115 | libata.dma=4 Compact Flash DMA only | 1115 | libata.dma=4 Compact Flash DMA only |
1116 | Combinations also work, so libata.dma=3 enables DMA | 1116 | Combinations also work, so libata.dma=3 enables DMA |
1117 | for disks and CDROMs, but not CFs. | 1117 | for disks and CDROMs, but not CFs. |
1118 | |||
1119 | libata.ignore_hpa= [LIBATA] Ignore HPA limit | ||
1120 | libata.ignore_hpa=0 keep BIOS limits (default) | ||
1121 | libata.ignore_hpa=1 ignore limits, using full disk | ||
1118 | 1122 | ||
1119 | libata.noacpi [LIBATA] Disables use of ACPI in libata suspend/resume | 1123 | libata.noacpi [LIBATA] Disables use of ACPI in libata suspend/resume |
1120 | when set. | 1124 | when set. |
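A usage note on the new parameter, assuming only the standard kernel command line and modprobe mechanics rather than anything specific to this patch: with libata built in, appending

	libata.ignore_hpa=1

to the boot command line exposes the full disk, while a modular libata takes the same setting as

	options libata ignore_hpa=1

in the modprobe configuration; omitting it, or passing 0, keeps the BIOS-reported limit as documented above.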
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index f2296ecedb89..e2ddcdeb61b6 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -36,8 +36,6 @@ detailed description): | |||
36 | - Bluetooth enable and disable | 36 | - Bluetooth enable and disable |
37 | - video output switching, expansion control | 37 | - video output switching, expansion control |
38 | - ThinkLight on and off | 38 | - ThinkLight on and off |
39 | - limited docking and undocking | ||
40 | - UltraBay eject | ||
41 | - CMOS/UCMS control | 39 | - CMOS/UCMS control |
42 | - LED control | 40 | - LED control |
43 | - ACPI sounds | 41 | - ACPI sounds |
@@ -729,131 +727,6 @@ cannot be read or if it is unknown, thinkpad-acpi will report it as "off". | |||
729 | It is impossible to know if the status returned through sysfs is valid. | 727 | It is impossible to know if the status returned through sysfs is valid. |
730 | 728 | ||
731 | 729 | ||
732 | Docking / undocking -- /proc/acpi/ibm/dock | ||
733 | ------------------------------------------ | ||
734 | |||
735 | Docking and undocking (e.g. with the X4 UltraBase) requires some | ||
736 | actions to be taken by the operating system to safely make or break | ||
737 | the electrical connections with the dock. | ||
738 | |||
739 | The docking feature of this driver generates the following ACPI events: | ||
740 | |||
741 | ibm/dock GDCK 00000003 00000001 -- eject request | ||
742 | ibm/dock GDCK 00000003 00000002 -- undocked | ||
743 | ibm/dock GDCK 00000000 00000003 -- docked | ||
744 | |||
745 | NOTE: These events will only be generated if the laptop was docked | ||
746 | when originally booted. This is due to the current lack of support for | ||
747 | hot plugging of devices in the Linux ACPI framework. If the laptop was | ||
748 | booted while not in the dock, the following message is shown in the | ||
749 | logs: | ||
750 | |||
751 | Mar 17 01:42:34 aero kernel: thinkpad_acpi: dock device not present | ||
752 | |||
753 | In this case, no dock-related events are generated but the dock and | ||
754 | undock commands described below still work. They can be executed | ||
755 | manually or triggered by Fn key combinations (see the example acpid | ||
756 | configuration files included in the driver tarball package available | ||
757 | on the web site). | ||
758 | |||
759 | When the eject request button on the dock is pressed, the first event | ||
760 | above is generated. The handler for this event should issue the | ||
761 | following command: | ||
762 | |||
763 | echo undock > /proc/acpi/ibm/dock | ||
764 | |||
765 | After the LED on the dock goes off, it is safe to eject the laptop. | ||
766 | Note: if you pressed this key by mistake, go ahead and eject the | ||
767 | laptop, then dock it back in. Otherwise, the dock may not function as | ||
768 | expected. | ||
769 | |||
770 | When the laptop is docked, the third event above is generated. The | ||
771 | handler for this event should issue the following command to fully | ||
772 | enable the dock: | ||
773 | |||
774 | echo dock > /proc/acpi/ibm/dock | ||
775 | |||
776 | The contents of the /proc/acpi/ibm/dock file shows the current status | ||
777 | of the dock, as provided by the ACPI framework. | ||
778 | |||
779 | The docking support in this driver does not take care of enabling or | ||
780 | disabling any other devices you may have attached to the dock. For | ||
781 | example, a CD drive plugged into the UltraBase needs to be disabled or | ||
782 | enabled separately. See the provided example acpid configuration files | ||
783 | for how this can be accomplished. | ||
784 | |||
785 | There is no support yet for PCI devices that may be attached to a | ||
786 | docking station, e.g. in the ThinkPad Dock II. The driver currently | ||
787 | does not recognize, enable or disable such devices. This means that | ||
788 | the only docking stations currently supported are the X-series | ||
789 | UltraBase docks and "dumb" port replicators like the Mini Dock (the | ||
790 | latter don't need any ACPI support, actually). | ||
791 | |||
792 | |||
793 | UltraBay eject -- /proc/acpi/ibm/bay | ||
794 | ------------------------------------ | ||
795 | |||
796 | Inserting or ejecting an UltraBay device requires some actions to be | ||
797 | taken by the operating system to safely make or break the electrical | ||
798 | connections with the device. | ||
799 | |||
800 | This feature generates the following ACPI events: | ||
801 | |||
802 | ibm/bay MSTR 00000003 00000000 -- eject request | ||
803 | ibm/bay MSTR 00000001 00000000 -- eject lever inserted | ||
804 | |||
805 | NOTE: These events will only be generated if the UltraBay was present | ||
806 | when the laptop was originally booted (on the X series, the UltraBay | ||
807 | is in the dock, so it may not be present if the laptop was undocked). | ||
808 | This is due to the current lack of support for hot plugging of devices | ||
809 | in the Linux ACPI framework. If the laptop was booted without the | ||
810 | UltraBay, the following message is shown in the logs: | ||
811 | |||
812 | Mar 17 01:42:34 aero kernel: thinkpad_acpi: bay device not present | ||
813 | |||
814 | In this case, no bay-related events are generated but the eject | ||
815 | command described below still works. It can be executed manually or | ||
816 | triggered by a hot key combination. | ||
817 | |||
818 | Sliding the eject lever generates the first event shown above. The | ||
819 | handler for this event should take whatever actions are necessary to | ||
820 | shut down the device in the UltraBay (e.g. call idectl), then issue | ||
821 | the following command: | ||
822 | |||
823 | echo eject > /proc/acpi/ibm/bay | ||
824 | |||
825 | After the LED on the UltraBay goes off, it is safe to pull out the | ||
826 | device. | ||
827 | |||
828 | When the eject lever is inserted, the second event above is | ||
829 | generated. The handler for this event should take whatever actions are | ||
830 | necessary to enable the UltraBay device (e.g. call idectl). | ||
831 | |||
832 | The contents of the /proc/acpi/ibm/bay file shows the current status | ||
833 | of the UltraBay, as provided by the ACPI framework. | ||
834 | |||
835 | EXPERIMENTAL warm eject support on the 600e/x, A22p and A3x (To use | ||
836 | this feature, you need to supply the experimental=1 parameter when | ||
837 | loading the module): | ||
838 | |||
839 | These models do not have a button near the UltraBay device to request | ||
840 | a hot eject but rather require the laptop to be put to sleep | ||
841 | (suspend-to-ram) before the bay device is ejected or inserted). | ||
842 | The sequence of steps to eject the device is as follows: | ||
843 | |||
844 | echo eject > /proc/acpi/ibm/bay | ||
845 | put the ThinkPad to sleep | ||
846 | remove the drive | ||
847 | resume from sleep | ||
848 | cat /proc/acpi/ibm/bay should show that the drive was removed | ||
849 | |||
850 | On the A3x, both the UltraBay 2000 and UltraBay Plus devices are | ||
851 | supported. Use "eject2" instead of "eject" for the second bay. | ||
852 | |||
853 | Note: the UltraBay eject support on the 600e/x, A22p and A3x is | ||
854 | EXPERIMENTAL and may not work as expected. USE WITH CAUTION! | ||
855 | |||
856 | |||
857 | CMOS/UCMS control | 730 | CMOS/UCMS control |
858 | ----------------- | 731 | ----------------- |
859 | 732 | ||
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index e20d913d5914..abf768c681e2 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/lockdep-design.txt
@@ -30,9 +30,9 @@ State | |||
30 | The validator tracks lock-class usage history into 4n + 1 separate state bits: | 30 | The validator tracks lock-class usage history into 4n + 1 separate state bits: |
31 | 31 | ||
32 | - 'ever held in STATE context' | 32 | - 'ever held in STATE context' |
33 | - 'ever head as readlock in STATE context' | 33 | - 'ever held as readlock in STATE context' |
34 | - 'ever head with STATE enabled' | 34 | - 'ever held with STATE enabled' |
35 | - 'ever head as readlock with STATE enabled' | 35 | - 'ever held as readlock with STATE enabled' |
36 | 36 | ||
37 | Where STATE can be either one of (kernel/lockdep_states.h) | 37 | Where STATE can be either one of (kernel/lockdep_states.h) |
38 | - hardirq | 38 | - hardirq |
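For concreteness, a small worked instance of the 4n + 1 formula: kernel/lockdep_states.h at this point lists hardirq along with (as far as this editor recalls) softirq and reclaim_fs, so n = 3 and each lock class carries 4 * 3 + 1 = 13 usage bits, the odd bit out recording whether the class has ever been used at all.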
diff --git a/MAINTAINERS b/MAINTAINERS
index d6befb2c470f..b1114cfac6bf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4995,7 +4995,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git | |||
4995 | S: Maintained | 4995 | S: Maintained |
4996 | 4996 | ||
4997 | TTY LAYER | 4997 | TTY LAYER |
4998 | S: Orphan | 4998 | M: Greg Kroah-Hartman <gregkh@suse.de> |
4999 | S: Maintained | ||
5000 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | ||
4999 | F: drivers/char/tty_* | 5001 | F: drivers/char/tty_* |
5000 | F: drivers/serial/serial_core.c | 5002 | F: drivers/serial/serial_core.c |
5001 | F: include/linux/serial_core.h | 5003 | F: include/linux/serial_core.h |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 31 | 3 | SUBLEVEL = 31 |
4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc6 |
5 | NAME = Man-Eating Seals of Antiquity | 5 | NAME = Man-Eating Seals of Antiquity |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index 9e6e512f0117..17153b54613b 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -29,7 +29,6 @@ unsigned int __machine_arch_type; | |||
29 | 29 | ||
30 | static void putstr(const char *ptr); | 30 | static void putstr(const char *ptr); |
31 | 31 | ||
32 | #include <linux/compiler.h> | ||
33 | #include <mach/uncompress.h> | 32 | #include <mach/uncompress.h> |
34 | 33 | ||
35 | #ifdef CONFIG_DEBUG_ICEDCC | 34 | #ifdef CONFIG_DEBUG_ICEDCC |
diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c
index f37afd9422f3..aae5bc01acc8 100644
--- a/arch/arm/common/clkdev.c
+++ b/arch/arm/common/clkdev.c
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/clk.h> | ||
20 | 21 | ||
21 | #include <asm/clkdev.h> | 22 | #include <asm/clkdev.h> |
22 | #include <mach/clkdev.h> | 23 | #include <mach/clkdev.h> |
diff --git a/arch/arm/configs/mx27_defconfig b/arch/arm/configs/mx27_defconfig
index 083516cd0d7f..75263a83741c 100644
--- a/arch/arm/configs/mx27_defconfig
+++ b/arch/arm/configs/mx27_defconfig
@@ -1,15 +1,15 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30-rc1 | 3 | # Linux kernel version: 2.6.31-rc4 |
4 | # Wed Apr 8 10:18:06 2009 | 4 | # Fri Jul 24 16:08:06 2009 |
5 | # | 5 | # |
6 | CONFIG_ARM=y | 6 | CONFIG_ARM=y |
7 | CONFIG_HAVE_PWM=y | ||
7 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y | 8 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y |
8 | CONFIG_GENERIC_GPIO=y | 9 | CONFIG_GENERIC_GPIO=y |
9 | CONFIG_GENERIC_TIME=y | 10 | CONFIG_GENERIC_TIME=y |
10 | CONFIG_GENERIC_CLOCKEVENTS=y | 11 | CONFIG_GENERIC_CLOCKEVENTS=y |
11 | CONFIG_MMU=y | 12 | CONFIG_MMU=y |
12 | # CONFIG_NO_IOPORT is not set | ||
13 | CONFIG_GENERIC_HARDIRQS=y | 13 | CONFIG_GENERIC_HARDIRQS=y |
14 | CONFIG_STACKTRACE_SUPPORT=y | 14 | CONFIG_STACKTRACE_SUPPORT=y |
15 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | 15 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y |
@@ -18,14 +18,13 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y | |||
18 | CONFIG_HARDIRQS_SW_RESEND=y | 18 | CONFIG_HARDIRQS_SW_RESEND=y |
19 | CONFIG_GENERIC_IRQ_PROBE=y | 19 | CONFIG_GENERIC_IRQ_PROBE=y |
20 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 20 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
21 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set | ||
22 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | ||
23 | CONFIG_GENERIC_HWEIGHT=y | 21 | CONFIG_GENERIC_HWEIGHT=y |
24 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 22 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
25 | CONFIG_ARCH_MTD_XIP=y | 23 | CONFIG_ARCH_MTD_XIP=y |
26 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | 24 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y |
27 | CONFIG_VECTORS_BASE=0xffff0000 | 25 | CONFIG_VECTORS_BASE=0xffff0000 |
28 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 26 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
27 | CONFIG_CONSTRUCTORS=y | ||
29 | 28 | ||
30 | # | 29 | # |
31 | # General setup | 30 | # General setup |
@@ -85,7 +84,12 @@ CONFIG_TIMERFD=y | |||
85 | CONFIG_EVENTFD=y | 84 | CONFIG_EVENTFD=y |
86 | CONFIG_SHMEM=y | 85 | CONFIG_SHMEM=y |
87 | CONFIG_AIO=y | 86 | CONFIG_AIO=y |
87 | |||
88 | # | ||
89 | # Performance Counters | ||
90 | # | ||
88 | CONFIG_VM_EVENT_COUNTERS=y | 91 | CONFIG_VM_EVENT_COUNTERS=y |
92 | # CONFIG_STRIP_ASM_SYMS is not set | ||
89 | # CONFIG_COMPAT_BRK is not set | 93 | # CONFIG_COMPAT_BRK is not set |
90 | CONFIG_SLAB=y | 94 | CONFIG_SLAB=y |
91 | # CONFIG_SLUB is not set | 95 | # CONFIG_SLUB is not set |
@@ -99,6 +103,12 @@ CONFIG_KPROBES=y | |||
99 | CONFIG_KRETPROBES=y | 103 | CONFIG_KRETPROBES=y |
100 | CONFIG_HAVE_KPROBES=y | 104 | CONFIG_HAVE_KPROBES=y |
101 | CONFIG_HAVE_KRETPROBES=y | 105 | CONFIG_HAVE_KRETPROBES=y |
106 | CONFIG_HAVE_CLK=y | ||
107 | |||
108 | # | ||
109 | # GCOV-based kernel profiling | ||
110 | # | ||
111 | # CONFIG_GCOV_KERNEL is not set | ||
102 | # CONFIG_SLOW_WORK is not set | 112 | # CONFIG_SLOW_WORK is not set |
103 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y | 113 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y |
104 | CONFIG_SLABINFO=y | 114 | CONFIG_SLABINFO=y |
@@ -111,7 +121,7 @@ CONFIG_MODULE_UNLOAD=y | |||
111 | # CONFIG_MODVERSIONS is not set | 121 | # CONFIG_MODVERSIONS is not set |
112 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 122 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
113 | CONFIG_BLOCK=y | 123 | CONFIG_BLOCK=y |
114 | # CONFIG_LBD is not set | 124 | CONFIG_LBDAF=y |
115 | # CONFIG_BLK_DEV_BSG is not set | 125 | # CONFIG_BLK_DEV_BSG is not set |
116 | # CONFIG_BLK_DEV_INTEGRITY is not set | 126 | # CONFIG_BLK_DEV_INTEGRITY is not set |
117 | 127 | ||
@@ -138,13 +148,14 @@ CONFIG_FREEZER=y | |||
138 | # CONFIG_ARCH_VERSATILE is not set | 148 | # CONFIG_ARCH_VERSATILE is not set |
139 | # CONFIG_ARCH_AT91 is not set | 149 | # CONFIG_ARCH_AT91 is not set |
140 | # CONFIG_ARCH_CLPS711X is not set | 150 | # CONFIG_ARCH_CLPS711X is not set |
151 | # CONFIG_ARCH_GEMINI is not set | ||
141 | # CONFIG_ARCH_EBSA110 is not set | 152 | # CONFIG_ARCH_EBSA110 is not set |
142 | # CONFIG_ARCH_EP93XX is not set | 153 | # CONFIG_ARCH_EP93XX is not set |
143 | # CONFIG_ARCH_GEMINI is not set | ||
144 | # CONFIG_ARCH_FOOTBRIDGE is not set | 154 | # CONFIG_ARCH_FOOTBRIDGE is not set |
155 | CONFIG_ARCH_MXC=y | ||
156 | # CONFIG_ARCH_STMP3XXX is not set | ||
145 | # CONFIG_ARCH_NETX is not set | 157 | # CONFIG_ARCH_NETX is not set |
146 | # CONFIG_ARCH_H720X is not set | 158 | # CONFIG_ARCH_H720X is not set |
147 | # CONFIG_ARCH_IMX is not set | ||
148 | # CONFIG_ARCH_IOP13XX is not set | 159 | # CONFIG_ARCH_IOP13XX is not set |
149 | # CONFIG_ARCH_IOP32X is not set | 160 | # CONFIG_ARCH_IOP32X is not set |
150 | # CONFIG_ARCH_IOP33X is not set | 161 | # CONFIG_ARCH_IOP33X is not set |
@@ -153,25 +164,25 @@ CONFIG_FREEZER=y | |||
153 | # CONFIG_ARCH_IXP4XX is not set | 164 | # CONFIG_ARCH_IXP4XX is not set |
154 | # CONFIG_ARCH_L7200 is not set | 165 | # CONFIG_ARCH_L7200 is not set |
155 | # CONFIG_ARCH_KIRKWOOD is not set | 166 | # CONFIG_ARCH_KIRKWOOD is not set |
156 | # CONFIG_ARCH_KS8695 is not set | ||
157 | # CONFIG_ARCH_NS9XXX is not set | ||
158 | # CONFIG_ARCH_LOKI is not set | 167 | # CONFIG_ARCH_LOKI is not set |
159 | # CONFIG_ARCH_MV78XX0 is not set | 168 | # CONFIG_ARCH_MV78XX0 is not set |
160 | CONFIG_ARCH_MXC=y | ||
161 | # CONFIG_ARCH_ORION5X is not set | 169 | # CONFIG_ARCH_ORION5X is not set |
170 | # CONFIG_ARCH_MMP is not set | ||
171 | # CONFIG_ARCH_KS8695 is not set | ||
172 | # CONFIG_ARCH_NS9XXX is not set | ||
173 | # CONFIG_ARCH_W90X900 is not set | ||
162 | # CONFIG_ARCH_PNX4008 is not set | 174 | # CONFIG_ARCH_PNX4008 is not set |
163 | # CONFIG_ARCH_PXA is not set | 175 | # CONFIG_ARCH_PXA is not set |
164 | # CONFIG_ARCH_MMP is not set | 176 | # CONFIG_ARCH_MSM is not set |
165 | # CONFIG_ARCH_RPC is not set | 177 | # CONFIG_ARCH_RPC is not set |
166 | # CONFIG_ARCH_SA1100 is not set | 178 | # CONFIG_ARCH_SA1100 is not set |
167 | # CONFIG_ARCH_S3C2410 is not set | 179 | # CONFIG_ARCH_S3C2410 is not set |
168 | # CONFIG_ARCH_S3C64XX is not set | 180 | # CONFIG_ARCH_S3C64XX is not set |
169 | # CONFIG_ARCH_SHARK is not set | 181 | # CONFIG_ARCH_SHARK is not set |
170 | # CONFIG_ARCH_LH7A40X is not set | 182 | # CONFIG_ARCH_LH7A40X is not set |
183 | # CONFIG_ARCH_U300 is not set | ||
171 | # CONFIG_ARCH_DAVINCI is not set | 184 | # CONFIG_ARCH_DAVINCI is not set |
172 | # CONFIG_ARCH_OMAP is not set | 185 | # CONFIG_ARCH_OMAP is not set |
173 | # CONFIG_ARCH_MSM is not set | ||
174 | # CONFIG_ARCH_W90X900 is not set | ||
175 | 186 | ||
176 | # | 187 | # |
177 | # Freescale MXC Implementations | 188 | # Freescale MXC Implementations |
@@ -188,6 +199,8 @@ CONFIG_MACH_MX27=y | |||
188 | CONFIG_MACH_MX27ADS=y | 199 | CONFIG_MACH_MX27ADS=y |
189 | CONFIG_MACH_PCM038=y | 200 | CONFIG_MACH_PCM038=y |
190 | CONFIG_MACH_PCM970_BASEBOARD=y | 201 | CONFIG_MACH_PCM970_BASEBOARD=y |
202 | CONFIG_MACH_MX27_3DS=y | ||
203 | CONFIG_MACH_MX27LITE=y | ||
191 | CONFIG_MXC_IRQ_PRIOR=y | 204 | CONFIG_MXC_IRQ_PRIOR=y |
192 | CONFIG_MXC_PWM=y | 205 | CONFIG_MXC_PWM=y |
193 | 206 | ||
@@ -213,7 +226,6 @@ CONFIG_ARM_THUMB=y | |||
213 | # CONFIG_CPU_DCACHE_DISABLE is not set | 226 | # CONFIG_CPU_DCACHE_DISABLE is not set |
214 | # CONFIG_CPU_DCACHE_WRITETHROUGH is not set | 227 | # CONFIG_CPU_DCACHE_WRITETHROUGH is not set |
215 | # CONFIG_CPU_CACHE_ROUND_ROBIN is not set | 228 | # CONFIG_CPU_CACHE_ROUND_ROBIN is not set |
216 | # CONFIG_OUTER_CACHE is not set | ||
217 | CONFIG_COMMON_CLKDEV=y | 229 | CONFIG_COMMON_CLKDEV=y |
218 | 230 | ||
219 | # | 231 | # |
@@ -238,7 +250,6 @@ CONFIG_PREEMPT=y | |||
238 | CONFIG_HZ=100 | 250 | CONFIG_HZ=100 |
239 | CONFIG_AEABI=y | 251 | CONFIG_AEABI=y |
240 | CONFIG_OABI_COMPAT=y | 252 | CONFIG_OABI_COMPAT=y |
241 | CONFIG_ARCH_FLATMEM_HAS_HOLES=y | ||
242 | # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set | 253 | # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set |
243 | # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set | 254 | # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set |
244 | # CONFIG_HIGHMEM is not set | 255 | # CONFIG_HIGHMEM is not set |
@@ -253,10 +264,11 @@ CONFIG_SPLIT_PTLOCK_CPUS=4096 | |||
253 | # CONFIG_PHYS_ADDR_T_64BIT is not set | 264 | # CONFIG_PHYS_ADDR_T_64BIT is not set |
254 | CONFIG_ZONE_DMA_FLAG=0 | 265 | CONFIG_ZONE_DMA_FLAG=0 |
255 | CONFIG_VIRT_TO_BUS=y | 266 | CONFIG_VIRT_TO_BUS=y |
256 | CONFIG_UNEVICTABLE_LRU=y | ||
257 | CONFIG_HAVE_MLOCK=y | 267 | CONFIG_HAVE_MLOCK=y |
258 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | 268 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y |
269 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
259 | CONFIG_ALIGNMENT_TRAP=y | 270 | CONFIG_ALIGNMENT_TRAP=y |
271 | # CONFIG_UACCESS_WITH_MEMCPY is not set | ||
260 | 272 | ||
261 | # | 273 | # |
262 | # Boot options | 274 | # Boot options |
@@ -361,6 +373,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" | |||
361 | # CONFIG_ECONET is not set | 373 | # CONFIG_ECONET is not set |
362 | # CONFIG_WAN_ROUTER is not set | 374 | # CONFIG_WAN_ROUTER is not set |
363 | # CONFIG_PHONET is not set | 375 | # CONFIG_PHONET is not set |
376 | # CONFIG_IEEE802154 is not set | ||
364 | # CONFIG_NET_SCHED is not set | 377 | # CONFIG_NET_SCHED is not set |
365 | # CONFIG_DCB is not set | 378 | # CONFIG_DCB is not set |
366 | 379 | ||
@@ -474,7 +487,16 @@ CONFIG_MTD_PHYSMAP=y | |||
474 | # CONFIG_MTD_DOC2000 is not set | 487 | # CONFIG_MTD_DOC2000 is not set |
475 | # CONFIG_MTD_DOC2001 is not set | 488 | # CONFIG_MTD_DOC2001 is not set |
476 | # CONFIG_MTD_DOC2001PLUS is not set | 489 | # CONFIG_MTD_DOC2001PLUS is not set |
477 | # CONFIG_MTD_NAND is not set | 490 | CONFIG_MTD_NAND=y |
491 | # CONFIG_MTD_NAND_VERIFY_WRITE is not set | ||
492 | # CONFIG_MTD_NAND_ECC_SMC is not set | ||
493 | # CONFIG_MTD_NAND_MUSEUM_IDS is not set | ||
494 | # CONFIG_MTD_NAND_GPIO is not set | ||
495 | CONFIG_MTD_NAND_IDS=y | ||
496 | # CONFIG_MTD_NAND_DISKONCHIP is not set | ||
497 | # CONFIG_MTD_NAND_NANDSIM is not set | ||
498 | # CONFIG_MTD_NAND_PLATFORM is not set | ||
499 | CONFIG_MTD_NAND_MXC=y | ||
478 | # CONFIG_MTD_ONENAND is not set | 500 | # CONFIG_MTD_ONENAND is not set |
479 | 501 | ||
480 | # | 502 | # |
@@ -485,7 +507,15 @@ CONFIG_MTD_PHYSMAP=y | |||
485 | # | 507 | # |
486 | # UBI - Unsorted block images | 508 | # UBI - Unsorted block images |
487 | # | 509 | # |
488 | # CONFIG_MTD_UBI is not set | 510 | CONFIG_MTD_UBI=y |
511 | CONFIG_MTD_UBI_WL_THRESHOLD=4096 | ||
512 | CONFIG_MTD_UBI_BEB_RESERVE=1 | ||
513 | # CONFIG_MTD_UBI_GLUEBI is not set | ||
514 | |||
515 | # | ||
516 | # UBI debugging options | ||
517 | # | ||
518 | # CONFIG_MTD_UBI_DEBUG is not set | ||
489 | # CONFIG_PARPORT is not set | 519 | # CONFIG_PARPORT is not set |
490 | CONFIG_BLK_DEV=y | 520 | CONFIG_BLK_DEV=y |
491 | # CONFIG_BLK_DEV_COW_COMMON is not set | 521 | # CONFIG_BLK_DEV_COW_COMMON is not set |
@@ -494,7 +524,21 @@ CONFIG_BLK_DEV=y | |||
494 | # CONFIG_BLK_DEV_RAM is not set | 524 | # CONFIG_BLK_DEV_RAM is not set |
495 | # CONFIG_CDROM_PKTCDVD is not set | 525 | # CONFIG_CDROM_PKTCDVD is not set |
496 | # CONFIG_ATA_OVER_ETH is not set | 526 | # CONFIG_ATA_OVER_ETH is not set |
497 | # CONFIG_MISC_DEVICES is not set | 527 | # CONFIG_MG_DISK is not set |
528 | CONFIG_MISC_DEVICES=y | ||
529 | # CONFIG_ICS932S401 is not set | ||
530 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
531 | # CONFIG_ISL29003 is not set | ||
532 | # CONFIG_C2PORT is not set | ||
533 | |||
534 | # | ||
535 | # EEPROM support | ||
536 | # | ||
537 | CONFIG_EEPROM_AT24=y | ||
538 | # CONFIG_EEPROM_AT25 is not set | ||
539 | # CONFIG_EEPROM_LEGACY is not set | ||
540 | # CONFIG_EEPROM_MAX6875 is not set | ||
541 | # CONFIG_EEPROM_93CX6 is not set | ||
498 | CONFIG_HAVE_IDE=y | 542 | CONFIG_HAVE_IDE=y |
499 | # CONFIG_IDE is not set | 543 | # CONFIG_IDE is not set |
500 | 544 | ||
@@ -508,7 +552,6 @@ CONFIG_HAVE_IDE=y | |||
508 | # CONFIG_ATA is not set | 552 | # CONFIG_ATA is not set |
509 | # CONFIG_MD is not set | 553 | # CONFIG_MD is not set |
510 | CONFIG_NETDEVICES=y | 554 | CONFIG_NETDEVICES=y |
511 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
512 | # CONFIG_DUMMY is not set | 555 | # CONFIG_DUMMY is not set |
513 | # CONFIG_BONDING is not set | 556 | # CONFIG_BONDING is not set |
514 | # CONFIG_MACVLAN is not set | 557 | # CONFIG_MACVLAN is not set |
@@ -534,6 +577,8 @@ CONFIG_NET_ETHERNET=y | |||
534 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | 577 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set |
535 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 578 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
536 | # CONFIG_B44 is not set | 579 | # CONFIG_B44 is not set |
580 | # CONFIG_KS8842 is not set | ||
581 | # CONFIG_KS8851 is not set | ||
537 | CONFIG_FEC=y | 582 | CONFIG_FEC=y |
538 | # CONFIG_FEC2 is not set | 583 | # CONFIG_FEC2 is not set |
539 | # CONFIG_NETDEV_1000 is not set | 584 | # CONFIG_NETDEV_1000 is not set |
@@ -580,6 +625,11 @@ CONFIG_INPUT_EVDEV=y | |||
580 | # CONFIG_INPUT_TABLET is not set | 625 | # CONFIG_INPUT_TABLET is not set |
581 | CONFIG_INPUT_TOUCHSCREEN=y | 626 | CONFIG_INPUT_TOUCHSCREEN=y |
582 | # CONFIG_TOUCHSCREEN_ADS7846 is not set | 627 | # CONFIG_TOUCHSCREEN_ADS7846 is not set |
628 | # CONFIG_TOUCHSCREEN_AD7877 is not set | ||
629 | # CONFIG_TOUCHSCREEN_AD7879_I2C is not set | ||
630 | # CONFIG_TOUCHSCREEN_AD7879_SPI is not set | ||
631 | # CONFIG_TOUCHSCREEN_AD7879 is not set | ||
632 | # CONFIG_TOUCHSCREEN_EETI is not set | ||
583 | # CONFIG_TOUCHSCREEN_FUJITSU is not set | 633 | # CONFIG_TOUCHSCREEN_FUJITSU is not set |
584 | # CONFIG_TOUCHSCREEN_GUNZE is not set | 634 | # CONFIG_TOUCHSCREEN_GUNZE is not set |
585 | # CONFIG_TOUCHSCREEN_ELO is not set | 635 | # CONFIG_TOUCHSCREEN_ELO is not set |
@@ -592,6 +642,7 @@ CONFIG_INPUT_TOUCHSCREEN=y | |||
592 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set | 642 | # CONFIG_TOUCHSCREEN_TOUCHWIN is not set |
593 | # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set | 643 | # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set |
594 | # CONFIG_TOUCHSCREEN_TSC2007 is not set | 644 | # CONFIG_TOUCHSCREEN_TSC2007 is not set |
645 | # CONFIG_TOUCHSCREEN_W90X900 is not set | ||
595 | # CONFIG_INPUT_MISC is not set | 646 | # CONFIG_INPUT_MISC is not set |
596 | 647 | ||
597 | # | 648 | # |
@@ -644,6 +695,7 @@ CONFIG_I2C_HELPER_AUTO=y | |||
644 | # | 695 | # |
645 | # I2C system bus drivers (mostly embedded / system-on-chip) | 696 | # I2C system bus drivers (mostly embedded / system-on-chip) |
646 | # | 697 | # |
698 | # CONFIG_I2C_DESIGNWARE is not set | ||
647 | # CONFIG_I2C_GPIO is not set | 699 | # CONFIG_I2C_GPIO is not set |
648 | CONFIG_I2C_IMX=y | 700 | CONFIG_I2C_IMX=y |
649 | # CONFIG_I2C_OCORES is not set | 701 | # CONFIG_I2C_OCORES is not set |
@@ -668,7 +720,6 @@ CONFIG_I2C_IMX=y | |||
668 | # CONFIG_SENSORS_PCF8574 is not set | 720 | # CONFIG_SENSORS_PCF8574 is not set |
669 | # CONFIG_PCF8575 is not set | 721 | # CONFIG_PCF8575 is not set |
670 | # CONFIG_SENSORS_PCA9539 is not set | 722 | # CONFIG_SENSORS_PCA9539 is not set |
671 | # CONFIG_SENSORS_MAX6875 is not set | ||
672 | # CONFIG_SENSORS_TSL2550 is not set | 723 | # CONFIG_SENSORS_TSL2550 is not set |
673 | # CONFIG_I2C_DEBUG_CORE is not set | 724 | # CONFIG_I2C_DEBUG_CORE is not set |
674 | # CONFIG_I2C_DEBUG_ALGO is not set | 725 | # CONFIG_I2C_DEBUG_ALGO is not set |
@@ -719,6 +770,7 @@ CONFIG_W1=y | |||
719 | # | 770 | # |
720 | # CONFIG_W1_MASTER_DS2482 is not set | 771 | # CONFIG_W1_MASTER_DS2482 is not set |
721 | CONFIG_W1_MASTER_MXC=y | 772 | CONFIG_W1_MASTER_MXC=y |
773 | # CONFIG_W1_MASTER_DS1WM is not set | ||
722 | # CONFIG_W1_MASTER_GPIO is not set | 774 | # CONFIG_W1_MASTER_GPIO is not set |
723 | 775 | ||
724 | # | 776 | # |
@@ -753,54 +805,16 @@ CONFIG_SSB_POSSIBLE=y | |||
753 | # CONFIG_TPS65010 is not set | 805 | # CONFIG_TPS65010 is not set |
754 | # CONFIG_TWL4030_CORE is not set | 806 | # CONFIG_TWL4030_CORE is not set |
755 | # CONFIG_MFD_TMIO is not set | 807 | # CONFIG_MFD_TMIO is not set |
808 | # CONFIG_MFD_T7L66XB is not set | ||
809 | # CONFIG_MFD_TC6387XB is not set | ||
756 | # CONFIG_MFD_TC6393XB is not set | 810 | # CONFIG_MFD_TC6393XB is not set |
757 | # CONFIG_PMIC_DA903X is not set | 811 | # CONFIG_PMIC_DA903X is not set |
758 | # CONFIG_MFD_WM8400 is not set | 812 | # CONFIG_MFD_WM8400 is not set |
759 | # CONFIG_MFD_WM8350_I2C is not set | 813 | # CONFIG_MFD_WM8350_I2C is not set |
760 | # CONFIG_MFD_PCF50633 is not set | 814 | # CONFIG_MFD_PCF50633 is not set |
761 | 815 | # CONFIG_AB3100_CORE is not set | |
762 | # | 816 | # CONFIG_EZX_PCAP is not set |
763 | # Multimedia devices | 817 | # CONFIG_MEDIA_SUPPORT is not set |
764 | # | ||
765 | |||
766 | # | ||
767 | # Multimedia core support | ||
768 | # | ||
769 | CONFIG_VIDEO_DEV=y | ||
770 | CONFIG_VIDEO_V4L2_COMMON=y | ||
771 | CONFIG_VIDEO_ALLOW_V4L1=y | ||
772 | CONFIG_VIDEO_V4L1_COMPAT=y | ||
773 | # CONFIG_DVB_CORE is not set | ||
774 | CONFIG_VIDEO_MEDIA=y | ||
775 | |||
776 | # | ||
777 | # Multimedia drivers | ||
778 | # | ||
779 | # CONFIG_MEDIA_ATTACH is not set | ||
780 | CONFIG_MEDIA_TUNER=y | ||
781 | # CONFIG_MEDIA_TUNER_CUSTOMISE is not set | ||
782 | CONFIG_MEDIA_TUNER_SIMPLE=y | ||
783 | CONFIG_MEDIA_TUNER_TDA8290=y | ||
784 | CONFIG_MEDIA_TUNER_TDA9887=y | ||
785 | CONFIG_MEDIA_TUNER_TEA5761=y | ||
786 | CONFIG_MEDIA_TUNER_TEA5767=y | ||
787 | CONFIG_MEDIA_TUNER_MT20XX=y | ||
788 | CONFIG_MEDIA_TUNER_XC2028=y | ||
789 | CONFIG_MEDIA_TUNER_XC5000=y | ||
790 | CONFIG_MEDIA_TUNER_MC44S803=y | ||
791 | CONFIG_VIDEO_V4L2=y | ||
792 | CONFIG_VIDEO_V4L1=y | ||
793 | CONFIG_VIDEO_CAPTURE_DRIVERS=y | ||
794 | # CONFIG_VIDEO_ADV_DEBUG is not set | ||
795 | # CONFIG_VIDEO_FIXED_MINOR_RANGES is not set | ||
796 | CONFIG_VIDEO_HELPER_CHIPS_AUTO=y | ||
797 | # CONFIG_VIDEO_VIVI is not set | ||
798 | # CONFIG_VIDEO_CPIA is not set | ||
799 | # CONFIG_VIDEO_SAA5246A is not set | ||
800 | # CONFIG_VIDEO_SAA5249 is not set | ||
801 | # CONFIG_SOC_CAMERA is not set | ||
802 | # CONFIG_RADIO_ADAPTERS is not set | ||
803 | # CONFIG_DAB is not set | ||
804 | 818 | ||
805 | # | 819 | # |
806 | # Graphics support | 820 | # Graphics support |
@@ -917,6 +931,7 @@ CONFIG_RTC_DRV_PCF8563=y | |||
917 | # CONFIG_RTC_DRV_S35390A is not set | 931 | # CONFIG_RTC_DRV_S35390A is not set |
918 | # CONFIG_RTC_DRV_FM3130 is not set | 932 | # CONFIG_RTC_DRV_FM3130 is not set |
919 | # CONFIG_RTC_DRV_RX8581 is not set | 933 | # CONFIG_RTC_DRV_RX8581 is not set |
934 | # CONFIG_RTC_DRV_RX8025 is not set | ||
920 | 935 | ||
921 | # | 936 | # |
922 | # SPI RTC drivers | 937 | # SPI RTC drivers |
@@ -962,12 +977,15 @@ CONFIG_RTC_DRV_PCF8563=y | |||
962 | # CONFIG_REISERFS_FS is not set | 977 | # CONFIG_REISERFS_FS is not set |
963 | # CONFIG_JFS_FS is not set | 978 | # CONFIG_JFS_FS is not set |
964 | # CONFIG_FS_POSIX_ACL is not set | 979 | # CONFIG_FS_POSIX_ACL is not set |
965 | CONFIG_FILE_LOCKING=y | ||
966 | # CONFIG_XFS_FS is not set | 980 | # CONFIG_XFS_FS is not set |
981 | # CONFIG_GFS2_FS is not set | ||
967 | # CONFIG_OCFS2_FS is not set | 982 | # CONFIG_OCFS2_FS is not set |
968 | # CONFIG_BTRFS_FS is not set | 983 | # CONFIG_BTRFS_FS is not set |
984 | CONFIG_FILE_LOCKING=y | ||
985 | CONFIG_FSNOTIFY=y | ||
969 | # CONFIG_DNOTIFY is not set | 986 | # CONFIG_DNOTIFY is not set |
970 | # CONFIG_INOTIFY is not set | 987 | # CONFIG_INOTIFY is not set |
988 | CONFIG_INOTIFY_USER=y | ||
971 | # CONFIG_QUOTA is not set | 989 | # CONFIG_QUOTA is not set |
972 | # CONFIG_AUTOFS_FS is not set | 990 | # CONFIG_AUTOFS_FS is not set |
973 | # CONFIG_AUTOFS4_FS is not set | 991 | # CONFIG_AUTOFS4_FS is not set |
@@ -1021,6 +1039,12 @@ CONFIG_JFFS2_ZLIB=y | |||
1021 | # CONFIG_JFFS2_LZO is not set | 1039 | # CONFIG_JFFS2_LZO is not set |
1022 | CONFIG_JFFS2_RTIME=y | 1040 | CONFIG_JFFS2_RTIME=y |
1023 | # CONFIG_JFFS2_RUBIN is not set | 1041 | # CONFIG_JFFS2_RUBIN is not set |
1042 | CONFIG_UBIFS_FS=y | ||
1043 | # CONFIG_UBIFS_FS_XATTR is not set | ||
1044 | # CONFIG_UBIFS_FS_ADVANCED_COMPR is not set | ||
1045 | CONFIG_UBIFS_FS_LZO=y | ||
1046 | CONFIG_UBIFS_FS_ZLIB=y | ||
1047 | # CONFIG_UBIFS_FS_DEBUG is not set | ||
1024 | # CONFIG_CRAMFS is not set | 1048 | # CONFIG_CRAMFS is not set |
1025 | # CONFIG_SQUASHFS is not set | 1049 | # CONFIG_SQUASHFS is not set |
1026 | # CONFIG_VXFS_FS is not set | 1050 | # CONFIG_VXFS_FS is not set |
@@ -1119,25 +1143,11 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y | |||
1119 | CONFIG_NOP_TRACER=y | 1143 | CONFIG_NOP_TRACER=y |
1120 | CONFIG_HAVE_FUNCTION_TRACER=y | 1144 | CONFIG_HAVE_FUNCTION_TRACER=y |
1121 | CONFIG_RING_BUFFER=y | 1145 | CONFIG_RING_BUFFER=y |
1146 | CONFIG_EVENT_TRACING=y | ||
1147 | CONFIG_CONTEXT_SWITCH_TRACER=y | ||
1122 | CONFIG_TRACING=y | 1148 | CONFIG_TRACING=y |
1123 | CONFIG_TRACING_SUPPORT=y | 1149 | CONFIG_TRACING_SUPPORT=y |
1124 | 1150 | # CONFIG_FTRACE is not set | |
1125 | # | ||
1126 | # Tracers | ||
1127 | # | ||
1128 | # CONFIG_FUNCTION_TRACER is not set | ||
1129 | # CONFIG_IRQSOFF_TRACER is not set | ||
1130 | # CONFIG_PREEMPT_TRACER is not set | ||
1131 | # CONFIG_SCHED_TRACER is not set | ||
1132 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
1133 | # CONFIG_EVENT_TRACER is not set | ||
1134 | # CONFIG_BOOT_TRACER is not set | ||
1135 | # CONFIG_TRACE_BRANCH_PROFILING is not set | ||
1136 | # CONFIG_STACK_TRACER is not set | ||
1137 | # CONFIG_KMEMTRACE is not set | ||
1138 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1139 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1140 | # CONFIG_FTRACE_STARTUP_TEST is not set | ||
1141 | # CONFIG_DYNAMIC_DEBUG is not set | 1151 | # CONFIG_DYNAMIC_DEBUG is not set |
1142 | # CONFIG_SAMPLES is not set | 1152 | # CONFIG_SAMPLES is not set |
1143 | CONFIG_HAVE_ARCH_KGDB=y | 1153 | CONFIG_HAVE_ARCH_KGDB=y |
@@ -1151,16 +1161,104 @@ CONFIG_ARM_UNWIND=y | |||
1151 | # CONFIG_SECURITY is not set | 1161 | # CONFIG_SECURITY is not set |
1152 | # CONFIG_SECURITYFS is not set | 1162 | # CONFIG_SECURITYFS is not set |
1153 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | 1163 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set |
1154 | # CONFIG_CRYPTO is not set | 1164 | CONFIG_CRYPTO=y |
1165 | |||
1166 | # | ||
1167 | # Crypto core or helper | ||
1168 | # | ||
1169 | # CONFIG_CRYPTO_FIPS is not set | ||
1170 | CONFIG_CRYPTO_ALGAPI=y | ||
1171 | CONFIG_CRYPTO_ALGAPI2=y | ||
1172 | # CONFIG_CRYPTO_MANAGER is not set | ||
1173 | # CONFIG_CRYPTO_MANAGER2 is not set | ||
1174 | # CONFIG_CRYPTO_GF128MUL is not set | ||
1175 | # CONFIG_CRYPTO_NULL is not set | ||
1176 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1177 | # CONFIG_CRYPTO_AUTHENC is not set | ||
1178 | # CONFIG_CRYPTO_TEST is not set | ||
1179 | |||
1180 | # | ||
1181 | # Authenticated Encryption with Associated Data | ||
1182 | # | ||
1183 | # CONFIG_CRYPTO_CCM is not set | ||
1184 | # CONFIG_CRYPTO_GCM is not set | ||
1185 | # CONFIG_CRYPTO_SEQIV is not set | ||
1186 | |||
1187 | # | ||
1188 | # Block modes | ||
1189 | # | ||
1190 | # CONFIG_CRYPTO_CBC is not set | ||
1191 | # CONFIG_CRYPTO_CTR is not set | ||
1192 | # CONFIG_CRYPTO_CTS is not set | ||
1193 | # CONFIG_CRYPTO_ECB is not set | ||
1194 | # CONFIG_CRYPTO_LRW is not set | ||
1195 | # CONFIG_CRYPTO_PCBC is not set | ||
1196 | # CONFIG_CRYPTO_XTS is not set | ||
1197 | |||
1198 | # | ||
1199 | # Hash modes | ||
1200 | # | ||
1201 | # CONFIG_CRYPTO_HMAC is not set | ||
1202 | # CONFIG_CRYPTO_XCBC is not set | ||
1203 | |||
1204 | # | ||
1205 | # Digest | ||
1206 | # | ||
1207 | # CONFIG_CRYPTO_CRC32C is not set | ||
1208 | # CONFIG_CRYPTO_MD4 is not set | ||
1209 | # CONFIG_CRYPTO_MD5 is not set | ||
1210 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
1211 | # CONFIG_CRYPTO_RMD128 is not set | ||
1212 | # CONFIG_CRYPTO_RMD160 is not set | ||
1213 | # CONFIG_CRYPTO_RMD256 is not set | ||
1214 | # CONFIG_CRYPTO_RMD320 is not set | ||
1215 | # CONFIG_CRYPTO_SHA1 is not set | ||
1216 | # CONFIG_CRYPTO_SHA256 is not set | ||
1217 | # CONFIG_CRYPTO_SHA512 is not set | ||
1218 | # CONFIG_CRYPTO_TGR192 is not set | ||
1219 | # CONFIG_CRYPTO_WP512 is not set | ||
1220 | |||
1221 | # | ||
1222 | # Ciphers | ||
1223 | # | ||
1224 | # CONFIG_CRYPTO_AES is not set | ||
1225 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1226 | # CONFIG_CRYPTO_ARC4 is not set | ||
1227 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1228 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1229 | # CONFIG_CRYPTO_CAST5 is not set | ||
1230 | # CONFIG_CRYPTO_CAST6 is not set | ||
1231 | # CONFIG_CRYPTO_DES is not set | ||
1232 | # CONFIG_CRYPTO_FCRYPT is not set | ||
1233 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1234 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1235 | # CONFIG_CRYPTO_SEED is not set | ||
1236 | # CONFIG_CRYPTO_SERPENT is not set | ||
1237 | # CONFIG_CRYPTO_TEA is not set | ||
1238 | # CONFIG_CRYPTO_TWOFISH is not set | ||
1239 | |||
1240 | # | ||
1241 | # Compression | ||
1242 | # | ||
1243 | CONFIG_CRYPTO_DEFLATE=y | ||
1244 | # CONFIG_CRYPTO_ZLIB is not set | ||
1245 | CONFIG_CRYPTO_LZO=y | ||
1246 | |||
1247 | # | ||
1248 | # Random Number Generation | ||
1249 | # | ||
1250 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
1251 | CONFIG_CRYPTO_HW=y | ||
1155 | CONFIG_BINARY_PRINTF=y | 1252 | CONFIG_BINARY_PRINTF=y |
1156 | 1253 | ||
1157 | # | 1254 | # |
1158 | # Library routines | 1255 | # Library routines |
1159 | # | 1256 | # |
1160 | CONFIG_BITREVERSE=y | 1257 | CONFIG_BITREVERSE=y |
1258 | CONFIG_RATIONAL=y | ||
1161 | CONFIG_GENERIC_FIND_LAST_BIT=y | 1259 | CONFIG_GENERIC_FIND_LAST_BIT=y |
1162 | # CONFIG_CRC_CCITT is not set | 1260 | # CONFIG_CRC_CCITT is not set |
1163 | # CONFIG_CRC16 is not set | 1261 | CONFIG_CRC16=y |
1164 | # CONFIG_CRC_T10DIF is not set | 1262 | # CONFIG_CRC_T10DIF is not set |
1165 | # CONFIG_CRC_ITU_T is not set | 1263 | # CONFIG_CRC_ITU_T is not set |
1166 | CONFIG_CRC32=y | 1264 | CONFIG_CRC32=y |
@@ -1168,6 +1266,8 @@ CONFIG_CRC32=y | |||
1168 | # CONFIG_LIBCRC32C is not set | 1266 | # CONFIG_LIBCRC32C is not set |
1169 | CONFIG_ZLIB_INFLATE=y | 1267 | CONFIG_ZLIB_INFLATE=y |
1170 | CONFIG_ZLIB_DEFLATE=y | 1268 | CONFIG_ZLIB_DEFLATE=y |
1269 | CONFIG_LZO_COMPRESS=y | ||
1270 | CONFIG_LZO_DECOMPRESS=y | ||
1171 | CONFIG_HAS_IOMEM=y | 1271 | CONFIG_HAS_IOMEM=y |
1172 | CONFIG_HAS_IOPORT=y | 1272 | CONFIG_HAS_IOPORT=y |
1173 | CONFIG_HAS_DMA=y | 1273 | CONFIG_HAS_DMA=y |
diff --git a/arch/arm/configs/mx3_defconfig b/arch/arm/configs/mx3_defconfig
index 20ada526f6de..a4f9a2a8149c 100644
--- a/arch/arm/configs/mx3_defconfig
+++ b/arch/arm/configs/mx3_defconfig
@@ -1,15 +1,15 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30-rc1 | 3 | # Linux kernel version: 2.6.31-rc4 |
4 | # Wed Apr 8 11:06:37 2009 | 4 | # Tue Jul 28 14:11:34 2009 |
5 | # | 5 | # |
6 | CONFIG_ARM=y | 6 | CONFIG_ARM=y |
7 | CONFIG_HAVE_PWM=y | ||
7 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y | 8 | CONFIG_SYS_SUPPORTS_APM_EMULATION=y |
8 | CONFIG_GENERIC_GPIO=y | 9 | CONFIG_GENERIC_GPIO=y |
9 | CONFIG_GENERIC_TIME=y | 10 | CONFIG_GENERIC_TIME=y |
10 | CONFIG_GENERIC_CLOCKEVENTS=y | 11 | CONFIG_GENERIC_CLOCKEVENTS=y |
11 | CONFIG_MMU=y | 12 | CONFIG_MMU=y |
12 | # CONFIG_NO_IOPORT is not set | ||
13 | CONFIG_GENERIC_HARDIRQS=y | 13 | CONFIG_GENERIC_HARDIRQS=y |
14 | CONFIG_STACKTRACE_SUPPORT=y | 14 | CONFIG_STACKTRACE_SUPPORT=y |
15 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | 15 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y |
@@ -18,14 +18,13 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y | |||
18 | CONFIG_HARDIRQS_SW_RESEND=y | 18 | CONFIG_HARDIRQS_SW_RESEND=y |
19 | CONFIG_GENERIC_IRQ_PROBE=y | 19 | CONFIG_GENERIC_IRQ_PROBE=y |
20 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 20 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
21 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set | ||
22 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | ||
23 | CONFIG_GENERIC_HWEIGHT=y | 21 | CONFIG_GENERIC_HWEIGHT=y |
24 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 22 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
25 | CONFIG_ARCH_MTD_XIP=y | 23 | CONFIG_ARCH_MTD_XIP=y |
26 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | 24 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y |
27 | CONFIG_VECTORS_BASE=0xffff0000 | 25 | CONFIG_VECTORS_BASE=0xffff0000 |
28 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 26 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
27 | CONFIG_CONSTRUCTORS=y | ||
29 | 28 | ||
30 | # | 29 | # |
31 | # General setup | 30 | # General setup |
@@ -86,7 +85,12 @@ CONFIG_TIMERFD=y | |||
86 | CONFIG_EVENTFD=y | 85 | CONFIG_EVENTFD=y |
87 | CONFIG_SHMEM=y | 86 | CONFIG_SHMEM=y |
88 | CONFIG_AIO=y | 87 | CONFIG_AIO=y |
88 | |||
89 | # | ||
90 | # Performance Counters | ||
91 | # | ||
89 | CONFIG_VM_EVENT_COUNTERS=y | 92 | CONFIG_VM_EVENT_COUNTERS=y |
93 | # CONFIG_STRIP_ASM_SYMS is not set | ||
90 | CONFIG_COMPAT_BRK=y | 94 | CONFIG_COMPAT_BRK=y |
91 | CONFIG_SLAB=y | 95 | CONFIG_SLAB=y |
92 | # CONFIG_SLUB is not set | 96 | # CONFIG_SLUB is not set |
@@ -97,6 +101,11 @@ CONFIG_HAVE_OPROFILE=y | |||
97 | # CONFIG_KPROBES is not set | 101 | # CONFIG_KPROBES is not set |
98 | CONFIG_HAVE_KPROBES=y | 102 | CONFIG_HAVE_KPROBES=y |
99 | CONFIG_HAVE_KRETPROBES=y | 103 | CONFIG_HAVE_KRETPROBES=y |
104 | CONFIG_HAVE_CLK=y | ||
105 | |||
106 | # | ||
107 | # GCOV-based kernel profiling | ||
108 | # | ||
100 | # CONFIG_SLOW_WORK is not set | 109 | # CONFIG_SLOW_WORK is not set |
101 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y | 110 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y |
102 | CONFIG_SLABINFO=y | 111 | CONFIG_SLABINFO=y |
@@ -109,7 +118,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y | |||
109 | CONFIG_MODVERSIONS=y | 118 | CONFIG_MODVERSIONS=y |
110 | # CONFIG_MODULE_SRCVERSION_ALL is not set | 119 | # CONFIG_MODULE_SRCVERSION_ALL is not set |
111 | CONFIG_BLOCK=y | 120 | CONFIG_BLOCK=y |
112 | # CONFIG_LBD is not set | 121 | CONFIG_LBDAF=y |
113 | # CONFIG_BLK_DEV_BSG is not set | 122 | # CONFIG_BLK_DEV_BSG is not set |
114 | # CONFIG_BLK_DEV_INTEGRITY is not set | 123 | # CONFIG_BLK_DEV_INTEGRITY is not set |
115 | 124 | ||
@@ -136,13 +145,14 @@ CONFIG_FREEZER=y | |||
136 | # CONFIG_ARCH_VERSATILE is not set | 145 | # CONFIG_ARCH_VERSATILE is not set |
137 | # CONFIG_ARCH_AT91 is not set | 146 | # CONFIG_ARCH_AT91 is not set |
138 | # CONFIG_ARCH_CLPS711X is not set | 147 | # CONFIG_ARCH_CLPS711X is not set |
148 | # CONFIG_ARCH_GEMINI is not set | ||
139 | # CONFIG_ARCH_EBSA110 is not set | 149 | # CONFIG_ARCH_EBSA110 is not set |
140 | # CONFIG_ARCH_EP93XX is not set | 150 | # CONFIG_ARCH_EP93XX is not set |
141 | # CONFIG_ARCH_GEMINI is not set | ||
142 | # CONFIG_ARCH_FOOTBRIDGE is not set | 151 | # CONFIG_ARCH_FOOTBRIDGE is not set |
152 | CONFIG_ARCH_MXC=y | ||
153 | # CONFIG_ARCH_STMP3XXX is not set | ||
143 | # CONFIG_ARCH_NETX is not set | 154 | # CONFIG_ARCH_NETX is not set |
144 | # CONFIG_ARCH_H720X is not set | 155 | # CONFIG_ARCH_H720X is not set |
145 | # CONFIG_ARCH_IMX is not set | ||
146 | # CONFIG_ARCH_IOP13XX is not set | 156 | # CONFIG_ARCH_IOP13XX is not set |
147 | # CONFIG_ARCH_IOP32X is not set | 157 | # CONFIG_ARCH_IOP32X is not set |
148 | # CONFIG_ARCH_IOP33X is not set | 158 | # CONFIG_ARCH_IOP33X is not set |
@@ -151,25 +161,25 @@ CONFIG_FREEZER=y | |||
151 | # CONFIG_ARCH_IXP4XX is not set | 161 | # CONFIG_ARCH_IXP4XX is not set |
152 | # CONFIG_ARCH_L7200 is not set | 162 | # CONFIG_ARCH_L7200 is not set |
153 | # CONFIG_ARCH_KIRKWOOD is not set | 163 | # CONFIG_ARCH_KIRKWOOD is not set |
154 | # CONFIG_ARCH_KS8695 is not set | ||
155 | # CONFIG_ARCH_NS9XXX is not set | ||
156 | # CONFIG_ARCH_LOKI is not set | 164 | # CONFIG_ARCH_LOKI is not set |
157 | # CONFIG_ARCH_MV78XX0 is not set | 165 | # CONFIG_ARCH_MV78XX0 is not set |
158 | CONFIG_ARCH_MXC=y | ||
159 | # CONFIG_ARCH_ORION5X is not set | 166 | # CONFIG_ARCH_ORION5X is not set |
167 | # CONFIG_ARCH_MMP is not set | ||
168 | # CONFIG_ARCH_KS8695 is not set | ||
169 | # CONFIG_ARCH_NS9XXX is not set | ||
170 | # CONFIG_ARCH_W90X900 is not set | ||
160 | # CONFIG_ARCH_PNX4008 is not set | 171 | # CONFIG_ARCH_PNX4008 is not set |
161 | # CONFIG_ARCH_PXA is not set | 172 | # CONFIG_ARCH_PXA is not set |
162 | # CONFIG_ARCH_MMP is not set | 173 | # CONFIG_ARCH_MSM is not set |
163 | # CONFIG_ARCH_RPC is not set | 174 | # CONFIG_ARCH_RPC is not set |
164 | # CONFIG_ARCH_SA1100 is not set | 175 | # CONFIG_ARCH_SA1100 is not set |
165 | # CONFIG_ARCH_S3C2410 is not set | 176 | # CONFIG_ARCH_S3C2410 is not set |
166 | # CONFIG_ARCH_S3C64XX is not set | 177 | # CONFIG_ARCH_S3C64XX is not set |
167 | # CONFIG_ARCH_SHARK is not set | 178 | # CONFIG_ARCH_SHARK is not set |
168 | # CONFIG_ARCH_LH7A40X is not set | 179 | # CONFIG_ARCH_LH7A40X is not set |
180 | # CONFIG_ARCH_U300 is not set | ||
169 | # CONFIG_ARCH_DAVINCI is not set | 181 | # CONFIG_ARCH_DAVINCI is not set |
170 | # CONFIG_ARCH_OMAP is not set | 182 | # CONFIG_ARCH_OMAP is not set |
171 | # CONFIG_ARCH_MSM is not set | ||
172 | # CONFIG_ARCH_W90X900 is not set | ||
173 | 183 | ||
174 | # | 184 | # |
175 | # Freescale MXC Implementations | 185 | # Freescale MXC Implementations |
@@ -178,6 +188,7 @@ CONFIG_ARCH_MXC=y | |||
178 | # CONFIG_ARCH_MX2 is not set | 188 | # CONFIG_ARCH_MX2 is not set |
179 | CONFIG_ARCH_MX3=y | 189 | CONFIG_ARCH_MX3=y |
180 | CONFIG_ARCH_MX31=y | 190 | CONFIG_ARCH_MX31=y |
191 | CONFIG_ARCH_MX35=y | ||
181 | 192 | ||
182 | # | 193 | # |
183 | # MX3 platforms: | 194 | # MX3 platforms: |
@@ -185,12 +196,19 @@ CONFIG_ARCH_MX31=y | |||
185 | CONFIG_MACH_MX31ADS=y | 196 | CONFIG_MACH_MX31ADS=y |
186 | CONFIG_MACH_MX31ADS_WM1133_EV1=y | 197 | CONFIG_MACH_MX31ADS_WM1133_EV1=y |
187 | CONFIG_MACH_PCM037=y | 198 | CONFIG_MACH_PCM037=y |
199 | CONFIG_MACH_PCM037_EET=y | ||
188 | CONFIG_MACH_MX31LITE=y | 200 | CONFIG_MACH_MX31LITE=y |
189 | CONFIG_MACH_MX31_3DS=y | 201 | CONFIG_MACH_MX31_3DS=y |
190 | CONFIG_MACH_MX31MOBOARD=y | 202 | CONFIG_MACH_MX31MOBOARD=y |
203 | CONFIG_MACH_MX31LILLY=y | ||
191 | CONFIG_MACH_QONG=y | 204 | CONFIG_MACH_QONG=y |
205 | CONFIG_MACH_PCM043=y | ||
206 | CONFIG_MACH_ARMADILLO5X0=y | ||
207 | CONFIG_MACH_MX35_3DS=y | ||
192 | CONFIG_MXC_IRQ_PRIOR=y | 208 | CONFIG_MXC_IRQ_PRIOR=y |
193 | CONFIG_MXC_PWM=y | 209 | CONFIG_MXC_PWM=y |
210 | CONFIG_ARCH_HAS_RNGA=y | ||
211 | CONFIG_ARCH_MXC_IOMUX_V3=y | ||
194 | 212 | ||
195 | # | 213 | # |
196 | # Processor Type | 214 | # Processor Type |
@@ -218,6 +236,7 @@ CONFIG_ARM_THUMB=y | |||
218 | # CONFIG_CPU_BPREDICT_DISABLE is not set | 236 | # CONFIG_CPU_BPREDICT_DISABLE is not set |
219 | CONFIG_OUTER_CACHE=y | 237 | CONFIG_OUTER_CACHE=y |
220 | CONFIG_CACHE_L2X0=y | 238 | CONFIG_CACHE_L2X0=y |
239 | # CONFIG_ARM_ERRATA_411920 is not set | ||
221 | CONFIG_COMMON_CLKDEV=y | 240 | CONFIG_COMMON_CLKDEV=y |
222 | 241 | ||
223 | # | 242 | # |
@@ -242,7 +261,6 @@ CONFIG_PREEMPT=y | |||
242 | CONFIG_HZ=100 | 261 | CONFIG_HZ=100 |
243 | CONFIG_AEABI=y | 262 | CONFIG_AEABI=y |
244 | CONFIG_OABI_COMPAT=y | 263 | CONFIG_OABI_COMPAT=y |
245 | CONFIG_ARCH_FLATMEM_HAS_HOLES=y | ||
246 | # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set | 264 | # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set |
247 | # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set | 265 | # CONFIG_ARCH_SELECT_MEMORY_MODEL is not set |
248 | # CONFIG_HIGHMEM is not set | 266 | # CONFIG_HIGHMEM is not set |
@@ -257,10 +275,11 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 | |||
257 | # CONFIG_PHYS_ADDR_T_64BIT is not set | 275 | # CONFIG_PHYS_ADDR_T_64BIT is not set |
258 | CONFIG_ZONE_DMA_FLAG=0 | 276 | CONFIG_ZONE_DMA_FLAG=0 |
259 | CONFIG_VIRT_TO_BUS=y | 277 | CONFIG_VIRT_TO_BUS=y |
260 | CONFIG_UNEVICTABLE_LRU=y | ||
261 | CONFIG_HAVE_MLOCK=y | 278 | CONFIG_HAVE_MLOCK=y |
262 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | 279 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y |
280 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
263 | CONFIG_ALIGNMENT_TRAP=y | 281 | CONFIG_ALIGNMENT_TRAP=y |
282 | # CONFIG_UACCESS_WITH_MEMCPY is not set | ||
264 | 283 | ||
265 | # | 284 | # |
266 | # Boot options | 285 | # Boot options |
@@ -362,6 +381,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" | |||
362 | # CONFIG_ECONET is not set | 381 | # CONFIG_ECONET is not set |
363 | # CONFIG_WAN_ROUTER is not set | 382 | # CONFIG_WAN_ROUTER is not set |
364 | # CONFIG_PHONET is not set | 383 | # CONFIG_PHONET is not set |
384 | # CONFIG_IEEE802154 is not set | ||
365 | # CONFIG_NET_SCHED is not set | 385 | # CONFIG_NET_SCHED is not set |
366 | # CONFIG_DCB is not set | 386 | # CONFIG_DCB is not set |
367 | 387 | ||
@@ -465,7 +485,16 @@ CONFIG_MTD_PHYSMAP=y | |||
465 | # CONFIG_MTD_DOC2000 is not set | 485 | # CONFIG_MTD_DOC2000 is not set |
466 | # CONFIG_MTD_DOC2001 is not set | 486 | # CONFIG_MTD_DOC2001 is not set |
467 | # CONFIG_MTD_DOC2001PLUS is not set | 487 | # CONFIG_MTD_DOC2001PLUS is not set |
468 | # CONFIG_MTD_NAND is not set | 488 | CONFIG_MTD_NAND=y |
489 | # CONFIG_MTD_NAND_VERIFY_WRITE is not set | ||
490 | # CONFIG_MTD_NAND_ECC_SMC is not set | ||
491 | # CONFIG_MTD_NAND_MUSEUM_IDS is not set | ||
492 | # CONFIG_MTD_NAND_GPIO is not set | ||
493 | CONFIG_MTD_NAND_IDS=y | ||
494 | # CONFIG_MTD_NAND_DISKONCHIP is not set | ||
495 | # CONFIG_MTD_NAND_NANDSIM is not set | ||
496 | # CONFIG_MTD_NAND_PLATFORM is not set | ||
497 | CONFIG_MTD_NAND_MXC=y | ||
469 | # CONFIG_MTD_ONENAND is not set | 498 | # CONFIG_MTD_ONENAND is not set |
470 | 499 | ||
471 | # | 500 | # |
@@ -476,10 +505,30 @@ CONFIG_MTD_PHYSMAP=y | |||
476 | # | 505 | # |
477 | # UBI - Unsorted block images | 506 | # UBI - Unsorted block images |
478 | # | 507 | # |
479 | # CONFIG_MTD_UBI is not set | 508 | CONFIG_MTD_UBI=y |
509 | CONFIG_MTD_UBI_WL_THRESHOLD=4096 | ||
510 | CONFIG_MTD_UBI_BEB_RESERVE=1 | ||
511 | # CONFIG_MTD_UBI_GLUEBI is not set | ||
512 | |||
513 | # | ||
514 | # UBI debugging options | ||
515 | # | ||
516 | # CONFIG_MTD_UBI_DEBUG is not set | ||
480 | # CONFIG_PARPORT is not set | 517 | # CONFIG_PARPORT is not set |
481 | # CONFIG_BLK_DEV is not set | 518 | # CONFIG_BLK_DEV is not set |
482 | # CONFIG_MISC_DEVICES is not set | 519 | CONFIG_MISC_DEVICES=y |
520 | # CONFIG_ICS932S401 is not set | ||
521 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
522 | # CONFIG_ISL29003 is not set | ||
523 | # CONFIG_C2PORT is not set | ||
524 | |||
525 | # | ||
526 | # EEPROM support | ||
527 | # | ||
528 | CONFIG_EEPROM_AT24=y | ||
529 | # CONFIG_EEPROM_LEGACY is not set | ||
530 | # CONFIG_EEPROM_MAX6875 is not set | ||
531 | # CONFIG_EEPROM_93CX6 is not set | ||
483 | CONFIG_HAVE_IDE=y | 532 | CONFIG_HAVE_IDE=y |
484 | # CONFIG_IDE is not set | 533 | # CONFIG_IDE is not set |
485 | 534 | ||
@@ -493,7 +542,6 @@ CONFIG_HAVE_IDE=y | |||
493 | # CONFIG_ATA is not set | 542 | # CONFIG_ATA is not set |
494 | # CONFIG_MD is not set | 543 | # CONFIG_MD is not set |
495 | CONFIG_NETDEVICES=y | 544 | CONFIG_NETDEVICES=y |
496 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
497 | # CONFIG_DUMMY is not set | 545 | # CONFIG_DUMMY is not set |
498 | # CONFIG_BONDING is not set | 546 | # CONFIG_BONDING is not set |
499 | # CONFIG_MACVLAN is not set | 547 | # CONFIG_MACVLAN is not set |
@@ -528,7 +576,7 @@ CONFIG_MII=y | |||
528 | # CONFIG_ETHOC is not set | 576 | # CONFIG_ETHOC is not set |
529 | # CONFIG_SMC911X is not set | 577 | # CONFIG_SMC911X is not set |
530 | CONFIG_SMSC911X=y | 578 | CONFIG_SMSC911X=y |
531 | # CONFIG_DNET is not set | 579 | CONFIG_DNET=y |
532 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | 580 | # CONFIG_IBM_NEW_EMAC_ZMII is not set |
533 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | 581 | # CONFIG_IBM_NEW_EMAC_RGMII is not set |
534 | # CONFIG_IBM_NEW_EMAC_TAH is not set | 582 | # CONFIG_IBM_NEW_EMAC_TAH is not set |
@@ -537,8 +585,10 @@ CONFIG_SMSC911X=y | |||
537 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | 585 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set |
538 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | 586 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set |
539 | # CONFIG_B44 is not set | 587 | # CONFIG_B44 is not set |
540 | CONFIG_CS89x0=y | 588 | # CONFIG_CS89x0 is not set |
541 | CONFIG_CS89x0_NONISA_IRQ=y | 589 | # CONFIG_KS8842 is not set |
590 | CONFIG_FEC=y | ||
591 | # CONFIG_FEC2 is not set | ||
542 | # CONFIG_NETDEV_1000 is not set | 592 | # CONFIG_NETDEV_1000 is not set |
543 | # CONFIG_NETDEV_10000 is not set | 593 | # CONFIG_NETDEV_10000 is not set |
544 | 594 | ||
@@ -609,6 +659,7 @@ CONFIG_I2C_HELPER_AUTO=y | |||
609 | # | 659 | # |
610 | # I2C system bus drivers (mostly embedded / system-on-chip) | 660 | # I2C system bus drivers (mostly embedded / system-on-chip) |
611 | # | 661 | # |
662 | # CONFIG_I2C_DESIGNWARE is not set | ||
612 | # CONFIG_I2C_GPIO is not set | 663 | # CONFIG_I2C_GPIO is not set |
613 | CONFIG_I2C_IMX=y | 664 | CONFIG_I2C_IMX=y |
614 | # CONFIG_I2C_OCORES is not set | 665 | # CONFIG_I2C_OCORES is not set |
@@ -633,7 +684,6 @@ CONFIG_I2C_IMX=y | |||
633 | # CONFIG_SENSORS_PCF8574 is not set | 684 | # CONFIG_SENSORS_PCF8574 is not set |
634 | # CONFIG_PCF8575 is not set | 685 | # CONFIG_PCF8575 is not set |
635 | # CONFIG_SENSORS_PCA9539 is not set | 686 | # CONFIG_SENSORS_PCA9539 is not set |
636 | # CONFIG_SENSORS_MAX6875 is not set | ||
637 | # CONFIG_SENSORS_TSL2550 is not set | 687 | # CONFIG_SENSORS_TSL2550 is not set |
638 | # CONFIG_I2C_DEBUG_CORE is not set | 688 | # CONFIG_I2C_DEBUG_CORE is not set |
639 | # CONFIG_I2C_DEBUG_ALGO is not set | 689 | # CONFIG_I2C_DEBUG_ALGO is not set |
@@ -669,6 +719,7 @@ CONFIG_W1=y | |||
669 | # | 719 | # |
670 | # CONFIG_W1_MASTER_DS2482 is not set | 720 | # CONFIG_W1_MASTER_DS2482 is not set |
671 | CONFIG_W1_MASTER_MXC=y | 721 | CONFIG_W1_MASTER_MXC=y |
722 | # CONFIG_W1_MASTER_DS1WM is not set | ||
672 | # CONFIG_W1_MASTER_GPIO is not set | 723 | # CONFIG_W1_MASTER_GPIO is not set |
673 | 724 | ||
674 | # | 725 | # |
@@ -703,6 +754,8 @@ CONFIG_SSB_POSSIBLE=y | |||
703 | # CONFIG_TPS65010 is not set | 754 | # CONFIG_TPS65010 is not set |
704 | # CONFIG_TWL4030_CORE is not set | 755 | # CONFIG_TWL4030_CORE is not set |
705 | # CONFIG_MFD_TMIO is not set | 756 | # CONFIG_MFD_TMIO is not set |
757 | # CONFIG_MFD_T7L66XB is not set | ||
758 | # CONFIG_MFD_TC6387XB is not set | ||
706 | # CONFIG_MFD_TC6393XB is not set | 759 | # CONFIG_MFD_TC6393XB is not set |
707 | # CONFIG_PMIC_DA903X is not set | 760 | # CONFIG_PMIC_DA903X is not set |
708 | # CONFIG_MFD_WM8400 is not set | 761 | # CONFIG_MFD_WM8400 is not set |
@@ -711,10 +764,8 @@ CONFIG_MFD_WM8350_CONFIG_MODE_0=y | |||
711 | CONFIG_MFD_WM8352_CONFIG_MODE_0=y | 764 | CONFIG_MFD_WM8352_CONFIG_MODE_0=y |
712 | CONFIG_MFD_WM8350_I2C=y | 765 | CONFIG_MFD_WM8350_I2C=y |
713 | # CONFIG_MFD_PCF50633 is not set | 766 | # CONFIG_MFD_PCF50633 is not set |
714 | 767 | # CONFIG_AB3100_CORE is not set | |
715 | # | 768 | CONFIG_MEDIA_SUPPORT=y |
716 | # Multimedia devices | ||
717 | # | ||
718 | 769 | ||
719 | # | 770 | # |
720 | # Multimedia core support | 771 | # Multimedia core support |
@@ -758,8 +809,10 @@ CONFIG_SOC_CAMERA_MT9T031=y | |||
758 | CONFIG_SOC_CAMERA_MT9V022=y | 809 | CONFIG_SOC_CAMERA_MT9V022=y |
759 | CONFIG_SOC_CAMERA_TW9910=y | 810 | CONFIG_SOC_CAMERA_TW9910=y |
760 | # CONFIG_SOC_CAMERA_PLATFORM is not set | 811 | # CONFIG_SOC_CAMERA_PLATFORM is not set |
761 | # CONFIG_SOC_CAMERA_OV772X is not set | 812 | CONFIG_SOC_CAMERA_OV772X=y |
813 | CONFIG_MX3_VIDEO=y | ||
762 | CONFIG_VIDEO_MX3=y | 814 | CONFIG_VIDEO_MX3=y |
815 | # CONFIG_VIDEO_SH_MOBILE_CEU is not set | ||
763 | # CONFIG_RADIO_ADAPTERS is not set | 816 | # CONFIG_RADIO_ADAPTERS is not set |
764 | # CONFIG_DAB is not set | 817 | # CONFIG_DAB is not set |
765 | 818 | ||
@@ -847,8 +900,11 @@ CONFIG_REGULATOR=y | |||
847 | # CONFIG_REGULATOR_DEBUG is not set | 900 | # CONFIG_REGULATOR_DEBUG is not set |
848 | # CONFIG_REGULATOR_FIXED_VOLTAGE is not set | 901 | # CONFIG_REGULATOR_FIXED_VOLTAGE is not set |
849 | # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set | 902 | # CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set |
903 | # CONFIG_REGULATOR_USERSPACE_CONSUMER is not set | ||
850 | # CONFIG_REGULATOR_BQ24022 is not set | 904 | # CONFIG_REGULATOR_BQ24022 is not set |
905 | # CONFIG_REGULATOR_MAX1586 is not set | ||
851 | CONFIG_REGULATOR_WM8350=y | 906 | CONFIG_REGULATOR_WM8350=y |
907 | # CONFIG_REGULATOR_LP3971 is not set | ||
852 | # CONFIG_UIO is not set | 908 | # CONFIG_UIO is not set |
853 | # CONFIG_STAGING is not set | 909 | # CONFIG_STAGING is not set |
854 | 910 | ||
@@ -861,10 +917,12 @@ CONFIG_REGULATOR_WM8350=y | |||
861 | # CONFIG_REISERFS_FS is not set | 917 | # CONFIG_REISERFS_FS is not set |
862 | # CONFIG_JFS_FS is not set | 918 | # CONFIG_JFS_FS is not set |
863 | # CONFIG_FS_POSIX_ACL is not set | 919 | # CONFIG_FS_POSIX_ACL is not set |
864 | CONFIG_FILE_LOCKING=y | ||
865 | # CONFIG_XFS_FS is not set | 920 | # CONFIG_XFS_FS is not set |
921 | # CONFIG_GFS2_FS is not set | ||
866 | # CONFIG_OCFS2_FS is not set | 922 | # CONFIG_OCFS2_FS is not set |
867 | # CONFIG_BTRFS_FS is not set | 923 | # CONFIG_BTRFS_FS is not set |
924 | CONFIG_FILE_LOCKING=y | ||
925 | CONFIG_FSNOTIFY=y | ||
868 | # CONFIG_DNOTIFY is not set | 926 | # CONFIG_DNOTIFY is not set |
869 | CONFIG_INOTIFY=y | 927 | CONFIG_INOTIFY=y |
870 | CONFIG_INOTIFY_USER=y | 928 | CONFIG_INOTIFY_USER=y |
@@ -921,6 +979,12 @@ CONFIG_JFFS2_ZLIB=y | |||
921 | # CONFIG_JFFS2_LZO is not set | 979 | # CONFIG_JFFS2_LZO is not set |
922 | CONFIG_JFFS2_RTIME=y | 980 | CONFIG_JFFS2_RTIME=y |
923 | # CONFIG_JFFS2_RUBIN is not set | 981 | # CONFIG_JFFS2_RUBIN is not set |
982 | CONFIG_UBIFS_FS=y | ||
983 | # CONFIG_UBIFS_FS_XATTR is not set | ||
984 | # CONFIG_UBIFS_FS_ADVANCED_COMPR is not set | ||
985 | CONFIG_UBIFS_FS_LZO=y | ||
986 | CONFIG_UBIFS_FS_ZLIB=y | ||
987 | # CONFIG_UBIFS_FS_DEBUG is not set | ||
924 | # CONFIG_CRAMFS is not set | 988 | # CONFIG_CRAMFS is not set |
925 | # CONFIG_SQUASHFS is not set | 989 | # CONFIG_SQUASHFS is not set |
926 | # CONFIG_VXFS_FS is not set | 990 | # CONFIG_VXFS_FS is not set |
@@ -937,6 +1001,7 @@ CONFIG_NFS_FS=y | |||
937 | CONFIG_NFS_V3=y | 1001 | CONFIG_NFS_V3=y |
938 | # CONFIG_NFS_V3_ACL is not set | 1002 | # CONFIG_NFS_V3_ACL is not set |
939 | CONFIG_NFS_V4=y | 1003 | CONFIG_NFS_V4=y |
1004 | # CONFIG_NFS_V4_1 is not set | ||
940 | CONFIG_ROOT_NFS=y | 1005 | CONFIG_ROOT_NFS=y |
941 | # CONFIG_NFSD is not set | 1006 | # CONFIG_NFSD is not set |
942 | CONFIG_LOCKD=y | 1007 | CONFIG_LOCKD=y |
@@ -979,22 +1044,7 @@ CONFIG_FRAME_WARN=1024 | |||
979 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 1044 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
980 | CONFIG_HAVE_FUNCTION_TRACER=y | 1045 | CONFIG_HAVE_FUNCTION_TRACER=y |
981 | CONFIG_TRACING_SUPPORT=y | 1046 | CONFIG_TRACING_SUPPORT=y |
982 | 1047 | # CONFIG_FTRACE is not set | |
983 | # | ||
984 | # Tracers | ||
985 | # | ||
986 | # CONFIG_FUNCTION_TRACER is not set | ||
987 | # CONFIG_IRQSOFF_TRACER is not set | ||
988 | # CONFIG_PREEMPT_TRACER is not set | ||
989 | # CONFIG_SCHED_TRACER is not set | ||
990 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
991 | # CONFIG_EVENT_TRACER is not set | ||
992 | # CONFIG_BOOT_TRACER is not set | ||
993 | # CONFIG_TRACE_BRANCH_PROFILING is not set | ||
994 | # CONFIG_STACK_TRACER is not set | ||
995 | # CONFIG_KMEMTRACE is not set | ||
996 | # CONFIG_WORKQUEUE_TRACER is not set | ||
997 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
998 | # CONFIG_SAMPLES is not set | 1048 | # CONFIG_SAMPLES is not set |
999 | CONFIG_HAVE_ARCH_KGDB=y | 1049 | CONFIG_HAVE_ARCH_KGDB=y |
1000 | CONFIG_ARM_UNWIND=y | 1050 | CONFIG_ARM_UNWIND=y |
@@ -1094,9 +1144,9 @@ CONFIG_CRYPTO_DES=y | |||
1094 | # | 1144 | # |
1095 | # Compression | 1145 | # Compression |
1096 | # | 1146 | # |
1097 | # CONFIG_CRYPTO_DEFLATE is not set | 1147 | CONFIG_CRYPTO_DEFLATE=y |
1098 | # CONFIG_CRYPTO_ZLIB is not set | 1148 | # CONFIG_CRYPTO_ZLIB is not set |
1099 | # CONFIG_CRYPTO_LZO is not set | 1149 | CONFIG_CRYPTO_LZO=y |
1100 | 1150 | ||
1101 | # | 1151 | # |
1102 | # Random Number Generation | 1152 | # Random Number Generation |
@@ -1109,9 +1159,10 @@ CONFIG_CRYPTO_HW=y | |||
1109 | # Library routines | 1159 | # Library routines |
1110 | # | 1160 | # |
1111 | CONFIG_BITREVERSE=y | 1161 | CONFIG_BITREVERSE=y |
1162 | CONFIG_RATIONAL=y | ||
1112 | CONFIG_GENERIC_FIND_LAST_BIT=y | 1163 | CONFIG_GENERIC_FIND_LAST_BIT=y |
1113 | # CONFIG_CRC_CCITT is not set | 1164 | # CONFIG_CRC_CCITT is not set |
1114 | # CONFIG_CRC16 is not set | 1165 | CONFIG_CRC16=y |
1115 | # CONFIG_CRC_T10DIF is not set | 1166 | # CONFIG_CRC_T10DIF is not set |
1116 | # CONFIG_CRC_ITU_T is not set | 1167 | # CONFIG_CRC_ITU_T is not set |
1117 | CONFIG_CRC32=y | 1168 | CONFIG_CRC32=y |
@@ -1119,6 +1170,8 @@ CONFIG_CRC32=y | |||
1119 | # CONFIG_LIBCRC32C is not set | 1170 | # CONFIG_LIBCRC32C is not set |
1120 | CONFIG_ZLIB_INFLATE=y | 1171 | CONFIG_ZLIB_INFLATE=y |
1121 | CONFIG_ZLIB_DEFLATE=y | 1172 | CONFIG_ZLIB_DEFLATE=y |
1173 | CONFIG_LZO_COMPRESS=y | ||
1174 | CONFIG_LZO_DECOMPRESS=y | ||
1122 | CONFIG_HAS_IOMEM=y | 1175 | CONFIG_HAS_IOMEM=y |
1123 | CONFIG_HAS_IOPORT=y | 1176 | CONFIG_HAS_IOPORT=y |
1124 | CONFIG_HAS_DMA=y | 1177 | CONFIG_HAS_DMA=y |
diff --git a/arch/arm/configs/omap3_evm_defconfig b/arch/arm/configs/omap3_evm_defconfig index 28be17fbc157..d5ff4776cd0a 100644 --- a/arch/arm/configs/omap3_evm_defconfig +++ b/arch/arm/configs/omap3_evm_defconfig | |||
@@ -1107,7 +1107,7 @@ CONFIG_USB_ZERO=m | |||
1107 | CONFIG_USB_OTG_UTILS=y | 1107 | CONFIG_USB_OTG_UTILS=y |
1108 | # CONFIG_USB_GPIO_VBUS is not set | 1108 | # CONFIG_USB_GPIO_VBUS is not set |
1109 | # CONFIG_ISP1301_OMAP is not set | 1109 | # CONFIG_ISP1301_OMAP is not set |
1110 | CONFIG_TWL4030_USB=y | 1110 | # CONFIG_TWL4030_USB is not set |
1111 | # CONFIG_NOP_USB_XCEIV is not set | 1111 | # CONFIG_NOP_USB_XCEIV is not set |
1112 | CONFIG_MMC=y | 1112 | CONFIG_MMC=y |
1113 | # CONFIG_MMC_DEBUG is not set | 1113 | # CONFIG_MMC_DEBUG is not set |
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 9e07fe507029..9ed2377fe8e5 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h | |||
@@ -159,8 +159,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) | |||
159 | 159 | ||
160 | #else /* ARM_ARCH_6 */ | 160 | #else /* ARM_ARCH_6 */ |
161 | 161 | ||
162 | #include <asm/system.h> | ||
163 | |||
164 | #ifdef CONFIG_SMP | 162 | #ifdef CONFIG_SMP |
165 | #error SMP not supported on pre-ARMv6 CPUs | 163 | #error SMP not supported on pre-ARMv6 CPUs |
166 | #endif | 164 | #endif |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 366e5097a41a..8c3de1a350b5 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -148,7 +148,7 @@ trace: | |||
148 | sub r0, r0, #MCOUNT_INSN_SIZE | 148 | sub r0, r0, #MCOUNT_INSN_SIZE |
149 | mov lr, pc | 149 | mov lr, pc |
150 | mov pc, r2 | 150 | mov pc, r2 |
151 | mov lr, r1 @ restore lr | 151 | ldr lr, [fp, #-4] @ restore lr |
152 | ldmia sp!, {r0-r3, pc} | 152 | ldmia sp!, {r0-r3, pc} |
153 | 153 | ||
154 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 154 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 93bb4247b7ed..f6bc5d442782 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -133,7 +133,7 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, | |||
133 | } | 133 | } |
134 | 134 | ||
135 | #ifdef CONFIG_CRUNCH | 135 | #ifdef CONFIG_CRUNCH |
136 | static int preserve_crunch_context(struct crunch_sigframe *frame) | 136 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) |
137 | { | 137 | { |
138 | char kbuf[sizeof(*frame) + 8]; | 138 | char kbuf[sizeof(*frame) + 8]; |
139 | struct crunch_sigframe *kframe; | 139 | struct crunch_sigframe *kframe; |
@@ -146,7 +146,7 @@ static int preserve_crunch_context(struct crunch_sigframe *frame) | |||
146 | return __copy_to_user(frame, kframe, sizeof(*frame)); | 146 | return __copy_to_user(frame, kframe, sizeof(*frame)); |
147 | } | 147 | } |
148 | 148 | ||
149 | static int restore_crunch_context(struct crunch_sigframe *frame) | 149 | static int restore_crunch_context(struct crunch_sigframe __user *frame) |
150 | { | 150 | { |
151 | char kbuf[sizeof(*frame) + 8]; | 151 | char kbuf[sizeof(*frame) + 8]; |
152 | struct crunch_sigframe *kframe; | 152 | struct crunch_sigframe *kframe; |
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 5ac2f565d860..d6ab64ccd496 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <mach/serial.h> | 37 | #include <mach/serial.h> |
38 | #include <mach/nand.h> | 38 | #include <mach/nand.h> |
39 | #include <mach/mmc.h> | 39 | #include <mach/mmc.h> |
40 | #include <mach/common.h> | ||
41 | 40 | ||
42 | #define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000 | 41 | #define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000 |
43 | #define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000 | 42 | #define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000 |
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index 28c9008df4f4..84ad5d161a87 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <mach/serial.h> | 36 | #include <mach/serial.h> |
37 | #include <mach/nand.h> | 37 | #include <mach/nand.h> |
38 | #include <mach/mmc.h> | 38 | #include <mach/mmc.h> |
39 | #include <mach/common.h> | ||
40 | 39 | ||
41 | #define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000 | 40 | #define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000 |
42 | #define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000 | 41 | #define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000 |
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index d9d40450bdc5..56c8cd01de9a 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <mach/nand.h> | 45 | #include <mach/nand.h> |
46 | #include <mach/mmc.h> | 46 | #include <mach/mmc.h> |
47 | #include <mach/emac.h> | 47 | #include <mach/emac.h> |
48 | #include <mach/common.h> | ||
49 | 48 | ||
50 | #define DM644X_EVM_PHY_MASK (0x2) | 49 | #define DM644X_EVM_PHY_MASK (0x2) |
51 | #define DM644X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */ | 50 | #define DM644X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */ |
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index e17de6352624..8657e72debc1 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <mach/i2c.h> | 47 | #include <mach/i2c.h> |
48 | #include <mach/mmc.h> | 48 | #include <mach/mmc.h> |
49 | #include <mach/emac.h> | 49 | #include <mach/emac.h> |
50 | #include <mach/common.h> | ||
51 | 50 | ||
52 | #define DM646X_EVM_PHY_MASK (0x2) | 51 | #define DM646X_EVM_PHY_MASK (0x2) |
53 | #define DM646X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */ | 52 | #define DM646X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */ |
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index 748a8e48541e..7acdfd8ac071 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c | |||
@@ -52,7 +52,6 @@ | |||
52 | #include <mach/serial.h> | 52 | #include <mach/serial.h> |
53 | #include <mach/psc.h> | 53 | #include <mach/psc.h> |
54 | #include <mach/mux.h> | 54 | #include <mach/mux.h> |
55 | #include <mach/common.h> | ||
56 | 55 | ||
57 | #define SFFSDR_PHY_MASK (0x2) | 56 | #define SFFSDR_PHY_MASK (0x2) |
58 | #define SFFSDR_MDIO_FREQUENCY (2200000) /* PHY bus frequency */ | 57 | #define SFFSDR_MDIO_FREQUENCY (2200000) /* PHY bus frequency */ |
diff --git a/arch/arm/mach-ep93xx/include/mach/ts72xx.h b/arch/arm/mach-ep93xx/include/mach/ts72xx.h index 34ddec081c40..411734422c1d 100644 --- a/arch/arm/mach-ep93xx/include/mach/ts72xx.h +++ b/arch/arm/mach-ep93xx/include/mach/ts72xx.h | |||
@@ -41,9 +41,6 @@ | |||
41 | #define TS72XX_OPTIONS2_TS9420_BOOT 0x02 | 41 | #define TS72XX_OPTIONS2_TS9420_BOOT 0x02 |
42 | 42 | ||
43 | 43 | ||
44 | #define TS72XX_NOR_PHYS_BASE 0x60000000 | ||
45 | #define TS72XX_NOR2_PHYS_BASE 0x62000000 | ||
46 | |||
47 | #define TS72XX_NAND1_DATA_PHYS_BASE 0x60000000 | 44 | #define TS72XX_NAND1_DATA_PHYS_BASE 0x60000000 |
48 | #define TS72XX_NAND2_DATA_PHYS_BASE 0x70000000 | 45 | #define TS72XX_NAND2_DATA_PHYS_BASE 0x70000000 |
49 | #define TS72XX_NAND_DATA_VIRT_BASE 0xfebfc000 | 46 | #define TS72XX_NAND_DATA_VIRT_BASE 0xfebfc000 |
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c index 7ee024d34829..aaf1371412af 100644 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ b/arch/arm/mach-ep93xx/ts72xx.c | |||
@@ -112,13 +112,16 @@ static void __init ts72xx_map_io(void) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | /************************************************************************* | ||
116 | * NOR flash (TS-7200 only) | ||
117 | *************************************************************************/ | ||
115 | static struct physmap_flash_data ts72xx_flash_data = { | 118 | static struct physmap_flash_data ts72xx_flash_data = { |
116 | .width = 1, | 119 | .width = 2, |
117 | }; | 120 | }; |
118 | 121 | ||
119 | static struct resource ts72xx_flash_resource = { | 122 | static struct resource ts72xx_flash_resource = { |
120 | .start = TS72XX_NOR_PHYS_BASE, | 123 | .start = EP93XX_CS6_PHYS_BASE, |
121 | .end = TS72XX_NOR_PHYS_BASE + SZ_16M - 1, | 124 | .end = EP93XX_CS6_PHYS_BASE + SZ_16M - 1, |
122 | .flags = IORESOURCE_MEM, | 125 | .flags = IORESOURCE_MEM, |
123 | }; | 126 | }; |
124 | 127 | ||
@@ -132,6 +135,12 @@ static struct platform_device ts72xx_flash = { | |||
132 | .resource = &ts72xx_flash_resource, | 135 | .resource = &ts72xx_flash_resource, |
133 | }; | 136 | }; |
134 | 137 | ||
138 | static void __init ts72xx_register_flash(void) | ||
139 | { | ||
140 | if (board_is_ts7200()) | ||
141 | platform_device_register(&ts72xx_flash); | ||
142 | } | ||
143 | |||
135 | static unsigned char ts72xx_rtc_readbyte(unsigned long addr) | 144 | static unsigned char ts72xx_rtc_readbyte(unsigned long addr) |
136 | { | 145 | { |
137 | __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); | 146 | __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); |
@@ -165,8 +174,7 @@ static struct ep93xx_eth_data ts72xx_eth_data = { | |||
165 | static void __init ts72xx_init_machine(void) | 174 | static void __init ts72xx_init_machine(void) |
166 | { | 175 | { |
167 | ep93xx_init_devices(); | 176 | ep93xx_init_devices(); |
168 | if (board_is_ts7200()) | 177 | ts72xx_register_flash(); |
169 | platform_device_register(&ts72xx_flash); | ||
170 | platform_device_register(&ts72xx_rtc_device); | 178 | platform_device_register(&ts72xx_rtc_device); |
171 | 179 | ||
172 | ep93xx_register_eth(&ts72xx_eth_data, 1); | 180 | ep93xx_register_eth(&ts72xx_eth_data, 1); |
diff --git a/arch/arm/mach-ks8695/include/mach/hardware.h b/arch/arm/mach-ks8695/include/mach/hardware.h index 1d640d075b7e..e0f911d9e021 100644 --- a/arch/arm/mach-ks8695/include/mach/hardware.h +++ b/arch/arm/mach-ks8695/include/mach/hardware.h | |||
@@ -17,6 +17,11 @@ | |||
17 | #include <asm/sizes.h> | 17 | #include <asm/sizes.h> |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Clocks are derived from MCLK, which is 25MHz | ||
21 | */ | ||
22 | #define KS8695_CLOCK_RATE 25000000 | ||
23 | |||
24 | /* | ||
20 | * Physical RAM address. | 25 | * Physical RAM address. |
21 | */ | 26 | */ |
22 | #define KS8695_SDRAM_PA 0x00000000 | 27 | #define KS8695_SDRAM_PA 0x00000000 |
diff --git a/arch/arm/mach-ks8695/include/mach/timex.h b/arch/arm/mach-ks8695/include/mach/timex.h index 4682e350369b..10f716371bd3 100644 --- a/arch/arm/mach-ks8695/include/mach/timex.h +++ b/arch/arm/mach-ks8695/include/mach/timex.h | |||
@@ -14,7 +14,8 @@ | |||
14 | #ifndef __ASM_ARCH_TIMEX_H | 14 | #ifndef __ASM_ARCH_TIMEX_H |
15 | #define __ASM_ARCH_TIMEX_H | 15 | #define __ASM_ARCH_TIMEX_H |
16 | 16 | ||
17 | /* timers are derived from MCLK, which is 25MHz */ | 17 | #include <mach/hardware.h> |
18 | #define CLOCK_TICK_RATE 25000000 | 18 | |
19 | #define CLOCK_TICK_RATE KS8695_CLOCK_RATE | ||
19 | 20 | ||
20 | #endif | 21 | #endif |
diff --git a/arch/arm/mach-ks8695/pci.c b/arch/arm/mach-ks8695/pci.c index f5ebcc0fcab9..78499667eb7b 100644 --- a/arch/arm/mach-ks8695/pci.c +++ b/arch/arm/mach-ks8695/pci.c | |||
@@ -245,6 +245,9 @@ static int ks8695_pci_fault(unsigned long addr, unsigned int fsr, struct pt_regs | |||
245 | 245 | ||
246 | static void __init ks8695_pci_preinit(void) | 246 | static void __init ks8695_pci_preinit(void) |
247 | { | 247 | { |
248 | /* perform a software reset to avoid a freeze if the PCI bus was messed up */ | ||
249 | __raw_writel(0x80000000, KS8695_PCI_VA + KS8695_PBCS); | ||
250 | |||
248 | /* stage 1 initialization, subid, subdevice = 0x0001 */ | 251 | /* stage 1 initialization, subid, subdevice = 0x0001 */ |
249 | __raw_writel(0x00010001, KS8695_PCI_VA + KS8695_CRCSID); | 252 | __raw_writel(0x00010001, KS8695_PCI_VA + KS8695_CRCSID); |
250 | 253 | ||
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c index a2d7814896be..505d98cfe508 100644 --- a/arch/arm/mach-omap1/mcbsp.c +++ b/arch/arm/mach-omap1/mcbsp.c | |||
@@ -19,7 +19,6 @@ | |||
19 | 19 | ||
20 | #include <mach/irqs.h> | 20 | #include <mach/irqs.h> |
21 | #include <mach/dma.h> | 21 | #include <mach/dma.h> |
22 | #include <mach/irqs.h> | ||
23 | #include <mach/mux.h> | 22 | #include <mach/mux.h> |
24 | #include <mach/cpu.h> | 23 | #include <mach/cpu.h> |
25 | #include <mach/mcbsp.h> | 24 | #include <mach/mcbsp.h> |
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index d3cc145814d0..cf3dd771a678 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
26 | #include <linux/spi/ads7846.h> | 26 | #include <linux/spi/ads7846.h> |
27 | #include <linux/i2c/twl4030.h> | 27 | #include <linux/i2c/twl4030.h> |
28 | #include <linux/usb/otg.h> | ||
28 | 29 | ||
29 | #include <mach/hardware.h> | 30 | #include <mach/hardware.h> |
30 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
@@ -307,6 +308,10 @@ static void __init omap3_evm_init(void) | |||
307 | ARRAY_SIZE(omap3evm_spi_board_info)); | 308 | ARRAY_SIZE(omap3evm_spi_board_info)); |
308 | 309 | ||
309 | omap_serial_init(); | 310 | omap_serial_init(); |
311 | #ifdef CONFIG_NOP_USB_XCEIV | ||
312 | /* OMAP3EVM uses the ISP1504 PHY, so register the NOP transceiver */ | ||
313 | usb_nop_xceiv_register(); | ||
314 | #endif | ||
310 | usb_musb_init(); | 315 | usb_musb_init(); |
311 | ads7846_dev_init(); | 316 | ads7846_dev_init(); |
312 | } | 317 | } |
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c index a5c0f0435cd6..99b6e1546311 100644 --- a/arch/arm/mach-omap2/mcbsp.c +++ b/arch/arm/mach-omap2/mcbsp.c | |||
@@ -19,7 +19,6 @@ | |||
19 | 19 | ||
20 | #include <mach/irqs.h> | 20 | #include <mach/irqs.h> |
21 | #include <mach/dma.h> | 21 | #include <mach/dma.h> |
22 | #include <mach/irqs.h> | ||
23 | #include <mach/mux.h> | 22 | #include <mach/mux.h> |
24 | #include <mach/cpu.h> | 23 | #include <mach/cpu.h> |
25 | #include <mach/mcbsp.h> | 24 | #include <mach/mcbsp.h> |
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index d85296dc896c..739e59e8025c 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c | |||
@@ -155,20 +155,6 @@ static struct platform_device musb_device = { | |||
155 | .resource = musb_resources, | 155 | .resource = musb_resources, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | #ifdef CONFIG_NOP_USB_XCEIV | ||
159 | static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32); | ||
160 | |||
161 | static struct platform_device nop_xceiv_device = { | ||
162 | .name = "nop_usb_xceiv", | ||
163 | .id = -1, | ||
164 | .dev = { | ||
165 | .dma_mask = &nop_xceiv_dmamask, | ||
166 | .coherent_dma_mask = DMA_BIT_MASK(32), | ||
167 | .platform_data = NULL, | ||
168 | }, | ||
169 | }; | ||
170 | #endif | ||
171 | |||
172 | void __init usb_musb_init(void) | 158 | void __init usb_musb_init(void) |
173 | { | 159 | { |
174 | if (cpu_is_omap243x()) | 160 | if (cpu_is_omap243x()) |
@@ -183,13 +169,6 @@ void __init usb_musb_init(void) | |||
183 | */ | 169 | */ |
184 | musb_plat.clock = "ick"; | 170 | musb_plat.clock = "ick"; |
185 | 171 | ||
186 | #ifdef CONFIG_NOP_USB_XCEIV | ||
187 | if (platform_device_register(&nop_xceiv_device) < 0) { | ||
188 | printk(KERN_ERR "Unable to register NOP-XCEIV device\n"); | ||
189 | return; | ||
190 | } | ||
191 | #endif | ||
192 | |||
193 | if (platform_device_register(&musb_device) < 0) { | 172 | if (platform_device_register(&musb_device) < 0) { |
194 | printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); | 173 | printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n"); |
195 | return; | 174 | return; |
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 63b10d9bb1d3..9cd09465a0e8 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c | |||
@@ -1141,12 +1141,16 @@ struct power_supply_info em_x270_psy_info = { | |||
1141 | 1141 | ||
1142 | static void em_x270_battery_low(void) | 1142 | static void em_x270_battery_low(void) |
1143 | { | 1143 | { |
1144 | #if defined(CONFIG_APM_EMULATION) | ||
1144 | apm_queue_event(APM_LOW_BATTERY); | 1145 | apm_queue_event(APM_LOW_BATTERY); |
1146 | #endif | ||
1145 | } | 1147 | } |
1146 | 1148 | ||
1147 | static void em_x270_battery_critical(void) | 1149 | static void em_x270_battery_critical(void) |
1148 | { | 1150 | { |
1151 | #if defined(CONFIG_APM_EMULATION) | ||
1149 | apm_queue_event(APM_CRITICAL_SUSPEND); | 1152 | apm_queue_event(APM_CRITICAL_SUSPEND); |
1153 | #endif | ||
1150 | } | 1154 | } |
1151 | 1155 | ||
1152 | struct da9030_battery_info em_x270_batterty_info = { | 1156 | struct da9030_battery_info em_x270_batterty_info = { |
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index ed70f281dd09..169fcc18154e 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c | |||
@@ -128,6 +128,10 @@ static unsigned long palmld_pin_config[] __initdata = { | |||
128 | GPIO38_GPIO, /* wifi ready */ | 128 | GPIO38_GPIO, /* wifi ready */ |
129 | GPIO81_GPIO, /* wifi reset */ | 129 | GPIO81_GPIO, /* wifi reset */ |
130 | 130 | ||
131 | /* FFUART */ | ||
132 | GPIO34_FFUART_RXD, | ||
133 | GPIO39_FFUART_TXD, | ||
134 | |||
131 | /* HDD */ | 135 | /* HDD */ |
132 | GPIO98_GPIO, /* HDD reset */ | 136 | GPIO98_GPIO, /* HDD reset */ |
133 | GPIO115_GPIO, /* HDD power */ | 137 | GPIO115_GPIO, /* HDD power */ |
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c index aae64a12a734..33f726ff55e5 100644 --- a/arch/arm/mach-pxa/palmt5.c +++ b/arch/arm/mach-pxa/palmt5.c | |||
@@ -111,6 +111,10 @@ static unsigned long palmt5_pin_config[] __initdata = { | |||
111 | /* PWM */ | 111 | /* PWM */ |
112 | GPIO16_PWM0_OUT, | 112 | GPIO16_PWM0_OUT, |
113 | 113 | ||
114 | /* FFUART */ | ||
115 | GPIO34_FFUART_RXD, | ||
116 | GPIO39_FFUART_TXD, | ||
117 | |||
114 | /* MISC */ | 118 | /* MISC */ |
115 | GPIO10_GPIO, /* hotsync button */ | 119 | GPIO10_GPIO, /* hotsync button */ |
116 | GPIO90_GPIO, /* power detect */ | 120 | GPIO90_GPIO, /* power detect */ |
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c index 6c15d84bde53..83d020879581 100644 --- a/arch/arm/mach-pxa/palmtx.c +++ b/arch/arm/mach-pxa/palmtx.c | |||
@@ -127,6 +127,10 @@ static unsigned long palmtx_pin_config[] __initdata = { | |||
127 | GPIO76_LCD_PCLK, | 127 | GPIO76_LCD_PCLK, |
128 | GPIO77_LCD_BIAS, | 128 | GPIO77_LCD_BIAS, |
129 | 129 | ||
130 | /* FFUART */ | ||
131 | GPIO34_FFUART_RXD, | ||
132 | GPIO39_FFUART_TXD, | ||
133 | |||
130 | /* MISC. */ | 134 | /* MISC. */ |
131 | GPIO10_GPIO, /* hotsync button */ | 135 | GPIO10_GPIO, /* hotsync button */ |
132 | GPIO12_GPIO, /* power detect */ | 136 | GPIO12_GPIO, /* power detect */ |
diff --git a/arch/arm/mach-pxa/treo680.c b/arch/arm/mach-pxa/treo680.c index a06f19edebb3..753ec4df17b9 100644 --- a/arch/arm/mach-pxa/treo680.c +++ b/arch/arm/mach-pxa/treo680.c | |||
@@ -409,7 +409,7 @@ err1: | |||
409 | 409 | ||
410 | static void treo680_irda_shutdown(struct device *dev) | 410 | static void treo680_irda_shutdown(struct device *dev) |
411 | { | 411 | { |
412 | gpio_free(GPIO_NR_TREO680_AMP_EN); | 412 | gpio_free(GPIO_NR_TREO680_IR_EN); |
413 | } | 413 | } |
414 | 414 | ||
415 | static struct pxaficp_platform_data treo680_ficp_info = { | 415 | static struct pxaficp_platform_data treo680_ficp_info = { |
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c index cefd1c0a854a..84095440a878 100644 --- a/arch/arm/mach-pxa/zylonite_pxa300.c +++ b/arch/arm/mach-pxa/zylonite_pxa300.c | |||
@@ -197,10 +197,12 @@ static void __init zylonite_detect_lcd_panel(void) | |||
197 | for (i = 0; i < NUM_LCD_DETECT_PINS; i++) { | 197 | for (i = 0; i < NUM_LCD_DETECT_PINS; i++) { |
198 | id = id << 1; | 198 | id = id << 1; |
199 | gpio = mfp_to_gpio(lcd_detect_pins[i]); | 199 | gpio = mfp_to_gpio(lcd_detect_pins[i]); |
200 | gpio_request(gpio, "LCD_ID_PINS"); | ||
200 | gpio_direction_input(gpio); | 201 | gpio_direction_input(gpio); |
201 | 202 | ||
202 | if (gpio_get_value(gpio)) | 203 | if (gpio_get_value(gpio)) |
203 | id = id | 0x1; | 204 | id = id | 0x1; |
205 | gpio_free(gpio); | ||
204 | } | 206 | } |
205 | 207 | ||
206 | /* lcd id, flush out bit 1 */ | 208 | /* lcd id, flush out bit 1 */ |
diff --git a/arch/arm/mach-pxa/zylonite_pxa320.c b/arch/arm/mach-pxa/zylonite_pxa320.c index cc5a22833605..60d08f23f5e4 100644 --- a/arch/arm/mach-pxa/zylonite_pxa320.c +++ b/arch/arm/mach-pxa/zylonite_pxa320.c | |||
@@ -176,10 +176,12 @@ static void __init zylonite_detect_lcd_panel(void) | |||
176 | for (i = 0; i < NUM_LCD_DETECT_PINS; i++) { | 176 | for (i = 0; i < NUM_LCD_DETECT_PINS; i++) { |
177 | id = id << 1; | 177 | id = id << 1; |
178 | gpio = mfp_to_gpio(lcd_detect_pins[i]); | 178 | gpio = mfp_to_gpio(lcd_detect_pins[i]); |
179 | gpio_request(gpio, "LCD_ID_PINS"); | ||
179 | gpio_direction_input(gpio); | 180 | gpio_direction_input(gpio); |
180 | 181 | ||
181 | if (gpio_get_value(gpio)) | 182 | if (gpio_get_value(gpio)) |
182 | id = id | 0x1; | 183 | id = id | 0x1; |
184 | gpio_free(gpio); | ||
183 | } | 185 | } |
184 | 186 | ||
185 | /* lcd id, flush out bit 1 */ | 187 | /* lcd id, flush out bit 1 */ |
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-core.h b/arch/arm/mach-s3c2410/include/mach/gpio-core.h index 8fe192081d3a..f8b879a7973c 100644 --- a/arch/arm/mach-s3c2410/include/mach/gpio-core.h +++ b/arch/arm/mach-s3c2410/include/mach/gpio-core.h | |||
@@ -28,7 +28,7 @@ static inline struct s3c_gpio_chip *s3c_gpiolib_getchip(unsigned int pin) | |||
28 | return NULL; | 28 | return NULL; |
29 | 29 | ||
30 | chip = &s3c24xx_gpios[pin/32]; | 30 | chip = &s3c24xx_gpios[pin/32]; |
31 | return (S3C2410_GPIO_OFFSET(pin) > chip->chip.ngpio) ? chip : NULL; | 31 | return (S3C2410_GPIO_OFFSET(pin) < chip->chip.ngpio) ? chip : NULL; |
32 | } | 32 | } |
33 | 33 | ||
34 | #endif /* __ASM_ARCH_GPIO_CORE_H */ | 34 | #endif /* __ASM_ARCH_GPIO_CORE_H */ |
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index ebcf006406f9..95587b6c0259 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c | |||
@@ -253,11 +253,8 @@ static struct clocksource clocksource_32k = { | |||
253 | */ | 253 | */ |
254 | unsigned long long sched_clock(void) | 254 | unsigned long long sched_clock(void) |
255 | { | 255 | { |
256 | unsigned long long ret; | 256 | return clocksource_cyc2ns(clocksource_32k.read(&clocksource_32k), |
257 | 257 | clocksource_32k.mult, clocksource_32k.shift); | |
258 | ret = (unsigned long long)clocksource_32k.read(&clocksource_32k); | ||
259 | ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift; | ||
260 | return ret; | ||
261 | } | 258 | } |
262 | 259 | ||
263 | static int __init omap_init_clocksource_32k(void) | 260 | static int __init omap_init_clocksource_32k(void) |
diff --git a/arch/arm/plat-s3c24xx/pwm.c b/arch/arm/plat-s3c24xx/pwm.c index 0120b760315b..82a6d4de02a3 100644 --- a/arch/arm/plat-s3c24xx/pwm.c +++ b/arch/arm/plat-s3c24xx/pwm.c | |||
@@ -246,6 +246,10 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) | |||
246 | 246 | ||
247 | tcmp = duty_ns / tin_ns; | 247 | tcmp = duty_ns / tin_ns; |
248 | tcmp = tcnt - tcmp; | 248 | tcmp = tcnt - tcmp; |
249 | /* the pwm hw only checks the compare register after a decrement, | ||
250 | so the pin never toggles if tcmp = tcnt */ | ||
251 | if (tcmp == tcnt) | ||
252 | tcmp--; | ||
249 | 253 | ||
250 | pwm_dbg(pwm, "tin_ns=%lu, tcmp=%ld/%lu\n", tin_ns, tcmp, tcnt); | 254 | pwm_dbg(pwm, "tin_ns=%lu, tcmp=%ld/%lu\n", tin_ns, tcmp, tcnt); |
251 | 255 | ||
diff --git a/arch/arm/plat-s3c64xx/pm.c b/arch/arm/plat-s3c64xx/pm.c index 07a6516a4f3c..47632fc7eb66 100644 --- a/arch/arm/plat-s3c64xx/pm.c +++ b/arch/arm/plat-s3c64xx/pm.c | |||
@@ -117,8 +117,6 @@ void s3c_pm_save_core(void) | |||
117 | * this. | 117 | * this. |
118 | */ | 118 | */ |
119 | 119 | ||
120 | #include <plat/regs-gpio.h> | ||
121 | |||
122 | static void s3c64xx_cpu_suspend(void) | 120 | static void s3c64xx_cpu_suspend(void) |
123 | { | 121 | { |
124 | unsigned long tmp; | 122 | unsigned long tmp; |
diff --git a/arch/arm/plat-s3c64xx/s3c6400-clock.c b/arch/arm/plat-s3c64xx/s3c6400-clock.c index 1debc1f9f987..febac1950d8e 100644 --- a/arch/arm/plat-s3c64xx/s3c6400-clock.c +++ b/arch/arm/plat-s3c64xx/s3c6400-clock.c | |||
@@ -153,7 +153,7 @@ static unsigned long s3c64xx_clk_arm_round_rate(struct clk *clk, | |||
153 | u32 div; | 153 | u32 div; |
154 | 154 | ||
155 | if (parent < rate) | 155 | if (parent < rate) |
156 | return rate; | 156 | return parent; |
157 | 157 | ||
158 | div = (parent / rate) - 1; | 158 | div = (parent / rate) - 1; |
159 | if (div > armclk_mask) | 159 | if (div > armclk_mask) |
@@ -175,7 +175,7 @@ static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate) | |||
175 | div = clk_get_rate(clk->parent) / rate; | 175 | div = clk_get_rate(clk->parent) / rate; |
176 | 176 | ||
177 | val = __raw_readl(S3C_CLK_DIV0); | 177 | val = __raw_readl(S3C_CLK_DIV0); |
178 | val &= armclk_mask; | 178 | val &= ~armclk_mask; |
179 | val |= (div - 1); | 179 | val |= (div - 1); |
180 | __raw_writel(val, S3C_CLK_DIV0); | 180 | __raw_writel(val, S3C_CLK_DIV0); |
181 | 181 | ||
diff --git a/arch/arm/plat-stmp3xxx/pinmux.c b/arch/arm/plat-stmp3xxx/pinmux.c index d41200382208..6d6b1a468eda 100644 --- a/arch/arm/plat-stmp3xxx/pinmux.c +++ b/arch/arm/plat-stmp3xxx/pinmux.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/sysdev.h> | 22 | #include <linux/sysdev.h> |
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/bitops.h> | 24 | #include <linux/bitops.h> |
25 | #include <linux/sysdev.h> | ||
26 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
27 | 26 | ||
28 | #include <mach/hardware.h> | 27 | #include <mach/hardware.h> |
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 58a7e46affda..e7cbaa02cd0b 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile | |||
@@ -41,11 +41,6 @@ $(error Sorry, you need a newer version of the assembler, one that is built from | |||
41 | ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz) | 41 | ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz) |
42 | endif | 42 | endif |
43 | 43 | ||
44 | ifeq ($(call cc-version),0304) | ||
45 | cflags-$(CONFIG_ITANIUM) += -mtune=merced | ||
46 | cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley | ||
47 | endif | ||
48 | |||
49 | KBUILD_CFLAGS += $(cflags-y) | 44 | KBUILD_CFLAGS += $(cflags-y) |
50 | head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o | 45 | head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o |
51 | 46 | ||
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index e2ca80037335..57a2787bc9fb 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h | |||
@@ -286,7 +286,7 @@ __test_and_clear_bit(int nr, volatile void * addr) | |||
286 | { | 286 | { |
287 | __u32 *p = (__u32 *) addr + (nr >> 5); | 287 | __u32 *p = (__u32 *) addr + (nr >> 5); |
288 | __u32 m = 1 << (nr & 31); | 288 | __u32 m = 1 << (nr & 31); |
289 | int oldbitset = *p & m; | 289 | int oldbitset = (*p & m) != 0; |
290 | 290 | ||
291 | *p &= ~m; | 291 | *p &= ~m; |
292 | return oldbitset; | 292 | return oldbitset; |
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 0a9cc73d35c7..8840a690d1e7 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h | |||
@@ -155,7 +155,6 @@ | |||
155 | #include <linux/bitops.h> | 155 | #include <linux/bitops.h> |
156 | #include <asm/cacheflush.h> | 156 | #include <asm/cacheflush.h> |
157 | #include <asm/mmu_context.h> | 157 | #include <asm/mmu_context.h> |
158 | #include <asm/processor.h> | ||
159 | 158 | ||
160 | /* | 159 | /* |
161 | * Next come the mappings that determine how mmap() protection bits | 160 | * Next come the mappings that determine how mmap() protection bits |
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index 2d311864e359..8ebccb589e1c 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c | |||
@@ -21,6 +21,7 @@ EXPORT_SYMBOL(csum_ipv6_magic); | |||
21 | 21 | ||
22 | #include <asm/page.h> | 22 | #include <asm/page.h> |
23 | EXPORT_SYMBOL(clear_page); | 23 | EXPORT_SYMBOL(clear_page); |
24 | EXPORT_SYMBOL(copy_page); | ||
24 | 25 | ||
25 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 26 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
26 | #include <linux/bootmem.h> | 27 | #include <linux/bootmem.h> |
@@ -60,9 +61,6 @@ EXPORT_SYMBOL(__udivdi3); | |||
60 | EXPORT_SYMBOL(__moddi3); | 61 | EXPORT_SYMBOL(__moddi3); |
61 | EXPORT_SYMBOL(__umoddi3); | 62 | EXPORT_SYMBOL(__umoddi3); |
62 | 63 | ||
63 | #include <asm/page.h> | ||
64 | EXPORT_SYMBOL(copy_page); | ||
65 | |||
66 | #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) | 64 | #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) |
67 | extern void xor_ia64_2(void); | 65 | extern void xor_ia64_2(void); |
68 | extern void xor_ia64_3(void); | 66 | extern void xor_ia64_3(void); |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index c48b03f2b61d..dab4d393908c 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -1072,6 +1072,10 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base) | |||
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | addr = ioremap(phys_addr, 0); | 1074 | addr = ioremap(phys_addr, 0); |
1075 | if (addr == NULL) { | ||
1076 | spin_unlock_irqrestore(&iosapic_lock, flags); | ||
1077 | return -ENOMEM; | ||
1078 | } | ||
1075 | ver = iosapic_version(addr); | 1079 | ver = iosapic_version(addr); |
1076 | if ((err = iosapic_check_gsi_range(gsi_base, ver))) { | 1080 | if ((err = iosapic_check_gsi_range(gsi_base, ver))) { |
1077 | iounmap(addr); | 1081 | iounmap(addr); |
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index 05695962fe44..f6b1ff0aea76 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c | |||
@@ -69,11 +69,6 @@ iommu_dma_init(void) | |||
69 | 69 | ||
70 | int iommu_dma_supported(struct device *dev, u64 mask) | 70 | int iommu_dma_supported(struct device *dev, u64 mask) |
71 | { | 71 | { |
72 | struct dma_map_ops *ops = platform_dma_get_ops(dev); | ||
73 | |||
74 | if (ops->dma_supported) | ||
75 | return ops->dma_supported(dev, mask); | ||
76 | |||
77 | /* Copied from i386. Doesn't make much sense, because it will | 72 | /* Copied from i386. Doesn't make much sense, because it will |
78 | only work for pci_alloc_coherent. | 73 | only work for pci_alloc_coherent. |
79 | The caller just has to use GFP_DMA in this case. */ | 74 | The caller just has to use GFP_DMA in this case. */ |
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index bc80dff1df7a..8f060352e129 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c | |||
@@ -372,6 +372,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
372 | retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj, | 372 | retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj, |
373 | &cache_ktype_percpu_entry, &sys_dev->kobj, | 373 | &cache_ktype_percpu_entry, &sys_dev->kobj, |
374 | "%s", "cache"); | 374 | "%s", "cache"); |
375 | if (unlikely(retval < 0)) { | ||
376 | cpu_cache_sysfs_exit(cpu); | ||
377 | return retval; | ||
378 | } | ||
375 | 379 | ||
376 | for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) { | 380 | for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) { |
377 | this_object = LEAF_KOBJECT_PTR(cpu,i); | 381 | this_object = LEAF_KOBJECT_PTR(cpu,i); |
@@ -385,7 +389,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
385 | } | 389 | } |
386 | kobject_put(&all_cpu_cache_info[cpu].kobj); | 390 | kobject_put(&all_cpu_cache_info[cpu].kobj); |
387 | cpu_cache_sysfs_exit(cpu); | 391 | cpu_cache_sysfs_exit(cpu); |
388 | break; | 392 | return retval; |
389 | } | 393 | } |
390 | kobject_uevent(&(this_object->kobj), KOBJ_ADD); | 394 | kobject_uevent(&(this_object->kobj), KOBJ_ADD); |
391 | } | 395 | } |
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c index 21f63fffc379..9bf55afd08d0 100644 --- a/arch/ia64/kvm/mmio.c +++ b/arch/ia64/kvm/mmio.c | |||
@@ -247,7 +247,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) | |||
247 | vcpu_get_fpreg(vcpu, inst.M9.f2, &v); | 247 | vcpu_get_fpreg(vcpu, inst.M9.f2, &v); |
248 | /* Write high word. FIXME: this is a kludge! */ | 248 | /* Write high word. FIXME: this is a kludge! */ |
249 | v.u.bits[1] &= 0x3ffff; | 249 | v.u.bits[1] &= 0x3ffff; |
250 | mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE); | 250 | mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8, |
251 | ma, IOREQ_WRITE); | ||
251 | data = v.u.bits[0]; | 252 | data = v.u.bits[0]; |
252 | size = 3; | 253 | size = 3; |
253 | } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) { | 254 | } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) { |
@@ -265,7 +266,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) | |||
265 | 266 | ||
266 | /* Write high word.FIXME: this is a kludge! */ | 267 | /* Write high word.FIXME: this is a kludge! */ |
267 | v.u.bits[1] &= 0x3ffff; | 268 | v.u.bits[1] &= 0x3ffff; |
268 | mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE); | 269 | mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], |
270 | 8, ma, IOREQ_WRITE); | ||
269 | data = v.u.bits[0]; | 271 | data = v.u.bits[0]; |
270 | size = 3; | 272 | size = 3; |
271 | } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) { | 273 | } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) { |
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index 46b02cbcc874..cc406d064a09 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c | |||
@@ -461,7 +461,7 @@ void setreg(unsigned long regnum, unsigned long val, | |||
461 | u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) | 461 | u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) |
462 | { | 462 | { |
463 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 463 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
464 | u64 val; | 464 | unsigned long val; |
465 | 465 | ||
466 | if (!reg) | 466 | if (!reg) |
467 | return 0; | 467 | return 0; |
@@ -469,7 +469,7 @@ u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg) | |||
469 | return val; | 469 | return val; |
470 | } | 470 | } |
471 | 471 | ||
472 | void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat) | 472 | void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat) |
473 | { | 473 | { |
474 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | 474 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); |
475 | long sof = (regs->cr_ifs) & 0x7f; | 475 | long sof = (regs->cr_ifs) & 0x7f; |
@@ -1072,7 +1072,7 @@ void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) | |||
1072 | vcpu_set_gr(vcpu, inst.M46.r1, tag, 0); | 1072 | vcpu_set_gr(vcpu, inst.M46.r1, tag, 0); |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr) | 1075 | int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr) |
1076 | { | 1076 | { |
1077 | struct thash_data *data; | 1077 | struct thash_data *data; |
1078 | union ia64_isr visr, pt_isr; | 1078 | union ia64_isr visr, pt_isr; |
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h index 042af92ced83..360724d3ae69 100644 --- a/arch/ia64/kvm/vcpu.h +++ b/arch/ia64/kvm/vcpu.h | |||
@@ -686,14 +686,15 @@ static inline int highest_inservice_irq(struct kvm_vcpu *vcpu) | |||
686 | return highest_bits((int *)&(VMX(vcpu, insvc[0]))); | 686 | return highest_bits((int *)&(VMX(vcpu, insvc[0]))); |
687 | } | 687 | } |
688 | 688 | ||
689 | extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg, | 689 | extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, |
690 | struct ia64_fpreg *val); | 690 | struct ia64_fpreg *val); |
691 | extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg, | 691 | extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg, |
692 | struct ia64_fpreg *val); | 692 | struct ia64_fpreg *val); |
693 | extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg); | 693 | extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg); |
694 | extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat); | 694 | extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, |
695 | extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu); | 695 | u64 val, int nat); |
696 | extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val); | 696 | extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu); |
697 | extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val); | ||
697 | extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr); | 698 | extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr); |
698 | extern void vcpu_bsw0(struct kvm_vcpu *vcpu); | 699 | extern void vcpu_bsw0(struct kvm_vcpu *vcpu); |
699 | extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, | 700 | extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, |
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c index d182b2f72211..68432248515c 100644 --- a/arch/m68knommu/kernel/time.c +++ b/arch/m68knommu/kernel/time.c | |||
@@ -72,9 +72,10 @@ static unsigned long read_rtc_mmss(void) | |||
72 | return mktime(year, mon, day, hour, min, sec);; | 72 | return mktime(year, mon, day, hour, min, sec);; |
73 | } | 73 | } |
74 | 74 | ||
75 | unsigned long read_persistent_clock(void) | 75 | void read_persistent_clock(struct timespec *ts) |
76 | { | 76 | { |
77 | return read_rtc_mmss(); | 77 | ts->tv_sec = read_rtc_mmss(); |
78 | ts->tv_nsec = 0; | ||
78 | } | 79 | } |
79 | 80 | ||
80 | int update_persistent_clock(struct timespec now) | 81 | int update_persistent_clock(struct timespec now) |
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index 8b5914d1241f..e30e42add697 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * MTX-1 platform devices registration | 2 | * MTX-1 platform devices registration |
3 | * | 3 | * |
4 | * Copyright (C) 2007, Florian Fainelli <florian@openwrt.org> | 4 | * Copyright (C) 2007-2009, Florian Fainelli <florian@openwrt.org> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -142,7 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { | |||
142 | 142 | ||
143 | static int __init mtx1_register_devices(void) | 143 | static int __init mtx1_register_devices(void) |
144 | { | 144 | { |
145 | gpio_direction_input(207); | 145 | int rc; |
146 | |||
147 | rc = gpio_request(mtx1_gpio_button[0].gpio, | ||
148 | mtx1_gpio_button[0].desc); | ||
149 | if (rc < 0) { | ||
150 | printk(KERN_INFO "mtx1: failed to request %d\n", | ||
151 | mtx1_gpio_button[0].gpio); | ||
152 | goto out; | ||
153 | } | ||
154 | gpio_direction_input(mtx1_gpio_button[0].gpio); | ||
155 | out: | ||
146 | return platform_add_devices(mtx1_devs, ARRAY_SIZE(mtx1_devs)); | 156 | return platform_add_devices(mtx1_devs, ARRAY_SIZE(mtx1_devs)); |
147 | } | 157 | } |
148 | 158 | ||
diff --git a/arch/mips/ar7/Makefile b/arch/mips/ar7/Makefile index 7435e44b3964..26bc5da18997 100644 --- a/arch/mips/ar7/Makefile +++ b/arch/mips/ar7/Makefile | |||
@@ -8,3 +8,4 @@ obj-y := \ | |||
8 | platform.o \ | 8 | platform.o \ |
9 | gpio.o \ | 9 | gpio.o \ |
10 | clock.o | 10 | clock.o |
11 | EXTRA_CFLAGS += -Werror | ||
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c index 27dc6663f2fa..cc65c8eb391b 100644 --- a/arch/mips/ar7/clock.c +++ b/arch/mips/ar7/clock.c | |||
@@ -264,19 +264,6 @@ static void __init tnetd7300_init_clocks(void) | |||
264 | iounmap(bootcr); | 264 | iounmap(bootcr); |
265 | } | 265 | } |
266 | 266 | ||
267 | static int tnetd7200_get_clock(int base, struct tnetd7200_clock *clock, | ||
268 | u32 *bootcr, u32 bus_clock) | ||
269 | { | ||
270 | int divisor = ((readl(&clock->prediv) & 0x1f) + 1) * | ||
271 | ((readl(&clock->postdiv) & 0x1f) + 1); | ||
272 | |||
273 | if (*bootcr & BOOT_PLL_BYPASS) | ||
274 | return base / divisor; | ||
275 | |||
276 | return base * ((readl(&clock->mul) & 0xf) + 1) / divisor; | ||
277 | } | ||
278 | |||
279 | |||
280 | static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock, | 267 | static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock, |
281 | int prediv, int postdiv, int postdiv2, int mul, u32 frequency) | 268 | int prediv, int postdiv, int postdiv2, int mul, u32 frequency) |
282 | { | 269 | { |
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c index 46fed44825a6..696c723dc6d4 100644 --- a/arch/mips/ar7/memory.c +++ b/arch/mips/ar7/memory.c | |||
@@ -52,7 +52,7 @@ static int __init memsize(void) | |||
52 | size <<= 1; | 52 | size <<= 1; |
53 | } while (size < (64 << 20)); | 53 | } while (size < (64 << 20)); |
54 | 54 | ||
55 | writel(tmpaddr, &addr); | 55 | writel((u32)tmpaddr, &addr); |
56 | 56 | ||
57 | return size; | 57 | return size; |
58 | } | 58 | } |
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 542244961780..2ecab6155932 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/serial_8250.h> | 28 | #include <linux/serial_8250.h> |
29 | #include <linux/ioport.h> | 29 | #include <linux/ioport.h> |
30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
31 | #include <linux/version.h> | ||
32 | #include <linux/vlynq.h> | 31 | #include <linux/vlynq.h> |
33 | #include <linux/leds.h> | 32 | #include <linux/leds.h> |
34 | #include <linux/string.h> | 33 | #include <linux/string.h> |
@@ -243,13 +242,13 @@ static struct platform_device physmap_flash = { | |||
243 | .num_resources = 1, | 242 | .num_resources = 1, |
244 | }; | 243 | }; |
245 | 244 | ||
246 | static u64 cpmac_dma_mask = DMA_32BIT_MASK; | 245 | static u64 cpmac_dma_mask = DMA_BIT_MASK(32); |
247 | static struct platform_device cpmac_low = { | 246 | static struct platform_device cpmac_low = { |
248 | .id = 0, | 247 | .id = 0, |
249 | .name = "cpmac", | 248 | .name = "cpmac", |
250 | .dev = { | 249 | .dev = { |
251 | .dma_mask = &cpmac_dma_mask, | 250 | .dma_mask = &cpmac_dma_mask, |
252 | .coherent_dma_mask = DMA_32BIT_MASK, | 251 | .coherent_dma_mask = DMA_BIT_MASK(32), |
253 | .platform_data = &cpmac_low_data, | 252 | .platform_data = &cpmac_low_data, |
254 | }, | 253 | }, |
255 | .resource = cpmac_low_res, | 254 | .resource = cpmac_low_res, |
@@ -261,7 +260,7 @@ static struct platform_device cpmac_high = { | |||
261 | .name = "cpmac", | 260 | .name = "cpmac", |
262 | .dev = { | 261 | .dev = { |
263 | .dma_mask = &cpmac_dma_mask, | 262 | .dma_mask = &cpmac_dma_mask, |
264 | .coherent_dma_mask = DMA_32BIT_MASK, | 263 | .coherent_dma_mask = DMA_BIT_MASK(32), |
265 | .platform_data = &cpmac_high_data, | 264 | .platform_data = &cpmac_high_data, |
266 | }, | 265 | }, |
267 | .resource = cpmac_high_res, | 266 | .resource = cpmac_high_res, |
@@ -481,6 +480,7 @@ static void __init detect_leds(void) | |||
481 | static int __init ar7_register_devices(void) | 480 | static int __init ar7_register_devices(void) |
482 | { | 481 | { |
483 | int res; | 482 | int res; |
483 | #ifdef CONFIG_SERIAL_8250 | ||
484 | static struct uart_port uart_port[2]; | 484 | static struct uart_port uart_port[2]; |
485 | 485 | ||
486 | memset(uart_port, 0, sizeof(struct uart_port) * 2); | 486 | memset(uart_port, 0, sizeof(struct uart_port) * 2); |
@@ -512,7 +512,7 @@ static int __init ar7_register_devices(void) | |||
512 | if (res) | 512 | if (res) |
513 | return res; | 513 | return res; |
514 | } | 514 | } |
515 | 515 | #endif /* CONFIG_SERIAL_8250 */ | |
516 | res = platform_device_register(&physmap_flash); | 516 | res = platform_device_register(&physmap_flash); |
517 | if (res) | 517 | if (res) |
518 | return res; | 518 | return res; |
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c index a320bceb2f9d..5ad6f1db6567 100644 --- a/arch/mips/ar7/prom.c +++ b/arch/mips/ar7/prom.c | |||
@@ -144,7 +144,7 @@ static char * __init lookup_psp_var_map(u8 num) | |||
144 | { | 144 | { |
145 | int i; | 145 | int i; |
146 | 146 | ||
147 | for (i = 0; i < sizeof(psp_var_map); i++) | 147 | for (i = 0; i < ARRAY_SIZE(psp_var_map); i++) |
148 | if (psp_var_map[i].num == num) | 148 | if (psp_var_map[i].num == num) |
149 | return psp_var_map[i].value; | 149 | return psp_var_map[i].value; |
150 | 150 | ||
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c index 6ebb5f16d967..39f6b5b96463 100644 --- a/arch/mips/ar7/setup.c +++ b/arch/mips/ar7/setup.c | |||
@@ -15,7 +15,6 @@ | |||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | 15 | * with this program; if not, write to the Free Software Foundation, Inc., |
16 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | 16 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
17 | */ | 17 | */ |
18 | #include <linux/version.h> | ||
19 | #include <linux/init.h> | 18 | #include <linux/init.h> |
20 | #include <linux/ioport.h> | 19 | #include <linux/ioport.h> |
21 | #include <linux/pm.h> | 20 | #include <linux/pm.h> |
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 0b891a9c6253..32d51a31dc48 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c | |||
@@ -194,11 +194,11 @@ static void octeon_init_secondary(void) | |||
194 | void octeon_prepare_cpus(unsigned int max_cpus) | 194 | void octeon_prepare_cpus(unsigned int max_cpus) |
195 | { | 195 | { |
196 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); | 196 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); |
197 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_SHARED, | 197 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, |
198 | "mailbox0", mailbox_interrupt)) { | 198 | "mailbox0", mailbox_interrupt)) { |
199 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); | 199 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); |
200 | } | 200 | } |
201 | if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_SHARED, | 201 | if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED, |
202 | "mailbox1", mailbox_interrupt)) { | 202 | "mailbox1", mailbox_interrupt)) { |
203 | panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); | 203 | panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); |
204 | } | 204 | } |
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c index 6a17c9b508ea..7abce661b90f 100644 --- a/arch/mips/dec/ecc-berr.c +++ b/arch/mips/dec/ecc-berr.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/dec/ecc-berr.c | ||
3 | * | ||
4 | * Bus error event handling code for systems equipped with ECC | 2 | * Bus error event handling code for systems equipped with ECC |
5 | * handling logic, i.e. DECstation/DECsystem 5000/200 (KN02), | 3 | * handling logic, i.e. DECstation/DECsystem 5000/200 (KN02), |
6 | * 5000/240 (KN03), 5000/260 (KN05) and DECsystem 5900 (KN03), | 4 | * 5000/240 (KN03), 5000/260 (KN05) and DECsystem 5900 (KN03), |
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S index 00cecdcc75f2..82c852818781 100644 --- a/arch/mips/dec/int-handler.S +++ b/arch/mips/dec/int-handler.S | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/dec/int-handler.S | ||
3 | * | ||
4 | * Copyright (C) 1995, 1996, 1997 Paul M. Antoine and Harald Koerfgen | 2 | * Copyright (C) 1995, 1996, 1997 Paul M. Antoine and Harald Koerfgen |
5 | * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki | 3 | * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki |
6 | * | 4 | * |
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c index 3acb133668dc..cb41954fc321 100644 --- a/arch/mips/dec/ioasic-irq.c +++ b/arch/mips/dec/ioasic-irq.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/dec/ioasic-irq.c | ||
3 | * | ||
4 | * DEC I/O ASIC interrupts. | 2 | * DEC I/O ASIC interrupts. |
5 | * | 3 | * |
6 | * Copyright (c) 2002, 2003 Maciej W. Rozycki | 4 | * Copyright (c) 2002, 2003 Maciej W. Rozycki |
diff --git a/arch/mips/dec/kn01-berr.c b/arch/mips/dec/kn01-berr.c index d3b8002bf1e7..b0dc6d53edd6 100644 --- a/arch/mips/dec/kn01-berr.c +++ b/arch/mips/dec/kn01-berr.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/dec/kn01-berr.c | ||
3 | * | ||
4 | * Bus error event handling code for DECstation/DECsystem 3100 | 2 | * Bus error event handling code for DECstation/DECsystem 3100 |
5 | * and 2100 (KN01) systems equipped with parity error detection | 3 | * and 2100 (KN01) systems equipped with parity error detection |
6 | * logic. | 4 | * logic. |
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c index 02439dc0ba83..ed90a8deabcc 100644 --- a/arch/mips/dec/kn02-irq.c +++ b/arch/mips/dec/kn02-irq.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/dec/kn02-irq.c | ||
3 | * | ||
4 | * DECstation 5000/200 (KN02) Control and Status Register | 2 | * DECstation 5000/200 (KN02) Control and Status Register |
5 | * interrupts. | 3 | * interrupts. |
6 | * | 4 | * |
diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c index 5f04545c3606..07ca5405d48d 100644 --- a/arch/mips/dec/kn02xa-berr.c +++ b/arch/mips/dec/kn02xa-berr.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/dec/kn02xa-berr.c | ||
3 | * | ||
4 | * Bus error event handling code for 5000-series systems equipped | 2 | * Bus error event handling code for 5000-series systems equipped |
5 | * with parity error detection logic, i.e. DECstation/DECsystem | 3 | * with parity error detection logic, i.e. DECstation/DECsystem |
6 | * 5000/120, /125, /133 (KN02-BA), 5000/150 (KN04-BA) and Personal | 4 | * 5000/120, /125, /133 (KN02-BA), 5000/150 (KN04-BA) and Personal |
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S index e523454bda3a..8c8498159e43 100644 --- a/arch/mips/dec/prom/call_o32.S +++ b/arch/mips/dec/prom/call_o32.S | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/dec/prom/call_o32.S | ||
3 | * | ||
4 | * O32 interface for the 64 (or N32) ABI. | 2 | * O32 interface for the 64 (or N32) ABI. |
5 | * | 3 | * |
6 | * Copyright (C) 2002 Maciej W. Rozycki | 4 | * Copyright (C) 2002 Maciej W. Rozycki |
diff --git a/arch/mips/dec/prom/console.c b/arch/mips/dec/prom/console.c index 078e1a12421d..caa6e047caf1 100644 --- a/arch/mips/dec/prom/console.c +++ b/arch/mips/dec/prom/console.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/dec/prom/console.c | ||
3 | * | ||
4 | * DECstation PROM-based early console support. | 2 | * DECstation PROM-based early console support. |
5 | * | 3 | * |
6 | * Copyright (C) 2004, 2007 Maciej W. Rozycki | 4 | * Copyright (C) 2004, 2007 Maciej W. Rozycki |
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c index 1359c03ded51..02f505f23c32 100644 --- a/arch/mips/dec/time.c +++ b/arch/mips/dec/time.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/dec/time.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | 2 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds |
5 | * Copyright (C) 2000, 2003 Maciej W. Rozycki | 3 | * Copyright (C) 2000, 2003 Maciej W. Rozycki |
6 | * | 4 | * |
@@ -20,7 +18,7 @@ | |||
20 | #include <asm/dec/ioasic.h> | 18 | #include <asm/dec/ioasic.h> |
21 | #include <asm/dec/machtype.h> | 19 | #include <asm/dec/machtype.h> |
22 | 20 | ||
23 | unsigned long read_persistent_clock(void) | 21 | void read_persistent_clock(struct timespec *ts) |
24 | { | 22 | { |
25 | unsigned int year, mon, day, hour, min, sec, real_year; | 23 | unsigned int year, mon, day, hour, min, sec, real_year; |
26 | unsigned long flags; | 24 | unsigned long flags; |
@@ -55,7 +53,8 @@ unsigned long read_persistent_clock(void) | |||
55 | 53 | ||
56 | year += real_year - 72 + 2000; | 54 | year += real_year - 72 + 2000; |
57 | 55 | ||
58 | return mktime(year, mon, day, hour, min, sec); | 56 | ts->tv_sec = mktime(year, mon, day, hour, min, sec); |
57 | ts->tv_nsec = 0; | ||
59 | } | 58 | } |
60 | 59 | ||
61 | /* | 60 | /* |
diff --git a/arch/mips/emma/common/Makefile b/arch/mips/emma/common/Makefile index c392d28c1ef1..f27d84d1904f 100644 --- a/arch/mips/emma/common/Makefile +++ b/arch/mips/emma/common/Makefile | |||
@@ -1,7 +1,4 @@ | |||
1 | # | 1 | # |
2 | # arch/mips/emma2rh/common/Makefile | ||
3 | # Makefile for the common code of NEC EMMA2RH based board. | ||
4 | # | ||
5 | # Copyright (C) NEC Electronics Corporation 2005-2006 | 2 | # Copyright (C) NEC Electronics Corporation 2005-2006 |
6 | # | 3 | # |
7 | # This program is free software; you can redistribute it and/or modify | 4 | # This program is free software; you can redistribute it and/or modify |
diff --git a/arch/mips/emma/common/prom.c b/arch/mips/emma/common/prom.c index 120f53fbdb45..708f08761406 100644 --- a/arch/mips/emma/common/prom.c +++ b/arch/mips/emma/common/prom.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/emma2rh/common/prom.c | ||
3 | * This file is prom file. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2004-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2004-2006 |
6 | * | 3 | * |
7 | * This file is based on the arch/mips/ddb5xxx/common/prom.c | 4 | * This file is based on the arch/mips/ddb5xxx/common/prom.c |
diff --git a/arch/mips/emma/markeins/Makefile b/arch/mips/emma/markeins/Makefile index 16e0017ba919..f8ba2508fa2b 100644 --- a/arch/mips/emma/markeins/Makefile +++ b/arch/mips/emma/markeins/Makefile | |||
@@ -1,7 +1,4 @@ | |||
1 | # | 1 | # |
2 | # arch/mips/emma2rh/markeins/Makefile | ||
3 | # Makefile for the common code of NEC EMMA2RH based board. | ||
4 | # | ||
5 | # Copyright (C) NEC Electronics Corporation 2005-2006 | 2 | # Copyright (C) NEC Electronics Corporation 2005-2006 |
6 | # | 3 | # |
7 | # This program is free software; you can redistribute it and/or modify | 4 | # This program is free software; you can redistribute it and/or modify |
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c index 43828ae796ec..9504b7ee0b7c 100644 --- a/arch/mips/emma/markeins/irq.c +++ b/arch/mips/emma/markeins/irq.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/emma2rh/markeins/irq.c | ||
3 | * This file defines the irq handler for EMMA2RH. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2004-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2004-2006 |
6 | * | 3 | * |
7 | * This file is based on the arch/mips/ddb5xxx/ddb5477/irq.c | 4 | * This file is based on the arch/mips/ddb5xxx/ddb5477/irq.c |
diff --git a/arch/mips/emma/markeins/led.c b/arch/mips/emma/markeins/led.c index 377a181b6561..49755896857f 100644 --- a/arch/mips/emma/markeins/led.c +++ b/arch/mips/emma/markeins/led.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/emma2rh/markeins/led.c | ||
3 | * This file defines the led display for Mark-eins. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2004-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2004-2006 |
6 | * | 3 | * |
7 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
diff --git a/arch/mips/emma/markeins/platform.c b/arch/mips/emma/markeins/platform.c index 80ae12ef87db..b05b08b92a34 100644 --- a/arch/mips/emma/markeins/platform.c +++ b/arch/mips/emma/markeins/platform.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/emma2rh/markeins/platofrm.c | ||
3 | * This file sets up platform devices for EMMA2RH Mark-eins. | ||
4 | * | ||
5 | * Copyright(C) MontaVista Software Inc, 2006 | 2 | * Copyright(C) MontaVista Software Inc, 2006 |
6 | * | 3 | * |
7 | * Author: dmitry pervushin <dpervushin@ru.mvista.com> | 4 | * Author: dmitry pervushin <dpervushin@ru.mvista.com> |
diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c index 67f456500084..335dc8c1a1bb 100644 --- a/arch/mips/emma/markeins/setup.c +++ b/arch/mips/emma/markeins/setup.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/emma2rh/markeins/setup.c | ||
3 | * This file is setup for EMMA2RH Mark-eins. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2004-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2004-2006 |
6 | * | 3 | * |
7 | * This file is based on the arch/mips/ddb5xxx/ddb5477/setup.c. | 4 | * This file is based on the arch/mips/ddb5xxx/ddb5477/setup.c. |
diff --git a/arch/mips/fw/lib/call_o32.S b/arch/mips/fw/lib/call_o32.S index bdf7d1d4081a..e0a68713b3c3 100644 --- a/arch/mips/fw/lib/call_o32.S +++ b/arch/mips/fw/lib/call_o32.S | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/dec/prom/call_o32.S | ||
3 | * | ||
4 | * O32 interface for the 64 (or N32) ABI. | 2 | * O32 interface for the 64 (or N32) ABI. |
5 | * | 3 | * |
6 | * Copyright (C) 2002 Maciej W. Rozycki | 4 | * Copyright (C) 2002 Maciej W. Rozycki |
diff --git a/arch/mips/include/asm/emma/emma2rh.h b/arch/mips/include/asm/emma/emma2rh.h index 30aea91de626..2afb2fe11b30 100644 --- a/arch/mips/include/asm/emma/emma2rh.h +++ b/arch/mips/include/asm/emma/emma2rh.h | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/include/asm/emma/emma2rh.h | ||
3 | * This file is EMMA2RH common header. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2005-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2005-2006 |
6 | * | 3 | * |
7 | * This file based on include/asm-mips/ddb5xxx/ddb5xxx.h | 4 | * This file based on include/asm-mips/ddb5xxx/ddb5xxx.h |
diff --git a/arch/mips/include/asm/emma/markeins.h b/arch/mips/include/asm/emma/markeins.h index 973b0628490d..2618bf230248 100644 --- a/arch/mips/include/asm/emma/markeins.h +++ b/arch/mips/include/asm/emma/markeins.h | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-mips/emma2rh/markeins.h | ||
3 | * This file is EMMA2RH board depended header. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2005-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2005-2006 |
6 | * | 3 | * |
7 | * This file based on include/asm-mips/ddb5xxx/ddb5xxx.h | 4 | * This file based on include/asm-mips/ddb5xxx/ddb5xxx.h |
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 10292e37c1f7..a8f57341f123 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #define GIC_TRIG_EDGE 1 | 20 | #define GIC_TRIG_EDGE 1 |
21 | #define GIC_TRIG_LEVEL 0 | 21 | #define GIC_TRIG_LEVEL 0 |
22 | 22 | ||
23 | #if CONFIG_SMP | 23 | #ifdef CONFIG_SMP |
24 | #define GIC_NUM_INTRS (24 + NR_CPUS * 2) | 24 | #define GIC_NUM_INTRS (24 + NR_CPUS * 2) |
25 | #else | 25 | #else |
26 | #define GIC_NUM_INTRS 32 | 26 | #define GIC_NUM_INTRS 32 |
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h b/arch/mips/include/asm/pmc-sierra/msp71xx/war.h index 0bf48fc1892b..9e2ee429c529 100644 --- a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h +++ b/arch/mips/include/asm/pmc-sierra/msp71xx/war.h | |||
@@ -23,6 +23,8 @@ | |||
23 | #if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \ | 23 | #if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \ |
24 | defined(CONFIG_PMC_MSP7120_FPGA) | 24 | defined(CONFIG_PMC_MSP7120_FPGA) |
25 | #define MIPS34K_MISSED_ITLB_WAR 1 | 25 | #define MIPS34K_MISSED_ITLB_WAR 1 |
26 | #else | ||
27 | #define MIPS34K_MISSED_ITLB_WAR 0 | ||
26 | #endif | 28 | #endif |
27 | 29 | ||
28 | #endif /* __ASM_MIPS_PMC_SIERRA_WAR_H */ | 30 | #endif /* __ASM_MIPS_PMC_SIERRA_WAR_H */ |
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 0f926aa0cb47..087a8884ef06 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h | |||
@@ -311,8 +311,9 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long | |||
311 | 311 | ||
312 | unsigned long get_wchan(struct task_struct *p); | 312 | unsigned long get_wchan(struct task_struct *p); |
313 | 313 | ||
314 | #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32) | 314 | #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \ |
315 | #define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1) | 315 | THREAD_SIZE - 32 - sizeof(struct pt_regs)) |
316 | #define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk)) | ||
316 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc) | 317 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc) |
317 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) | 318 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) |
318 | #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) | 319 | #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) |
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index b70c49fdda26..e753a777949b 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h | |||
@@ -354,16 +354,17 @@ | |||
354 | #define __NR_pwritev (__NR_Linux + 331) | 354 | #define __NR_pwritev (__NR_Linux + 331) |
355 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) | 355 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 332) |
356 | #define __NR_perf_counter_open (__NR_Linux + 333) | 356 | #define __NR_perf_counter_open (__NR_Linux + 333) |
357 | #define __NR_accept4 (__NR_Linux + 334) | ||
357 | 358 | ||
358 | /* | 359 | /* |
359 | * Offset of the last Linux o32 flavoured syscall | 360 | * Offset of the last Linux o32 flavoured syscall |
360 | */ | 361 | */ |
361 | #define __NR_Linux_syscalls 333 | 362 | #define __NR_Linux_syscalls 334 |
362 | 363 | ||
363 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 364 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
364 | 365 | ||
365 | #define __NR_O32_Linux 4000 | 366 | #define __NR_O32_Linux 4000 |
366 | #define __NR_O32_Linux_syscalls 333 | 367 | #define __NR_O32_Linux_syscalls 334 |
367 | 368 | ||
368 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 369 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
369 | 370 | ||
@@ -664,16 +665,17 @@ | |||
664 | #define __NR_pwritev (__NR_Linux + 290) | 665 | #define __NR_pwritev (__NR_Linux + 290) |
665 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 291) | 666 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 291) |
666 | #define __NR_perf_counter_open (__NR_Linux + 292) | 667 | #define __NR_perf_counter_open (__NR_Linux + 292) |
668 | #define __NR_accept4 (__NR_Linux + 293) | ||
667 | 669 | ||
668 | /* | 670 | /* |
669 | * Offset of the last Linux 64-bit flavoured syscall | 671 | * Offset of the last Linux 64-bit flavoured syscall |
670 | */ | 672 | */ |
671 | #define __NR_Linux_syscalls 292 | 673 | #define __NR_Linux_syscalls 293 |
672 | 674 | ||
673 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 675 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
674 | 676 | ||
675 | #define __NR_64_Linux 5000 | 677 | #define __NR_64_Linux 5000 |
676 | #define __NR_64_Linux_syscalls 292 | 678 | #define __NR_64_Linux_syscalls 293 |
677 | 679 | ||
678 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 680 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
679 | 681 | ||
@@ -978,16 +980,17 @@ | |||
978 | #define __NR_pwritev (__NR_Linux + 294) | 980 | #define __NR_pwritev (__NR_Linux + 294) |
979 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 295) | 981 | #define __NR_rt_tgsigqueueinfo (__NR_Linux + 295) |
980 | #define __NR_perf_counter_open (__NR_Linux + 296) | 982 | #define __NR_perf_counter_open (__NR_Linux + 296) |
983 | #define __NR_accept4 (__NR_Linux + 297) | ||
981 | 984 | ||
982 | /* | 985 | /* |
983 | * Offset of the last N32 flavoured syscall | 986 | * Offset of the last N32 flavoured syscall |
984 | */ | 987 | */ |
985 | #define __NR_Linux_syscalls 296 | 988 | #define __NR_Linux_syscalls 297 |
986 | 989 | ||
987 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 990 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
988 | 991 | ||
989 | #define __NR_N32_Linux 6000 | 992 | #define __NR_N32_Linux 6000 |
990 | #define __NR_N32_Linux_syscalls 296 | 993 | #define __NR_N32_Linux_syscalls 297 |
991 | 994 | ||
992 | #ifdef __KERNEL__ | 995 | #ifdef __KERNEL__ |
993 | 996 | ||
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index f0fd636723be..0d64d0f46418 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c | |||
@@ -190,7 +190,7 @@ int vdma_free(unsigned long laddr) | |||
190 | return -1; | 190 | return -1; |
191 | } | 191 | } |
192 | 192 | ||
193 | while (pgtbl[i].owner == laddr && i < VDMA_PGTBL_ENTRIES) { | 193 | while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) { |
194 | pgtbl[i].owner = VDMA_PAGE_EMPTY; | 194 | pgtbl[i].owner = VDMA_PAGE_EMPTY; |
195 | i++; | 195 | i++; |
196 | } | 196 | } |
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 492a0a8d70fb..531ce7b16124 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S | |||
@@ -188,7 +188,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
188 | 188 | ||
189 | MTC0 zero, CP0_CONTEXT # clear context register | 189 | MTC0 zero, CP0_CONTEXT # clear context register |
190 | PTR_LA $28, init_thread_union | 190 | PTR_LA $28, init_thread_union |
191 | PTR_LI sp, _THREAD_SIZE - 32 | 191 | /* Set the SP after an empty pt_regs. */ |
192 | PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE | ||
192 | PTR_ADDU sp, $28 | 193 | PTR_ADDU sp, $28 |
193 | set_saved_sp sp, t0, t1 | 194 | set_saved_sp sp, t0, t1 |
194 | PTR_SUBU sp, 4 * SZREG # init stack pointer | 195 | PTR_SUBU sp, 4 * SZREG # init stack pointer |
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c index a4d1462c27f7..9b78029bea70 100644 --- a/arch/mips/kernel/irq_txx9.c +++ b/arch/mips/kernel/irq_txx9.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/kernel/irq_txx9.c | ||
3 | * | ||
4 | * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c, | 2 | * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c, |
5 | * linux/arch/mips/tx4927/common/tx4927_irq.c, | 3 | * linux/arch/mips/tx4927/common/tx4927_irq.c, |
6 | * linux/arch/mips/tx4938/common/irq.c | 4 | * linux/arch/mips/tx4938/common/irq.c |
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 3e9100dcc12d..6f51dda87fce 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c | |||
@@ -98,7 +98,8 @@ static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v) | |||
98 | static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) | 98 | static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) |
99 | { | 99 | { |
100 | if (v % 4) { | 100 | if (v % 4) { |
101 | printk(KERN_ERR "module %s: dangerous relocation\n", me->name); | 101 | pr_err("module %s: dangerous R_MIPS_26 REL relocation\n", |
102 | me->name); | ||
102 | return -ENOEXEC; | 103 | return -ENOEXEC; |
103 | } | 104 | } |
104 | 105 | ||
@@ -118,7 +119,8 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) | |||
118 | static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) | 119 | static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) |
119 | { | 120 | { |
120 | if (v % 4) { | 121 | if (v % 4) { |
121 | printk(KERN_ERR "module %s: dangerous relocation\n", me->name); | 122 | pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n", |
123 | me->name); | ||
122 | return -ENOEXEC; | 124 | return -ENOEXEC; |
123 | } | 125 | } |
124 | 126 | ||
@@ -222,7 +224,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) | |||
222 | return 0; | 224 | return 0; |
223 | 225 | ||
224 | out_danger: | 226 | out_danger: |
225 | printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name); | 227 | pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); |
226 | 228 | ||
227 | return -ENOEXEC; | 229 | return -ENOEXEC; |
228 | } | 230 | } |
@@ -301,7 +303,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, | |||
301 | /* This is the symbol it is referring to */ | 303 | /* This is the symbol it is referring to */ |
302 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | 304 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr |
303 | + ELF_MIPS_R_SYM(rel[i]); | 305 | + ELF_MIPS_R_SYM(rel[i]); |
304 | if (!sym->st_value) { | 306 | if (IS_ERR_VALUE(sym->st_value)) { |
305 | /* Ignore unresolved weak symbol */ | 307 | /* Ignore unresolved weak symbol */ |
306 | if (ELF_ST_BIND(sym->st_info) == STB_WEAK) | 308 | if (ELF_ST_BIND(sym->st_info) == STB_WEAK) |
307 | continue; | 309 | continue; |
@@ -341,7 +343,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, | |||
341 | /* This is the symbol it is referring to */ | 343 | /* This is the symbol it is referring to */ |
342 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | 344 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr |
343 | + ELF_MIPS_R_SYM(rel[i]); | 345 | + ELF_MIPS_R_SYM(rel[i]); |
344 | if (!sym->st_value) { | 346 | if (IS_ERR_VALUE(sym->st_value)) { |
345 | /* Ignore unresolved weak symbol */ | 347 | /* Ignore unresolved weak symbol */ |
346 | if (ELF_ST_BIND(sym->st_info) == STB_WEAK) | 348 | if (ELF_ST_BIND(sym->st_info) == STB_WEAK) |
347 | continue; | 349 | continue; |
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index e0a4ac18fa07..26109c4d5170 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/kernel/proc.c | ||
3 | * | ||
4 | * Copyright (C) 1995, 1996, 2001 Ralf Baechle | 2 | * Copyright (C) 1995, 1996, 2001 Ralf Baechle |
5 | * Copyright (C) 2001, 2004 MIPS Technologies, Inc. | 3 | * Copyright (C) 2001, 2004 MIPS Technologies, Inc. |
6 | * Copyright (C) 2004 Maciej W. Rozycki | 4 | * Copyright (C) 2004 Maciej W. Rozycki |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c09d681b7181..f3d73e1831c1 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -115,7 +115,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
115 | { | 115 | { |
116 | struct thread_info *ti = task_thread_info(p); | 116 | struct thread_info *ti = task_thread_info(p); |
117 | struct pt_regs *childregs; | 117 | struct pt_regs *childregs; |
118 | long childksp; | 118 | unsigned long childksp; |
119 | p->set_child_tid = p->clear_child_tid = NULL; | 119 | p->set_child_tid = p->clear_child_tid = NULL; |
120 | 120 | ||
121 | childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; | 121 | childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; |
@@ -132,6 +132,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
132 | 132 | ||
133 | /* set up new TSS. */ | 133 | /* set up new TSS. */ |
134 | childregs = (struct pt_regs *) childksp - 1; | 134 | childregs = (struct pt_regs *) childksp - 1; |
135 | /* Put the stack after the struct pt_regs. */ | ||
136 | childksp = (unsigned long) childregs; | ||
135 | *childregs = *regs; | 137 | *childregs = *regs; |
136 | childregs->regs[7] = 0; /* Clear error flag */ | 138 | childregs->regs[7] = 0; /* Clear error flag */ |
137 | 139 | ||
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 20a86e08fd58..b57082123536 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -654,6 +654,7 @@ einval: li v0, -ENOSYS | |||
654 | sys sys_pwritev 6 | 654 | sys sys_pwritev 6 |
655 | sys sys_rt_tgsigqueueinfo 4 | 655 | sys sys_rt_tgsigqueueinfo 4 |
656 | sys sys_perf_counter_open 5 | 656 | sys sys_perf_counter_open 5 |
657 | sys sys_accept4 4 | ||
657 | .endm | 658 | .endm |
658 | 659 | ||
659 | /* We pre-compute the number of _instruction_ bytes needed to | 660 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index b046130d4c5d..3d866f24e064 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -491,4 +491,5 @@ sys_call_table: | |||
491 | PTR sys_pwritev /* 5390 */ | 491 | PTR sys_pwritev /* 5390 */ |
492 | PTR sys_rt_tgsigqueueinfo | 492 | PTR sys_rt_tgsigqueueinfo |
493 | PTR sys_perf_counter_open | 493 | PTR sys_perf_counter_open |
494 | PTR sys_accept4 | ||
494 | .size sys_call_table,.-sys_call_table | 495 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 15874f9812cc..e855b118a079 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -417,4 +417,5 @@ EXPORT(sysn32_call_table) | |||
417 | PTR sys_pwritev | 417 | PTR sys_pwritev |
418 | PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ | 418 | PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ |
419 | PTR sys_perf_counter_open | 419 | PTR sys_perf_counter_open |
420 | PTR sys_accept4 | ||
420 | .size sysn32_call_table,.-sysn32_call_table | 421 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 781e0f1e9533..0c49f1a660be 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -537,4 +537,5 @@ sys_call_table: | |||
537 | PTR compat_sys_pwritev | 537 | PTR compat_sys_pwritev |
538 | PTR compat_sys_rt_tgsigqueueinfo | 538 | PTR compat_sys_rt_tgsigqueueinfo |
539 | PTR sys_perf_counter_open | 539 | PTR sys_perf_counter_open |
540 | PTR sys_accept4 | ||
540 | .size sys_call_table,.-sys_call_table | 541 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 8a0626cbb108..c16bb6d6c25c 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -465,11 +465,8 @@ void smtc_prepare_cpus(int cpus) | |||
465 | smtc_configure_tlb(); | 465 | smtc_configure_tlb(); |
466 | 466 | ||
467 | for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { | 467 | for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { |
468 | /* | 468 | if (tcpervpe[vpe] == 0) |
469 | * Set the MVP bits. | 469 | continue; |
470 | */ | ||
471 | settc(tc); | ||
472 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP); | ||
473 | if (vpe != 0) | 470 | if (vpe != 0) |
474 | printk(", "); | 471 | printk(", "); |
475 | printk("VPE %d: TC", vpe); | 472 | printk("VPE %d: TC", vpe); |
@@ -488,6 +485,12 @@ void smtc_prepare_cpus(int cpus) | |||
488 | } | 485 | } |
489 | if (vpe != 0) { | 486 | if (vpe != 0) { |
490 | /* | 487 | /* |
488 | * Allow this VPE to control others. | ||
489 | */ | ||
490 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | | ||
491 | VPECONF0_MVP); | ||
492 | |||
493 | /* | ||
491 | * Clear any stale software interrupts from VPE's Cause | 494 | * Clear any stale software interrupts from VPE's Cause |
492 | */ | 495 | */ |
493 | write_vpe_c0_cause(0); | 496 | write_vpe_c0_cause(0); |
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index 58f5cd76c8c3..d52ff77baf3f 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/kernel/stacktrace.c | ||
3 | * | ||
4 | * Stack trace management functions | 2 | * Stack trace management functions |
5 | * | 3 | * |
6 | * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> | 4 | * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 07b9ec2c6e3d..9a1ab7e87fd4 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -73,7 +73,7 @@ static int major; | |||
73 | static const int minor = 1; /* fixed for now */ | 73 | static const int minor = 1; /* fixed for now */ |
74 | 74 | ||
75 | #ifdef CONFIG_MIPS_APSP_KSPD | 75 | #ifdef CONFIG_MIPS_APSP_KSPD |
76 | static struct kspd_notifications kspd_events; | 76 | static struct kspd_notifications kspd_events; |
77 | static int kspd_events_reqd = 0; | 77 | static int kspd_events_reqd = 0; |
78 | #endif | 78 | #endif |
79 | 79 | ||
@@ -155,10 +155,9 @@ struct { | |||
155 | }; | 155 | }; |
156 | 156 | ||
157 | static void release_progmem(void *ptr); | 157 | static void release_progmem(void *ptr); |
158 | extern void save_gp_address(unsigned int secbase, unsigned int rel); | ||
159 | 158 | ||
160 | /* get the vpe associated with this minor */ | 159 | /* get the vpe associated with this minor */ |
161 | struct vpe *get_vpe(int minor) | 160 | static struct vpe *get_vpe(int minor) |
162 | { | 161 | { |
163 | struct vpe *v; | 162 | struct vpe *v; |
164 | 163 | ||
@@ -174,7 +173,7 @@ struct vpe *get_vpe(int minor) | |||
174 | } | 173 | } |
175 | 174 | ||
176 | /* get the vpe associated with this minor */ | 175 | /* get the vpe associated with this minor */ |
177 | struct tc *get_tc(int index) | 176 | static struct tc *get_tc(int index) |
178 | { | 177 | { |
179 | struct tc *t; | 178 | struct tc *t; |
180 | 179 | ||
@@ -186,20 +185,8 @@ struct tc *get_tc(int index) | |||
186 | return NULL; | 185 | return NULL; |
187 | } | 186 | } |
188 | 187 | ||
189 | struct tc *get_tc_unused(void) | ||
190 | { | ||
191 | struct tc *t; | ||
192 | |||
193 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
194 | if (t->state == TC_STATE_UNUSED) | ||
195 | return t; | ||
196 | } | ||
197 | |||
198 | return NULL; | ||
199 | } | ||
200 | |||
201 | /* allocate a vpe and associate it with this minor (or index) */ | 188 | /* allocate a vpe and associate it with this minor (or index) */ |
202 | struct vpe *alloc_vpe(int minor) | 189 | static struct vpe *alloc_vpe(int minor) |
203 | { | 190 | { |
204 | struct vpe *v; | 191 | struct vpe *v; |
205 | 192 | ||
@@ -216,7 +203,7 @@ struct vpe *alloc_vpe(int minor) | |||
216 | } | 203 | } |
217 | 204 | ||
218 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ | 205 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ |
219 | struct tc *alloc_tc(int index) | 206 | static struct tc *alloc_tc(int index) |
220 | { | 207 | { |
221 | struct tc *tc; | 208 | struct tc *tc; |
222 | 209 | ||
@@ -232,7 +219,7 @@ out: | |||
232 | } | 219 | } |
233 | 220 | ||
234 | /* clean up and free everything */ | 221 | /* clean up and free everything */ |
235 | void release_vpe(struct vpe *v) | 222 | static void release_vpe(struct vpe *v) |
236 | { | 223 | { |
237 | list_del(&v->list); | 224 | list_del(&v->list); |
238 | if (v->load_addr) | 225 | if (v->load_addr) |
@@ -240,7 +227,7 @@ void release_vpe(struct vpe *v) | |||
240 | kfree(v); | 227 | kfree(v); |
241 | } | 228 | } |
242 | 229 | ||
243 | void dump_mtregs(void) | 230 | static void dump_mtregs(void) |
244 | { | 231 | { |
245 | unsigned long val; | 232 | unsigned long val; |
246 | 233 | ||
@@ -327,7 +314,8 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, | |||
327 | || (s->sh_flags & masks[m][1]) | 314 | || (s->sh_flags & masks[m][1]) |
328 | || s->sh_entsize != ~0UL) | 315 | || s->sh_entsize != ~0UL) |
329 | continue; | 316 | continue; |
330 | s->sh_entsize = get_offset(&mod->core_size, s); | 317 | s->sh_entsize = |
318 | get_offset((unsigned long *)&mod->core_size, s); | ||
331 | } | 319 | } |
332 | 320 | ||
333 | if (m == 0) | 321 | if (m == 0) |
@@ -461,16 +449,15 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, | |||
461 | { | 449 | { |
462 | unsigned long insnlo = *location; | 450 | unsigned long insnlo = *location; |
463 | Elf32_Addr val, vallo; | 451 | Elf32_Addr val, vallo; |
452 | struct mips_hi16 *l, *next; | ||
464 | 453 | ||
465 | /* Sign extend the addend we extract from the lo insn. */ | 454 | /* Sign extend the addend we extract from the lo insn. */ |
466 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | 455 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; |
467 | 456 | ||
468 | if (mips_hi16_list != NULL) { | 457 | if (mips_hi16_list != NULL) { |
469 | struct mips_hi16 *l; | ||
470 | 458 | ||
471 | l = mips_hi16_list; | 459 | l = mips_hi16_list; |
472 | while (l != NULL) { | 460 | while (l != NULL) { |
473 | struct mips_hi16 *next; | ||
474 | unsigned long insn; | 461 | unsigned long insn; |
475 | 462 | ||
476 | /* | 463 | /* |
@@ -480,7 +467,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, | |||
480 | printk(KERN_DEBUG "VPE loader: " | 467 | printk(KERN_DEBUG "VPE loader: " |
481 | "apply_r_mips_lo16/hi16: \t" | 468 | "apply_r_mips_lo16/hi16: \t" |
482 | "inconsistent value information\n"); | 469 | "inconsistent value information\n"); |
483 | return -ENOEXEC; | 470 | goto out_free; |
484 | } | 471 | } |
485 | 472 | ||
486 | /* | 473 | /* |
@@ -518,6 +505,16 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, | |||
518 | *location = insnlo; | 505 | *location = insnlo; |
519 | 506 | ||
520 | return 0; | 507 | return 0; |
508 | |||
509 | out_free: | ||
510 | while (l != NULL) { | ||
511 | next = l->next; | ||
512 | kfree(l); | ||
513 | l = next; | ||
514 | } | ||
515 | mips_hi16_list = NULL; | ||
516 | |||
517 | return -ENOEXEC; | ||
521 | } | 518 | } |
522 | 519 | ||
523 | static int (*reloc_handlers[]) (struct module *me, uint32_t *location, | 520 | static int (*reloc_handlers[]) (struct module *me, uint32_t *location, |
@@ -541,7 +538,7 @@ static char *rstrs[] = { | |||
541 | [R_MIPS_PC16] = "MIPS_PC16" | 538 | [R_MIPS_PC16] = "MIPS_PC16" |
542 | }; | 539 | }; |
543 | 540 | ||
544 | int apply_relocations(Elf32_Shdr *sechdrs, | 541 | static int apply_relocations(Elf32_Shdr *sechdrs, |
545 | const char *strtab, | 542 | const char *strtab, |
546 | unsigned int symindex, | 543 | unsigned int symindex, |
547 | unsigned int relsec, | 544 | unsigned int relsec, |
@@ -586,7 +583,7 @@ int apply_relocations(Elf32_Shdr *sechdrs, | |||
586 | return 0; | 583 | return 0; |
587 | } | 584 | } |
588 | 585 | ||
589 | void save_gp_address(unsigned int secbase, unsigned int rel) | 586 | static inline void save_gp_address(unsigned int secbase, unsigned int rel) |
590 | { | 587 | { |
591 | gp_addr = secbase + rel; | 588 | gp_addr = secbase + rel; |
592 | gp_offs = gp_addr - (secbase & 0xffff0000); | 589 | gp_offs = gp_addr - (secbase & 0xffff0000); |
diff --git a/arch/mips/lasat/ds1603.c b/arch/mips/lasat/ds1603.c index 52cb1436a12a..c6fd96ff118d 100644 --- a/arch/mips/lasat/ds1603.c +++ b/arch/mips/lasat/ds1603.c | |||
@@ -135,7 +135,7 @@ static void rtc_end_op(void) | |||
135 | lasat_ndelay(1000); | 135 | lasat_ndelay(1000); |
136 | } | 136 | } |
137 | 137 | ||
138 | unsigned long read_persistent_clock(void) | 138 | void read_persistent_clock(struct timespec *ts) |
139 | { | 139 | { |
140 | unsigned long word; | 140 | unsigned long word; |
141 | unsigned long flags; | 141 | unsigned long flags; |
@@ -147,7 +147,8 @@ unsigned long read_persistent_clock(void) | |||
147 | rtc_end_op(); | 147 | rtc_end_op(); |
148 | spin_unlock_irqrestore(&rtc_lock, flags); | 148 | spin_unlock_irqrestore(&rtc_lock, flags); |
149 | 149 | ||
150 | return word; | 150 | ts->tv_sec = word; |
151 | ts->tv_nsec = 0; | ||
151 | } | 152 | } |
152 | 153 | ||
153 | int rtc_mips_set_mmss(unsigned long time) | 154 | int rtc_mips_set_mmss(unsigned long time) |
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index 8f88886feb12..3f04d4c406b7 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c | |||
@@ -92,10 +92,12 @@ static int rtctmp; | |||
92 | int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, | 92 | int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, |
93 | void *buffer, size_t *lenp, loff_t *ppos) | 93 | void *buffer, size_t *lenp, loff_t *ppos) |
94 | { | 94 | { |
95 | struct timespec ts; | ||
95 | int r; | 96 | int r; |
96 | 97 | ||
97 | if (!write) { | 98 | if (!write) { |
98 | rtctmp = read_persistent_clock(); | 99 | read_persistent_clock(&ts); |
100 | rtctmp = ts.tv_sec; | ||
99 | /* check for time < 0 and set to 0 */ | 101 | /* check for time < 0 and set to 0 */ |
100 | if (rtctmp < 0) | 102 | if (rtctmp < 0) |
101 | rtctmp = 0; | 103 | rtctmp = 0; |
@@ -134,9 +136,11 @@ int sysctl_lasat_rtc(ctl_table *table, | |||
134 | void *oldval, size_t *oldlenp, | 136 | void *oldval, size_t *oldlenp, |
135 | void *newval, size_t newlen) | 137 | void *newval, size_t newlen) |
136 | { | 138 | { |
139 | struct timespec ts; | ||
137 | int r; | 140 | int r; |
138 | 141 | ||
139 | rtctmp = read_persistent_clock(); | 142 | read_persistent_clock(&ts); |
143 | rtctmp = ts.tv_sec; | ||
140 | if (rtctmp < 0) | 144 | if (rtctmp < 0) |
141 | rtctmp = 0; | 145 | rtctmp = 0; |
142 | r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); | 146 | r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); |
diff --git a/arch/mips/lemote/lm2e/setup.c b/arch/mips/lemote/lm2e/setup.c index ebd6ceaef2fd..24b355df6127 100644 --- a/arch/mips/lemote/lm2e/setup.c +++ b/arch/mips/lemote/lm2e/setup.c | |||
@@ -54,9 +54,10 @@ void __init plat_time_init(void) | |||
54 | mips_hpt_frequency = cpu_clock_freq / 2; | 54 | mips_hpt_frequency = cpu_clock_freq / 2; |
55 | } | 55 | } |
56 | 56 | ||
57 | unsigned long read_persistent_clock(void) | 57 | void read_persistent_clock(struct timespec *ts) |
58 | { | 58 | { |
59 | return mc146818_get_cmos_time(); | 59 | ts->tv_sec = mc146818_get_cmos_time(); |
60 | ts->tv_nsec = 0; | ||
60 | } | 61 | } |
61 | 62 | ||
62 | void (*__wbflush)(void); | 63 | void (*__wbflush)(void); |
diff --git a/arch/mips/mipssim/sim_time.c b/arch/mips/mipssim/sim_time.c index 0cea932f1241..5492c42f7650 100644 --- a/arch/mips/mipssim/sim_time.c +++ b/arch/mips/mipssim/sim_time.c | |||
@@ -89,13 +89,13 @@ unsigned __cpuinit get_c0_compare_int(void) | |||
89 | if (cpu_has_veic) { | 89 | if (cpu_has_veic) { |
90 | set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch); | 90 | set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch); |
91 | mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; | 91 | mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; |
92 | } else { | 92 | |
93 | #endif | 93 | return mips_cpu_timer_irq; |
94 | { | ||
95 | if (cpu_has_vint) | ||
96 | set_vi_handler(cp0_compare_irq, mips_timer_dispatch); | ||
97 | mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
98 | } | 94 | } |
95 | #endif | ||
96 | if (cpu_has_vint) | ||
97 | set_vi_handler(cp0_compare_irq, mips_timer_dispatch); | ||
98 | mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
99 | 99 | ||
100 | return mips_cpu_timer_irq; | 100 | return mips_cpu_timer_irq; |
101 | } | 101 | } |
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index b165cdcb2818..10ab69f7183f 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c | |||
@@ -289,7 +289,7 @@ static void cache_parity_error_octeon(int non_recoverable) | |||
289 | } | 289 | } |
290 | 290 | ||
291 | /** | 291 | /** |
292 | * Called when the exception is not recoverable | 292 | * Called when the exception is recoverable |
293 | */ | 293 | */ |
294 | 294 | ||
295 | asmlinkage void cache_parity_error_octeon_recoverable(void) | 295 | asmlinkage void cache_parity_error_octeon_recoverable(void) |
@@ -298,7 +298,7 @@ asmlinkage void cache_parity_error_octeon_recoverable(void) | |||
298 | } | 298 | } |
299 | 299 | ||
300 | /** | 300 | /** |
301 | * Called when the exception is recoverable | 301 | * Called when the exception is not recoverable |
302 | */ | 302 | */ |
303 | 303 | ||
304 | asmlinkage void cache_parity_error_octeon_non_recoverable(void) | 304 | asmlinkage void cache_parity_error_octeon_non_recoverable(void) |
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c index 297fb9f390dc..9d25d2ba4b9e 100644 --- a/arch/mips/mm/extable.c +++ b/arch/mips/mm/extable.c | |||
@@ -1,5 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/mm/extable.c | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <ralf@linux-mips.org> | ||
3 | */ | 7 | */ |
4 | #include <linux/module.h> | 8 | #include <linux/module.h> |
5 | #include <linux/spinlock.h> | 9 | #include <linux/spinlock.h> |
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 6751ce9ede9e..f956ecbb8136 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c | |||
@@ -171,6 +171,7 @@ out_of_memory: | |||
171 | * We ran out of memory, call the OOM killer, and return the userspace | 171 | * We ran out of memory, call the OOM killer, and return the userspace |
172 | * (which will retry the fault, or kill us if we got oom-killed). | 172 | * (which will retry the fault, or kill us if we got oom-killed). |
173 | */ | 173 | */ |
174 | up_read(&mm->mmap_sem); | ||
174 | pagefault_out_of_memory(); | 175 | pagefault_out_of_memory(); |
175 | return; | 176 | return; |
176 | 177 | ||
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index a8756f82c31b..3e0a9b35ba5c 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -331,6 +331,7 @@ static struct irqaction irq_call = { | |||
331 | .flags = IRQF_DISABLED|IRQF_PERCPU, | 331 | .flags = IRQF_DISABLED|IRQF_PERCPU, |
332 | .name = "IPI_call" | 332 | .name = "IPI_call" |
333 | }; | 333 | }; |
334 | #endif /* CONFIG_MIPS_MT_SMP */ | ||
334 | 335 | ||
335 | static int gic_resched_int_base; | 336 | static int gic_resched_int_base; |
336 | static int gic_call_int_base; | 337 | static int gic_call_int_base; |
@@ -346,7 +347,6 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu) | |||
346 | { | 347 | { |
347 | return GIC_RESCHED_INT(cpu); | 348 | return GIC_RESCHED_INT(cpu); |
348 | } | 349 | } |
349 | #endif /* CONFIG_MIPS_MT_SMP */ | ||
350 | 350 | ||
351 | static struct irqaction i8259irq = { | 351 | static struct irqaction i8259irq = { |
352 | .handler = no_action, | 352 | .handler = no_action, |
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 0b97d47691fc..3c6f190aa61c 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c | |||
@@ -100,9 +100,10 @@ static unsigned int __init estimate_cpu_frequency(void) | |||
100 | return count; | 100 | return count; |
101 | } | 101 | } |
102 | 102 | ||
103 | unsigned long read_persistent_clock(void) | 103 | void read_persistent_clock(struct timespec *ts) |
104 | { | 104 | { |
105 | return mc146818_get_cmos_time(); | 105 | ts->tv_sec = mc146818_get_cmos_time(); |
106 | ts->tv_nsec = 0; | ||
106 | } | 107 | } |
107 | 108 | ||
108 | static void __init plat_perf_setup(void) | 109 | static void __init plat_perf_setup(void) |
diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c index 8df43e9e4d90..18b192784877 100644 --- a/arch/mips/nxp/pnx8550/common/time.c +++ b/arch/mips/nxp/pnx8550/common/time.c | |||
@@ -138,7 +138,7 @@ __init void plat_time_init(void) | |||
138 | * HZ timer interrupts per second. | 138 | * HZ timer interrupts per second. |
139 | */ | 139 | */ |
140 | mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p)); | 140 | mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p)); |
141 | cpj = (mips_hpt_frequency + HZ / 2) / HZ; | 141 | cpj = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ); |
142 | write_c0_count(0); | 142 | write_c0_count(0); |
143 | timer_ack(); | 143 | timer_ack(); |
144 | 144 | ||
diff --git a/arch/mips/pci/fixup-emma2rh.c b/arch/mips/pci/fixup-emma2rh.c index fba5aad00d51..0d9ccf4dfc5a 100644 --- a/arch/mips/pci/fixup-emma2rh.c +++ b/arch/mips/pci/fixup-emma2rh.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/pci/fixup-emma2rh.c | ||
3 | * This file defines the PCI configration. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2004-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2004-2006 |
6 | * | 3 | * |
7 | * This file is based on the arch/mips/ddb5xxx/ddb5477/pci.c | 4 | * This file is based on the arch/mips/ddb5xxx/ddb5477/pci.c |
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c index 0ad39e53f7b1..f0bb9146e6c0 100644 --- a/arch/mips/pci/fixup-sb1250.c +++ b/arch/mips/pci/fixup-sb1250.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/pci/fixup-sb1250.c | ||
3 | * | ||
4 | * Copyright (C) 2004, 2006 MIPS Technologies, Inc. All rights reserved. | 2 | * Copyright (C) 2004, 2006 MIPS Technologies, Inc. All rights reserved. |
5 | * Author: Maciej W. Rozycki <macro@mips.com> | 3 | * Author: Maciej W. Rozycki <macro@mips.com> |
6 | * | 4 | * |
diff --git a/arch/mips/pci/ops-emma2rh.c b/arch/mips/pci/ops-emma2rh.c index 5947a70b0b7f..710aef5c070e 100644 --- a/arch/mips/pci/ops-emma2rh.c +++ b/arch/mips/pci/ops-emma2rh.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/pci/ops-emma2rh.c | ||
3 | * This file defines the PCI operation for EMMA2RH. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2004-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2004-2006 |
6 | * | 3 | * |
7 | * This file is based on the arch/mips/pci/ops-vr41xx.c | 4 | * This file is based on the arch/mips/pci/ops-vr41xx.c |
diff --git a/arch/mips/pci/pci-emma2rh.c b/arch/mips/pci/pci-emma2rh.c index 2df4190232cd..773e34ff4d1c 100644 --- a/arch/mips/pci/pci-emma2rh.c +++ b/arch/mips/pci/pci-emma2rh.c | |||
@@ -1,7 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/pci/pci-emma2rh.c | ||
3 | * This file defines the PCI configration. | ||
4 | * | ||
5 | * Copyright (C) NEC Electronics Corporation 2004-2006 | 2 | * Copyright (C) NEC Electronics Corporation 2004-2006 |
6 | * | 3 | * |
7 | * This file is based on the arch/mips/ddb5xxx/ddb5477/pci.c | 4 | * This file is based on the arch/mips/ddb5xxx/ddb5477/pci.c |
diff --git a/arch/mips/pci/pci-tx4927.c b/arch/mips/pci/pci-tx4927.c index aaa900596792..a5807406a7f1 100644 --- a/arch/mips/pci/pci-tx4927.c +++ b/arch/mips/pci/pci-tx4927.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/pci/pci-tx4927.c | ||
3 | * | ||
4 | * Based on linux/arch/mips/txx9/rbtx4938/setup.c, | 2 | * Based on linux/arch/mips/txx9/rbtx4938/setup.c, |
5 | * and RBTX49xx patch from CELF patch archive. | 3 | * and RBTX49xx patch from CELF patch archive. |
6 | * | 4 | * |
diff --git a/arch/mips/pci/pci-tx4938.c b/arch/mips/pci/pci-tx4938.c index 1ea257bc3b8f..20e45f30b2ef 100644 --- a/arch/mips/pci/pci-tx4938.c +++ b/arch/mips/pci/pci-tx4938.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/pci/pci-tx4938.c | ||
3 | * | ||
4 | * Based on linux/arch/mips/txx9/rbtx4938/setup.c, | 2 | * Based on linux/arch/mips/txx9/rbtx4938/setup.c, |
5 | * and RBTX49xx patch from CELF patch archive. | 3 | * and RBTX49xx patch from CELF patch archive. |
6 | * | 4 | * |
diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c index 5fecf1cdc325..9ef840693baf 100644 --- a/arch/mips/pci/pci-tx4939.c +++ b/arch/mips/pci/pci-tx4939.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/pci/pci-tx4939.c | ||
3 | * | ||
4 | * Based on linux/arch/mips/txx9/rbtx4939/setup.c, | 2 | * Based on linux/arch/mips/txx9/rbtx4939/setup.c, |
5 | * and RBTX49xx patch from CELF patch archive. | 3 | * and RBTX49xx patch from CELF patch archive. |
6 | * | 4 | * |
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index 75262247f3e4..6aa5c542d52d 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c | |||
@@ -1040,19 +1040,29 @@ static inline int octeon_pcie_read_config(int pcie_port, struct pci_bus *bus, | |||
1040 | int bus_number = bus->number; | 1040 | int bus_number = bus->number; |
1041 | 1041 | ||
1042 | /* | 1042 | /* |
1043 | * We need to force the bus number to be zero on the root | 1043 | * For the top level bus make sure our hardware bus number |
1044 | * bus. Linux numbers the 2nd root bus to start after all | 1044 | * matches the software one. |
1045 | * buses on root 0. | ||
1046 | */ | 1045 | */ |
1047 | if (bus->parent == NULL) | 1046 | if (bus->parent == NULL) { |
1048 | bus_number = 0; | 1047 | union cvmx_pciercx_cfg006 pciercx_cfg006; |
1048 | pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, | ||
1049 | CVMX_PCIERCX_CFG006(pcie_port)); | ||
1050 | if (pciercx_cfg006.s.pbnum != bus_number) { | ||
1051 | pciercx_cfg006.s.pbnum = bus_number; | ||
1052 | pciercx_cfg006.s.sbnum = bus_number; | ||
1053 | pciercx_cfg006.s.subbnum = bus_number; | ||
1054 | cvmx_pcie_cfgx_write(pcie_port, | ||
1055 | CVMX_PCIERCX_CFG006(pcie_port), | ||
1056 | pciercx_cfg006.u32); | ||
1057 | } | ||
1058 | } | ||
1049 | 1059 | ||
1050 | /* | 1060 | /* |
1051 | * PCIe only has a single device connected to Octeon. It is | 1061 | * PCIe only has a single device connected to Octeon. It is |
1052 | * always device ID 0. Don't bother doing reads for other | 1062 | * always device ID 0. Don't bother doing reads for other |
1053 | * device IDs on the first segment. | 1063 | * device IDs on the first segment. |
1054 | */ | 1064 | */ |
1055 | if ((bus_number == 0) && (devfn >> 3 != 0)) | 1065 | if ((bus->parent == NULL) && (devfn >> 3 != 0)) |
1056 | return PCIBIOS_FUNC_NOT_SUPPORTED; | 1066 | return PCIBIOS_FUNC_NOT_SUPPORTED; |
1057 | 1067 | ||
1058 | /* | 1068 | /* |
@@ -1070,7 +1080,7 @@ static inline int octeon_pcie_read_config(int pcie_port, struct pci_bus *bus, | |||
1071 | * bridge only responds to device ID 0, function | 1081 | * bridge only responds to device ID 0, function |
1071 | * 0-1 | 1081 | * 0-1 |
1072 | */ | 1082 | */ |
1073 | if ((bus_number == 0) && (devfn >= 2)) | 1083 | if ((bus->parent == NULL) && (devfn >= 2)) |
1074 | return PCIBIOS_FUNC_NOT_SUPPORTED; | 1084 | return PCIBIOS_FUNC_NOT_SUPPORTED; |
1075 | /* | 1085 | /* |
1076 | * The PCI-X slots are device ID 2,3. Choose one of | 1086 | * The PCI-X slots are device ID 2,3. Choose one of |
@@ -1167,13 +1177,6 @@ static inline int octeon_pcie_write_config(int pcie_port, struct pci_bus *bus, | |||
1167 | int size, u32 val) | 1177 | int size, u32 val) |
1168 | { | 1178 | { |
1169 | int bus_number = bus->number; | 1179 | int bus_number = bus->number; |
1170 | /* | ||
1171 | * We need to force the bus number to be zero on the root | ||
1172 | * bus. Linux numbers the 2nd root bus to start after all | ||
1173 | * busses on root 0. | ||
1174 | */ | ||
1175 | if (bus->parent == NULL) | ||
1176 | bus_number = 0; | ||
1177 | 1180 | ||
1178 | switch (size) { | 1181 | switch (size) { |
1179 | case 4: | 1182 | case 4: |
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio.c b/arch/mips/pmc-sierra/msp71xx/gpio.c index 69848c5813e2..aaccbe524386 100644 --- a/arch/mips/pmc-sierra/msp71xx/gpio.c +++ b/arch/mips/pmc-sierra/msp71xx/gpio.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * @file /arch/mips/pmc-sierra/msp71xx/gpio.c | ||
3 | * | ||
4 | * Generic PMC MSP71xx GPIO handling. These base gpio are controlled by two | 2 | * Generic PMC MSP71xx GPIO handling. These base gpio are controlled by two |
5 | * types of registers. The data register sets the output level when in output | 3 | * types of registers. The data register sets the output level when in output |
6 | * mode and when in input mode will contain the value at the input. The config | 4 | * mode and when in input mode will contain the value at the input. The config |
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio_extended.c b/arch/mips/pmc-sierra/msp71xx/gpio_extended.c index fc6dbc6cf1c0..2a99f360fae4 100644 --- a/arch/mips/pmc-sierra/msp71xx/gpio_extended.c +++ b/arch/mips/pmc-sierra/msp71xx/gpio_extended.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * @file /arch/mips/pmc-sierra/msp71xx/gpio_extended.c | ||
3 | * | ||
4 | * Generic PMC MSP71xx EXTENDED (EXD) GPIO handling. The extended gpio is | 2 | * Generic PMC MSP71xx EXTENDED (EXD) GPIO handling. The extended gpio is |
5 | * a set of hardware registers that have no need for explicit locking as | 3 | * a set of hardware registers that have no need for explicit locking as |
6 | * it is handled by unique method of writing individual set/clr bits. | 4 | * it is handled by unique method of writing individual set/clr bits. |
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c index f5f1b8d2bb9a..61f390232346 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c | |||
@@ -45,13 +45,6 @@ static inline void mask_msp_slp_irq(unsigned int irq) | |||
45 | */ | 45 | */ |
46 | static inline void ack_msp_slp_irq(unsigned int irq) | 46 | static inline void ack_msp_slp_irq(unsigned int irq) |
47 | { | 47 | { |
48 | mask_slp_irq(irq); | ||
49 | |||
50 | /* | ||
51 | * only really necessary for 18, 16-14 and sometimes 3:0 (since | ||
52 | * these can be edge sensitive) but it doesn't hurt for the others. | ||
53 | */ | ||
54 | |||
55 | /* check for PER interrupt range */ | 48 | /* check for PER interrupt range */ |
56 | if (irq < MSP_PER_INTBASE) | 49 | if (irq < MSP_PER_INTBASE) |
57 | *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE)); | 50 | *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE)); |
@@ -62,8 +55,7 @@ static inline void ack_msp_slp_irq(unsigned int irq) | |||
62 | static struct irq_chip msp_slp_irq_controller = { | 55 | static struct irq_chip msp_slp_irq_controller = { |
63 | .name = "MSP_SLP", | 56 | .name = "MSP_SLP", |
64 | .ack = ack_msp_slp_irq, | 57 | .ack = ack_msp_slp_irq, |
65 | .mask = ack_msp_slp_irq, | 58 | .mask = mask_msp_slp_irq, |
66 | .mask_ack = ack_msp_slp_irq, | ||
67 | .unmask = unmask_msp_slp_irq, | 59 | .unmask = unmask_msp_slp_irq, |
68 | }; | 60 | }; |
69 | 61 | ||
@@ -79,7 +71,7 @@ void __init msp_slp_irq_init(void) | |||
79 | 71 | ||
80 | /* initialize all the IRQ descriptors */ | 72 | /* initialize all the IRQ descriptors */ |
81 | for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++) | 73 | for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++) |
82 | set_irq_chip_and_handler(i, &msp_slp_irq_controller | 74 | set_irq_chip_and_handler(i, &msp_slp_irq_controller, |
83 | handle_level_irq); | 75 | handle_level_irq); |
84 | } | 76 | } |
85 | 77 | ||
diff --git a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c index caf5e9a0acc7..fc990cb31941 100644 --- a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c +++ b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c | ||
3 | * | ||
4 | * Copyright (C) 2003 PMC-Sierra Inc. | 2 | * Copyright (C) 2003 PMC-Sierra Inc. |
5 | * Author: Manish Lachwani (lachwani@pmc-sierra.com) | 3 | * Author: Manish Lachwani (lachwani@pmc-sierra.com) |
6 | * | 4 | * |
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c index 2d3c0dca275d..3498ac9c35af 100644 --- a/arch/mips/pmc-sierra/yosemite/setup.c +++ b/arch/mips/pmc-sierra/yosemite/setup.c | |||
@@ -70,7 +70,7 @@ void __init bus_error_init(void) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | 72 | ||
73 | unsigned long read_persistent_clock(void) | 73 | void read_persistent_clock(struct timespec *ts) |
74 | { | 74 | { |
75 | unsigned int year, month, day, hour, min, sec; | 75 | unsigned int year, month, day, hour, min, sec; |
76 | unsigned long flags; | 76 | unsigned long flags; |
@@ -92,7 +92,8 @@ unsigned long read_persistent_clock(void) | |||
92 | m48t37_base->control = 0x00; | 92 | m48t37_base->control = 0x00; |
93 | spin_unlock_irqrestore(&rtc_lock, flags); | 93 | spin_unlock_irqrestore(&rtc_lock, flags); |
94 | 94 | ||
95 | return mktime(year, month, day, hour, min, sec); | 95 | ts->tv_sec = mktime(year, month, day, hour, min, sec); |
96 | ts->tv_nsec = 0; | ||
96 | } | 97 | } |
97 | 98 | ||
98 | int rtc_mips_set_time(unsigned long tim) | 99 | int rtc_mips_set_time(unsigned long tim) |
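The same read_persistent_clock() conversion recurs in the hunks that follow (swarm, sni, powerpc, s390, sh). As a rough sketch of the contract they all assume — the caller shown here is illustrative, not part of this patch — the timekeeping core now hands in a timespec to fill instead of consuming a return value:

	struct timespec ts;

	read_persistent_clock(&ts);	/* implementations fill ts->tv_sec and ts->tv_nsec */
	/* previously: unsigned long sec = read_persistent_clock(); */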
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 672e45d495a9..623ffc933c4c 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c | |||
@@ -87,19 +87,26 @@ enum swarm_rtc_type { | |||
87 | 87 | ||
88 | enum swarm_rtc_type swarm_rtc_type; | 88 | enum swarm_rtc_type swarm_rtc_type; |
89 | 89 | ||
90 | unsigned long read_persistent_clock(void) | 90 | void read_persistent_clock(struct timespec *ts) |
91 | { | 91 | { |
92 | unsigned long sec; | ||
93 | |||
92 | switch (swarm_rtc_type) { | 94 | switch (swarm_rtc_type) { |
93 | case RTC_XICOR: | 95 | case RTC_XICOR: |
94 | return xicor_get_time(); | 96 | sec = xicor_get_time(); |
97 | break; | ||
95 | 98 | ||
96 | case RTC_M4LT81: | 99 | case RTC_M4LT81: |
97 | return m41t81_get_time(); | 100 | sec = m41t81_get_time(); |
101 | break; | ||
98 | 102 | ||
99 | case RTC_NONE: | 103 | case RTC_NONE: |
100 | default: | 104 | default: |
101 | return mktime(2000, 1, 1, 0, 0, 0); | 105 | sec = mktime(2000, 1, 1, 0, 0, 0); |
106 | break; | ||
102 | } | 107 | } |
108 | ts->tv_sec = sec; | ||
109 | ts->tv_nsec = 0; | ||
103 | } | 110 | } |
104 | 111 | ||
105 | int rtc_mips_set_time(unsigned long sec) | 112 | int rtc_mips_set_time(unsigned long sec) |
diff --git a/arch/mips/sibyte/swarm/swarm-i2c.c b/arch/mips/sibyte/swarm/swarm-i2c.c index 4282ac9d01d2..062505054d42 100644 --- a/arch/mips/sibyte/swarm/swarm-i2c.c +++ b/arch/mips/sibyte/swarm/swarm-i2c.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * arch/mips/sibyte/swarm/swarm-i2c.c | ||
3 | * | ||
4 | * Broadcom BCM91250A (SWARM), etc. I2C platform setup. | 2 | * Broadcom BCM91250A (SWARM), etc. I2C platform setup. |
5 | * | 3 | * |
6 | * Copyright (c) 2008 Maciej W. Rozycki | 4 | * Copyright (c) 2008 Maciej W. Rozycki |
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index 0d9ec1a5c24a..62df6a598e0a 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c | |||
@@ -182,7 +182,8 @@ void __init plat_time_init(void) | |||
182 | setup_pit_timer(); | 182 | setup_pit_timer(); |
183 | } | 183 | } |
184 | 184 | ||
185 | unsigned long read_persistent_clock(void) | 185 | void read_persistent_clock(struct timespec *ts) |
186 | { | 186 | { |
187 | return -1; | 187 | ts->tv_sec = -1; |
188 | ts->tv_nsec = 0; | ||
188 | } | 189 | } |
diff --git a/arch/mips/txx9/generic/mem_tx4927.c b/arch/mips/txx9/generic/mem_tx4927.c index ef6ea6e97873..70f9626f8227 100644 --- a/arch/mips/txx9/generic/mem_tx4927.c +++ b/arch/mips/txx9/generic/mem_tx4927.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/txx9/generic/mem_tx4927.c | ||
3 | * | ||
4 | * common tx4927 memory interface | 2 | * common tx4927 memory interface |
5 | * | 3 | * |
6 | * Author: MontaVista Software, Inc. | 4 | * Author: MontaVista Software, Inc. |
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index 3b7d77d61ce0..a205e2ba8e7b 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/mips/txx9/generic/setup.c | ||
3 | * | ||
4 | * Based on linux/arch/mips/txx9/rbtx4938/setup.c, | 2 | * Based on linux/arch/mips/txx9/rbtx4938/setup.c, |
5 | * and RBTX49xx patch from CELF patch archive. | 3 | * and RBTX49xx patch from CELF patch archive. |
6 | * | 4 | * |
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index c033ffe71cdf..b0c241ecf603 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c | |||
@@ -512,10 +512,10 @@ static void __init rbtx4939_setup(void) | |||
512 | rbtx4939_ebusc_setup(); | 512 | rbtx4939_ebusc_setup(); |
513 | /* always enable ATA0 */ | 513 | /* always enable ATA0 */ |
514 | txx9_set64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_ATA0MODE); | 514 | txx9_set64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_ATA0MODE); |
515 | rbtx4939_update_ioc_pen(); | ||
516 | if (txx9_master_clock == 0) | 515 | if (txx9_master_clock == 0) |
517 | txx9_master_clock = 20000000; | 516 | txx9_master_clock = 20000000; |
518 | tx4939_setup(); | 517 | tx4939_setup(); |
518 | rbtx4939_update_ioc_pen(); | ||
519 | #ifdef HAVE_RBTX4939_IOSWAB | 519 | #ifdef HAVE_RBTX4939_IOSWAB |
520 | ioswabw = rbtx4939_ioswabw; | 520 | ioswabw = rbtx4939_ioswabw; |
521 | __mem_ioswabw = rbtx4939_mem_ioswabw; | 521 | __mem_ioswabw = rbtx4939_mem_ioswabw; |
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 35d2ed6396f6..19aecc90f7a4 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h | |||
@@ -59,7 +59,6 @@ void pcibios_penalize_isa_irq(int irq); | |||
59 | #include <linux/slab.h> | 59 | #include <linux/slab.h> |
60 | #include <asm/scatterlist.h> | 60 | #include <asm/scatterlist.h> |
61 | #include <linux/string.h> | 61 | #include <linux/string.h> |
62 | #include <linux/mm.h> | ||
63 | #include <asm/io.h> | 62 | #include <asm/io.h> |
64 | 63 | ||
65 | struct pci_dev; | 64 | struct pci_dev; |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index ae3e70cd1e14..e552e547cb93 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -553,7 +553,7 @@ | |||
553 | * on most of those machines only handles cache transactions. | 553 | * on most of those machines only handles cache transactions. |
554 | */ | 554 | */ |
555 | extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 | 555 | extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 |
556 | depi 1,12,1,\prot | 556 | depdi 1,12,1,\prot |
557 | 557 | ||
558 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ | 558 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ |
559 | convert_for_tlb_insert20 \pte | 559 | convert_for_tlb_insert20 \pte |
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index ef5caf2e6ed0..61ee0eec4e69 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
@@ -86,8 +86,12 @@ | |||
86 | * the bottom of the table, which has a maximum signed displacement of | 86 | * the bottom of the table, which has a maximum signed displacement of |
87 | * 0x3fff; however, since we're only going forward, this becomes | 87 | * 0x3fff; however, since we're only going forward, this becomes |
88 | * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have | 88 | * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have |
89 | * at most 1023 entries */ | 89 | * at most 1023 entries. |
90 | #define MAX_GOTS 1023 | 90 | * To overcome this 14bit displacement with some kernel modules, we'll |
91 | * use instead the unusual 16bit displacement method (see reassemble_16a) | ||
92 | * which gives us a maximum positive displacement of 0x7fff, and as such | ||
93 | * allows us to allocate up to 4095 GOT entries. */ | ||
94 | #define MAX_GOTS 4095 | ||
91 | 95 | ||
92 | /* three functions to determine where in the module core | 96 | /* three functions to determine where in the module core |
93 | * or init pieces the location is */ | 97 | * or init pieces the location is */ |
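The new limit follows from the displacement arithmetic spelled out in the comment above; a quick check of the numbers (8-byte GOT entries, forward displacements only):

	0x1fff / 8 = 1023	/* 14-bit signed displacement: old MAX_GOTS */
	0x7fff / 8 = 4095	/* 16-bit reassemble_16a displacement: new MAX_GOTS */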
@@ -145,12 +149,40 @@ struct stub_entry { | |||
145 | /* The reassemble_* functions prepare an immediate value for | 149 | /* The reassemble_* functions prepare an immediate value for |
146 | insertion into an opcode. pa-risc uses all sorts of weird bitfields | 150 | insertion into an opcode. pa-risc uses all sorts of weird bitfields |
147 | in the instruction to hold the value. */ | 151 | in the instruction to hold the value. */ |
152 | static inline int sign_unext(int x, int len) | ||
153 | { | ||
154 | int len_ones; | ||
155 | |||
156 | len_ones = (1 << len) - 1; | ||
157 | return x & len_ones; | ||
158 | } | ||
159 | |||
160 | static inline int low_sign_unext(int x, int len) | ||
161 | { | ||
162 | int sign, temp; | ||
163 | |||
164 | sign = (x >> (len-1)) & 1; | ||
165 | temp = sign_unext(x, len-1); | ||
166 | return (temp << 1) | sign; | ||
167 | } | ||
168 | |||
148 | static inline int reassemble_14(int as14) | 169 | static inline int reassemble_14(int as14) |
149 | { | 170 | { |
150 | return (((as14 & 0x1fff) << 1) | | 171 | return (((as14 & 0x1fff) << 1) | |
151 | ((as14 & 0x2000) >> 13)); | 172 | ((as14 & 0x2000) >> 13)); |
152 | } | 173 | } |
153 | 174 | ||
175 | static inline int reassemble_16a(int as16) | ||
176 | { | ||
177 | int s, t; | ||
178 | |||
179 | /* Unusual 16-bit encoding, for wide mode only. */ | ||
180 | t = (as16 << 1) & 0xffff; | ||
181 | s = (as16 & 0x8000); | ||
182 | return (t ^ s ^ (s >> 1)) | (s >> 15); | ||
183 | } | ||
184 | |||
185 | |||
154 | static inline int reassemble_17(int as17) | 186 | static inline int reassemble_17(int as17) |
155 | { | 187 | { |
156 | return (((as17 & 0x10000) >> 16) | | 188 | return (((as17 & 0x10000) >> 16) | |
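As a small illustration of the helpers added above (input values chosen arbitrarily, results computed from the code as written):

	low_sign_unext(3, 5) == 0x06;	/* sign bit (0) moves to the LSB, magnitude shifts left: 0b00110 */
	reassemble_16a(3)    == 0x06;	/* t = 0x0006, s = 0, so the bit swizzle is a no-op here */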
@@ -407,6 +439,7 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend, | |||
407 | enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec) | 439 | enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec) |
408 | { | 440 | { |
409 | struct stub_entry *stub; | 441 | struct stub_entry *stub; |
442 | int __maybe_unused d; | ||
410 | 443 | ||
411 | /* initialize stub_offset to point in front of the section */ | 444 | /* initialize stub_offset to point in front of the section */ |
412 | if (!me->arch.section[targetsec].stub_offset) { | 445 | if (!me->arch.section[targetsec].stub_offset) { |
@@ -460,12 +493,19 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend, | |||
460 | */ | 493 | */ |
461 | switch (stub_type) { | 494 | switch (stub_type) { |
462 | case ELF_STUB_GOT: | 495 | case ELF_STUB_GOT: |
463 | stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */ | 496 | d = get_got(me, value, addend); |
497 | if (d <= 15) { | ||
498 | /* Format 5 */ | ||
499 | stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */ | ||
500 | stub->insns[0] |= low_sign_unext(d, 5) << 16; | ||
501 | } else { | ||
502 | /* Format 3 */ | ||
503 | stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */ | ||
504 | stub->insns[0] |= reassemble_16a(d); | ||
505 | } | ||
464 | stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */ | 506 | stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */ |
465 | stub->insns[2] = 0xe820d000; /* bve (%r1) */ | 507 | stub->insns[2] = 0xe820d000; /* bve (%r1) */ |
466 | stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */ | 508 | stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */ |
467 | |||
468 | stub->insns[0] |= reassemble_14(get_got(me, value, addend) & 0x3fff); | ||
469 | break; | 509 | break; |
470 | case ELF_STUB_MILLI: | 510 | case ELF_STUB_MILLI: |
471 | stub->insns[0] = 0x20200000; /* ldil 0,%r1 */ | 511 | stub->insns[0] = 0x20200000; /* ldil 0,%r1 */ |
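For a concrete sense of the two encodings chosen above, a sketch plugging a small and a large GOT displacement into the new code (the base opcodes are the ones in the hunk, the displacement values are arbitrary):

	/* d = 3, Format 5: 0x0f6010db | (low_sign_unext(3, 5) << 16) */
	stub->insns[0] == 0x0f6610db;
	/* d = 100, Format 3: 0x537b0000 | reassemble_16a(100) */
	stub->insns[0] == 0x537b00c8;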
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index dfdf13c9fefd..fddc3ed715fa 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h | |||
@@ -34,7 +34,7 @@ | |||
34 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 34 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
35 | 35 | ||
36 | /* We don't currently support large pages. */ | 36 | /* We don't currently support large pages. */ |
37 | #define KVM_PAGES_PER_HPAGE (1<<31) | 37 | #define KVM_PAGES_PER_HPAGE (1UL << 31) |
38 | 38 | ||
39 | struct kvm; | 39 | struct kvm; |
40 | struct kvm_run; | 40 | struct kvm_run; |
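The suffix matters because the plain constant overflows a 32-bit signed int; a minimal illustration:

	(1 << 31)	/* undefined behaviour: shifts a 1 into the sign bit of a signed int */
	(1UL << 31)	/* 0x80000000 as an unsigned long, as intended */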
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 20a60d661ba8..ccf129d47d84 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/device.h> | 8 | #include <linux/device.h> |
9 | #include <linux/dma-mapping.h> | 9 | #include <linux/dma-mapping.h> |
10 | #include <linux/lmb.h> | ||
10 | #include <asm/bug.h> | 11 | #include <asm/bug.h> |
11 | #include <asm/abs_addr.h> | 12 | #include <asm/abs_addr.h> |
12 | 13 | ||
@@ -90,11 +91,10 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
90 | static int dma_direct_dma_supported(struct device *dev, u64 mask) | 91 | static int dma_direct_dma_supported(struct device *dev, u64 mask) |
91 | { | 92 | { |
92 | #ifdef CONFIG_PPC64 | 93 | #ifdef CONFIG_PPC64 |
93 | /* Could be improved to check for memory though it better be | 94 | /* Could be improved so platforms can set the limit in case |
94 | * done via some global so platforms can set the limit in case | ||
95 | * they have limited DMA windows | 95 | * they have limited DMA windows |
96 | */ | 96 | */ |
97 | return mask >= DMA_BIT_MASK(32); | 97 | return mask >= (lmb_end_of_DRAM() - 1); |
98 | #else | 98 | #else |
99 | return 1; | 99 | return 1; |
100 | #endif | 100 | #endif |
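A hedged example of what the new check changes, assuming a machine with 8 GiB of RAM (so lmb_end_of_DRAM() returns 0x200000000):

	/* 32-bit-capable device: mask = DMA_BIT_MASK(32) = 0xffffffff */
	0xffffffffULL >= (0x200000000ULL - 1)	/* false: direct DMA ops are now refused */
	/* the old test, mask >= DMA_BIT_MASK(32), accepted this device regardless of RAM size */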
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index c244133c67a6..cc466d039af6 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c | |||
@@ -407,7 +407,8 @@ struct power_pmu mpc7450_pmu = { | |||
407 | 407 | ||
408 | static int init_mpc7450_pmu(void) | 408 | static int init_mpc7450_pmu(void) |
409 | { | 409 | { |
410 | if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450")) | 410 | if (!cur_cpu_spec->oprofile_cpu_type || |
411 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450")) | ||
411 | return -ENODEV; | 412 | return -ENODEV; |
412 | 413 | ||
413 | return register_power_pmu(&mpc7450_pmu); | 414 | return register_power_pmu(&mpc7450_pmu); |
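The same guard is repeated in each PMU init routine below; a brief sketch of the failure it avoids (illustrative, assuming a CPU table entry with no oprofile_cpu_type set):

	/* old code, with cur_cpu_spec->oprofile_cpu_type == NULL: */
	strcmp(NULL, "ppc/7450");	/* NULL pointer dereference inside strcmp() */
	/* new code: the !cur_cpu_spec->oprofile_cpu_type test returns -ENODEV first */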
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c index 809fdf94b95f..70e1f57f7dd8 100644 --- a/arch/powerpc/kernel/perf_counter.c +++ b/arch/powerpc/kernel/perf_counter.c | |||
@@ -518,6 +518,8 @@ void hw_perf_disable(void) | |||
518 | struct cpu_hw_counters *cpuhw; | 518 | struct cpu_hw_counters *cpuhw; |
519 | unsigned long flags; | 519 | unsigned long flags; |
520 | 520 | ||
521 | if (!ppmu) | ||
522 | return; | ||
521 | local_irq_save(flags); | 523 | local_irq_save(flags); |
522 | cpuhw = &__get_cpu_var(cpu_hw_counters); | 524 | cpuhw = &__get_cpu_var(cpu_hw_counters); |
523 | 525 | ||
@@ -572,6 +574,8 @@ void hw_perf_enable(void) | |||
572 | int n_lim; | 574 | int n_lim; |
573 | int idx; | 575 | int idx; |
574 | 576 | ||
577 | if (!ppmu) | ||
578 | return; | ||
575 | local_irq_save(flags); | 579 | local_irq_save(flags); |
576 | cpuhw = &__get_cpu_var(cpu_hw_counters); | 580 | cpuhw = &__get_cpu_var(cpu_hw_counters); |
577 | if (!cpuhw->disabled) { | 581 | if (!cpuhw->disabled) { |
@@ -737,6 +741,8 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader, | |||
737 | long i, n, n0; | 741 | long i, n, n0; |
738 | struct perf_counter *sub; | 742 | struct perf_counter *sub; |
739 | 743 | ||
744 | if (!ppmu) | ||
745 | return 0; | ||
740 | cpuhw = &__get_cpu_var(cpu_hw_counters); | 746 | cpuhw = &__get_cpu_var(cpu_hw_counters); |
741 | n0 = cpuhw->n_counters; | 747 | n0 = cpuhw->n_counters; |
742 | n = collect_events(group_leader, ppmu->n_counter - n0, | 748 | n = collect_events(group_leader, ppmu->n_counter - n0, |
@@ -1281,6 +1287,8 @@ void hw_perf_counter_setup(int cpu) | |||
1281 | { | 1287 | { |
1282 | struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); | 1288 | struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); |
1283 | 1289 | ||
1290 | if (!ppmu) | ||
1291 | return; | ||
1284 | memset(cpuhw, 0, sizeof(*cpuhw)); | 1292 | memset(cpuhw, 0, sizeof(*cpuhw)); |
1285 | cpuhw->mmcr[0] = MMCR0_FC; | 1293 | cpuhw->mmcr[0] = MMCR0_FC; |
1286 | } | 1294 | } |
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index db90b0c5c27b..3c90a3d9173e 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c | |||
@@ -606,7 +606,8 @@ static struct power_pmu power4_pmu = { | |||
606 | 606 | ||
607 | static int init_power4_pmu(void) | 607 | static int init_power4_pmu(void) |
608 | { | 608 | { |
609 | if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4")) | 609 | if (!cur_cpu_spec->oprofile_cpu_type || |
610 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4")) | ||
610 | return -ENODEV; | 611 | return -ENODEV; |
611 | 612 | ||
612 | return register_power_pmu(&power4_pmu); | 613 | return register_power_pmu(&power4_pmu); |
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index f4adca8e98a4..31918af3e355 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c | |||
@@ -678,8 +678,9 @@ static struct power_pmu power5p_pmu = { | |||
678 | 678 | ||
679 | static int init_power5p_pmu(void) | 679 | static int init_power5p_pmu(void) |
680 | { | 680 | { |
681 | if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+") | 681 | if (!cur_cpu_spec->oprofile_cpu_type || |
682 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++")) | 682 | (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+") |
683 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++"))) | ||
683 | return -ENODEV; | 684 | return -ENODEV; |
684 | 685 | ||
685 | return register_power_pmu(&power5p_pmu); | 686 | return register_power_pmu(&power5p_pmu); |
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 29b2c6c0e83a..867f6f663963 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c | |||
@@ -618,7 +618,8 @@ static struct power_pmu power5_pmu = { | |||
618 | 618 | ||
619 | static int init_power5_pmu(void) | 619 | static int init_power5_pmu(void) |
620 | { | 620 | { |
621 | if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5")) | 621 | if (!cur_cpu_spec->oprofile_cpu_type || |
622 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5")) | ||
622 | return -ENODEV; | 623 | return -ENODEV; |
623 | 624 | ||
624 | return register_power_pmu(&power5_pmu); | 625 | return register_power_pmu(&power5_pmu); |
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 09ae5bf5bda7..fa21890531da 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c | |||
@@ -537,7 +537,8 @@ static struct power_pmu power6_pmu = { | |||
537 | 537 | ||
538 | static int init_power6_pmu(void) | 538 | static int init_power6_pmu(void) |
539 | { | 539 | { |
540 | if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6")) | 540 | if (!cur_cpu_spec->oprofile_cpu_type || |
541 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6")) | ||
541 | return -ENODEV; | 542 | return -ENODEV; |
542 | 543 | ||
543 | return register_power_pmu(&power6_pmu); | 544 | return register_power_pmu(&power6_pmu); |
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 5a9f5cbd40a4..388cf57ad827 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -366,7 +366,8 @@ static struct power_pmu power7_pmu = { | |||
366 | 366 | ||
367 | static int init_power7_pmu(void) | 367 | static int init_power7_pmu(void) |
368 | { | 368 | { |
369 | if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7")) | 369 | if (!cur_cpu_spec->oprofile_cpu_type || |
370 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7")) | ||
370 | return -ENODEV; | 371 | return -ENODEV; |
371 | 372 | ||
372 | return register_power_pmu(&power7_pmu); | 373 | return register_power_pmu(&power7_pmu); |
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 833097ac45dc..75dccb71a043 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c | |||
@@ -488,8 +488,9 @@ static struct power_pmu ppc970_pmu = { | |||
488 | 488 | ||
489 | static int init_ppc970_pmu(void) | 489 | static int init_ppc970_pmu(void) |
490 | { | 490 | { |
491 | if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970") | 491 | if (!cur_cpu_spec->oprofile_cpu_type || |
492 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP")) | 492 | (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970") |
493 | && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP"))) | ||
493 | return -ENODEV; | 494 | return -ENODEV; |
494 | 495 | ||
495 | return register_power_pmu(&ppc970_pmu); | 496 | return register_power_pmu(&ppc970_pmu); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 211d7b0cd370..5b1657540a7d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -772,11 +772,12 @@ int update_persistent_clock(struct timespec now) | |||
772 | return ppc_md.set_rtc_time(&tm); | 772 | return ppc_md.set_rtc_time(&tm); |
773 | } | 773 | } |
774 | 774 | ||
775 | unsigned long read_persistent_clock(void) | 775 | void read_persistent_clock(struct timespec *ts) |
776 | { | 776 | { |
777 | struct rtc_time tm; | 777 | struct rtc_time tm; |
778 | static int first = 1; | 778 | static int first = 1; |
779 | 779 | ||
780 | ts->tv_nsec = 0; | ||
780 | /* XXX this is a little fragile but will work okay in the short term */ | 781 | /* XXX this is a little fragile but will work okay in the short term */ |
781 | if (first) { | 782 | if (first) { |
782 | first = 0; | 783 | first = 0; |
@@ -784,14 +785,18 @@ unsigned long read_persistent_clock(void) | |||
784 | timezone_offset = ppc_md.time_init(); | 785 | timezone_offset = ppc_md.time_init(); |
785 | 786 | ||
786 | /* get_boot_time() isn't guaranteed to be safe to call late */ | 787 | /* get_boot_time() isn't guaranteed to be safe to call late */ |
787 | if (ppc_md.get_boot_time) | 788 | if (ppc_md.get_boot_time) { |
788 | return ppc_md.get_boot_time() -timezone_offset; | 789 | ts->tv_sec = ppc_md.get_boot_time() - timezone_offset; |
790 | return; | ||
791 | } | ||
792 | } | ||
793 | if (!ppc_md.get_rtc_time) { | ||
794 | ts->tv_sec = 0; | ||
795 | return; | ||
789 | } | 796 | } |
790 | if (!ppc_md.get_rtc_time) | ||
791 | return 0; | ||
792 | ppc_md.get_rtc_time(&tm); | 797 | ppc_md.get_rtc_time(&tm); |
793 | return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, | 798 | ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, |
794 | tm.tm_hour, tm.tm_min, tm.tm_sec); | 799 | tm.tm_hour, tm.tm_min, tm.tm_sec); |
795 | } | 800 | } |
796 | 801 | ||
797 | /* clocksource code */ | 802 | /* clocksource code */ |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 8d15314381e0..cae14c499511 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -208,6 +208,9 @@ static noinline __init void detect_machine_type(void) | |||
208 | machine_flags |= MACHINE_FLAG_KVM; | 208 | machine_flags |= MACHINE_FLAG_KVM; |
209 | else | 209 | else |
210 | machine_flags |= MACHINE_FLAG_VM; | 210 | machine_flags |= MACHINE_FLAG_VM; |
211 | |||
212 | /* Store machine flags for setting up lowcore early */ | ||
213 | S390_lowcore.machine_flags = machine_flags; | ||
211 | } | 214 | } |
212 | 215 | ||
213 | static __init void early_pgm_check_handler(void) | 216 | static __init void early_pgm_check_handler(void) |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c81..6bff1a1d9060 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -182,12 +182,14 @@ static void timing_alert_interrupt(__u16 code) | |||
182 | static void etr_reset(void); | 182 | static void etr_reset(void); |
183 | static void stp_reset(void); | 183 | static void stp_reset(void); |
184 | 184 | ||
185 | unsigned long read_persistent_clock(void) | 185 | void read_persistent_clock(struct timespec *ts) |
186 | { | 186 | { |
187 | struct timespec ts; | 187 | tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); |
188 | } | ||
188 | 189 | ||
189 | tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts); | 190 | void read_boot_clock(struct timespec *ts) |
190 | return ts.tv_sec; | 191 | { |
192 | tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts); | ||
191 | } | 193 | } |
192 | 194 | ||
193 | static cycle_t read_tod_clock(struct clocksource *cs) | 195 | static cycle_t read_tod_clock(struct clocksource *cs) |
@@ -205,6 +207,10 @@ static struct clocksource clocksource_tod = { | |||
205 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 207 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
206 | }; | 208 | }; |
207 | 209 | ||
210 | struct clocksource * __init clocksource_default_clock(void) | ||
211 | { | ||
212 | return &clocksource_tod; | ||
213 | } | ||
208 | 214 | ||
209 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | 215 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) |
210 | { | 216 | { |
@@ -242,10 +248,6 @@ void update_vsyscall_tz(void) | |||
242 | */ | 248 | */ |
243 | void __init time_init(void) | 249 | void __init time_init(void) |
244 | { | 250 | { |
245 | struct timespec ts; | ||
246 | unsigned long flags; | ||
247 | cycle_t now; | ||
248 | |||
249 | /* Reset time synchronization interfaces. */ | 251 | /* Reset time synchronization interfaces. */ |
250 | etr_reset(); | 252 | etr_reset(); |
251 | stp_reset(); | 253 | stp_reset(); |
@@ -261,26 +263,6 @@ void __init time_init(void) | |||
261 | if (clocksource_register(&clocksource_tod) != 0) | 263 | if (clocksource_register(&clocksource_tod) != 0) |
262 | panic("Could not register TOD clock source"); | 264 | panic("Could not register TOD clock source"); |
263 | 265 | ||
264 | /* | ||
265 | * The TOD clock is an accurate clock. The xtime should be | ||
266 | * initialized in a way that the difference between TOD and | ||
267 | * xtime is reasonably small. Too bad that timekeeping_init | ||
268 | * sets xtime.tv_nsec to zero. In addition the clock source | ||
269 | * change from the jiffies clock source to the TOD clock | ||
270 | * source add another error of up to 1/HZ second. The same | ||
271 | * function sets wall_to_monotonic to a value that is too | ||
272 | * small for /proc/uptime to be accurate. | ||
273 | * Reset xtime and wall_to_monotonic to sane values. | ||
274 | */ | ||
275 | write_seqlock_irqsave(&xtime_lock, flags); | ||
276 | now = get_clock(); | ||
277 | tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime); | ||
278 | clocksource_tod.cycle_last = now; | ||
279 | clocksource_tod.raw_time = xtime; | ||
280 | tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts); | ||
281 | set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec); | ||
282 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
283 | |||
284 | /* Enable TOD clock interrupts on the boot cpu. */ | 266 | /* Enable TOD clock interrupts on the boot cpu. */ |
285 | init_cpu_timer(); | 267 | init_cpu_timer(); |
286 | 268 | ||
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index f04f5301b1b4..4d613415c435 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -386,7 +386,7 @@ no_timer: | |||
386 | } | 386 | } |
387 | __unset_cpu_idle(vcpu); | 387 | __unset_cpu_idle(vcpu); |
388 | __set_current_state(TASK_RUNNING); | 388 | __set_current_state(TASK_RUNNING); |
389 | remove_wait_queue(&vcpu->wq, &wait); | 389 | remove_wait_queue(&vcpu->arch.local_int.wq, &wait); |
390 | spin_unlock_bh(&vcpu->arch.local_int.lock); | 390 | spin_unlock_bh(&vcpu->arch.local_int.lock); |
391 | spin_unlock(&vcpu->arch.local_int.float_int->lock); | 391 | spin_unlock(&vcpu->arch.local_int.float_int->lock); |
392 | hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); | 392 | hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 36678835034d..0ef81d6776e9 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -169,7 +169,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
169 | unsigned long *reg) | 169 | unsigned long *reg) |
170 | { | 170 | { |
171 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; | 171 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
172 | struct kvm_s390_local_interrupt *li; | 172 | struct kvm_s390_local_interrupt *li = NULL; |
173 | struct kvm_s390_interrupt_info *inti; | 173 | struct kvm_s390_interrupt_info *inti; |
174 | int rc; | 174 | int rc; |
175 | u8 tmp; | 175 | u8 tmp; |
@@ -189,9 +189,10 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
189 | return 2; /* busy */ | 189 | return 2; /* busy */ |
190 | 190 | ||
191 | spin_lock(&fi->lock); | 191 | spin_lock(&fi->lock); |
192 | li = fi->local_int[cpu_addr]; | 192 | if (cpu_addr < KVM_MAX_VCPUS) |
193 | li = fi->local_int[cpu_addr]; | ||
193 | 194 | ||
194 | if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) { | 195 | if (li == NULL) { |
195 | rc = 1; /* incorrect state */ | 196 | rc = 1; /* incorrect state */ |
196 | *reg &= SIGP_STAT_INCORRECT_STATE; | 197 | *reg &= SIGP_STAT_INCORRECT_STATE; |
197 | kfree(inti); | 198 | kfree(inti); |
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 7ffd1b4315bd..b9c88cc519e2 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c | |||
@@ -547,7 +547,7 @@ static int __init ap325rxa_devices_setup(void) | |||
547 | return platform_add_devices(ap325rxa_devices, | 547 | return platform_add_devices(ap325rxa_devices, |
548 | ARRAY_SIZE(ap325rxa_devices)); | 548 | ARRAY_SIZE(ap325rxa_devices)); |
549 | } | 549 | } |
550 | device_initcall(ap325rxa_devices_setup); | 550 | arch_initcall(ap325rxa_devices_setup); |
551 | 551 | ||
552 | /* Return the board specific boot mode pin configuration */ | 552 | /* Return the board specific boot mode pin configuration */ |
553 | static int ap325rxa_mode_pins(void) | 553 | static int ap325rxa_mode_pins(void) |
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index f70f4644deb4..f9b2e4df35b9 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c | |||
@@ -608,7 +608,7 @@ static int __init migor_devices_setup(void) | |||
608 | 608 | ||
609 | return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices)); | 609 | return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices)); |
610 | } | 610 | } |
611 | __initcall(migor_devices_setup); | 611 | arch_initcall(migor_devices_setup); |
612 | 612 | ||
613 | /* Return the board specific boot mode pin configuration */ | 613 | /* Return the board specific boot mode pin configuration */ |
614 | static int migor_mode_pins(void) | 614 | static int migor_mode_pins(void) |
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index 13798733f2db..8555c05e8667 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c | |||
@@ -187,7 +187,7 @@ static int __init sh7619_devices_setup(void) | |||
187 | return platform_add_devices(sh7619_devices, | 187 | return platform_add_devices(sh7619_devices, |
188 | ARRAY_SIZE(sh7619_devices)); | 188 | ARRAY_SIZE(sh7619_devices)); |
189 | } | 189 | } |
190 | __initcall(sh7619_devices_setup); | 190 | arch_initcall(sh7619_devices_setup); |
191 | 191 | ||
192 | void __init plat_irq_setup(void) | 192 | void __init plat_irq_setup(void) |
193 | { | 193 | { |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c index 869c2da4820b..b67376445315 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c | |||
@@ -238,7 +238,7 @@ static int __init mxg_devices_setup(void) | |||
238 | return platform_add_devices(mxg_devices, | 238 | return platform_add_devices(mxg_devices, |
239 | ARRAY_SIZE(mxg_devices)); | 239 | ARRAY_SIZE(mxg_devices)); |
240 | } | 240 | } |
241 | __initcall(mxg_devices_setup); | 241 | arch_initcall(mxg_devices_setup); |
242 | 242 | ||
243 | void __init plat_irq_setup(void) | 243 | void __init plat_irq_setup(void) |
244 | { | 244 | { |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c index d8febe128066..fbde5b75deb9 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c | |||
@@ -357,7 +357,7 @@ static int __init sh7201_devices_setup(void) | |||
357 | return platform_add_devices(sh7201_devices, | 357 | return platform_add_devices(sh7201_devices, |
358 | ARRAY_SIZE(sh7201_devices)); | 358 | ARRAY_SIZE(sh7201_devices)); |
359 | } | 359 | } |
360 | __initcall(sh7201_devices_setup); | 360 | arch_initcall(sh7201_devices_setup); |
361 | 361 | ||
362 | void __init plat_irq_setup(void) | 362 | void __init plat_irq_setup(void) |
363 | { | 363 | { |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index 62e3039d2398..d3fd536c9a84 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c | |||
@@ -367,7 +367,7 @@ static int __init sh7203_devices_setup(void) | |||
367 | return platform_add_devices(sh7203_devices, | 367 | return platform_add_devices(sh7203_devices, |
368 | ARRAY_SIZE(sh7203_devices)); | 368 | ARRAY_SIZE(sh7203_devices)); |
369 | } | 369 | } |
370 | __initcall(sh7203_devices_setup); | 370 | arch_initcall(sh7203_devices_setup); |
371 | 371 | ||
372 | void __init plat_irq_setup(void) | 372 | void __init plat_irq_setup(void) |
373 | { | 373 | { |
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index 3e6f3d7a58be..a9ccc5e8d9e9 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c | |||
@@ -338,7 +338,7 @@ static int __init sh7206_devices_setup(void) | |||
338 | return platform_add_devices(sh7206_devices, | 338 | return platform_add_devices(sh7206_devices, |
339 | ARRAY_SIZE(sh7206_devices)); | 339 | ARRAY_SIZE(sh7206_devices)); |
340 | } | 340 | } |
341 | __initcall(sh7206_devices_setup); | 341 | arch_initcall(sh7206_devices_setup); |
342 | 342 | ||
343 | void __init plat_irq_setup(void) | 343 | void __init plat_irq_setup(void) |
344 | { | 344 | { |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index 88f742fed9ed..c23105983878 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c | |||
@@ -222,7 +222,7 @@ static int __init sh7705_devices_setup(void) | |||
222 | return platform_add_devices(sh7705_devices, | 222 | return platform_add_devices(sh7705_devices, |
223 | ARRAY_SIZE(sh7705_devices)); | 223 | ARRAY_SIZE(sh7705_devices)); |
224 | } | 224 | } |
225 | __initcall(sh7705_devices_setup); | 225 | arch_initcall(sh7705_devices_setup); |
226 | 226 | ||
227 | static struct platform_device *sh7705_early_devices[] __initdata = { | 227 | static struct platform_device *sh7705_early_devices[] __initdata = { |
228 | &tmu0_device, | 228 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index c56306798584..347ab35d0697 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c | |||
@@ -250,7 +250,7 @@ static int __init sh770x_devices_setup(void) | |||
250 | return platform_add_devices(sh770x_devices, | 250 | return platform_add_devices(sh770x_devices, |
251 | ARRAY_SIZE(sh770x_devices)); | 251 | ARRAY_SIZE(sh770x_devices)); |
252 | } | 252 | } |
253 | __initcall(sh770x_devices_setup); | 253 | arch_initcall(sh770x_devices_setup); |
254 | 254 | ||
255 | static struct platform_device *sh770x_early_devices[] __initdata = { | 255 | static struct platform_device *sh770x_early_devices[] __initdata = { |
256 | &tmu0_device, | 256 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index efa76c8148f4..717e90ae1097 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c | |||
@@ -226,7 +226,7 @@ static int __init sh7710_devices_setup(void) | |||
226 | return platform_add_devices(sh7710_devices, | 226 | return platform_add_devices(sh7710_devices, |
227 | ARRAY_SIZE(sh7710_devices)); | 227 | ARRAY_SIZE(sh7710_devices)); |
228 | } | 228 | } |
229 | __initcall(sh7710_devices_setup); | 229 | arch_initcall(sh7710_devices_setup); |
230 | 230 | ||
231 | static struct platform_device *sh7710_early_devices[] __initdata = { | 231 | static struct platform_device *sh7710_early_devices[] __initdata = { |
232 | &tmu0_device, | 232 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index 5b2107798edb..74d8baaf8e96 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c | |||
@@ -388,7 +388,7 @@ static int __init sh7720_devices_setup(void) | |||
388 | return platform_add_devices(sh7720_devices, | 388 | return platform_add_devices(sh7720_devices, |
389 | ARRAY_SIZE(sh7720_devices)); | 389 | ARRAY_SIZE(sh7720_devices)); |
390 | } | 390 | } |
391 | __initcall(sh7720_devices_setup); | 391 | arch_initcall(sh7720_devices_setup); |
392 | 392 | ||
393 | static struct platform_device *sh7720_early_devices[] __initdata = { | 393 | static struct platform_device *sh7720_early_devices[] __initdata = { |
394 | &cmt0_device, | 394 | &cmt0_device, |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index 6d088d123591..de4827df19aa 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c | |||
@@ -138,7 +138,7 @@ static int __init sh4202_devices_setup(void) | |||
138 | return platform_add_devices(sh4202_devices, | 138 | return platform_add_devices(sh4202_devices, |
139 | ARRAY_SIZE(sh4202_devices)); | 139 | ARRAY_SIZE(sh4202_devices)); |
140 | } | 140 | } |
141 | __initcall(sh4202_devices_setup); | 141 | arch_initcall(sh4202_devices_setup); |
142 | 142 | ||
143 | static struct platform_device *sh4202_early_devices[] __initdata = { | 143 | static struct platform_device *sh4202_early_devices[] __initdata = { |
144 | &tmu0_device, | 144 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 851672d15cf4..1b8b122e8f3d 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c | |||
@@ -239,7 +239,7 @@ static int __init sh7750_devices_setup(void) | |||
239 | return platform_add_devices(sh7750_devices, | 239 | return platform_add_devices(sh7750_devices, |
240 | ARRAY_SIZE(sh7750_devices)); | 240 | ARRAY_SIZE(sh7750_devices)); |
241 | } | 241 | } |
242 | __initcall(sh7750_devices_setup); | 242 | arch_initcall(sh7750_devices_setup); |
243 | 243 | ||
244 | static struct platform_device *sh7750_early_devices[] __initdata = { | 244 | static struct platform_device *sh7750_early_devices[] __initdata = { |
245 | &tmu0_device, | 245 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 5b822519bd90..7fbb7be9284c 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c | |||
@@ -265,7 +265,7 @@ static int __init sh7760_devices_setup(void) | |||
265 | return platform_add_devices(sh7760_devices, | 265 | return platform_add_devices(sh7760_devices, |
266 | ARRAY_SIZE(sh7760_devices)); | 266 | ARRAY_SIZE(sh7760_devices)); |
267 | } | 267 | } |
268 | __initcall(sh7760_devices_setup); | 268 | arch_initcall(sh7760_devices_setup); |
269 | 269 | ||
270 | static struct platform_device *sh7760_early_devices[] __initdata = { | 270 | static struct platform_device *sh7760_early_devices[] __initdata = { |
271 | &tmu0_device, | 271 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 6307e087c864..ac4d5672ec1a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c | |||
@@ -325,7 +325,7 @@ static int __init sh7343_devices_setup(void) | |||
325 | return platform_add_devices(sh7343_devices, | 325 | return platform_add_devices(sh7343_devices, |
326 | ARRAY_SIZE(sh7343_devices)); | 326 | ARRAY_SIZE(sh7343_devices)); |
327 | } | 327 | } |
328 | __initcall(sh7343_devices_setup); | 328 | arch_initcall(sh7343_devices_setup); |
329 | 329 | ||
330 | static struct platform_device *sh7343_early_devices[] __initdata = { | 330 | static struct platform_device *sh7343_early_devices[] __initdata = { |
331 | &cmt_device, | 331 | &cmt_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index c18f7d09281b..1a956b1beccc 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c | |||
@@ -318,7 +318,7 @@ static int __init sh7366_devices_setup(void) | |||
318 | return platform_add_devices(sh7366_devices, | 318 | return platform_add_devices(sh7366_devices, |
319 | ARRAY_SIZE(sh7366_devices)); | 319 | ARRAY_SIZE(sh7366_devices)); |
320 | } | 320 | } |
321 | __initcall(sh7366_devices_setup); | 321 | arch_initcall(sh7366_devices_setup); |
322 | 322 | ||
323 | static struct platform_device *sh7366_early_devices[] __initdata = { | 323 | static struct platform_device *sh7366_early_devices[] __initdata = { |
324 | &cmt_device, | 324 | &cmt_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index ea524a2da3e4..cda76ebf87c3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c | |||
@@ -359,7 +359,7 @@ static int __init sh7722_devices_setup(void) | |||
359 | return platform_add_devices(sh7722_devices, | 359 | return platform_add_devices(sh7722_devices, |
360 | ARRAY_SIZE(sh7722_devices)); | 360 | ARRAY_SIZE(sh7722_devices)); |
361 | } | 361 | } |
362 | __initcall(sh7722_devices_setup); | 362 | arch_initcall(sh7722_devices_setup); |
363 | 363 | ||
364 | static struct platform_device *sh7722_early_devices[] __initdata = { | 364 | static struct platform_device *sh7722_early_devices[] __initdata = { |
365 | &cmt_device, | 365 | &cmt_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index e1bb80b2a27b..b45dace9539f 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c | |||
@@ -473,7 +473,7 @@ static int __init sh7723_devices_setup(void) | |||
473 | return platform_add_devices(sh7723_devices, | 473 | return platform_add_devices(sh7723_devices, |
474 | ARRAY_SIZE(sh7723_devices)); | 474 | ARRAY_SIZE(sh7723_devices)); |
475 | } | 475 | } |
476 | __initcall(sh7723_devices_setup); | 476 | arch_initcall(sh7723_devices_setup); |
477 | 477 | ||
478 | static struct platform_device *sh7723_early_devices[] __initdata = { | 478 | static struct platform_device *sh7723_early_devices[] __initdata = { |
479 | &cmt_device, | 479 | &cmt_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index e5ac9eb11c63..a04edaab9a29 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c | |||
@@ -508,7 +508,7 @@ static int __init sh7724_devices_setup(void) | |||
508 | return platform_add_devices(sh7724_devices, | 508 | return platform_add_devices(sh7724_devices, |
509 | ARRAY_SIZE(sh7724_devices)); | 509 | ARRAY_SIZE(sh7724_devices)); |
510 | } | 510 | } |
511 | device_initcall(sh7724_devices_setup); | 511 | arch_initcall(sh7724_devices_setup); |
512 | 512 | ||
513 | static struct platform_device *sh7724_early_devices[] __initdata = { | 513 | static struct platform_device *sh7724_early_devices[] __initdata = { |
514 | &cmt_device, | 514 | &cmt_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index f1e0c0d36da7..4659fff6b842 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c | |||
@@ -314,7 +314,7 @@ static int __init sh7763_devices_setup(void) | |||
314 | return platform_add_devices(sh7763_devices, | 314 | return platform_add_devices(sh7763_devices, |
315 | ARRAY_SIZE(sh7763_devices)); | 315 | ARRAY_SIZE(sh7763_devices)); |
316 | } | 316 | } |
317 | __initcall(sh7763_devices_setup); | 317 | arch_initcall(sh7763_devices_setup); |
318 | 318 | ||
319 | static struct platform_device *sh7763_early_devices[] __initdata = { | 319 | static struct platform_device *sh7763_early_devices[] __initdata = { |
320 | &tmu0_device, | 320 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 1e86209db284..eead08d89d32 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c | |||
@@ -368,7 +368,7 @@ static int __init sh7770_devices_setup(void) | |||
368 | return platform_add_devices(sh7770_devices, | 368 | return platform_add_devices(sh7770_devices, |
369 | ARRAY_SIZE(sh7770_devices)); | 369 | ARRAY_SIZE(sh7770_devices)); |
370 | } | 370 | } |
371 | __initcall(sh7770_devices_setup); | 371 | arch_initcall(sh7770_devices_setup); |
372 | 372 | ||
373 | static struct platform_device *sh7770_early_devices[] __initdata = { | 373 | static struct platform_device *sh7770_early_devices[] __initdata = { |
374 | &tmu0_device, | 374 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 715e05b431e5..2c901f446959 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c | |||
@@ -256,7 +256,7 @@ static int __init sh7780_devices_setup(void) | |||
256 | return platform_add_devices(sh7780_devices, | 256 | return platform_add_devices(sh7780_devices, |
257 | ARRAY_SIZE(sh7780_devices)); | 257 | ARRAY_SIZE(sh7780_devices)); |
258 | } | 258 | } |
259 | __initcall(sh7780_devices_setup); | 259 | arch_initcall(sh7780_devices_setup); |
260 | 260 | ||
261 | static struct platform_device *sh7780_early_devices[] __initdata = { | 261 | static struct platform_device *sh7780_early_devices[] __initdata = { |
262 | &tmu0_device, | 262 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index af561402570b..7f6c718b6c36 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c | |||
@@ -263,7 +263,7 @@ static int __init sh7785_devices_setup(void) | |||
263 | return platform_add_devices(sh7785_devices, | 263 | return platform_add_devices(sh7785_devices, |
264 | ARRAY_SIZE(sh7785_devices)); | 264 | ARRAY_SIZE(sh7785_devices)); |
265 | } | 265 | } |
266 | __initcall(sh7785_devices_setup); | 266 | arch_initcall(sh7785_devices_setup); |
267 | 267 | ||
268 | static struct platform_device *sh7785_early_devices[] __initdata = { | 268 | static struct platform_device *sh7785_early_devices[] __initdata = { |
269 | &tmu0_device, | 269 | &tmu0_device, |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index b70049470a0b..0104a8ec5369 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c | |||
@@ -547,7 +547,7 @@ static int __init sh7786_devices_setup(void) | |||
547 | return platform_add_devices(sh7786_devices, | 547 | return platform_add_devices(sh7786_devices, |
548 | ARRAY_SIZE(sh7786_devices)); | 548 | ARRAY_SIZE(sh7786_devices)); |
549 | } | 549 | } |
550 | device_initcall(sh7786_devices_setup); | 550 | arch_initcall(sh7786_devices_setup); |
551 | 551 | ||
552 | void __init plat_early_device_setup(void) | 552 | void __init plat_early_device_setup(void) |
553 | { | 553 | { |
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 53c65fd9ccef..07f078961c71 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c | |||
@@ -256,7 +256,7 @@ static int __init shx3_devices_setup(void) | |||
256 | return platform_add_devices(shx3_devices, | 256 | return platform_add_devices(shx3_devices, |
257 | ARRAY_SIZE(shx3_devices)); | 257 | ARRAY_SIZE(shx3_devices)); |
258 | } | 258 | } |
259 | __initcall(shx3_devices_setup); | 259 | arch_initcall(shx3_devices_setup); |
260 | 260 | ||
261 | void __init plat_early_device_setup(void) | 261 | void __init plat_early_device_setup(void) |
262 | { | 262 | { |
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c index f5ff1ac57fc2..6a0f82f70032 100644 --- a/arch/sh/kernel/cpu/sh5/setup-sh5.c +++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c | |||
@@ -186,7 +186,7 @@ static int __init sh5_devices_setup(void) | |||
186 | return platform_add_devices(sh5_devices, | 186 | return platform_add_devices(sh5_devices, |
187 | ARRAY_SIZE(sh5_devices)); | 187 | ARRAY_SIZE(sh5_devices)); |
188 | } | 188 | } |
189 | __initcall(sh5_devices_setup); | 189 | arch_initcall(sh5_devices_setup); |
190 | 190 | ||
191 | void __init plat_early_device_setup(void) | 191 | void __init plat_early_device_setup(void) |
192 | { | 192 | { |
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 9b352a1e3fb4..0e0e8581cf7a 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c | |||
@@ -39,11 +39,9 @@ void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; | |||
39 | int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; | 39 | int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; |
40 | 40 | ||
41 | #ifdef CONFIG_GENERIC_CMOS_UPDATE | 41 | #ifdef CONFIG_GENERIC_CMOS_UPDATE |
42 | unsigned long read_persistent_clock(void) | 42 | void read_persistent_clock(struct timespec *ts) |
43 | { | 43 | { |
44 | struct timespec tv; | 44 | rtc_sh_get_time(ts); |
45 | rtc_sh_get_time(&tv); | ||
46 | return tv.tv_sec; | ||
47 | } | 45 | } |
48 | 46 | ||
49 | int update_persistent_clock(struct timespec now) | 47 | int update_persistent_clock(struct timespec now) |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 738bdc6b0f8b..13ffa5df37d7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -24,6 +24,7 @@ config X86 | |||
24 | select HAVE_UNSTABLE_SCHED_CLOCK | 24 | select HAVE_UNSTABLE_SCHED_CLOCK |
25 | select HAVE_IDE | 25 | select HAVE_IDE |
26 | select HAVE_OPROFILE | 26 | select HAVE_OPROFILE |
27 | select HAVE_PERF_COUNTERS if (!M386 && !M486) | ||
27 | select HAVE_IOREMAP_PROT | 28 | select HAVE_IOREMAP_PROT |
28 | select HAVE_KPROBES | 29 | select HAVE_KPROBES |
29 | select ARCH_WANT_OPTIONAL_GPIOLIB | 30 | select ARCH_WANT_OPTIONAL_GPIOLIB |
@@ -742,7 +743,6 @@ config X86_UP_IOAPIC | |||
742 | config X86_LOCAL_APIC | 743 | config X86_LOCAL_APIC |
743 | def_bool y | 744 | def_bool y |
744 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC | 745 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC |
745 | select HAVE_PERF_COUNTERS if (!M386 && !M486) | ||
746 | 746 | ||
747 | config X86_IO_APIC | 747 | config X86_IO_APIC |
748 | def_bool y | 748 | def_bool y |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index edc90f23e708..8406ed7f9926 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); | |||
33 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ | 33 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ |
34 | efi_call_virt(f, a1, a2, a3, a4, a5, a6) | 34 | efi_call_virt(f, a1, a2, a3, a4, a5, a6) |
35 | 35 | ||
36 | #define efi_ioremap(addr, size) ioremap_cache(addr, size) | 36 | #define efi_ioremap(addr, size, type) ioremap_cache(addr, size) |
37 | 37 | ||
38 | #else /* !CONFIG_X86_32 */ | 38 | #else /* !CONFIG_X86_32 */ |
39 | 39 | ||
@@ -84,7 +84,8 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, | |||
84 | efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | 84 | efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ |
85 | (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) | 85 | (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) |
86 | 86 | ||
87 | extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size); | 87 | extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, |
88 | u32 type); | ||
88 | 89 | ||
89 | #endif /* CONFIG_X86_32 */ | 90 | #endif /* CONFIG_X86_32 */ |
90 | 91 | ||
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 2bdab21f0898..c6ccbe7e81ad 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -12,9 +12,15 @@ static inline unsigned long native_save_fl(void) | |||
12 | { | 12 | { |
13 | unsigned long flags; | 13 | unsigned long flags; |
14 | 14 | ||
15 | /* | ||
16 | * Note: this needs to be "=r" not "=rm", because we have the | ||
17 | * stack offset from what gcc expects at the time the "pop" is | ||
18 | * executed, and so a memory reference with respect to the stack | ||
19 | * would end up using the wrong address. | ||
20 | */ | ||
15 | asm volatile("# __raw_save_flags\n\t" | 21 | asm volatile("# __raw_save_flags\n\t" |
16 | "pushf ; pop %0" | 22 | "pushf ; pop %0" |
17 | : "=g" (flags) | 23 | : "=r" (flags) |
18 | : /* no input */ | 24 | : /* no input */ |
19 | : "memory"); | 25 | : "memory"); |
20 | 26 | ||
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 341070f7ad5c..77a68505419a 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -175,7 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | |||
175 | #define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT)) | 175 | #define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT)) |
176 | 176 | ||
177 | #define UV_GLOBAL_MMR64_PNODE_BITS(p) \ | 177 | #define UV_GLOBAL_MMR64_PNODE_BITS(p) \ |
178 | ((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT) | 178 | (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT) |
179 | 179 | ||
180 | #define UV_APIC_PNODE_SHIFT 6 | 180 | #define UV_APIC_PNODE_SHIFT 6 |
181 | 181 | ||
@@ -327,6 +327,7 @@ struct uv_blade_info { | |||
327 | unsigned short nr_possible_cpus; | 327 | unsigned short nr_possible_cpus; |
328 | unsigned short nr_online_cpus; | 328 | unsigned short nr_online_cpus; |
329 | unsigned short pnode; | 329 | unsigned short pnode; |
330 | short memory_nid; | ||
330 | }; | 331 | }; |
331 | extern struct uv_blade_info *uv_blade_info; | 332 | extern struct uv_blade_info *uv_blade_info; |
332 | extern short *uv_node_to_blade; | 333 | extern short *uv_node_to_blade; |
@@ -363,6 +364,12 @@ static inline int uv_blade_to_pnode(int bid) | |||
363 | return uv_blade_info[bid].pnode; | 364 | return uv_blade_info[bid].pnode; |
364 | } | 365 | } |
365 | 366 | ||
367 | /* Nid of memory node on blade. -1 if no blade-local memory */ | ||
368 | static inline int uv_blade_to_memory_nid(int bid) | ||
369 | { | ||
370 | return uv_blade_info[bid].memory_nid; | ||
371 | } | ||
372 | |||
366 | /* Determine the number of possible cpus on a blade */ | 373 | /* Determine the number of possible cpus on a blade */ |
367 | static inline int uv_blade_nr_possible_cpus(int bid) | 374 | static inline int uv_blade_nr_possible_cpus(int bid) |
368 | { | 375 | { |
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index dc27a69e5d2a..3d61e204826f 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -21,6 +21,7 @@ struct vsyscall_gtod_data { | |||
21 | u32 shift; | 21 | u32 shift; |
22 | } clock; | 22 | } clock; |
23 | struct timespec wall_to_monotonic; | 23 | struct timespec wall_to_monotonic; |
24 | struct timespec wall_time_coarse; | ||
24 | }; | 25 | }; |
25 | extern struct vsyscall_gtod_data __vsyscall_gtod_data | 26 | extern struct vsyscall_gtod_data __vsyscall_gtod_data |
26 | __section_vsyscall_gtod_data; | 27 | __section_vsyscall_gtod_data; |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 2284a4812b68..d2ed6c5ddc80 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -3793,6 +3793,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3793 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | 3793 | mmr_pnode = uv_blade_to_pnode(mmr_blade); |
3794 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 3794 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
3795 | 3795 | ||
3796 | if (cfg->move_in_progress) | ||
3797 | send_cleanup_vector(cfg); | ||
3798 | |||
3796 | return irq; | 3799 | return irq; |
3797 | } | 3800 | } |
3798 | 3801 | ||
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 8e4cbb255c38..a5371ec36776 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
17 | return x2apic_enabled(); | 17 | return x2apic_enabled(); |
18 | } | 18 | } |
19 | 19 | ||
20 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 20 | /* |
21 | 21 | * need to use more than cpu 0, because we need more vectors when | |
22 | * MSI-X is used. | ||
23 | */ | ||
22 | static const struct cpumask *x2apic_target_cpus(void) | 24 | static const struct cpumask *x2apic_target_cpus(void) |
23 | { | 25 | { |
24 | return cpumask_of(0); | 26 | return cpu_online_mask; |
25 | } | 27 | } |
26 | 28 | ||
27 | /* | 29 | /* |
@@ -170,7 +172,7 @@ static unsigned long set_apic_id(unsigned int id) | |||
170 | 172 | ||
171 | static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) | 173 | static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) |
172 | { | 174 | { |
173 | return current_cpu_data.initial_apicid >> index_msb; | 175 | return initial_apicid >> index_msb; |
174 | } | 176 | } |
175 | 177 | ||
176 | static void x2apic_send_IPI_self(int vector) | 178 | static void x2apic_send_IPI_self(int vector) |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index a284359627e7..a8989aadc99a 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
27 | return 0; | 27 | return 0; |
28 | } | 28 | } |
29 | 29 | ||
30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 30 | /* |
31 | 31 | * need to use more than cpu 0, because we need more vectors when | |
32 | * MSI-X is used. | ||
33 | */ | ||
32 | static const struct cpumask *x2apic_target_cpus(void) | 34 | static const struct cpumask *x2apic_target_cpus(void) |
33 | { | 35 | { |
34 | return cpumask_of(0); | 36 | return cpu_online_mask; |
35 | } | 37 | } |
36 | 38 | ||
37 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) | 39 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) |
@@ -162,7 +164,7 @@ static unsigned long set_apic_id(unsigned int id) | |||
162 | 164 | ||
163 | static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) | 165 | static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) |
164 | { | 166 | { |
165 | return current_cpu_data.initial_apicid >> index_msb; | 167 | return initial_apicid >> index_msb; |
166 | } | 168 | } |
167 | 169 | ||
168 | static void x2apic_send_IPI_self(int vector) | 170 | static void x2apic_send_IPI_self(int vector) |
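
Both x2apic drivers now derive the package id from the initial_apicid argument rather than current_cpu_data, so the helper works for any CPU, not just the one currently executing. A stand-alone sketch of the shift-based split is below; the field width and sample APIC id are made up.

#include <stdio.h>

/* Split an APIC id into package and in-package parts by shifting off the
 * low index_msb bits; index_msb is the number of bits used for logical
 * processors inside one package. */
static int pkg_id(unsigned int apicid, int index_msb)
{
	return apicid >> index_msb;
}

int main(void)
{
	int index_msb = 4;		/* assume up to 16 CPUs per package */
	unsigned int apicid = 0x23;	/* package 2, cpu 3 within it */

	printf("apicid 0x%x -> package %d, local id %u\n",
	       apicid, pkg_id(apicid, index_msb),
	       apicid & ((1u << index_msb) - 1));
	return 0;
}
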
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 096d19aea2f7..832e908adcb5 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -261,7 +261,7 @@ struct apic apic_x2apic_uv_x = { | |||
261 | .apic_id_registered = uv_apic_id_registered, | 261 | .apic_id_registered = uv_apic_id_registered, |
262 | 262 | ||
263 | .irq_delivery_mode = dest_Fixed, | 263 | .irq_delivery_mode = dest_Fixed, |
264 | .irq_dest_mode = 1, /* logical */ | 264 | .irq_dest_mode = 0, /* physical */ |
265 | 265 | ||
266 | .target_cpus = uv_target_cpus, | 266 | .target_cpus = uv_target_cpus, |
267 | .disable_esr = 0, | 267 | .disable_esr = 0, |
@@ -362,12 +362,6 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) | |||
362 | BUG(); | 362 | BUG(); |
363 | } | 363 | } |
364 | 364 | ||
365 | static __init void map_low_mmrs(void) | ||
366 | { | ||
367 | init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); | ||
368 | init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); | ||
369 | } | ||
370 | |||
371 | enum map_type {map_wb, map_uc}; | 365 | enum map_type {map_wb, map_uc}; |
372 | 366 | ||
373 | static __init void map_high(char *id, unsigned long base, int shift, | 367 | static __init void map_high(char *id, unsigned long base, int shift, |
@@ -395,26 +389,6 @@ static __init void map_gru_high(int max_pnode) | |||
395 | map_high("GRU", gru.s.base, shift, max_pnode, map_wb); | 389 | map_high("GRU", gru.s.base, shift, max_pnode, map_wb); |
396 | } | 390 | } |
397 | 391 | ||
398 | static __init void map_config_high(int max_pnode) | ||
399 | { | ||
400 | union uvh_rh_gam_cfg_overlay_config_mmr_u cfg; | ||
401 | int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT; | ||
402 | |||
403 | cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR); | ||
404 | if (cfg.s.enable) | ||
405 | map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc); | ||
406 | } | ||
407 | |||
408 | static __init void map_mmr_high(int max_pnode) | ||
409 | { | ||
410 | union uvh_rh_gam_mmr_overlay_config_mmr_u mmr; | ||
411 | int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT; | ||
412 | |||
413 | mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); | ||
414 | if (mmr.s.enable) | ||
415 | map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); | ||
416 | } | ||
417 | |||
418 | static __init void map_mmioh_high(int max_pnode) | 392 | static __init void map_mmioh_high(int max_pnode) |
419 | { | 393 | { |
420 | union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; | 394 | union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; |
@@ -566,8 +540,6 @@ void __init uv_system_init(void) | |||
566 | unsigned long mmr_base, present, paddr; | 540 | unsigned long mmr_base, present, paddr; |
567 | unsigned short pnode_mask; | 541 | unsigned short pnode_mask; |
568 | 542 | ||
569 | map_low_mmrs(); | ||
570 | |||
571 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); | 543 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); |
572 | m_val = m_n_config.s.m_skt; | 544 | m_val = m_n_config.s.m_skt; |
573 | n_val = m_n_config.s.n_skt; | 545 | n_val = m_n_config.s.n_skt; |
@@ -591,6 +563,8 @@ void __init uv_system_init(void) | |||
591 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); | 563 | bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); |
592 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); | 564 | uv_blade_info = kmalloc(bytes, GFP_KERNEL); |
593 | BUG_ON(!uv_blade_info); | 565 | BUG_ON(!uv_blade_info); |
566 | for (blade = 0; blade < uv_num_possible_blades(); blade++) | ||
567 | uv_blade_info[blade].memory_nid = -1; | ||
594 | 568 | ||
595 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); | 569 | get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); |
596 | 570 | ||
@@ -629,6 +603,9 @@ void __init uv_system_init(void) | |||
629 | lcpu = uv_blade_info[blade].nr_possible_cpus; | 603 | lcpu = uv_blade_info[blade].nr_possible_cpus; |
630 | uv_blade_info[blade].nr_possible_cpus++; | 604 | uv_blade_info[blade].nr_possible_cpus++; |
631 | 605 | ||
606 | /* Record any memory node on the blade; stays -1 if there is none. */ | ||
607 | uv_blade_info[blade].memory_nid = nid; | ||
608 | |||
632 | uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; | 609 | uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; |
633 | uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; | 610 | uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; |
634 | uv_cpu_hub_info(cpu)->m_val = m_val; | 611 | uv_cpu_hub_info(cpu)->m_val = m_val; |
@@ -662,11 +639,10 @@ void __init uv_system_init(void) | |||
662 | pnode = (paddr >> m_val) & pnode_mask; | 639 | pnode = (paddr >> m_val) & pnode_mask; |
663 | blade = boot_pnode_to_blade(pnode); | 640 | blade = boot_pnode_to_blade(pnode); |
664 | uv_node_to_blade[nid] = blade; | 641 | uv_node_to_blade[nid] = blade; |
642 | max_pnode = max(pnode, max_pnode); | ||
665 | } | 643 | } |
666 | 644 | ||
667 | map_gru_high(max_pnode); | 645 | map_gru_high(max_pnode); |
668 | map_mmr_high(max_pnode); | ||
669 | map_config_high(max_pnode); | ||
670 | map_mmioh_high(max_pnode); | 646 | map_mmioh_high(max_pnode); |
671 | 647 | ||
672 | uv_cpu_init(); | 648 | uv_cpu_init(); |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 79302e9a33a4..442b5508893f 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -811,7 +811,7 @@ static int apm_do_idle(void) | |||
811 | u8 ret = 0; | 811 | u8 ret = 0; |
812 | int idled = 0; | 812 | int idled = 0; |
813 | int polling; | 813 | int polling; |
814 | int err; | 814 | int err = 0; |
815 | 815 | ||
816 | polling = !!(current_thread_info()->status & TS_POLLING); | 816 | polling = !!(current_thread_info()->status & TS_POLLING); |
817 | if (polling) { | 817 | if (polling) { |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e2485b03f1cf..63fddcd082cd 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -400,6 +400,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
400 | level = cpuid_eax(1); | 400 | level = cpuid_eax(1); |
401 | if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | 401 | if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) |
402 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 402 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
403 | |||
404 | /* | ||
405 | * Some BIOSes incorrectly force this feature, but only K8 | ||
406 | * revision D (model = 0x14) and later actually support it. | ||
407 | */ | ||
408 | if (c->x86_model < 0x14) | ||
409 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); | ||
403 | } | 410 | } |
404 | if (c->x86 == 0x10 || c->x86 == 0x11) | 411 | if (c->x86 == 0x10 || c->x86 == 0x11) |
405 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 412 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
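
The AMD fix clears X86_FEATURE_LAHF_LM on pre-revision-D K8 parts where the BIOS advertises it incorrectly. A hedged user-space sketch of that "clear a firmware-advertised capability bit" fixup; the FEAT_* bit positions and model number are illustrative only.

#include <stdio.h>

#define FEAT_LAHF_LM	(1u << 0)	/* illustrative bit position */
#define FEAT_REP_GOOD	(1u << 1)

/* Some firmware advertises a feature the silicon cannot do; the fix is
 * to clear the capability bit for the affected models. */
int main(void)
{
	unsigned caps = FEAT_LAHF_LM | FEAT_REP_GOOD;	/* as reported by firmware */
	unsigned model = 0x10;				/* pre-revision-D K8 */

	if (model < 0x14)
		caps &= ~FEAT_LAHF_LM;			/* not actually supported */

	printf("caps after fixup: %#x\n", caps);
	return 0;
}
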
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f1961c07af9a..5ce60a88027b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -59,7 +59,30 @@ void __init setup_cpu_local_masks(void) | |||
59 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); | 59 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); |
60 | } | 60 | } |
61 | 61 | ||
62 | static const struct cpu_dev *this_cpu __cpuinitdata; | 62 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
63 | { | ||
64 | #ifdef CONFIG_X86_64 | ||
65 | display_cacheinfo(c); | ||
66 | #else | ||
67 | /* Not much we can do here... */ | ||
68 | /* Check if at least it has cpuid */ | ||
69 | if (c->cpuid_level == -1) { | ||
70 | /* No cpuid. It must be an ancient CPU */ | ||
71 | if (c->x86 == 4) | ||
72 | strcpy(c->x86_model_id, "486"); | ||
73 | else if (c->x86 == 3) | ||
74 | strcpy(c->x86_model_id, "386"); | ||
75 | } | ||
76 | #endif | ||
77 | } | ||
78 | |||
79 | static const struct cpu_dev __cpuinitconst default_cpu = { | ||
80 | .c_init = default_init, | ||
81 | .c_vendor = "Unknown", | ||
82 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | ||
83 | }; | ||
84 | |||
85 | static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | ||
63 | 86 | ||
64 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | 87 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
65 | #ifdef CONFIG_X86_64 | 88 | #ifdef CONFIG_X86_64 |
@@ -332,29 +355,6 @@ void switch_to_new_gdt(int cpu) | |||
332 | 355 | ||
333 | static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; | 356 | static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; |
334 | 357 | ||
335 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | ||
336 | { | ||
337 | #ifdef CONFIG_X86_64 | ||
338 | display_cacheinfo(c); | ||
339 | #else | ||
340 | /* Not much we can do here... */ | ||
341 | /* Check if at least it has cpuid */ | ||
342 | if (c->cpuid_level == -1) { | ||
343 | /* No cpuid. It must be an ancient CPU */ | ||
344 | if (c->x86 == 4) | ||
345 | strcpy(c->x86_model_id, "486"); | ||
346 | else if (c->x86 == 3) | ||
347 | strcpy(c->x86_model_id, "386"); | ||
348 | } | ||
349 | #endif | ||
350 | } | ||
351 | |||
352 | static const struct cpu_dev __cpuinitconst default_cpu = { | ||
353 | .c_init = default_init, | ||
354 | .c_vendor = "Unknown", | ||
355 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | ||
356 | }; | ||
357 | |||
358 | static void __cpuinit get_model_name(struct cpuinfo_x86 *c) | 358 | static void __cpuinit get_model_name(struct cpuinfo_x86 *c) |
359 | { | 359 | { |
360 | unsigned int *v; | 360 | unsigned int *v; |
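
Moving default_cpu above this_cpu lets the pointer start life as &default_cpu instead of NULL, so early callers always find a usable ops structure before vendor detection runs. A small illustrative sketch of that default-ops fallback pattern (cpu_ops, this_ops and default_init are invented names):

#include <stdio.h>

struct cpu_ops {
	const char *vendor;
	void (*init)(void);
};

static void default_init(void) { puts("generic init"); }

/* Defined before first use so the pointer below can reference it. */
static const struct cpu_ops default_ops = {
	.vendor = "Unknown",
	.init	= default_init,
};

/* Never NULL: callers may dereference it before vendor detection runs. */
static const struct cpu_ops *this_ops = &default_ops;

int main(void)
{
	printf("vendor: %s\n", this_ops->vendor);
	this_ops->init();
	return 0;
}
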
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index bff8dd191dd5..8bc64cfbe936 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES; | 37 | static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES; |
38 | static DEFINE_PER_CPU(unsigned long, thermal_throttle_count); | 38 | static DEFINE_PER_CPU(unsigned long, thermal_throttle_count); |
39 | static DEFINE_PER_CPU(bool, thermal_throttle_active); | ||
39 | 40 | ||
40 | static atomic_t therm_throt_en = ATOMIC_INIT(0); | 41 | static atomic_t therm_throt_en = ATOMIC_INIT(0); |
41 | 42 | ||
@@ -96,24 +97,27 @@ static int therm_throt_process(int curr) | |||
96 | { | 97 | { |
97 | unsigned int cpu = smp_processor_id(); | 98 | unsigned int cpu = smp_processor_id(); |
98 | __u64 tmp_jiffs = get_jiffies_64(); | 99 | __u64 tmp_jiffs = get_jiffies_64(); |
100 | bool was_throttled = __get_cpu_var(thermal_throttle_active); | ||
101 | bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr; | ||
99 | 102 | ||
100 | if (curr) | 103 | if (is_throttled) |
101 | __get_cpu_var(thermal_throttle_count)++; | 104 | __get_cpu_var(thermal_throttle_count)++; |
102 | 105 | ||
103 | if (time_before64(tmp_jiffs, __get_cpu_var(next_check))) | 106 | if (!(was_throttled ^ is_throttled) && |
107 | time_before64(tmp_jiffs, __get_cpu_var(next_check))) | ||
104 | return 0; | 108 | return 0; |
105 | 109 | ||
106 | __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL; | 110 | __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL; |
107 | 111 | ||
108 | /* if we just entered the thermal event */ | 112 | /* if we just entered the thermal event */ |
109 | if (curr) { | 113 | if (is_throttled) { |
110 | printk(KERN_CRIT "CPU%d: Temperature above threshold, " | 114 | printk(KERN_CRIT "CPU%d: Temperature above threshold, " |
111 | "cpu clock throttled (total events = %lu)\n", cpu, | 115 | "cpu clock throttled (total events = %lu)\n", |
112 | __get_cpu_var(thermal_throttle_count)); | 116 | cpu, __get_cpu_var(thermal_throttle_count)); |
113 | 117 | ||
114 | add_taint(TAINT_MACHINE_CHECK); | 118 | add_taint(TAINT_MACHINE_CHECK); |
115 | } else { | 119 | } else if (was_throttled) { |
116 | printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu); | 120 | printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu); |
117 | } | 121 | } |
118 | 122 | ||
119 | return 1; | 123 | return 1; |
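
The thermal handler now remembers the previous throttle state and reports only on a transition or after the rate-limit interval, rather than printing "normal" on every poll. A user-space sketch of that edge-detection plus rate-limiting idea; the interval, sample data and function names are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define CHECK_INTERVAL 300	/* seconds between repeated reports */

static bool was_throttled;
static unsigned long next_check;

/* Returns 1 when something was reported, 0 when the event was squelched. */
static int throttle_event(bool is_throttled, unsigned long now)
{
	bool changed = was_throttled != is_throttled;

	was_throttled = is_throttled;

	if (!changed && now < next_check)
		return 0;		/* same state, too soon: stay quiet */

	next_check = now + CHECK_INTERVAL;

	if (is_throttled)
		printf("[%lu] temperature above threshold, clock throttled\n", now);
	else if (changed)
		printf("[%lu] temperature/speed back to normal\n", now);
	return 1;
}

int main(void)
{
	unsigned long t = 0;
	bool samples[] = { false, true, true, true, false, false };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++, t += 10)
		throttle_event(samples[i], t);
	return 0;
}
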
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a7aa8f900954..900332b800f8 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -55,6 +55,7 @@ struct x86_pmu { | |||
55 | int num_counters_fixed; | 55 | int num_counters_fixed; |
56 | int counter_bits; | 56 | int counter_bits; |
57 | u64 counter_mask; | 57 | u64 counter_mask; |
58 | int apic; | ||
58 | u64 max_period; | 59 | u64 max_period; |
59 | u64 intel_ctrl; | 60 | u64 intel_ctrl; |
60 | }; | 61 | }; |
@@ -72,8 +73,8 @@ static const u64 p6_perfmon_event_map[] = | |||
72 | { | 73 | { |
73 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, | 74 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, |
74 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | 75 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, |
75 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0000, | 76 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, |
76 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0000, | 77 | [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, |
77 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | 78 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, |
78 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | 79 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, |
79 | [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, | 80 | [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, |
@@ -613,6 +614,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex); | |||
613 | 614 | ||
614 | static bool reserve_pmc_hardware(void) | 615 | static bool reserve_pmc_hardware(void) |
615 | { | 616 | { |
617 | #ifdef CONFIG_X86_LOCAL_APIC | ||
616 | int i; | 618 | int i; |
617 | 619 | ||
618 | if (nmi_watchdog == NMI_LOCAL_APIC) | 620 | if (nmi_watchdog == NMI_LOCAL_APIC) |
@@ -627,9 +629,11 @@ static bool reserve_pmc_hardware(void) | |||
627 | if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) | 629 | if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) |
628 | goto eventsel_fail; | 630 | goto eventsel_fail; |
629 | } | 631 | } |
632 | #endif | ||
630 | 633 | ||
631 | return true; | 634 | return true; |
632 | 635 | ||
636 | #ifdef CONFIG_X86_LOCAL_APIC | ||
633 | eventsel_fail: | 637 | eventsel_fail: |
634 | for (i--; i >= 0; i--) | 638 | for (i--; i >= 0; i--) |
635 | release_evntsel_nmi(x86_pmu.eventsel + i); | 639 | release_evntsel_nmi(x86_pmu.eventsel + i); |
@@ -644,10 +648,12 @@ perfctr_fail: | |||
644 | enable_lapic_nmi_watchdog(); | 648 | enable_lapic_nmi_watchdog(); |
645 | 649 | ||
646 | return false; | 650 | return false; |
651 | #endif | ||
647 | } | 652 | } |
648 | 653 | ||
649 | static void release_pmc_hardware(void) | 654 | static void release_pmc_hardware(void) |
650 | { | 655 | { |
656 | #ifdef CONFIG_X86_LOCAL_APIC | ||
651 | int i; | 657 | int i; |
652 | 658 | ||
653 | for (i = 0; i < x86_pmu.num_counters; i++) { | 659 | for (i = 0; i < x86_pmu.num_counters; i++) { |
@@ -657,6 +663,7 @@ static void release_pmc_hardware(void) | |||
657 | 663 | ||
658 | if (nmi_watchdog == NMI_LOCAL_APIC) | 664 | if (nmi_watchdog == NMI_LOCAL_APIC) |
659 | enable_lapic_nmi_watchdog(); | 665 | enable_lapic_nmi_watchdog(); |
666 | #endif | ||
660 | } | 667 | } |
661 | 668 | ||
662 | static void hw_perf_counter_destroy(struct perf_counter *counter) | 669 | static void hw_perf_counter_destroy(struct perf_counter *counter) |
@@ -748,6 +755,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
748 | hwc->sample_period = x86_pmu.max_period; | 755 | hwc->sample_period = x86_pmu.max_period; |
749 | hwc->last_period = hwc->sample_period; | 756 | hwc->last_period = hwc->sample_period; |
750 | atomic64_set(&hwc->period_left, hwc->sample_period); | 757 | atomic64_set(&hwc->period_left, hwc->sample_period); |
758 | } else { | ||
759 | /* | ||
760 | * If we have a PMU initialized but no APIC | ||
761 | * interrupts, we cannot sample hardware | ||
762 | * counters (user-space has to fall back and | ||
763 | * sample via a hrtimer based software counter): | ||
764 | */ | ||
765 | if (!x86_pmu.apic) | ||
766 | return -EOPNOTSUPP; | ||
751 | } | 767 | } |
752 | 768 | ||
753 | counter->destroy = hw_perf_counter_destroy; | 769 | counter->destroy = hw_perf_counter_destroy; |
@@ -1449,18 +1465,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs) | |||
1449 | 1465 | ||
1450 | void set_perf_counter_pending(void) | 1466 | void set_perf_counter_pending(void) |
1451 | { | 1467 | { |
1468 | #ifdef CONFIG_X86_LOCAL_APIC | ||
1452 | apic->send_IPI_self(LOCAL_PENDING_VECTOR); | 1469 | apic->send_IPI_self(LOCAL_PENDING_VECTOR); |
1470 | #endif | ||
1453 | } | 1471 | } |
1454 | 1472 | ||
1455 | void perf_counters_lapic_init(void) | 1473 | void perf_counters_lapic_init(void) |
1456 | { | 1474 | { |
1457 | if (!x86_pmu_initialized()) | 1475 | #ifdef CONFIG_X86_LOCAL_APIC |
1476 | if (!x86_pmu.apic || !x86_pmu_initialized()) | ||
1458 | return; | 1477 | return; |
1459 | 1478 | ||
1460 | /* | 1479 | /* |
1461 | * Always use NMI for PMU | 1480 | * Always use NMI for PMU |
1462 | */ | 1481 | */ |
1463 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1482 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
1483 | #endif | ||
1464 | } | 1484 | } |
1465 | 1485 | ||
1466 | static int __kprobes | 1486 | static int __kprobes |
@@ -1484,7 +1504,9 @@ perf_counter_nmi_handler(struct notifier_block *self, | |||
1484 | 1504 | ||
1485 | regs = args->regs; | 1505 | regs = args->regs; |
1486 | 1506 | ||
1507 | #ifdef CONFIG_X86_LOCAL_APIC | ||
1487 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1508 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
1509 | #endif | ||
1488 | /* | 1510 | /* |
1489 | * Can't rely on the handled return value to say it was our NMI, two | 1511 | * Can't rely on the handled return value to say it was our NMI, two |
1490 | * counters could trigger 'simultaneously' raising two back-to-back NMIs. | 1512 | * counters could trigger 'simultaneously' raising two back-to-back NMIs. |
@@ -1515,6 +1537,7 @@ static struct x86_pmu p6_pmu = { | |||
1515 | .event_map = p6_pmu_event_map, | 1537 | .event_map = p6_pmu_event_map, |
1516 | .raw_event = p6_pmu_raw_event, | 1538 | .raw_event = p6_pmu_raw_event, |
1517 | .max_events = ARRAY_SIZE(p6_perfmon_event_map), | 1539 | .max_events = ARRAY_SIZE(p6_perfmon_event_map), |
1540 | .apic = 1, | ||
1518 | .max_period = (1ULL << 31) - 1, | 1541 | .max_period = (1ULL << 31) - 1, |
1519 | .version = 0, | 1542 | .version = 0, |
1520 | .num_counters = 2, | 1543 | .num_counters = 2, |
@@ -1541,6 +1564,7 @@ static struct x86_pmu intel_pmu = { | |||
1541 | .event_map = intel_pmu_event_map, | 1564 | .event_map = intel_pmu_event_map, |
1542 | .raw_event = intel_pmu_raw_event, | 1565 | .raw_event = intel_pmu_raw_event, |
1543 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), | 1566 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), |
1567 | .apic = 1, | ||
1544 | /* | 1568 | /* |
1545 | * Intel PMCs cannot be accessed sanely above 32 bit width, | 1569 | * Intel PMCs cannot be accessed sanely above 32 bit width, |
1546 | * so we install an artificial 1<<31 period regardless of | 1570 | * so we install an artificial 1<<31 period regardless of |
@@ -1564,6 +1588,7 @@ static struct x86_pmu amd_pmu = { | |||
1564 | .num_counters = 4, | 1588 | .num_counters = 4, |
1565 | .counter_bits = 48, | 1589 | .counter_bits = 48, |
1566 | .counter_mask = (1ULL << 48) - 1, | 1590 | .counter_mask = (1ULL << 48) - 1, |
1591 | .apic = 1, | ||
1567 | /* use highest bit to detect overflow */ | 1592 | /* use highest bit to detect overflow */ |
1568 | .max_period = (1ULL << 47) - 1, | 1593 | .max_period = (1ULL << 47) - 1, |
1569 | }; | 1594 | }; |
@@ -1589,13 +1614,14 @@ static int p6_pmu_init(void) | |||
1589 | return -ENODEV; | 1614 | return -ENODEV; |
1590 | } | 1615 | } |
1591 | 1616 | ||
1617 | x86_pmu = p6_pmu; | ||
1618 | |||
1592 | if (!cpu_has_apic) { | 1619 | if (!cpu_has_apic) { |
1593 | pr_info("no Local APIC, try rebooting with lapic"); | 1620 | pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); |
1594 | return -ENODEV; | 1621 | pr_info("no hardware sampling interrupt available.\n"); |
1622 | x86_pmu.apic = 0; | ||
1595 | } | 1623 | } |
1596 | 1624 | ||
1597 | x86_pmu = p6_pmu; | ||
1598 | |||
1599 | return 0; | 1625 | return 0; |
1600 | } | 1626 | } |
1601 | 1627 | ||
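
The new x86_pmu.apic flag keeps counting usable without a local APIC while rejecting sampling with -EOPNOTSUPP, so user space can fall back to hrtimer-based software sampling. A minimal sketch of that capability gate, assuming an invented pmu_caps struct:

#include <errno.h>
#include <stdio.h>

struct pmu_caps {
	int apic;		/* 0: no sampling interrupt available */
};

/* Counting always works; sampling needs the interrupt path. */
static int counter_init(const struct pmu_caps *pmu, int wants_sampling)
{
	if (wants_sampling && !pmu->apic)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	struct pmu_caps no_apic = { .apic = 0 };

	printf("counting: %d\n", counter_init(&no_apic, 0));	/* 0 */
	printf("sampling: %d\n", counter_init(&no_apic, 1));	/* -EOPNOTSUPP */
	return 0;
}
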
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index 96f7ac0bbf01..fe26ba3e3451 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c | |||
@@ -354,7 +354,7 @@ void __init efi_init(void) | |||
354 | */ | 354 | */ |
355 | c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2); | 355 | c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2); |
356 | if (c16) { | 356 | if (c16) { |
357 | for (i = 0; i < sizeof(vendor) && *c16; ++i) | 357 | for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) |
358 | vendor[i] = *c16++; | 358 | vendor[i] = *c16++; |
359 | vendor[i] = '\0'; | 359 | vendor[i] = '\0'; |
360 | } else | 360 | } else |
@@ -512,7 +512,7 @@ void __init efi_enter_virtual_mode(void) | |||
512 | && end_pfn <= max_pfn_mapped)) | 512 | && end_pfn <= max_pfn_mapped)) |
513 | va = __va(md->phys_addr); | 513 | va = __va(md->phys_addr); |
514 | else | 514 | else |
515 | va = efi_ioremap(md->phys_addr, size); | 515 | va = efi_ioremap(md->phys_addr, size, md->type); |
516 | 516 | ||
517 | md->virt_addr = (u64) (unsigned long) va; | 517 | md->virt_addr = (u64) (unsigned long) va; |
518 | 518 | ||
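
The vendor-string fix bounds the copy at sizeof(vendor) - 1 so the terminating NUL always fits, even when the firmware string is longer than the buffer. A stand-alone sketch of the same bounded narrowing copy; the buffer size and sample UTF-16 data are invented.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t fw_vendor[] = { 'A', 'C', 'M', 'E', ' ', 'C', 'o', 'r', 'p', 0 };
	char vendor[8];		/* deliberately smaller than the input */
	const uint16_t *c16 = fw_vendor;
	unsigned i;

	/* Reserve one byte for the terminator: i < sizeof(vendor) - 1. */
	for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
		vendor[i] = (char)*c16++;
	vendor[i] = '\0';

	printf("vendor = \"%s\"\n", vendor);	/* truncated but NUL-terminated */
	return 0;
}
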
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c index 22c3b7828c50..ac0621a7ac3d 100644 --- a/arch/x86/kernel/efi_64.c +++ b/arch/x86/kernel/efi_64.c | |||
@@ -98,10 +98,14 @@ void __init efi_call_phys_epilog(void) | |||
98 | early_runtime_code_mapping_set_exec(0); | 98 | early_runtime_code_mapping_set_exec(0); |
99 | } | 99 | } |
100 | 100 | ||
101 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size) | 101 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, |
102 | u32 type) | ||
102 | { | 103 | { |
103 | unsigned long last_map_pfn; | 104 | unsigned long last_map_pfn; |
104 | 105 | ||
106 | if (type == EFI_MEMORY_MAPPED_IO) | ||
107 | return ioremap(phys_addr, size); | ||
108 | |||
105 | last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); | 109 | last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); |
106 | if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) | 110 | if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) |
107 | return NULL; | 111 | return NULL; |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 8663afb56535..0d98a01cbdb2 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -602,7 +602,11 @@ ignore_int: | |||
602 | #endif | 602 | #endif |
603 | iret | 603 | iret |
604 | 604 | ||
605 | .section .cpuinit.data,"wa" | 605 | #ifndef CONFIG_HOTPLUG_CPU |
606 | __CPUINITDATA | ||
607 | #else | ||
608 | __REFDATA | ||
609 | #endif | ||
606 | .align 4 | 610 | .align 4 |
607 | ENTRY(initial_code) | 611 | ENTRY(initial_code) |
608 | .long i386_start_kernel | 612 | .long i386_start_kernel |
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 5cf36c053ac4..23c167925a5c 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c | |||
@@ -19,12 +19,6 @@ | |||
19 | DEFINE_SPINLOCK(i8253_lock); | 19 | DEFINE_SPINLOCK(i8253_lock); |
20 | EXPORT_SYMBOL(i8253_lock); | 20 | EXPORT_SYMBOL(i8253_lock); |
21 | 21 | ||
22 | #ifdef CONFIG_X86_32 | ||
23 | static void pit_disable_clocksource(void); | ||
24 | #else | ||
25 | static inline void pit_disable_clocksource(void) { } | ||
26 | #endif | ||
27 | |||
28 | /* | 22 | /* |
29 | * HPET replaces the PIT, when enabled. So we need to know, which of | 23 | * HPET replaces the PIT, when enabled. So we need to know, which of |
30 | * the two timers is used | 24 | * the two timers is used |
@@ -57,12 +51,10 @@ static void init_pit_timer(enum clock_event_mode mode, | |||
57 | outb_pit(0, PIT_CH0); | 51 | outb_pit(0, PIT_CH0); |
58 | outb_pit(0, PIT_CH0); | 52 | outb_pit(0, PIT_CH0); |
59 | } | 53 | } |
60 | pit_disable_clocksource(); | ||
61 | break; | 54 | break; |
62 | 55 | ||
63 | case CLOCK_EVT_MODE_ONESHOT: | 56 | case CLOCK_EVT_MODE_ONESHOT: |
64 | /* One shot setup */ | 57 | /* One shot setup */ |
65 | pit_disable_clocksource(); | ||
66 | outb_pit(0x38, PIT_MODE); | 58 | outb_pit(0x38, PIT_MODE); |
67 | break; | 59 | break; |
68 | 60 | ||
@@ -200,17 +192,6 @@ static struct clocksource pit_cs = { | |||
200 | .shift = 20, | 192 | .shift = 20, |
201 | }; | 193 | }; |
202 | 194 | ||
203 | static void pit_disable_clocksource(void) | ||
204 | { | ||
205 | /* | ||
206 | * Use mult to check whether it is registered or not | ||
207 | */ | ||
208 | if (pit_cs.mult) { | ||
209 | clocksource_unregister(&pit_cs); | ||
210 | pit_cs.mult = 0; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | static int __init init_pit_clocksource(void) | 195 | static int __init init_pit_clocksource(void) |
215 | { | 196 | { |
216 | /* | 197 | /* |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 508e982dd072..a06e8d101844 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | #include <linux/efi.h> | 5 | #include <linux/efi.h> |
6 | #include <linux/dmi.h> | ||
6 | #include <acpi/reboot.h> | 7 | #include <acpi/reboot.h> |
7 | #include <asm/io.h> | 8 | #include <asm/io.h> |
8 | #include <asm/apic.h> | 9 | #include <asm/apic.h> |
@@ -17,7 +18,6 @@ | |||
17 | #include <asm/cpu.h> | 18 | #include <asm/cpu.h> |
18 | 19 | ||
19 | #ifdef CONFIG_X86_32 | 20 | #ifdef CONFIG_X86_32 |
20 | # include <linux/dmi.h> | ||
21 | # include <linux/ctype.h> | 21 | # include <linux/ctype.h> |
22 | # include <linux/mc146818rtc.h> | 22 | # include <linux/mc146818rtc.h> |
23 | #else | 23 | #else |
@@ -404,6 +404,46 @@ EXPORT_SYMBOL(machine_real_restart); | |||
404 | 404 | ||
405 | #endif /* CONFIG_X86_32 */ | 405 | #endif /* CONFIG_X86_32 */ |
406 | 406 | ||
407 | /* | ||
408 | * Some Apple MacBook and MacBookPro models need reboot=p to be able to reboot | ||
409 | */ | ||
410 | static int __init set_pci_reboot(const struct dmi_system_id *d) | ||
411 | { | ||
412 | if (reboot_type != BOOT_CF9) { | ||
413 | reboot_type = BOOT_CF9; | ||
414 | printk(KERN_INFO "%s series board detected. " | ||
415 | "Selecting PCI-method for reboots.\n", d->ident); | ||
416 | } | ||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { | ||
421 | { /* Handle problems with rebooting on Apple MacBook5 */ | ||
422 | .callback = set_pci_reboot, | ||
423 | .ident = "Apple MacBook5", | ||
424 | .matches = { | ||
425 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
426 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), | ||
427 | }, | ||
428 | }, | ||
429 | { /* Handle problems with rebooting on Apple MacBookPro5 */ | ||
430 | .callback = set_pci_reboot, | ||
431 | .ident = "Apple MacBookPro5", | ||
432 | .matches = { | ||
433 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
434 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"), | ||
435 | }, | ||
436 | }, | ||
437 | { } | ||
438 | }; | ||
439 | |||
440 | static int __init pci_reboot_init(void) | ||
441 | { | ||
442 | dmi_check_system(pci_reboot_dmi_table); | ||
443 | return 0; | ||
444 | } | ||
445 | core_initcall(pci_reboot_init); | ||
446 | |||
407 | static inline void kb_wait(void) | 447 | static inline void kb_wait(void) |
408 | { | 448 | { |
409 | int i; | 449 | int i; |
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 5d465b207e72..bf67dcb4a44c 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -178,7 +178,7 @@ static int set_rtc_mmss(unsigned long nowtime) | |||
178 | } | 178 | } |
179 | 179 | ||
180 | /* not static: needed by APM */ | 180 | /* not static: needed by APM */ |
181 | unsigned long read_persistent_clock(void) | 181 | void read_persistent_clock(struct timespec *ts) |
182 | { | 182 | { |
183 | unsigned long retval, flags; | 183 | unsigned long retval, flags; |
184 | 184 | ||
@@ -186,7 +186,8 @@ unsigned long read_persistent_clock(void) | |||
186 | retval = get_wallclock(); | 186 | retval = get_wallclock(); |
187 | spin_unlock_irqrestore(&rtc_lock, flags); | 187 | spin_unlock_irqrestore(&rtc_lock, flags); |
188 | 188 | ||
189 | return retval; | 189 | ts->tv_sec = retval; |
190 | ts->tv_nsec = 0; | ||
190 | } | 191 | } |
191 | 192 | ||
192 | int update_persistent_clock(struct timespec now) | 193 | int update_persistent_clock(struct timespec now) |
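
read_persistent_clock() now fills a struct timespec out-parameter instead of returning bare seconds, leaving room for sub-second resolution from future clock sources. A hedged user-space sketch of the new calling convention; time(NULL) merely stands in for get_wallclock().

#include <stdio.h>
#include <time.h>

/* Old style: return seconds.  New style: fill a timespec so callers can
 * also receive nanoseconds once the underlying clock provides them. */
static void read_persistent_clock_sketch(struct timespec *ts)
{
	ts->tv_sec = time(NULL);	/* stand-in for get_wallclock() */
	ts->tv_nsec = 0;		/* the CMOS RTC has no sub-second part */
}

int main(void)
{
	struct timespec ts;

	read_persistent_clock_sketch(&ts);
	printf("persistent clock: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
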
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 6e1a368d21d4..fc3672a303d6 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -275,15 +275,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin) | |||
275 | * use the TSC value at the transitions to calculate a pretty | 275 | * use the TSC value at the transitions to calculate a pretty |
276 | * good value for the TSC frequency. | 276 | * good value for the TSC frequency. |
277 | */ | 277 | */ |
278 | static inline int pit_verify_msb(unsigned char val) | ||
279 | { | ||
280 | /* Ignore LSB */ | ||
281 | inb(0x42); | ||
282 | return inb(0x42) == val; | ||
283 | } | ||
284 | |||
278 | static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap) | 285 | static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap) |
279 | { | 286 | { |
280 | int count; | 287 | int count; |
281 | u64 tsc = 0; | 288 | u64 tsc = 0; |
282 | 289 | ||
283 | for (count = 0; count < 50000; count++) { | 290 | for (count = 0; count < 50000; count++) { |
284 | /* Ignore LSB */ | 291 | if (!pit_verify_msb(val)) |
285 | inb(0x42); | ||
286 | if (inb(0x42) != val) | ||
287 | break; | 292 | break; |
288 | tsc = get_cycles(); | 293 | tsc = get_cycles(); |
289 | } | 294 | } |
@@ -336,8 +341,7 @@ static unsigned long quick_pit_calibrate(void) | |||
336 | * to do that is to just read back the 16-bit counter | 341 | * to do that is to just read back the 16-bit counter |
337 | * once from the PIT. | 342 | * once from the PIT. |
338 | */ | 343 | */ |
339 | inb(0x42); | 344 | pit_verify_msb(0); |
340 | inb(0x42); | ||
341 | 345 | ||
342 | if (pit_expect_msb(0xff, &tsc, &d1)) { | 346 | if (pit_expect_msb(0xff, &tsc, &d1)) { |
343 | for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) { | 347 | for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) { |
@@ -348,8 +352,19 @@ static unsigned long quick_pit_calibrate(void) | |||
348 | * Iterate until the error is less than 500 ppm | 352 | * Iterate until the error is less than 500 ppm |
349 | */ | 353 | */ |
350 | delta -= tsc; | 354 | delta -= tsc; |
351 | if (d1+d2 < delta >> 11) | 355 | if (d1+d2 >= delta >> 11) |
352 | goto success; | 356 | continue; |
357 | |||
358 | /* | ||
359 | * Check the PIT one more time to verify that | ||
360 | * all TSC reads were stable wrt the PIT. | ||
361 | * | ||
362 | * This also guarantees serialization of the | ||
363 | * last cycle read ('d2') in pit_expect_msb. | ||
364 | */ | ||
365 | if (!pit_verify_msb(0xfe - i)) | ||
366 | break; | ||
367 | goto success; | ||
353 | } | 368 | } |
354 | } | 369 | } |
355 | printk("Fast TSC calibration failed\n"); | 370 | printk("Fast TSC calibration failed\n"); |
@@ -729,10 +744,16 @@ static cycle_t __vsyscall_fn vread_tsc(void) | |||
729 | } | 744 | } |
730 | #endif | 745 | #endif |
731 | 746 | ||
747 | static void resume_tsc(void) | ||
748 | { | ||
749 | clocksource_tsc.cycle_last = 0; | ||
750 | } | ||
751 | |||
732 | static struct clocksource clocksource_tsc = { | 752 | static struct clocksource clocksource_tsc = { |
733 | .name = "tsc", | 753 | .name = "tsc", |
734 | .rating = 300, | 754 | .rating = 300, |
735 | .read = read_tsc, | 755 | .read = read_tsc, |
756 | .resume = resume_tsc, | ||
736 | .mask = CLOCKSOURCE_MASK(64), | 757 | .mask = CLOCKSOURCE_MASK(64), |
737 | .shift = 22, | 758 | .shift = 22, |
738 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | | 759 | .flags = CLOCK_SOURCE_IS_CONTINUOUS | |
@@ -746,12 +767,14 @@ void mark_tsc_unstable(char *reason) | |||
746 | { | 767 | { |
747 | if (!tsc_unstable) { | 768 | if (!tsc_unstable) { |
748 | tsc_unstable = 1; | 769 | tsc_unstable = 1; |
749 | printk("Marking TSC unstable due to %s\n", reason); | 770 | printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); |
750 | /* Change only the rating, when not registered */ | 771 | /* Change only the rating, when not registered */ |
751 | if (clocksource_tsc.mult) | 772 | if (clocksource_tsc.mult) |
752 | clocksource_change_rating(&clocksource_tsc, 0); | 773 | clocksource_mark_unstable(&clocksource_tsc); |
753 | else | 774 | else { |
775 | clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; | ||
754 | clocksource_tsc.rating = 0; | 776 | clocksource_tsc.rating = 0; |
777 | } | ||
755 | } | 778 | } |
756 | } | 779 | } |
757 | 780 | ||
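
The quick PIT calibration accepts a run only if the read uncertainty d1 + d2 stays below delta >> 11 (roughly 500 ppm) and the PIT MSB still verifies afterwards. A small sketch of that error-bound arithmetic with made-up cycle counts:

#include <stdio.h>
#include <stdint.h>

/* Accept a TSC delta only when the measurement slack is below ~500 ppm,
 * i.e. d1 + d2 < delta / 2048 (delta >> 11). */
static int calibration_good(uint64_t delta, uint64_t d1, uint64_t d2)
{
	return d1 + d2 < (delta >> 11);
}

int main(void)
{
	uint64_t delta = 3000000;	/* cycles across the timed window */

	printf("slack 1000: %s\n",
	       calibration_good(delta, 600, 400) ? "accept" : "retry");
	printf("slack 2000: %s\n",
	       calibration_good(delta, 1200, 800) ? "accept" : "retry");
	return 0;
}
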
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index b263423fbe2a..95a7289e4b0c 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |||
441 | ap.ds = __USER_DS; | 441 | ap.ds = __USER_DS; |
442 | ap.es = __USER_DS; | 442 | ap.es = __USER_DS; |
443 | ap.fs = __KERNEL_PERCPU; | 443 | ap.fs = __KERNEL_PERCPU; |
444 | ap.gs = 0; | 444 | ap.gs = __KERNEL_STACK_CANARY; |
445 | 445 | ||
446 | ap.eflags = 0; | 446 | ap.eflags = 0; |
447 | 447 | ||
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 59f31d2dd435..78d185d797de 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -393,8 +393,8 @@ SECTIONS | |||
393 | 393 | ||
394 | 394 | ||
395 | #ifdef CONFIG_X86_32 | 395 | #ifdef CONFIG_X86_32 |
396 | ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), | 396 | . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), |
397 | "kernel image bigger than KERNEL_IMAGE_SIZE") | 397 | "kernel image bigger than KERNEL_IMAGE_SIZE"); |
398 | #else | 398 | #else |
399 | /* | 399 | /* |
400 | * Per-cpu symbols which need to be offset from __per_cpu_load | 400 | * Per-cpu symbols which need to be offset from __per_cpu_load |
@@ -407,12 +407,12 @@ INIT_PER_CPU(irq_stack_union); | |||
407 | /* | 407 | /* |
408 | * Build-time check on the image size: | 408 | * Build-time check on the image size: |
409 | */ | 409 | */ |
410 | ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), | 410 | . = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), |
411 | "kernel image bigger than KERNEL_IMAGE_SIZE") | 411 | "kernel image bigger than KERNEL_IMAGE_SIZE"); |
412 | 412 | ||
413 | #ifdef CONFIG_SMP | 413 | #ifdef CONFIG_SMP |
414 | ASSERT((per_cpu__irq_stack_union == 0), | 414 | . = ASSERT((per_cpu__irq_stack_union == 0), |
415 | "irq_stack_union is not at start of per-cpu area"); | 415 | "irq_stack_union is not at start of per-cpu area"); |
416 | #endif | 416 | #endif |
417 | 417 | ||
418 | #endif /* CONFIG_X86_32 */ | 418 | #endif /* CONFIG_X86_32 */ |
@@ -420,7 +420,7 @@ ASSERT((per_cpu__irq_stack_union == 0), | |||
420 | #ifdef CONFIG_KEXEC | 420 | #ifdef CONFIG_KEXEC |
421 | #include <asm/kexec.h> | 421 | #include <asm/kexec.h> |
422 | 422 | ||
423 | ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, | 423 | . = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, |
424 | "kexec control code size is too big") | 424 | "kexec control code size is too big"); |
425 | #endif | 425 | #endif |
426 | 426 | ||
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 25ee06a80aad..cf53a78e2dcf 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -87,6 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | |||
87 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; | 87 | vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; |
88 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; | 88 | vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; |
89 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; | 89 | vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic; |
90 | vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); | ||
90 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | 91 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); |
91 | } | 92 | } |
92 | 93 | ||
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 4d6f0d293ee2..21f68e00524f 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -104,6 +104,9 @@ static s64 __kpit_elapsed(struct kvm *kvm) | |||
104 | ktime_t remaining; | 104 | ktime_t remaining; |
105 | struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; | 105 | struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; |
106 | 106 | ||
107 | if (!ps->pit_timer.period) | ||
108 | return 0; | ||
109 | |||
107 | /* | 110 | /* |
108 | * The Counter does not stop when it reaches zero. In | 111 | * The Counter does not stop when it reaches zero. In |
109 | * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to | 112 | * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7030b5f911bf..0ef5bb2b4043 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -489,16 +489,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage) | |||
489 | * | 489 | * |
490 | * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc | 490 | * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc |
491 | * containing more mappings. | 491 | * containing more mappings. |
492 | * | ||
493 | * Returns the number of rmap entries before the spte was added or zero if | ||
494 | * the spte was not added. | ||
495 | * | ||
492 | */ | 496 | */ |
493 | static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) | 497 | static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) |
494 | { | 498 | { |
495 | struct kvm_mmu_page *sp; | 499 | struct kvm_mmu_page *sp; |
496 | struct kvm_rmap_desc *desc; | 500 | struct kvm_rmap_desc *desc; |
497 | unsigned long *rmapp; | 501 | unsigned long *rmapp; |
498 | int i; | 502 | int i, count = 0; |
499 | 503 | ||
500 | if (!is_rmap_pte(*spte)) | 504 | if (!is_rmap_pte(*spte)) |
501 | return; | 505 | return count; |
502 | gfn = unalias_gfn(vcpu->kvm, gfn); | 506 | gfn = unalias_gfn(vcpu->kvm, gfn); |
503 | sp = page_header(__pa(spte)); | 507 | sp = page_header(__pa(spte)); |
504 | sp->gfns[spte - sp->spt] = gfn; | 508 | sp->gfns[spte - sp->spt] = gfn; |
@@ -515,8 +519,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) | |||
515 | } else { | 519 | } else { |
516 | rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); | 520 | rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); |
517 | desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); | 521 | desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); |
518 | while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) | 522 | while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) { |
519 | desc = desc->more; | 523 | desc = desc->more; |
524 | count += RMAP_EXT; | ||
525 | } | ||
520 | if (desc->shadow_ptes[RMAP_EXT-1]) { | 526 | if (desc->shadow_ptes[RMAP_EXT-1]) { |
521 | desc->more = mmu_alloc_rmap_desc(vcpu); | 527 | desc->more = mmu_alloc_rmap_desc(vcpu); |
522 | desc = desc->more; | 528 | desc = desc->more; |
@@ -525,6 +531,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) | |||
525 | ; | 531 | ; |
526 | desc->shadow_ptes[i] = spte; | 532 | desc->shadow_ptes[i] = spte; |
527 | } | 533 | } |
534 | return count; | ||
528 | } | 535 | } |
529 | 536 | ||
530 | static void rmap_desc_remove_entry(unsigned long *rmapp, | 537 | static void rmap_desc_remove_entry(unsigned long *rmapp, |
@@ -754,6 +761,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) | |||
754 | return young; | 761 | return young; |
755 | } | 762 | } |
756 | 763 | ||
764 | #define RMAP_RECYCLE_THRESHOLD 1000 | ||
765 | |||
766 | static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage) | ||
767 | { | ||
768 | unsigned long *rmapp; | ||
769 | |||
770 | gfn = unalias_gfn(vcpu->kvm, gfn); | ||
771 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage); | ||
772 | |||
773 | kvm_unmap_rmapp(vcpu->kvm, rmapp); | ||
774 | kvm_flush_remote_tlbs(vcpu->kvm); | ||
775 | } | ||
776 | |||
757 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) | 777 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) |
758 | { | 778 | { |
759 | return kvm_handle_hva(kvm, hva, kvm_age_rmapp); | 779 | return kvm_handle_hva(kvm, hva, kvm_age_rmapp); |
@@ -1407,24 +1427,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) | |||
1407 | */ | 1427 | */ |
1408 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages) | 1428 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages) |
1409 | { | 1429 | { |
1430 | int used_pages; | ||
1431 | |||
1432 | used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages; | ||
1433 | used_pages = max(0, used_pages); | ||
1434 | |||
1410 | /* | 1435 | /* |
1411 | * If we set the number of mmu pages to be smaller than the | 1436 | * If we set the number of mmu pages to be smaller than the
1412 | * number of active pages, we must free some mmu pages before we | 1437 | * number of active pages, we must free some mmu pages before we
1413 | * change the value | 1438 | * change the value |
1414 | */ | 1439 | */ |
1415 | 1440 | ||
1416 | if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) > | 1441 | if (used_pages > kvm_nr_mmu_pages) { |
1417 | kvm_nr_mmu_pages) { | 1442 | while (used_pages > kvm_nr_mmu_pages) { |
1418 | int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages | ||
1419 | - kvm->arch.n_free_mmu_pages; | ||
1420 | |||
1421 | while (n_used_mmu_pages > kvm_nr_mmu_pages) { | ||
1422 | struct kvm_mmu_page *page; | 1443 | struct kvm_mmu_page *page; |
1423 | 1444 | ||
1424 | page = container_of(kvm->arch.active_mmu_pages.prev, | 1445 | page = container_of(kvm->arch.active_mmu_pages.prev, |
1425 | struct kvm_mmu_page, link); | 1446 | struct kvm_mmu_page, link); |
1426 | kvm_mmu_zap_page(kvm, page); | 1447 | kvm_mmu_zap_page(kvm, page); |
1427 | n_used_mmu_pages--; | 1448 | used_pages--; |
1428 | } | 1449 | } |
1429 | kvm->arch.n_free_mmu_pages = 0; | 1450 | kvm->arch.n_free_mmu_pages = 0; |
1430 | } | 1451 | } |
@@ -1740,6 +1761,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1740 | { | 1761 | { |
1741 | int was_rmapped = 0; | 1762 | int was_rmapped = 0; |
1742 | int was_writeble = is_writeble_pte(*shadow_pte); | 1763 | int was_writeble = is_writeble_pte(*shadow_pte); |
1764 | int rmap_count; | ||
1743 | 1765 | ||
1744 | pgprintk("%s: spte %llx access %x write_fault %d" | 1766 | pgprintk("%s: spte %llx access %x write_fault %d" |
1745 | " user_fault %d gfn %lx\n", | 1767 | " user_fault %d gfn %lx\n", |
@@ -1781,9 +1803,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1781 | 1803 | ||
1782 | page_header_update_slot(vcpu->kvm, shadow_pte, gfn); | 1804 | page_header_update_slot(vcpu->kvm, shadow_pte, gfn); |
1783 | if (!was_rmapped) { | 1805 | if (!was_rmapped) { |
1784 | rmap_add(vcpu, shadow_pte, gfn, largepage); | 1806 | rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage); |
1785 | if (!is_rmap_pte(*shadow_pte)) | 1807 | if (!is_rmap_pte(*shadow_pte)) |
1786 | kvm_release_pfn_clean(pfn); | 1808 | kvm_release_pfn_clean(pfn); |
1809 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) | ||
1810 | rmap_recycle(vcpu, gfn, largepage); | ||
1787 | } else { | 1811 | } else { |
1788 | if (was_writeble) | 1812 | if (was_writeble) |
1789 | kvm_release_pfn_dirty(pfn); | 1813 | kvm_release_pfn_dirty(pfn); |
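
rmap_add() now returns how many reverse-map entries already existed so the caller can recycle the chain once it grows past RMAP_RECYCLE_THRESHOLD. A toy sketch of that threshold-triggered recycling; the counter stands in for the rmap chain and the flush is only a printout.

#include <stdio.h>

#define RECYCLE_THRESHOLD 1000

/* Pretend each call adds one reverse mapping and returns how many
 * already existed; recycle once the chain grows past the threshold. */
static int rmap_len;

static int rmap_add_sketch(void)
{
	return rmap_len++;
}

static void rmap_recycle_sketch(void)
{
	printf("recycling %d stale mappings\n", rmap_len);
	rmap_len = 0;		/* stands in for unmap + TLB flush */
}

int main(void)
{
	for (int i = 0; i < 2500; i++)
		if (rmap_add_sketch() > RECYCLE_THRESHOLD)
			rmap_recycle_sketch();
	return 0;
}
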
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 71510e07e69e..b1f658ad2f06 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -711,6 +711,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
711 | svm->vmcb->control.tsc_offset += delta; | 711 | svm->vmcb->control.tsc_offset += delta; |
712 | vcpu->cpu = cpu; | 712 | vcpu->cpu = cpu; |
713 | kvm_migrate_timers(vcpu); | 713 | kvm_migrate_timers(vcpu); |
714 | svm->asid_generation = 0; | ||
714 | } | 715 | } |
715 | 716 | ||
716 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) | 717 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) |
@@ -1031,7 +1032,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) | |||
1031 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; | 1032 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
1032 | } | 1033 | } |
1033 | 1034 | ||
1034 | svm->vcpu.cpu = svm_data->cpu; | ||
1035 | svm->asid_generation = svm_data->asid_generation; | 1035 | svm->asid_generation = svm_data->asid_generation; |
1036 | svm->vmcb->control.asid = svm_data->next_asid++; | 1036 | svm->vmcb->control.asid = svm_data->next_asid++; |
1037 | } | 1037 | } |
@@ -2300,8 +2300,8 @@ static void pre_svm_run(struct vcpu_svm *svm) | |||
2300 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); | 2300 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); |
2301 | 2301 | ||
2302 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; | 2302 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
2303 | if (svm->vcpu.cpu != cpu || | 2303 | /* FIXME: handle wraparound of asid_generation */ |
2304 | svm->asid_generation != svm_data->asid_generation) | 2304 | if (svm->asid_generation != svm_data->asid_generation) |
2305 | new_asid(svm, svm_data); | 2305 | new_asid(svm, svm_data); |
2306 | } | 2306 | } |
2307 | 2307 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 356a0ce85c68..29f912927a58 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -3157,8 +3157,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
3157 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3157 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3158 | enum emulation_result err = EMULATE_DONE; | 3158 | enum emulation_result err = EMULATE_DONE; |
3159 | 3159 | ||
3160 | preempt_enable(); | ||
3161 | local_irq_enable(); | 3160 | local_irq_enable(); |
3161 | preempt_enable(); | ||
3162 | 3162 | ||
3163 | while (!guest_state_valid(vcpu)) { | 3163 | while (!guest_state_valid(vcpu)) { |
3164 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 3164 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); |
@@ -3168,7 +3168,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
3168 | 3168 | ||
3169 | if (err != EMULATE_DONE) { | 3169 | if (err != EMULATE_DONE) { |
3170 | kvm_report_emulation_failure(vcpu, "emulation failure"); | 3170 | kvm_report_emulation_failure(vcpu, "emulation failure"); |
3171 | return; | 3171 | break; |
3172 | } | 3172 | } |
3173 | 3173 | ||
3174 | if (signal_pending(current)) | 3174 | if (signal_pending(current)) |
@@ -3177,8 +3177,8 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
3177 | schedule(); | 3177 | schedule(); |
3178 | } | 3178 | } |
3179 | 3179 | ||
3180 | local_irq_disable(); | ||
3181 | preempt_disable(); | 3180 | preempt_disable(); |
3181 | local_irq_disable(); | ||
3182 | 3182 | ||
3183 | vmx->invalid_state_emulation_result = err; | 3183 | vmx->invalid_state_emulation_result = err; |
3184 | } | 3184 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fe5474aec41a..3d4529011828 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -704,11 +704,48 @@ static bool msr_mtrr_valid(unsigned msr) | |||
704 | return false; | 704 | return false; |
705 | } | 705 | } |
706 | 706 | ||
707 | static bool valid_pat_type(unsigned t) | ||
708 | { | ||
709 | return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ | ||
710 | } | ||
711 | |||
712 | static bool valid_mtrr_type(unsigned t) | ||
713 | { | ||
714 | return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ | ||
715 | } | ||
716 | |||
717 | static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) | ||
718 | { | ||
719 | int i; | ||
720 | |||
721 | if (!msr_mtrr_valid(msr)) | ||
722 | return false; | ||
723 | |||
724 | if (msr == MSR_IA32_CR_PAT) { | ||
725 | for (i = 0; i < 8; i++) | ||
726 | if (!valid_pat_type((data >> (i * 8)) & 0xff)) | ||
727 | return false; | ||
728 | return true; | ||
729 | } else if (msr == MSR_MTRRdefType) { | ||
730 | if (data & ~0xcff) | ||
731 | return false; | ||
732 | return valid_mtrr_type(data & 0xff); | ||
733 | } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { | ||
734 | for (i = 0; i < 8 ; i++) | ||
735 | if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) | ||
736 | return false; | ||
737 | return true; | ||
738 | } | ||
739 | |||
740 | /* variable MTRRs */ | ||
741 | return valid_mtrr_type(data & 0xff); | ||
742 | } | ||
743 | |||
707 | static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 744 | static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
708 | { | 745 | { |
709 | u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; | 746 | u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; |
710 | 747 | ||
711 | if (!msr_mtrr_valid(msr)) | 748 | if (!mtrr_valid(vcpu, msr, data)) |
712 | return 1; | 749 | return 1; |
713 | 750 | ||
714 | if (msr == MSR_MTRRdefType) { | 751 | if (msr == MSR_MTRRdefType) { |
@@ -1079,14 +1116,13 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
1079 | if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) | 1116 | if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) |
1080 | goto out; | 1117 | goto out; |
1081 | r = -E2BIG; | 1118 | r = -E2BIG; |
1082 | if (n < num_msrs_to_save) | 1119 | if (n < msr_list.nmsrs) |
1083 | goto out; | 1120 | goto out; |
1084 | r = -EFAULT; | 1121 | r = -EFAULT; |
1085 | if (copy_to_user(user_msr_list->indices, &msrs_to_save, | 1122 | if (copy_to_user(user_msr_list->indices, &msrs_to_save, |
1086 | num_msrs_to_save * sizeof(u32))) | 1123 | num_msrs_to_save * sizeof(u32))) |
1087 | goto out; | 1124 | goto out; |
1088 | if (copy_to_user(user_msr_list->indices | 1125 | if (copy_to_user(user_msr_list->indices + num_msrs_to_save, |
1089 | + num_msrs_to_save * sizeof(u32), | ||
1090 | &emulated_msrs, | 1126 | &emulated_msrs, |
1091 | ARRAY_SIZE(emulated_msrs) * sizeof(u32))) | 1127 | ARRAY_SIZE(emulated_msrs) * sizeof(u32))) |
1092 | goto out; | 1128 | goto out; |
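
The MTRR/PAT validation encodes the legal memory types as a bitmask, so each byte of the MSR value is checked with one shift-and-test (0xf3 covers types 0, 1, 4, 5, 6 and 7). A stand-alone sketch of that check; the sample MSR values are invented.

#include <stdio.h>
#include <stdint.h>

/* Memory types 0,1,4,5,6,7 are legal for PAT; 2 and 3 are reserved.
 * Encoding the legal set as bitmask 0xf3 turns the check into one test. */
static int valid_pat_type(unsigned t)
{
	return t < 8 && ((1u << t) & 0xf3);
}

/* A PAT MSR packs eight one-byte entries; reject it if any is reserved. */
static int pat_msr_valid(uint64_t data)
{
	for (int i = 0; i < 8; i++)
		if (!valid_pat_type((data >> (i * 8)) & 0xff))
			return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", pat_msr_valid(0x0007040600070406ull));	/* 1: all legal */
	printf("%d\n", pat_msr_valid(0x0007040600070403ull));	/* 0: type 3 reserved */
	return 0;
}
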
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index 1440b9c0547e..caa24aca8115 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c | |||
@@ -89,16 +89,13 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) | |||
89 | rv.msrs = msrs; | 89 | rv.msrs = msrs; |
90 | rv.msr_no = msr_no; | 90 | rv.msr_no = msr_no; |
91 | 91 | ||
92 | preempt_disable(); | 92 | this_cpu = get_cpu(); |
93 | /* | 93 | |
94 | * FIXME: handle the CPU we're executing on separately for now until | 94 | if (cpumask_test_cpu(this_cpu, mask)) |
95 | * smp_call_function_many has been fixed to not skip it. | 95 | __rdmsr_on_cpu(&rv); |
96 | */ | ||
97 | this_cpu = raw_smp_processor_id(); | ||
98 | smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1); | ||
99 | 96 | ||
100 | smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1); | 97 | smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1); |
101 | preempt_enable(); | 98 | put_cpu(); |
102 | } | 99 | } |
103 | EXPORT_SYMBOL(rdmsr_on_cpus); | 100 | EXPORT_SYMBOL(rdmsr_on_cpus); |
104 | 101 | ||
@@ -121,16 +118,13 @@ void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) | |||
121 | rv.msrs = msrs; | 118 | rv.msrs = msrs; |
122 | rv.msr_no = msr_no; | 119 | rv.msr_no = msr_no; |
123 | 120 | ||
124 | preempt_disable(); | 121 | this_cpu = get_cpu(); |
125 | /* | 122 | |
126 | * FIXME: handle the CPU we're executing on separately for now until | 123 | if (cpumask_test_cpu(this_cpu, mask)) |
127 | * smp_call_function_many has been fixed to not skip it. | 124 | __wrmsr_on_cpu(&rv); |
128 | */ | ||
129 | this_cpu = raw_smp_processor_id(); | ||
130 | smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1); | ||
131 | 125 | ||
132 | smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1); | 126 | smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1); |
133 | preempt_enable(); | 127 | put_cpu(); |
134 | } | 128 | } |
135 | EXPORT_SYMBOL(wrmsr_on_cpus); | 129 | EXPORT_SYMBOL(wrmsr_on_cpus); |
136 | 130 | ||
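
Because smp_call_function_many() skips the calling CPU, rdmsr_on_cpus()/wrmsr_on_cpus() now run the callback locally when the current CPU is in the mask, bracketed by get_cpu()/put_cpu(). A user-space sketch of that "handle self, then dispatch to the rest" pattern; the mask array and loop stand in for cpumask and IPIs.

#include <stdio.h>

/* smp_call_function_many() skips the calling CPU, so the caller must run
 * the callback itself when it is part of the target mask. */
static void on_each_selected_cpu(const int *mask, int ncpus, int this_cpu,
				 void (*fn)(int cpu))
{
	if (mask[this_cpu])
		fn(this_cpu);		/* handle self directly */

	for (int cpu = 0; cpu < ncpus; cpu++)
		if (cpu != this_cpu && mask[cpu])
			fn(cpu);	/* stand-in for the cross-CPU call */
}

static void read_msr(int cpu)
{
	printf("reading MSR on cpu %d\n", cpu);
}

int main(void)
{
	int mask[4] = { 1, 0, 1, 1 };

	on_each_selected_cpu(mask, 4, 2, read_msr);
	return 0;
}
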
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 1b734d7a8966..7e600c1962db 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -591,9 +591,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) | |||
591 | unsigned int level; | 591 | unsigned int level; |
592 | pte_t *kpte, old_pte; | 592 | pte_t *kpte, old_pte; |
593 | 593 | ||
594 | if (cpa->flags & CPA_PAGES_ARRAY) | 594 | if (cpa->flags & CPA_PAGES_ARRAY) { |
595 | address = (unsigned long)page_address(cpa->pages[cpa->curpage]); | 595 | struct page *page = cpa->pages[cpa->curpage]; |
596 | else if (cpa->flags & CPA_ARRAY) | 596 | if (unlikely(PageHighMem(page))) |
597 | return 0; | ||
598 | address = (unsigned long)page_address(page); | ||
599 | } else if (cpa->flags & CPA_ARRAY) | ||
597 | address = cpa->vaddr[cpa->curpage]; | 600 | address = cpa->vaddr[cpa->curpage]; |
598 | else | 601 | else |
599 | address = *cpa->vaddr; | 602 | address = *cpa->vaddr; |
@@ -697,9 +700,12 @@ static int cpa_process_alias(struct cpa_data *cpa) | |||
697 | * No need to redo, when the primary call touched the direct | 700 | * No need to redo, when the primary call touched the direct |
698 | * mapping already: | 701 | * mapping already: |
699 | */ | 702 | */ |
700 | if (cpa->flags & CPA_PAGES_ARRAY) | 703 | if (cpa->flags & CPA_PAGES_ARRAY) { |
701 | vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]); | 704 | struct page *page = cpa->pages[cpa->curpage]; |
702 | else if (cpa->flags & CPA_ARRAY) | 705 | if (unlikely(PageHighMem(page))) |
706 | return 0; | ||
707 | vaddr = (unsigned long)page_address(page); | ||
708 | } else if (cpa->flags & CPA_ARRAY) | ||
703 | vaddr = cpa->vaddr[cpa->curpage]; | 709 | vaddr = cpa->vaddr[cpa->curpage]; |
704 | else | 710 | else |
705 | vaddr = *cpa->vaddr; | 711 | vaddr = *cpa->vaddr; |
@@ -997,12 +1003,15 @@ EXPORT_SYMBOL(set_memory_array_uc); | |||
997 | int _set_memory_wc(unsigned long addr, int numpages) | 1003 | int _set_memory_wc(unsigned long addr, int numpages) |
998 | { | 1004 | { |
999 | int ret; | 1005 | int ret; |
1006 | unsigned long addr_copy = addr; | ||
1007 | |||
1000 | ret = change_page_attr_set(&addr, numpages, | 1008 | ret = change_page_attr_set(&addr, numpages, |
1001 | __pgprot(_PAGE_CACHE_UC_MINUS), 0); | 1009 | __pgprot(_PAGE_CACHE_UC_MINUS), 0); |
1002 | |||
1003 | if (!ret) { | 1010 | if (!ret) { |
1004 | ret = change_page_attr_set(&addr, numpages, | 1011 | ret = change_page_attr_set_clr(&addr_copy, numpages, |
1005 | __pgprot(_PAGE_CACHE_WC), 0); | 1012 | __pgprot(_PAGE_CACHE_WC), |
1013 | __pgprot(_PAGE_CACHE_MASK), | ||
1014 | 0, 0, NULL); | ||
1006 | } | 1015 | } |
1007 | return ret; | 1016 | return ret; |
1008 | } | 1017 | } |
@@ -1119,7 +1128,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray) | |||
1119 | int free_idx; | 1128 | int free_idx; |
1120 | 1129 | ||
1121 | for (i = 0; i < addrinarray; i++) { | 1130 | for (i = 0; i < addrinarray; i++) { |
1122 | start = (unsigned long)page_address(pages[i]); | 1131 | if (PageHighMem(pages[i])) |
1132 | continue; | ||
1133 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | ||
1123 | end = start + PAGE_SIZE; | 1134 | end = start + PAGE_SIZE; |
1124 | if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL)) | 1135 | if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL)) |
1125 | goto err_out; | 1136 | goto err_out; |
@@ -1132,7 +1143,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray) | |||
1132 | err_out: | 1143 | err_out: |
1133 | free_idx = i; | 1144 | free_idx = i; |
1134 | for (i = 0; i < free_idx; i++) { | 1145 | for (i = 0; i < free_idx; i++) { |
1135 | start = (unsigned long)page_address(pages[i]); | 1146 | if (PageHighMem(pages[i])) |
1147 | continue; | ||
1148 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | ||
1136 | end = start + PAGE_SIZE; | 1149 | end = start + PAGE_SIZE; |
1137 | free_memtype(start, end); | 1150 | free_memtype(start, end); |
1138 | } | 1151 | } |
@@ -1161,7 +1174,9 @@ int set_pages_array_wb(struct page **pages, int addrinarray) | |||
1161 | return retval; | 1174 | return retval; |
1162 | 1175 | ||
1163 | for (i = 0; i < addrinarray; i++) { | 1176 | for (i = 0; i < addrinarray; i++) { |
1164 | start = (unsigned long)page_address(pages[i]); | 1177 | if (PageHighMem(pages[i])) |
1178 | continue; | ||
1179 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | ||
1165 | end = start + PAGE_SIZE; | 1180 | end = start + PAGE_SIZE; |
1166 | free_memtype(start, end); | 1181 | free_memtype(start, end); |
1167 | } | 1182 | } |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index af8f9650058c..ed34f5e35999 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -329,7 +329,6 @@ void __init reserve_top_address(unsigned long reserve) | |||
329 | printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", | 329 | printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", |
330 | (int)-reserve); | 330 | (int)-reserve); |
331 | __FIXADDR_TOP = -reserve - PAGE_SIZE; | 331 | __FIXADDR_TOP = -reserve - PAGE_SIZE; |
332 | __VMALLOC_RESERVE += reserve; | ||
333 | #endif | 332 | #endif |
334 | } | 333 | } |
335 | 334 | ||
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 6a40b78b46aa..ee55754cc3c5 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -86,14 +86,47 @@ notrace static noinline int do_monotonic(struct timespec *ts) | |||
86 | return 0; | 86 | return 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | notrace static noinline int do_realtime_coarse(struct timespec *ts) | ||
90 | { | ||
91 | unsigned long seq; | ||
92 | do { | ||
93 | seq = read_seqbegin(&gtod->lock); | ||
94 | ts->tv_sec = gtod->wall_time_coarse.tv_sec; | ||
95 | ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; | ||
96 | } while (unlikely(read_seqretry(&gtod->lock, seq))); | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | notrace static noinline int do_monotonic_coarse(struct timespec *ts) | ||
101 | { | ||
102 | unsigned long seq, ns, secs; | ||
103 | do { | ||
104 | seq = read_seqbegin(&gtod->lock); | ||
105 | secs = gtod->wall_time_coarse.tv_sec; | ||
106 | ns = gtod->wall_time_coarse.tv_nsec; | ||
107 | secs += gtod->wall_to_monotonic.tv_sec; | ||
108 | ns += gtod->wall_to_monotonic.tv_nsec; | ||
109 | } while (unlikely(read_seqretry(&gtod->lock, seq))); | ||
110 | vset_normalized_timespec(ts, secs, ns); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
89 | notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) | 114 | notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) |
90 | { | 115 | { |
91 | if (likely(gtod->sysctl_enabled && gtod->clock.vread)) | 116 | if (likely(gtod->sysctl_enabled)) |
92 | switch (clock) { | 117 | switch (clock) { |
93 | case CLOCK_REALTIME: | 118 | case CLOCK_REALTIME: |
94 | return do_realtime(ts); | 119 | if (likely(gtod->clock.vread)) |
120 | return do_realtime(ts); | ||
121 | break; | ||
95 | case CLOCK_MONOTONIC: | 122 | case CLOCK_MONOTONIC: |
96 | return do_monotonic(ts); | 123 | if (likely(gtod->clock.vread)) |
124 | return do_monotonic(ts); | ||
125 | break; | ||
126 | case CLOCK_REALTIME_COARSE: | ||
127 | return do_realtime_coarse(ts); | ||
128 | case CLOCK_MONOTONIC_COARSE: | ||
129 | return do_monotonic_coarse(ts); | ||
97 | } | 130 | } |
98 | return vdso_fallback_gettime(clock, ts); | 131 | return vdso_fallback_gettime(clock, ts); |
99 | } | 132 | } |
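The two new vDSO branches expose the coarse clock IDs: they return the timestamp captured at the last tick straight from the vsyscall data page, so no clocksource read is needed, at the cost of tick-granularity resolution. A hedged userspace illustration (plain C; older glibc may need _GNU_SOURCE and -lrt):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec fine, coarse;

		/* Full-resolution and tick-resolution reads of the same clock. */
		clock_gettime(CLOCK_MONOTONIC, &fine);
		clock_gettime(CLOCK_MONOTONIC_COARSE, &coarse);

		printf("fine:   %ld.%09ld\n", (long)fine.tv_sec, fine.tv_nsec);
		printf("coarse: %ld.%09ld\n", (long)coarse.tv_sec, coarse.tv_nsec);
		return 0;
	}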
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 8848120d291b..19085ff0484a 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -59,9 +59,8 @@ static struct irqaction timer_irqaction = { | |||
59 | 59 | ||
60 | void __init time_init(void) | 60 | void __init time_init(void) |
61 | { | 61 | { |
62 | xtime.tv_nsec = 0; | 62 | /* FIXME: xtime&wall_to_monotonic are set in timekeeping_init. */ |
63 | xtime.tv_sec = read_persistent_clock(); | 63 | read_persistent_clock(&xtime); |
64 | |||
65 | set_normalized_timespec(&wall_to_monotonic, | 64 | set_normalized_timespec(&wall_to_monotonic, |
66 | -xtime.tv_sec, -xtime.tv_nsec); | 65 | -xtime.tv_sec, -xtime.tv_nsec); |
67 | 66 | ||
diff --git a/block/Kconfig b/block/Kconfig index 95a86adc33a1..9be0b56eaee1 100644 --- a/block/Kconfig +++ b/block/Kconfig | |||
@@ -48,9 +48,9 @@ config LBDAF | |||
48 | If unsure, say Y. | 48 | If unsure, say Y. |
49 | 49 | ||
50 | config BLK_DEV_BSG | 50 | config BLK_DEV_BSG |
51 | bool "Block layer SG support v4 (EXPERIMENTAL)" | 51 | bool "Block layer SG support v4" |
52 | depends on EXPERIMENTAL | 52 | default y |
53 | ---help--- | 53 | help |
54 | Saying Y here will enable generic SG (SCSI generic) v4 support | 54 | Saying Y here will enable generic SG (SCSI generic) v4 support |
55 | for any block device. | 55 | for any block device. |
56 | 56 | ||
@@ -60,7 +60,10 @@ config BLK_DEV_BSG | |||
60 | protocols (e.g. Task Management Functions and SMP in Serial | 60 | protocols (e.g. Task Management Functions and SMP in Serial |
61 | Attached SCSI). | 61 | Attached SCSI). |
62 | 62 | ||
63 | If unsure, say N. | 63 | This option is required by recent UDEV versions to properly |
64 | access device serial numbers, etc. | ||
65 | |||
66 | If unsure, say Y. | ||
64 | 67 | ||
65 | config BLK_DEV_INTEGRITY | 68 | config BLK_DEV_INTEGRITY |
66 | bool "Block layer data integrity support" | 69 | bool "Block layer data integrity support" |
diff --git a/block/blk-settings.c b/block/blk-settings.c index 8a3ea3bba10d..476d87065073 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/bio.h> | 7 | #include <linux/bio.h> |
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ | 9 | #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ |
10 | #include <linux/gcd.h> | ||
10 | 11 | ||
11 | #include "blk.h" | 12 | #include "blk.h" |
12 | 13 | ||
@@ -384,8 +385,8 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) | |||
384 | EXPORT_SYMBOL(blk_queue_alignment_offset); | 385 | EXPORT_SYMBOL(blk_queue_alignment_offset); |
385 | 386 | ||
386 | /** | 387 | /** |
387 | * blk_queue_io_min - set minimum request size for the queue | 388 | * blk_limits_io_min - set minimum request size for a device |
388 | * @q: the request queue for the device | 389 | * @limits: the queue limits |
389 | * @min: smallest I/O size in bytes | 390 | * @min: smallest I/O size in bytes |
390 | * | 391 | * |
391 | * Description: | 392 | * Description: |
@@ -394,15 +395,35 @@ EXPORT_SYMBOL(blk_queue_alignment_offset); | |||
394 | * smallest I/O the device can perform without incurring a performance | 395 | * smallest I/O the device can perform without incurring a performance |
395 | * penalty. | 396 | * penalty. |
396 | */ | 397 | */ |
397 | void blk_queue_io_min(struct request_queue *q, unsigned int min) | 398 | void blk_limits_io_min(struct queue_limits *limits, unsigned int min) |
398 | { | 399 | { |
399 | q->limits.io_min = min; | 400 | limits->io_min = min; |
400 | 401 | ||
401 | if (q->limits.io_min < q->limits.logical_block_size) | 402 | if (limits->io_min < limits->logical_block_size) |
402 | q->limits.io_min = q->limits.logical_block_size; | 403 | limits->io_min = limits->logical_block_size; |
403 | 404 | ||
404 | if (q->limits.io_min < q->limits.physical_block_size) | 405 | if (limits->io_min < limits->physical_block_size) |
405 | q->limits.io_min = q->limits.physical_block_size; | 406 | limits->io_min = limits->physical_block_size; |
407 | } | ||
408 | EXPORT_SYMBOL(blk_limits_io_min); | ||
409 | |||
410 | /** | ||
411 | * blk_queue_io_min - set minimum request size for the queue | ||
412 | * @q: the request queue for the device | ||
413 | * @min: smallest I/O size in bytes | ||
414 | * | ||
415 | * Description: | ||
416 | * Storage devices may report a granularity or preferred minimum I/O | ||
417 | * size which is the smallest request the device can perform without | ||
418 | * incurring a performance penalty. For disk drives this is often the | ||
419 | * physical block size. For RAID arrays it is often the stripe chunk | ||
420 | * size. A properly aligned multiple of minimum_io_size is the | ||
421 | * preferred request size for workloads where a high number of I/O | ||
422 | * operations is desired. | ||
423 | */ | ||
424 | void blk_queue_io_min(struct request_queue *q, unsigned int min) | ||
425 | { | ||
426 | blk_limits_io_min(&q->limits, min); | ||
406 | } | 427 | } |
407 | EXPORT_SYMBOL(blk_queue_io_min); | 428 | EXPORT_SYMBOL(blk_queue_io_min); |
408 | 429 | ||
@@ -412,8 +433,12 @@ EXPORT_SYMBOL(blk_queue_io_min); | |||
412 | * @opt: optimal request size in bytes | 433 | * @opt: optimal request size in bytes |
413 | * | 434 | * |
414 | * Description: | 435 | * Description: |
415 | * Drivers can call this function to set the preferred I/O request | 436 | * Storage devices may report an optimal I/O size, which is the |
416 | * size for devices that report such a value. | 437 | * device's preferred unit for sustained I/O. This is rarely reported |
438 | * for disk drives. For RAID arrays it is usually the stripe width or | ||
439 | * the internal track size. A properly aligned multiple of | ||
440 | * optimal_io_size is the preferred request size for workloads where | ||
441 | * sustained throughput is desired. | ||
417 | */ | 442 | */ |
418 | void blk_queue_io_opt(struct request_queue *q, unsigned int opt) | 443 | void blk_queue_io_opt(struct request_queue *q, unsigned int opt) |
419 | { | 444 | { |
@@ -433,27 +458,7 @@ EXPORT_SYMBOL(blk_queue_io_opt); | |||
433 | **/ | 458 | **/ |
434 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | 459 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) |
435 | { | 460 | { |
436 | /* zero is "infinity" */ | 461 | blk_stack_limits(&t->limits, &b->limits, 0); |
437 | t->limits.max_sectors = min_not_zero(queue_max_sectors(t), | ||
438 | queue_max_sectors(b)); | ||
439 | |||
440 | t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t), | ||
441 | queue_max_hw_sectors(b)); | ||
442 | |||
443 | t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t), | ||
444 | queue_segment_boundary(b)); | ||
445 | |||
446 | t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t), | ||
447 | queue_max_phys_segments(b)); | ||
448 | |||
449 | t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t), | ||
450 | queue_max_hw_segments(b)); | ||
451 | |||
452 | t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t), | ||
453 | queue_max_segment_size(b)); | ||
454 | |||
455 | t->limits.logical_block_size = max(queue_logical_block_size(t), | ||
456 | queue_logical_block_size(b)); | ||
457 | 462 | ||
458 | if (!t->queue_lock) | 463 | if (!t->queue_lock) |
459 | WARN_ON_ONCE(1); | 464 | WARN_ON_ONCE(1); |
@@ -523,6 +528,16 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
523 | return -1; | 528 | return -1; |
524 | } | 529 | } |
525 | 530 | ||
531 | /* Find lcm() of optimal I/O size */ | ||
532 | if (t->io_opt && b->io_opt) | ||
533 | t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt); | ||
534 | else if (b->io_opt) | ||
535 | t->io_opt = b->io_opt; | ||
536 | |||
537 | /* Verify that optimal I/O size is a multiple of io_min */ | ||
538 | if (t->io_min && t->io_opt % t->io_min) | ||
539 | return -1; | ||
540 | |||
526 | return 0; | 541 | return 0; |
527 | } | 542 | } |
528 | EXPORT_SYMBOL(blk_stack_limits); | 543 | EXPORT_SYMBOL(blk_stack_limits); |
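The new stacking rule combines optimal I/O sizes by taking their least common multiple, computed via gcd(), and then rejects the result if it is not a multiple of the stacked io_min. With made-up numbers: a 64 KiB chunk stacked on a 192 KiB stripe gives lcm(65536, 196608) = 196608. A sketch of the arithmetic (stack_io_opt() is a hypothetical helper; note the multiply can overflow for very large inputs, though realistic io_opt values stay far below that):

	#include <linux/gcd.h>

	/* Sketch of the io_opt merge above. */
	static unsigned int stack_io_opt(unsigned int top, unsigned int bottom)
	{
		if (!top || !bottom)
			return top ? top : bottom;	/* keep whichever is set */

		return (top * bottom) / gcd(top, bottom);	/* lcm(top, bottom) */
	}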
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 7a0f4aa4fa1e..9a62224cc278 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c | |||
@@ -38,6 +38,9 @@ | |||
38 | 38 | ||
39 | #define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT | 39 | #define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT |
40 | 40 | ||
41 | #undef PREFIX | ||
42 | #define PREFIX "ACPI:memory_hp:" | ||
43 | |||
41 | ACPI_MODULE_NAME("acpi_memhotplug"); | 44 | ACPI_MODULE_NAME("acpi_memhotplug"); |
42 | MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>"); | 45 | MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>"); |
43 | MODULE_DESCRIPTION("Hotplug Mem Driver"); | 46 | MODULE_DESCRIPTION("Hotplug Mem Driver"); |
@@ -153,6 +156,7 @@ acpi_memory_get_device(acpi_handle handle, | |||
153 | acpi_handle phandle; | 156 | acpi_handle phandle; |
154 | struct acpi_device *device = NULL; | 157 | struct acpi_device *device = NULL; |
155 | struct acpi_device *pdevice = NULL; | 158 | struct acpi_device *pdevice = NULL; |
159 | int result; | ||
156 | 160 | ||
157 | 161 | ||
158 | if (!acpi_bus_get_device(handle, &device) && device) | 162 | if (!acpi_bus_get_device(handle, &device) && device) |
@@ -165,9 +169,9 @@ acpi_memory_get_device(acpi_handle handle, | |||
165 | } | 169 | } |
166 | 170 | ||
167 | /* Get the parent device */ | 171 | /* Get the parent device */ |
168 | status = acpi_bus_get_device(phandle, &pdevice); | 172 | result = acpi_bus_get_device(phandle, &pdevice); |
169 | if (ACPI_FAILURE(status)) { | 173 | if (result) { |
170 | ACPI_EXCEPTION((AE_INFO, status, "Cannot get acpi bus device")); | 174 | printk(KERN_WARNING PREFIX "Cannot get acpi bus device"); |
171 | return -EINVAL; | 175 | return -EINVAL; |
172 | } | 176 | } |
173 | 177 | ||
@@ -175,9 +179,9 @@ acpi_memory_get_device(acpi_handle handle, | |||
175 | * Now add the notified device. This creates the acpi_device | 179 | * Now add the notified device. This creates the acpi_device |
176 | * and invokes .add function | 180 | * and invokes .add function |
177 | */ | 181 | */ |
178 | status = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); | 182 | result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); |
179 | if (ACPI_FAILURE(status)) { | 183 | if (result) { |
180 | ACPI_EXCEPTION((AE_INFO, status, "Cannot add acpi bus")); | 184 | printk(KERN_WARNING PREFIX "Cannot add acpi bus"); |
181 | return -EINVAL; | 185 | return -EINVAL; |
182 | } | 186 | } |
183 | 187 | ||
@@ -238,7 +242,12 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | |||
238 | num_enabled++; | 242 | num_enabled++; |
239 | continue; | 243 | continue; |
240 | } | 244 | } |
241 | 245 | /* | |
246 | * If the memory block size is zero, please ignore it. | ||
247 | * Don't try to do the following memory hotplug flowchart. | ||
248 | */ | ||
249 | if (!info->length) | ||
250 | continue; | ||
242 | if (node < 0) | 251 | if (node < 0) |
243 | node = memory_add_physaddr_to_nid(info->start_addr); | 252 | node = memory_add_physaddr_to_nid(info->start_addr); |
244 | 253 | ||
@@ -253,8 +262,15 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) | |||
253 | mem_device->state = MEMORY_INVALID_STATE; | 262 | mem_device->state = MEMORY_INVALID_STATE; |
254 | return -EINVAL; | 263 | return -EINVAL; |
255 | } | 264 | } |
256 | 265 | /* | |
257 | return result; | 266 | * Sometimes the memory device will contain several memory blocks. |
267 | * When one memory block is hot-added to the system memory, it will | ||
268 | * be regarded as a success. | ||
269 | * Otherwise if the last memory block can't be hot-added to the system | ||
270 | * memory, it will be failure and the memory device can't be bound with | ||
271 | * driver. | ||
272 | */ | ||
273 | return 0; | ||
258 | } | 274 | } |
259 | 275 | ||
260 | static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) | 276 | static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) |
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 544dcf834922..eb6f038b03d9 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
@@ -97,6 +97,7 @@ | |||
97 | #define AOPOBJ_OBJECT_INITIALIZED 0x08 | 97 | #define AOPOBJ_OBJECT_INITIALIZED 0x08 |
98 | #define AOPOBJ_SETUP_COMPLETE 0x10 | 98 | #define AOPOBJ_SETUP_COMPLETE 0x10 |
99 | #define AOPOBJ_SINGLE_DATUM 0x20 | 99 | #define AOPOBJ_SINGLE_DATUM 0x20 |
100 | #define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */ | ||
100 | 101 | ||
101 | /****************************************************************************** | 102 | /****************************************************************************** |
102 | * | 103 | * |
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 584d766e6f12..b79978f7bc71 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c | |||
@@ -397,6 +397,30 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc) | |||
397 | status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node), | 397 | status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node), |
398 | extra_desc->extra.aml_length, | 398 | extra_desc->extra.aml_length, |
399 | extra_desc->extra.aml_start); | 399 | extra_desc->extra.aml_start); |
400 | if (ACPI_FAILURE(status)) { | ||
401 | return_ACPI_STATUS(status); | ||
402 | } | ||
403 | |||
404 | /* Validate the region address/length via the host OS */ | ||
405 | |||
406 | status = acpi_os_validate_address(obj_desc->region.space_id, | ||
407 | obj_desc->region.address, | ||
408 | (acpi_size) obj_desc->region.length, | ||
409 | acpi_ut_get_node_name(node)); | ||
410 | |||
411 | if (ACPI_FAILURE(status)) { | ||
412 | /* | ||
413 | * Invalid address/length. We will emit an error message and mark | ||
414 | * the region as invalid, so that it will cause an additional error if | ||
415 | * it is ever used. Then return AE_OK. | ||
416 | */ | ||
417 | ACPI_EXCEPTION((AE_INFO, status, | ||
418 | "During address validation of OpRegion [%4.4s]", | ||
419 | node->name.ascii)); | ||
420 | obj_desc->common.flags |= AOPOBJ_INVALID; | ||
421 | status = AE_OK; | ||
422 | } | ||
423 | |||
400 | return_ACPI_STATUS(status); | 424 | return_ACPI_STATUS(status); |
401 | } | 425 | } |
402 | 426 | ||
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index d4075b821021..6687be167f5f 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c | |||
@@ -113,6 +113,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, | |||
113 | } | 113 | } |
114 | } | 114 | } |
115 | 115 | ||
116 | /* Exit if Address/Length have been disallowed by the host OS */ | ||
117 | |||
118 | if (rgn_desc->common.flags & AOPOBJ_INVALID) { | ||
119 | return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS); | ||
120 | } | ||
121 | |||
116 | /* | 122 | /* |
117 | * Exit now for SMBus address space, it has a non-linear address space | 123 | * Exit now for SMBus address space, it has a non-linear address space |
118 | * and the request cannot be directly validated | 124 | * and the request cannot be directly validated |
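Taken together, the dsopcode.c and exfldio.c hunks implement a defer-the-error pattern: an operation region whose address range the host OS rejects is only flagged (AOPOBJ_INVALID) at creation time, and the hard failure (AE_AML_ILLEGAL_ADDRESS) is raised later, only if AML actually touches a field in that region. A generic sketch of the idea (hypothetical types, not ACPICA code):

	#include <errno.h>
	#include <stdbool.h>

	struct region {
		unsigned int flags;
	};
	#define REGION_INVALID	0x40	/* plays the role of AOPOBJ_INVALID */

	/* Creation never fails outright; a bad range just taints the object. */
	static int region_setup(struct region *r, bool range_ok)
	{
		if (!range_ok)
			r->flags |= REGION_INVALID;
		return 0;
	}

	/* The error surfaces only when the tainted region is actually used. */
	static int region_access(const struct region *r)
	{
		if (r->flags & REGION_INVALID)
			return -EFAULT;
		/* ... perform the field read/write ... */
		return 0;
	}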
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 71670719d61a..5691f165a952 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -189,11 +189,36 @@ acpi_status __init acpi_os_initialize(void) | |||
189 | return AE_OK; | 189 | return AE_OK; |
190 | } | 190 | } |
191 | 191 | ||
192 | static void bind_to_cpu0(struct work_struct *work) | ||
193 | { | ||
194 | set_cpus_allowed(current, cpumask_of_cpu(0)); | ||
195 | kfree(work); | ||
196 | } | ||
197 | |||
198 | static void bind_workqueue(struct workqueue_struct *wq) | ||
199 | { | ||
200 | struct work_struct *work; | ||
201 | |||
202 | work = kzalloc(sizeof(struct work_struct), GFP_KERNEL); | ||
203 | INIT_WORK(work, bind_to_cpu0); | ||
204 | queue_work(wq, work); | ||
205 | } | ||
206 | |||
192 | acpi_status acpi_os_initialize1(void) | 207 | acpi_status acpi_os_initialize1(void) |
193 | { | 208 | { |
209 | /* | ||
210 | * On some machines, a software-initiated SMI causes corruption unless | ||
211 | * the SMI runs on CPU 0. An SMI can be initiated by any AML, but | ||
212 | * typically it's done in GPE-related methods that are run via | ||
213 | * workqueues, so we can avoid the known corruption cases by binding | ||
214 | * the workqueues to CPU 0. | ||
215 | */ | ||
194 | kacpid_wq = create_singlethread_workqueue("kacpid"); | 216 | kacpid_wq = create_singlethread_workqueue("kacpid"); |
217 | bind_workqueue(kacpid_wq); | ||
195 | kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify"); | 218 | kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify"); |
219 | bind_workqueue(kacpi_notify_wq); | ||
196 | kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug"); | 220 | kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug"); |
221 | bind_workqueue(kacpi_hotplug_wq); | ||
197 | BUG_ON(!kacpid_wq); | 222 | BUG_ON(!kacpid_wq); |
198 | BUG_ON(!kacpi_notify_wq); | 223 | BUG_ON(!kacpi_notify_wq); |
199 | BUG_ON(!kacpi_hotplug_wq); | 224 | BUG_ON(!kacpi_hotplug_wq); |
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index 0944daec064f..9c61ab2177cf 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c | |||
@@ -121,7 +121,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr, | |||
121 | table_attr->attr.size = 0; | 121 | table_attr->attr.size = 0; |
122 | table_attr->attr.read = acpi_table_show; | 122 | table_attr->attr.read = acpi_table_show; |
123 | table_attr->attr.attr.name = table_attr->name; | 123 | table_attr->attr.attr.name = table_attr->name; |
124 | table_attr->attr.attr.mode = 0444; | 124 | table_attr->attr.attr.mode = 0400; |
125 | 125 | ||
126 | return; | 126 | return; |
127 | } | 127 | } |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 958c1fa41900..fe3eba5d6b3e 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -219,6 +219,8 @@ enum { | |||
219 | AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ | 219 | AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ |
220 | AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ | 220 | AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ |
221 | AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ | 221 | AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ |
222 | AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as | ||
223 | link offline */ | ||
222 | 224 | ||
223 | /* ap->flags bits */ | 225 | /* ap->flags bits */ |
224 | 226 | ||
@@ -1663,6 +1665,7 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class, | |||
1663 | int (*check_ready)(struct ata_link *link)) | 1665 | int (*check_ready)(struct ata_link *link)) |
1664 | { | 1666 | { |
1665 | struct ata_port *ap = link->ap; | 1667 | struct ata_port *ap = link->ap; |
1668 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
1666 | const char *reason = NULL; | 1669 | const char *reason = NULL; |
1667 | unsigned long now, msecs; | 1670 | unsigned long now, msecs; |
1668 | struct ata_taskfile tf; | 1671 | struct ata_taskfile tf; |
@@ -1701,12 +1704,21 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class, | |||
1701 | 1704 | ||
1702 | /* wait for link to become ready */ | 1705 | /* wait for link to become ready */ |
1703 | rc = ata_wait_after_reset(link, deadline, check_ready); | 1706 | rc = ata_wait_after_reset(link, deadline, check_ready); |
1704 | /* link occupied, -ENODEV too is an error */ | 1707 | if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) { |
1705 | if (rc) { | 1708 | /* |
1709 | * Workaround for cases where link online status can't | ||
1710 | * be trusted. Treat device readiness timeout as link | ||
1711 | * offline. | ||
1712 | */ | ||
1713 | ata_link_printk(link, KERN_INFO, | ||
1714 | "device not ready, treating as offline\n"); | ||
1715 | *class = ATA_DEV_NONE; | ||
1716 | } else if (rc) { | ||
1717 | /* link occupied, -ENODEV too is an error */ | ||
1706 | reason = "device not ready"; | 1718 | reason = "device not ready"; |
1707 | goto fail; | 1719 | goto fail; |
1708 | } | 1720 | } else |
1709 | *class = ahci_dev_classify(ap); | 1721 | *class = ahci_dev_classify(ap); |
1710 | 1722 | ||
1711 | DPRINTK("EXIT, class=%u\n", *class); | 1723 | DPRINTK("EXIT, class=%u\n", *class); |
1712 | return 0; | 1724 | return 0; |
@@ -1773,7 +1785,8 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, | |||
1773 | irq_sts = readl(port_mmio + PORT_IRQ_STAT); | 1785 | irq_sts = readl(port_mmio + PORT_IRQ_STAT); |
1774 | if (irq_sts & PORT_IRQ_BAD_PMP) { | 1786 | if (irq_sts & PORT_IRQ_BAD_PMP) { |
1775 | ata_link_printk(link, KERN_WARNING, | 1787 | ata_link_printk(link, KERN_WARNING, |
1776 | "failed due to HW bug, retry pmp=0\n"); | 1788 | "applying SB600 PMP SRST workaround " |
1789 | "and retrying\n"); | ||
1777 | rc = ahci_do_softreset(link, class, 0, deadline, | 1790 | rc = ahci_do_softreset(link, class, 0, deadline, |
1778 | ahci_check_ready); | 1791 | ahci_check_ready); |
1779 | } | 1792 | } |
@@ -2726,6 +2739,56 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) | |||
2726 | return !ver || strcmp(ver, dmi->driver_data) < 0; | 2739 | return !ver || strcmp(ver, dmi->driver_data) < 0; |
2727 | } | 2740 | } |
2728 | 2741 | ||
2742 | static bool ahci_broken_online(struct pci_dev *pdev) | ||
2743 | { | ||
2744 | #define ENCODE_BUSDEVFN(bus, slot, func) \ | ||
2745 | (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func))) | ||
2746 | static const struct dmi_system_id sysids[] = { | ||
2747 | /* | ||
2748 | * There are several gigabyte boards which use | ||
2749 | * SIMG5723s configured as hardware RAID. Certain | ||
2750 | * 5723 firmware revisions shipped there keep the link | ||
2751 | * online but fail to answer properly to SRST or | ||
2752 | * IDENTIFY when no device is attached downstream | ||
2753 | * causing libata to retry quite a few times leading | ||
2754 | * to excessive detection delay. | ||
2755 | * | ||
2756 | * As these firmwares respond to the second reset try | ||
2757 | * with invalid device signature, considering unknown | ||
2758 | * sig as offline works around the problem acceptably. | ||
2759 | */ | ||
2760 | { | ||
2761 | .ident = "EP45-DQ6", | ||
2762 | .matches = { | ||
2763 | DMI_MATCH(DMI_BOARD_VENDOR, | ||
2764 | "Gigabyte Technology Co., Ltd."), | ||
2765 | DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"), | ||
2766 | }, | ||
2767 | .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0), | ||
2768 | }, | ||
2769 | { | ||
2770 | .ident = "EP45-DS5", | ||
2771 | .matches = { | ||
2772 | DMI_MATCH(DMI_BOARD_VENDOR, | ||
2773 | "Gigabyte Technology Co., Ltd."), | ||
2774 | DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"), | ||
2775 | }, | ||
2776 | .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0), | ||
2777 | }, | ||
2778 | { } /* terminate list */ | ||
2779 | }; | ||
2780 | #undef ENCODE_BUSDEVFN | ||
2781 | const struct dmi_system_id *dmi = dmi_first_match(sysids); | ||
2782 | unsigned int val; | ||
2783 | |||
2784 | if (!dmi) | ||
2785 | return false; | ||
2786 | |||
2787 | val = (unsigned long)dmi->driver_data; | ||
2788 | |||
2789 | return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); | ||
2790 | } | ||
2791 | |||
2729 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 2792 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
2730 | { | 2793 | { |
2731 | static int printed_version; | 2794 | static int printed_version; |
@@ -2841,6 +2904,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2841 | "BIOS update required for suspend/resume\n"); | 2904 | "BIOS update required for suspend/resume\n"); |
2842 | } | 2905 | } |
2843 | 2906 | ||
2907 | if (ahci_broken_online(pdev)) { | ||
2908 | hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE; | ||
2909 | dev_info(&pdev->dev, | ||
2910 | "online status unreliable, applying workaround\n"); | ||
2911 | } | ||
2912 | |||
2844 | /* CAP.NP sometimes indicate the index of the last enabled | 2913 | /* CAP.NP sometimes indicate the index of the last enabled |
2845 | * port, at other times, that of the last possible port, so | 2914 | * port, at other times, that of the last possible port, so |
2846 | * determining the maximum port number requires looking at | 2915 | * determining the maximum port number requires looking at |
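ahci_broken_online() keys the workaround to one specific PCI function per board, packed into the DMI entry's driver_data as bus << 8 | devfn. A small worked example of the encoding, using the EP45-DQ6 entry above (bus 0x0a, slot 0, function 0); the PCI_DEVFN definition mirrors the kernel macro so the snippet builds in userspace:

	#include <stdio.h>

	#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

	int main(void)
	{
		/* EP45-DQ6 entry from the table above: bus 0x0a, slot 0, func 0 */
		unsigned long val = (0x0a << 8) | PCI_DEVFN(0x00, 0);

		printf("encoded 0x%04lx -> bus 0x%02lx devfn 0x%02lx\n",
		       val, val >> 8, val & 0xff);
		return 0;
	}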
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8ac98ff16d7d..072ba5ea138f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4302,6 +4302,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4302 | { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, | 4302 | { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, |
4303 | { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, | 4303 | { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, |
4304 | 4304 | ||
4305 | /* this one allows HPA unlocking but fails IOs on the area */ | ||
4306 | { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, | ||
4307 | |||
4305 | /* Devices which report 1 sector over size HPA */ | 4308 | /* Devices which report 1 sector over size HPA */ |
4306 | { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, | 4309 | { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, |
4307 | { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, | 4310 | { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, |
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 5702affcb325..41c94b1ae493 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c | |||
@@ -250,7 +250,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
250 | ata_port_desc(ap, "no IRQ, using PIO polling"); | 250 | ata_port_desc(ap, "no IRQ, using PIO polling"); |
251 | } | 251 | } |
252 | 252 | ||
253 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 253 | info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); |
254 | 254 | ||
255 | if (!info) { | 255 | if (!info) { |
256 | dev_err(dev, "failed to allocate memory for private data\n"); | 256 | dev_err(dev, "failed to allocate memory for private data\n"); |
@@ -275,7 +275,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
275 | if (!info->ide_addr) { | 275 | if (!info->ide_addr) { |
276 | dev_err(dev, "failed to map IO base\n"); | 276 | dev_err(dev, "failed to map IO base\n"); |
277 | ret = -ENOMEM; | 277 | ret = -ENOMEM; |
278 | goto err_ide_ioremap; | 278 | goto err_put; |
279 | } | 279 | } |
280 | 280 | ||
281 | info->alt_addr = devm_ioremap(dev, | 281 | info->alt_addr = devm_ioremap(dev, |
@@ -284,7 +284,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
284 | if (!info->alt_addr) { | 284 | if (!info->alt_addr) { |
285 | dev_err(dev, "failed to map CTL base\n"); | 285 | dev_err(dev, "failed to map CTL base\n"); |
286 | ret = -ENOMEM; | 286 | ret = -ENOMEM; |
287 | goto err_alt_ioremap; | 287 | goto err_put; |
288 | } | 288 | } |
289 | 289 | ||
290 | ap->ioaddr.cmd_addr = info->ide_addr; | 290 | ap->ioaddr.cmd_addr = info->ide_addr; |
@@ -303,13 +303,8 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
303 | irq ? ata_sff_interrupt : NULL, | 303 | irq ? ata_sff_interrupt : NULL, |
304 | irq_flags, &pata_at91_sht); | 304 | irq_flags, &pata_at91_sht); |
305 | 305 | ||
306 | err_alt_ioremap: | 306 | err_put: |
307 | devm_iounmap(dev, info->ide_addr); | ||
308 | |||
309 | err_ide_ioremap: | ||
310 | clk_put(info->mck); | 307 | clk_put(info->mck); |
311 | kfree(info); | ||
312 | |||
313 | return ret; | 308 | return ret; |
314 | } | 309 | } |
315 | 310 | ||
@@ -317,7 +312,6 @@ static int __devexit pata_at91_remove(struct platform_device *pdev) | |||
317 | { | 312 | { |
318 | struct ata_host *host = dev_get_drvdata(&pdev->dev); | 313 | struct ata_host *host = dev_get_drvdata(&pdev->dev); |
319 | struct at91_ide_info *info; | 314 | struct at91_ide_info *info; |
320 | struct device *dev = &pdev->dev; | ||
321 | 315 | ||
322 | if (!host) | 316 | if (!host) |
323 | return 0; | 317 | return 0; |
@@ -328,11 +322,8 @@ static int __devexit pata_at91_remove(struct platform_device *pdev) | |||
328 | if (!info) | 322 | if (!info) |
329 | return 0; | 323 | return 0; |
330 | 324 | ||
331 | devm_iounmap(dev, info->ide_addr); | ||
332 | devm_iounmap(dev, info->alt_addr); | ||
333 | clk_put(info->mck); | 325 | clk_put(info->mck); |
334 | 326 | ||
335 | kfree(info); | ||
336 | return 0; | 327 | return 0; |
337 | } | 328 | } |
338 | 329 | ||
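The pata_at91 fix switches the private data to device-managed allocation, which is why the explicit devm_iounmap()/kfree() calls disappear from both the error path and remove(): anything obtained through devm_* is released automatically when the device goes away. A hedged sketch of the pattern, with hypothetical names (my_info, MY_REG_BASE, MY_REG_SIZE):

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/slab.h>

	struct my_info {
		void __iomem *regs;
	};

	#define MY_REG_BASE	0x10000000	/* hypothetical */
	#define MY_REG_SIZE	0x100		/* hypothetical */

	static int example_probe(struct platform_device *pdev)
	{
		struct my_info *info;

		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;		/* nothing to undo */

		info->regs = devm_ioremap(&pdev->dev, MY_REG_BASE, MY_REG_SIZE);
		if (!info->regs)
			return -ENOMEM;		/* info is freed for us */

		platform_set_drvdata(pdev, info);
		return 0;
	}

	static int example_remove(struct platform_device *pdev)
	{
		/* no kfree()/iounmap(): devm tears everything down */
		return 0;
	}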
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index bec0b8ade66d..45915566e4e9 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * pata_atiixp.c - ATI PATA for new ATA layer | 2 | * pata_atiixp.c - ATI PATA for new ATA layer |
3 | * (C) 2005 Red Hat Inc | 3 | * (C) 2005 Red Hat Inc |
4 | * (C) 2009 Bartlomiej Zolnierkiewicz | ||
4 | * | 5 | * |
5 | * Based on | 6 | * Based on |
6 | * | 7 | * |
@@ -61,20 +62,19 @@ static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, | |||
61 | 62 | ||
62 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 63 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
63 | int dn = 2 * ap->port_no + adev->devno; | 64 | int dn = 2 * ap->port_no + adev->devno; |
64 | |||
65 | /* Check this is correct - the order is odd in both drivers */ | ||
66 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); | 65 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); |
67 | u16 pio_mode_data, pio_timing_data; | 66 | u32 pio_timing_data; |
67 | u16 pio_mode_data; | ||
68 | 68 | ||
69 | pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); | 69 | pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); |
70 | pio_mode_data &= ~(0x7 << (4 * dn)); | 70 | pio_mode_data &= ~(0x7 << (4 * dn)); |
71 | pio_mode_data |= pio << (4 * dn); | 71 | pio_mode_data |= pio << (4 * dn); |
72 | pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data); | 72 | pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data); |
73 | 73 | ||
74 | pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); | 74 | pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data); |
75 | pio_timing_data &= ~(0xFF << timing_shift); | 75 | pio_timing_data &= ~(0xFF << timing_shift); |
76 | pio_timing_data |= (pio_timings[pio] << timing_shift); | 76 | pio_timing_data |= (pio_timings[pio] << timing_shift); |
77 | pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); | 77 | pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); |
78 | } | 78 | } |
79 | 79 | ||
80 | /** | 80 | /** |
@@ -119,16 +119,17 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
119 | udma_mode_data |= dma << (4 * dn); | 119 | udma_mode_data |= dma << (4 * dn); |
120 | pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data); | 120 | pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data); |
121 | } else { | 121 | } else { |
122 | u16 mwdma_timing_data; | ||
123 | /* Check this is correct - the order is odd in both drivers */ | ||
124 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); | 122 | int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1); |
123 | u32 mwdma_timing_data; | ||
125 | 124 | ||
126 | dma -= XFER_MW_DMA_0; | 125 | dma -= XFER_MW_DMA_0; |
127 | 126 | ||
128 | pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data); | 127 | pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, |
128 | &mwdma_timing_data); | ||
129 | mwdma_timing_data &= ~(0xFF << timing_shift); | 129 | mwdma_timing_data &= ~(0xFF << timing_shift); |
130 | mwdma_timing_data |= (mwdma_timings[dma] << timing_shift); | 130 | mwdma_timing_data |= (mwdma_timings[dma] << timing_shift); |
131 | pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data); | 131 | pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING, |
132 | mwdma_timing_data); | ||
132 | } | 133 | } |
133 | /* | 134 | /* |
134 | * We must now look at the PIO mode situation. We may need to | 135 | * We must now look at the PIO mode situation. We may need to |
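The atiixp change is not cosmetic: the per-drive timing byte sits at bit offset 16*port_no + 8*(devno ^ 1), i.e. 0, 8, 16 or 24, so the secondary port's fields live above bit 15 and a 16-bit config-space access silently truncates them; the timing registers have to be read and written as 32-bit dwords. A small sketch of the offset calculation:

	/* Sketch: bit offset of a drive's timing byte inside the 32-bit
	 * ATIIXP PIO/MWDMA timing registers. Port 1 lands at bits 16-31,
	 * which is why 16-bit accessors corrupted the second channel. */
	static int atiixp_timing_shift(int port_no, int devno)
	{
		return 16 * port_no + 8 * (devno ^ 1);	/* 0, 8, 16 or 24 */
	}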
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index b2d11f300c39..86a40582999c 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -602,6 +602,7 @@ MODULE_VERSION(DRV_VERSION); | |||
602 | 602 | ||
603 | static int adma_enabled; | 603 | static int adma_enabled; |
604 | static int swncq_enabled = 1; | 604 | static int swncq_enabled = 1; |
605 | static int msi_enabled; | ||
605 | 606 | ||
606 | static void nv_adma_register_mode(struct ata_port *ap) | 607 | static void nv_adma_register_mode(struct ata_port *ap) |
607 | { | 608 | { |
@@ -2459,6 +2460,11 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2459 | } else if (type == SWNCQ) | 2460 | } else if (type == SWNCQ) |
2460 | nv_swncq_host_init(host); | 2461 | nv_swncq_host_init(host); |
2461 | 2462 | ||
2463 | if (msi_enabled) { | ||
2464 | dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n"); | ||
2465 | pci_enable_msi(pdev); | ||
2466 | } | ||
2467 | |||
2462 | pci_set_master(pdev); | 2468 | pci_set_master(pdev); |
2463 | return ata_host_activate(host, pdev->irq, ipriv->irq_handler, | 2469 | return ata_host_activate(host, pdev->irq, ipriv->irq_handler, |
2464 | IRQF_SHARED, ipriv->sht); | 2470 | IRQF_SHARED, ipriv->sht); |
@@ -2558,4 +2564,6 @@ module_param_named(adma, adma_enabled, bool, 0444); | |||
2558 | MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); | 2564 | MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)"); |
2559 | module_param_named(swncq, swncq_enabled, bool, 0444); | 2565 | module_param_named(swncq, swncq_enabled, bool, 0444); |
2560 | MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); | 2566 | MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)"); |
2567 | module_param_named(msi, msi_enabled, bool, 0444); | ||
2568 | MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)"); | ||
2561 | 2569 | ||
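A brief usage note on the new knob: MSI stays off by default and can be requested at load time with, for example, modprobe sata_nv msi=1 (or sata_nv.msi=1 on the kernel command line). The return value of pci_enable_msi() is not checked here, so if MSI cannot be enabled the controller simply keeps using its legacy interrupt line.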
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 81cb01bfc356..456594bd97bc 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -483,9 +483,6 @@ int platform_driver_register(struct platform_driver *drv) | |||
483 | drv->driver.remove = platform_drv_remove; | 483 | drv->driver.remove = platform_drv_remove; |
484 | if (drv->shutdown) | 484 | if (drv->shutdown) |
485 | drv->driver.shutdown = platform_drv_shutdown; | 485 | drv->driver.shutdown = platform_drv_shutdown; |
486 | if (drv->suspend || drv->resume) | ||
487 | pr_warning("Platform driver '%s' needs updating - please use " | ||
488 | "dev_pm_ops\n", drv->driver.name); | ||
489 | 486 | ||
490 | return driver_register(&drv->driver); | 487 | return driver_register(&drv->driver); |
491 | } | 488 | } |
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index f4bb43fb8016..e077701ae3d9 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c | |||
@@ -225,7 +225,7 @@ static const struct agp_bridge_driver parisc_agp_driver = { | |||
225 | .configure = parisc_agp_configure, | 225 | .configure = parisc_agp_configure, |
226 | .fetch_size = parisc_agp_fetch_size, | 226 | .fetch_size = parisc_agp_fetch_size, |
227 | .tlb_flush = parisc_agp_tlbflush, | 227 | .tlb_flush = parisc_agp_tlbflush, |
228 | .mask_memory = parisc_agp_mask_memory, | 228 | .mask_memory = parisc_agp_page_mask_memory, |
229 | .masks = parisc_agp_masks, | 229 | .masks = parisc_agp_masks, |
230 | .agp_enable = parisc_agp_enable, | 230 | .agp_enable = parisc_agp_enable, |
231 | .cache_flush = global_cache_flush, | 231 | .cache_flush = global_cache_flush, |
diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 6e6942c45f5b..d083c73d784a 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c | |||
@@ -144,6 +144,8 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, | |||
144 | 144 | ||
145 | static int pty_write_room(struct tty_struct *tty) | 145 | static int pty_write_room(struct tty_struct *tty) |
146 | { | 146 | { |
147 | if (tty->stopped) | ||
148 | return 0; | ||
147 | return pty_space(tty->link); | 149 | return pty_space(tty->link); |
148 | } | 150 | } |
149 | 151 | ||
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index acd76b767d4c..1733d3439ad2 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c | |||
@@ -48,6 +48,41 @@ static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); | |||
48 | /* Line disc dispatch table */ | 48 | /* Line disc dispatch table */ |
49 | static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; | 49 | static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; |
50 | 50 | ||
51 | static inline struct tty_ldisc *get_ldisc(struct tty_ldisc *ld) | ||
52 | { | ||
53 | if (ld) | ||
54 | atomic_inc(&ld->users); | ||
55 | return ld; | ||
56 | } | ||
57 | |||
58 | static void put_ldisc(struct tty_ldisc *ld) | ||
59 | { | ||
60 | unsigned long flags; | ||
61 | |||
62 | if (WARN_ON_ONCE(!ld)) | ||
63 | return; | ||
64 | |||
65 | /* | ||
66 | * If this is the last user, free the ldisc, and | ||
67 | * release the ldisc ops. | ||
68 | * | ||
69 | * We really want an "atomic_dec_and_lock_irqsave()", | ||
70 | * but we don't have it, so this does it by hand. | ||
71 | */ | ||
72 | local_irq_save(flags); | ||
73 | if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { | ||
74 | struct tty_ldisc_ops *ldo = ld->ops; | ||
75 | |||
76 | ldo->refcount--; | ||
77 | module_put(ldo->owner); | ||
78 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | ||
79 | |||
80 | kfree(ld); | ||
81 | return; | ||
82 | } | ||
83 | local_irq_restore(flags); | ||
84 | } | ||
85 | |||
51 | /** | 86 | /** |
52 | * tty_register_ldisc - install a line discipline | 87 | * tty_register_ldisc - install a line discipline |
53 | * @disc: ldisc number | 88 | * @disc: ldisc number |
@@ -142,7 +177,7 @@ static struct tty_ldisc *tty_ldisc_try_get(int disc) | |||
142 | /* lock it */ | 177 | /* lock it */ |
143 | ldops->refcount++; | 178 | ldops->refcount++; |
144 | ld->ops = ldops; | 179 | ld->ops = ldops; |
145 | ld->refcount = 0; | 180 | atomic_set(&ld->users, 1); |
146 | err = 0; | 181 | err = 0; |
147 | } | 182 | } |
148 | } | 183 | } |
@@ -181,35 +216,6 @@ static struct tty_ldisc *tty_ldisc_get(int disc) | |||
181 | return ld; | 216 | return ld; |
182 | } | 217 | } |
183 | 218 | ||
184 | /** | ||
185 | * tty_ldisc_put - drop ldisc reference | ||
186 | * @ld: ldisc | ||
187 | * | ||
188 | * Drop a reference to a line discipline. Manage refcounts and | ||
189 | * module usage counts. Free the ldisc once the recount hits zero. | ||
190 | * | ||
191 | * Locking: | ||
192 | * takes tty_ldisc_lock to guard against ldisc races | ||
193 | */ | ||
194 | |||
195 | static void tty_ldisc_put(struct tty_ldisc *ld) | ||
196 | { | ||
197 | unsigned long flags; | ||
198 | int disc = ld->ops->num; | ||
199 | struct tty_ldisc_ops *ldo; | ||
200 | |||
201 | BUG_ON(disc < N_TTY || disc >= NR_LDISCS); | ||
202 | |||
203 | spin_lock_irqsave(&tty_ldisc_lock, flags); | ||
204 | ldo = tty_ldiscs[disc]; | ||
205 | BUG_ON(ldo->refcount == 0); | ||
206 | ldo->refcount--; | ||
207 | module_put(ldo->owner); | ||
208 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | ||
209 | WARN_ON(ld->refcount); | ||
210 | kfree(ld); | ||
211 | } | ||
212 | |||
213 | static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos) | 219 | static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos) |
214 | { | 220 | { |
215 | return (*pos < NR_LDISCS) ? pos : NULL; | 221 | return (*pos < NR_LDISCS) ? pos : NULL; |
@@ -234,7 +240,7 @@ static int tty_ldiscs_seq_show(struct seq_file *m, void *v) | |||
234 | if (IS_ERR(ld)) | 240 | if (IS_ERR(ld)) |
235 | return 0; | 241 | return 0; |
236 | seq_printf(m, "%-10s %2d\n", ld->ops->name ? ld->ops->name : "???", i); | 242 | seq_printf(m, "%-10s %2d\n", ld->ops->name ? ld->ops->name : "???", i); |
237 | tty_ldisc_put(ld); | 243 | put_ldisc(ld); |
238 | return 0; | 244 | return 0; |
239 | } | 245 | } |
240 | 246 | ||
@@ -288,20 +294,17 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) | |||
288 | * Locking: takes tty_ldisc_lock | 294 | * Locking: takes tty_ldisc_lock |
289 | */ | 295 | */ |
290 | 296 | ||
291 | static int tty_ldisc_try(struct tty_struct *tty) | 297 | static struct tty_ldisc *tty_ldisc_try(struct tty_struct *tty) |
292 | { | 298 | { |
293 | unsigned long flags; | 299 | unsigned long flags; |
294 | struct tty_ldisc *ld; | 300 | struct tty_ldisc *ld; |
295 | int ret = 0; | ||
296 | 301 | ||
297 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 302 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
298 | ld = tty->ldisc; | 303 | ld = NULL; |
299 | if (test_bit(TTY_LDISC, &tty->flags)) { | 304 | if (test_bit(TTY_LDISC, &tty->flags)) |
300 | ld->refcount++; | 305 | ld = get_ldisc(tty->ldisc); |
301 | ret = 1; | ||
302 | } | ||
303 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 306 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
304 | return ret; | 307 | return ld; |
305 | } | 308 | } |
306 | 309 | ||
307 | /** | 310 | /** |
@@ -322,10 +325,11 @@ static int tty_ldisc_try(struct tty_struct *tty) | |||
322 | 325 | ||
323 | struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) | 326 | struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) |
324 | { | 327 | { |
328 | struct tty_ldisc *ld; | ||
329 | |||
325 | /* wait_event is a macro */ | 330 | /* wait_event is a macro */ |
326 | wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); | 331 | wait_event(tty_ldisc_wait, (ld = tty_ldisc_try(tty)) != NULL); |
327 | WARN_ON(tty->ldisc->refcount == 0); | 332 | return ld; |
328 | return tty->ldisc; | ||
329 | } | 333 | } |
330 | EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); | 334 | EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); |
331 | 335 | ||
@@ -342,9 +346,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); | |||
342 | 346 | ||
343 | struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) | 347 | struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) |
344 | { | 348 | { |
345 | if (tty_ldisc_try(tty)) | 349 | return tty_ldisc_try(tty); |
346 | return tty->ldisc; | ||
347 | return NULL; | ||
348 | } | 350 | } |
349 | EXPORT_SYMBOL_GPL(tty_ldisc_ref); | 351 | EXPORT_SYMBOL_GPL(tty_ldisc_ref); |
350 | 352 | ||
@@ -360,21 +362,15 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref); | |||
360 | 362 | ||
361 | void tty_ldisc_deref(struct tty_ldisc *ld) | 363 | void tty_ldisc_deref(struct tty_ldisc *ld) |
362 | { | 364 | { |
363 | unsigned long flags; | 365 | put_ldisc(ld); |
364 | |||
365 | BUG_ON(ld == NULL); | ||
366 | |||
367 | spin_lock_irqsave(&tty_ldisc_lock, flags); | ||
368 | if (ld->refcount == 0) | ||
369 | printk(KERN_ERR "tty_ldisc_deref: no references.\n"); | ||
370 | else | ||
371 | ld->refcount--; | ||
372 | if (ld->refcount == 0) | ||
373 | wake_up(&tty_ldisc_wait); | ||
374 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | ||
375 | } | 366 | } |
376 | EXPORT_SYMBOL_GPL(tty_ldisc_deref); | 367 | EXPORT_SYMBOL_GPL(tty_ldisc_deref); |
377 | 368 | ||
369 | static inline void tty_ldisc_put(struct tty_ldisc *ld) | ||
370 | { | ||
371 | put_ldisc(ld); | ||
372 | } | ||
373 | |||
378 | /** | 374 | /** |
379 | * tty_ldisc_enable - allow ldisc use | 375 | * tty_ldisc_enable - allow ldisc use |
380 | * @tty: terminal to activate ldisc on | 376 | * @tty: terminal to activate ldisc on |
@@ -523,31 +519,6 @@ static int tty_ldisc_halt(struct tty_struct *tty) | |||
523 | } | 519 | } |
524 | 520 | ||
525 | /** | 521 | /** |
526 | * tty_ldisc_wait_idle - wait for the ldisc to become idle | ||
527 | * @tty: tty to wait for | ||
528 | * | ||
529 | * Wait for the line discipline to become idle. The discipline must | ||
530 | * have been halted for this to guarantee it remains idle. | ||
531 | * | ||
532 | * tty_ldisc_lock protects the ref counts currently. | ||
533 | */ | ||
534 | |||
535 | static int tty_ldisc_wait_idle(struct tty_struct *tty) | ||
536 | { | ||
537 | unsigned long flags; | ||
538 | spin_lock_irqsave(&tty_ldisc_lock, flags); | ||
539 | while (tty->ldisc->refcount) { | ||
540 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | ||
541 | if (wait_event_timeout(tty_ldisc_wait, | ||
542 | tty->ldisc->refcount == 0, 5 * HZ) == 0) | ||
543 | return -EBUSY; | ||
544 | spin_lock_irqsave(&tty_ldisc_lock, flags); | ||
545 | } | ||
546 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | /** | ||
551 | * tty_set_ldisc - set line discipline | 522 | * tty_set_ldisc - set line discipline |
552 | * @tty: the terminal to set | 523 | * @tty: the terminal to set |
553 | * @ldisc: the line discipline | 524 | * @ldisc: the line discipline |
@@ -642,14 +613,6 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) | |||
642 | 613 | ||
643 | flush_scheduled_work(); | 614 | flush_scheduled_work(); |
644 | 615 | ||
645 | /* Let any existing reference holders finish */ | ||
646 | retval = tty_ldisc_wait_idle(tty); | ||
647 | if (retval < 0) { | ||
648 | clear_bit(TTY_LDISC_CHANGING, &tty->flags); | ||
649 | tty_ldisc_put(new_ldisc); | ||
650 | return retval; | ||
651 | } | ||
652 | |||
653 | mutex_lock(&tty->ldisc_mutex); | 616 | mutex_lock(&tty->ldisc_mutex); |
654 | if (test_bit(TTY_HUPPED, &tty->flags)) { | 617 | if (test_bit(TTY_HUPPED, &tty->flags)) { |
655 | /* We were raced by the hangup method. It will have stomped | 618 | /* We were raced by the hangup method. It will have stomped |
@@ -795,7 +758,6 @@ void tty_ldisc_hangup(struct tty_struct *tty) | |||
795 | if (tty->ldisc) { /* Not yet closed */ | 758 | if (tty->ldisc) { /* Not yet closed */ |
796 | /* Switch back to N_TTY */ | 759 | /* Switch back to N_TTY */ |
797 | tty_ldisc_halt(tty); | 760 | tty_ldisc_halt(tty); |
798 | tty_ldisc_wait_idle(tty); | ||
799 | tty_ldisc_reinit(tty); | 761 | tty_ldisc_reinit(tty); |
800 | /* At this point we have a closed ldisc and we want to | 762 | /* At this point we have a closed ldisc and we want to |
801 | reopen it. We could defer this to the next open but | 763 | reopen it. We could defer this to the next open but |
@@ -860,14 +822,6 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty) | |||
860 | tty_ldisc_halt(tty); | 822 | tty_ldisc_halt(tty); |
861 | flush_scheduled_work(); | 823 | flush_scheduled_work(); |
862 | 824 | ||
863 | /* | ||
864 | * Wait for any short term users (we know they are just driver | ||
865 | * side waiters as the file is closing so user count on the file | ||
866 | * side is zero. | ||
867 | */ | ||
868 | |||
869 | tty_ldisc_wait_idle(tty); | ||
870 | |||
871 | mutex_lock(&tty->ldisc_mutex); | 825 | mutex_lock(&tty->ldisc_mutex); |
872 | /* | 826 | /* |
873 | * Now kill off the ldisc | 827 | * Now kill off the ldisc |
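put_ldisc() above open-codes the missing atomic_dec_and_lock_irqsave(): interrupts are disabled first, then atomic_dec_and_lock() takes tty_ldisc_lock only on the final decrement, so the last reference performs its cleanup under the lock while every other drop costs just an atomic operation. A stripped-down sketch of that shape (put_ref() and its release() hook are hypothetical):

	#include <linux/atomic.h>
	#include <linux/spinlock.h>

	static void put_ref(atomic_t *count, spinlock_t *lock,
			    void (*release)(void))
	{
		unsigned long flags;

		/* "atomic_dec_and_lock_irqsave()" by hand */
		local_irq_save(flags);
		if (atomic_dec_and_lock(count, lock)) {
			release();		/* last user: tear everything down */
			spin_unlock_irqrestore(lock, flags);
			return;
		}
		local_irq_restore(flags);
	}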
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b90eda8b3440..fd69086d08d5 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -858,6 +858,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
858 | 858 | ||
859 | /* Check for existing affected CPUs. | 859 | /* Check for existing affected CPUs. |
860 | * They may not be aware of it due to CPU Hotplug. | 860 | * They may not be aware of it due to CPU Hotplug. |
861 | * cpufreq_cpu_put is called when the device is removed | ||
862 | * in __cpufreq_remove_dev() | ||
861 | */ | 863 | */ |
862 | managed_policy = cpufreq_cpu_get(j); | 864 | managed_policy = cpufreq_cpu_get(j); |
863 | if (unlikely(managed_policy)) { | 865 | if (unlikely(managed_policy)) { |
@@ -884,7 +886,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
884 | ret = sysfs_create_link(&sys_dev->kobj, | 886 | ret = sysfs_create_link(&sys_dev->kobj, |
885 | &managed_policy->kobj, | 887 | &managed_policy->kobj, |
886 | "cpufreq"); | 888 | "cpufreq"); |
887 | if (!ret) | 889 | if (ret) |
888 | cpufreq_cpu_put(managed_policy); | 890 | cpufreq_cpu_put(managed_policy); |
889 | /* | 891 | /* |
890 | * Success. We only needed to be added to the mask. | 892 | * Success. We only needed to be added to the mask. |
@@ -924,6 +926,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) | |||
924 | 926 | ||
925 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 927 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
926 | for_each_cpu(j, policy->cpus) { | 928 | for_each_cpu(j, policy->cpus) { |
929 | if (!cpu_online(j)) | ||
930 | continue; | ||
927 | per_cpu(cpufreq_cpu_data, j) = policy; | 931 | per_cpu(cpufreq_cpu_data, j) = policy; |
928 | per_cpu(policy_cpu, j) = policy->cpu; | 932 | per_cpu(policy_cpu, j) = policy->cpu; |
929 | } | 933 | } |
@@ -1244,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get); | |||
1244 | 1248 | ||
1245 | static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) | 1249 | static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) |
1246 | { | 1250 | { |
1247 | int cpu = sysdev->id; | ||
1248 | int ret = 0; | 1251 | int ret = 0; |
1252 | |||
1253 | #ifdef __powerpc__ | ||
1254 | int cpu = sysdev->id; | ||
1249 | unsigned int cur_freq = 0; | 1255 | unsigned int cur_freq = 0; |
1250 | struct cpufreq_policy *cpu_policy; | 1256 | struct cpufreq_policy *cpu_policy; |
1251 | 1257 | ||
1252 | dprintk("suspending cpu %u\n", cpu); | 1258 | dprintk("suspending cpu %u\n", cpu); |
1253 | 1259 | ||
1260 | /* | ||
1261 | * This whole bogosity is here because Powerbooks are made of fail. | ||
1262 | * No sane platform should need any of the code below to be run. | ||
1263 | * (it's entirely the wrong thing to do, as driver->get may | ||
1264 | * reenable interrupts on some architectures). | ||
1265 | */ | ||
1266 | |||
1254 | if (!cpu_online(cpu)) | 1267 | if (!cpu_online(cpu)) |
1255 | return 0; | 1268 | return 0; |
1256 | 1269 | ||
@@ -1309,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) | |||
1309 | 1322 | ||
1310 | out: | 1323 | out: |
1311 | cpufreq_cpu_put(cpu_policy); | 1324 | cpufreq_cpu_put(cpu_policy); |
1325 | #endif /* __powerpc__ */ | ||
1312 | return ret; | 1326 | return ret; |
1313 | } | 1327 | } |
1314 | 1328 | ||
@@ -1322,12 +1336,18 @@ out: | |||
1322 | */ | 1336 | */ |
1323 | static int cpufreq_resume(struct sys_device *sysdev) | 1337 | static int cpufreq_resume(struct sys_device *sysdev) |
1324 | { | 1338 | { |
1325 | int cpu = sysdev->id; | ||
1326 | int ret = 0; | 1339 | int ret = 0; |
1340 | |||
1341 | #ifdef __powerpc__ | ||
1342 | int cpu = sysdev->id; | ||
1327 | struct cpufreq_policy *cpu_policy; | 1343 | struct cpufreq_policy *cpu_policy; |
1328 | 1344 | ||
1329 | dprintk("resuming cpu %u\n", cpu); | 1345 | dprintk("resuming cpu %u\n", cpu); |
1330 | 1346 | ||
1347 | /* As with the ->suspend method, all the code below is | ||
1348 | * only necessary because Powerbooks suck. | ||
1349 | * See commit 42d4dc3f4e1e for jokes. */ | ||
1350 | |||
1331 | if (!cpu_online(cpu)) | 1351 | if (!cpu_online(cpu)) |
1332 | return 0; | 1352 | return 0; |
1333 | 1353 | ||
@@ -1391,6 +1411,7 @@ out: | |||
1391 | schedule_work(&cpu_policy->update); | 1411 | schedule_work(&cpu_policy->update); |
1392 | fail: | 1412 | fail: |
1393 | cpufreq_cpu_put(cpu_policy); | 1413 | cpufreq_cpu_put(cpu_policy); |
1414 | #endif /* __powerpc__ */ | ||
1394 | return ret; | 1415 | return ret; |
1395 | } | 1416 | } |
1396 | 1417 | ||
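The sysfs_create_link() change above flips the condition so the policy reference taken by cpufreq_cpu_get() is only dropped when creating the link fails; on success the reference is held until __cpufreq_remove_dev() releases it, as the new comment notes. A minimal userspace sketch of that reference-count rule follows; the names and the create_link() stand-in are illustrative only, not the kernel API.

#include <stdio.h>

struct policy { int refcount; };

static void policy_get(struct policy *p) { p->refcount++; }
static void policy_put(struct policy *p) { p->refcount--; }

/* pretend sysfs link creation; nonzero means failure */
static int create_link(int fail) { return fail ? -1 : 0; }

static int add_cpu(struct policy *p, int fail_link)
{
    int ret;

    policy_get(p);              /* take a reference for the new CPU */
    ret = create_link(fail_link);
    if (ret)
        policy_put(p);          /* failure: give the reference back */
    /* success: reference is kept until the CPU is removed */
    return ret;
}

int main(void)
{
    struct policy p = { .refcount = 0 };

    add_cpu(&p, 0);
    printf("after successful add: refcount=%d\n", p.refcount); /* 1 */
    add_cpu(&p, 1);
    printf("after failed add:     refcount=%d\n", p.refcount); /* still 1 */
    return 0;
}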
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 57490502b21c..bdea7e2f94ba 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -63,6 +63,7 @@ struct cpu_dbs_info_s { | |||
63 | unsigned int down_skip; | 63 | unsigned int down_skip; |
64 | unsigned int requested_freq; | 64 | unsigned int requested_freq; |
65 | int cpu; | 65 | int cpu; |
66 | unsigned int enable:1; | ||
66 | /* | 67 | /* |
67 | * percpu mutex that serializes governor limit change with | 68 | * percpu mutex that serializes governor limit change with |
68 | * do_dbs_timer invocation. We do not want do_dbs_timer to run | 69 | * do_dbs_timer invocation. We do not want do_dbs_timer to run |
@@ -141,6 +142,9 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | |||
141 | 142 | ||
142 | struct cpufreq_policy *policy; | 143 | struct cpufreq_policy *policy; |
143 | 144 | ||
145 | if (!this_dbs_info->enable) | ||
146 | return 0; | ||
147 | |||
144 | policy = this_dbs_info->cur_policy; | 148 | policy = this_dbs_info->cur_policy; |
145 | 149 | ||
146 | /* | 150 | /* |
@@ -497,6 +501,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) | |||
497 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 501 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
498 | delay -= jiffies % delay; | 502 | delay -= jiffies % delay; |
499 | 503 | ||
504 | dbs_info->enable = 1; | ||
500 | INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); | 505 | INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); |
501 | queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, | 506 | queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, |
502 | delay); | 507 | delay); |
@@ -504,6 +509,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) | |||
504 | 509 | ||
505 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 510 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) |
506 | { | 511 | { |
512 | dbs_info->enable = 0; | ||
507 | cancel_delayed_work_sync(&dbs_info->work); | 513 | cancel_delayed_work_sync(&dbs_info->work); |
508 | } | 514 | } |
509 | 515 | ||
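The new enable bit closes a window where the cpufreq transition notifier could run against a governor that dbs_timer_exit() is already tearing down. A rough single-threaded illustration of the guard, with hypothetical names and no real locking:

#include <stdio.h>

struct dbs_info {
    int enable;          /* 1 while the governor's timer is armed */
    int requested_freq;
};

static int notifier(struct dbs_info *info, int new_freq)
{
    if (!info->enable)   /* governor stopped: ignore the event */
        return 0;
    info->requested_freq = new_freq;
    return 1;
}

int main(void)
{
    struct dbs_info info = { .enable = 1, .requested_freq = 0 };

    printf("handled=%d\n", notifier(&info, 800000)); /* 1: processed */
    info.enable = 0;     /* dbs_timer_exit() clears the flag before cancelling work */
    printf("handled=%d\n", notifier(&info, 600000)); /* 0: ignored   */
    return 0;
}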
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 24964c1d0af9..e2a10bcba7a1 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -868,6 +868,8 @@ static void amd64_read_dbam_reg(struct amd64_pvt *pvt) | |||
868 | goto err_reg; | 868 | goto err_reg; |
869 | } | 869 | } |
870 | 870 | ||
871 | return; | ||
872 | |||
871 | err_reg: | 873 | err_reg: |
872 | debugf0("Error reading F2x%03x.\n", reg); | 874 | debugf0("Error reading F2x%03x.\n", reg); |
873 | } | 875 | } |
@@ -2634,6 +2636,8 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |||
2634 | 2636 | ||
2635 | amd64_dump_misc_regs(pvt); | 2637 | amd64_dump_misc_regs(pvt); |
2636 | 2638 | ||
2639 | return; | ||
2640 | |||
2637 | err_reg: | 2641 | err_reg: |
2638 | debugf0("Reading an MC register failed\n"); | 2642 | debugf0("Reading an MC register failed\n"); |
2639 | 2643 | ||
@@ -2977,6 +2981,9 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | |||
2977 | "ECC is enabled by BIOS, Proceeding " | 2981 | "ECC is enabled by BIOS, Proceeding " |
2978 | "with EDAC module initialization\n"); | 2982 | "with EDAC module initialization\n"); |
2979 | 2983 | ||
2984 | /* Signal good ECC status */ | ||
2985 | ret = 0; | ||
2986 | |||
2980 | /* CLEAR the override, since BIOS controlled it */ | 2987 | /* CLEAR the override, since BIOS controlled it */ |
2981 | ecc_enable_override = 0; | 2988 | ecc_enable_override = 0; |
2982 | } | 2989 | } |
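Both hunks in this file add an early return so that the err_reg label is only reached on failure; previously the success path fell straight through into the error message. A self-contained sketch of that control-flow pattern, with invented function names:

#include <stdio.h>

static int read_reg(int fail) { return fail ? -1 : 0; }

static void read_registers(int fail)
{
    if (read_reg(fail))
        goto err_reg;

    printf("registers read OK\n");
    return;                     /* the added return: skip the error path */

err_reg:
    printf("Error reading register\n");
}

int main(void)
{
    read_registers(0);          /* prints only the success message */
    read_registers(1);          /* prints only the error message   */
    return 0;
}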
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 8fab7890a363..33be210d6723 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -1461,7 +1461,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
1461 | goto out; | 1461 | goto out; |
1462 | } | 1462 | } |
1463 | 1463 | ||
1464 | if (crtc_req->count_connectors > 0 && !mode && !fb) { | 1464 | if (crtc_req->count_connectors > 0 && (!mode || !fb)) { |
1465 | DRM_DEBUG("Count connectors is %d but no mode or fb set\n", | 1465 | DRM_DEBUG("Count connectors is %d but no mode or fb set\n", |
1466 | crtc_req->count_connectors); | 1466 | crtc_req->count_connectors); |
1467 | ret = -EINVAL; | 1467 | ret = -EINVAL; |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 3da9cfa31848..6aaa2cb23365 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -706,8 +706,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
706 | struct drm_encoder **save_encoders, *new_encoder; | 706 | struct drm_encoder **save_encoders, *new_encoder; |
707 | struct drm_framebuffer *old_fb = NULL; | 707 | struct drm_framebuffer *old_fb = NULL; |
708 | bool save_enabled; | 708 | bool save_enabled; |
709 | bool mode_changed = false; | 709 | bool mode_changed = false; /* if true do a full mode set */ |
710 | bool fb_changed = false; | 710 | bool fb_changed = false; /* if true and !mode_changed just do a flip */ |
711 | struct drm_connector *connector; | 711 | struct drm_connector *connector; |
712 | int count = 0, ro, fail = 0; | 712 | int count = 0, ro, fail = 0; |
713 | struct drm_crtc_helper_funcs *crtc_funcs; | 713 | struct drm_crtc_helper_funcs *crtc_funcs; |
@@ -758,6 +758,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
758 | if (set->crtc->fb == NULL) { | 758 | if (set->crtc->fb == NULL) { |
759 | DRM_DEBUG("crtc has no fb, full mode set\n"); | 759 | DRM_DEBUG("crtc has no fb, full mode set\n"); |
760 | mode_changed = true; | 760 | mode_changed = true; |
761 | } else if (set->fb == NULL) { | ||
762 | mode_changed = true; | ||
761 | } else if ((set->fb->bits_per_pixel != | 763 | } else if ((set->fb->bits_per_pixel != |
762 | set->crtc->fb->bits_per_pixel) || | 764 | set->crtc->fb->bits_per_pixel) || |
763 | set->fb->depth != set->crtc->fb->depth) | 765 | set->fb->depth != set->crtc->fb->depth) |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index b4a3dbcebe9b..f85aaf21e783 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -566,7 +566,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
566 | 566 | ||
567 | ret = drm_vblank_get(dev, crtc); | 567 | ret = drm_vblank_get(dev, crtc); |
568 | if (ret) { | 568 | if (ret) { |
569 | DRM_ERROR("failed to acquire vblank counter, %d\n", ret); | 569 | DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); |
570 | return ret; | 570 | return ret; |
571 | } | 571 | } |
572 | seq = drm_vblank_count(dev, crtc); | 572 | seq = drm_vblank_count(dev, crtc); |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 54f492a488a9..7914097b09c6 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -566,6 +566,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector) | |||
566 | found_it = 1; | 566 | found_it = 1; |
567 | /* if equal delete the probed mode */ | 567 | /* if equal delete the probed mode */ |
568 | mode->status = pmode->status; | 568 | mode->status = pmode->status; |
569 | /* Merge type bits together */ | ||
570 | mode->type |= pmode->type; | ||
569 | list_del(&pmode->head); | 571 | list_del(&pmode->head); |
570 | drm_mode_destroy(connector->dev, pmode); | 572 | drm_mode_destroy(connector->dev, pmode); |
571 | break; | 573 | break; |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8c4783180bf6..50d1f782768c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1186,6 +1186,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1186 | if (ret) | 1186 | if (ret) |
1187 | goto out_iomapfree; | 1187 | goto out_iomapfree; |
1188 | 1188 | ||
1189 | dev_priv->wq = create_workqueue("i915"); | ||
1190 | if (dev_priv->wq == NULL) { | ||
1191 | DRM_ERROR("Failed to create our workqueue.\n"); | ||
1192 | ret = -ENOMEM; | ||
1193 | goto out_iomapfree; | ||
1194 | } | ||
1195 | |||
1189 | /* enable GEM by default */ | 1196 | /* enable GEM by default */ |
1190 | dev_priv->has_gem = 1; | 1197 | dev_priv->has_gem = 1; |
1191 | 1198 | ||
@@ -1211,7 +1218,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1211 | if (!I915_NEED_GFX_HWS(dev)) { | 1218 | if (!I915_NEED_GFX_HWS(dev)) { |
1212 | ret = i915_init_phys_hws(dev); | 1219 | ret = i915_init_phys_hws(dev); |
1213 | if (ret != 0) | 1220 | if (ret != 0) |
1214 | goto out_iomapfree; | 1221 | goto out_workqueue_free; |
1215 | } | 1222 | } |
1216 | 1223 | ||
1217 | i915_get_mem_freq(dev); | 1224 | i915_get_mem_freq(dev); |
@@ -1245,7 +1252,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1245 | ret = i915_load_modeset_init(dev, prealloc_size, agp_size); | 1252 | ret = i915_load_modeset_init(dev, prealloc_size, agp_size); |
1246 | if (ret < 0) { | 1253 | if (ret < 0) { |
1247 | DRM_ERROR("failed to init modeset\n"); | 1254 | DRM_ERROR("failed to init modeset\n"); |
1248 | goto out_rmmap; | 1255 | goto out_workqueue_free; |
1249 | } | 1256 | } |
1250 | } | 1257 | } |
1251 | 1258 | ||
@@ -1256,6 +1263,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1256 | 1263 | ||
1257 | return 0; | 1264 | return 0; |
1258 | 1265 | ||
1266 | out_workqueue_free: | ||
1267 | destroy_workqueue(dev_priv->wq); | ||
1259 | out_iomapfree: | 1268 | out_iomapfree: |
1260 | io_mapping_free(dev_priv->mm.gtt_mapping); | 1269 | io_mapping_free(dev_priv->mm.gtt_mapping); |
1261 | out_rmmap: | 1270 | out_rmmap: |
@@ -1269,6 +1278,8 @@ int i915_driver_unload(struct drm_device *dev) | |||
1269 | { | 1278 | { |
1270 | struct drm_i915_private *dev_priv = dev->dev_private; | 1279 | struct drm_i915_private *dev_priv = dev->dev_private; |
1271 | 1280 | ||
1281 | destroy_workqueue(dev_priv->wq); | ||
1282 | |||
1272 | io_mapping_free(dev_priv->mm.gtt_mapping); | 1283 | io_mapping_free(dev_priv->mm.gtt_mapping); |
1273 | if (dev_priv->mm.gtt_mtrr >= 0) { | 1284 | if (dev_priv->mm.gtt_mtrr >= 0) { |
1274 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | 1285 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, |
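The driver now creates its own workqueue early in i915_driver_load() and unwinds it through the new out_workqueue_free label when a later step fails, mirroring the reverse-of-setup ordering the other labels already follow. A standalone sketch of that unwind shape, with placeholder resources rather than the real i915 calls:

#include <stdio.h>
#include <stdlib.h>

struct wq { int dummy; };

static struct wq *create_wq(void)    { return malloc(sizeof(struct wq)); }
static void destroy_wq(struct wq *w) { free(w); }
static int init_hw(int fail)         { return fail ? -1 : 0; }

static int driver_load(int fail_hw)
{
    struct wq *wq;
    int ret;

    wq = create_wq();
    if (!wq)
        return -1;

    ret = init_hw(fail_hw);
    if (ret)
        goto out_wq_free;          /* unwind in reverse order of setup */

    printf("load ok\n");
    return 0;                      /* wq stays alive; the unload path destroys it */

out_wq_free:
    destroy_wq(wq);
    printf("load failed, workqueue destroyed\n");
    return ret;
}

int main(void)
{
    driver_load(0);                /* keeps its wq, as a loaded driver would */
    driver_load(1);
    return 0;
}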
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d08752875885..7537f57d8a87 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -219,6 +219,7 @@ typedef struct drm_i915_private { | |||
219 | unsigned int lvds_vbt:1; | 219 | unsigned int lvds_vbt:1; |
220 | unsigned int int_crt_support:1; | 220 | unsigned int int_crt_support:1; |
221 | unsigned int lvds_use_ssc:1; | 221 | unsigned int lvds_use_ssc:1; |
222 | unsigned int edp_support:1; | ||
222 | int lvds_ssc_freq; | 223 | int lvds_ssc_freq; |
223 | 224 | ||
224 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ | 225 | struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
@@ -229,6 +230,8 @@ typedef struct drm_i915_private { | |||
229 | 230 | ||
230 | spinlock_t error_lock; | 231 | spinlock_t error_lock; |
231 | struct drm_i915_error_state *first_error; | 232 | struct drm_i915_error_state *first_error; |
233 | struct work_struct error_work; | ||
234 | struct workqueue_struct *wq; | ||
232 | 235 | ||
233 | /* Register state */ | 236 | /* Register state */ |
234 | u8 saveLBB; | 237 | u8 saveLBB; |
@@ -888,6 +891,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
888 | IS_I915GM(dev))) | 891 | IS_I915GM(dev))) |
889 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 892 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
890 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 893 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
894 | #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) | ||
891 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) | 895 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) |
892 | /* dsparb controlled by hw only */ | 896 | /* dsparb controlled by hw only */ |
893 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | 897 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5bf420378b6d..140bee142fc2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1570,7 +1570,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | if (was_empty && !dev_priv->mm.suspended) | 1572 | if (was_empty && !dev_priv->mm.suspended) |
1573 | schedule_delayed_work(&dev_priv->mm.retire_work, HZ); | 1573 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
1574 | return seqno; | 1574 | return seqno; |
1575 | } | 1575 | } |
1576 | 1576 | ||
@@ -1719,7 +1719,7 @@ i915_gem_retire_work_handler(struct work_struct *work) | |||
1719 | i915_gem_retire_requests(dev); | 1719 | i915_gem_retire_requests(dev); |
1720 | if (!dev_priv->mm.suspended && | 1720 | if (!dev_priv->mm.suspended && |
1721 | !list_empty(&dev_priv->mm.request_list)) | 1721 | !list_empty(&dev_priv->mm.request_list)) |
1722 | schedule_delayed_work(&dev_priv->mm.retire_work, HZ); | 1722 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); |
1723 | mutex_unlock(&dev->struct_mutex); | 1723 | mutex_unlock(&dev->struct_mutex); |
1724 | } | 1724 | } |
1725 | 1725 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 9a44bfcb8139..cb3b97405fbf 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c | |||
@@ -343,6 +343,8 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
343 | 343 | ||
344 | error = dev_priv->first_error; | 344 | error = dev_priv->first_error; |
345 | 345 | ||
346 | seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, | ||
347 | error->time.tv_usec); | ||
346 | seq_printf(m, "EIR: 0x%08x\n", error->eir); | 348 | seq_printf(m, "EIR: 0x%08x\n", error->eir); |
347 | seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); | 349 | seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); |
348 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); | 350 | seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7ba23a69a0c0..7ebc84c2881e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -190,7 +190,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
190 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; | 190 | low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; |
191 | 191 | ||
192 | if (!i915_pipe_enabled(dev, pipe)) { | 192 | if (!i915_pipe_enabled(dev, pipe)) { |
193 | DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); | 193 | DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); |
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | 196 | ||
@@ -219,7 +219,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |||
219 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; | 219 | int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; |
220 | 220 | ||
221 | if (!i915_pipe_enabled(dev, pipe)) { | 221 | if (!i915_pipe_enabled(dev, pipe)) { |
222 | DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); | 222 | DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); |
223 | return 0; | 223 | return 0; |
224 | } | 224 | } |
225 | 225 | ||
@@ -290,6 +290,35 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | |||
290 | return ret; | 290 | return ret; |
291 | } | 291 | } |
292 | 292 | ||
293 | /** | ||
294 | * i915_error_work_func - do process context error handling work | ||
295 | * @work: work struct | ||
296 | * | ||
297 | * Fire an error uevent so userspace can see that a hang or error | ||
298 | * was detected. | ||
299 | */ | ||
300 | static void i915_error_work_func(struct work_struct *work) | ||
301 | { | ||
302 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | ||
303 | error_work); | ||
304 | struct drm_device *dev = dev_priv->dev; | ||
305 | char *event_string = "ERROR=1"; | ||
306 | char *envp[] = { event_string, NULL }; | ||
307 | |||
308 | DRM_DEBUG("generating error event\n"); | ||
309 | |||
310 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * i915_capture_error_state - capture an error record for later analysis | ||
315 | * @dev: drm device | ||
316 | * | ||
317 | * Should be called when an error is detected (either a hang or an error | ||
318 | * interrupt) to capture error state from the time of the error. Fills | ||
319 | * out a structure which becomes available in debugfs for user level tools | ||
320 | * to pick up. | ||
321 | */ | ||
293 | static void i915_capture_error_state(struct drm_device *dev) | 322 | static void i915_capture_error_state(struct drm_device *dev) |
294 | { | 323 | { |
295 | struct drm_i915_private *dev_priv = dev->dev_private; | 324 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -325,12 +354,137 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
325 | error->acthd = I915_READ(ACTHD_I965); | 354 | error->acthd = I915_READ(ACTHD_I965); |
326 | } | 355 | } |
327 | 356 | ||
357 | do_gettimeofday(&error->time); | ||
358 | |||
328 | dev_priv->first_error = error; | 359 | dev_priv->first_error = error; |
329 | 360 | ||
330 | out: | 361 | out: |
331 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 362 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
332 | } | 363 | } |
333 | 364 | ||
365 | /** | ||
366 | * i915_handle_error - handle an error interrupt | ||
367 | * @dev: drm device | ||
368 | * | ||
369 | * Do some basic checking of register state at error interrupt time and | ||
370 | * dump it to the syslog. Also call i915_capture_error_state() to make | ||
371 | * sure we get a record and make it available in debugfs. Fire a uevent | ||
372 | * so userspace knows something bad happened (should trigger collection | ||
373 | * of a ring dump etc.). | ||
374 | */ | ||
375 | static void i915_handle_error(struct drm_device *dev) | ||
376 | { | ||
377 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
378 | u32 eir = I915_READ(EIR); | ||
379 | u32 pipea_stats = I915_READ(PIPEASTAT); | ||
380 | u32 pipeb_stats = I915_READ(PIPEBSTAT); | ||
381 | |||
382 | i915_capture_error_state(dev); | ||
383 | |||
384 | printk(KERN_ERR "render error detected, EIR: 0x%08x\n", | ||
385 | eir); | ||
386 | |||
387 | if (IS_G4X(dev)) { | ||
388 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | ||
389 | u32 ipeir = I915_READ(IPEIR_I965); | ||
390 | |||
391 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
392 | I915_READ(IPEIR_I965)); | ||
393 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
394 | I915_READ(IPEHR_I965)); | ||
395 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
396 | I915_READ(INSTDONE_I965)); | ||
397 | printk(KERN_ERR " INSTPS: 0x%08x\n", | ||
398 | I915_READ(INSTPS)); | ||
399 | printk(KERN_ERR " INSTDONE1: 0x%08x\n", | ||
400 | I915_READ(INSTDONE1)); | ||
401 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
402 | I915_READ(ACTHD_I965)); | ||
403 | I915_WRITE(IPEIR_I965, ipeir); | ||
404 | (void)I915_READ(IPEIR_I965); | ||
405 | } | ||
406 | if (eir & GM45_ERROR_PAGE_TABLE) { | ||
407 | u32 pgtbl_err = I915_READ(PGTBL_ER); | ||
408 | printk(KERN_ERR "page table error\n"); | ||
409 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | ||
410 | pgtbl_err); | ||
411 | I915_WRITE(PGTBL_ER, pgtbl_err); | ||
412 | (void)I915_READ(PGTBL_ER); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | if (IS_I9XX(dev)) { | ||
417 | if (eir & I915_ERROR_PAGE_TABLE) { | ||
418 | u32 pgtbl_err = I915_READ(PGTBL_ER); | ||
419 | printk(KERN_ERR "page table error\n"); | ||
420 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | ||
421 | pgtbl_err); | ||
422 | I915_WRITE(PGTBL_ER, pgtbl_err); | ||
423 | (void)I915_READ(PGTBL_ER); | ||
424 | } | ||
425 | } | ||
426 | |||
427 | if (eir & I915_ERROR_MEMORY_REFRESH) { | ||
428 | printk(KERN_ERR "memory refresh error\n"); | ||
429 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", | ||
430 | pipea_stats); | ||
431 | printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", | ||
432 | pipeb_stats); | ||
433 | /* pipestat has already been acked */ | ||
434 | } | ||
435 | if (eir & I915_ERROR_INSTRUCTION) { | ||
436 | printk(KERN_ERR "instruction error\n"); | ||
437 | printk(KERN_ERR " INSTPM: 0x%08x\n", | ||
438 | I915_READ(INSTPM)); | ||
439 | if (!IS_I965G(dev)) { | ||
440 | u32 ipeir = I915_READ(IPEIR); | ||
441 | |||
442 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
443 | I915_READ(IPEIR)); | ||
444 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
445 | I915_READ(IPEHR)); | ||
446 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
447 | I915_READ(INSTDONE)); | ||
448 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
449 | I915_READ(ACTHD)); | ||
450 | I915_WRITE(IPEIR, ipeir); | ||
451 | (void)I915_READ(IPEIR); | ||
452 | } else { | ||
453 | u32 ipeir = I915_READ(IPEIR_I965); | ||
454 | |||
455 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
456 | I915_READ(IPEIR_I965)); | ||
457 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
458 | I915_READ(IPEHR_I965)); | ||
459 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
460 | I915_READ(INSTDONE_I965)); | ||
461 | printk(KERN_ERR " INSTPS: 0x%08x\n", | ||
462 | I915_READ(INSTPS)); | ||
463 | printk(KERN_ERR " INSTDONE1: 0x%08x\n", | ||
464 | I915_READ(INSTDONE1)); | ||
465 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
466 | I915_READ(ACTHD_I965)); | ||
467 | I915_WRITE(IPEIR_I965, ipeir); | ||
468 | (void)I915_READ(IPEIR_I965); | ||
469 | } | ||
470 | } | ||
471 | |||
472 | I915_WRITE(EIR, eir); | ||
473 | (void)I915_READ(EIR); | ||
474 | eir = I915_READ(EIR); | ||
475 | if (eir) { | ||
476 | /* | ||
477 | * some errors might have become stuck, | ||
478 | * mask them. | ||
479 | */ | ||
480 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | ||
481 | I915_WRITE(EMR, I915_READ(EMR) | eir); | ||
482 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | ||
483 | } | ||
484 | |||
485 | queue_work(dev_priv->wq, &dev_priv->error_work); | ||
486 | } | ||
487 | |||
334 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 488 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
335 | { | 489 | { |
336 | struct drm_device *dev = (struct drm_device *) arg; | 490 | struct drm_device *dev = (struct drm_device *) arg; |
@@ -372,6 +526,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
372 | pipea_stats = I915_READ(PIPEASTAT); | 526 | pipea_stats = I915_READ(PIPEASTAT); |
373 | pipeb_stats = I915_READ(PIPEBSTAT); | 527 | pipeb_stats = I915_READ(PIPEBSTAT); |
374 | 528 | ||
529 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | ||
530 | i915_handle_error(dev); | ||
531 | |||
375 | /* | 532 | /* |
376 | * Clear the PIPE(A|B)STAT regs before the IIR | 533 | * Clear the PIPE(A|B)STAT regs before the IIR |
377 | */ | 534 | */ |
@@ -403,86 +560,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | |||
403 | DRM_DEBUG("hotplug event received, stat 0x%08x\n", | 560 | DRM_DEBUG("hotplug event received, stat 0x%08x\n", |
404 | hotplug_status); | 561 | hotplug_status); |
405 | if (hotplug_status & dev_priv->hotplug_supported_mask) | 562 | if (hotplug_status & dev_priv->hotplug_supported_mask) |
406 | schedule_work(&dev_priv->hotplug_work); | 563 | queue_work(dev_priv->wq, |
564 | &dev_priv->hotplug_work); | ||
407 | 565 | ||
408 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 566 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
409 | I915_READ(PORT_HOTPLUG_STAT); | 567 | I915_READ(PORT_HOTPLUG_STAT); |
410 | } | 568 | } |
411 | 569 | ||
412 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) { | ||
413 | u32 eir = I915_READ(EIR); | ||
414 | |||
415 | i915_capture_error_state(dev); | ||
416 | |||
417 | printk(KERN_ERR "render error detected, EIR: 0x%08x\n", | ||
418 | eir); | ||
419 | if (eir & I915_ERROR_PAGE_TABLE) { | ||
420 | u32 pgtbl_err = I915_READ(PGTBL_ER); | ||
421 | printk(KERN_ERR "page table error\n"); | ||
422 | printk(KERN_ERR " PGTBL_ER: 0x%08x\n", | ||
423 | pgtbl_err); | ||
424 | I915_WRITE(PGTBL_ER, pgtbl_err); | ||
425 | (void)I915_READ(PGTBL_ER); | ||
426 | } | ||
427 | if (eir & I915_ERROR_MEMORY_REFRESH) { | ||
428 | printk(KERN_ERR "memory refresh error\n"); | ||
429 | printk(KERN_ERR "PIPEASTAT: 0x%08x\n", | ||
430 | pipea_stats); | ||
431 | printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", | ||
432 | pipeb_stats); | ||
433 | /* pipestat has already been acked */ | ||
434 | } | ||
435 | if (eir & I915_ERROR_INSTRUCTION) { | ||
436 | printk(KERN_ERR "instruction error\n"); | ||
437 | printk(KERN_ERR " INSTPM: 0x%08x\n", | ||
438 | I915_READ(INSTPM)); | ||
439 | if (!IS_I965G(dev)) { | ||
440 | u32 ipeir = I915_READ(IPEIR); | ||
441 | |||
442 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
443 | I915_READ(IPEIR)); | ||
444 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
445 | I915_READ(IPEHR)); | ||
446 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
447 | I915_READ(INSTDONE)); | ||
448 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
449 | I915_READ(ACTHD)); | ||
450 | I915_WRITE(IPEIR, ipeir); | ||
451 | (void)I915_READ(IPEIR); | ||
452 | } else { | ||
453 | u32 ipeir = I915_READ(IPEIR_I965); | ||
454 | |||
455 | printk(KERN_ERR " IPEIR: 0x%08x\n", | ||
456 | I915_READ(IPEIR_I965)); | ||
457 | printk(KERN_ERR " IPEHR: 0x%08x\n", | ||
458 | I915_READ(IPEHR_I965)); | ||
459 | printk(KERN_ERR " INSTDONE: 0x%08x\n", | ||
460 | I915_READ(INSTDONE_I965)); | ||
461 | printk(KERN_ERR " INSTPS: 0x%08x\n", | ||
462 | I915_READ(INSTPS)); | ||
463 | printk(KERN_ERR " INSTDONE1: 0x%08x\n", | ||
464 | I915_READ(INSTDONE1)); | ||
465 | printk(KERN_ERR " ACTHD: 0x%08x\n", | ||
466 | I915_READ(ACTHD_I965)); | ||
467 | I915_WRITE(IPEIR_I965, ipeir); | ||
468 | (void)I915_READ(IPEIR_I965); | ||
469 | } | ||
470 | } | ||
471 | |||
472 | I915_WRITE(EIR, eir); | ||
473 | (void)I915_READ(EIR); | ||
474 | eir = I915_READ(EIR); | ||
475 | if (eir) { | ||
476 | /* | ||
477 | * some errors might have become stuck, | ||
478 | * mask them. | ||
479 | */ | ||
480 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | ||
481 | I915_WRITE(EMR, I915_READ(EMR) | eir); | ||
482 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | ||
483 | } | ||
484 | } | ||
485 | |||
486 | I915_WRITE(IIR, iir); | 570 | I915_WRITE(IIR, iir); |
487 | new_iir = I915_READ(IIR); /* Flush posted writes */ | 571 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
488 | 572 | ||
@@ -830,6 +914,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
830 | atomic_set(&dev_priv->irq_received, 0); | 914 | atomic_set(&dev_priv->irq_received, 0); |
831 | 915 | ||
832 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | 916 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
917 | INIT_WORK(&dev_priv->error_work, i915_error_work_func); | ||
833 | 918 | ||
834 | if (IS_IGDNG(dev)) { | 919 | if (IS_IGDNG(dev)) { |
835 | igdng_irq_preinstall(dev); | 920 | igdng_irq_preinstall(dev); |
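i915_error_work_func() recovers the driver-private structure from the embedded work_struct it is handed and then raises the uevent from process context. The container_of() step is the part worth seeing in isolation; the sketch below is a self-contained userspace version with stand-in types, not the kernel's definitions:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct device_private {
    int error_count;
    struct work_struct error_work;   /* embedded work item */
};

static void error_work_func(struct work_struct *work)
{
    struct device_private *priv =
        container_of(work, struct device_private, error_work);

    priv->error_count++;
    printf("error event %d reported from process context\n",
           priv->error_count);
}

int main(void)
{
    struct device_private priv = { .error_count = 0 };

    /* a real driver would queue_work(); here we just call the handler */
    error_work_func(&priv.error_work);
    return 0;
}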
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6c0858484094..2955083aa471 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1395,6 +1395,7 @@ | |||
1395 | #define TV_V_CHROMA_42 0x684a8 | 1395 | #define TV_V_CHROMA_42 0x684a8 |
1396 | 1396 | ||
1397 | /* Display Port */ | 1397 | /* Display Port */ |
1398 | #define DP_A 0x64000 /* eDP */ | ||
1398 | #define DP_B 0x64100 | 1399 | #define DP_B 0x64100 |
1399 | #define DP_C 0x64200 | 1400 | #define DP_C 0x64200 |
1400 | #define DP_D 0x64300 | 1401 | #define DP_D 0x64300 |
@@ -1437,13 +1438,22 @@ | |||
1437 | /* Mystic DPCD version 1.1 special mode */ | 1438 | /* Mystic DPCD version 1.1 special mode */ |
1438 | #define DP_ENHANCED_FRAMING (1 << 18) | 1439 | #define DP_ENHANCED_FRAMING (1 << 18) |
1439 | 1440 | ||
1441 | /* eDP */ | ||
1442 | #define DP_PLL_FREQ_270MHZ (0 << 16) | ||
1443 | #define DP_PLL_FREQ_160MHZ (1 << 16) | ||
1444 | #define DP_PLL_FREQ_MASK (3 << 16) | ||
1445 | |||
1440 | /** locked once port is enabled */ | 1446 | /** locked once port is enabled */ |
1441 | #define DP_PORT_REVERSAL (1 << 15) | 1447 | #define DP_PORT_REVERSAL (1 << 15) |
1442 | 1448 | ||
1449 | /* eDP */ | ||
1450 | #define DP_PLL_ENABLE (1 << 14) | ||
1451 | |||
1443 | /** sends the clock on lane 15 of the PEG for debug */ | 1452 | /** sends the clock on lane 15 of the PEG for debug */ |
1444 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) | 1453 | #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) |
1445 | 1454 | ||
1446 | #define DP_SCRAMBLING_DISABLE (1 << 12) | 1455 | #define DP_SCRAMBLING_DISABLE (1 << 12) |
1456 | #define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7) | ||
1447 | 1457 | ||
1448 | /** limit RGB values to avoid confusing TVs */ | 1458 | /** limit RGB values to avoid confusing TVs */ |
1449 | #define DP_COLOR_RANGE_16_235 (1 << 8) | 1459 | #define DP_COLOR_RANGE_16_235 (1 << 8) |
@@ -1463,6 +1473,13 @@ | |||
1463 | * is 20 bytes in each direction, hence the 5 fixed | 1473 | * is 20 bytes in each direction, hence the 5 fixed |
1464 | * data registers | 1474 | * data registers |
1465 | */ | 1475 | */ |
1476 | #define DPA_AUX_CH_CTL 0x64010 | ||
1477 | #define DPA_AUX_CH_DATA1 0x64014 | ||
1478 | #define DPA_AUX_CH_DATA2 0x64018 | ||
1479 | #define DPA_AUX_CH_DATA3 0x6401c | ||
1480 | #define DPA_AUX_CH_DATA4 0x64020 | ||
1481 | #define DPA_AUX_CH_DATA5 0x64024 | ||
1482 | |||
1466 | #define DPB_AUX_CH_CTL 0x64110 | 1483 | #define DPB_AUX_CH_CTL 0x64110 |
1467 | #define DPB_AUX_CH_DATA1 0x64114 | 1484 | #define DPB_AUX_CH_DATA1 0x64114 |
1468 | #define DPB_AUX_CH_DATA2 0x64118 | 1485 | #define DPB_AUX_CH_DATA2 0x64118 |
@@ -1618,7 +1635,7 @@ | |||
1618 | #define I830_FIFO_LINE_SIZE 32 | 1635 | #define I830_FIFO_LINE_SIZE 32 |
1619 | #define I945_FIFO_SIZE 127 /* 945 & 965 */ | 1636 | #define I945_FIFO_SIZE 127 /* 945 & 965 */ |
1620 | #define I915_FIFO_SIZE 95 | 1637 | #define I915_FIFO_SIZE 95 |
1621 | #define I855GM_FIFO_SIZE 255 | 1638 | #define I855GM_FIFO_SIZE 127 /* In cachelines */ |
1622 | #define I830_FIFO_SIZE 95 | 1639 | #define I830_FIFO_SIZE 95 |
1623 | #define I915_MAX_WM 0x3f | 1640 | #define I915_MAX_WM 0x3f |
1624 | 1641 | ||
@@ -1848,6 +1865,8 @@ | |||
1848 | #define PFA_CTL_1 0x68080 | 1865 | #define PFA_CTL_1 0x68080 |
1849 | #define PFB_CTL_1 0x68880 | 1866 | #define PFB_CTL_1 0x68880 |
1850 | #define PF_ENABLE (1<<31) | 1867 | #define PF_ENABLE (1<<31) |
1868 | #define PFA_WIN_SZ 0x68074 | ||
1869 | #define PFB_WIN_SZ 0x68874 | ||
1851 | 1870 | ||
1852 | /* legacy palette */ | 1871 | /* legacy palette */ |
1853 | #define LGC_PALETTE_A 0x4a000 | 1872 | #define LGC_PALETTE_A 0x4a000 |
@@ -2208,4 +2227,28 @@ | |||
2208 | #define PCH_PP_OFF_DELAYS 0xc720c | 2227 | #define PCH_PP_OFF_DELAYS 0xc720c |
2209 | #define PCH_PP_DIVISOR 0xc7210 | 2228 | #define PCH_PP_DIVISOR 0xc7210 |
2210 | 2229 | ||
2230 | #define PCH_DP_B 0xe4100 | ||
2231 | #define PCH_DPB_AUX_CH_CTL 0xe4110 | ||
2232 | #define PCH_DPB_AUX_CH_DATA1 0xe4114 | ||
2233 | #define PCH_DPB_AUX_CH_DATA2 0xe4118 | ||
2234 | #define PCH_DPB_AUX_CH_DATA3 0xe411c | ||
2235 | #define PCH_DPB_AUX_CH_DATA4 0xe4120 | ||
2236 | #define PCH_DPB_AUX_CH_DATA5 0xe4124 | ||
2237 | |||
2238 | #define PCH_DP_C 0xe4200 | ||
2239 | #define PCH_DPC_AUX_CH_CTL 0xe4210 | ||
2240 | #define PCH_DPC_AUX_CH_DATA1 0xe4214 | ||
2241 | #define PCH_DPC_AUX_CH_DATA2 0xe4218 | ||
2242 | #define PCH_DPC_AUX_CH_DATA3 0xe421c | ||
2243 | #define PCH_DPC_AUX_CH_DATA4 0xe4220 | ||
2244 | #define PCH_DPC_AUX_CH_DATA5 0xe4224 | ||
2245 | |||
2246 | #define PCH_DP_D 0xe4300 | ||
2247 | #define PCH_DPD_AUX_CH_CTL 0xe4310 | ||
2248 | #define PCH_DPD_AUX_CH_DATA1 0xe4314 | ||
2249 | #define PCH_DPD_AUX_CH_DATA2 0xe4318 | ||
2250 | #define PCH_DPD_AUX_CH_DATA3 0xe431c | ||
2251 | #define PCH_DPD_AUX_CH_DATA4 0xe4320 | ||
2252 | #define PCH_DPD_AUX_CH_DATA5 0xe4324 | ||
2253 | |||
2211 | #endif /* _I915_REG_H_ */ | 2254 | #endif /* _I915_REG_H_ */ |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 9e1d16e5c3ea..1d04e1904ac6 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -598,7 +598,7 @@ int i915_restore_state(struct drm_device *dev) | |||
598 | 598 | ||
599 | for (i = 0; i < 16; i++) { | 599 | for (i = 0; i < 16; i++) { |
600 | I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); | 600 | I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); |
601 | I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); | 601 | I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); |
602 | } | 602 | } |
603 | for (i = 0; i < 3; i++) | 603 | for (i = 0; i < 3; i++) |
604 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | 604 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 7cc447191028..300aee3296c2 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -97,14 +97,13 @@ static void | |||
97 | parse_lfp_panel_data(struct drm_i915_private *dev_priv, | 97 | parse_lfp_panel_data(struct drm_i915_private *dev_priv, |
98 | struct bdb_header *bdb) | 98 | struct bdb_header *bdb) |
99 | { | 99 | { |
100 | struct drm_device *dev = dev_priv->dev; | ||
101 | struct bdb_lvds_options *lvds_options; | 100 | struct bdb_lvds_options *lvds_options; |
102 | struct bdb_lvds_lfp_data *lvds_lfp_data; | 101 | struct bdb_lvds_lfp_data *lvds_lfp_data; |
103 | struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; | 102 | struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; |
104 | struct bdb_lvds_lfp_data_entry *entry; | 103 | struct bdb_lvds_lfp_data_entry *entry; |
105 | struct lvds_dvo_timing *dvo_timing; | 104 | struct lvds_dvo_timing *dvo_timing; |
106 | struct drm_display_mode *panel_fixed_mode; | 105 | struct drm_display_mode *panel_fixed_mode; |
107 | int lfp_data_size; | 106 | int lfp_data_size, dvo_timing_offset; |
108 | 107 | ||
109 | /* Defaults if we can't find VBT info */ | 108 | /* Defaults if we can't find VBT info */ |
110 | dev_priv->lvds_dither = 0; | 109 | dev_priv->lvds_dither = 0; |
@@ -133,14 +132,16 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
133 | entry = (struct bdb_lvds_lfp_data_entry *) | 132 | entry = (struct bdb_lvds_lfp_data_entry *) |
134 | ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * | 133 | ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * |
135 | lvds_options->panel_type)); | 134 | lvds_options->panel_type)); |
135 | dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - | ||
136 | lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; | ||
136 | 137 | ||
137 | /* On IGDNG mobile, LVDS data block removes panel fitting registers. | 138 | /* |
138 | So dec 2 dword from dvo_timing offset */ | 139 | * the size of fp_timing varies on different platforms. |
139 | if (IS_IGDNG(dev)) | 140 | * So calculate the DVO timing relative offset in LVDS data |
140 | dvo_timing = (struct lvds_dvo_timing *) | 141 | * entry to get the DVO timing entry |
141 | ((u8 *)&entry->dvo_timing - 8); | 142 | */ |
142 | else | 143 | dvo_timing = (struct lvds_dvo_timing *) |
143 | dvo_timing = &entry->dvo_timing; | 144 | ((unsigned char *)entry + dvo_timing_offset); |
144 | 145 | ||
145 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); | 146 | panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); |
146 | 147 | ||
@@ -295,6 +296,25 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
295 | } | 296 | } |
296 | return; | 297 | return; |
297 | } | 298 | } |
299 | |||
300 | static void | ||
301 | parse_driver_features(struct drm_i915_private *dev_priv, | ||
302 | struct bdb_header *bdb) | ||
303 | { | ||
304 | struct drm_device *dev = dev_priv->dev; | ||
305 | struct bdb_driver_features *driver; | ||
306 | |||
307 | /* set default for chips without eDP */ | ||
308 | if (!SUPPORTS_EDP(dev)) { | ||
309 | dev_priv->edp_support = 0; | ||
310 | return; | ||
311 | } | ||
312 | |||
313 | driver = find_section(bdb, BDB_DRIVER_FEATURES); | ||
314 | if (driver && driver->lvds_config == BDB_DRIVER_FEATURE_EDP) | ||
315 | dev_priv->edp_support = 1; | ||
316 | } | ||
317 | |||
298 | /** | 318 | /** |
299 | * intel_init_bios - initialize VBIOS settings & find VBT | 319 | * intel_init_bios - initialize VBIOS settings & find VBT |
300 | * @dev: DRM device | 320 | * @dev: DRM device |
@@ -345,6 +365,8 @@ intel_init_bios(struct drm_device *dev) | |||
345 | parse_lfp_panel_data(dev_priv, bdb); | 365 | parse_lfp_panel_data(dev_priv, bdb); |
346 | parse_sdvo_panel_data(dev_priv, bdb); | 366 | parse_sdvo_panel_data(dev_priv, bdb); |
347 | parse_sdvo_device_mapping(dev_priv, bdb); | 367 | parse_sdvo_device_mapping(dev_priv, bdb); |
368 | parse_driver_features(dev_priv, bdb); | ||
369 | |||
348 | pci_unmap_rom(pdev, bios); | 370 | pci_unmap_rom(pdev, bios); |
349 | 371 | ||
350 | return 0; | 372 | return 0; |
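parse_lfp_panel_data() now derives the position of the DVO timing block from the difference of two offsets recorded in the VBT pointer table instead of special-casing IGDNG, since fp_timing is not the same size on every platform. The arithmetic reduces to the following standalone example, with an invented layout and offset values:

#include <stdio.h>

struct data_ptrs { int fp_timing_offset; int dvo_timing_offset; };

int main(void)
{
    /* offsets as they might appear in a VBT pointer table */
    struct data_ptrs ptrs = { .fp_timing_offset = 100,
                              .dvo_timing_offset = 146 };
    unsigned char entry[256];   /* one LVDS data entry, starting at fp_timing */
    int dvo_timing_offset;
    unsigned char *dvo_timing;

    dvo_timing_offset = ptrs.dvo_timing_offset - ptrs.fp_timing_offset;
    dvo_timing = entry + dvo_timing_offset;

    printf("dvo timing lives %d bytes into the entry (%p)\n",
           dvo_timing_offset, (void *)dvo_timing);
    return 0;
}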
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index fe72e1c225d8..0f8e5f69ac7a 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -381,6 +381,51 @@ struct bdb_sdvo_lvds_options { | |||
381 | } __attribute__((packed)); | 381 | } __attribute__((packed)); |
382 | 382 | ||
383 | 383 | ||
384 | #define BDB_DRIVER_FEATURE_NO_LVDS 0 | ||
385 | #define BDB_DRIVER_FEATURE_INT_LVDS 1 | ||
386 | #define BDB_DRIVER_FEATURE_SDVO_LVDS 2 | ||
387 | #define BDB_DRIVER_FEATURE_EDP 3 | ||
388 | |||
389 | struct bdb_driver_features { | ||
390 | u8 boot_dev_algorithm:1; | ||
391 | u8 block_display_switch:1; | ||
392 | u8 allow_display_switch:1; | ||
393 | u8 hotplug_dvo:1; | ||
394 | u8 dual_view_zoom:1; | ||
395 | u8 int15h_hook:1; | ||
396 | u8 sprite_in_clone:1; | ||
397 | u8 primary_lfp_id:1; | ||
398 | |||
399 | u16 boot_mode_x; | ||
400 | u16 boot_mode_y; | ||
401 | u8 boot_mode_bpp; | ||
402 | u8 boot_mode_refresh; | ||
403 | |||
404 | u16 enable_lfp_primary:1; | ||
405 | u16 selective_mode_pruning:1; | ||
406 | u16 dual_frequency:1; | ||
407 | u16 render_clock_freq:1; /* 0: high freq; 1: low freq */ | ||
408 | u16 nt_clone_support:1; | ||
409 | u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */ | ||
410 | u16 sprite_display_assign:1; /* 0: secondary; 1: primary */ | ||
411 | u16 cui_aspect_scaling:1; | ||
412 | u16 preserve_aspect_ratio:1; | ||
413 | u16 sdvo_device_power_down:1; | ||
414 | u16 crt_hotplug:1; | ||
415 | u16 lvds_config:2; | ||
416 | u16 tv_hotplug:1; | ||
417 | u16 hdmi_config:2; | ||
418 | |||
419 | u8 static_display:1; | ||
420 | u8 reserved2:7; | ||
421 | u16 legacy_crt_max_x; | ||
422 | u16 legacy_crt_max_y; | ||
423 | u8 legacy_crt_max_refresh; | ||
424 | |||
425 | u8 hdmi_termination; | ||
426 | u8 custom_vbt_version; | ||
427 | } __attribute__((packed)); | ||
428 | |||
384 | bool intel_init_bios(struct drm_device *dev); | 429 | bool intel_init_bios(struct drm_device *dev); |
385 | 430 | ||
386 | /* | 431 | /* |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index d6a1a6e5539a..4cf8e2e88a40 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -156,6 +156,9 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
156 | 156 | ||
157 | temp = adpa = I915_READ(PCH_ADPA); | 157 | temp = adpa = I915_READ(PCH_ADPA); |
158 | 158 | ||
159 | adpa &= ~ADPA_DAC_ENABLE; | ||
160 | I915_WRITE(PCH_ADPA, adpa); | ||
161 | |||
159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 162 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
160 | 163 | ||
161 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 164 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
@@ -169,13 +172,14 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | |||
169 | DRM_DEBUG("pch crt adpa 0x%x", adpa); | 172 | DRM_DEBUG("pch crt adpa 0x%x", adpa); |
170 | I915_WRITE(PCH_ADPA, adpa); | 173 | I915_WRITE(PCH_ADPA, adpa); |
171 | 174 | ||
172 | /* This might not be needed as not specified in spec...*/ | 175 | while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) |
173 | udelay(1000); | 176 | ; |
174 | 177 | ||
175 | /* Check the status to see if both blue and green are on now */ | 178 | /* Check the status to see if both blue and green are on now */ |
176 | adpa = I915_READ(PCH_ADPA); | 179 | adpa = I915_READ(PCH_ADPA); |
177 | if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) == | 180 | adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; |
178 | ADPA_CRT_HOTPLUG_MONITOR_COLOR) | 181 | if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || |
182 | (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) | ||
179 | ret = true; | 183 | ret = true; |
180 | else | 184 | else |
181 | ret = false; | 185 | ret = false; |
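The hotplug-detect change replaces a fixed udelay() with a poll on the force-trigger bit, and accepts a monochrome monitor as a valid detection result. The sketch below shows the polling idea in plain C with a retry bound added for the standalone program; the kernel hunk itself loops until the hardware clears the bit:

#include <stdio.h>

static int reg;                        /* stand-in for the ADPA register */

static int read_reg(void) { return reg; }

#define FORCE_TRIGGER (1 << 16)

static int wait_for_trigger_clear(int max_tries)
{
    int tries;

    for (tries = 0; tries < max_tries; tries++) {
        if (!(read_reg() & FORCE_TRIGGER))
            return 0;           /* hardware finished the detection cycle */
        /* a real driver would udelay() or cpu_relax() here */
    }
    return -1;                  /* timed out */
}

int main(void)
{
    reg = FORCE_TRIGGER;
    printf("busy poll: %d\n", wait_for_trigger_clear(3));  /* -1, still set */
    reg = 0;
    printf("cleared:   %d\n", wait_for_trigger_clear(3));  /*  0, done      */
    return 0;
}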
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 508838ee31e0..d6fce2133413 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -34,6 +34,8 @@ | |||
34 | 34 | ||
35 | #include "drm_crtc_helper.h" | 35 | #include "drm_crtc_helper.h" |
36 | 36 | ||
37 | #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) | ||
38 | |||
37 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); | 39 | bool intel_pipe_has_type (struct drm_crtc *crtc, int type); |
38 | static void intel_update_watermarks(struct drm_device *dev); | 40 | static void intel_update_watermarks(struct drm_device *dev); |
39 | 41 | ||
@@ -88,7 +90,7 @@ struct intel_limit { | |||
88 | #define I8XX_P2_SLOW 4 | 90 | #define I8XX_P2_SLOW 4 |
89 | #define I8XX_P2_FAST 2 | 91 | #define I8XX_P2_FAST 2 |
90 | #define I8XX_P2_LVDS_SLOW 14 | 92 | #define I8XX_P2_LVDS_SLOW 14 |
91 | #define I8XX_P2_LVDS_FAST 14 /* No fast option */ | 93 | #define I8XX_P2_LVDS_FAST 7 |
92 | #define I8XX_P2_SLOW_LIMIT 165000 | 94 | #define I8XX_P2_SLOW_LIMIT 165000 |
93 | 95 | ||
94 | #define I9XX_DOT_MIN 20000 | 96 | #define I9XX_DOT_MIN 20000 |
@@ -268,6 +270,9 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
268 | static bool | 270 | static bool |
269 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | 271 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
270 | int target, int refclk, intel_clock_t *best_clock); | 272 | int target, int refclk, intel_clock_t *best_clock); |
273 | static bool | ||
274 | intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, | ||
275 | int target, int refclk, intel_clock_t *best_clock); | ||
271 | 276 | ||
272 | static const intel_limit_t intel_limits_i8xx_dvo = { | 277 | static const intel_limit_t intel_limits_i8xx_dvo = { |
273 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 278 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
@@ -598,6 +603,23 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
598 | return false; | 603 | return false; |
599 | } | 604 | } |
600 | 605 | ||
606 | struct drm_connector * | ||
607 | intel_pipe_get_output (struct drm_crtc *crtc) | ||
608 | { | ||
609 | struct drm_device *dev = crtc->dev; | ||
610 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
611 | struct drm_connector *l_entry, *ret = NULL; | ||
612 | |||
613 | list_for_each_entry(l_entry, &mode_config->connector_list, head) { | ||
614 | if (l_entry->encoder && | ||
615 | l_entry->encoder->crtc == crtc) { | ||
616 | ret = l_entry; | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | return ret; | ||
621 | } | ||
622 | |||
601 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) | 623 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
602 | /** | 624 | /** |
603 | * Returns whether the given set of divisors are valid for a given refclk with | 625 | * Returns whether the given set of divisors are valid for a given refclk with |
@@ -645,7 +667,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
645 | int err = target; | 667 | int err = target; |
646 | 668 | ||
647 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | 669 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
648 | (I915_READ(LVDS) & LVDS_PORT_EN) != 0) { | 670 | (I915_READ(LVDS)) != 0) { |
649 | /* | 671 | /* |
650 | * For LVDS, if the panel is on, just rely on its current | 672 | * For LVDS, if the panel is on, just rely on its current |
651 | * settings for dual-channel. We haven't figured out how to | 673 | * settings for dual-channel. We haven't figured out how to |
@@ -752,6 +774,30 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
752 | } | 774 | } |
753 | 775 | ||
754 | static bool | 776 | static bool |
777 | intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
778 | int target, int refclk, intel_clock_t *best_clock) | ||
779 | { | ||
780 | struct drm_device *dev = crtc->dev; | ||
781 | intel_clock_t clock; | ||
782 | if (target < 200000) { | ||
783 | clock.n = 1; | ||
784 | clock.p1 = 2; | ||
785 | clock.p2 = 10; | ||
786 | clock.m1 = 12; | ||
787 | clock.m2 = 9; | ||
788 | } else { | ||
789 | clock.n = 2; | ||
790 | clock.p1 = 1; | ||
791 | clock.p2 = 10; | ||
792 | clock.m1 = 14; | ||
793 | clock.m2 = 8; | ||
794 | } | ||
795 | intel_clock(dev, refclk, &clock); | ||
796 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | ||
797 | return true; | ||
798 | } | ||
799 | |||
800 | static bool | ||
755 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 801 | intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
756 | int target, int refclk, intel_clock_t *best_clock) | 802 | int target, int refclk, intel_clock_t *best_clock) |
757 | { | 803 | { |
@@ -763,6 +809,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
763 | int err_most = 47; | 809 | int err_most = 47; |
764 | found = false; | 810 | found = false; |
765 | 811 | ||
812 | /* eDP has only 2 clock choices, no n/m/p setting */ | ||
813 | if (HAS_eDP) | ||
814 | return true; | ||
815 | |||
816 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | ||
817 | return intel_find_pll_igdng_dp(limit, crtc, target, | ||
818 | refclk, best_clock); | ||
819 | |||
766 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 820 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
767 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 821 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == |
768 | LVDS_CLKB_POWER_UP) | 822 | LVDS_CLKB_POWER_UP) |
@@ -998,6 +1052,90 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
998 | return 0; | 1052 | return 0; |
999 | } | 1053 | } |
1000 | 1054 | ||
1055 | /* Disable the VGA plane that we never use */ | ||
1056 | static void i915_disable_vga (struct drm_device *dev) | ||
1057 | { | ||
1058 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1059 | u8 sr1; | ||
1060 | u32 vga_reg; | ||
1061 | |||
1062 | if (IS_IGDNG(dev)) | ||
1063 | vga_reg = CPU_VGACNTRL; | ||
1064 | else | ||
1065 | vga_reg = VGACNTRL; | ||
1066 | |||
1067 | if (I915_READ(vga_reg) & VGA_DISP_DISABLE) | ||
1068 | return; | ||
1069 | |||
1070 | I915_WRITE8(VGA_SR_INDEX, 1); | ||
1071 | sr1 = I915_READ8(VGA_SR_DATA); | ||
1072 | I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5)); | ||
1073 | udelay(100); | ||
1074 | |||
1075 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); | ||
1076 | } | ||
1077 | |||
1078 | static void igdng_disable_pll_edp (struct drm_crtc *crtc) | ||
1079 | { | ||
1080 | struct drm_device *dev = crtc->dev; | ||
1081 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1082 | u32 dpa_ctl; | ||
1083 | |||
1084 | DRM_DEBUG("\n"); | ||
1085 | dpa_ctl = I915_READ(DP_A); | ||
1086 | dpa_ctl &= ~DP_PLL_ENABLE; | ||
1087 | I915_WRITE(DP_A, dpa_ctl); | ||
1088 | } | ||
1089 | |||
1090 | static void igdng_enable_pll_edp (struct drm_crtc *crtc) | ||
1091 | { | ||
1092 | struct drm_device *dev = crtc->dev; | ||
1093 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1094 | u32 dpa_ctl; | ||
1095 | |||
1096 | dpa_ctl = I915_READ(DP_A); | ||
1097 | dpa_ctl |= DP_PLL_ENABLE; | ||
1098 | I915_WRITE(DP_A, dpa_ctl); | ||
1099 | udelay(200); | ||
1100 | } | ||
1101 | |||
1102 | |||
1103 | static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) | ||
1104 | { | ||
1105 | struct drm_device *dev = crtc->dev; | ||
1106 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1107 | u32 dpa_ctl; | ||
1108 | |||
1109 | DRM_DEBUG("eDP PLL enable for clock %d\n", clock); | ||
1110 | dpa_ctl = I915_READ(DP_A); | ||
1111 | dpa_ctl &= ~DP_PLL_FREQ_MASK; | ||
1112 | |||
1113 | if (clock < 200000) { | ||
1114 | u32 temp; | ||
1115 | dpa_ctl |= DP_PLL_FREQ_160MHZ; | ||
1116 | /* workaround for 160MHz: | ||
1117 | 1) program 0x4600c bits 15:0 = 0x8124 | ||
1118 | 2) program 0x46010 bit 0 = 1 | ||
1119 | 3) program 0x46034 bit 24 = 1 | ||
1120 | 4) program 0x64000 bit 14 = 1 | ||
1121 | */ | ||
1122 | temp = I915_READ(0x4600c); | ||
1123 | temp &= 0xffff0000; | ||
1124 | I915_WRITE(0x4600c, temp | 0x8124); | ||
1125 | |||
1126 | temp = I915_READ(0x46010); | ||
1127 | I915_WRITE(0x46010, temp | 1); | ||
1128 | |||
1129 | temp = I915_READ(0x46034); | ||
1130 | I915_WRITE(0x46034, temp | (1 << 24)); | ||
1131 | } else { | ||
1132 | dpa_ctl |= DP_PLL_FREQ_270MHZ; | ||
1133 | } | ||
1134 | I915_WRITE(DP_A, dpa_ctl); | ||
1135 | |||
1136 | udelay(500); | ||
1137 | } | ||
1138 | |||
1001 | static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | 1139 | static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) |
1002 | { | 1140 | { |
1003 | struct drm_device *dev = crtc->dev; | 1141 | struct drm_device *dev = crtc->dev; |
@@ -1015,6 +1153,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1015 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; | 1153 | int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; |
1016 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; | 1154 | int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; |
1017 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; | 1155 | int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; |
1156 | int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; | ||
1018 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; | 1157 | int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; |
1019 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; | 1158 | int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; |
1020 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; | 1159 | int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; |
@@ -1028,7 +1167,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1028 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; | 1167 | int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; |
1029 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1168 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
1030 | u32 temp; | 1169 | u32 temp; |
1031 | int tries = 5, j; | 1170 | int tries = 5, j, n; |
1032 | 1171 | ||
1033 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 1172 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
1034 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 1173 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -1038,27 +1177,32 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1038 | case DRM_MODE_DPMS_STANDBY: | 1177 | case DRM_MODE_DPMS_STANDBY: |
1039 | case DRM_MODE_DPMS_SUSPEND: | 1178 | case DRM_MODE_DPMS_SUSPEND: |
1040 | DRM_DEBUG("crtc %d dpms on\n", pipe); | 1179 | DRM_DEBUG("crtc %d dpms on\n", pipe); |
1041 | /* enable PCH DPLL */ | 1180 | if (HAS_eDP) { |
1042 | temp = I915_READ(pch_dpll_reg); | 1181 | /* enable eDP PLL */ |
1043 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 1182 | igdng_enable_pll_edp(crtc); |
1044 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); | 1183 | } else { |
1045 | I915_READ(pch_dpll_reg); | 1184 | /* enable PCH DPLL */ |
1046 | } | 1185 | temp = I915_READ(pch_dpll_reg); |
1047 | 1186 | if ((temp & DPLL_VCO_ENABLE) == 0) { | |
1048 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1187 | I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); |
1049 | temp = I915_READ(fdi_rx_reg); | 1188 | I915_READ(pch_dpll_reg); |
1050 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | 1189 | } |
1051 | FDI_SEL_PCDCLK | | ||
1052 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | ||
1053 | I915_READ(fdi_rx_reg); | ||
1054 | udelay(200); | ||
1055 | 1190 | ||
1056 | /* Enable CPU FDI TX PLL, always on for IGDNG */ | 1191 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
1057 | temp = I915_READ(fdi_tx_reg); | 1192 | temp = I915_READ(fdi_rx_reg); |
1058 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | 1193 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | |
1059 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | 1194 | FDI_SEL_PCDCLK | |
1060 | I915_READ(fdi_tx_reg); | 1195 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ |
1061 | udelay(100); | 1196 | I915_READ(fdi_rx_reg); |
1197 | udelay(200); | ||
1198 | |||
1199 | /* Enable CPU FDI TX PLL, always on for IGDNG */ | ||
1200 | temp = I915_READ(fdi_tx_reg); | ||
1201 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { | ||
1202 | I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); | ||
1203 | I915_READ(fdi_tx_reg); | ||
1204 | udelay(100); | ||
1205 | } | ||
1062 | } | 1206 | } |
1063 | 1207 | ||
1064 | /* Enable CPU pipe */ | 1208 | /* Enable CPU pipe */ |
@@ -1077,122 +1221,126 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1077 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); | 1221 | I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); |
1078 | } | 1222 | } |
1079 | 1223 | ||
1080 | /* enable CPU FDI TX and PCH FDI RX */ | 1224 | if (!HAS_eDP) { |
1081 | temp = I915_READ(fdi_tx_reg); | 1225 | /* enable CPU FDI TX and PCH FDI RX */ |
1082 | temp |= FDI_TX_ENABLE; | 1226 | temp = I915_READ(fdi_tx_reg); |
1083 | temp |= FDI_DP_PORT_WIDTH_X4; /* default */ | 1227 | temp |= FDI_TX_ENABLE; |
1084 | temp &= ~FDI_LINK_TRAIN_NONE; | 1228 | temp |= FDI_DP_PORT_WIDTH_X4; /* default */ |
1085 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 1229 | temp &= ~FDI_LINK_TRAIN_NONE; |
1086 | I915_WRITE(fdi_tx_reg, temp); | 1230 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1087 | I915_READ(fdi_tx_reg); | 1231 | I915_WRITE(fdi_tx_reg, temp); |
1232 | I915_READ(fdi_tx_reg); | ||
1088 | 1233 | ||
1089 | temp = I915_READ(fdi_rx_reg); | 1234 | temp = I915_READ(fdi_rx_reg); |
1090 | temp &= ~FDI_LINK_TRAIN_NONE; | 1235 | temp &= ~FDI_LINK_TRAIN_NONE; |
1091 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 1236 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1092 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); | 1237 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); |
1093 | I915_READ(fdi_rx_reg); | 1238 | I915_READ(fdi_rx_reg); |
1094 | 1239 | ||
1095 | udelay(150); | 1240 | udelay(150); |
1096 | 1241 | ||
1097 | /* Train FDI. */ | 1242 | /* Train FDI. */ |
1098 | /* unmask FDI RX Interrupt symbol_lock and bit_lock bit | 1243 | /* unmask FDI RX Interrupt symbol_lock and bit_lock bit |
1099 | for train result */ | 1244 | for train result */ |
1100 | temp = I915_READ(fdi_rx_imr_reg); | 1245 | temp = I915_READ(fdi_rx_imr_reg); |
1101 | temp &= ~FDI_RX_SYMBOL_LOCK; | 1246 | temp &= ~FDI_RX_SYMBOL_LOCK; |
1102 | temp &= ~FDI_RX_BIT_LOCK; | 1247 | temp &= ~FDI_RX_BIT_LOCK; |
1103 | I915_WRITE(fdi_rx_imr_reg, temp); | 1248 | I915_WRITE(fdi_rx_imr_reg, temp); |
1104 | I915_READ(fdi_rx_imr_reg); | 1249 | I915_READ(fdi_rx_imr_reg); |
1105 | udelay(150); | 1250 | udelay(150); |
1106 | 1251 | ||
1107 | temp = I915_READ(fdi_rx_iir_reg); | 1252 | temp = I915_READ(fdi_rx_iir_reg); |
1108 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1253 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1109 | 1254 | ||
1110 | if ((temp & FDI_RX_BIT_LOCK) == 0) { | 1255 | if ((temp & FDI_RX_BIT_LOCK) == 0) { |
1111 | for (j = 0; j < tries; j++) { | 1256 | for (j = 0; j < tries; j++) { |
1112 | temp = I915_READ(fdi_rx_iir_reg); | 1257 | temp = I915_READ(fdi_rx_iir_reg); |
1113 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1258 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1114 | if (temp & FDI_RX_BIT_LOCK) | 1259 | if (temp & FDI_RX_BIT_LOCK) |
1115 | break; | 1260 | break; |
1116 | udelay(200); | 1261 | udelay(200); |
1117 | } | 1262 | } |
1118 | if (j != tries) | 1263 | if (j != tries) |
1264 | I915_WRITE(fdi_rx_iir_reg, | ||
1265 | temp | FDI_RX_BIT_LOCK); | ||
1266 | else | ||
1267 | DRM_DEBUG("train 1 fail\n"); | ||
1268 | } else { | ||
1119 | I915_WRITE(fdi_rx_iir_reg, | 1269 | I915_WRITE(fdi_rx_iir_reg, |
1120 | temp | FDI_RX_BIT_LOCK); | 1270 | temp | FDI_RX_BIT_LOCK); |
1121 | else | 1271 | DRM_DEBUG("train 1 ok 2!\n"); |
1122 | DRM_DEBUG("train 1 fail\n"); | 1272 | } |
1123 | } else { | 1273 | temp = I915_READ(fdi_tx_reg); |
1124 | I915_WRITE(fdi_rx_iir_reg, | 1274 | temp &= ~FDI_LINK_TRAIN_NONE; |
1125 | temp | FDI_RX_BIT_LOCK); | 1275 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1126 | DRM_DEBUG("train 1 ok 2!\n"); | 1276 | I915_WRITE(fdi_tx_reg, temp); |
1127 | } | 1277 | |
1128 | temp = I915_READ(fdi_tx_reg); | 1278 | temp = I915_READ(fdi_rx_reg); |
1129 | temp &= ~FDI_LINK_TRAIN_NONE; | 1279 | temp &= ~FDI_LINK_TRAIN_NONE; |
1130 | temp |= FDI_LINK_TRAIN_PATTERN_2; | 1280 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
1131 | I915_WRITE(fdi_tx_reg, temp); | 1281 | I915_WRITE(fdi_rx_reg, temp); |
1132 | |||
1133 | temp = I915_READ(fdi_rx_reg); | ||
1134 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1135 | temp |= FDI_LINK_TRAIN_PATTERN_2; | ||
1136 | I915_WRITE(fdi_rx_reg, temp); | ||
1137 | 1282 | ||
1138 | udelay(150); | 1283 | udelay(150); |
1139 | 1284 | ||
1140 | temp = I915_READ(fdi_rx_iir_reg); | 1285 | temp = I915_READ(fdi_rx_iir_reg); |
1141 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1286 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1142 | 1287 | ||
1143 | if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { | 1288 | if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { |
1144 | for (j = 0; j < tries; j++) { | 1289 | for (j = 0; j < tries; j++) { |
1145 | temp = I915_READ(fdi_rx_iir_reg); | 1290 | temp = I915_READ(fdi_rx_iir_reg); |
1146 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); | 1291 | DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); |
1147 | if (temp & FDI_RX_SYMBOL_LOCK) | 1292 | if (temp & FDI_RX_SYMBOL_LOCK) |
1148 | break; | 1293 | break; |
1149 | udelay(200); | 1294 | udelay(200); |
1150 | } | 1295 | } |
1151 | if (j != tries) { | 1296 | if (j != tries) { |
1297 | I915_WRITE(fdi_rx_iir_reg, | ||
1298 | temp | FDI_RX_SYMBOL_LOCK); | ||
1299 | DRM_DEBUG("train 2 ok 1!\n"); | ||
1300 | } else | ||
1301 | DRM_DEBUG("train 2 fail\n"); | ||
1302 | } else { | ||
1152 | I915_WRITE(fdi_rx_iir_reg, | 1303 | I915_WRITE(fdi_rx_iir_reg, |
1153 | temp | FDI_RX_SYMBOL_LOCK); | 1304 | temp | FDI_RX_SYMBOL_LOCK); |
1154 | DRM_DEBUG("train 2 ok 1!\n"); | 1305 | DRM_DEBUG("train 2 ok 2!\n"); |
1155 | } else | 1306 | } |
1156 | DRM_DEBUG("train 2 fail\n"); | 1307 | DRM_DEBUG("train done\n"); |
1157 | } else { | ||
1158 | I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK); | ||
1159 | DRM_DEBUG("train 2 ok 2!\n"); | ||
1160 | } | ||
1161 | DRM_DEBUG("train done\n"); | ||
1162 | 1308 | ||
1163 | /* set transcoder timing */ | 1309 | /* set transcoder timing */ |
1164 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); | 1310 | I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); |
1165 | I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); | 1311 | I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); |
1166 | I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); | 1312 | I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); |
1167 | 1313 | ||
1168 | I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); | 1314 | I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); |
1169 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); | 1315 | I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); |
1170 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); | 1316 | I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); |
1171 | 1317 | ||
1172 | /* enable PCH transcoder */ | 1318 | /* enable PCH transcoder */ |
1173 | temp = I915_READ(transconf_reg); | 1319 | temp = I915_READ(transconf_reg); |
1174 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 1320 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
1175 | I915_READ(transconf_reg); | 1321 | I915_READ(transconf_reg); |
1176 | 1322 | ||
1177 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) | 1323 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) |
1178 | ; | 1324 | ; |
1179 | 1325 | ||
1180 | /* enable normal */ | 1326 | /* enable normal */ |
1181 | 1327 | ||
1182 | temp = I915_READ(fdi_tx_reg); | 1328 | temp = I915_READ(fdi_tx_reg); |
1183 | temp &= ~FDI_LINK_TRAIN_NONE; | 1329 | temp &= ~FDI_LINK_TRAIN_NONE; |
1184 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | | 1330 | I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | |
1185 | FDI_TX_ENHANCE_FRAME_ENABLE); | 1331 | FDI_TX_ENHANCE_FRAME_ENABLE); |
1186 | I915_READ(fdi_tx_reg); | 1332 | I915_READ(fdi_tx_reg); |
1187 | 1333 | ||
1188 | temp = I915_READ(fdi_rx_reg); | 1334 | temp = I915_READ(fdi_rx_reg); |
1189 | temp &= ~FDI_LINK_TRAIN_NONE; | 1335 | temp &= ~FDI_LINK_TRAIN_NONE; |
1190 | I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | | 1336 | I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | |
1191 | FDI_RX_ENHANCE_FRAME_ENABLE); | 1337 | FDI_RX_ENHANCE_FRAME_ENABLE); |
1192 | I915_READ(fdi_rx_reg); | 1338 | I915_READ(fdi_rx_reg); |
1193 | 1339 | ||
1194 | /* wait one idle pattern time */ | 1340 | /* wait one idle pattern time */ |
1195 | udelay(100); | 1341 | udelay(100); |
1342 | |||
1343 | } | ||
1196 | 1344 | ||
1197 | intel_crtc_load_lut(crtc); | 1345 | intel_crtc_load_lut(crtc); |
1198 | 1346 | ||
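The !HAS_eDP branch above is a two-stage FDI link-training handshake: drive training pattern 1 until FDI_RX_IIR reports bit lock, then pattern 2 until it reports symbol lock, then switch to the normal pattern with enhanced framing. A condensed standalone sketch of that retry structure follows; the callbacks and countdown stub are hypothetical stand-ins for the I915_READ/I915_WRITE register accesses.

#include <stdio.h>

enum fdi_pattern { FDI_PATTERN_1, FDI_PATTERN_2, FDI_PATTERN_NORMAL };

static int fake_lock_countdown = 2;
static void fake_set_pattern(enum fdi_pattern p) { (void)p; }
static int fake_lock_reached(void) { return --fake_lock_countdown <= 0; }

/* One training stage: program a pattern, then poll for the lock bit. */
static int train_stage(void (*set_pattern)(enum fdi_pattern),
                       int (*lock_reached)(void),
                       enum fdi_pattern pat, int tries)
{
        int j;

        set_pattern(pat);
        for (j = 0; j < tries; j++)
                if (lock_reached())
                        return 0;       /* lock bit seen in FDI_RX_IIR */
        return -1;                      /* the driver logs "train N fail" */
}

int main(void)
{
        /* pattern 1 -> bit lock, pattern 2 -> symbol lock, then normal */
        int ok = !train_stage(fake_set_pattern, fake_lock_reached,
                              FDI_PATTERN_1, 5) &&
                 !train_stage(fake_set_pattern, fake_lock_reached,
                              FDI_PATTERN_2, 5);

        printf("FDI training %s\n", ok ? "done" : "failed");
        return 0;
}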
@@ -1200,8 +1348,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1200 | case DRM_MODE_DPMS_OFF: | 1348 | case DRM_MODE_DPMS_OFF: |
1201 | DRM_DEBUG("crtc %d dpms off\n", pipe); | 1349 | DRM_DEBUG("crtc %d dpms off\n", pipe); |
1202 | 1350 | ||
1203 | /* Disable the VGA plane that we never use */ | 1351 | i915_disable_vga(dev); |
1204 | I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE); | ||
1205 | 1352 | ||
1206 | /* Disable display plane */ | 1353 | /* Disable display plane */ |
1207 | temp = I915_READ(dspcntr_reg); | 1354 | temp = I915_READ(dspcntr_reg); |
@@ -1217,17 +1364,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1217 | if ((temp & PIPEACONF_ENABLE) != 0) { | 1364 | if ((temp & PIPEACONF_ENABLE) != 0) { |
1218 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | 1365 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); |
1219 | I915_READ(pipeconf_reg); | 1366 | I915_READ(pipeconf_reg); |
1367 | n = 0; | ||
1220 | /* wait for cpu pipe off, pipe state */ | 1368 | /* wait for cpu pipe off, pipe state */ |
1221 | while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) | 1369 | while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) { |
1222 | ; | 1370 | n++; |
1371 | if (n < 60) { | ||
1372 | udelay(500); | ||
1373 | continue; | ||
1374 | } else { | ||
1375 | DRM_DEBUG("pipe %d off delay\n", pipe); | ||
1376 | break; | ||
1377 | } | ||
1378 | } | ||
1223 | } else | 1379 | } else |
1224 | DRM_DEBUG("crtc %d is disabled\n", pipe); | 1380 | DRM_DEBUG("crtc %d is disabled\n", pipe); |
1225 | 1381 | ||
1226 | /* IGDNG-A : disable cpu panel fitter ? */ | 1382 | if (HAS_eDP) { |
1227 | temp = I915_READ(pf_ctl_reg); | 1383 | igdng_disable_pll_edp(crtc); |
1228 | if ((temp & PF_ENABLE) != 0) { | ||
1229 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
1230 | I915_READ(pf_ctl_reg); | ||
1231 | } | 1384 | } |
1232 | 1385 | ||
1233 | /* disable CPU FDI tx and PCH FDI rx */ | 1386 | /* disable CPU FDI tx and PCH FDI rx */ |
@@ -1239,6 +1392,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1239 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | 1392 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); |
1240 | I915_READ(fdi_rx_reg); | 1393 | I915_READ(fdi_rx_reg); |
1241 | 1394 | ||
1395 | udelay(100); | ||
1396 | |||
1242 | /* still set train pattern 1 */ | 1397 | /* still set train pattern 1 */ |
1243 | temp = I915_READ(fdi_tx_reg); | 1398 | temp = I915_READ(fdi_tx_reg); |
1244 | temp &= ~FDI_LINK_TRAIN_NONE; | 1399 | temp &= ~FDI_LINK_TRAIN_NONE; |
@@ -1250,14 +1405,25 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1250 | temp |= FDI_LINK_TRAIN_PATTERN_1; | 1405 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
1251 | I915_WRITE(fdi_rx_reg, temp); | 1406 | I915_WRITE(fdi_rx_reg, temp); |
1252 | 1407 | ||
1408 | udelay(100); | ||
1409 | |||
1253 | /* disable PCH transcoder */ | 1410 | /* disable PCH transcoder */ |
1254 | temp = I915_READ(transconf_reg); | 1411 | temp = I915_READ(transconf_reg); |
1255 | if ((temp & TRANS_ENABLE) != 0) { | 1412 | if ((temp & TRANS_ENABLE) != 0) { |
1256 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); | 1413 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); |
1257 | I915_READ(transconf_reg); | 1414 | I915_READ(transconf_reg); |
1415 | n = 0; | ||
1258 | /* wait for PCH transcoder off, transcoder state */ | 1416 | /* wait for PCH transcoder off, transcoder state */ |
1259 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) | 1417 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) { |
1260 | ; | 1418 | n++; |
1419 | if (n < 60) { | ||
1420 | udelay(500); | ||
1421 | continue; | ||
1422 | } else { | ||
1423 | DRM_DEBUG("transcoder %d off delay\n", pipe); | ||
1424 | break; | ||
1425 | } | ||
1426 | } | ||
1261 | } | 1427 | } |
1262 | 1428 | ||
1263 | /* disable PCH DPLL */ | 1429 | /* disable PCH DPLL */ |
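Both poll loops above replace unbounded busy-waits with a bounded one: at most 60 iterations with udelay(500) between them, roughly a 30 ms budget, before logging a debug message and moving on. A rough standalone sketch of that shape, with a fabricated status callback:

#include <stdio.h>

static int fake_remaining = 3;  /* pretend the pipe drains after 3 polls */
static int fake_status_active(void) { return fake_remaining-- > 0; }

/* Bounded poll: about 60 * 500us = 30ms before giving up with a message. */
static int wait_until_idle(int (*status_active)(void))
{
        int n = 0;

        while (status_active()) {
                if (++n >= 60) {
                        printf("pipe/transcoder off delay\n");
                        return -1;
                }
                /* the driver does udelay(500) between polls */
        }
        return 0;
}

int main(void)
{
        printf("idle wait returned %d\n", wait_until_idle(fake_status_active));
        return 0;
}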
@@ -1275,6 +1441,22 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1275 | I915_READ(fdi_rx_reg); | 1441 | I915_READ(fdi_rx_reg); |
1276 | } | 1442 | } |
1277 | 1443 | ||
1444 | /* Disable CPU FDI TX PLL */ | ||
1445 | temp = I915_READ(fdi_tx_reg); | ||
1446 | if ((temp & FDI_TX_PLL_ENABLE) != 0) { | ||
1447 | I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); | ||
1448 | I915_READ(fdi_tx_reg); | ||
1449 | udelay(100); | ||
1450 | } | ||
1451 | |||
1452 | /* Disable PF */ | ||
1453 | temp = I915_READ(pf_ctl_reg); | ||
1454 | if ((temp & PF_ENABLE) != 0) { | ||
1455 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
1456 | I915_READ(pf_ctl_reg); | ||
1457 | } | ||
1458 | I915_WRITE(pf_win_size, 0); | ||
1459 | |||
1278 | /* Wait for the clocks to turn off. */ | 1460 | /* Wait for the clocks to turn off. */ |
1279 | udelay(150); | 1461 | udelay(150); |
1280 | break; | 1462 | break; |
@@ -1342,7 +1524,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1342 | //intel_crtc_dpms_video(crtc, FALSE); TODO | 1524 | //intel_crtc_dpms_video(crtc, FALSE); TODO |
1343 | 1525 | ||
1344 | /* Disable the VGA plane that we never use */ | 1526 | /* Disable the VGA plane that we never use */ |
1345 | I915_WRITE(VGACNTRL, VGA_DISP_DISABLE); | 1527 | i915_disable_vga(dev); |
1346 | 1528 | ||
1347 | /* Disable display plane */ | 1529 | /* Disable display plane */ |
1348 | temp = I915_READ(dspcntr_reg); | 1530 | temp = I915_READ(dspcntr_reg); |
@@ -1623,48 +1805,72 @@ static struct intel_watermark_params igd_cursor_hplloff_wm = { | |||
1623 | IGD_FIFO_LINE_SIZE | 1805 | IGD_FIFO_LINE_SIZE |
1624 | }; | 1806 | }; |
1625 | static struct intel_watermark_params i945_wm_info = { | 1807 | static struct intel_watermark_params i945_wm_info = { |
1626 | I915_FIFO_LINE_SIZE, | 1808 | I945_FIFO_SIZE, |
1627 | I915_MAX_WM, | 1809 | I915_MAX_WM, |
1628 | 1, | 1810 | 1, |
1629 | 0, | 1811 | 2, |
1630 | IGD_FIFO_LINE_SIZE | 1812 | I915_FIFO_LINE_SIZE |
1631 | }; | 1813 | }; |
1632 | static struct intel_watermark_params i915_wm_info = { | 1814 | static struct intel_watermark_params i915_wm_info = { |
1633 | I945_FIFO_SIZE, | 1815 | I915_FIFO_SIZE, |
1634 | I915_MAX_WM, | 1816 | I915_MAX_WM, |
1635 | 1, | 1817 | 1, |
1636 | 0, | 1818 | 2, |
1637 | I915_FIFO_LINE_SIZE | 1819 | I915_FIFO_LINE_SIZE |
1638 | }; | 1820 | }; |
1639 | static struct intel_watermark_params i855_wm_info = { | 1821 | static struct intel_watermark_params i855_wm_info = { |
1640 | I855GM_FIFO_SIZE, | 1822 | I855GM_FIFO_SIZE, |
1641 | I915_MAX_WM, | 1823 | I915_MAX_WM, |
1642 | 1, | 1824 | 1, |
1643 | 0, | 1825 | 2, |
1644 | I830_FIFO_LINE_SIZE | 1826 | I830_FIFO_LINE_SIZE |
1645 | }; | 1827 | }; |
1646 | static struct intel_watermark_params i830_wm_info = { | 1828 | static struct intel_watermark_params i830_wm_info = { |
1647 | I830_FIFO_SIZE, | 1829 | I830_FIFO_SIZE, |
1648 | I915_MAX_WM, | 1830 | I915_MAX_WM, |
1649 | 1, | 1831 | 1, |
1650 | 0, | 1832 | 2, |
1651 | I830_FIFO_LINE_SIZE | 1833 | I830_FIFO_LINE_SIZE |
1652 | }; | 1834 | }; |
1653 | 1835 | ||
1836 | /** | ||
1837 | * intel_calculate_wm - calculate watermark level | ||
1838 | * @clock_in_khz: pixel clock | ||
1839 | * @wm: chip FIFO params | ||
1840 | * @pixel_size: display pixel size | ||
1841 | * @latency_ns: memory latency for the platform | ||
1842 | * | ||
1843 | * Calculate the watermark level (the level at which the display plane will | ||
1844 | * start fetching from memory again). Each chip has a different display | ||
1845 | * FIFO size and allocation, so the caller needs to figure that out and pass | ||
1846 | * in the correct intel_watermark_params structure. | ||
1847 | * | ||
1848 | * As the pixel clock runs, the FIFO will be drained at a rate that depends | ||
1849 | * on the pixel size. When it reaches the watermark level, it'll start | ||
1850 | * fetching FIFO line-sized chunks from memory until the FIFO fills | ||
1851 | * past the watermark point. If the FIFO drains completely, a FIFO underrun | ||
1852 | * will occur, and a display engine hang could result. | ||
1853 | */ | ||
1654 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, | 1854 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, |
1655 | struct intel_watermark_params *wm, | 1855 | struct intel_watermark_params *wm, |
1656 | int pixel_size, | 1856 | int pixel_size, |
1657 | unsigned long latency_ns) | 1857 | unsigned long latency_ns) |
1658 | { | 1858 | { |
1659 | unsigned long bytes_required, wm_size; | 1859 | long entries_required, wm_size; |
1860 | |||
1861 | entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; | ||
1862 | entries_required /= wm->cacheline_size; | ||
1660 | 1863 | ||
1661 | bytes_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; | 1864 | DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); |
1662 | bytes_required /= wm->cacheline_size; | ||
1663 | wm_size = wm->fifo_size - bytes_required - wm->guard_size; | ||
1664 | 1865 | ||
1665 | if (wm_size > wm->max_wm) | 1866 | wm_size = wm->fifo_size - (entries_required + wm->guard_size); |
1867 | |||
1868 | DRM_DEBUG("FIFO watermark level: %d\n", wm_size); | ||
1869 | |||
1870 | /* Don't promote wm_size to unsigned... */ | ||
1871 | if (wm_size > (long)wm->max_wm) | ||
1666 | wm_size = wm->max_wm; | 1872 | wm_size = wm->max_wm; |
1667 | if (wm_size == 0) | 1873 | if (wm_size <= 0) |
1668 | wm_size = wm->default_wm; | 1874 | wm_size = wm->default_wm; |
1669 | return wm_size; | 1875 | return wm_size; |
1670 | } | 1876 | } |
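A worked example of the rewritten calculation, using the 3000 ns default latency and guard value of 2 introduced above but otherwise hypothetical numbers (65 MHz pixel clock, 64-byte FIFO line, 96-entry FIFO; real values come from the watermark tables and the per-plane FIFO split):

#include <stdio.h>

int main(void)
{
        long clock_in_khz = 65000;      /* assumed 65 MHz pixel clock */
        long pixel_size   = 4;          /* 32 bpp */
        long latency_ns   = 3000;       /* new non-IGD default */
        long cacheline    = 64;         /* assumed FIFO line size */
        long fifo_size    = 96;         /* assumed FIFO entries */
        long guard        = 2;          /* guard value from the wm tables */
        long entries, wm;

        entries = (clock_in_khz * pixel_size * latency_ns) / 1000000;
        entries /= cacheline;                   /* bytes -> FIFO lines: 12 */
        wm = fifo_size - (entries + guard);     /* 96 - (12 + 2) = 82 */

        printf("entries=%ld watermark=%ld\n", entries, wm);
        return 0;
}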
@@ -1799,8 +2005,40 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
1799 | return; | 2005 | return; |
1800 | } | 2006 | } |
1801 | 2007 | ||
1802 | const static int latency_ns = 5000; /* default for non-igd platforms */ | 2008 | const static int latency_ns = 3000; /* default for non-igd platforms */ |
1803 | 2009 | ||
2010 | static int intel_get_fifo_size(struct drm_device *dev, int plane) | ||
2011 | { | ||
2012 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2013 | uint32_t dsparb = I915_READ(DSPARB); | ||
2014 | int size; | ||
2015 | |||
2016 | if (IS_I9XX(dev)) { | ||
2017 | if (plane == 0) | ||
2018 | size = dsparb & 0x7f; | ||
2019 | else | ||
2020 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - | ||
2021 | (dsparb & 0x7f); | ||
2022 | } else if (IS_I85X(dev)) { | ||
2023 | if (plane == 0) | ||
2024 | size = dsparb & 0x1ff; | ||
2025 | else | ||
2026 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - | ||
2027 | (dsparb & 0x1ff); | ||
2028 | size >>= 1; /* Convert to cachelines */ | ||
2029 | } else if (IS_845G(dev)) { | ||
2030 | size = dsparb & 0x7f; | ||
2031 | size >>= 2; /* Convert to cachelines */ | ||
2032 | } else { | ||
2033 | size = dsparb & 0x7f; | ||
2034 | size >>= 1; /* Convert to cachelines */ | ||
2035 | } | ||
2036 | |||
2037 | DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", | ||
2038 | size); | ||
2039 | |||
2040 | return size; | ||
2041 | } | ||
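The helper above derives each plane's share of the display FIFO from DSPARB: plane A's size is the low field, plane B's is the next boundary minus plane A, with a shift down to cachelines on the older parts. A standalone sketch of the 9xx-style split with a fabricated register value; the shift constant is a placeholder, not the real DSPARB_CSTART_SHIFT:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_DSPARB_CSTART_SHIFT 7    /* placeholder, not the real value */

int main(void)
{
        /* fabricated DSPARB: A/B boundary at entry 48, B/C boundary at 96 */
        uint32_t dsparb = (96u << SKETCH_DSPARB_CSTART_SHIFT) | 48u;

        int planea = dsparb & 0x7f;                                     /* 48 */
        int planeb = ((dsparb >> SKETCH_DSPARB_CSTART_SHIFT) & 0x7f)
                     - planea;                                          /* 48 */

        printf("plane A: %d entries, plane B: %d entries\n", planea, planeb);
        return 0;
}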
1804 | 2042 | ||
1805 | static void i965_update_wm(struct drm_device *dev) | 2043 | static void i965_update_wm(struct drm_device *dev) |
1806 | { | 2044 | { |
@@ -1817,101 +2055,89 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
1817 | int planeb_clock, int sr_hdisplay, int pixel_size) | 2055 | int planeb_clock, int sr_hdisplay, int pixel_size) |
1818 | { | 2056 | { |
1819 | struct drm_i915_private *dev_priv = dev->dev_private; | 2057 | struct drm_i915_private *dev_priv = dev->dev_private; |
1820 | uint32_t fwater_lo = I915_READ(FW_BLC) & MM_FIFO_WATERMARK; | 2058 | uint32_t fwater_lo; |
1821 | uint32_t fwater_hi = I915_READ(FW_BLC2) & LM_FIFO_WATERMARK; | 2059 | uint32_t fwater_hi; |
1822 | int bsize, asize, cwm, bwm = 1, awm = 1, srwm = 1; | 2060 | int total_size, cacheline_size, cwm, srwm = 1; |
1823 | uint32_t dsparb = I915_READ(DSPARB); | 2061 | int planea_wm, planeb_wm; |
1824 | int planea_entries, planeb_entries; | 2062 | struct intel_watermark_params planea_params, planeb_params; |
1825 | struct intel_watermark_params *wm_params; | ||
1826 | unsigned long line_time_us; | 2063 | unsigned long line_time_us; |
1827 | int sr_clock, sr_entries = 0; | 2064 | int sr_clock, sr_entries = 0; |
1828 | 2065 | ||
2066 | /* Create copies of the base settings for each pipe */ | ||
1829 | if (IS_I965GM(dev) || IS_I945GM(dev)) | 2067 | if (IS_I965GM(dev) || IS_I945GM(dev)) |
1830 | wm_params = &i945_wm_info; | 2068 | planea_params = planeb_params = i945_wm_info; |
1831 | else if (IS_I9XX(dev)) | 2069 | else if (IS_I9XX(dev)) |
1832 | wm_params = &i915_wm_info; | 2070 | planea_params = planeb_params = i915_wm_info; |
1833 | else | 2071 | else |
1834 | wm_params = &i855_wm_info; | 2072 | planea_params = planeb_params = i855_wm_info; |
1835 | |||
1836 | planea_entries = intel_calculate_wm(planea_clock, wm_params, | ||
1837 | pixel_size, latency_ns); | ||
1838 | planeb_entries = intel_calculate_wm(planeb_clock, wm_params, | ||
1839 | pixel_size, latency_ns); | ||
1840 | |||
1841 | DRM_DEBUG("FIFO entries - A: %d, B: %d\n", planea_entries, | ||
1842 | planeb_entries); | ||
1843 | 2073 | ||
1844 | if (IS_I9XX(dev)) { | 2074 | /* Grab a couple of global values before we overwrite them */ |
1845 | asize = dsparb & 0x7f; | 2075 | total_size = planea_params.fifo_size; |
1846 | bsize = (dsparb >> DSPARB_CSTART_SHIFT) & 0x7f; | 2076 | cacheline_size = planea_params.cacheline_size; |
1847 | } else { | ||
1848 | asize = dsparb & 0x1ff; | ||
1849 | bsize = (dsparb >> DSPARB_BEND_SHIFT) & 0x1ff; | ||
1850 | } | ||
1851 | DRM_DEBUG("FIFO size - A: %d, B: %d\n", asize, bsize); | ||
1852 | 2077 | ||
1853 | /* Two extra entries for padding */ | 2078 | /* Update per-plane FIFO sizes */ |
1854 | awm = asize - (planea_entries + 2); | 2079 | planea_params.fifo_size = intel_get_fifo_size(dev, 0); |
1855 | bwm = bsize - (planeb_entries + 2); | 2080 | planeb_params.fifo_size = intel_get_fifo_size(dev, 1); |
1856 | 2081 | ||
1857 | /* Sanity check against potentially bad FIFO allocations */ | 2082 | planea_wm = intel_calculate_wm(planea_clock, &planea_params, |
1858 | if (awm <= 0) { | 2083 | pixel_size, latency_ns); |
1859 | /* pipe is on but has too few FIFO entries */ | 2084 | planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, |
1860 | if (planea_entries != 0) | 2085 | pixel_size, latency_ns); |
1861 | DRM_DEBUG("plane A needs more FIFO entries\n"); | 2086 | DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
1862 | awm = 1; | ||
1863 | } | ||
1864 | if (bwm <= 0) { | ||
1865 | if (planeb_entries != 0) | ||
1866 | DRM_DEBUG("plane B needs more FIFO entries\n"); | ||
1867 | bwm = 1; | ||
1868 | } | ||
1869 | 2087 | ||
1870 | /* | 2088 | /* |
1871 | * Overlay gets an aggressive default since video jitter is bad. | 2089 | * Overlay gets an aggressive default since video jitter is bad. |
1872 | */ | 2090 | */ |
1873 | cwm = 2; | 2091 | cwm = 2; |
1874 | 2092 | ||
1875 | /* Calc sr entries for one pipe configs */ | 2093 | /* Calc sr entries for one plane configs */ |
1876 | if (!planea_clock || !planeb_clock) { | 2094 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
2095 | /* self-refresh has much higher latency */ | ||
2096 | const static int sr_latency_ns = 6000; | ||
2097 | |||
1877 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2098 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
1878 | line_time_us = (sr_hdisplay * 1000) / sr_clock; | 2099 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
1879 | sr_entries = (((latency_ns / line_time_us) + 1) * pixel_size * | 2100 | |
1880 | sr_hdisplay) / 1000; | 2101 | /* Use ns/us then divide to preserve precision */ |
1881 | sr_entries = roundup(sr_entries / wm_params->cacheline_size, 1); | 2102 | sr_entries = (((sr_latency_ns / line_time_us) + 1) * |
1882 | if (sr_entries < wm_params->fifo_size) | 2103 | pixel_size * sr_hdisplay) / 1000; |
1883 | srwm = wm_params->fifo_size - sr_entries; | 2104 | sr_entries = roundup(sr_entries / cacheline_size, 1); |
2105 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | ||
2106 | srwm = total_size - sr_entries; | ||
2107 | if (srwm < 0) | ||
2108 | srwm = 1; | ||
2109 | if (IS_I9XX(dev)) | ||
2110 | I915_WRITE(FW_BLC_SELF, (srwm & 0x3f)); | ||
1884 | } | 2111 | } |
1885 | 2112 | ||
1886 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 2113 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
1887 | awm, bwm, cwm, srwm); | 2114 | planea_wm, planeb_wm, cwm, srwm); |
1888 | 2115 | ||
1889 | fwater_lo = fwater_lo | ((bwm & 0x3f) << 16) | (awm & 0x3f); | 2116 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); |
1890 | fwater_hi = fwater_hi | (cwm & 0x1f); | 2117 | fwater_hi = (cwm & 0x1f); |
2118 | |||
2119 | /* Set request length to 8 cachelines per fetch */ | ||
2120 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); | ||
2121 | fwater_hi = fwater_hi | (1 << 8); | ||
1891 | 2122 | ||
1892 | I915_WRITE(FW_BLC, fwater_lo); | 2123 | I915_WRITE(FW_BLC, fwater_lo); |
1893 | I915_WRITE(FW_BLC2, fwater_hi); | 2124 | I915_WRITE(FW_BLC2, fwater_hi); |
1894 | if (IS_I9XX(dev)) | ||
1895 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); | ||
1896 | } | 2125 | } |
1897 | 2126 | ||
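For the single-plane self-refresh path above, a worked example with assumed mode parameters (1024 active pixels, 65 MHz, 32 bpp) and an assumed 64-byte cacheline / 127-entry total FIFO; only the 6000 ns self-refresh latency comes from the patch:

#include <stdio.h>

int main(void)
{
        int sr_hdisplay = 1024, sr_clock = 65000, pixel_size = 4;
        int cacheline_size = 64, total_size = 127;      /* assumed FIFO */
        const int sr_latency_ns = 6000;                 /* as in the patch */
        int line_time_us, sr_entries, srwm;

        line_time_us = (sr_hdisplay * 1000) / sr_clock;         /* 15 us */
        sr_entries = (((sr_latency_ns / line_time_us) + 1) *
                      pixel_size * sr_hdisplay) / 1000;         /* 1642 bytes */
        sr_entries /= cacheline_size;                           /* 25 lines */
        srwm = total_size - sr_entries;                         /* 102 */

        printf("line=%dus entries=%d srwm=%d\n",
               line_time_us, sr_entries, srwm);
        return 0;
}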
1898 | static void i830_update_wm(struct drm_device *dev, int planea_clock, | 2127 | static void i830_update_wm(struct drm_device *dev, int planea_clock, |
1899 | int pixel_size) | 2128 | int pixel_size) |
1900 | { | 2129 | { |
1901 | struct drm_i915_private *dev_priv = dev->dev_private; | 2130 | struct drm_i915_private *dev_priv = dev->dev_private; |
1902 | uint32_t dsparb = I915_READ(DSPARB); | 2131 | uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
1903 | uint32_t fwater_lo = I915_READ(FW_BLC) & MM_FIFO_WATERMARK; | 2132 | int planea_wm; |
1904 | unsigned int asize, awm; | ||
1905 | int planea_entries; | ||
1906 | |||
1907 | planea_entries = intel_calculate_wm(planea_clock, &i830_wm_info, | ||
1908 | pixel_size, latency_ns); | ||
1909 | 2133 | ||
1910 | asize = dsparb & 0x7f; | 2134 | i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); |
1911 | 2135 | ||
1912 | awm = asize - planea_entries; | 2136 | planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, |
2137 | pixel_size, latency_ns); | ||
2138 | fwater_lo |= (3<<8) | planea_wm; | ||
1913 | 2139 | ||
1914 | fwater_lo = fwater_lo | awm; | 2140 | DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); |
1915 | 2141 | ||
1916 | I915_WRITE(FW_BLC, fwater_lo); | 2142 | I915_WRITE(FW_BLC, fwater_lo); |
1917 | } | 2143 | } |
@@ -1984,7 +2210,7 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
1984 | if (enabled <= 0) | 2210 | if (enabled <= 0) |
1985 | return; | 2211 | return; |
1986 | 2212 | ||
1987 | /* Single pipe configs can enable self refresh */ | 2213 | /* Single plane configs can enable self refresh */ |
1988 | if (enabled == 1 && IS_IGD(dev)) | 2214 | if (enabled == 1 && IS_IGD(dev)) |
1989 | igd_enable_cxsr(dev, sr_clock, pixel_size); | 2215 | igd_enable_cxsr(dev, sr_clock, pixel_size); |
1990 | else if (IS_IGD(dev)) | 2216 | else if (IS_IGD(dev)) |
@@ -2028,6 +2254,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2028 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; | 2254 | u32 dpll = 0, fp = 0, dspcntr, pipeconf; |
2029 | bool ok, is_sdvo = false, is_dvo = false; | 2255 | bool ok, is_sdvo = false, is_dvo = false; |
2030 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; | 2256 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
2257 | bool is_edp = false; | ||
2031 | struct drm_mode_config *mode_config = &dev->mode_config; | 2258 | struct drm_mode_config *mode_config = &dev->mode_config; |
2032 | struct drm_connector *connector; | 2259 | struct drm_connector *connector; |
2033 | const intel_limit_t *limit; | 2260 | const intel_limit_t *limit; |
@@ -2043,6 +2270,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2043 | int lvds_reg = LVDS; | 2270 | int lvds_reg = LVDS; |
2044 | u32 temp; | 2271 | u32 temp; |
2045 | int sdvo_pixel_multiply; | 2272 | int sdvo_pixel_multiply; |
2273 | int target_clock; | ||
2046 | 2274 | ||
2047 | drm_vblank_pre_modeset(dev, pipe); | 2275 | drm_vblank_pre_modeset(dev, pipe); |
2048 | 2276 | ||
@@ -2074,6 +2302,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2074 | case INTEL_OUTPUT_DISPLAYPORT: | 2302 | case INTEL_OUTPUT_DISPLAYPORT: |
2075 | is_dp = true; | 2303 | is_dp = true; |
2076 | break; | 2304 | break; |
2305 | case INTEL_OUTPUT_EDP: | ||
2306 | is_edp = true; | ||
2307 | break; | ||
2077 | } | 2308 | } |
2078 | 2309 | ||
2079 | num_outputs++; | 2310 | num_outputs++; |
@@ -2125,11 +2356,29 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2125 | } | 2356 | } |
2126 | 2357 | ||
2127 | /* FDI link */ | 2358 | /* FDI link */ |
2128 | if (IS_IGDNG(dev)) | 2359 | if (IS_IGDNG(dev)) { |
2129 | igdng_compute_m_n(3, 4, /* lane num 4 */ | 2360 | int lane, link_bw; |
2130 | adjusted_mode->clock, | 2361 | /* eDP doesn't require FDI link, so just set DP M/N |
2131 | 270000, /* lane clock */ | 2362 | according to current link config */ |
2132 | &m_n); | 2363 | if (is_edp) { |
2364 | struct drm_connector *edp; | ||
2365 | target_clock = mode->clock; | ||
2366 | edp = intel_pipe_get_output(crtc); | ||
2367 | intel_edp_link_config(to_intel_output(edp), | ||
2368 | &lane, &link_bw); | ||
2369 | } else { | ||
2370 | /* DP over FDI requires target mode clock | ||
2371 | instead of link clock */ | ||
2372 | if (is_dp) | ||
2373 | target_clock = mode->clock; | ||
2374 | else | ||
2375 | target_clock = adjusted_mode->clock; | ||
2376 | lane = 4; | ||
2377 | link_bw = 270000; | ||
2378 | } | ||
2379 | igdng_compute_m_n(3, lane, target_clock, | ||
2380 | link_bw, &m_n); | ||
2381 | } | ||
2133 | 2382 | ||
2134 | if (IS_IGD(dev)) | 2383 | if (IS_IGD(dev)) |
2135 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | 2384 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
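The new FDI-link block boils down to a selection rule: eDP takes the mode clock plus the panel's trained lane count and link rate, DP-over-FDI takes the mode clock, and everything else takes the adjusted clock with the default 4 lanes at a 270 MHz link. A standalone restatement of that rule; the names are illustrative, not driver API:

#include <stdio.h>

struct link_cfg { int target_clock_khz; int lanes; int link_bw_khz; };

static struct link_cfg pick_link(int is_edp, int is_dp,
                                 int mode_clock, int adjusted_clock,
                                 int edp_lanes, int edp_bw)
{
        struct link_cfg c;

        if (is_edp) {           /* panel's trained link drives the M/N values */
                c.target_clock_khz = mode_clock;
                c.lanes = edp_lanes;
                c.link_bw_khz = edp_bw;
        } else {                /* DP-over-FDI wants the mode clock,
                                   everything else the adjusted clock */
                c.target_clock_khz = is_dp ? mode_clock : adjusted_clock;
                c.lanes = 4;            /* FDI default */
                c.link_bw_khz = 270000;
        }
        return c;
}

int main(void)
{
        struct link_cfg c = pick_link(0, 1, 148500, 148500, 0, 0);

        printf("DP over FDI: %d kHz payload on %d lanes at %d kHz\n",
               c.target_clock_khz, c.lanes, c.link_bw_khz);
        return 0;
}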
@@ -2250,29 +2499,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2250 | dpll_reg = pch_dpll_reg; | 2499 | dpll_reg = pch_dpll_reg; |
2251 | } | 2500 | } |
2252 | 2501 | ||
2253 | if (dpll & DPLL_VCO_ENABLE) { | 2502 | if (is_edp) { |
2503 | igdng_disable_pll_edp(crtc); | ||
2504 | } else if ((dpll & DPLL_VCO_ENABLE)) { | ||
2254 | I915_WRITE(fp_reg, fp); | 2505 | I915_WRITE(fp_reg, fp); |
2255 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 2506 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
2256 | I915_READ(dpll_reg); | 2507 | I915_READ(dpll_reg); |
2257 | udelay(150); | 2508 | udelay(150); |
2258 | } | 2509 | } |
2259 | 2510 | ||
2260 | if (IS_IGDNG(dev)) { | ||
2261 | /* enable PCH clock reference source */ | ||
2262 | /* XXX need to change the setting for other outputs */ | ||
2263 | u32 temp; | ||
2264 | temp = I915_READ(PCH_DREF_CONTROL); | ||
2265 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | ||
2266 | temp |= DREF_NONSPREAD_CK505_ENABLE; | ||
2267 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
2268 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
2269 | temp &= ~DREF_SSC1_ENABLE; | ||
2270 | /* if no eDP, disable source output to CPU */ | ||
2271 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
2272 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | ||
2273 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
2274 | } | ||
2275 | |||
2276 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 2511 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
2277 | * This is an exception to the general rule that mode_set doesn't turn | 2512 | * This is an exception to the general rule that mode_set doesn't turn |
2278 | * things on. | 2513 | * things on. |
@@ -2304,23 +2539,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2304 | if (is_dp) | 2539 | if (is_dp) |
2305 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 2540 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
2306 | 2541 | ||
2307 | I915_WRITE(fp_reg, fp); | 2542 | if (!is_edp) { |
2308 | I915_WRITE(dpll_reg, dpll); | 2543 | I915_WRITE(fp_reg, fp); |
2309 | I915_READ(dpll_reg); | ||
2310 | /* Wait for the clocks to stabilize. */ | ||
2311 | udelay(150); | ||
2312 | |||
2313 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | ||
2314 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
2315 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
2316 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | ||
2317 | } else { | ||
2318 | /* write it again -- the BIOS does, after all */ | ||
2319 | I915_WRITE(dpll_reg, dpll); | 2544 | I915_WRITE(dpll_reg, dpll); |
2545 | I915_READ(dpll_reg); | ||
2546 | /* Wait for the clocks to stabilize. */ | ||
2547 | udelay(150); | ||
2548 | |||
2549 | if (IS_I965G(dev) && !IS_IGDNG(dev)) { | ||
2550 | sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; | ||
2551 | I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | | ||
2552 | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); | ||
2553 | } else { | ||
2554 | /* write it again -- the BIOS does, after all */ | ||
2555 | I915_WRITE(dpll_reg, dpll); | ||
2556 | } | ||
2557 | I915_READ(dpll_reg); | ||
2558 | /* Wait for the clocks to stabilize. */ | ||
2559 | udelay(150); | ||
2320 | } | 2560 | } |
2321 | I915_READ(dpll_reg); | ||
2322 | /* Wait for the clocks to stabilize. */ | ||
2323 | udelay(150); | ||
2324 | 2561 | ||
2325 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | | 2562 | I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | |
2326 | ((adjusted_mode->crtc_htotal - 1) << 16)); | 2563 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
@@ -2350,10 +2587,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2350 | I915_WRITE(link_m1_reg, m_n.link_m); | 2587 | I915_WRITE(link_m1_reg, m_n.link_m); |
2351 | I915_WRITE(link_n1_reg, m_n.link_n); | 2588 | I915_WRITE(link_n1_reg, m_n.link_n); |
2352 | 2589 | ||
2353 | /* enable FDI RX PLL too */ | 2590 | if (is_edp) { |
2354 | temp = I915_READ(fdi_rx_reg); | 2591 | igdng_set_pll_edp(crtc, adjusted_mode->clock); |
2355 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | 2592 | } else { |
2356 | udelay(200); | 2593 | /* enable FDI RX PLL too */ |
2594 | temp = I915_READ(fdi_rx_reg); | ||
2595 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); | ||
2596 | udelay(200); | ||
2597 | } | ||
2357 | } | 2598 | } |
2358 | 2599 | ||
2359 | I915_WRITE(pipeconf_reg, pipeconf); | 2600 | I915_WRITE(pipeconf_reg, pipeconf); |
@@ -2951,12 +3192,17 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2951 | if (IS_IGDNG(dev)) { | 3192 | if (IS_IGDNG(dev)) { |
2952 | int found; | 3193 | int found; |
2953 | 3194 | ||
3195 | if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) | ||
3196 | intel_dp_init(dev, DP_A); | ||
3197 | |||
2954 | if (I915_READ(HDMIB) & PORT_DETECTED) { | 3198 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
2955 | /* check SDVOB */ | 3199 | /* check SDVOB */ |
2956 | /* found = intel_sdvo_init(dev, HDMIB); */ | 3200 | /* found = intel_sdvo_init(dev, HDMIB); */ |
2957 | found = 0; | 3201 | found = 0; |
2958 | if (!found) | 3202 | if (!found) |
2959 | intel_hdmi_init(dev, HDMIB); | 3203 | intel_hdmi_init(dev, HDMIB); |
3204 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) | ||
3205 | intel_dp_init(dev, PCH_DP_B); | ||
2960 | } | 3206 | } |
2961 | 3207 | ||
2962 | if (I915_READ(HDMIC) & PORT_DETECTED) | 3208 | if (I915_READ(HDMIC) & PORT_DETECTED) |
@@ -2965,6 +3211,12 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
2965 | if (I915_READ(HDMID) & PORT_DETECTED) | 3211 | if (I915_READ(HDMID) & PORT_DETECTED) |
2966 | intel_hdmi_init(dev, HDMID); | 3212 | intel_hdmi_init(dev, HDMID); |
2967 | 3213 | ||
3214 | if (I915_READ(PCH_DP_C) & DP_DETECTED) | ||
3215 | intel_dp_init(dev, PCH_DP_C); | ||
3216 | |||
3217 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | ||
3218 | intel_dp_init(dev, PCH_DP_D); | ||
3219 | |||
2968 | } else if (IS_I9XX(dev)) { | 3220 | } else if (IS_I9XX(dev)) { |
2969 | int found; | 3221 | int found; |
2970 | u32 reg; | 3222 | u32 reg; |
@@ -3039,6 +3291,10 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
3039 | (1 << 1)); | 3291 | (1 << 1)); |
3040 | clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); | 3292 | clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); |
3041 | break; | 3293 | break; |
3294 | case INTEL_OUTPUT_EDP: | ||
3295 | crtc_mask = (1 << 1); | ||
3296 | clone_mask = (1 << INTEL_OUTPUT_EDP); | ||
3297 | break; | ||
3042 | } | 3298 | } |
3043 | encoder->possible_crtcs = crtc_mask; | 3299 | encoder->possible_crtcs = crtc_mask; |
3044 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); | 3300 | encoder->possible_clones = intel_connector_clones(dev, clone_mask); |
@@ -3148,6 +3404,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
3148 | if (IS_I965G(dev)) { | 3404 | if (IS_I965G(dev)) { |
3149 | dev->mode_config.max_width = 8192; | 3405 | dev->mode_config.max_width = 8192; |
3150 | dev->mode_config.max_height = 8192; | 3406 | dev->mode_config.max_height = 8192; |
3407 | } else if (IS_I9XX(dev)) { | ||
3408 | dev->mode_config.max_width = 4096; | ||
3409 | dev->mode_config.max_height = 4096; | ||
3151 | } else { | 3410 | } else { |
3152 | dev->mode_config.max_width = 2048; | 3411 | dev->mode_config.max_width = 2048; |
3153 | dev->mode_config.max_height = 2048; | 3412 | dev->mode_config.max_height = 2048; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6770ae88370d..a6ff15ac548a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -40,6 +40,8 @@ | |||
40 | 40 | ||
41 | #define DP_LINK_CONFIGURATION_SIZE 9 | 41 | #define DP_LINK_CONFIGURATION_SIZE 9 |
42 | 42 | ||
43 | #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) | ||
44 | |||
43 | struct intel_dp_priv { | 45 | struct intel_dp_priv { |
44 | uint32_t output_reg; | 46 | uint32_t output_reg; |
45 | uint32_t DP; | 47 | uint32_t DP; |
@@ -63,6 +65,19 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, | |||
63 | static void | 65 | static void |
64 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); | 66 | intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); |
65 | 67 | ||
68 | void | ||
69 | intel_edp_link_config (struct intel_output *intel_output, | ||
70 | int *lane_num, int *link_bw) | ||
71 | { | ||
72 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
73 | |||
74 | *lane_num = dp_priv->lane_count; | ||
75 | if (dp_priv->link_bw == DP_LINK_BW_1_62) | ||
76 | *link_bw = 162000; | ||
77 | else if (dp_priv->link_bw == DP_LINK_BW_2_7) | ||
78 | *link_bw = 270000; | ||
79 | } | ||
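intel_edp_link_config() maps the panel's stored link-rate code to a kHz figure for the M/N computation in the mode-set path. A small sketch of that mapping; the 0x06/0x0a values are the standard DPCD LINK_BW_SET encodings and are used here as an assumption rather than taken from the patch:

#include <stdio.h>

#define SKETCH_DP_LINK_BW_1_62  0x06    /* 1.62 GHz symbol rate (assumed) */
#define SKETCH_DP_LINK_BW_2_7   0x0a    /* 2.7 GHz symbol rate (assumed) */

static int link_bw_khz(unsigned char bw_set)
{
        if (bw_set == SKETCH_DP_LINK_BW_1_62)
                return 162000;
        if (bw_set == SKETCH_DP_LINK_BW_2_7)
                return 270000;
        return 0;
}

int main(void)
{
        /* e.g. an eDP panel trained at the higher rate */
        printf("%d kHz per lane\n", link_bw_khz(SKETCH_DP_LINK_BW_2_7));
        return 0;
}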
80 | |||
66 | static int | 81 | static int |
67 | intel_dp_max_lane_count(struct intel_output *intel_output) | 82 | intel_dp_max_lane_count(struct intel_output *intel_output) |
68 | { | 83 | { |
@@ -206,7 +221,13 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
206 | * and would like to run at 2MHz. So, take the | 221 | * and would like to run at 2MHz. So, take the |
207 | * hrawclk value and divide by 2 and use that | 222 | * hrawclk value and divide by 2 and use that |
208 | */ | 223 | */ |
209 | aux_clock_divider = intel_hrawclk(dev) / 2; | 224 | if (IS_eDP(intel_output)) |
225 | aux_clock_divider = 225; /* eDP input clock at 450MHz */ | ||
226 | else if (IS_IGDNG(dev)) | ||
227 | aux_clock_divider = 62; /* IGDNG: input clock fixed at 125MHz */ | ||
228 | else | ||
229 | aux_clock_divider = intel_hrawclk(dev) / 2; | ||
230 | |||
210 | /* Must try at least 3 times according to DP spec */ | 231 | /* Must try at least 3 times according to DP spec */ |
211 | for (try = 0; try < 5; try++) { | 232 | for (try = 0; try < 5; try++) { |
212 | /* Load the send data into the aux channel data registers */ | 233 | /* Load the send data into the aux channel data registers */ |
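The comment above says the AUX channel should run near 2 MHz, so the divider is roughly the source clock in MHz divided by two; the new fixed values follow from the 450 MHz eDP and 125 MHz IGDNG input clocks quoted in the patch. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
        int edp_src_mhz = 450;  /* eDP input clock per the comment above */
        int pch_src_mhz = 125;  /* IGDNG fixed input clock per the comment */

        printf("eDP divider ~ %d (patch uses 225)\n", edp_src_mhz / 2);
        printf("PCH divider ~ %d (patch uses 62)\n", pch_src_mhz / 2);
        return 0;
}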
@@ -236,7 +257,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, | |||
236 | } | 257 | } |
237 | 258 | ||
238 | /* Clear done status and any errors */ | 259 | /* Clear done status and any errors */ |
239 | I915_WRITE(ch_ctl, (ctl | | 260 | I915_WRITE(ch_ctl, (status | |
240 | DP_AUX_CH_CTL_DONE | | 261 | DP_AUX_CH_CTL_DONE | |
241 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 262 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
242 | DP_AUX_CH_CTL_RECEIVE_ERROR)); | 263 | DP_AUX_CH_CTL_RECEIVE_ERROR)); |
@@ -295,7 +316,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output, | |||
295 | return -1; | 316 | return -1; |
296 | msg[0] = AUX_NATIVE_WRITE << 4; | 317 | msg[0] = AUX_NATIVE_WRITE << 4; |
297 | msg[1] = address >> 8; | 318 | msg[1] = address >> 8; |
298 | msg[2] = address; | 319 | msg[2] = address & 0xff; |
299 | msg[3] = send_bytes - 1; | 320 | msg[3] = send_bytes - 1; |
300 | memcpy(&msg[4], send, send_bytes); | 321 | memcpy(&msg[4], send, send_bytes); |
301 | msg_bytes = send_bytes + 4; | 322 | msg_bytes = send_bytes + 4; |
@@ -387,8 +408,8 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name) | |||
387 | memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); | 408 | memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); |
388 | dp_priv->adapter.owner = THIS_MODULE; | 409 | dp_priv->adapter.owner = THIS_MODULE; |
389 | dp_priv->adapter.class = I2C_CLASS_DDC; | 410 | dp_priv->adapter.class = I2C_CLASS_DDC; |
390 | strncpy (dp_priv->adapter.name, name, sizeof dp_priv->adapter.name - 1); | 411 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); |
391 | dp_priv->adapter.name[sizeof dp_priv->adapter.name - 1] = '\0'; | 412 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; |
392 | dp_priv->adapter.algo_data = &dp_priv->algo; | 413 | dp_priv->adapter.algo_data = &dp_priv->algo; |
393 | dp_priv->adapter.dev.parent = &intel_output->base.kdev; | 414 | dp_priv->adapter.dev.parent = &intel_output->base.kdev; |
394 | 415 | ||
@@ -493,22 +514,40 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
493 | intel_dp_compute_m_n(3, lane_count, | 514 | intel_dp_compute_m_n(3, lane_count, |
494 | mode->clock, adjusted_mode->clock, &m_n); | 515 | mode->clock, adjusted_mode->clock, &m_n); |
495 | 516 | ||
496 | if (intel_crtc->pipe == 0) { | 517 | if (IS_IGDNG(dev)) { |
497 | I915_WRITE(PIPEA_GMCH_DATA_M, | 518 | if (intel_crtc->pipe == 0) { |
498 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 519 | I915_WRITE(TRANSA_DATA_M1, |
499 | m_n.gmch_m); | 520 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
500 | I915_WRITE(PIPEA_GMCH_DATA_N, | 521 | m_n.gmch_m); |
501 | m_n.gmch_n); | 522 | I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); |
502 | I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); | 523 | I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); |
503 | I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); | 524 | I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n); |
525 | } else { | ||
526 | I915_WRITE(TRANSB_DATA_M1, | ||
527 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
528 | m_n.gmch_m); | ||
529 | I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n); | ||
530 | I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m); | ||
531 | I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n); | ||
532 | } | ||
504 | } else { | 533 | } else { |
505 | I915_WRITE(PIPEB_GMCH_DATA_M, | 534 | if (intel_crtc->pipe == 0) { |
506 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | 535 | I915_WRITE(PIPEA_GMCH_DATA_M, |
507 | m_n.gmch_m); | 536 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | |
508 | I915_WRITE(PIPEB_GMCH_DATA_N, | 537 | m_n.gmch_m); |
509 | m_n.gmch_n); | 538 | I915_WRITE(PIPEA_GMCH_DATA_N, |
510 | I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); | 539 | m_n.gmch_n); |
511 | I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); | 540 | I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); |
541 | I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); | ||
542 | } else { | ||
543 | I915_WRITE(PIPEB_GMCH_DATA_M, | ||
544 | ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | | ||
545 | m_n.gmch_m); | ||
546 | I915_WRITE(PIPEB_GMCH_DATA_N, | ||
547 | m_n.gmch_n); | ||
548 | I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); | ||
549 | I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); | ||
550 | } | ||
512 | } | 551 | } |
513 | } | 552 | } |
514 | 553 | ||
@@ -556,8 +595,38 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
556 | 595 | ||
557 | if (intel_crtc->pipe == 1) | 596 | if (intel_crtc->pipe == 1) |
558 | dp_priv->DP |= DP_PIPEB_SELECT; | 597 | dp_priv->DP |= DP_PIPEB_SELECT; |
598 | |||
599 | if (IS_eDP(intel_output)) { | ||
600 | /* don't miss the required PLL settings for eDP */ | ||
601 | dp_priv->DP |= DP_PLL_ENABLE; | ||
602 | if (adjusted_mode->clock < 200000) | ||
603 | dp_priv->DP |= DP_PLL_FREQ_160MHZ; | ||
604 | else | ||
605 | dp_priv->DP |= DP_PLL_FREQ_270MHZ; | ||
606 | } | ||
559 | } | 607 | } |
560 | 608 | ||
609 | static void igdng_edp_backlight_on (struct drm_device *dev) | ||
610 | { | ||
611 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
612 | u32 pp; | ||
613 | |||
614 | DRM_DEBUG("\n"); | ||
615 | pp = I915_READ(PCH_PP_CONTROL); | ||
616 | pp |= EDP_BLC_ENABLE; | ||
617 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
618 | } | ||
619 | |||
620 | static void igdng_edp_backlight_off (struct drm_device *dev) | ||
621 | { | ||
622 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
623 | u32 pp; | ||
624 | |||
625 | DRM_DEBUG("\n"); | ||
626 | pp = I915_READ(PCH_PP_CONTROL); | ||
627 | pp &= ~EDP_BLC_ENABLE; | ||
628 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
629 | } | ||
561 | 630 | ||
562 | static void | 631 | static void |
563 | intel_dp_dpms(struct drm_encoder *encoder, int mode) | 632 | intel_dp_dpms(struct drm_encoder *encoder, int mode) |
@@ -569,11 +638,17 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
569 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | 638 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); |
570 | 639 | ||
571 | if (mode != DRM_MODE_DPMS_ON) { | 640 | if (mode != DRM_MODE_DPMS_ON) { |
572 | if (dp_reg & DP_PORT_EN) | 641 | if (dp_reg & DP_PORT_EN) { |
573 | intel_dp_link_down(intel_output, dp_priv->DP); | 642 | intel_dp_link_down(intel_output, dp_priv->DP); |
643 | if (IS_eDP(intel_output)) | ||
644 | igdng_edp_backlight_off(dev); | ||
645 | } | ||
574 | } else { | 646 | } else { |
575 | if (!(dp_reg & DP_PORT_EN)) | 647 | if (!(dp_reg & DP_PORT_EN)) { |
576 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 648 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); |
649 | if (IS_eDP(intel_output)) | ||
650 | igdng_edp_backlight_on(dev); | ||
651 | } | ||
577 | } | 652 | } |
578 | dp_priv->dpms_mode = mode; | 653 | dp_priv->dpms_mode = mode; |
579 | } | 654 | } |
@@ -935,6 +1010,23 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) | |||
935 | struct drm_i915_private *dev_priv = dev->dev_private; | 1010 | struct drm_i915_private *dev_priv = dev->dev_private; |
936 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | 1011 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; |
937 | 1012 | ||
1013 | DRM_DEBUG("\n"); | ||
1014 | |||
1015 | if (IS_eDP(intel_output)) { | ||
1016 | DP &= ~DP_PLL_ENABLE; | ||
1017 | I915_WRITE(dp_priv->output_reg, DP); | ||
1018 | POSTING_READ(dp_priv->output_reg); | ||
1019 | udelay(100); | ||
1020 | } | ||
1021 | |||
1022 | DP &= ~DP_LINK_TRAIN_MASK; | ||
1023 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); | ||
1024 | POSTING_READ(dp_priv->output_reg); | ||
1025 | |||
1026 | udelay(17000); | ||
1027 | |||
1028 | if (IS_eDP(intel_output)) | ||
1029 | DP |= DP_LINK_TRAIN_OFF; | ||
938 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); | 1030 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); |
939 | POSTING_READ(dp_priv->output_reg); | 1031 | POSTING_READ(dp_priv->output_reg); |
940 | } | 1032 | } |
@@ -978,6 +1070,24 @@ intel_dp_check_link_status(struct intel_output *intel_output) | |||
978 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); | 1070 | intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); |
979 | } | 1071 | } |
980 | 1072 | ||
1073 | static enum drm_connector_status | ||
1074 | igdng_dp_detect(struct drm_connector *connector) | ||
1075 | { | ||
1076 | struct intel_output *intel_output = to_intel_output(connector); | ||
1077 | struct intel_dp_priv *dp_priv = intel_output->dev_priv; | ||
1078 | enum drm_connector_status status; | ||
1079 | |||
1080 | status = connector_status_disconnected; | ||
1081 | if (intel_dp_aux_native_read(intel_output, | ||
1082 | 0x000, dp_priv->dpcd, | ||
1083 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | ||
1084 | { | ||
1085 | if (dp_priv->dpcd[0] != 0) | ||
1086 | status = connector_status_connected; | ||
1087 | } | ||
1088 | return status; | ||
1089 | } | ||
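igdng_dp_detect() above decides connection state purely from an AUX read of the start of the DPCD: if the transfer completes and byte 0 (the DPCD revision) is non-zero, a sink answered. A standalone sketch of that idea, with a fabricated AUX-read callback standing in for intel_dp_aux_native_read():

#include <stdio.h>

typedef int (*aux_read_fn)(unsigned addr, unsigned char *buf, int len);

/* Fabricated stand-in: pretend a sink with DPCD revision 1.1 answered. */
static int fake_aux_read(unsigned addr, unsigned char *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                buf[i] = 0;
        if (addr == 0x000 && len > 0)
                buf[0] = 0x11;
        return len;
}

static int dp_sink_present(aux_read_fn aux_read)
{
        unsigned char dpcd[4];

        if (aux_read(0x000, dpcd, (int)sizeof(dpcd)) != (int)sizeof(dpcd))
                return 0;       /* AUX transfer failed: nothing connected */
        return dpcd[0] != 0;    /* non-zero DPCD revision: sink responded */
}

int main(void)
{
        printf("sink present: %d\n", dp_sink_present(fake_aux_read));
        return 0;
}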
1090 | |||
981 | /** | 1091 | /** |
982 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | 1092 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. |
983 | * | 1093 | * |
@@ -996,6 +1106,9 @@ intel_dp_detect(struct drm_connector *connector) | |||
996 | 1106 | ||
997 | dp_priv->has_audio = false; | 1107 | dp_priv->has_audio = false; |
998 | 1108 | ||
1109 | if (IS_IGDNG(dev)) | ||
1110 | return igdng_dp_detect(connector); | ||
1111 | |||
999 | temp = I915_READ(PORT_HOTPLUG_EN); | 1112 | temp = I915_READ(PORT_HOTPLUG_EN); |
1000 | 1113 | ||
1001 | I915_WRITE(PORT_HOTPLUG_EN, | 1114 | I915_WRITE(PORT_HOTPLUG_EN, |
@@ -1039,11 +1152,27 @@ intel_dp_detect(struct drm_connector *connector) | |||
1039 | static int intel_dp_get_modes(struct drm_connector *connector) | 1152 | static int intel_dp_get_modes(struct drm_connector *connector) |
1040 | { | 1153 | { |
1041 | struct intel_output *intel_output = to_intel_output(connector); | 1154 | struct intel_output *intel_output = to_intel_output(connector); |
1155 | struct drm_device *dev = intel_output->base.dev; | ||
1156 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1157 | int ret; | ||
1042 | 1158 | ||
1043 | /* We should parse the EDID data and find out if it has an audio sink | 1159 | /* We should parse the EDID data and find out if it has an audio sink |
1044 | */ | 1160 | */ |
1045 | 1161 | ||
1046 | return intel_ddc_get_modes(intel_output); | 1162 | ret = intel_ddc_get_modes(intel_output); |
1163 | if (ret) | ||
1164 | return ret; | ||
1165 | |||
1166 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | ||
1167 | if (IS_eDP(intel_output)) { | ||
1168 | if (dev_priv->panel_fixed_mode != NULL) { | ||
1169 | struct drm_display_mode *mode; | ||
1170 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | ||
1171 | drm_mode_probed_add(connector, mode); | ||
1172 | return 1; | ||
1173 | } | ||
1174 | } | ||
1175 | return 0; | ||
1047 | } | 1176 | } |
1048 | 1177 | ||
1049 | static void | 1178 | static void |
@@ -1106,6 +1235,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1106 | struct drm_connector *connector; | 1235 | struct drm_connector *connector; |
1107 | struct intel_output *intel_output; | 1236 | struct intel_output *intel_output; |
1108 | struct intel_dp_priv *dp_priv; | 1237 | struct intel_dp_priv *dp_priv; |
1238 | const char *name = NULL; | ||
1109 | 1239 | ||
1110 | intel_output = kcalloc(sizeof(struct intel_output) + | 1240 | intel_output = kcalloc(sizeof(struct intel_output) + |
1111 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1241 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
@@ -1119,7 +1249,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1119 | DRM_MODE_CONNECTOR_DisplayPort); | 1249 | DRM_MODE_CONNECTOR_DisplayPort); |
1120 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 1250 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
1121 | 1251 | ||
1122 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | 1252 | if (output_reg == DP_A) |
1253 | intel_output->type = INTEL_OUTPUT_EDP; | ||
1254 | else | ||
1255 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | ||
1123 | 1256 | ||
1124 | connector->interlace_allowed = true; | 1257 | connector->interlace_allowed = true; |
1125 | connector->doublescan_allowed = 0; | 1258 | connector->doublescan_allowed = 0; |
@@ -1139,12 +1272,41 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1139 | drm_sysfs_connector_add(connector); | 1272 | drm_sysfs_connector_add(connector); |
1140 | 1273 | ||
1141 | /* Set up the DDC bus. */ | 1274 | /* Set up the DDC bus. */ |
1142 | intel_dp_i2c_init(intel_output, | 1275 | switch (output_reg) { |
1143 | (output_reg == DP_B) ? "DPDDC-B" : | 1276 | case DP_A: |
1144 | (output_reg == DP_C) ? "DPDDC-C" : "DPDDC-D"); | 1277 | name = "DPDDC-A"; |
1278 | break; | ||
1279 | case DP_B: | ||
1280 | case PCH_DP_B: | ||
1281 | name = "DPDDC-B"; | ||
1282 | break; | ||
1283 | case DP_C: | ||
1284 | case PCH_DP_C: | ||
1285 | name = "DPDDC-C"; | ||
1286 | break; | ||
1287 | case DP_D: | ||
1288 | case PCH_DP_D: | ||
1289 | name = "DPDDC-D"; | ||
1290 | break; | ||
1291 | } | ||
1292 | |||
1293 | intel_dp_i2c_init(intel_output, name); | ||
1294 | |||
1145 | intel_output->ddc_bus = &dp_priv->adapter; | 1295 | intel_output->ddc_bus = &dp_priv->adapter; |
1146 | intel_output->hot_plug = intel_dp_hot_plug; | 1296 | intel_output->hot_plug = intel_dp_hot_plug; |
1147 | 1297 | ||
1298 | if (output_reg == DP_A) { | ||
1299 | /* initialize panel mode from VBT if available for eDP */ | ||
1300 | if (dev_priv->lfp_lvds_vbt_mode) { | ||
1301 | dev_priv->panel_fixed_mode = | ||
1302 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | ||
1303 | if (dev_priv->panel_fixed_mode) { | ||
1304 | dev_priv->panel_fixed_mode->type |= | ||
1305 | DRM_MODE_TYPE_PREFERRED; | ||
1306 | } | ||
1307 | } | ||
1308 | } | ||
1309 | |||
1148 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written | 1310 | /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written |
1149 | * 0xd. Failure to do so will result in spurious interrupts being | 1311 | * 0xd. Failure to do so will result in spurious interrupts being |
1150 | * generated on the port when a cable is not attached. | 1312 | * generated on the port when a cable is not attached. |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 004541c935a8..d6f92ea1b553 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -55,6 +55,7 @@ | |||
55 | #define INTEL_OUTPUT_TVOUT 5 | 55 | #define INTEL_OUTPUT_TVOUT 5 |
56 | #define INTEL_OUTPUT_HDMI 6 | 56 | #define INTEL_OUTPUT_HDMI 6 |
57 | #define INTEL_OUTPUT_DISPLAYPORT 7 | 57 | #define INTEL_OUTPUT_DISPLAYPORT 7 |
58 | #define INTEL_OUTPUT_EDP 8 | ||
58 | 59 | ||
59 | #define INTEL_DVO_CHIP_NONE 0 | 60 | #define INTEL_DVO_CHIP_NONE 0 |
60 | #define INTEL_DVO_CHIP_LVDS 1 | 61 | #define INTEL_DVO_CHIP_LVDS 1 |
@@ -121,6 +122,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
121 | void | 122 | void |
122 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 123 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
123 | struct drm_display_mode *adjusted_mode); | 124 | struct drm_display_mode *adjusted_mode); |
125 | extern void intel_edp_link_config (struct intel_output *, int *, int *); | ||
126 | |||
124 | 127 | ||
125 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 128 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
126 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 129 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9e30daae37dc..1842290cded3 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -130,16 +130,17 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, | |||
130 | } | 130 | } |
131 | 131 | ||
132 | static enum drm_connector_status | 132 | static enum drm_connector_status |
133 | intel_hdmi_edid_detect(struct drm_connector *connector) | 133 | intel_hdmi_detect(struct drm_connector *connector) |
134 | { | 134 | { |
135 | struct intel_output *intel_output = to_intel_output(connector); | 135 | struct intel_output *intel_output = to_intel_output(connector); |
136 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | 136 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; |
137 | struct edid *edid = NULL; | 137 | struct edid *edid = NULL; |
138 | enum drm_connector_status status = connector_status_disconnected; | 138 | enum drm_connector_status status = connector_status_disconnected; |
139 | 139 | ||
140 | hdmi_priv->has_hdmi_sink = false; | ||
140 | edid = drm_get_edid(&intel_output->base, | 141 | edid = drm_get_edid(&intel_output->base, |
141 | intel_output->ddc_bus); | 142 | intel_output->ddc_bus); |
142 | hdmi_priv->has_hdmi_sink = false; | 143 | |
143 | if (edid) { | 144 | if (edid) { |
144 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 145 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
145 | status = connector_status_connected; | 146 | status = connector_status_connected; |
@@ -148,65 +149,8 @@ intel_hdmi_edid_detect(struct drm_connector *connector) | |||
148 | intel_output->base.display_info.raw_edid = NULL; | 149 | intel_output->base.display_info.raw_edid = NULL; |
149 | kfree(edid); | 150 | kfree(edid); |
150 | } | 151 | } |
151 | return status; | ||
152 | } | ||
153 | |||
154 | static enum drm_connector_status | ||
155 | igdng_hdmi_detect(struct drm_connector *connector) | ||
156 | { | ||
157 | struct intel_output *intel_output = to_intel_output(connector); | ||
158 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
159 | |||
160 | /* FIXME hotplug detect */ | ||
161 | |||
162 | hdmi_priv->has_hdmi_sink = false; | ||
163 | return intel_hdmi_edid_detect(connector); | ||
164 | } | ||
165 | 152 | ||
166 | static enum drm_connector_status | 153 | return status; |
167 | intel_hdmi_detect(struct drm_connector *connector) | ||
168 | { | ||
169 | struct drm_device *dev = connector->dev; | ||
170 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
171 | struct intel_output *intel_output = to_intel_output(connector); | ||
172 | struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; | ||
173 | u32 temp, bit; | ||
174 | |||
175 | if (IS_IGDNG(dev)) | ||
176 | return igdng_hdmi_detect(connector); | ||
177 | |||
178 | temp = I915_READ(PORT_HOTPLUG_EN); | ||
179 | |||
180 | switch (hdmi_priv->sdvox_reg) { | ||
181 | case SDVOB: | ||
182 | temp |= HDMIB_HOTPLUG_INT_EN; | ||
183 | break; | ||
184 | case SDVOC: | ||
185 | temp |= HDMIC_HOTPLUG_INT_EN; | ||
186 | break; | ||
187 | default: | ||
188 | return connector_status_unknown; | ||
189 | } | ||
190 | |||
191 | I915_WRITE(PORT_HOTPLUG_EN, temp); | ||
192 | |||
193 | POSTING_READ(PORT_HOTPLUG_EN); | ||
194 | |||
195 | switch (hdmi_priv->sdvox_reg) { | ||
196 | case SDVOB: | ||
197 | bit = HDMIB_HOTPLUG_INT_STATUS; | ||
198 | break; | ||
199 | case SDVOC: | ||
200 | bit = HDMIC_HOTPLUG_INT_STATUS; | ||
201 | break; | ||
202 | default: | ||
203 | return connector_status_unknown; | ||
204 | } | ||
205 | |||
206 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) | ||
207 | return intel_hdmi_edid_detect(connector); | ||
208 | else | ||
209 | return connector_status_disconnected; | ||
210 | } | 154 | } |
211 | 155 | ||
212 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 156 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
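The hunk above drops the hotplug-register polling (igdng_hdmi_detect and the SDVOB/SDVOC interrupt plumbing) and leaves EDID probing as the only detect path. A minimal sketch of the flow that remains, assuming the usual drm_get_edid()/drm_detect_hdmi_monitor() helpers; illustrative only, not the verbatim driver code:

	/* Sketch: report connected only if a digital sink answers on the DDC bus. */
	static enum drm_connector_status example_hdmi_detect(struct drm_connector *connector)
	{
		struct intel_output *intel_output = to_intel_output(connector);
		struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
		enum drm_connector_status status = connector_status_disconnected;
		struct edid *edid;

		hdmi_priv->has_hdmi_sink = false;	/* reset before every probe */
		edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
		if (edid) {
			if (edid->input & DRM_EDID_INPUT_DIGITAL) {
				status = connector_status_connected;
				hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
			}
			intel_output->base.display_info.raw_edid = NULL;
			kfree(edid);
		}
		return status;
	}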
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 9ab38efffecf..3f445a80c552 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -780,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
780 | }, | 780 | }, |
781 | { | 781 | { |
782 | .callback = intel_no_lvds_dmi_callback, | 782 | .callback = intel_no_lvds_dmi_callback, |
783 | .ident = "AOpen Mini PC MP915", | ||
784 | .matches = { | ||
785 | DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), | ||
786 | DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), | ||
787 | }, | ||
788 | }, | ||
789 | { | ||
790 | .callback = intel_no_lvds_dmi_callback, | ||
783 | .ident = "Aopen i945GTt-VFA", | 791 | .ident = "Aopen i945GTt-VFA", |
784 | .matches = { | 792 | .matches = { |
785 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), | 793 | DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), |
@@ -884,6 +892,10 @@ void intel_lvds_init(struct drm_device *dev) | |||
884 | if (IS_IGDNG(dev)) { | 892 | if (IS_IGDNG(dev)) { |
885 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 893 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) |
886 | return; | 894 | return; |
895 | if (dev_priv->edp_support) { | ||
896 | DRM_DEBUG("disable LVDS for eDP support\n"); | ||
897 | return; | ||
898 | } | ||
887 | gpio = PCH_GPIOC; | 899 | gpio = PCH_GPIOC; |
888 | } | 900 | } |
889 | 901 | ||
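For context, the intel_no_lvds[] table extended above is a DMI blacklist; intel_lvds_init() consumes it through dmi_check_system(), roughly along these lines (hedged sketch, not the exact driver code):

	static int example_no_lvds_callback(const struct dmi_system_id *id)
	{
		DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident);
		return 1;	/* non-zero: this machine is blacklisted */
	}

	/* in intel_lvds_init(): bail out before registering the LVDS connector */
	if (dmi_check_system(intel_no_lvds))
		return;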
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 4f0c30948bc4..5371d9332554 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "drm.h" | 31 | #include "drm.h" |
32 | #include "drm_crtc.h" | 32 | #include "drm_crtc.h" |
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include "drm_edid.h" | ||
34 | #include "i915_drm.h" | 35 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
36 | #include "intel_sdvo_regs.h" | 37 | #include "intel_sdvo_regs.h" |
@@ -55,6 +56,12 @@ struct intel_sdvo_priv { | |||
55 | /* Pixel clock limitations reported by the SDVO device, in kHz */ | 56 | /* Pixel clock limitations reported by the SDVO device, in kHz */ |
56 | int pixel_clock_min, pixel_clock_max; | 57 | int pixel_clock_min, pixel_clock_max; |
57 | 58 | ||
59 | /* | ||
60 | * For multiple function SDVO device, | ||
61 | * this is for current attached outputs. | ||
62 | */ | ||
63 | uint16_t attached_output; | ||
64 | |||
58 | /** | 65 | /** |
59 | * This is set if we're going to treat the device as TV-out. | 66 | * This is set if we're going to treat the device as TV-out. |
60 | * | 67 | * |
@@ -114,6 +121,9 @@ struct intel_sdvo_priv { | |||
114 | u32 save_SDVOX; | 121 | u32 save_SDVOX; |
115 | }; | 122 | }; |
116 | 123 | ||
124 | static bool | ||
125 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); | ||
126 | |||
117 | /** | 127 | /** |
118 | * Writes the SDVOB or SDVOC with the given value, but always writes both | 128 | * Writes the SDVOB or SDVOC with the given value, but always writes both |
119 | * SDVOB and SDVOC to work around apparent hardware issues (according to | 129 | * SDVOB and SDVOC to work around apparent hardware issues (according to |
@@ -1435,41 +1445,96 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | |||
1435 | intel_sdvo_read_response(intel_output, &response, 2); | 1445 | intel_sdvo_read_response(intel_output, &response, 2); |
1436 | } | 1446 | } |
1437 | 1447 | ||
1438 | static void | 1448 | static bool |
1439 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | 1449 | intel_sdvo_multifunc_encoder(struct intel_output *intel_output) |
1450 | { | ||
1451 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1452 | int caps = 0; | ||
1453 | |||
1454 | if (sdvo_priv->caps.output_flags & | ||
1455 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | ||
1456 | caps++; | ||
1457 | if (sdvo_priv->caps.output_flags & | ||
1458 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) | ||
1459 | caps++; | ||
1460 | if (sdvo_priv->caps.output_flags & | ||
1461 | 	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) | ||
1462 | caps++; | ||
1463 | if (sdvo_priv->caps.output_flags & | ||
1464 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) | ||
1465 | caps++; | ||
1466 | if (sdvo_priv->caps.output_flags & | ||
1467 | (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) | ||
1468 | caps++; | ||
1469 | |||
1470 | if (sdvo_priv->caps.output_flags & | ||
1471 | (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) | ||
1472 | caps++; | ||
1473 | |||
1474 | if (sdvo_priv->caps.output_flags & | ||
1475 | (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) | ||
1476 | caps++; | ||
1477 | |||
1478 | return (caps > 1); | ||
1479 | } | ||
1480 | |||
1481 | enum drm_connector_status | ||
1482 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | ||
1440 | { | 1483 | { |
1441 | struct intel_output *intel_output = to_intel_output(connector); | 1484 | struct intel_output *intel_output = to_intel_output(connector); |
1442 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | 1485 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; |
1486 | enum drm_connector_status status = connector_status_connected; | ||
1443 | struct edid *edid = NULL; | 1487 | struct edid *edid = NULL; |
1444 | 1488 | ||
1445 | edid = drm_get_edid(&intel_output->base, | 1489 | edid = drm_get_edid(&intel_output->base, |
1446 | intel_output->ddc_bus); | 1490 | intel_output->ddc_bus); |
1447 | if (edid != NULL) { | 1491 | if (edid != NULL) { |
1448 | sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); | 1492 | /* Don't report the output as connected if it's a DVI-I |
1493 | * connector with a non-digital EDID coming out. | ||
1494 | */ | ||
1495 | if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | ||
1496 | if (edid->input & DRM_EDID_INPUT_DIGITAL) | ||
1497 | sdvo_priv->is_hdmi = | ||
1498 | drm_detect_hdmi_monitor(edid); | ||
1499 | else | ||
1500 | status = connector_status_disconnected; | ||
1501 | } | ||
1502 | |||
1449 | kfree(edid); | 1503 | kfree(edid); |
1450 | intel_output->base.display_info.raw_edid = NULL; | 1504 | intel_output->base.display_info.raw_edid = NULL; |
1451 | } | 1505 | |
1506 | } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | ||
1507 | status = connector_status_disconnected; | ||
1508 | |||
1509 | return status; | ||
1452 | } | 1510 | } |
1453 | 1511 | ||
1454 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) | 1512 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) |
1455 | { | 1513 | { |
1456 | u8 response[2]; | 1514 | uint16_t response; |
1457 | u8 status; | 1515 | u8 status; |
1458 | struct intel_output *intel_output = to_intel_output(connector); | 1516 | struct intel_output *intel_output = to_intel_output(connector); |
1517 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1459 | 1518 | ||
1460 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1519 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); |
1461 | status = intel_sdvo_read_response(intel_output, &response, 2); | 1520 | status = intel_sdvo_read_response(intel_output, &response, 2); |
1462 | 1521 | ||
1463 | DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); | 1522 | DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8); |
1464 | 1523 | ||
1465 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1524 | if (status != SDVO_CMD_STATUS_SUCCESS) |
1466 | return connector_status_unknown; | 1525 | return connector_status_unknown; |
1467 | 1526 | ||
1468 | if ((response[0] != 0) || (response[1] != 0)) { | 1527 | if (response == 0) |
1469 | intel_sdvo_hdmi_sink_detect(connector); | ||
1470 | return connector_status_connected; | ||
1471 | } else | ||
1472 | return connector_status_disconnected; | 1528 | return connector_status_disconnected; |
1529 | |||
1530 | if (intel_sdvo_multifunc_encoder(intel_output) && | ||
1531 | sdvo_priv->attached_output != response) { | ||
1532 | if (sdvo_priv->controlled_output != response && | ||
1533 | intel_sdvo_output_setup(intel_output, response) != true) | ||
1534 | return connector_status_unknown; | ||
1535 | sdvo_priv->attached_output = response; | ||
1536 | } | ||
1537 | return intel_sdvo_hdmi_sink_detect(connector, response); | ||
1473 | } | 1538 | } |
1474 | 1539 | ||
1475 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1540 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
@@ -1866,16 +1931,101 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) | |||
1866 | return 0x72; | 1931 | return 0x72; |
1867 | } | 1932 | } |
1868 | 1933 | ||
1934 | static bool | ||
1935 | intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | ||
1936 | { | ||
1937 | struct drm_connector *connector = &intel_output->base; | ||
1938 | struct drm_encoder *encoder = &intel_output->enc; | ||
1939 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
1940 | bool ret = true, registered = false; | ||
1941 | |||
1942 | sdvo_priv->is_tv = false; | ||
1943 | intel_output->needs_tv_clock = false; | ||
1944 | sdvo_priv->is_lvds = false; | ||
1945 | |||
1946 | if (device_is_registered(&connector->kdev)) { | ||
1947 | drm_sysfs_connector_remove(connector); | ||
1948 | registered = true; | ||
1949 | } | ||
1950 | |||
1951 | if (flags & | ||
1952 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | ||
1953 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | ||
1954 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; | ||
1955 | else | ||
1956 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; | ||
1957 | |||
1958 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | ||
1959 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | ||
1960 | |||
1961 | if (intel_sdvo_get_supp_encode(intel_output, | ||
1962 | &sdvo_priv->encode) && | ||
1963 | intel_sdvo_get_digital_encoding_mode(intel_output) && | ||
1964 | sdvo_priv->is_hdmi) { | ||
1965 | /* enable hdmi encoding mode if supported */ | ||
1966 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | ||
1967 | intel_sdvo_set_colorimetry(intel_output, | ||
1968 | SDVO_COLORIMETRY_RGB256); | ||
1969 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
1970 | } | ||
1971 | } else if (flags & SDVO_OUTPUT_SVID0) { | ||
1972 | |||
1973 | sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; | ||
1974 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
1975 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
1976 | sdvo_priv->is_tv = true; | ||
1977 | intel_output->needs_tv_clock = true; | ||
1978 | } else if (flags & SDVO_OUTPUT_RGB0) { | ||
1979 | |||
1980 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | ||
1981 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | ||
1982 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1983 | } else if (flags & SDVO_OUTPUT_RGB1) { | ||
1984 | |||
1985 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | ||
1986 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | ||
1987 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1988 | } else if (flags & SDVO_OUTPUT_LVDS0) { | ||
1989 | |||
1990 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | ||
1991 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1992 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1993 | sdvo_priv->is_lvds = true; | ||
1994 | } else if (flags & SDVO_OUTPUT_LVDS1) { | ||
1995 | |||
1996 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; | ||
1997 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1998 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1999 | sdvo_priv->is_lvds = true; | ||
2000 | } else { | ||
2001 | |||
2002 | unsigned char bytes[2]; | ||
2003 | |||
2004 | sdvo_priv->controlled_output = 0; | ||
2005 | memcpy(bytes, &sdvo_priv->caps.output_flags, 2); | ||
2006 | DRM_DEBUG_KMS(I915_SDVO, | ||
2007 | "%s: Unknown SDVO output type (0x%02x%02x)\n", | ||
2008 | SDVO_NAME(sdvo_priv), | ||
2009 | bytes[0], bytes[1]); | ||
2010 | ret = false; | ||
2011 | } | ||
2012 | |||
2013 | if (ret && registered) | ||
2014 | ret = drm_sysfs_connector_add(connector) == 0 ? true : false; | ||
2015 | |||
2016 | |||
2017 | return ret; | ||
2018 | |||
2019 | } | ||
2020 | |||
1869 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2021 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
1870 | { | 2022 | { |
1871 | struct drm_connector *connector; | 2023 | struct drm_connector *connector; |
1872 | struct intel_output *intel_output; | 2024 | struct intel_output *intel_output; |
1873 | struct intel_sdvo_priv *sdvo_priv; | 2025 | struct intel_sdvo_priv *sdvo_priv; |
1874 | 2026 | ||
1875 | int connector_type; | ||
1876 | u8 ch[0x40]; | 2027 | u8 ch[0x40]; |
1877 | int i; | 2028 | int i; |
1878 | int encoder_type; | ||
1879 | 2029 | ||
1880 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 2030 | intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); |
1881 | if (!intel_output) { | 2031 | if (!intel_output) { |
@@ -1925,88 +2075,28 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
1925 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; | 2075 | intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; |
1926 | 2076 | ||
1927 | /* In default case sdvo lvds is false */ | 2077 | /* In default case sdvo lvds is false */ |
1928 | sdvo_priv->is_lvds = false; | ||
1929 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); | 2078 | intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); |
1930 | 2079 | ||
1931 | if (sdvo_priv->caps.output_flags & | 2080 | if (intel_sdvo_output_setup(intel_output, |
1932 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { | 2081 | sdvo_priv->caps.output_flags) != true) { |
1933 | if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) | 2082 | DRM_DEBUG("SDVO output failed to setup on SDVO%c\n", |
1934 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; | 2083 | output_device == SDVOB ? 'B' : 'C'); |
1935 | else | ||
1936 | sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; | ||
1937 | |||
1938 | encoder_type = DRM_MODE_ENCODER_TMDS; | ||
1939 | connector_type = DRM_MODE_CONNECTOR_DVID; | ||
1940 | |||
1941 | if (intel_sdvo_get_supp_encode(intel_output, | ||
1942 | &sdvo_priv->encode) && | ||
1943 | intel_sdvo_get_digital_encoding_mode(intel_output) && | ||
1944 | sdvo_priv->is_hdmi) { | ||
1945 | /* enable hdmi encoding mode if supported */ | ||
1946 | intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); | ||
1947 | intel_sdvo_set_colorimetry(intel_output, | ||
1948 | SDVO_COLORIMETRY_RGB256); | ||
1949 | connector_type = DRM_MODE_CONNECTOR_HDMIA; | ||
1950 | } | ||
1951 | } | ||
1952 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) | ||
1953 | { | ||
1954 | sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; | ||
1955 | encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
1956 | connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
1957 | sdvo_priv->is_tv = true; | ||
1958 | intel_output->needs_tv_clock = true; | ||
1959 | } | ||
1960 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) | ||
1961 | { | ||
1962 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; | ||
1963 | encoder_type = DRM_MODE_ENCODER_DAC; | ||
1964 | connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1965 | } | ||
1966 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) | ||
1967 | { | ||
1968 | sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; | ||
1969 | encoder_type = DRM_MODE_ENCODER_DAC; | ||
1970 | connector_type = DRM_MODE_CONNECTOR_VGA; | ||
1971 | } | ||
1972 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) | ||
1973 | { | ||
1974 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | ||
1975 | encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1976 | connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1977 | sdvo_priv->is_lvds = true; | ||
1978 | } | ||
1979 | else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) | ||
1980 | { | ||
1981 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; | ||
1982 | encoder_type = DRM_MODE_ENCODER_LVDS; | ||
1983 | connector_type = DRM_MODE_CONNECTOR_LVDS; | ||
1984 | sdvo_priv->is_lvds = true; | ||
1985 | } | ||
1986 | else | ||
1987 | { | ||
1988 | unsigned char bytes[2]; | ||
1989 | |||
1990 | sdvo_priv->controlled_output = 0; | ||
1991 | memcpy (bytes, &sdvo_priv->caps.output_flags, 2); | ||
1992 | DRM_DEBUG_KMS(I915_SDVO, | ||
1993 | "%s: Unknown SDVO output type (0x%02x%02x)\n", | ||
1994 | SDVO_NAME(sdvo_priv), | ||
1995 | bytes[0], bytes[1]); | ||
1996 | encoder_type = DRM_MODE_ENCODER_NONE; | ||
1997 | connector_type = DRM_MODE_CONNECTOR_Unknown; | ||
1998 | goto err_i2c; | 2084 | goto err_i2c; |
1999 | } | 2085 | } |
2000 | 2086 | ||
2087 | |||
2001 | connector = &intel_output->base; | 2088 | connector = &intel_output->base; |
2002 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, | 2089 | drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, |
2003 | connector_type); | 2090 | connector->connector_type); |
2091 | |||
2004 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); | 2092 | drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); |
2005 | connector->interlace_allowed = 0; | 2093 | connector->interlace_allowed = 0; |
2006 | connector->doublescan_allowed = 0; | 2094 | connector->doublescan_allowed = 0; |
2007 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 2095 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
2008 | 2096 | ||
2009 | drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); | 2097 | drm_encoder_init(dev, &intel_output->enc, |
2098 | &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); | ||
2099 | |||
2010 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); | 2100 | drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); |
2011 | 2101 | ||
2012 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); | 2102 | drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); |
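The GET_ATTACHED_DISPLAYS response is now handled as a single 16-bit output bitmask, and intel_sdvo_multifunc_encoder() simply counts how many output classes the capability flags cover. A standalone illustration of that counting idiom; the mask values below are placeholders, not the real SDVO bit layout:

	#include <stdint.h>
	#include <stdio.h>

	#define TMDS_MASK  0x0005	/* hypothetical: TMDS0 | TMDS1 */
	#define RGB_MASK   0x0022	/* hypothetical: RGB0  | RGB1  */
	#define LVDS_MASK  0x00c0	/* hypothetical: LVDS0 | LVDS1 */

	static int output_classes(uint16_t flags)
	{
		int caps = 0;

		caps += !!(flags & TMDS_MASK);	/* each class counts once at most */
		caps += !!(flags & RGB_MASK);
		caps += !!(flags & LVDS_MASK);
		return caps;
	}

	int main(void)
	{
		/* one TMDS bit and one RGB bit set -> two classes -> multifunction */
		printf("%d classes\n", output_classes(0x0024));
		return 0;
	}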
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index a43c98e3f077..da4ab4dc1630 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1490,6 +1490,27 @@ static struct input_res { | |||
1490 | {"1920x1080", 1920, 1080}, | 1490 | {"1920x1080", 1920, 1080}, |
1491 | }; | 1491 | }; |
1492 | 1492 | ||
1493 | /* | ||
1494 | * Choose preferred mode according to line number of TV format | ||
1495 | */ | ||
1496 | static void | ||
1497 | intel_tv_chose_preferred_modes(struct drm_connector *connector, | ||
1498 | struct drm_display_mode *mode_ptr) | ||
1499 | { | ||
1500 | struct intel_output *intel_output = to_intel_output(connector); | ||
1501 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | ||
1502 | |||
1503 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | ||
1504 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | ||
1505 | else if (tv_mode->nbr_end > 480) { | ||
1506 | if (tv_mode->progressive == true && tv_mode->nbr_end < 720) { | ||
1507 | if (mode_ptr->vdisplay == 720) | ||
1508 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | ||
1509 | } else if (mode_ptr->vdisplay == 1080) | ||
1510 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | ||
1511 | } | ||
1512 | } | ||
1513 | |||
1493 | /** | 1514 | /** |
1494 | * Stub get_modes function. | 1515 | * Stub get_modes function. |
1495 | * | 1516 | * |
@@ -1544,6 +1565,7 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1544 | mode_ptr->clock = (int) tmp; | 1565 | mode_ptr->clock = (int) tmp; |
1545 | 1566 | ||
1546 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; | 1567 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; |
1568 | intel_tv_chose_preferred_modes(connector, mode_ptr); | ||
1547 | drm_mode_probed_add(connector, mode_ptr); | 1569 | drm_mode_probed_add(connector, mode_ptr); |
1548 | count++; | 1570 | count++; |
1549 | } | 1571 | } |
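intel_tv_chose_preferred_modes() keys the DRM_MODE_TYPE_PREFERRED flag on the TV format's last active line (nbr_end). The selection rule it implements can be restated as a small standalone function (illustrative only, not the driver's code):

	#include <assert.h>
	#include <stdbool.h>

	/* Returns the vdisplay that would be flagged preferred, or 0 if none. */
	static int preferred_vdisplay(int nbr_end, bool progressive)
	{
		if (nbr_end < 480)
			return 480;
		if (nbr_end > 480)
			return (progressive && nbr_end < 720) ? 720 : 1080;
		return 0;	/* exactly 480 active lines: nothing flagged */
	}

	int main(void)
	{
		assert(preferred_vdisplay(240, false) == 480);	/* NTSC-style formats       */
		assert(preferred_vdisplay(576, true)  == 720);	/* progressive, below 720   */
		assert(preferred_vdisplay(576, false) == 1080);	/* interlaced falls to 1080 */
		return 0;
	}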
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 05a44896dffb..f1ba8ff41130 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -722,13 +722,14 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
722 | unsigned idx) | 722 | unsigned idx) |
723 | { | 723 | { |
724 | struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; | 724 | struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
725 | uint32_t header = ib_chunk->kdata[idx]; | 725 | uint32_t header; |
726 | 726 | ||
727 | if (idx >= ib_chunk->length_dw) { | 727 | if (idx >= ib_chunk->length_dw) { |
728 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", | 728 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", |
729 | idx, ib_chunk->length_dw); | 729 | idx, ib_chunk->length_dw); |
730 | return -EINVAL; | 730 | return -EINVAL; |
731 | } | 731 | } |
732 | header = ib_chunk->kdata[idx]; | ||
732 | pkt->idx = idx; | 733 | pkt->idx = idx; |
733 | pkt->type = CP_PACKET_GET_TYPE(header); | 734 | pkt->type = CP_PACKET_GET_TYPE(header); |
734 | pkt->count = CP_PACKET_GET_COUNT(header); | 735 | pkt->count = CP_PACKET_GET_COUNT(header); |
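The r100_cs_packet_parse() change is a validate-then-read fix: the old code dereferenced kdata[idx] in the initializer, before the bounds check ran. The general pattern, shown in isolation:

	#include <stdint.h>
	#include <stddef.h>

	/* Reject the index before touching the buffer; read only after validation. */
	int read_dword(const uint32_t *buf, size_t len_dw, size_t idx, uint32_t *out)
	{
		if (idx >= len_dw)
			return -1;	/* out of range, nothing was read */
		*out = buf[idx];	/* safe: idx validated above */
		return 0;
	}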
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 146f3570af8e..20f17908b036 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c | |||
@@ -384,8 +384,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
384 | DRM_INFO("Loading RV670 PFP Microcode\n"); | 384 | DRM_INFO("Loading RV670 PFP Microcode\n"); |
385 | for (i = 0; i < PFP_UCODE_SIZE; i++) | 385 | for (i = 0; i < PFP_UCODE_SIZE; i++) |
386 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); | 386 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); |
387 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { | 387 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
388 | DRM_INFO("Loading RS780 CP Microcode\n"); | 388 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { |
389 | DRM_INFO("Loading RS780/RS880 CP Microcode\n"); | ||
389 | for (i = 0; i < PM4_UCODE_SIZE; i++) { | 390 | for (i = 0; i < PM4_UCODE_SIZE; i++) { |
390 | RADEON_WRITE(R600_CP_ME_RAM_DATA, | 391 | RADEON_WRITE(R600_CP_ME_RAM_DATA, |
391 | RS780_cp_microcode[i][0]); | 392 | RS780_cp_microcode[i][0]); |
@@ -396,7 +397,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) | |||
396 | } | 397 | } |
397 | 398 | ||
398 | RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); | 399 | RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); |
399 | DRM_INFO("Loading RS780 PFP Microcode\n"); | 400 | DRM_INFO("Loading RS780/RS880 PFP Microcode\n"); |
400 | for (i = 0; i < PFP_UCODE_SIZE; i++) | 401 | for (i = 0; i < PFP_UCODE_SIZE; i++) |
401 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); | 402 | RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); |
402 | } | 403 | } |
@@ -783,6 +784,7 @@ static void r600_gfx_init(struct drm_device *dev, | |||
783 | break; | 784 | break; |
784 | case CHIP_RV610: | 785 | case CHIP_RV610: |
785 | case CHIP_RS780: | 786 | case CHIP_RS780: |
787 | case CHIP_RS880: | ||
786 | case CHIP_RV620: | 788 | case CHIP_RV620: |
787 | dev_priv->r600_max_pipes = 1; | 789 | dev_priv->r600_max_pipes = 1; |
788 | dev_priv->r600_max_tile_pipes = 1; | 790 | dev_priv->r600_max_tile_pipes = 1; |
@@ -917,7 +919,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
917 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || | 919 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || |
918 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 920 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
919 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 921 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
920 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) | 922 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
923 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) | ||
921 | RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); | 924 | RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); |
922 | else | 925 | else |
923 | RADEON_WRITE(R600_DB_DEBUG, 0); | 926 | RADEON_WRITE(R600_DB_DEBUG, 0); |
@@ -935,7 +938,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
935 | sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); | 938 | sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); |
936 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 939 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
937 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 940 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
938 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { | 941 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
942 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { | ||
939 | sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | | 943 | sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | |
940 | R600_FETCH_FIFO_HIWATER(0xa) | | 944 | R600_FETCH_FIFO_HIWATER(0xa) | |
941 | R600_DONE_FIFO_HIWATER(0xe0) | | 945 | R600_DONE_FIFO_HIWATER(0xe0) | |
@@ -978,7 +982,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
978 | R600_NUM_ES_STACK_ENTRIES(0)); | 982 | R600_NUM_ES_STACK_ENTRIES(0)); |
979 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 983 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
980 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 984 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
981 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { | 985 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
986 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { | ||
982 | /* no vertex cache */ | 987 | /* no vertex cache */ |
983 | sq_config &= ~R600_VC_ENABLE; | 988 | sq_config &= ~R600_VC_ENABLE; |
984 | 989 | ||
@@ -1035,7 +1040,8 @@ static void r600_gfx_init(struct drm_device *dev, | |||
1035 | 1040 | ||
1036 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || | 1041 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || |
1037 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || | 1042 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || |
1038 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) | 1043 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || |
1044 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) | ||
1039 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); | 1045 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); |
1040 | else | 1046 | else |
1041 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); | 1047 | RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); |
@@ -1078,6 +1084,7 @@ static void r600_gfx_init(struct drm_device *dev, | |||
1078 | break; | 1084 | break; |
1079 | case CHIP_RV610: | 1085 | case CHIP_RV610: |
1080 | case CHIP_RS780: | 1086 | case CHIP_RS780: |
1087 | case CHIP_RS880: | ||
1081 | case CHIP_RV620: | 1088 | case CHIP_RV620: |
1082 | gs_prim_buffer_depth = 32; | 1089 | gs_prim_buffer_depth = 32; |
1083 | break; | 1090 | break; |
@@ -1123,6 +1130,7 @@ static void r600_gfx_init(struct drm_device *dev, | |||
1123 | switch (dev_priv->flags & RADEON_FAMILY_MASK) { | 1130 | switch (dev_priv->flags & RADEON_FAMILY_MASK) { |
1124 | case CHIP_RV610: | 1131 | case CHIP_RV610: |
1125 | case CHIP_RS780: | 1132 | case CHIP_RS780: |
1133 | case CHIP_RS880: | ||
1126 | case CHIP_RV620: | 1134 | case CHIP_RV620: |
1127 | tc_cntl = R600_TC_L2_SIZE(8); | 1135 | tc_cntl = R600_TC_L2_SIZE(8); |
1128 | break; | 1136 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index a162ade74b7f..9ff6dcb97f9d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -152,7 +152,9 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
152 | } | 152 | } |
153 | } else { | 153 | } else { |
154 | rdev->mc.vram_location = 0; | 154 | rdev->mc.vram_location = 0; |
155 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; | 155 | tmp = rdev->mc.mc_vram_size; |
156 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); | ||
157 | rdev->mc.gtt_location = tmp; | ||
156 | } | 158 | } |
157 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); | 159 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); |
158 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", | 160 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 3cfcee17dc56..0bd5879a4957 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -318,6 +318,14 @@ static int __init radeon_init(void) | |||
318 | driver = &driver_old; | 318 | driver = &driver_old; |
319 | driver->num_ioctls = radeon_max_ioctl; | 319 | driver->num_ioctls = radeon_max_ioctl; |
320 | #if defined(CONFIG_DRM_RADEON_KMS) | 320 | #if defined(CONFIG_DRM_RADEON_KMS) |
321 | #ifdef CONFIG_VGA_CONSOLE | ||
322 | if (vgacon_text_force() && radeon_modeset == -1) { | ||
323 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); | ||
324 | driver = &driver_old; | ||
325 | driver->driver_features &= ~DRIVER_MODESET; | ||
326 | radeon_modeset = 0; | ||
327 | } | ||
328 | #endif | ||
321 | /* if enabled by default */ | 329 | /* if enabled by default */ |
322 | if (radeon_modeset == -1) { | 330 | if (radeon_modeset == -1) { |
323 | DRM_INFO("radeon default to kernel modesetting.\n"); | 331 | DRM_INFO("radeon default to kernel modesetting.\n"); |
@@ -329,17 +337,8 @@ static int __init radeon_init(void) | |||
329 | driver->driver_features |= DRIVER_MODESET; | 337 | driver->driver_features |= DRIVER_MODESET; |
330 | driver->num_ioctls = radeon_max_kms_ioctl; | 338 | driver->num_ioctls = radeon_max_kms_ioctl; |
331 | } | 339 | } |
332 | |||
333 | /* if the vga console setting is enabled still | 340 | /* if the vga console setting is enabled still |
334 | * let modprobe override it */ | 341 | * let modprobe override it */ |
335 | #ifdef CONFIG_VGA_CONSOLE | ||
336 | if (vgacon_text_force() && radeon_modeset == -1) { | ||
337 | DRM_INFO("VGACON disable radeon kernel modesetting.\n"); | ||
338 | driver = &driver_old; | ||
339 | driver->driver_features &= ~DRIVER_MODESET; | ||
340 | radeon_modeset = 0; | ||
341 | } | ||
342 | #endif | ||
343 | #endif | 342 | #endif |
344 | return drm_init(driver); | 343 | return drm_init(driver); |
345 | } | 344 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 127d0456f628..3933f8216a34 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -143,6 +143,7 @@ enum radeon_family { | |||
143 | CHIP_RV635, | 143 | CHIP_RV635, |
144 | CHIP_RV670, | 144 | CHIP_RV670, |
145 | CHIP_RS780, | 145 | CHIP_RS780, |
146 | CHIP_RS880, | ||
146 | CHIP_RV770, | 147 | CHIP_RV770, |
147 | CHIP_RV730, | 148 | CHIP_RV730, |
148 | CHIP_RV710, | 149 | CHIP_RV710, |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 937a2f1cdb46..3357110e30ce 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -58,6 +58,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
58 | if (r) { | 58 | if (r) { |
59 | DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); | 59 | DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); |
60 | radeon_device_fini(rdev); | 60 | radeon_device_fini(rdev); |
61 | kfree(rdev); | ||
62 | dev->dev_private = NULL; | ||
61 | return r; | 63 | return r; |
62 | } | 64 | } |
63 | return 0; | 65 | return 0; |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index dd9ac2fed6d6..e98cae3bf4a6 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -106,7 +106,7 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | |||
106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; | 106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
107 | } | 107 | } |
108 | if (domain & RADEON_GEM_DOMAIN_GTT) { | 108 | if (domain & RADEON_GEM_DOMAIN_GTT) { |
109 | flags |= TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; | 109 | flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; |
110 | } | 110 | } |
111 | if (domain & RADEON_GEM_DOMAIN_CPU) { | 111 | if (domain & RADEON_GEM_DOMAIN_CPU) { |
112 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; | 112 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 551e608702e4..fd8f3ca716ea 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -370,6 +370,7 @@ void rv515_vram_info(struct radeon_device *rdev) | |||
370 | 370 | ||
371 | rv515_vram_get_type(rdev); | 371 | rv515_vram_get_type(rdev); |
372 | 372 | ||
373 | r100_vram_init_sizes(rdev); | ||
373 | /* FIXME: we should enforce default clock in case GPU is not in | 374 | /* FIXME: we should enforce default clock in case GPU is not in |
374 | * default setup | 375 | * default setup |
375 | */ | 376 | */ |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6538d4236989..c2b0d710d10f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1182,13 +1182,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1182 | 1182 | ||
1183 | int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | 1183 | int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) |
1184 | { | 1184 | { |
1185 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 1185 | struct ttm_mem_type_manager *man; |
1186 | int ret = -EINVAL; | 1186 | int ret = -EINVAL; |
1187 | 1187 | ||
1188 | if (mem_type >= TTM_NUM_MEM_TYPES) { | 1188 | if (mem_type >= TTM_NUM_MEM_TYPES) { |
1189 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); | 1189 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); |
1190 | return ret; | 1190 | return ret; |
1191 | } | 1191 | } |
1192 | man = &bdev->man[mem_type]; | ||
1192 | 1193 | ||
1193 | if (!man->has_type) { | 1194 | if (!man->has_type) { |
1194 | printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " | 1195 | printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " |
@@ -1575,6 +1576,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1575 | driver->sync_obj_unref(&sync_obj); | 1576 | driver->sync_obj_unref(&sync_obj); |
1576 | driver->sync_obj_unref(&tmp_obj); | 1577 | driver->sync_obj_unref(&tmp_obj); |
1577 | spin_lock(&bo->lock); | 1578 | spin_lock(&bo->lock); |
1579 | } else { | ||
1580 | spin_unlock(&bo->lock); | ||
1581 | driver->sync_obj_unref(&sync_obj); | ||
1582 | spin_lock(&bo->lock); | ||
1578 | } | 1583 | } |
1579 | } | 1584 | } |
1580 | return 0; | 1585 | return 0; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ce2e6f38ea01..ad4ada07c6cf 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -150,7 +150,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | |||
150 | #ifdef CONFIG_X86 | 150 | #ifdef CONFIG_X86 |
151 | dst = kmap_atomic_prot(d, KM_USER0, prot); | 151 | dst = kmap_atomic_prot(d, KM_USER0, prot); |
152 | #else | 152 | #else |
153 | if (prot != PAGE_KERNEL) | 153 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
154 | dst = vmap(&d, 1, 0, prot); | 154 | dst = vmap(&d, 1, 0, prot); |
155 | else | 155 | else |
156 | dst = kmap(d); | 156 | dst = kmap(d); |
@@ -163,7 +163,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | |||
163 | #ifdef CONFIG_X86 | 163 | #ifdef CONFIG_X86 |
164 | kunmap_atomic(dst, KM_USER0); | 164 | kunmap_atomic(dst, KM_USER0); |
165 | #else | 165 | #else |
166 | if (prot != PAGE_KERNEL) | 166 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
167 | vunmap(dst); | 167 | vunmap(dst); |
168 | else | 168 | else |
169 | kunmap(d); | 169 | kunmap(d); |
@@ -186,7 +186,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | |||
186 | #ifdef CONFIG_X86 | 186 | #ifdef CONFIG_X86 |
187 | src = kmap_atomic_prot(s, KM_USER0, prot); | 187 | src = kmap_atomic_prot(s, KM_USER0, prot); |
188 | #else | 188 | #else |
189 | if (prot != PAGE_KERNEL) | 189 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
190 | src = vmap(&s, 1, 0, prot); | 190 | src = vmap(&s, 1, 0, prot); |
191 | else | 191 | else |
192 | src = kmap(s); | 192 | src = kmap(s); |
@@ -199,7 +199,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | |||
199 | #ifdef CONFIG_X86 | 199 | #ifdef CONFIG_X86 |
200 | kunmap_atomic(src, KM_USER0); | 200 | kunmap_atomic(src, KM_USER0); |
201 | #else | 201 | #else |
202 | if (prot != PAGE_KERNEL) | 202 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
203 | vunmap(src); | 203 | vunmap(src); |
204 | else | 204 | else |
205 | kunmap(s); | 205 | kunmap(s); |
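The ttm_bo_util.c hunks matter on configurations where pgprot_t is a struct wrapper rather than a plain integer: a direct prot != PAGE_KERNEL comparison then no longer compiles, while pgprot_val() compares the underlying bits. A minimal standalone illustration; the wrapper and the values are made up for the example:

	#include <assert.h>

	typedef struct { unsigned long pgprot; } pgprot_t;	/* struct-wrapped, as on some configs */
	#define pgprot_val(x)	((x).pgprot)
	#define __pgprot(x)	((pgprot_t){ (x) })

	int main(void)
	{
		pgprot_t kernel = __pgprot(0x163), wc = __pgprot(0x173);	/* arbitrary bits */

		/* "kernel != wc" would be a compile error for a struct type; this works: */
		assert(pgprot_val(kernel) != pgprot_val(wc));
		return 0;
	}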
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index e9b2e7cb05be..541b981ff075 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -27,6 +27,7 @@ struct matrix_keypad { | |||
27 | const struct matrix_keypad_platform_data *pdata; | 27 | const struct matrix_keypad_platform_data *pdata; |
28 | struct input_dev *input_dev; | 28 | struct input_dev *input_dev; |
29 | unsigned short *keycodes; | 29 | unsigned short *keycodes; |
30 | unsigned int row_shift; | ||
30 | 31 | ||
31 | uint32_t last_key_state[MATRIX_MAX_COLS]; | 32 | uint32_t last_key_state[MATRIX_MAX_COLS]; |
32 | struct delayed_work work; | 33 | struct delayed_work work; |
@@ -136,7 +137,7 @@ static void matrix_keypad_scan(struct work_struct *work) | |||
136 | if ((bits_changed & (1 << row)) == 0) | 137 | if ((bits_changed & (1 << row)) == 0) |
137 | continue; | 138 | continue; |
138 | 139 | ||
139 | code = (row << 4) + col; | 140 | code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); |
140 | input_event(input_dev, EV_MSC, MSC_SCAN, code); | 141 | input_event(input_dev, EV_MSC, MSC_SCAN, code); |
141 | input_report_key(input_dev, | 142 | input_report_key(input_dev, |
142 | keypad->keycodes[code], | 143 | keypad->keycodes[code], |
@@ -317,6 +318,7 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
317 | struct matrix_keypad *keypad; | 318 | struct matrix_keypad *keypad; |
318 | struct input_dev *input_dev; | 319 | struct input_dev *input_dev; |
319 | unsigned short *keycodes; | 320 | unsigned short *keycodes; |
321 | unsigned int row_shift; | ||
320 | int i; | 322 | int i; |
321 | int err; | 323 | int err; |
322 | 324 | ||
@@ -332,14 +334,11 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
332 | return -EINVAL; | 334 | return -EINVAL; |
333 | } | 335 | } |
334 | 336 | ||
335 | if (!keymap_data->max_keymap_size) { | 337 | row_shift = get_count_order(pdata->num_col_gpios); |
336 | dev_err(&pdev->dev, "invalid keymap data supplied\n"); | ||
337 | return -EINVAL; | ||
338 | } | ||
339 | 338 | ||
340 | keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL); | 339 | keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL); |
341 | keycodes = kzalloc(keymap_data->max_keymap_size * | 340 | keycodes = kzalloc((pdata->num_row_gpios << row_shift) * |
342 | sizeof(keypad->keycodes), | 341 | sizeof(*keycodes), |
343 | GFP_KERNEL); | 342 | GFP_KERNEL); |
344 | input_dev = input_allocate_device(); | 343 | input_dev = input_allocate_device(); |
345 | if (!keypad || !keycodes || !input_dev) { | 344 | if (!keypad || !keycodes || !input_dev) { |
@@ -350,6 +349,7 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
350 | keypad->input_dev = input_dev; | 349 | keypad->input_dev = input_dev; |
351 | keypad->pdata = pdata; | 350 | keypad->pdata = pdata; |
352 | keypad->keycodes = keycodes; | 351 | keypad->keycodes = keycodes; |
352 | keypad->row_shift = row_shift; | ||
353 | keypad->stopped = true; | 353 | keypad->stopped = true; |
354 | INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan); | 354 | INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan); |
355 | spin_lock_init(&keypad->lock); | 355 | spin_lock_init(&keypad->lock); |
@@ -363,7 +363,7 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
363 | 363 | ||
364 | input_dev->keycode = keycodes; | 364 | input_dev->keycode = keycodes; |
365 | input_dev->keycodesize = sizeof(*keycodes); | 365 | input_dev->keycodesize = sizeof(*keycodes); |
366 | input_dev->keycodemax = keymap_data->max_keymap_size; | 366 | input_dev->keycodemax = pdata->num_row_gpios << keypad->row_shift; |
367 | 367 | ||
368 | for (i = 0; i < keymap_data->keymap_size; i++) { | 368 | for (i = 0; i < keymap_data->keymap_size; i++) { |
369 | unsigned int key = keymap_data->keymap[i]; | 369 | unsigned int key = keymap_data->keymap[i]; |
@@ -371,7 +371,7 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev) | |||
371 | unsigned int col = KEY_COL(key); | 371 | unsigned int col = KEY_COL(key); |
372 | unsigned short code = KEY_VAL(key); | 372 | unsigned short code = KEY_VAL(key); |
373 | 373 | ||
374 | keycodes[(row << 4) + col] = code; | 374 | keycodes[MATRIX_SCAN_CODE(row, col, row_shift)] = code; |
375 | __set_bit(code, input_dev->keybit); | 375 | __set_bit(code, input_dev->keybit); |
376 | } | 376 | } |
377 | __clear_bit(KEY_RESERVED, input_dev->keybit); | 377 | __clear_bit(KEY_RESERVED, input_dev->keybit); |
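With the matrix_keypad changes the keymap is sized from the GPIO counts and scan codes come from MATRIX_SCAN_CODE() instead of a hard-coded (row << 4) + col. The arithmetic, checked in isolation; the board dimensions below are an example, and count_order() mimics get_count_order() for positive inputs:

	#include <assert.h>

	#define MATRIX_SCAN_CODE(row, col, row_shift)	(((row) << (row_shift)) + (col))

	static unsigned int count_order(unsigned int n)
	{
		unsigned int order = 0;

		while ((1u << order) < n)	/* smallest order with 2^order >= n */
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int cols = 5, rows = 4;
		unsigned int row_shift = count_order(cols);

		assert(row_shift == 3);				/* 5 columns -> 8 slots per row */
		assert((rows << row_shift) == 32);		/* keycodemax for a 4x5 matrix  */
		assert(MATRIX_SCAN_CODE(2, 4, row_shift) == 20);/* row 2, column 4              */
		return 0;
	}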
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c index 26e17a9a22eb..27ee976eb54c 100644 --- a/drivers/input/misc/wistron_btns.c +++ b/drivers/input/misc/wistron_btns.c | |||
@@ -611,6 +611,20 @@ static struct key_entry keymap_wistron_generic[] __initdata = { | |||
611 | { KE_END, 0 } | 611 | { KE_END, 0 } |
612 | }; | 612 | }; |
613 | 613 | ||
614 | static struct key_entry keymap_prestigio[] __initdata = { | ||
615 | { KE_KEY, 0x11, {KEY_PROG1} }, | ||
616 | { KE_KEY, 0x12, {KEY_PROG2} }, | ||
617 | { KE_WIFI, 0x30 }, | ||
618 | { KE_KEY, 0x22, {KEY_REWIND} }, | ||
619 | { KE_KEY, 0x23, {KEY_FORWARD} }, | ||
620 | { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, | ||
621 | { KE_KEY, 0x25, {KEY_STOPCD} }, | ||
622 | { KE_KEY, 0x31, {KEY_MAIL} }, | ||
623 | { KE_KEY, 0x36, {KEY_WWW} }, | ||
624 | { KE_END, 0 } | ||
625 | }; | ||
626 | |||
627 | |||
614 | /* | 628 | /* |
615 | * If your machine is not here (which is currently rather likely), please send | 629 | * If your machine is not here (which is currently rather likely), please send |
616 | * a list of buttons and their key codes (reported when loading this module | 630 | * a list of buttons and their key codes (reported when loading this module |
@@ -971,6 +985,8 @@ static int __init select_keymap(void) | |||
971 | if (keymap_name != NULL) { | 985 | if (keymap_name != NULL) { |
972 | if (strcmp (keymap_name, "1557/MS2141") == 0) | 986 | if (strcmp (keymap_name, "1557/MS2141") == 0) |
973 | keymap = keymap_wistron_ms2141; | 987 | keymap = keymap_wistron_ms2141; |
988 | else if (strcmp (keymap_name, "prestigio") == 0) | ||
989 | keymap = keymap_prestigio; | ||
974 | else if (strcmp (keymap_name, "generic") == 0) | 990 | else if (strcmp (keymap_name, "generic") == 0) |
975 | keymap = keymap_wistron_generic; | 991 | keymap = keymap_wistron_generic; |
976 | else { | 992 | else { |
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index b587e2d576ac..820e51673b26 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c | |||
@@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) | |||
296 | priv->tseq[3] = 0; | 296 | priv->tseq[3] = 0; |
297 | if (mlc->opacket & HIL_CTRL_APE) { | 297 | if (mlc->opacket & HIL_CTRL_APE) { |
298 | priv->tseq[3] |= HP_SDC_LPC_APE_IPF; | 298 | priv->tseq[3] |= HP_SDC_LPC_APE_IPF; |
299 | down_trylock(&mlc->csem); | 299 | BUG_ON(down_trylock(&mlc->csem)); |
300 | } | 300 | } |
301 | enqueue: | 301 | enqueue: |
302 | hp_sdc_enqueue_transaction(&priv->trans); | 302 | hp_sdc_enqueue_transaction(&priv->trans); |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 924e8ed7f2cf..ae04d8a494e5 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -78,6 +78,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = { | |||
78 | }, | 78 | }, |
79 | }, | 79 | }, |
80 | { | 80 | { |
81 | .ident = "ASUS G1S", | ||
82 | .matches = { | ||
83 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), | ||
84 | DMI_MATCH(DMI_BOARD_NAME, "G1S"), | ||
85 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | ||
86 | }, | ||
87 | }, | ||
88 | { | ||
81 | /* AUX LOOP command does not raise AUX IRQ */ | 89 | /* AUX LOOP command does not raise AUX IRQ */ |
82 | .ident = "ASUS P65UP5", | 90 | .ident = "ASUS P65UP5", |
83 | .matches = { | 91 | .matches = { |
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index c3b661a666cb..7e5f30dbc0a0 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c | |||
@@ -1480,7 +1480,7 @@ l1oip_init(void) | |||
1480 | return -ENOMEM; | 1480 | return -ENOMEM; |
1481 | 1481 | ||
1482 | l1oip_cnt = 0; | 1482 | l1oip_cnt = 0; |
1483 | while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) { | 1483 | while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) { |
1484 | switch (type[l1oip_cnt] & 0xff) { | 1484 | switch (type[l1oip_cnt] & 0xff) { |
1485 | case 1: | 1485 | case 1: |
1486 | pri = 0; | 1486 | pri = 0; |
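Swapping the operands in the l1oip_init() loop condition matters because && short-circuits: the index bound must be tested before type[l1oip_cnt] is read. A self-contained illustration of the same ordering (the MAX_CARDS value here is arbitrary):

	#include <stdio.h>

	#define MAX_CARDS 4

	int main(void)
	{
		int type[MAX_CARDS] = { 1, 1, 5, 0 };
		int i = 0;

		while (i < MAX_CARDS && type[i])	/* never evaluates type[MAX_CARDS] */
			i++;
		printf("%d cards configured\n", i);
		return 0;
	}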
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 5810fa906af0..5fe39c2a3d2b 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -220,6 +220,7 @@ static int linear_run (mddev_t *mddev) | |||
220 | mddev->queue->unplug_fn = linear_unplug; | 220 | mddev->queue->unplug_fn = linear_unplug; |
221 | mddev->queue->backing_dev_info.congested_fn = linear_congested; | 221 | mddev->queue->backing_dev_info.congested_fn = linear_congested; |
222 | mddev->queue->backing_dev_info.congested_data = mddev; | 222 | mddev->queue->backing_dev_info.congested_data = mddev; |
223 | md_integrity_register(mddev); | ||
223 | return 0; | 224 | return 0; |
224 | } | 225 | } |
225 | 226 | ||
@@ -256,6 +257,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) | |||
256 | rcu_assign_pointer(mddev->private, newconf); | 257 | rcu_assign_pointer(mddev->private, newconf); |
257 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); | 258 | md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); |
258 | set_capacity(mddev->gendisk, mddev->array_sectors); | 259 | set_capacity(mddev->gendisk, mddev->array_sectors); |
260 | revalidate_disk(mddev->gendisk); | ||
259 | call_rcu(&oldconf->rcu, free_conf); | 261 | call_rcu(&oldconf->rcu, free_conf); |
260 | return 0; | 262 | return 0; |
261 | } | 263 | } |
diff --git a/drivers/md/md.c b/drivers/md/md.c index d4351ff0849f..103f2d33fa89 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -359,6 +359,7 @@ static mddev_t * mddev_find(dev_t unit) | |||
359 | else | 359 | else |
360 | new->md_minor = MINOR(unit) >> MdpMinorShift; | 360 | new->md_minor = MINOR(unit) >> MdpMinorShift; |
361 | 361 | ||
362 | mutex_init(&new->open_mutex); | ||
362 | mutex_init(&new->reconfig_mutex); | 363 | mutex_init(&new->reconfig_mutex); |
363 | INIT_LIST_HEAD(&new->disks); | 364 | INIT_LIST_HEAD(&new->disks); |
364 | INIT_LIST_HEAD(&new->all_mddevs); | 365 | INIT_LIST_HEAD(&new->all_mddevs); |
@@ -1308,7 +1309,12 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1308 | } | 1309 | } |
1309 | if (mddev->level != LEVEL_MULTIPATH) { | 1310 | if (mddev->level != LEVEL_MULTIPATH) { |
1310 | int role; | 1311 | int role; |
1311 | role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); | 1312 | if (rdev->desc_nr < 0 || |
1313 | rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { | ||
1314 | role = 0xffff; | ||
1315 | rdev->desc_nr = -1; | ||
1316 | } else | ||
1317 | role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); | ||
1312 | switch(role) { | 1318 | switch(role) { |
1313 | case 0xffff: /* spare */ | 1319 | case 0xffff: /* spare */ |
1314 | break; | 1320 | break; |
@@ -1394,8 +1400,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1394 | if (rdev2->desc_nr+1 > max_dev) | 1400 | if (rdev2->desc_nr+1 > max_dev) |
1395 | max_dev = rdev2->desc_nr+1; | 1401 | max_dev = rdev2->desc_nr+1; |
1396 | 1402 | ||
1397 | if (max_dev > le32_to_cpu(sb->max_dev)) | 1403 | if (max_dev > le32_to_cpu(sb->max_dev)) { |
1404 | int bmask; | ||
1398 | sb->max_dev = cpu_to_le32(max_dev); | 1405 | sb->max_dev = cpu_to_le32(max_dev); |
1406 | rdev->sb_size = max_dev * 2 + 256; | ||
1407 | bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; | ||
1408 | if (rdev->sb_size & bmask) | ||
1409 | rdev->sb_size = (rdev->sb_size | bmask) + 1; | ||
1410 | } | ||
1399 | for (i=0; i<max_dev;i++) | 1411 | for (i=0; i<max_dev;i++) |
1400 | sb->dev_roles[i] = cpu_to_le16(0xfffe); | 1412 | sb->dev_roles[i] = cpu_to_le16(0xfffe); |
1401 | 1413 | ||
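The super_1_sync() hunk grows sb_size to cover the new role slots and then rounds it up to the device's logical block size so the superblock write stays block-aligned. The rounding, worked through in isolation:

	#include <assert.h>

	/* Round size up to the next multiple of block (block is a power of two). */
	static int round_up_to_block(int size, int block)
	{
		int bmask = block - 1;

		if (size & bmask)
			size = (size | bmask) + 1;
		return size;
	}

	int main(void)
	{
		int max_dev = 129;
		int sb_size = max_dev * 2 + 256;	/* 514 bytes of superblock data */

		assert(round_up_to_block(sb_size, 512)  == 1024);
		assert(round_up_to_block(sb_size, 4096) == 4096);
		return 0;
	}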
@@ -1487,37 +1499,76 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) | |||
1487 | 1499 | ||
1488 | static LIST_HEAD(pending_raid_disks); | 1500 | static LIST_HEAD(pending_raid_disks); |
1489 | 1501 | ||
1490 | static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev) | 1502 | /* |
1503 | * Try to register data integrity profile for an mddev | ||
1504 | * | ||
1505 | * This is called when an array is started and after a disk has been kicked | ||
1506 | * from the array. It only succeeds if all working and active component devices | ||
1507 | * are integrity capable with matching profiles. | ||
1508 | */ | ||
1509 | int md_integrity_register(mddev_t *mddev) | ||
1510 | { | ||
1511 | mdk_rdev_t *rdev, *reference = NULL; | ||
1512 | |||
1513 | if (list_empty(&mddev->disks)) | ||
1514 | return 0; /* nothing to do */ | ||
1515 | if (blk_get_integrity(mddev->gendisk)) | ||
1516 | return 0; /* already registered */ | ||
1517 | list_for_each_entry(rdev, &mddev->disks, same_set) { | ||
1518 | /* skip spares and non-functional disks */ | ||
1519 | if (test_bit(Faulty, &rdev->flags)) | ||
1520 | continue; | ||
1521 | if (rdev->raid_disk < 0) | ||
1522 | continue; | ||
1523 | /* | ||
1524 | * If at least one rdev is not integrity capable, we can not | ||
1525 | * enable data integrity for the md device. | ||
1526 | */ | ||
1527 | if (!bdev_get_integrity(rdev->bdev)) | ||
1528 | return -EINVAL; | ||
1529 | if (!reference) { | ||
1530 | /* Use the first rdev as the reference */ | ||
1531 | reference = rdev; | ||
1532 | continue; | ||
1533 | } | ||
1534 | /* does this rdev's profile match the reference profile? */ | ||
1535 | if (blk_integrity_compare(reference->bdev->bd_disk, | ||
1536 | rdev->bdev->bd_disk) < 0) | ||
1537 | return -EINVAL; | ||
1538 | } | ||
1539 | /* | ||
1540 | * All component devices are integrity capable and have matching | ||
1541 | * profiles, register the common profile for the md device. | ||
1542 | */ | ||
1543 | if (blk_integrity_register(mddev->gendisk, | ||
1544 | bdev_get_integrity(reference->bdev)) != 0) { | ||
1545 | printk(KERN_ERR "md: failed to register integrity for %s\n", | ||
1546 | mdname(mddev)); | ||
1547 | return -EINVAL; | ||
1548 | } | ||
1549 | printk(KERN_NOTICE "md: data integrity on %s enabled\n", | ||
1550 | mdname(mddev)); | ||
1551 | return 0; | ||
1552 | } | ||
1553 | EXPORT_SYMBOL(md_integrity_register); | ||
1554 | |||
1555 | /* Disable data integrity if non-capable/non-matching disk is being added */ | ||
1556 | void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | ||
1491 | { | 1557 | { |
1492 | struct mdk_personality *pers = mddev->pers; | ||
1493 | struct gendisk *disk = mddev->gendisk; | ||
1494 | struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); | 1558 | struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); |
1495 | struct blk_integrity *bi_mddev = blk_get_integrity(disk); | 1559 | struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); |
1496 | 1560 | ||
1497 | /* Data integrity passthrough not supported on RAID 4, 5 and 6 */ | 1561 | if (!bi_mddev) /* nothing to do */ |
1498 | if (pers && pers->level >= 4 && pers->level <= 6) | ||
1499 | return; | 1562 | return; |
1500 | 1563 | if (rdev->raid_disk < 0) /* skip spares */ | |
1501 | /* If rdev is integrity capable, register profile for mddev */ | ||
1502 | if (!bi_mddev && bi_rdev) { | ||
1503 | if (blk_integrity_register(disk, bi_rdev)) | ||
1504 | printk(KERN_ERR "%s: %s Could not register integrity!\n", | ||
1505 | __func__, disk->disk_name); | ||
1506 | else | ||
1507 | printk(KERN_NOTICE "Enabling data integrity on %s\n", | ||
1508 | disk->disk_name); | ||
1509 | return; | 1564 | return; |
1510 | } | 1565 | if (bi_rdev && blk_integrity_compare(mddev->gendisk, |
1511 | 1566 | rdev->bdev->bd_disk) >= 0) | |
1512 | /* Check that mddev and rdev have matching profiles */ | 1567 | return; |
1513 | if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) { | 1568 | printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); |
1514 | printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__, | 1569 | blk_integrity_unregister(mddev->gendisk); |
1515 | disk->disk_name, rdev->bdev->bd_disk->disk_name); | ||
1516 | printk(KERN_NOTICE "Disabling data integrity on %s\n", | ||
1517 | disk->disk_name); | ||
1518 | blk_integrity_unregister(disk); | ||
1519 | } | ||
1520 | } | 1570 | } |
1571 | EXPORT_SYMBOL(md_integrity_add_rdev); | ||
1521 | 1572 | ||
1522 | static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | 1573 | static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) |
1523 | { | 1574 | { |
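md_integrity_register() and md_integrity_add_rdev() are meant to be called from the personalities, as the linear.c hunk earlier in this series does. A hedged sketch of plausible call sites (illustrative, error handling elided; the example_* names are hypothetical):

	static int example_run(mddev_t *mddev)
	{
		/* ... array set up ... */
		md_integrity_register(mddev);	/* enable integrity passthrough only if
						 * every active rdev has a matching profile */
		return 0;
	}

	static int example_hot_add(mddev_t *mddev, mdk_rdev_t *rdev)
	{
		/* ... bind the new device ... */
		md_integrity_add_rdev(rdev, mddev);	/* drop integrity again if the
							 * newcomer cannot match it */
		return 0;
	}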
@@ -1591,7 +1642,6 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | |||
1591 | /* May as well allow recovery to be retried once */ | 1642 | /* May as well allow recovery to be retried once */ |
1592 | mddev->recovery_disabled = 0; | 1643 | mddev->recovery_disabled = 0; |
1593 | 1644 | ||
1594 | md_integrity_check(rdev, mddev); | ||
1595 | return 0; | 1645 | return 0; |
1596 | 1646 | ||
1597 | fail: | 1647 | fail: |
@@ -1925,17 +1975,14 @@ repeat: | |||
1925 | /* otherwise we have to go forward and ... */ | 1975 | /* otherwise we have to go forward and ... */ |
1926 | mddev->events ++; | 1976 | mddev->events ++; |
1927 | if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ | 1977 | if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ |
1928 | /* .. if the array isn't clean, insist on an odd 'events' */ | 1978 | /* .. if the array isn't clean, an 'even' event must also go |
1929 | if ((mddev->events&1)==0) { | 1979 | * to spares. */ |
1930 | mddev->events++; | 1980 | if ((mddev->events&1)==0) |
1931 | nospares = 0; | 1981 | nospares = 0; |
1932 | } | ||
1933 | } else { | 1982 | } else { |
1934 | /* otherwise insist on an even 'events' (for clean states) */ | 1983 | /* otherwise an 'odd' event must go to spares */ |
1935 | if ((mddev->events&1)) { | 1984 | if ((mddev->events&1)) |
1936 | mddev->events++; | ||
1937 | nospares = 0; | 1985 | nospares = 0; |
1938 | } | ||
1939 | } | 1986 | } |
1940 | } | 1987 | } |
1941 | 1988 | ||
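The rewritten branch keeps the old parity convention for the events counter but no longer bumps the counter to restore it; a parity mismatch now simply forces the superblock update out to spare devices as well. Reduced to a predicate (user-space sketch, illustrative name):

    #include <stdbool.h>

    /* 'events' is the superblock generation counter. By convention a clean
     * array carries an even count and a dirty one an odd count; when the
     * parity disagrees with the state, the metadata update must also reach
     * the spare devices (i.e. nospares is cleared). */
    static bool update_must_reach_spares(unsigned long long events, bool clean)
    {
            bool odd = events & 1;

            return clean ? odd : !odd;
    }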
@@ -2657,6 +2704,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) | |||
2657 | ssize_t rv = len; | 2704 | ssize_t rv = len; |
2658 | struct mdk_personality *pers; | 2705 | struct mdk_personality *pers; |
2659 | void *priv; | 2706 | void *priv; |
2707 | mdk_rdev_t *rdev; | ||
2660 | 2708 | ||
2661 | if (mddev->pers == NULL) { | 2709 | if (mddev->pers == NULL) { |
2662 | if (len == 0) | 2710 | if (len == 0) |
@@ -2736,6 +2784,12 @@ level_store(mddev_t *mddev, const char *buf, size_t len) | |||
2736 | mddev_suspend(mddev); | 2784 | mddev_suspend(mddev); |
2737 | mddev->pers->stop(mddev); | 2785 | mddev->pers->stop(mddev); |
2738 | module_put(mddev->pers->owner); | 2786 | module_put(mddev->pers->owner); |
2787 | /* Invalidate devices that are now superfluous */ | ||
2788 | list_for_each_entry(rdev, &mddev->disks, same_set) | ||
2789 | if (rdev->raid_disk >= mddev->raid_disks) { | ||
2790 | rdev->raid_disk = -1; | ||
2791 | clear_bit(In_sync, &rdev->flags); | ||
2792 | } | ||
2739 | mddev->pers = pers; | 2793 | mddev->pers = pers; |
2740 | mddev->private = priv; | 2794 | mddev->private = priv; |
2741 | strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); | 2795 | strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); |
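(No separate example is needed for the invalidation loop itself; it simply marks any rdev whose slot no longer exists under the new personality as a spare, raid_disk = -1 and In_sync cleared, before the new personality takes over.)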
@@ -3545,6 +3599,7 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len) | |||
3545 | if (max < mddev->resync_min) | 3599 | if (max < mddev->resync_min) |
3546 | return -EINVAL; | 3600 | return -EINVAL; |
3547 | if (max < mddev->resync_max && | 3601 | if (max < mddev->resync_max && |
3602 | mddev->ro == 0 && | ||
3548 | test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | 3603 | test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
3549 | return -EBUSY; | 3604 | return -EBUSY; |
3550 | 3605 | ||
@@ -3685,17 +3740,8 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len) | |||
3685 | 3740 | ||
3686 | mddev->array_sectors = sectors; | 3741 | mddev->array_sectors = sectors; |
3687 | set_capacity(mddev->gendisk, mddev->array_sectors); | 3742 | set_capacity(mddev->gendisk, mddev->array_sectors); |
3688 | if (mddev->pers) { | 3743 | if (mddev->pers) |
3689 | struct block_device *bdev = bdget_disk(mddev->gendisk, 0); | 3744 | revalidate_disk(mddev->gendisk); |
3690 | |||
3691 | if (bdev) { | ||
3692 | mutex_lock(&bdev->bd_inode->i_mutex); | ||
3693 | i_size_write(bdev->bd_inode, | ||
3694 | (loff_t)mddev->array_sectors << 9); | ||
3695 | mutex_unlock(&bdev->bd_inode->i_mutex); | ||
3696 | bdput(bdev); | ||
3697 | } | ||
3698 | } | ||
3699 | 3745 | ||
3700 | return len; | 3746 | return len; |
3701 | } | 3747 | } |
@@ -4048,10 +4094,6 @@ static int do_md_run(mddev_t * mddev) | |||
4048 | } | 4094 | } |
4049 | strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); | 4095 | strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); |
4050 | 4096 | ||
4051 | if (pers->level >= 4 && pers->level <= 6) | ||
4052 | /* Cannot support integrity (yet) */ | ||
4053 | blk_integrity_unregister(mddev->gendisk); | ||
4054 | |||
4055 | if (mddev->reshape_position != MaxSector && | 4097 | if (mddev->reshape_position != MaxSector && |
4056 | pers->start_reshape == NULL) { | 4098 | pers->start_reshape == NULL) { |
4057 | /* This personality cannot handle reshaping... */ | 4099 | /* This personality cannot handle reshaping... */ |
@@ -4189,6 +4231,7 @@ static int do_md_run(mddev_t * mddev) | |||
4189 | md_wakeup_thread(mddev->thread); | 4231 | md_wakeup_thread(mddev->thread); |
4190 | md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ | 4232 | md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ |
4191 | 4233 | ||
4234 | revalidate_disk(mddev->gendisk); | ||
4192 | mddev->changed = 1; | 4235 | mddev->changed = 1; |
4193 | md_new_event(mddev); | 4236 | md_new_event(mddev); |
4194 | sysfs_notify_dirent(mddev->sysfs_state); | 4237 | sysfs_notify_dirent(mddev->sysfs_state); |
@@ -4260,12 +4303,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4260 | struct gendisk *disk = mddev->gendisk; | 4303 | struct gendisk *disk = mddev->gendisk; |
4261 | mdk_rdev_t *rdev; | 4304 | mdk_rdev_t *rdev; |
4262 | 4305 | ||
4306 | mutex_lock(&mddev->open_mutex); | ||
4263 | if (atomic_read(&mddev->openers) > is_open) { | 4307 | if (atomic_read(&mddev->openers) > is_open) { |
4264 | printk("md: %s still in use.\n",mdname(mddev)); | 4308 | printk("md: %s still in use.\n",mdname(mddev)); |
4265 | return -EBUSY; | 4309 | err = -EBUSY; |
4266 | } | 4310 | } else if (mddev->pers) { |
4267 | |||
4268 | if (mddev->pers) { | ||
4269 | 4311 | ||
4270 | if (mddev->sync_thread) { | 4312 | if (mddev->sync_thread) { |
4271 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 4313 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
@@ -4323,7 +4365,10 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4323 | set_disk_ro(disk, 1); | 4365 | set_disk_ro(disk, 1); |
4324 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 4366 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
4325 | } | 4367 | } |
4326 | 4368 | out: | |
4369 | mutex_unlock(&mddev->open_mutex); | ||
4370 | if (err) | ||
4371 | return err; | ||
4327 | /* | 4372 | /* |
4328 | * Free resources if final stop | 4373 | * Free resources if final stop |
4329 | */ | 4374 | */ |
@@ -4389,7 +4434,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4389 | blk_integrity_unregister(disk); | 4434 | blk_integrity_unregister(disk); |
4390 | md_new_event(mddev); | 4435 | md_new_event(mddev); |
4391 | sysfs_notify_dirent(mddev->sysfs_state); | 4436 | sysfs_notify_dirent(mddev->sysfs_state); |
4392 | out: | ||
4393 | return err; | 4437 | return err; |
4394 | } | 4438 | } |
4395 | 4439 | ||
@@ -5087,18 +5131,8 @@ static int update_size(mddev_t *mddev, sector_t num_sectors) | |||
5087 | return -ENOSPC; | 5131 | return -ENOSPC; |
5088 | } | 5132 | } |
5089 | rv = mddev->pers->resize(mddev, num_sectors); | 5133 | rv = mddev->pers->resize(mddev, num_sectors); |
5090 | if (!rv) { | 5134 | if (!rv) |
5091 | struct block_device *bdev; | 5135 | revalidate_disk(mddev->gendisk); |
5092 | |||
5093 | bdev = bdget_disk(mddev->gendisk, 0); | ||
5094 | if (bdev) { | ||
5095 | mutex_lock(&bdev->bd_inode->i_mutex); | ||
5096 | i_size_write(bdev->bd_inode, | ||
5097 | (loff_t)mddev->array_sectors << 9); | ||
5098 | mutex_unlock(&bdev->bd_inode->i_mutex); | ||
5099 | bdput(bdev); | ||
5100 | } | ||
5101 | } | ||
5102 | return rv; | 5136 | return rv; |
5103 | } | 5137 | } |
5104 | 5138 | ||
@@ -5484,12 +5518,12 @@ static int md_open(struct block_device *bdev, fmode_t mode) | |||
5484 | } | 5518 | } |
5485 | BUG_ON(mddev != bdev->bd_disk->private_data); | 5519 | BUG_ON(mddev != bdev->bd_disk->private_data); |
5486 | 5520 | ||
5487 | if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) | 5521 | if ((err = mutex_lock_interruptible(&mddev->open_mutex))) |
5488 | goto out; | 5522 | goto out; |
5489 | 5523 | ||
5490 | err = 0; | 5524 | err = 0; |
5491 | atomic_inc(&mddev->openers); | 5525 | atomic_inc(&mddev->openers); |
5492 | mddev_unlock(mddev); | 5526 | mutex_unlock(&mddev->open_mutex); |
5493 | 5527 | ||
5494 | check_disk_change(bdev); | 5528 | check_disk_change(bdev); |
5495 | out: | 5529 | out: |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 9430a110db93..f8fc188bc762 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -223,6 +223,16 @@ struct mddev_s | |||
223 | * so we don't loop trying */ | 223 | * so we don't loop trying */ |
224 | 224 | ||
225 | int in_sync; /* know to not need resync */ | 225 | int in_sync; /* know to not need resync */ |
226 | /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so | ||
227 | * that we are never stopping an array while it is open. | ||
228 | * 'reconfig_mutex' protects all other reconfiguration. | ||
229 | * These locks are separate due to conflicting interactions | ||
230 | * with bdev->bd_mutex. | ||
231 | * Lock ordering is: | ||
232 | * reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk | ||
233 | * bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open | ||
234 | */ | ||
235 | struct mutex open_mutex; | ||
226 | struct mutex reconfig_mutex; | 236 | struct mutex reconfig_mutex; |
227 | atomic_t active; /* general refcount */ | 237 | atomic_t active; /* general refcount */ |
228 | atomic_t openers; /* number of active opens */ | 238 | atomic_t openers; /* number of active opens */ |
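The comment spells out a strict acquisition order for the new lock. A small pthread sketch of the same discipline, showing the two legal call paths side by side (user-space stand-ins; the kernel locks are of course not pthread mutexes):

    #include <pthread.h>

    static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t bd_mutex       = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t open_mutex     = PTHREAD_MUTEX_INITIALIZER;

    /* do_md_run -> revalidate_disk shape: reconfig_mutex before bd_mutex. */
    static void reconfigure(void)
    {
            pthread_mutex_lock(&reconfig_mutex);
            pthread_mutex_lock(&bd_mutex);
            /* resize, update geometry, ... */
            pthread_mutex_unlock(&bd_mutex);
            pthread_mutex_unlock(&reconfig_mutex);
    }

    /* __blkdev_get -> md_open shape: bd_mutex before open_mutex, and
     * open_mutex is never held while waiting for the other two. */
    static void open_array(void)
    {
            pthread_mutex_lock(&bd_mutex);
            pthread_mutex_lock(&open_mutex);
            /* count the opener */
            pthread_mutex_unlock(&open_mutex);
            pthread_mutex_unlock(&bd_mutex);
    }

Because the three locks fall into one consistent global order (reconfig_mutex, then bd_mutex, then open_mutex), md_open() no longer needs reconfig_mutex at all and cannot deadlock against do_md_run() or do_md_stop().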
@@ -431,5 +441,7 @@ extern int md_allow_write(mddev_t *mddev); | |||
431 | extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | 441 | extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); |
432 | extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); | 442 | extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); |
433 | extern int md_check_no_bitmap(mddev_t *mddev); | 443 | extern int md_check_no_bitmap(mddev_t *mddev); |
444 | extern int md_integrity_register(mddev_t *mddev); | ||
445 | void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | ||
434 | 446 | ||
435 | #endif /* _MD_MD_H */ | 447 | #endif /* _MD_MD_H */ |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 237fe3fd235c..7140909f6662 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -313,6 +313,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
313 | set_bit(In_sync, &rdev->flags); | 313 | set_bit(In_sync, &rdev->flags); |
314 | rcu_assign_pointer(p->rdev, rdev); | 314 | rcu_assign_pointer(p->rdev, rdev); |
315 | err = 0; | 315 | err = 0; |
316 | md_integrity_add_rdev(rdev, mddev); | ||
316 | break; | 317 | break; |
317 | } | 318 | } |
318 | 319 | ||
@@ -345,7 +346,9 @@ static int multipath_remove_disk(mddev_t *mddev, int number) | |||
345 | /* lost the race, try later */ | 346 | /* lost the race, try later */ |
346 | err = -EBUSY; | 347 | err = -EBUSY; |
347 | p->rdev = rdev; | 348 | p->rdev = rdev; |
349 | goto abort; | ||
348 | } | 350 | } |
351 | md_integrity_register(mddev); | ||
349 | } | 352 | } |
350 | abort: | 353 | abort: |
351 | 354 | ||
@@ -519,7 +522,7 @@ static int multipath_run (mddev_t *mddev) | |||
519 | mddev->queue->unplug_fn = multipath_unplug; | 522 | mddev->queue->unplug_fn = multipath_unplug; |
520 | mddev->queue->backing_dev_info.congested_fn = multipath_congested; | 523 | mddev->queue->backing_dev_info.congested_fn = multipath_congested; |
521 | mddev->queue->backing_dev_info.congested_data = mddev; | 524 | mddev->queue->backing_dev_info.congested_data = mddev; |
522 | 525 | md_integrity_register(mddev); | |
523 | return 0; | 526 | return 0; |
524 | 527 | ||
525 | out_free_conf: | 528 | out_free_conf: |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 335f490dcad6..898e2bdfee47 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -351,6 +351,7 @@ static int raid0_run(mddev_t *mddev) | |||
351 | 351 | ||
352 | blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); | 352 | blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); |
353 | dump_zones(mddev); | 353 | dump_zones(mddev); |
354 | md_integrity_register(mddev); | ||
354 | return 0; | 355 | return 0; |
355 | } | 356 | } |
356 | 357 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 0569efba0c02..8726fd7ebce5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1144,7 +1144,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1144 | rcu_assign_pointer(p->rdev, rdev); | 1144 | rcu_assign_pointer(p->rdev, rdev); |
1145 | break; | 1145 | break; |
1146 | } | 1146 | } |
1147 | 1147 | md_integrity_add_rdev(rdev, mddev); | |
1148 | print_conf(conf); | 1148 | print_conf(conf); |
1149 | return err; | 1149 | return err; |
1150 | } | 1150 | } |
@@ -1178,7 +1178,9 @@ static int raid1_remove_disk(mddev_t *mddev, int number) | |||
1178 | /* lost the race, try later */ | 1178 | /* lost the race, try later */ |
1179 | err = -EBUSY; | 1179 | err = -EBUSY; |
1180 | p->rdev = rdev; | 1180 | p->rdev = rdev; |
1181 | goto abort; | ||
1181 | } | 1182 | } |
1183 | md_integrity_register(mddev); | ||
1182 | } | 1184 | } |
1183 | abort: | 1185 | abort: |
1184 | 1186 | ||
@@ -2067,7 +2069,7 @@ static int run(mddev_t *mddev) | |||
2067 | mddev->queue->unplug_fn = raid1_unplug; | 2069 | mddev->queue->unplug_fn = raid1_unplug; |
2068 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; | 2070 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; |
2069 | mddev->queue->backing_dev_info.congested_data = mddev; | 2071 | mddev->queue->backing_dev_info.congested_data = mddev; |
2070 | 2072 | md_integrity_register(mddev); | |
2071 | return 0; | 2073 | return 0; |
2072 | 2074 | ||
2073 | out_no_mem: | 2075 | out_no_mem: |
@@ -2132,6 +2134,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors) | |||
2132 | return -EINVAL; | 2134 | return -EINVAL; |
2133 | set_capacity(mddev->gendisk, mddev->array_sectors); | 2135 | set_capacity(mddev->gendisk, mddev->array_sectors); |
2134 | mddev->changed = 1; | 2136 | mddev->changed = 1; |
2137 | revalidate_disk(mddev->gendisk); | ||
2135 | if (sectors > mddev->dev_sectors && | 2138 | if (sectors > mddev->dev_sectors && |
2136 | mddev->recovery_cp == MaxSector) { | 2139 | mddev->recovery_cp == MaxSector) { |
2137 | mddev->recovery_cp = mddev->dev_sectors; | 2140 | mddev->recovery_cp = mddev->dev_sectors; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7298a5e5a183..3d9020cf6f6e 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1170,6 +1170,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1170 | break; | 1170 | break; |
1171 | } | 1171 | } |
1172 | 1172 | ||
1173 | md_integrity_add_rdev(rdev, mddev); | ||
1173 | print_conf(conf); | 1174 | print_conf(conf); |
1174 | return err; | 1175 | return err; |
1175 | } | 1176 | } |
@@ -1203,7 +1204,9 @@ static int raid10_remove_disk(mddev_t *mddev, int number) | |||
1203 | /* lost the race, try later */ | 1204 | /* lost the race, try later */ |
1204 | err = -EBUSY; | 1205 | err = -EBUSY; |
1205 | p->rdev = rdev; | 1206 | p->rdev = rdev; |
1207 | goto abort; | ||
1206 | } | 1208 | } |
1209 | md_integrity_register(mddev); | ||
1207 | } | 1210 | } |
1208 | abort: | 1211 | abort: |
1209 | 1212 | ||
@@ -2225,6 +2228,7 @@ static int run(mddev_t *mddev) | |||
2225 | 2228 | ||
2226 | if (conf->near_copies < mddev->raid_disks) | 2229 | if (conf->near_copies < mddev->raid_disks) |
2227 | blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); | 2230 | blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); |
2231 | md_integrity_register(mddev); | ||
2228 | return 0; | 2232 | return 0; |
2229 | 2233 | ||
2230 | out_free_conf: | 2234 | out_free_conf: |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 37835538b58e..b8a2c5dc67ba 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3785,7 +3785,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped | |||
3785 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { | 3785 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { |
3786 | sector_nr = raid5_size(mddev, 0, 0) | 3786 | sector_nr = raid5_size(mddev, 0, 0) |
3787 | - conf->reshape_progress; | 3787 | - conf->reshape_progress; |
3788 | } else if (mddev->delta_disks > 0 && | 3788 | } else if (mddev->delta_disks >= 0 && |
3789 | conf->reshape_progress > 0) | 3789 | conf->reshape_progress > 0) |
3790 | sector_nr = conf->reshape_progress; | 3790 | sector_nr = conf->reshape_progress; |
3791 | sector_div(sector_nr, new_data_disks); | 3791 | sector_div(sector_nr, new_data_disks); |
@@ -3999,6 +3999,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
3999 | return 0; | 3999 | return 0; |
4000 | } | 4000 | } |
4001 | 4001 | ||
4002 | /* Allow raid5_quiesce to complete */ | ||
4003 | wait_event(conf->wait_for_overlap, conf->quiesce != 2); | ||
4004 | |||
4002 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) | 4005 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) |
4003 | return reshape_request(mddev, sector_nr, skipped); | 4006 | return reshape_request(mddev, sector_nr, skipped); |
4004 | 4007 | ||
@@ -4316,6 +4319,15 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) | |||
4316 | return sectors * (raid_disks - conf->max_degraded); | 4319 | return sectors * (raid_disks - conf->max_degraded); |
4317 | } | 4320 | } |
4318 | 4321 | ||
4322 | static void free_conf(raid5_conf_t *conf) | ||
4323 | { | ||
4324 | shrink_stripes(conf); | ||
4325 | safe_put_page(conf->spare_page); | ||
4326 | kfree(conf->disks); | ||
4327 | kfree(conf->stripe_hashtbl); | ||
4328 | kfree(conf); | ||
4329 | } | ||
4330 | |||
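Collecting the four teardown calls into free_conf() means the setup_conf() error path, the run() failure path and stop() all release exactly the same set of resources; the later hunks in this file switch those call sites over. The general shape of the pattern, as a self-contained sketch (illustrative fields, plain malloc/free in place of the kernel allocators):

    #include <stdlib.h>

    struct conf {
            void *stripe_hashtbl;
            void *disks;
            void *spare_page;
    };

    /* One teardown helper: every exit path calls this instead of repeating
     * (and possibly missing) individual frees. free(NULL) is a no-op, so
     * partially built configurations are handled too. */
    static void free_conf(struct conf *conf)
    {
            if (!conf)
                    return;
            free(conf->spare_page);
            free(conf->disks);
            free(conf->stripe_hashtbl);
            free(conf);
    }

    static struct conf *setup_conf(void)
    {
            struct conf *conf = calloc(1, sizeof(*conf));

            if (!conf)
                    return NULL;
            conf->stripe_hashtbl = malloc(4096);
            conf->disks = malloc(1024);
            conf->spare_page = malloc(4096);
            if (!conf->stripe_hashtbl || !conf->disks || !conf->spare_page) {
                    free_conf(conf);    /* single call cleans up whatever exists */
                    return NULL;
            }
            return conf;
    }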
4319 | static raid5_conf_t *setup_conf(mddev_t *mddev) | 4331 | static raid5_conf_t *setup_conf(mddev_t *mddev) |
4320 | { | 4332 | { |
4321 | raid5_conf_t *conf; | 4333 | raid5_conf_t *conf; |
@@ -4447,11 +4459,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) | |||
4447 | 4459 | ||
4448 | abort: | 4460 | abort: |
4449 | if (conf) { | 4461 | if (conf) { |
4450 | shrink_stripes(conf); | 4462 | free_conf(conf); |
4451 | safe_put_page(conf->spare_page); | ||
4452 | kfree(conf->disks); | ||
4453 | kfree(conf->stripe_hashtbl); | ||
4454 | kfree(conf); | ||
4455 | return ERR_PTR(-EIO); | 4463 | return ERR_PTR(-EIO); |
4456 | } else | 4464 | } else |
4457 | return ERR_PTR(-ENOMEM); | 4465 | return ERR_PTR(-ENOMEM); |
@@ -4501,7 +4509,26 @@ static int run(mddev_t *mddev) | |||
4501 | (old_disks-max_degraded)); | 4509 | (old_disks-max_degraded)); |
4502 | /* here_old is the first stripe that we might need to read | 4510 | /* here_old is the first stripe that we might need to read |
4503 | * from */ | 4511 | * from */ |
4504 | if (here_new >= here_old) { | 4512 | if (mddev->delta_disks == 0) { |
4513 | /* We cannot be sure it is safe to start an in-place | ||
4514 | * reshape. It is only safe if user-space is monitoring | ||
4515 | * and taking constant backups. | ||
4516 | * mdadm always starts a situation like this in | ||
4517 | * readonly mode so it can take control before | ||
4518 | * allowing any writes. So just check for that. | ||
4519 | */ | ||
4520 | if ((here_new * mddev->new_chunk_sectors != | ||
4521 | here_old * mddev->chunk_sectors) || | ||
4522 | mddev->ro == 0) { | ||
4523 | printk(KERN_ERR "raid5: in-place reshape must be started" | ||
4524 | " in read-only mode - aborting\n"); | ||
4525 | return -EINVAL; | ||
4526 | } | ||
4527 | } else if (mddev->delta_disks < 0 | ||
4528 | ? (here_new * mddev->new_chunk_sectors <= | ||
4529 | here_old * mddev->chunk_sectors) | ||
4530 | : (here_new * mddev->new_chunk_sectors >= | ||
4531 | here_old * mddev->chunk_sectors)) { | ||
4505 | /* Reading from the same stripe as writing to - bad */ | 4532 | /* Reading from the same stripe as writing to - bad */ |
4506 | printk(KERN_ERR "raid5: reshape_position too early for " | 4533 | printk(KERN_ERR "raid5: reshape_position too early for " |
4507 | "auto-recovery - aborting.\n"); | 4534 | "auto-recovery - aborting.\n"); |
@@ -4629,12 +4656,8 @@ abort: | |||
4629 | md_unregister_thread(mddev->thread); | 4656 | md_unregister_thread(mddev->thread); |
4630 | mddev->thread = NULL; | 4657 | mddev->thread = NULL; |
4631 | if (conf) { | 4658 | if (conf) { |
4632 | shrink_stripes(conf); | ||
4633 | print_raid5_conf(conf); | 4659 | print_raid5_conf(conf); |
4634 | safe_put_page(conf->spare_page); | 4660 | free_conf(conf); |
4635 | kfree(conf->disks); | ||
4636 | kfree(conf->stripe_hashtbl); | ||
4637 | kfree(conf); | ||
4638 | } | 4661 | } |
4639 | mddev->private = NULL; | 4662 | mddev->private = NULL; |
4640 | printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); | 4663 | printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); |
@@ -4649,13 +4672,10 @@ static int stop(mddev_t *mddev) | |||
4649 | 4672 | ||
4650 | md_unregister_thread(mddev->thread); | 4673 | md_unregister_thread(mddev->thread); |
4651 | mddev->thread = NULL; | 4674 | mddev->thread = NULL; |
4652 | shrink_stripes(conf); | ||
4653 | kfree(conf->stripe_hashtbl); | ||
4654 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4675 | mddev->queue->backing_dev_info.congested_fn = NULL; |
4655 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 4676 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
4656 | sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); | 4677 | sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); |
4657 | kfree(conf->disks); | 4678 | free_conf(conf); |
4658 | kfree(conf); | ||
4659 | mddev->private = NULL; | 4679 | mddev->private = NULL; |
4660 | return 0; | 4680 | return 0; |
4661 | } | 4681 | } |
@@ -4857,6 +4877,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) | |||
4857 | return -EINVAL; | 4877 | return -EINVAL; |
4858 | set_capacity(mddev->gendisk, mddev->array_sectors); | 4878 | set_capacity(mddev->gendisk, mddev->array_sectors); |
4859 | mddev->changed = 1; | 4879 | mddev->changed = 1; |
4880 | revalidate_disk(mddev->gendisk); | ||
4860 | if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { | 4881 | if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { |
4861 | mddev->recovery_cp = mddev->dev_sectors; | 4882 | mddev->recovery_cp = mddev->dev_sectors; |
4862 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 4883 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
@@ -5002,7 +5023,7 @@ static int raid5_start_reshape(mddev_t *mddev) | |||
5002 | spin_unlock_irqrestore(&conf->device_lock, flags); | 5023 | spin_unlock_irqrestore(&conf->device_lock, flags); |
5003 | } | 5024 | } |
5004 | mddev->raid_disks = conf->raid_disks; | 5025 | mddev->raid_disks = conf->raid_disks; |
5005 | mddev->reshape_position = 0; | 5026 | mddev->reshape_position = conf->reshape_progress; |
5006 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 5027 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
5007 | 5028 | ||
5008 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 5029 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
@@ -5057,7 +5078,6 @@ static void end_reshape(raid5_conf_t *conf) | |||
5057 | */ | 5078 | */ |
5058 | static void raid5_finish_reshape(mddev_t *mddev) | 5079 | static void raid5_finish_reshape(mddev_t *mddev) |
5059 | { | 5080 | { |
5060 | struct block_device *bdev; | ||
5061 | raid5_conf_t *conf = mddev->private; | 5081 | raid5_conf_t *conf = mddev->private; |
5062 | 5082 | ||
5063 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 5083 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
@@ -5066,15 +5086,7 @@ static void raid5_finish_reshape(mddev_t *mddev) | |||
5066 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); | 5086 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
5067 | set_capacity(mddev->gendisk, mddev->array_sectors); | 5087 | set_capacity(mddev->gendisk, mddev->array_sectors); |
5068 | mddev->changed = 1; | 5088 | mddev->changed = 1; |
5069 | 5089 | revalidate_disk(mddev->gendisk); | |
5070 | bdev = bdget_disk(mddev->gendisk, 0); | ||
5071 | if (bdev) { | ||
5072 | mutex_lock(&bdev->bd_inode->i_mutex); | ||
5073 | i_size_write(bdev->bd_inode, | ||
5074 | (loff_t)mddev->array_sectors << 9); | ||
5075 | mutex_unlock(&bdev->bd_inode->i_mutex); | ||
5076 | bdput(bdev); | ||
5077 | } | ||
5078 | } else { | 5090 | } else { |
5079 | int d; | 5091 | int d; |
5080 | mddev->degraded = conf->raid_disks; | 5092 | mddev->degraded = conf->raid_disks; |
@@ -5085,8 +5097,15 @@ static void raid5_finish_reshape(mddev_t *mddev) | |||
5085 | mddev->degraded--; | 5097 | mddev->degraded--; |
5086 | for (d = conf->raid_disks ; | 5098 | for (d = conf->raid_disks ; |
5087 | d < conf->raid_disks - mddev->delta_disks; | 5099 | d < conf->raid_disks - mddev->delta_disks; |
5088 | d++) | 5100 | d++) { |
5089 | raid5_remove_disk(mddev, d); | 5101 | mdk_rdev_t *rdev = conf->disks[d].rdev; |
5102 | if (rdev && raid5_remove_disk(mddev, d) == 0) { | ||
5103 | char nm[20]; | ||
5104 | sprintf(nm, "rd%d", rdev->raid_disk); | ||
5105 | sysfs_remove_link(&mddev->kobj, nm); | ||
5106 | rdev->raid_disk = -1; | ||
5107 | } | ||
5108 | } | ||
5090 | } | 5109 | } |
5091 | mddev->layout = conf->algorithm; | 5110 | mddev->layout = conf->algorithm; |
5092 | mddev->chunk_sectors = conf->chunk_sectors; | 5111 | mddev->chunk_sectors = conf->chunk_sectors; |
@@ -5106,12 +5125,18 @@ static void raid5_quiesce(mddev_t *mddev, int state) | |||
5106 | 5125 | ||
5107 | case 1: /* stop all writes */ | 5126 | case 1: /* stop all writes */ |
5108 | spin_lock_irq(&conf->device_lock); | 5127 | spin_lock_irq(&conf->device_lock); |
5109 | conf->quiesce = 1; | 5128 | /* '2' tells resync/reshape to pause so that all |
5129 | * active stripes can drain | ||
5130 | */ | ||
5131 | conf->quiesce = 2; | ||
5110 | wait_event_lock_irq(conf->wait_for_stripe, | 5132 | wait_event_lock_irq(conf->wait_for_stripe, |
5111 | atomic_read(&conf->active_stripes) == 0 && | 5133 | atomic_read(&conf->active_stripes) == 0 && |
5112 | atomic_read(&conf->active_aligned_reads) == 0, | 5134 | atomic_read(&conf->active_aligned_reads) == 0, |
5113 | conf->device_lock, /* nothing */); | 5135 | conf->device_lock, /* nothing */); |
5136 | conf->quiesce = 1; | ||
5114 | spin_unlock_irq(&conf->device_lock); | 5137 | spin_unlock_irq(&conf->device_lock); |
5138 | /* allow reshape to continue */ | ||
5139 | wake_up(&conf->wait_for_overlap); | ||
5115 | break; | 5140 | break; |
5116 | 5141 | ||
5117 | case 0: /* re-enable writes */ | 5142 | case 0: /* re-enable writes */ |
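The quiesce path now passes through an intermediate value: 2 parks the resync/reshape thread (via the new wait at the top of sync_request()) while in-flight stripes drain, and only then does the array settle at 1. A user-space condition-variable sketch of that two-step handshake (all names illustrative):

    #include <pthread.h>

    static pthread_mutex_t lock             = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_for_stripe  = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t  wait_for_overlap = PTHREAD_COND_INITIALIZER;
    static int quiesce;                 /* 0 running, 2 draining, 1 quiesced */
    static int active_stripes;

    /* Analogue of raid5_quiesce(mddev, 1): enter the intermediate state 2 so
     * the resync/reshape path parks itself, wait for stripes to drain, then
     * declare the array fully quiesced. */
    static void quiesce_writes(void)
    {
            pthread_mutex_lock(&lock);
            quiesce = 2;
            while (active_stripes != 0)
                    pthread_cond_wait(&wait_for_stripe, &lock);
            quiesce = 1;
            pthread_mutex_unlock(&lock);
            pthread_cond_broadcast(&wait_for_overlap);  /* reshape may continue */
    }

    /* Analogue of the new gate in sync_request(): never start new
     * resync/reshape work while a quiesce is still draining. */
    static void sync_request_gate(void)
    {
            pthread_mutex_lock(&lock);
            while (quiesce == 2)
                    pthread_cond_wait(&wait_for_overlap, &lock);
            pthread_mutex_unlock(&lock);
    }

    /* Completion of the last in-flight stripe wakes the quiescer. */
    static void finish_stripe(void)
    {
            pthread_mutex_lock(&lock);
            if (--active_stripes == 0)
                    pthread_cond_broadcast(&wait_for_stripe);
            pthread_mutex_unlock(&lock);
    }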
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index bae61b22501c..7d430835655f 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c | |||
@@ -180,14 +180,9 @@ static struct completion irq_event; | |||
180 | static int twl4030_irq_thread(void *data) | 180 | static int twl4030_irq_thread(void *data) |
181 | { | 181 | { |
182 | long irq = (long)data; | 182 | long irq = (long)data; |
183 | struct irq_desc *desc = irq_to_desc(irq); | ||
184 | static unsigned i2c_errors; | 183 | static unsigned i2c_errors; |
185 | static const unsigned max_i2c_errors = 100; | 184 | static const unsigned max_i2c_errors = 100; |
186 | 185 | ||
187 | if (!desc) { | ||
188 | pr_err("twl4030: Invalid IRQ: %ld\n", irq); | ||
189 | return -EINVAL; | ||
190 | } | ||
191 | 186 | ||
192 | current->flags |= PF_NOFREEZE; | 187 | current->flags |= PF_NOFREEZE; |
193 | 188 | ||
@@ -240,7 +235,7 @@ static int twl4030_irq_thread(void *data) | |||
240 | } | 235 | } |
241 | local_irq_enable(); | 236 | local_irq_enable(); |
242 | 237 | ||
243 | desc->chip->unmask(irq); | 238 | enable_irq(irq); |
244 | } | 239 | } |
245 | 240 | ||
246 | return 0; | 241 | return 0; |
@@ -255,25 +250,13 @@ static int twl4030_irq_thread(void *data) | |||
255 | * thread. All we do here is acknowledge and mask the interrupt and wakeup | 250 | * thread. All we do here is acknowledge and mask the interrupt and wakeup |
256 | * the kernel thread. | 251 | * the kernel thread. |
257 | */ | 252 | */ |
258 | static void handle_twl4030_pih(unsigned int irq, struct irq_desc *desc) | 253 | static irqreturn_t handle_twl4030_pih(int irq, void *devid) |
259 | { | 254 | { |
260 | /* Acknowledge, clear *AND* mask the interrupt... */ | 255 | /* Acknowledge, clear *AND* mask the interrupt... */ |
261 | desc->chip->ack(irq); | 256 | disable_irq_nosync(irq); |
262 | complete(&irq_event); | 257 | complete(devid); |
263 | } | 258 | return IRQ_HANDLED; |
264 | |||
265 | static struct task_struct *start_twl4030_irq_thread(long irq) | ||
266 | { | ||
267 | struct task_struct *thread; | ||
268 | |||
269 | init_completion(&irq_event); | ||
270 | thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq"); | ||
271 | if (!thread) | ||
272 | pr_err("twl4030: could not create irq %ld thread!\n", irq); | ||
273 | |||
274 | return thread; | ||
275 | } | 259 | } |
276 | |||
277 | /*----------------------------------------------------------------------*/ | 260 | /*----------------------------------------------------------------------*/ |
278 | 261 | ||
279 | /* | 262 | /* |
@@ -734,18 +717,28 @@ int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) | |||
734 | } | 717 | } |
735 | 718 | ||
736 | /* install an irq handler to demultiplex the TWL4030 interrupt */ | 719 | /* install an irq handler to demultiplex the TWL4030 interrupt */ |
737 | task = start_twl4030_irq_thread(irq_num); | ||
738 | if (!task) { | ||
739 | pr_err("twl4030: irq thread FAIL\n"); | ||
740 | status = -ESRCH; | ||
741 | goto fail; | ||
742 | } | ||
743 | 720 | ||
744 | set_irq_data(irq_num, task); | ||
745 | set_irq_chained_handler(irq_num, handle_twl4030_pih); | ||
746 | 721 | ||
747 | return status; | 722 | init_completion(&irq_event); |
748 | 723 | ||
724 | status = request_irq(irq_num, handle_twl4030_pih, IRQF_DISABLED, | ||
725 | "TWL4030-PIH", &irq_event); | ||
726 | if (status < 0) { | ||
727 | pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status); | ||
728 | goto fail_rqirq; | ||
729 | } | ||
730 | |||
731 | task = kthread_run(twl4030_irq_thread, (void *)irq_num, "twl4030-irq"); | ||
732 | if (IS_ERR(task)) { | ||
733 | pr_err("twl4030: could not create irq %d thread!\n", irq_num); | ||
734 | status = PTR_ERR(task); | ||
735 | goto fail_kthread; | ||
736 | } | ||
737 | return status; | ||
738 | fail_kthread: | ||
739 | free_irq(irq_num, &irq_event); | ||
740 | fail_rqirq: | ||
741 | /* clean up twl4030_sih_setup */ | ||
749 | fail: | 742 | fail: |
750 | for (i = irq_base; i < irq_end; i++) | 743 | for (i = irq_base; i < irq_end; i++) |
751 | set_irq_chip_and_handler(i, NULL, NULL); | 744 | set_irq_chip_and_handler(i, NULL, NULL); |
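Besides moving the demultiplexing onto a plain request_irq()/kthread_run() pair, the rewrite gives each acquired resource its own unwind label so a late failure releases the earlier resources in reverse order. The bare shape of that ladder (generic sketch; malloc/free stand in for request_irq()/kthread_run() and their release calls):

    #include <stdlib.h>

    static int init_irq_demux(void)
    {
            void *irq = NULL;
            void *thread = NULL;

            irq = malloc(1);            /* request_irq() stand-in */
            if (!irq)
                    goto fail;

            thread = malloc(1);         /* kthread_run() stand-in */
            if (!thread)
                    goto fail_kthread;

            return 0;                   /* both resources stay claimed */

    fail_kthread:
            free(irq);                  /* undo the irq claim first */
    fail:
            return -1;                  /* caller unwinds the irq_chip setup */
    }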
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c index 908844327db0..1e8aa590bb39 100644 --- a/drivers/mmc/host/sdhci-of.c +++ b/drivers/mmc/host/sdhci-of.c | |||
@@ -234,7 +234,7 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev, | |||
234 | return -ENODEV; | 234 | return -ENODEV; |
235 | 235 | ||
236 | host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host)); | 236 | host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host)); |
237 | if (!host) | 237 | if (IS_ERR(host)) |
238 | return -ENOMEM; | 238 | return -ENOMEM; |
239 | 239 | ||
240 | of_host = sdhci_priv(host); | 240 | of_host = sdhci_priv(host); |
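sdhci_alloc_host() signals failure with an error-encoded pointer rather than NULL, so the old "if (!host)" test could never trip. A tiny user-space re-implementation of the ERR_PTR/IS_ERR convention, for illustration only (MAX_ERRNO and the packing trick mirror the kernel idea; the rest is made up):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Small negative errno values are packed into the last page of the
     * address space, so a "pointer" can carry either a valid address or
     * an error code. */
    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_host(int fail)
    {
            return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
    }

    int main(void)
    {
            void *host = alloc_host(1);

            if (!host)                  /* the old test: never true here */
                    puts("unreachable for ERR_PTR values");
            if (IS_ERR(host))           /* the corrected test */
                    printf("allocation failed: %ld\n", PTR_ERR(host));
            return 0;
    }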
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 0b98654d8eed..7a58bd5522fd 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -284,13 +284,6 @@ config MTD_L440GX | |||
284 | 284 | ||
285 | BE VERY CAREFUL. | 285 | BE VERY CAREFUL. |
286 | 286 | ||
287 | config MTD_SBC8240 | ||
288 | tristate "Flash device on SBC8240" | ||
289 | depends on MTD_JEDECPROBE && 8260 | ||
290 | help | ||
291 | Flash access on the SBC8240 board from Wind River. See | ||
292 | <http://www.windriver.com/products/sbc8240/> | ||
293 | |||
294 | config MTD_TQM8XXL | 287 | config MTD_TQM8XXL |
295 | tristate "CFI Flash device mapped on TQM8XXL" | 288 | tristate "CFI Flash device mapped on TQM8XXL" |
296 | depends on MTD_CFI && TQM8xxL | 289 | depends on MTD_CFI && TQM8xxL |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 8bae7f9850c0..5beb0662d724 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
@@ -50,7 +50,6 @@ obj-$(CONFIG_MTD_UCLINUX) += uclinux.o | |||
50 | obj-$(CONFIG_MTD_NETtel) += nettel.o | 50 | obj-$(CONFIG_MTD_NETtel) += nettel.o |
51 | obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o | 51 | obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o |
52 | obj-$(CONFIG_MTD_H720X) += h720x-flash.o | 52 | obj-$(CONFIG_MTD_H720X) += h720x-flash.o |
53 | obj-$(CONFIG_MTD_SBC8240) += sbc8240.o | ||
54 | obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o | 53 | obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o |
55 | obj-$(CONFIG_MTD_IXP2000) += ixp2000.o | 54 | obj-$(CONFIG_MTD_IXP2000) += ixp2000.o |
56 | obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o | 55 | obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o |
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c deleted file mode 100644 index d5374cdcb163..000000000000 --- a/drivers/mtd/maps/sbc8240.c +++ /dev/null | |||
@@ -1,250 +0,0 @@ | |||
1 | /* | ||
2 | * Handle mapping of the flash memory access routines on the SBC8240 board. | ||
3 | * | ||
4 | * Carolyn Smith, Tektronix, Inc. | ||
5 | * | ||
6 | * This code is GPLed | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * The SBC8240 has 2 flash banks. | ||
11 | * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors. | ||
12 | * It contains the U-Boot code (7 sectors) and the environment (1 sector). | ||
13 | * Bank 1 is 4 x 1 MiB AMD AM29LV800BT; 15 x 64 KiB sectors, 1 x 32 KiB sector, | ||
14 | * 2 x 8 KiB sectors, 1 x 16 KiB sectors. | ||
15 | * Both parts are JEDEC compatible. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <asm/io.h> | ||
22 | |||
23 | #include <linux/mtd/mtd.h> | ||
24 | #include <linux/mtd/map.h> | ||
25 | #include <linux/mtd/cfi.h> | ||
26 | |||
27 | #ifdef CONFIG_MTD_PARTITIONS | ||
28 | #include <linux/mtd/partitions.h> | ||
29 | #endif | ||
30 | |||
31 | #define DEBUG | ||
32 | |||
33 | #ifdef DEBUG | ||
34 | # define debugk(fmt,args...) printk(fmt ,##args) | ||
35 | #else | ||
36 | # define debugk(fmt,args...) | ||
37 | #endif | ||
38 | |||
39 | |||
40 | #define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */ | ||
41 | #define WINDOW_SIZE0 0x00080000 | ||
42 | #define BUSWIDTH0 1 | ||
43 | |||
44 | #define WINDOW_ADDR1 0xFF000000 /* 4 MiB */ | ||
45 | #define WINDOW_SIZE1 0x00400000 | ||
46 | #define BUSWIDTH1 8 | ||
47 | |||
48 | #define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */ | ||
49 | #define MTDID "sbc8240-%d" /* for mtdparts= partitioning */ | ||
50 | |||
51 | |||
52 | static struct map_info sbc8240_map[2] = { | ||
53 | { | ||
54 | .name = "sbc8240 Flash Bank #0", | ||
55 | .size = WINDOW_SIZE0, | ||
56 | .bankwidth = BUSWIDTH0, | ||
57 | }, | ||
58 | { | ||
59 | .name = "sbc8240 Flash Bank #1", | ||
60 | .size = WINDOW_SIZE1, | ||
61 | .bankwidth = BUSWIDTH1, | ||
62 | } | ||
63 | }; | ||
64 | |||
65 | #define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map) | ||
66 | |||
67 | /* | ||
68 | * The following defines the partition layout of SBC8240 boards. | ||
69 | * | ||
70 | * See include/linux/mtd/partitions.h for definition of the | ||
71 | * mtd_partition structure. | ||
72 | * | ||
73 | * The *_max_flash_size is the maximum possible mapped flash size | ||
74 | * which is not necessarily the actual flash size. It must correspond | ||
75 | * to the value specified in the mapping definition defined by the | ||
76 | * "struct map_desc *_io_desc" for the corresponding machine. | ||
77 | */ | ||
78 | |||
79 | #ifdef CONFIG_MTD_PARTITIONS | ||
80 | |||
81 | static struct mtd_partition sbc8240_uboot_partitions [] = { | ||
82 | /* Bank 0 */ | ||
83 | { | ||
84 | .name = "U-boot", /* U-Boot Firmware */ | ||
85 | .offset = 0, | ||
86 | .size = 0x00070000, /* 7 x 64 KiB sectors */ | ||
87 | .mask_flags = MTD_WRITEABLE, /* force read-only */ | ||
88 | }, | ||
89 | { | ||
90 | .name = "environment", /* U-Boot environment */ | ||
91 | .offset = 0x00070000, | ||
92 | .size = 0x00010000, /* 1 x 64 KiB sector */ | ||
93 | }, | ||
94 | }; | ||
95 | |||
96 | static struct mtd_partition sbc8240_fs_partitions [] = { | ||
97 | { | ||
98 | .name = "jffs", /* JFFS filesystem */ | ||
99 | .offset = 0, | ||
100 | .size = 0x003C0000, /* 4 * 15 * 64KiB */ | ||
101 | }, | ||
102 | { | ||
103 | .name = "tmp32", | ||
104 | .offset = 0x003C0000, | ||
105 | .size = 0x00020000, /* 4 * 32KiB */ | ||
106 | }, | ||
107 | { | ||
108 | .name = "tmp8a", | ||
109 | .offset = 0x003E0000, | ||
110 | .size = 0x00008000, /* 4 * 8KiB */ | ||
111 | }, | ||
112 | { | ||
113 | .name = "tmp8b", | ||
114 | .offset = 0x003E8000, | ||
115 | .size = 0x00008000, /* 4 * 8KiB */ | ||
116 | }, | ||
117 | { | ||
118 | .name = "tmp16", | ||
119 | .offset = 0x003F0000, | ||
120 | .size = 0x00010000, /* 4 * 16KiB */ | ||
121 | } | ||
122 | }; | ||
123 | |||
124 | /* trivial struct to describe partition information */ | ||
125 | struct mtd_part_def | ||
126 | { | ||
127 | int nums; | ||
128 | unsigned char *type; | ||
129 | struct mtd_partition* mtd_part; | ||
130 | }; | ||
131 | |||
132 | static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS]; | ||
133 | static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS]; | ||
134 | |||
135 | |||
136 | #endif /* CONFIG_MTD_PARTITIONS */ | ||
137 | |||
138 | |||
139 | static int __init init_sbc8240_mtd (void) | ||
140 | { | ||
141 | static struct _cjs { | ||
142 | u_long addr; | ||
143 | u_long size; | ||
144 | } pt[NUM_FLASH_BANKS] = { | ||
145 | { | ||
146 | .addr = WINDOW_ADDR0, | ||
147 | .size = WINDOW_SIZE0 | ||
148 | }, | ||
149 | { | ||
150 | .addr = WINDOW_ADDR1, | ||
151 | .size = WINDOW_SIZE1 | ||
152 | }, | ||
153 | }; | ||
154 | |||
155 | int devicesfound = 0; | ||
156 | int i,j; | ||
157 | |||
158 | for (i = 0; i < NUM_FLASH_BANKS; i++) { | ||
159 | printk (KERN_NOTICE MSG_PREFIX | ||
160 | "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr); | ||
161 | |||
162 | sbc8240_map[i].map_priv_1 = | ||
163 | (unsigned long) ioremap (pt[i].addr, pt[i].size); | ||
164 | if (!sbc8240_map[i].map_priv_1) { | ||
165 | printk (MSG_PREFIX "failed to ioremap\n"); | ||
166 | for (j = 0; j < i; j++) { | ||
167 | iounmap((void *) sbc8240_map[j].map_priv_1); | ||
168 | sbc8240_map[j].map_priv_1 = 0; | ||
169 | } | ||
170 | return -EIO; | ||
171 | } | ||
172 | simple_map_init(&sbc8240_mtd[i]); | ||
173 | |||
174 | sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]); | ||
175 | |||
176 | if (sbc8240_mtd[i]) { | ||
177 | sbc8240_mtd[i]->module = THIS_MODULE; | ||
178 | devicesfound++; | ||
179 | } else { | ||
180 | if (sbc8240_map[i].map_priv_1) { | ||
181 | iounmap((void *) sbc8240_map[i].map_priv_1); | ||
182 | sbc8240_map[i].map_priv_1 = 0; | ||
183 | } | ||
184 | } | ||
185 | } | ||
186 | |||
187 | if (!devicesfound) { | ||
188 | printk(KERN_NOTICE MSG_PREFIX | ||
189 | "No suppported flash chips found!\n"); | ||
190 | return -ENXIO; | ||
191 | } | ||
192 | |||
193 | #ifdef CONFIG_MTD_PARTITIONS | ||
194 | sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions; | ||
195 | sbc8240_part_banks[0].type = "static image"; | ||
196 | sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions); | ||
197 | sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions; | ||
198 | sbc8240_part_banks[1].type = "static file system"; | ||
199 | sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions); | ||
200 | |||
201 | for (i = 0; i < NUM_FLASH_BANKS; i++) { | ||
202 | |||
203 | if (!sbc8240_mtd[i]) continue; | ||
204 | if (sbc8240_part_banks[i].nums == 0) { | ||
205 | printk (KERN_NOTICE MSG_PREFIX | ||
206 | "No partition info available, registering whole device\n"); | ||
207 | add_mtd_device(sbc8240_mtd[i]); | ||
208 | } else { | ||
209 | printk (KERN_NOTICE MSG_PREFIX | ||
210 | "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name); | ||
211 | add_mtd_partitions (sbc8240_mtd[i], | ||
212 | sbc8240_part_banks[i].mtd_part, | ||
213 | sbc8240_part_banks[i].nums); | ||
214 | } | ||
215 | } | ||
216 | #else | ||
217 | printk(KERN_NOTICE MSG_PREFIX | ||
218 | "Registering %d flash banks at once\n", devicesfound); | ||
219 | |||
220 | for (i = 0; i < devicesfound; i++) { | ||
221 | add_mtd_device(sbc8240_mtd[i]); | ||
222 | } | ||
223 | #endif /* CONFIG_MTD_PARTITIONS */ | ||
224 | |||
225 | return devicesfound == 0 ? -ENXIO : 0; | ||
226 | } | ||
227 | |||
228 | static void __exit cleanup_sbc8240_mtd (void) | ||
229 | { | ||
230 | int i; | ||
231 | |||
232 | for (i = 0; i < NUM_FLASH_BANKS; i++) { | ||
233 | if (sbc8240_mtd[i]) { | ||
234 | del_mtd_device (sbc8240_mtd[i]); | ||
235 | map_destroy (sbc8240_mtd[i]); | ||
236 | } | ||
237 | if (sbc8240_map[i].map_priv_1) { | ||
238 | iounmap ((void *) sbc8240_map[i].map_priv_1); | ||
239 | sbc8240_map[i].map_priv_1 = 0; | ||
240 | } | ||
241 | } | ||
242 | } | ||
243 | |||
244 | module_init (init_sbc8240_mtd); | ||
245 | module_exit (cleanup_sbc8240_mtd); | ||
246 | |||
247 | MODULE_LICENSE ("GPL"); | ||
248 | MODULE_AUTHOR ("Carolyn Smith <carolyn.smith@tektronix.com>"); | ||
249 | MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards"); | ||
250 | |||
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index c3f62654b6df..7baba40c1ed2 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -144,7 +144,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) | |||
144 | struct mtd_blktrans_ops *tr = dev->tr; | 144 | struct mtd_blktrans_ops *tr = dev->tr; |
145 | int ret = -ENODEV; | 145 | int ret = -ENODEV; |
146 | 146 | ||
147 | if (!try_module_get(dev->mtd->owner)) | 147 | if (!get_mtd_device(NULL, dev->mtd->index)) |
148 | goto out; | 148 | goto out; |
149 | 149 | ||
150 | if (!try_module_get(tr->owner)) | 150 | if (!try_module_get(tr->owner)) |
@@ -158,7 +158,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) | |||
158 | ret = 0; | 158 | ret = 0; |
159 | if (tr->open && (ret = tr->open(dev))) { | 159 | if (tr->open && (ret = tr->open(dev))) { |
160 | dev->mtd->usecount--; | 160 | dev->mtd->usecount--; |
161 | module_put(dev->mtd->owner); | 161 | put_mtd_device(dev->mtd); |
162 | out_tr: | 162 | out_tr: |
163 | module_put(tr->owner); | 163 | module_put(tr->owner); |
164 | } | 164 | } |
@@ -177,7 +177,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode) | |||
177 | 177 | ||
178 | if (!ret) { | 178 | if (!ret) { |
179 | dev->mtd->usecount--; | 179 | dev->mtd->usecount--; |
180 | module_put(dev->mtd->owner); | 180 | put_mtd_device(dev->mtd); |
181 | module_put(tr->owner); | 181 | module_put(tr->owner); |
182 | } | 182 | } |
183 | 183 | ||
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 208c6faa0358..77db5ce24d92 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c | |||
@@ -29,6 +29,8 @@ static struct mtdblk_dev { | |||
29 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; | 29 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; |
30 | } *mtdblks[MAX_MTD_DEVICES]; | 30 | } *mtdblks[MAX_MTD_DEVICES]; |
31 | 31 | ||
32 | static struct mutex mtdblks_lock; | ||
33 | |||
32 | /* | 34 | /* |
33 | * Cache stuff... | 35 | * Cache stuff... |
34 | * | 36 | * |
@@ -270,15 +272,19 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) | |||
270 | 272 | ||
271 | DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); | 273 | DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); |
272 | 274 | ||
275 | mutex_lock(&mtdblks_lock); | ||
273 | if (mtdblks[dev]) { | 276 | if (mtdblks[dev]) { |
274 | mtdblks[dev]->count++; | 277 | mtdblks[dev]->count++; |
278 | mutex_unlock(&mtdblks_lock); | ||
275 | return 0; | 279 | return 0; |
276 | } | 280 | } |
277 | 281 | ||
278 | /* OK, it's not open. Create cache info for it */ | 282 | /* OK, it's not open. Create cache info for it */ |
279 | mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); | 283 | mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); |
280 | if (!mtdblk) | 284 | if (!mtdblk) { |
285 | mutex_unlock(&mtdblks_lock); | ||
281 | return -ENOMEM; | 286 | return -ENOMEM; |
287 | } | ||
282 | 288 | ||
283 | mtdblk->count = 1; | 289 | mtdblk->count = 1; |
284 | mtdblk->mtd = mtd; | 290 | mtdblk->mtd = mtd; |
@@ -291,6 +297,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) | |||
291 | } | 297 | } |
292 | 298 | ||
293 | mtdblks[dev] = mtdblk; | 299 | mtdblks[dev] = mtdblk; |
300 | mutex_unlock(&mtdblks_lock); | ||
294 | 301 | ||
295 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); | 302 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); |
296 | 303 | ||
@@ -304,6 +311,8 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
304 | 311 | ||
305 | DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); | 312 | DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); |
306 | 313 | ||
314 | mutex_lock(&mtdblks_lock); | ||
315 | |||
307 | mutex_lock(&mtdblk->cache_mutex); | 316 | mutex_lock(&mtdblk->cache_mutex); |
308 | write_cached_data(mtdblk); | 317 | write_cached_data(mtdblk); |
309 | mutex_unlock(&mtdblk->cache_mutex); | 318 | mutex_unlock(&mtdblk->cache_mutex); |
@@ -316,6 +325,9 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
316 | vfree(mtdblk->cache_data); | 325 | vfree(mtdblk->cache_data); |
317 | kfree(mtdblk); | 326 | kfree(mtdblk); |
318 | } | 327 | } |
328 | |||
329 | mutex_unlock(&mtdblks_lock); | ||
330 | |||
319 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); | 331 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); |
320 | 332 | ||
321 | return 0; | 333 | return 0; |
@@ -376,6 +388,8 @@ static struct mtd_blktrans_ops mtdblock_tr = { | |||
376 | 388 | ||
377 | static int __init init_mtdblock(void) | 389 | static int __init init_mtdblock(void) |
378 | { | 390 | { |
391 | mutex_init(&mtdblks_lock); | ||
392 | |||
379 | return register_mtd_blktrans(&mtdblock_tr); | 393 | return register_mtd_blktrans(&mtdblock_tr); |
380 | } | 394 | } |
381 | 395 | ||
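With mtdblks_lock in place, two concurrent opens can no longer both observe an empty slot and allocate two caches, and a release can no longer free the cache out from under a racing open. A condensed user-space analogue of that mutex-protected, refcounted device table (illustrative types):

    #include <pthread.h>
    #include <stdlib.h>

    #define MAX_DEVICES 32

    struct blkdev { int count; /* cache state, etc. */ };

    static struct blkdev *devices[MAX_DEVICES];
    static pthread_mutex_t devices_lock = PTHREAD_MUTEX_INITIALIZER;

    static int dev_open(int idx)
    {
            pthread_mutex_lock(&devices_lock);
            if (devices[idx]) {                 /* already open: just count */
                    devices[idx]->count++;
                    pthread_mutex_unlock(&devices_lock);
                    return 0;
            }
            struct blkdev *d = calloc(1, sizeof(*d));
            if (!d) {
                    pthread_mutex_unlock(&devices_lock);
                    return -1;
            }
            d->count = 1;
            devices[idx] = d;
            pthread_mutex_unlock(&devices_lock);
            return 0;
    }

    static void dev_release(int idx)
    {
            pthread_mutex_lock(&devices_lock);
            if (devices[idx] && --devices[idx]->count == 0) {
                    free(devices[idx]);         /* last user: drop the cache */
                    devices[idx] = NULL;
            }
            pthread_mutex_unlock(&devices_lock);
    }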
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index fac54a3fa3f1..00ebf7af7467 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -65,8 +65,8 @@ static void mtd_release(struct device *dev) | |||
65 | static int mtd_cls_suspend(struct device *dev, pm_message_t state) | 65 | static int mtd_cls_suspend(struct device *dev, pm_message_t state) |
66 | { | 66 | { |
67 | struct mtd_info *mtd = dev_to_mtd(dev); | 67 | struct mtd_info *mtd = dev_to_mtd(dev); |
68 | 68 | ||
69 | if (mtd->suspend) | 69 | if (mtd && mtd->suspend) |
70 | return mtd->suspend(mtd); | 70 | return mtd->suspend(mtd); |
71 | else | 71 | else |
72 | return 0; | 72 | return 0; |
@@ -76,7 +76,7 @@ static int mtd_cls_resume(struct device *dev) | |||
76 | { | 76 | { |
77 | struct mtd_info *mtd = dev_to_mtd(dev); | 77 | struct mtd_info *mtd = dev_to_mtd(dev); |
78 | 78 | ||
79 | if (mtd->resume) | 79 | if (mtd && mtd->resume) |
80 | mtd->resume(mtd); | 80 | mtd->resume(mtd); |
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
@@ -298,6 +298,7 @@ int add_mtd_device(struct mtd_info *mtd) | |||
298 | mtd->dev.class = &mtd_class; | 298 | mtd->dev.class = &mtd_class; |
299 | mtd->dev.devt = MTD_DEVT(i); | 299 | mtd->dev.devt = MTD_DEVT(i); |
300 | dev_set_name(&mtd->dev, "mtd%d", i); | 300 | dev_set_name(&mtd->dev, "mtd%d", i); |
301 | dev_set_drvdata(&mtd->dev, mtd); | ||
301 | if (device_register(&mtd->dev) != 0) { | 302 | if (device_register(&mtd->dev) != 0) { |
302 | mtd_table[i] = NULL; | 303 | mtd_table[i] = NULL; |
303 | break; | 304 | break; |
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 38d656b9b2ee..0108ed42e877 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -266,7 +266,7 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area) | |||
266 | 266 | ||
267 | if (ONENAND_CURRENT_BUFFERRAM(this)) { | 267 | if (ONENAND_CURRENT_BUFFERRAM(this)) { |
268 | if (area == ONENAND_DATARAM) | 268 | if (area == ONENAND_DATARAM) |
269 | return mtd->writesize; | 269 | return this->writesize; |
270 | if (area == ONENAND_SPARERAM) | 270 | if (area == ONENAND_SPARERAM) |
271 | return mtd->oobsize; | 271 | return mtd->oobsize; |
272 | } | 272 | } |
@@ -770,6 +770,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev) | |||
770 | } | 770 | } |
771 | iounmap(c->onenand.base); | 771 | iounmap(c->onenand.base); |
772 | release_mem_region(c->phys_base, ONENAND_IO_SIZE); | 772 | release_mem_region(c->phys_base, ONENAND_IO_SIZE); |
773 | gpmc_cs_free(c->gpmc_cs); | ||
773 | kfree(c); | 774 | kfree(c); |
774 | 775 | ||
775 | return 0; | 776 | return 0; |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 0f2034c3ed2f..e4d9ef0c965a 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
@@ -1254,6 +1254,7 @@ out_free: | |||
1254 | if (!ubi->volumes[i]) | 1254 | if (!ubi->volumes[i]) |
1255 | continue; | 1255 | continue; |
1256 | kfree(ubi->volumes[i]->eba_tbl); | 1256 | kfree(ubi->volumes[i]->eba_tbl); |
1257 | ubi->volumes[i]->eba_tbl = NULL; | ||
1257 | } | 1258 | } |
1258 | return err; | 1259 | return err; |
1259 | } | 1260 | } |
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index a423131b6171..b847745394b4 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c | |||
@@ -781,11 +781,22 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
781 | return -EINVAL; | 781 | return -EINVAL; |
782 | } | 782 | } |
783 | 783 | ||
784 | /* | ||
785 | * Make sure that all PEBs have the same image sequence number. | ||
786 | * This allows us to detect situations when users flash UBI | ||
787 | * images incorrectly, so that the flash has the new UBI image | ||
788 | * and leftovers from the old one. This feature was added | ||
789 | * relatively recently, and the sequence number was always | ||
790 | * zero, because old UBI implementations always set it to zero. | ||
791 | * For this reason, we do not panic if some PEBs have zero | ||
792 | * sequence number, while other PEBs have non-zero sequence | ||
793 | * number. | ||
794 | */ | ||
784 | image_seq = be32_to_cpu(ech->image_seq); | 795 | image_seq = be32_to_cpu(ech->image_seq); |
785 | if (!si->image_seq_set) { | 796 | if (!si->image_seq_set) { |
786 | ubi->image_seq = image_seq; | 797 | ubi->image_seq = image_seq; |
787 | si->image_seq_set = 1; | 798 | si->image_seq_set = 1; |
788 | } else if (ubi->image_seq != image_seq) { | 799 | } else if (ubi->image_seq && ubi->image_seq != image_seq) { |
789 | ubi_err("bad image sequence number %d in PEB %d, " | 800 | ubi_err("bad image sequence number %d in PEB %d, " |
790 | "expected %d", image_seq, pnum, ubi->image_seq); | 801 | "expected %d", image_seq, pnum, ubi->image_seq); |
791 | ubi_dbg_dump_ec_hdr(ech); | 802 | ubi_dbg_dump_ec_hdr(ech); |
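The relaxed comparison treats an expected sequence number of zero as "legacy, matches anything", so images written by older UBI implementations (which always stored zero) do not trip the new mixed-image detection. The rule restated as a small helper (sketch, not the kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    struct scan_state {
            uint32_t image_seq;
            bool image_seq_set;
    };

    /* Returns false when a PEB's sequence number proves the flash holds a
     * mix of two different UBI images. Old images always wrote 0, so an
     * expected value of 0 matches anything. */
    static bool image_seq_ok(struct scan_state *s, uint32_t peb_seq)
    {
            if (!s->image_seq_set) {
                    s->image_seq = peb_seq;     /* first PEB defines the image */
                    s->image_seq_set = true;
                    return true;
            }
            return s->image_seq == 0 || s->image_seq == peb_seq;
    }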
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 3e00fa8ea65f..4a7c32895be5 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c | |||
@@ -832,7 +832,9 @@ static int corkscrew_open(struct net_device *dev) | |||
832 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 832 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
833 | vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); | 833 | vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); |
834 | } | 834 | } |
835 | vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ | 835 | if (i != 0) |
836 | vp->rx_ring[i - 1].next = | ||
837 | isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ | ||
836 | outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); | 838 | outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); |
837 | } | 839 | } |
838 | if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ | 840 | if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index c34aee91250b..c20416850948 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -2721,13 +2721,15 @@ dump_tx_ring(struct net_device *dev) | |||
2721 | &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); | 2721 | &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); |
2722 | issue_and_wait(dev, DownStall); | 2722 | issue_and_wait(dev, DownStall); |
2723 | for (i = 0; i < TX_RING_SIZE; i++) { | 2723 | for (i = 0; i < TX_RING_SIZE; i++) { |
2724 | pr_err(" %d: @%p length %8.8x status %8.8x\n", i, | 2724 | unsigned int length; |
2725 | &vp->tx_ring[i], | 2725 | |
2726 | #if DO_ZEROCOPY | 2726 | #if DO_ZEROCOPY |
2727 | le32_to_cpu(vp->tx_ring[i].frag[0].length), | 2727 | length = le32_to_cpu(vp->tx_ring[i].frag[0].length); |
2728 | #else | 2728 | #else |
2729 | le32_to_cpu(vp->tx_ring[i].length), | 2729 | length = le32_to_cpu(vp->tx_ring[i].length); |
2730 | #endif | 2730 | #endif |
2731 | pr_err(" %d: @%p length %8.8x status %8.8x\n", | ||
2732 | i, &vp->tx_ring[i], length, | ||
2731 | le32_to_cpu(vp->tx_ring[i].status)); | 2733 | le32_to_cpu(vp->tx_ring[i].status)); |
2732 | } | 2734 | } |
2733 | if (!stalled) | 2735 | if (!stalled) |
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 1686dca28748..1f016d66684a 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c | |||
@@ -1474,13 +1474,13 @@ static void eexp_hw_init586(struct net_device *dev) | |||
1474 | outw(0x0000, ioaddr + 0x800c); | 1474 | outw(0x0000, ioaddr + 0x800c); |
1475 | outw(0x0000, ioaddr + 0x800e); | 1475 | outw(0x0000, ioaddr + 0x800e); |
1476 | 1476 | ||
1477 | for (i = 0; i < (sizeof(start_code)); i+=32) { | 1477 | for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) { |
1478 | int j; | 1478 | int j; |
1479 | outw(i, ioaddr + SM_PTR); | 1479 | outw(i, ioaddr + SM_PTR); |
1480 | for (j = 0; j < 16; j+=2) | 1480 | for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2) |
1481 | outw(start_code[(i+j)/2], | 1481 | outw(start_code[(i+j)/2], |
1482 | ioaddr+0x4000+j); | 1482 | ioaddr+0x4000+j); |
1483 | for (j = 0; j < 16; j+=2) | 1483 | for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2) |
1484 | outw(start_code[(i+j+16)/2], | 1484 | outw(start_code[(i+j+16)/2], |
1485 | ioaddr+0x8000+j); | 1485 | ioaddr+0x8000+j); |
1486 | } | 1486 | } |
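The init loop walks start_code in 32-byte windows split across two I/O apertures, and without the added guards the tail window could index past the end of the table, presumably because the table length need not be a multiple of the window size. A standalone sketch of the bounded version of that walk (single window for brevity, made-up table contents):

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const uint16_t start_code[21] = { 0 };   /* 42 bytes: not a multiple of 32 */

    int main(void)
    {
            /* Walk the table in 32-byte windows, but never index past its end:
             * every (i + j) / 2 access is clamped by ARRAY_SIZE, as in the
             * fixed eexp_hw_init586() loop. */
            for (size_t i = 0; i < ARRAY_SIZE(start_code) * 2; i += 32)
                    for (size_t j = 0; j < 32 && (i + j) / 2 < ARRAY_SIZE(start_code); j += 2)
                            printf("word %zu = %u\n", (i + j) / 2,
                                   (unsigned)start_code[(i + j) / 2]);
            return 0;
    }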
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 78952f8324e2..fa311a950996 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0101" | 43 | #define DRV_VERSION "EHEA_0102" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index e8d46cc1bec2..977c3d358279 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) | |||
1545 | { | 1545 | { |
1546 | int ret, i; | 1546 | int ret, i; |
1547 | 1547 | ||
1548 | if (pr->qp) | ||
1549 | netif_napi_del(&pr->napi); | ||
1550 | |||
1548 | ret = ehea_destroy_qp(pr->qp); | 1551 | ret = ehea_destroy_qp(pr->qp); |
1549 | 1552 | ||
1550 | if (!ret) { | 1553 | if (!ret) { |
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index dbf06e9313cc..2234118eedbb 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c | |||
@@ -366,9 +366,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals | |||
366 | return -EINVAL; | 366 | return -EINVAL; |
367 | } | 367 | } |
368 | 368 | ||
369 | priv->rxic = mk_ic_value( | 369 | priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, |
370 | gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs), | 370 | gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); |
371 | cvals->rx_max_coalesced_frames); | ||
372 | 371 | ||
373 | /* Set up tx coalescing */ | 372 | /* Set up tx coalescing */ |
374 | if ((cvals->tx_coalesce_usecs == 0) || | 373 | if ((cvals->tx_coalesce_usecs == 0) || |
@@ -390,9 +389,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals | |||
390 | return -EINVAL; | 389 | return -EINVAL; |
391 | } | 390 | } |
392 | 391 | ||
393 | priv->txic = mk_ic_value( | 392 | priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames, |
394 | gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs), | 393 | gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); |
395 | cvals->tx_max_coalesced_frames); | ||
396 | 394 | ||
397 | gfar_write(&priv->regs->rxic, 0); | 395 | gfar_write(&priv->regs->rxic, 0); |
398 | if (priv->rxcoalescing) | 396 | if (priv->rxcoalescing) |
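mk_ic_value() takes the frame count first and the time (in ticks) second; the old call sites had the two swapped, which the compiler accepts silently because both are bare integers. One way to make that class of mix-up fail at compile time, sketched with illustrative wrapper types (not the gianfar API, and the bit layout below is invented):

    #include <stdint.h>

    /* Wrapping each argument in its own single-member struct turns a swapped
     * call into a type error, where two bare u32 parameters would silently
     * accept either order. */
    struct frame_count { uint32_t v; };
    struct tick_count  { uint32_t v; };

    static uint32_t mk_ic_value(struct frame_count count, struct tick_count time)
    {
            return (count.v << 16) | (time.v & 0xffff); /* layout is illustrative */
    }

    /* usage: mk_ic_value((struct frame_count){ 16 }, (struct tick_count){ 1000 }); */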
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 2a4faf9ade69..a9a61efa964c 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c | |||
@@ -274,6 +274,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) | |||
274 | 274 | ||
275 | err = mbx->ops.read_posted(hw, msgbuf, 2); | 275 | err = mbx->ops.read_posted(hw, msgbuf, 2); |
276 | 276 | ||
277 | msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; | ||
278 | |||
277 | /* if nacked the vlan was rejected */ | 279 | /* if nacked the vlan was rejected */ |
278 | if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) | 280 | if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) |
279 | err = -E1000_ERR_MAC_INIT; | 281 | err = -E1000_ERR_MAC_INIT; |
@@ -317,6 +319,8 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) | |||
317 | if (!ret_val) | 319 | if (!ret_val) |
318 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); | 320 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); |
319 | 321 | ||
322 | msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; | ||
323 | |||
320 | /* if nacked the address was rejected, use "perm_addr" */ | 324 | /* if nacked the address was rejected, use "perm_addr" */ |
321 | if (!ret_val && | 325 | if (!ret_val && |
322 | (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) | 326 | (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) |
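
The igbvf change strips the CTS status bit from the PF's mailbox reply before the exact-match NACK comparison. A short sketch of the check, on the assumption that the PF may set E1000_VT_MSGTYPE_CTS alongside the message type:

err = mbx->ops.read_posted(hw, msgbuf, 2);

/* Clear the CTS flag so the equality test below only sees the message
 * type; with CTS left in place a NACK would not compare equal and the
 * rejected VLAN would be treated as a success. */
msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;

if (!err && msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
	err = -E1000_ERR_MAC_INIT;
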
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 1b12c7ba275f..e11d83d5852b 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -96,6 +96,8 @@ | |||
96 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 | 96 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 |
97 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 | 97 | #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 |
98 | 98 | ||
99 | #define IXGBE_MAX_RSC_INT_RATE 162760 | ||
100 | |||
99 | /* wrapper around a pointer to a socket buffer, | 101 | /* wrapper around a pointer to a socket buffer, |
100 | * so a DMA handle can be stored along with the buffer */ | 102 | * so a DMA handle can be stored along with the buffer */ |
101 | struct ixgbe_tx_buffer { | 103 | struct ixgbe_tx_buffer { |
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index b9923047ce11..522c03bc1dad 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c | |||
@@ -50,6 +50,51 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, | |||
50 | u8 *eeprom_data); | 50 | u8 *eeprom_data); |
51 | 51 | ||
52 | /** | 52 | /** |
53 | * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout | ||
54 | * @hw: pointer to the HW structure | ||
55 | * | ||
56 | * The defaults for 82598 should be in the range of 50us to 50ms; | ||
57 | * however, the hardware default for these parts is 500us to 1ms, which is | ||
58 | * less than the 10ms recommended by the PCI-e spec. To address this we need to | ||
59 | * increase the value to either 10ms to 250ms for capability version 1 config, | ||
60 | * or 16ms to 55ms for version 2. | ||
61 | **/ | ||
62 | void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) | ||
63 | { | ||
64 | struct ixgbe_adapter *adapter = hw->back; | ||
65 | u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); | ||
66 | u16 pcie_devctl2; | ||
67 | |||
68 | /* only take action if timeout value is defaulted to 0 */ | ||
69 | if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) | ||
70 | goto out; | ||
71 | |||
72 | /* | ||
73 | * if capabilities version is type 1 we can write the | ||
74 | * timeout of 10ms to 250ms through the GCR register | ||
75 | */ | ||
76 | if (!(gcr & IXGBE_GCR_CAP_VER2)) { | ||
77 | gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; | ||
78 | goto out; | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * for version 2 capabilities we need to write the config space | ||
83 | * directly in order to set the completion timeout value for | ||
84 | * 16ms to 55ms | ||
85 | */ | ||
86 | pci_read_config_word(adapter->pdev, | ||
87 | IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2); | ||
88 | pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; | ||
89 | pci_write_config_word(adapter->pdev, | ||
90 | IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); | ||
91 | out: | ||
92 | /* disable completion timeout resend */ | ||
93 | gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; | ||
94 | IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); | ||
95 | } | ||
96 | |||
97 | /** | ||
53 | * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count | 98 | * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count |
54 | * @hw: pointer to hardware structure | 99 | * @hw: pointer to hardware structure |
55 | * | 100 | * |
@@ -153,6 +198,26 @@ out: | |||
153 | } | 198 | } |
154 | 199 | ||
155 | /** | 200 | /** |
201 | * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx | ||
202 | * @hw: pointer to hardware structure | ||
203 | * | ||
204 | * Starts the hardware using the generic start_hw function. | ||
205 | * Then set pcie completion timeout | ||
206 | **/ | ||
207 | s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) | ||
208 | { | ||
209 | s32 ret_val = 0; | ||
210 | |||
211 | ret_val = ixgbe_start_hw_generic(hw); | ||
212 | |||
213 | /* set the completion timeout for interface */ | ||
214 | if (ret_val == 0) | ||
215 | ixgbe_set_pcie_completion_timeout(hw); | ||
216 | |||
217 | return ret_val; | ||
218 | } | ||
219 | |||
220 | /** | ||
156 | * ixgbe_get_link_capabilities_82598 - Determines link capabilities | 221 | * ixgbe_get_link_capabilities_82598 - Determines link capabilities |
157 | * @hw: pointer to hardware structure | 222 | * @hw: pointer to hardware structure |
158 | * @speed: pointer to link speed | 223 | * @speed: pointer to link speed |
@@ -1085,7 +1150,7 @@ out: | |||
1085 | static struct ixgbe_mac_operations mac_ops_82598 = { | 1150 | static struct ixgbe_mac_operations mac_ops_82598 = { |
1086 | .init_hw = &ixgbe_init_hw_generic, | 1151 | .init_hw = &ixgbe_init_hw_generic, |
1087 | .reset_hw = &ixgbe_reset_hw_82598, | 1152 | .reset_hw = &ixgbe_reset_hw_82598, |
1088 | .start_hw = &ixgbe_start_hw_generic, | 1153 | .start_hw = &ixgbe_start_hw_82598, |
1089 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, | 1154 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, |
1090 | .get_media_type = &ixgbe_get_media_type_82598, | 1155 | .get_media_type = &ixgbe_get_media_type_82598, |
1091 | .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, | 1156 | .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 2a978008fd6e..79144e950a34 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -1975,7 +1975,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev, | |||
1975 | * any other value means disable eitr, which is best | 1975 | * any other value means disable eitr, which is best |
1976 | * served by setting the interrupt rate very high | 1976 | * served by setting the interrupt rate very high |
1977 | */ | 1977 | */ |
1978 | adapter->eitr_param = IXGBE_MAX_INT_RATE; | 1978 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
1979 | adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE; | ||
1980 | else | ||
1981 | adapter->eitr_param = IXGBE_MAX_INT_RATE; | ||
1979 | adapter->itr_setting = 0; | 1982 | adapter->itr_setting = 0; |
1980 | } | 1983 | } |
1981 | 1984 | ||
@@ -1999,13 +2002,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) | |||
1999 | 2002 | ||
2000 | ethtool_op_set_flags(netdev, data); | 2003 | ethtool_op_set_flags(netdev, data); |
2001 | 2004 | ||
2002 | if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)) | 2005 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) |
2003 | return 0; | 2006 | return 0; |
2004 | 2007 | ||
2005 | /* if state changes we need to update adapter->flags and reset */ | 2008 | /* if state changes we need to update adapter->flags and reset */ |
2006 | if ((!!(data & ETH_FLAG_LRO)) != | 2009 | if ((!!(data & ETH_FLAG_LRO)) != |
2007 | (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) { | 2010 | (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { |
2008 | adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED; | 2011 | adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; |
2009 | if (netif_running(netdev)) | 2012 | if (netif_running(netdev)) |
2010 | ixgbe_reinit_locked(adapter); | 2013 | ixgbe_reinit_locked(adapter); |
2011 | else | 2014 | else |
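
Several ixgbe hunks in this patch move RSC bit tests and updates from adapter->flags to adapter->flags2. A hedged illustration of the bug class; the macro names and bit values below are invented to show the overlap, they are not the real ixgbe definitions:

#define FLAG_SOMETHING_ELSE	(1u << 5)	/* bit that lives in adapter->flags  */
#define FLAG2_RSC_ENABLED	(1u << 5)	/* bit that lives in adapter->flags2 */

bool wrong = adapter->flags  & FLAG2_RSC_ENABLED;	/* compiles, but tests the unrelated flags bit */
bool right = adapter->flags2 & FLAG2_RSC_ENABLED;	/* tests the intended RSC bit */
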
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 200454f30f6a..110c65ab5cb5 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -780,7 +780,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
780 | prefetch(next_rxd); | 780 | prefetch(next_rxd); |
781 | cleaned_count++; | 781 | cleaned_count++; |
782 | 782 | ||
783 | if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE) | 783 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) |
784 | rsc_count = ixgbe_get_rsc_count(rx_desc); | 784 | rsc_count = ixgbe_get_rsc_count(rx_desc); |
785 | 785 | ||
786 | if (rsc_count) { | 786 | if (rsc_count) { |
@@ -2036,7 +2036,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2036 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); | 2036 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); |
2037 | } | 2037 | } |
2038 | } else { | 2038 | } else { |
2039 | if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) && | 2039 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
2040 | (netdev->mtu <= ETH_DATA_LEN)) | 2040 | (netdev->mtu <= ETH_DATA_LEN)) |
2041 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 2041 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
2042 | else | 2042 | else |
@@ -2165,7 +2165,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2165 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | 2165 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
2166 | } | 2166 | } |
2167 | 2167 | ||
2168 | if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) { | 2168 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
2169 | /* Enable 82599 HW-RSC */ | 2169 | /* Enable 82599 HW-RSC */ |
2170 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2170 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2171 | j = adapter->rx_ring[i].reg_idx; | 2171 | j = adapter->rx_ring[i].reg_idx; |
@@ -3812,8 +3812,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3812 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; | 3812 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; |
3813 | } else if (hw->mac.type == ixgbe_mac_82599EB) { | 3813 | } else if (hw->mac.type == ixgbe_mac_82599EB) { |
3814 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 3814 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
3815 | adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; | 3815 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
3816 | adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; | 3816 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
3817 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | 3817 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; |
3818 | adapter->ring_feature[RING_F_FDIR].indices = | 3818 | adapter->ring_feature[RING_F_FDIR].indices = |
3819 | IXGBE_MAX_FDIR_INDICES; | 3819 | IXGBE_MAX_FDIR_INDICES; |
@@ -5360,12 +5360,19 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) | |||
5360 | static void ixgbe_netpoll(struct net_device *netdev) | 5360 | static void ixgbe_netpoll(struct net_device *netdev) |
5361 | { | 5361 | { |
5362 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5362 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
5363 | int i; | ||
5363 | 5364 | ||
5364 | disable_irq(adapter->pdev->irq); | ||
5365 | adapter->flags |= IXGBE_FLAG_IN_NETPOLL; | 5365 | adapter->flags |= IXGBE_FLAG_IN_NETPOLL; |
5366 | ixgbe_intr(adapter->pdev->irq, netdev); | 5366 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
5367 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
5368 | for (i = 0; i < num_q_vectors; i++) { | ||
5369 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | ||
5370 | ixgbe_msix_clean_many(0, q_vector); | ||
5371 | } | ||
5372 | } else { | ||
5373 | ixgbe_intr(adapter->pdev->irq, netdev); | ||
5374 | } | ||
5367 | adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; | 5375 | adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; |
5368 | enable_irq(adapter->pdev->irq); | ||
5369 | } | 5376 | } |
5370 | #endif | 5377 | #endif |
5371 | 5378 | ||
@@ -5611,7 +5618,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5611 | if (pci_using_dac) | 5618 | if (pci_using_dac) |
5612 | netdev->features |= NETIF_F_HIGHDMA; | 5619 | netdev->features |= NETIF_F_HIGHDMA; |
5613 | 5620 | ||
5614 | if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) | 5621 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
5615 | netdev->features |= NETIF_F_LRO; | 5622 | netdev->features |= NETIF_F_LRO; |
5616 | 5623 | ||
5617 | /* make sure the EEPROM is good */ | 5624 | /* make sure the EEPROM is good */ |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index fa87309dc087..be90eb4575f6 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -718,6 +718,12 @@ | |||
718 | #define IXGBE_ECC_STATUS_82599 0x110E0 | 718 | #define IXGBE_ECC_STATUS_82599 0x110E0 |
719 | #define IXGBE_BAR_CTRL_82599 0x110F4 | 719 | #define IXGBE_BAR_CTRL_82599 0x110F4 |
720 | 720 | ||
721 | /* PCI Express Control */ | ||
722 | #define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 | ||
723 | #define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 | ||
724 | #define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 | ||
725 | #define IXGBE_GCR_CAP_VER2 0x00040000 | ||
726 | |||
721 | /* Time Sync Registers */ | 727 | /* Time Sync Registers */ |
722 | #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ | 728 | #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ |
723 | #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ | 729 | #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ |
@@ -1521,6 +1527,7 @@ | |||
1521 | 1527 | ||
1522 | /* PCI Bus Info */ | 1528 | /* PCI Bus Info */ |
1523 | #define IXGBE_PCI_LINK_STATUS 0xB2 | 1529 | #define IXGBE_PCI_LINK_STATUS 0xB2 |
1530 | #define IXGBE_PCI_DEVICE_CONTROL2 0xC8 | ||
1524 | #define IXGBE_PCI_LINK_WIDTH 0x3F0 | 1531 | #define IXGBE_PCI_LINK_WIDTH 0x3F0 |
1525 | #define IXGBE_PCI_LINK_WIDTH_1 0x10 | 1532 | #define IXGBE_PCI_LINK_WIDTH_1 0x10 |
1526 | #define IXGBE_PCI_LINK_WIDTH_2 0x20 | 1533 | #define IXGBE_PCI_LINK_WIDTH_2 0x20 |
@@ -1531,6 +1538,7 @@ | |||
1531 | #define IXGBE_PCI_LINK_SPEED_5000 0x2 | 1538 | #define IXGBE_PCI_LINK_SPEED_5000 0x2 |
1532 | #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E | 1539 | #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E |
1533 | #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 | 1540 | #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 |
1541 | #define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 | ||
1534 | 1542 | ||
1535 | /* Number of 100 microseconds we wait for PCI Express master disable */ | 1543 | /* Number of 100 microseconds we wait for PCI Express master disable */ |
1536 | #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 | 1544 | #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 |
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 08c43f2ae72b..5a88b3f57693 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -249,6 +249,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, | |||
249 | pci_unmap_page(mdev->pdev, | 249 | pci_unmap_page(mdev->pdev, |
250 | (dma_addr_t) be64_to_cpu(data->addr), | 250 | (dma_addr_t) be64_to_cpu(data->addr), |
251 | frag->size, PCI_DMA_TODEVICE); | 251 | frag->size, PCI_DMA_TODEVICE); |
252 | ++data; | ||
252 | } | 253 | } |
253 | } | 254 | } |
254 | /* Stamp the freed descriptor */ | 255 | /* Stamp the freed descriptor */ |
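
The one-line mlx4 fix advances the fragment descriptor pointer inside the unmap loop; without it every iteration unmaps the DMA address of the first fragment and the remaining mappings leak. A reduced sketch of the corrected loop shape, with the descriptor and fragment layout simplified and names taken from the hunk:

/* Hedged sketch: unmap one DMA region per paged fragment. The ++data at
 * the end of the body is the fix - each fragment has its own descriptor,
 * so the pointer must move in step with the fragment index. */
for (i = 0; i < nr_frags; i++) {
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	pci_unmap_page(mdev->pdev,
		       (dma_addr_t)be64_to_cpu(data->addr),
		       frag->size, PCI_DMA_TODEVICE);
	++data;
}
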
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 637ac8b89bac..3cd8cfcf627b 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -221,7 +221,7 @@ netxen_napi_disable(struct netxen_adapter *adapter) | |||
221 | } | 221 | } |
222 | } | 222 | } |
223 | 223 | ||
224 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | 224 | static int nx_set_dma_mask(struct netxen_adapter *adapter) |
225 | { | 225 | { |
226 | struct pci_dev *pdev = adapter->pdev; | 226 | struct pci_dev *pdev = adapter->pdev; |
227 | uint64_t mask, cmask; | 227 | uint64_t mask, cmask; |
@@ -229,19 +229,17 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | |||
229 | adapter->pci_using_dac = 0; | 229 | adapter->pci_using_dac = 0; |
230 | 230 | ||
231 | mask = DMA_BIT_MASK(32); | 231 | mask = DMA_BIT_MASK(32); |
232 | /* | ||
233 | * Consistent DMA mask is set to 32 bit because it cannot be set to | ||
234 | * 35 bits. For P3 also leave it at 32 bits for now. Only the rings | ||
235 | * come off this pool. | ||
236 | */ | ||
237 | cmask = DMA_BIT_MASK(32); | 232 | cmask = DMA_BIT_MASK(32); |
238 | 233 | ||
234 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { | ||
239 | #ifndef CONFIG_IA64 | 235 | #ifndef CONFIG_IA64 |
240 | if (revision_id >= NX_P3_B0) | ||
241 | mask = DMA_BIT_MASK(39); | ||
242 | else if (revision_id == NX_P2_C1) | ||
243 | mask = DMA_BIT_MASK(35); | 236 | mask = DMA_BIT_MASK(35); |
244 | #endif | 237 | #endif |
238 | } else { | ||
239 | mask = DMA_BIT_MASK(39); | ||
240 | cmask = mask; | ||
241 | } | ||
242 | |||
245 | if (pci_set_dma_mask(pdev, mask) == 0 && | 243 | if (pci_set_dma_mask(pdev, mask) == 0 && |
246 | pci_set_consistent_dma_mask(pdev, cmask) == 0) { | 244 | pci_set_consistent_dma_mask(pdev, cmask) == 0) { |
247 | adapter->pci_using_dac = 1; | 245 | adapter->pci_using_dac = 1; |
@@ -256,7 +254,7 @@ static int | |||
256 | nx_update_dma_mask(struct netxen_adapter *adapter) | 254 | nx_update_dma_mask(struct netxen_adapter *adapter) |
257 | { | 255 | { |
258 | int change, shift, err; | 256 | int change, shift, err; |
259 | uint64_t mask, old_mask; | 257 | uint64_t mask, old_mask, old_cmask; |
260 | struct pci_dev *pdev = adapter->pdev; | 258 | struct pci_dev *pdev = adapter->pdev; |
261 | 259 | ||
262 | change = 0; | 260 | change = 0; |
@@ -272,14 +270,29 @@ nx_update_dma_mask(struct netxen_adapter *adapter) | |||
272 | 270 | ||
273 | if (change) { | 271 | if (change) { |
274 | old_mask = pdev->dma_mask; | 272 | old_mask = pdev->dma_mask; |
273 | old_cmask = pdev->dev.coherent_dma_mask; | ||
274 | |||
275 | mask = (1ULL<<(32+shift)) - 1; | 275 | mask = (1ULL<<(32+shift)) - 1; |
276 | 276 | ||
277 | err = pci_set_dma_mask(pdev, mask); | 277 | err = pci_set_dma_mask(pdev, mask); |
278 | if (err) | 278 | if (err) |
279 | return pci_set_dma_mask(pdev, old_mask); | 279 | goto err_out; |
280 | |||
281 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | ||
282 | |||
283 | err = pci_set_consistent_dma_mask(pdev, mask); | ||
284 | if (err) | ||
285 | goto err_out; | ||
286 | } | ||
287 | dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); | ||
280 | } | 288 | } |
281 | 289 | ||
282 | return 0; | 290 | return 0; |
291 | |||
292 | err_out: | ||
293 | pci_set_dma_mask(pdev, old_mask); | ||
294 | pci_set_consistent_dma_mask(pdev, old_cmask); | ||
295 | return err; | ||
283 | } | 296 | } |
284 | 297 | ||
285 | static void netxen_check_options(struct netxen_adapter *adapter) | 298 | static void netxen_check_options(struct netxen_adapter *adapter) |
@@ -1006,7 +1019,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1006 | revision_id = pdev->revision; | 1019 | revision_id = pdev->revision; |
1007 | adapter->ahw.revision_id = revision_id; | 1020 | adapter->ahw.revision_id = revision_id; |
1008 | 1021 | ||
1009 | err = nx_set_dma_mask(adapter, revision_id); | 1022 | err = nx_set_dma_mask(adapter); |
1010 | if (err) | 1023 | if (err) |
1011 | goto err_out_free_netdev; | 1024 | goto err_out_free_netdev; |
1012 | 1025 | ||
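
nx_update_dma_mask() now records both the streaming and the coherent mask before widening them, and rolls both back if either update fails, so the device never ends up with a half-applied DMA configuration. A compact, self-contained sketch of the pattern; widen_dma_masks is an illustrative name:

/* Hedged sketch of the save/try/rollback pattern used above. */
static int widen_dma_masks(struct pci_dev *pdev, int shift)
{
	u64 old_mask  = pdev->dma_mask;
	u64 old_cmask = pdev->dev.coherent_dma_mask;
	u64 mask      = (1ULL << (32 + shift)) - 1;	/* e.g. a 35- or 39-bit mask */
	int err;

	err = pci_set_dma_mask(pdev, mask);
	if (err)
		goto err_out;
	err = pci_set_consistent_dma_mask(pdev, mask);
	if (err)
		goto err_out;
	return 0;

err_out:
	/* Restore both masks so the device keeps a known-good DMA setup. */
	pci_set_dma_mask(pdev, old_mask);
	pci_set_consistent_dma_mask(pdev, old_cmask);
	return err;
}
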
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 28368157dac4..a646a445fda9 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -1611,8 +1611,11 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1611 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 | 1611 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 |
1612 | && pcnet32_dwio_check(ioaddr)) { | 1612 | && pcnet32_dwio_check(ioaddr)) { |
1613 | a = &pcnet32_dwio; | 1613 | a = &pcnet32_dwio; |
1614 | } else | 1614 | } else { |
1615 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1616 | printk(KERN_ERR PFX "No access methods\n"); | ||
1615 | goto err_release_region; | 1617 | goto err_release_region; |
1618 | } | ||
1616 | } | 1619 | } |
1617 | 1620 | ||
1618 | chip_version = | 1621 | chip_version = |
@@ -1719,7 +1722,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1719 | ret = -ENOMEM; | 1722 | ret = -ENOMEM; |
1720 | goto err_release_region; | 1723 | goto err_release_region; |
1721 | } | 1724 | } |
1722 | SET_NETDEV_DEV(dev, &pdev->dev); | 1725 | |
1726 | if (pdev) | ||
1727 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1723 | 1728 | ||
1724 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1729 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1725 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); | 1730 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); |
@@ -1818,7 +1823,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1818 | 1823 | ||
1819 | spin_lock_init(&lp->lock); | 1824 | spin_lock_init(&lp->lock); |
1820 | 1825 | ||
1821 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1822 | lp->name = chipname; | 1826 | lp->name = chipname; |
1823 | lp->shared_irq = shared; | 1827 | lp->shared_irq = shared; |
1824 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ | 1828 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ |
@@ -1852,12 +1856,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1852 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) | 1856 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) |
1853 | lp->options |= PCNET32_PORT_FD; | 1857 | lp->options |= PCNET32_PORT_FD; |
1854 | 1858 | ||
1855 | if (!a) { | ||
1856 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1857 | printk(KERN_ERR PFX "No access methods\n"); | ||
1858 | ret = -ENODEV; | ||
1859 | goto err_free_consistent; | ||
1860 | } | ||
1861 | lp->a = *a; | 1859 | lp->a = *a; |
1862 | 1860 | ||
1863 | /* prior to register_netdev, dev->name is not yet correct */ | 1861 | /* prior to register_netdev, dev->name is not yet correct */ |
@@ -1973,14 +1971,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1973 | 1971 | ||
1974 | return 0; | 1972 | return 0; |
1975 | 1973 | ||
1976 | err_free_ring: | 1974 | err_free_ring: |
1977 | pcnet32_free_ring(dev); | 1975 | pcnet32_free_ring(dev); |
1978 | err_free_consistent: | ||
1979 | pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), | 1976 | pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), |
1980 | lp->init_block, lp->init_dma_addr); | 1977 | lp->init_block, lp->init_dma_addr); |
1981 | err_free_netdev: | 1978 | err_free_netdev: |
1982 | free_netdev(dev); | 1979 | free_netdev(dev); |
1983 | err_release_region: | 1980 | err_release_region: |
1984 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | 1981 | release_region(ioaddr, PCNET32_TOTAL_SIZE); |
1985 | return ret; | 1982 | return ret; |
1986 | } | 1983 | } |
@@ -2089,6 +2086,7 @@ static void pcnet32_free_ring(struct net_device *dev) | |||
2089 | static int pcnet32_open(struct net_device *dev) | 2086 | static int pcnet32_open(struct net_device *dev) |
2090 | { | 2087 | { |
2091 | struct pcnet32_private *lp = netdev_priv(dev); | 2088 | struct pcnet32_private *lp = netdev_priv(dev); |
2089 | struct pci_dev *pdev = lp->pci_dev; | ||
2092 | unsigned long ioaddr = dev->base_addr; | 2090 | unsigned long ioaddr = dev->base_addr; |
2093 | u16 val; | 2091 | u16 val; |
2094 | int i; | 2092 | int i; |
@@ -2149,9 +2147,9 @@ static int pcnet32_open(struct net_device *dev) | |||
2149 | lp->a.write_csr(ioaddr, 124, val); | 2147 | lp->a.write_csr(ioaddr, 124, val); |
2150 | 2148 | ||
2151 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ | 2149 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ |
2152 | if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && | 2150 | if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && |
2153 | (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || | 2151 | (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || |
2154 | lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { | 2152 | pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { |
2155 | if (lp->options & PCNET32_PORT_ASEL) { | 2153 | if (lp->options & PCNET32_PORT_ASEL) { |
2156 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; | 2154 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; |
2157 | if (netif_msg_link(lp)) | 2155 | if (netif_msg_link(lp)) |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 639d11bc444e..cd37d739ac74 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -1384,7 +1384,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1384 | 1384 | ||
1385 | /* create a fragment for each channel */ | 1385 | /* create a fragment for each channel */ |
1386 | bits = B; | 1386 | bits = B; |
1387 | while (nfree > 0 && len > 0) { | 1387 | while (len > 0) { |
1388 | list = list->next; | 1388 | list = list->next; |
1389 | if (list == &ppp->channels) { | 1389 | if (list == &ppp->channels) { |
1390 | i = 0; | 1390 | i = 0; |
@@ -1431,29 +1431,31 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1431 | *otherwise divide it according to the speed | 1431 | *otherwise divide it according to the speed |
1432 | *of the channel we are going to transmit on | 1432 | *of the channel we are going to transmit on |
1433 | */ | 1433 | */ |
1434 | if (pch->speed == 0) { | 1434 | if (nfree > 0) { |
1435 | flen = totlen/nfree ; | 1435 | if (pch->speed == 0) { |
1436 | if (nbigger > 0) { | 1436 | flen = totlen/nfree ; |
1437 | flen++; | 1437 | if (nbigger > 0) { |
1438 | nbigger--; | 1438 | flen++; |
1439 | } | 1439 | nbigger--; |
1440 | } else { | 1440 | } |
1441 | flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / | 1441 | } else { |
1442 | ((totspeed*totfree)/pch->speed)) - hdrlen; | 1442 | flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / |
1443 | if (nbigger > 0) { | 1443 | ((totspeed*totfree)/pch->speed)) - hdrlen; |
1444 | flen += ((totfree - nzero)*pch->speed)/totspeed; | 1444 | if (nbigger > 0) { |
1445 | nbigger -= ((totfree - nzero)*pch->speed)/ | 1445 | flen += ((totfree - nzero)*pch->speed)/totspeed; |
1446 | nbigger -= ((totfree - nzero)*pch->speed)/ | ||
1446 | totspeed; | 1447 | totspeed; |
1448 | } | ||
1447 | } | 1449 | } |
1450 | nfree--; | ||
1448 | } | 1451 | } |
1449 | nfree--; | ||
1450 | 1452 | ||
1451 | /* | 1453 | /* |
1452 | *check if we are on the last channel or | 1454 | *check if we are on the last channel or |
1453 | *we exceeded the length of the data to | 1455 | *we exceeded the length of the data to |
1454 | *fragment | 1456 | *fragment |
1455 | */ | 1457 | */ |
1456 | if ((nfree == 0) || (flen > len)) | 1458 | if ((nfree <= 0) || (flen > len)) |
1457 | flen = len; | 1459 | flen = len; |
1458 | /* | 1460 | /* |
1459 | *it is not worth to tx on slow channels: | 1461 | *it is not worth to tx on slow channels: |
@@ -1467,7 +1469,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1467 | continue; | 1469 | continue; |
1468 | } | 1470 | } |
1469 | 1471 | ||
1470 | mtu = pch->chan->mtu + 2 - hdrlen; | 1472 | mtu = pch->chan->mtu - hdrlen; |
1471 | if (mtu < 4) | 1473 | if (mtu < 4) |
1472 | mtu = 4; | 1474 | mtu = 4; |
1473 | if (flen > mtu) | 1475 | if (flen > mtu) |
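
The reworked ppp_mp_explode() only computes a per-channel fragment length while free channels remain, and for a channel with a configured speed the share is proportional to that speed. A hedged worked example of the split, using the same integer formula as the hunk above with invented numbers:

/* For totlen = 1200, hdrlen = 4, two free channels of speed 200 and 400
 * (totspeed = 600, totfree = 2, nzero = 0) this yields roughly 398 and
 * 801 bytes: each channel gets a share close to its fraction of the
 * total speed, and the nbigger round-off adjustment hands out the rest. */
static int frag_len(int totlen, int hdrlen, int totfree, int nzero,
		    int totspeed, int speed)
{
	return ((totfree - nzero) * (totlen + hdrlen * totfree)) /
	       ((totspeed * totfree) / speed) - hdrlen;
}
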
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index f0031f1f97e5..5f2090233d7b 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -1063,6 +1063,7 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1063 | else { | 1063 | else { |
1064 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); | 1064 | int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); |
1065 | 1065 | ||
1066 | po = NULL; | ||
1066 | while (++hash < PPPOE_HASH_SIZE) { | 1067 | while (++hash < PPPOE_HASH_SIZE) { |
1067 | po = pn->hash_table[hash]; | 1068 | po = pn->hash_table[hash]; |
1068 | if (po) | 1069 | if (po) |
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index e7935d09c896..e0f9219a0aea 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -2680,6 +2680,7 @@ out_unregister_pppol2tp_proto: | |||
2680 | static void __exit pppol2tp_exit(void) | 2680 | static void __exit pppol2tp_exit(void) |
2681 | { | 2681 | { |
2682 | unregister_pppox_proto(PX_PROTO_OL2TP); | 2682 | unregister_pppox_proto(PX_PROTO_OL2TP); |
2683 | unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops); | ||
2683 | proto_unregister(&pppol2tp_sk_proto); | 2684 | proto_unregister(&pppol2tp_sk_proto); |
2684 | } | 2685 | } |
2685 | 2686 | ||
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c index 5345e47b35ac..4525cbe8dd69 100644 --- a/drivers/net/s6gmac.c +++ b/drivers/net/s6gmac.c | |||
@@ -793,7 +793,7 @@ static inline int s6gmac_phy_start(struct net_device *dev) | |||
793 | struct s6gmac *pd = netdev_priv(dev); | 793 | struct s6gmac *pd = netdev_priv(dev); |
794 | int i = 0; | 794 | int i = 0; |
795 | struct phy_device *p = NULL; | 795 | struct phy_device *p = NULL; |
796 | while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) | 796 | while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) |
797 | i++; | 797 | i++; |
798 | p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, | 798 | p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, |
799 | PHY_INTERFACE_MODE_RGMII); | 799 | PHY_INTERFACE_MODE_RGMII); |
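
The s6gmac change, and the matching de4x5, airo and libertas hunks later in this patch, all fix the same defect: the array element was dereferenced before the index was range-checked. A minimal illustration using the names from the hunk above:

/* Wrong: phy_map[i] is read before i is checked, so when no PHY is
 * found the loop reads one element past the end of the array. */
while (!(p = bus->phy_map[i]) && (i < PHY_MAX_ADDR))
	i++;

/* Right: short-circuit on the bound first, then dereference. */
while ((i < PHY_MAX_ADDR) && !(p = bus->phy_map[i]))
	i++;
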
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 3550c5dcd93c..0a551d8f5d95 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -1488,6 +1488,8 @@ static int sky2_up(struct net_device *dev) | |||
1488 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); | 1488 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); |
1489 | #endif | 1489 | #endif |
1490 | 1490 | ||
1491 | sky2->restarting = 0; | ||
1492 | |||
1491 | err = sky2_rx_start(sky2); | 1493 | err = sky2_rx_start(sky2); |
1492 | if (err) | 1494 | if (err) |
1493 | goto err_out; | 1495 | goto err_out; |
@@ -1500,6 +1502,9 @@ static int sky2_up(struct net_device *dev) | |||
1500 | 1502 | ||
1501 | sky2_set_multicast(dev); | 1503 | sky2_set_multicast(dev); |
1502 | 1504 | ||
1505 | /* wake queue in case we are restarting */ | ||
1506 | netif_wake_queue(dev); | ||
1507 | |||
1503 | if (netif_msg_ifup(sky2)) | 1508 | if (netif_msg_ifup(sky2)) |
1504 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); | 1509 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); |
1505 | return 0; | 1510 | return 0; |
@@ -1533,6 +1538,8 @@ static inline int tx_dist(unsigned tail, unsigned head) | |||
1533 | /* Number of list elements available for next tx */ | 1538 | /* Number of list elements available for next tx */ |
1534 | static inline int tx_avail(const struct sky2_port *sky2) | 1539 | static inline int tx_avail(const struct sky2_port *sky2) |
1535 | { | 1540 | { |
1541 | if (unlikely(sky2->restarting)) | ||
1542 | return 0; | ||
1536 | return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); | 1543 | return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); |
1537 | } | 1544 | } |
1538 | 1545 | ||
@@ -1818,6 +1825,10 @@ static int sky2_down(struct net_device *dev) | |||
1818 | if (netif_msg_ifdown(sky2)) | 1825 | if (netif_msg_ifdown(sky2)) |
1819 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); | 1826 | printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); |
1820 | 1827 | ||
1828 | /* explicitly shut off tx in case we're restarting */ | ||
1829 | sky2->restarting = 1; | ||
1830 | netif_tx_disable(dev); | ||
1831 | |||
1821 | /* Force flow control off */ | 1832 | /* Force flow control off */ |
1822 | sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); | 1833 | sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); |
1823 | 1834 | ||
@@ -2359,7 +2370,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) | |||
2359 | { | 2370 | { |
2360 | struct sky2_port *sky2 = netdev_priv(dev); | 2371 | struct sky2_port *sky2 = netdev_priv(dev); |
2361 | 2372 | ||
2362 | if (netif_running(dev)) { | 2373 | if (likely(netif_running(dev) && !sky2->restarting)) { |
2363 | netif_tx_lock(dev); | 2374 | netif_tx_lock(dev); |
2364 | sky2_tx_complete(sky2, last); | 2375 | sky2_tx_complete(sky2, last); |
2365 | netif_tx_unlock(dev); | 2376 | netif_tx_unlock(dev); |
@@ -4283,6 +4294,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
4283 | spin_lock_init(&sky2->phy_lock); | 4294 | spin_lock_init(&sky2->phy_lock); |
4284 | sky2->tx_pending = TX_DEF_PENDING; | 4295 | sky2->tx_pending = TX_DEF_PENDING; |
4285 | sky2->rx_pending = RX_DEF_PENDING; | 4296 | sky2->rx_pending = RX_DEF_PENDING; |
4297 | sky2->restarting = 0; | ||
4286 | 4298 | ||
4287 | hw->dev[port] = dev; | 4299 | hw->dev[port] = dev; |
4288 | 4300 | ||
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index b5549c9e5107..4486b066b43f 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -2051,6 +2051,7 @@ struct sky2_port { | |||
2051 | u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ | 2051 | u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ |
2052 | u8 rx_csum; | 2052 | u8 rx_csum; |
2053 | u8 wol; | 2053 | u8 wol; |
2054 | u8 restarting; | ||
2054 | enum flow_control flow_mode; | 2055 | enum flow_control flow_mode; |
2055 | enum flow_control flow_status; | 2056 | enum flow_control flow_status; |
2056 | 2057 | ||
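
The new sky2->restarting flag fences the transmit path while a port is torn down and brought back up. A condensed lifecycle sketch of the flag as added in the hunks above; port_down, tx_space and port_up are illustrative wrappers, tx_dist and the ring fields are assumed from the driver:

static void port_down(struct sky2_port *sky2, struct net_device *dev)
{
	sky2->restarting = 1;		/* fence the TX path first */
	netif_tx_disable(dev);		/* then stop the queue */
	/* ... tear down and rebuild the rings ... */
}

static int tx_space(const struct sky2_port *sky2)
{
	if (unlikely(sky2->restarting))
		return 0;		/* xmit sees a full ring during restart */
	return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
}

static void port_up(struct sky2_port *sky2, struct net_device *dev)
{
	sky2->restarting = 0;		/* rings are valid again */
	netif_wake_queue(dev);		/* wake in case this was a restart */
}
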
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index eb72d2e9ab3d..acfdccd44567 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -5059,7 +5059,7 @@ mii_get_phy(struct net_device *dev) | |||
5059 | if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ | 5059 | if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ |
5060 | for (j=0; j<limit; j++) { /* Search PHY table */ | 5060 | for (j=0; j<limit; j++) { /* Search PHY table */ |
5061 | if (id != phy_info[j].id) continue; /* ID match? */ | 5061 | if (id != phy_info[j].id) continue; /* ID match? */ |
5062 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); | 5062 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); |
5063 | if (k < DE4X5_MAX_PHY) { | 5063 | if (k < DE4X5_MAX_PHY) { |
5064 | memcpy((char *)&lp->phy[k], | 5064 | memcpy((char *)&lp->phy[k], |
5065 | (char *)&phy_info[j], sizeof(struct phy_table)); | 5065 | (char *)&phy_info[j], sizeof(struct phy_table)); |
@@ -5072,7 +5072,7 @@ mii_get_phy(struct net_device *dev) | |||
5072 | break; | 5072 | break; |
5073 | } | 5073 | } |
5074 | if ((j == limit) && (i < DE4X5_MAX_MII)) { | 5074 | if ((j == limit) && (i < DE4X5_MAX_MII)) { |
5075 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); | 5075 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); |
5076 | lp->phy[k].addr = i; | 5076 | lp->phy[k].addr = i; |
5077 | lp->phy[k].id = id; | 5077 | lp->phy[k].id = id; |
5078 | lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ | 5078 | lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ |
@@ -5091,7 +5091,7 @@ mii_get_phy(struct net_device *dev) | |||
5091 | purgatory: | 5091 | purgatory: |
5092 | lp->active = 0; | 5092 | lp->active = 0; |
5093 | if (lp->phy[0].id) { /* Reset the PHY devices */ | 5093 | if (lp->phy[0].id) { /* Reset the PHY devices */ |
5094 | for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ | 5094 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/ |
5095 | mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); | 5095 | mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); |
5096 | while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); | 5096 | while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); |
5097 | 5097 | ||
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index c70604f0329e..8ce5e4cee168 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -5918,20 +5918,19 @@ static int airo_set_essid(struct net_device *dev, | |||
5918 | readSsidRid(local, &SSID_rid); | 5918 | readSsidRid(local, &SSID_rid); |
5919 | 5919 | ||
5920 | /* Check if we asked for `any' */ | 5920 | /* Check if we asked for `any' */ |
5921 | if(dwrq->flags == 0) { | 5921 | if (dwrq->flags == 0) { |
5922 | /* Just send an empty SSID list */ | 5922 | /* Just send an empty SSID list */ |
5923 | memset(&SSID_rid, 0, sizeof(SSID_rid)); | 5923 | memset(&SSID_rid, 0, sizeof(SSID_rid)); |
5924 | } else { | 5924 | } else { |
5925 | int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; | 5925 | unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1; |
5926 | 5926 | ||
5927 | /* Check the size of the string */ | 5927 | /* Check the size of the string */ |
5928 | if(dwrq->length > IW_ESSID_MAX_SIZE) { | 5928 | if (dwrq->length > IW_ESSID_MAX_SIZE) |
5929 | return -E2BIG ; | 5929 | return -E2BIG ; |
5930 | } | 5930 | |
5931 | /* Check if index is valid */ | 5931 | /* Check if index is valid */ |
5932 | if((index < 0) || (index >= 4)) { | 5932 | if (index >= ARRAY_SIZE(SSID_rid.ssids)) |
5933 | return -EINVAL; | 5933 | return -EINVAL; |
5934 | } | ||
5935 | 5934 | ||
5936 | /* Set the SSID */ | 5935 | /* Set the SSID */ |
5937 | memset(SSID_rid.ssids[index].ssid, 0, | 5936 | memset(SSID_rid.ssids[index].ssid, 0, |
@@ -6819,7 +6818,7 @@ static int airo_set_txpow(struct net_device *dev, | |||
6819 | return -EINVAL; | 6818 | return -EINVAL; |
6820 | } | 6819 | } |
6821 | clear_bit (FLAG_RADIO_OFF, &local->flags); | 6820 | clear_bit (FLAG_RADIO_OFF, &local->flags); |
6822 | for (i = 0; cap_rid.txPowerLevels[i] && (i < 8); i++) | 6821 | for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++) |
6823 | if (v == cap_rid.txPowerLevels[i]) { | 6822 | if (v == cap_rid.txPowerLevels[i]) { |
6824 | readConfigRid(local, 1); | 6823 | readConfigRid(local, 1); |
6825 | local->config.txPower = v; | 6824 | local->config.txPower = v; |
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a2fda702b620..ce0e86c36a82 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c | |||
@@ -460,7 +460,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) | |||
460 | integer = swab32(eep->modalHeader.antCtrlCommon); | 460 | integer = swab32(eep->modalHeader.antCtrlCommon); |
461 | eep->modalHeader.antCtrlCommon = integer; | 461 | eep->modalHeader.antCtrlCommon = integer; |
462 | 462 | ||
463 | for (i = 0; i < AR5416_MAX_CHAINS; i++) { | 463 | for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { |
464 | integer = swab32(eep->modalHeader.antCtrlChain[i]); | 464 | integer = swab32(eep->modalHeader.antCtrlChain[i]); |
465 | eep->modalHeader.antCtrlChain[i] = integer; | 465 | eep->modalHeader.antCtrlChain[i] = integer; |
466 | } | 466 | } |
@@ -914,7 +914,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, | |||
914 | ctlMode, numCtlModes, isHt40CtlMode, | 914 | ctlMode, numCtlModes, isHt40CtlMode, |
915 | (pCtlMode[ctlMode] & EXT_ADDITIVE)); | 915 | (pCtlMode[ctlMode] & EXT_ADDITIVE)); |
916 | 916 | ||
917 | for (i = 0; (i < AR5416_NUM_CTLS) && | 917 | for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && |
918 | pEepData->ctlIndex[i]; i++) { | 918 | pEepData->ctlIndex[i]; i++) { |
919 | DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, | 919 | DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, |
920 | " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " | 920 | " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h index fbb3a573463e..2de6471d4be9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h | |||
@@ -112,7 +112,7 @@ enum iwl3945_antenna { | |||
112 | #define IWL_TX_FIFO_NONE 7 | 112 | #define IWL_TX_FIFO_NONE 7 |
113 | 113 | ||
114 | /* Minimum number of queues. MAX_NUM is defined in hw specific files */ | 114 | /* Minimum number of queues. MAX_NUM is defined in hw specific files */ |
115 | #define IWL_MIN_NUM_QUEUES 4 | 115 | #define IWL39_MIN_NUM_QUEUES 4 |
116 | 116 | ||
117 | #define IEEE80211_DATA_LEN 2304 | 117 | #define IEEE80211_DATA_LEN 2304 |
118 | #define IEEE80211_4ADDR_LEN 30 | 118 | #define IEEE80211_4ADDR_LEN 30 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 6ab07165ea28..18b135f510e5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -1332,6 +1332,9 @@ int iwl_setup_mac(struct iwl_priv *priv) | |||
1332 | 1332 | ||
1333 | hw->wiphy->custom_regulatory = true; | 1333 | hw->wiphy->custom_regulatory = true; |
1334 | 1334 | ||
1335 | /* Firmware does not support this */ | ||
1336 | hw->wiphy->disable_beacon_hints = true; | ||
1337 | |||
1335 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; | 1338 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; |
1336 | /* we create the 802.11 header and a zero-length SSID element */ | 1339 | /* we create the 802.11 header and a zero-length SSID element */ |
1337 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; | 1340 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 11e08c068917..ca00cc8ad4c7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c | |||
@@ -308,18 +308,18 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, | |||
308 | return -ENODATA; | 308 | return -ENODATA; |
309 | } | 309 | } |
310 | 310 | ||
311 | ptr = priv->eeprom; | ||
312 | if (!ptr) { | ||
313 | IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); | ||
314 | return -ENOMEM; | ||
315 | } | ||
316 | |||
311 | /* 4 characters for byte 0xYY */ | 317 | /* 4 characters for byte 0xYY */ |
312 | buf = kzalloc(buf_size, GFP_KERNEL); | 318 | buf = kzalloc(buf_size, GFP_KERNEL); |
313 | if (!buf) { | 319 | if (!buf) { |
314 | IWL_ERR(priv, "Can not allocate Buffer\n"); | 320 | IWL_ERR(priv, "Can not allocate Buffer\n"); |
315 | return -ENOMEM; | 321 | return -ENOMEM; |
316 | } | 322 | } |
317 | |||
318 | ptr = priv->eeprom; | ||
319 | if (!ptr) { | ||
320 | IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); | ||
321 | return -ENOMEM; | ||
322 | } | ||
323 | pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", | 323 | pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", |
324 | (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) | 324 | (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) |
325 | ? "OTP" : "EEPROM"); | 325 | ? "OTP" : "EEPROM"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index e2d620f0b6e8..650e20af20fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
@@ -258,8 +258,10 @@ struct iwl_channel_info { | |||
258 | #define IWL_TX_FIFO_HCCA_2 6 | 258 | #define IWL_TX_FIFO_HCCA_2 6 |
259 | #define IWL_TX_FIFO_NONE 7 | 259 | #define IWL_TX_FIFO_NONE 7 |
260 | 260 | ||
261 | /* Minimum number of queues. MAX_NUM is defined in hw specific files */ | 261 | /* Minimum number of queues. MAX_NUM is defined in hw specific files. |
262 | #define IWL_MIN_NUM_QUEUES 4 | 262 | * Set the minimum to accommodate the 4 standard TX queues, 1 command |
263 | * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ | ||
264 | #define IWL_MIN_NUM_QUEUES 10 | ||
263 | 265 | ||
264 | /* Power management (not Tx power) structures */ | 266 | /* Power management (not Tx power) structures */ |
265 | 267 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 2addf735b193..ffd5c61a7553 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -566,6 +566,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, | |||
566 | unsigned long flags; | 566 | unsigned long flags; |
567 | 567 | ||
568 | spin_lock_irqsave(&priv->sta_lock, flags); | 568 | spin_lock_irqsave(&priv->sta_lock, flags); |
569 | IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", | ||
570 | keyconf->keyidx); | ||
569 | 571 | ||
570 | if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) | 572 | if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) |
571 | IWL_ERR(priv, "index %d not used in uCode key table.\n", | 573 | IWL_ERR(priv, "index %d not used in uCode key table.\n", |
@@ -573,6 +575,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, | |||
573 | 575 | ||
574 | priv->default_wep_key--; | 576 | priv->default_wep_key--; |
575 | memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); | 577 | memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); |
578 | if (iwl_is_rfkill(priv)) { | ||
579 | IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); | ||
580 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
581 | return 0; | ||
582 | } | ||
576 | ret = iwl_send_static_wepkey_cmd(priv, 1); | 583 | ret = iwl_send_static_wepkey_cmd(priv, 1); |
577 | IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", | 584 | IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", |
578 | keyconf->keyidx, ret); | 585 | keyconf->keyidx, ret); |
@@ -853,6 +860,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, | |||
853 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | 860 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; |
854 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | 861 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; |
855 | 862 | ||
863 | if (iwl_is_rfkill(priv)) { | ||
864 | IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL is enabled.\n"); | ||
865 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
866 | return 0; | ||
867 | } | ||
856 | ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 868 | ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); |
857 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 869 | spin_unlock_irqrestore(&priv->sta_lock, flags); |
858 | return ret; | 870 | return ret; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 9bbeec9427f0..2e89040e63be 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -720,8 +720,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
720 | goto drop_unlock; | 720 | goto drop_unlock; |
721 | } | 721 | } |
722 | 722 | ||
723 | spin_unlock_irqrestore(&priv->lock, flags); | ||
724 | |||
725 | hdr_len = ieee80211_hdrlen(fc); | 723 | hdr_len = ieee80211_hdrlen(fc); |
726 | 724 | ||
727 | /* Find (or create) index into station table for destination station */ | 725 | /* Find (or create) index into station table for destination station */ |
@@ -729,7 +727,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
729 | if (sta_id == IWL_INVALID_STATION) { | 727 | if (sta_id == IWL_INVALID_STATION) { |
730 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", | 728 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", |
731 | hdr->addr1); | 729 | hdr->addr1); |
732 | goto drop; | 730 | goto drop_unlock; |
733 | } | 731 | } |
734 | 732 | ||
735 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); | 733 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); |
@@ -750,14 +748,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
750 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; | 748 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; |
751 | swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); | 749 | swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); |
752 | } | 750 | } |
753 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | ||
754 | } | 751 | } |
755 | 752 | ||
756 | txq = &priv->txq[txq_id]; | 753 | txq = &priv->txq[txq_id]; |
757 | q = &txq->q; | 754 | q = &txq->q; |
758 | txq->swq_id = swq_id; | 755 | txq->swq_id = swq_id; |
759 | 756 | ||
760 | spin_lock_irqsave(&priv->lock, flags); | 757 | if (unlikely(iwl_queue_space(q) < q->high_mark)) |
758 | goto drop_unlock; | ||
759 | |||
760 | if (ieee80211_is_data_qos(fc)) | ||
761 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | ||
761 | 762 | ||
762 | /* Set up driver data for this TFD */ | 763 | /* Set up driver data for this TFD */ |
763 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | 764 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); |
@@ -902,7 +903,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | |||
902 | 903 | ||
903 | drop_unlock: | 904 | drop_unlock: |
904 | spin_unlock_irqrestore(&priv->lock, flags); | 905 | spin_unlock_irqrestore(&priv->lock, flags); |
905 | drop: | ||
906 | return -1; | 906 | return -1; |
907 | } | 907 | } |
908 | EXPORT_SYMBOL(iwl_tx_skb); | 908 | EXPORT_SYMBOL(iwl_tx_skb); |
@@ -1171,6 +1171,8 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | |||
1171 | IWL_ERR(priv, "Start AGG on invalid station\n"); | 1171 | IWL_ERR(priv, "Start AGG on invalid station\n"); |
1172 | return -ENXIO; | 1172 | return -ENXIO; |
1173 | } | 1173 | } |
1174 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
1175 | return -EINVAL; | ||
1174 | 1176 | ||
1175 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | 1177 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { |
1176 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); | 1178 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); |
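
The iwl-tx rework keeps priv->lock held from the start of iwl_tx_skb() through the station lookup and the new queue-space check, instead of dropping and re-taking it, so the room test cannot race with the reclaim path moving the write pointer. A short sketch of the pattern, with names from the hunk and iwl_queue_space() assumed from the driver:

spin_lock_irqsave(&priv->lock, flags);
/* ... resolve the station and target txq while still holding the lock ... */
txq = &priv->txq[txq_id];
q = &txq->q;

/* Check for space under the same lock the reclaim path takes, so the
 * write pointer cannot move between this test and the enqueue below. */
if (unlikely(iwl_queue_space(q) < q->high_mark)) {
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
/* ... build the TFD, then unlock on the success path ... */
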
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 956798f2c80c..523843369ca2 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -3968,6 +3968,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) | |||
3968 | 3968 | ||
3969 | hw->wiphy->custom_regulatory = true; | 3969 | hw->wiphy->custom_regulatory = true; |
3970 | 3970 | ||
3971 | /* Firmware does not support this */ | ||
3972 | hw->wiphy->disable_beacon_hints = true; | ||
3973 | |||
3971 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; | 3974 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; |
3972 | /* we create the 802.11 header and a zero-length SSID element */ | 3975 | /* we create the 802.11 header and a zero-length SSID element */ |
3973 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; | 3976 | hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; |
@@ -4018,10 +4021,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e | |||
4018 | SET_IEEE80211_DEV(hw, &pdev->dev); | 4021 | SET_IEEE80211_DEV(hw, &pdev->dev); |
4019 | 4022 | ||
4020 | if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || | 4023 | if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || |
4021 | (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) { | 4024 | (iwl3945_mod_params.num_of_queues < IWL39_MIN_NUM_QUEUES)) { |
4022 | IWL_ERR(priv, | 4025 | IWL_ERR(priv, |
4023 | "invalid queues_num, should be between %d and %d\n", | 4026 | "invalid queues_num, should be between %d and %d\n", |
4024 | IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); | 4027 | IWL39_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); |
4025 | err = -EINVAL; | 4028 | err = -EINVAL; |
4026 | goto out_ieee80211_free_hw; | 4029 | goto out_ieee80211_free_hw; |
4027 | } | 4030 | } |
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c index 834a7f544e5d..e2334d123599 100644 --- a/drivers/net/wireless/iwmc3200wifi/commands.c +++ b/drivers/net/wireless/iwmc3200wifi/commands.c | |||
@@ -220,6 +220,7 @@ int iwm_store_rxiq_calib_result(struct iwm_priv *iwm) | |||
220 | eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); | 220 | eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); |
221 | if (IS_ERR(eeprom_rxiq)) { | 221 | if (IS_ERR(eeprom_rxiq)) { |
222 | IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); | 222 | IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); |
223 | kfree(rxiq); | ||
223 | return PTR_ERR(eeprom_rxiq); | 224 | return PTR_ERR(eeprom_rxiq); |
224 | } | 225 | } |
225 | 226 | ||
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index aea5ccf24ccf..bf294e41753b 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c | |||
@@ -106,10 +106,8 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, | |||
106 | int ret = 0; | 106 | int ret = 0; |
107 | 107 | ||
108 | wdev = iwm_wdev_alloc(sizeof_bus, dev); | 108 | wdev = iwm_wdev_alloc(sizeof_bus, dev); |
109 | if (!wdev) { | 109 | if (IS_ERR(wdev)) |
110 | dev_err(dev, "no memory for wireless device instance\n"); | 110 | return wdev; |
111 | return ERR_PTR(-ENOMEM); | ||
112 | } | ||
113 | 111 | ||
114 | iwm = wdev_to_iwm(wdev); | 112 | iwm = wdev_to_iwm(wdev); |
115 | iwm->bus_ops = if_ops; | 113 | iwm->bus_ops = if_ops; |
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c index 9a5408e7d94a..5c6968101f0d 100644 --- a/drivers/net/wireless/libertas/11d.c +++ b/drivers/net/wireless/libertas/11d.c | |||
@@ -47,7 +47,7 @@ static u8 lbs_region_2_code(u8 *region) | |||
47 | { | 47 | { |
48 | u8 i; | 48 | u8 i; |
49 | 49 | ||
50 | for (i = 0; region[i] && i < COUNTRY_CODE_LEN; i++) | 50 | for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++) |
51 | region[i] = toupper(region[i]); | 51 | region[i] = toupper(region[i]); |
52 | 52 | ||
53 | for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) { | 53 | for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) { |
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c index b9b374119033..d6997371c27e 100644 --- a/drivers/net/wireless/libertas/assoc.c +++ b/drivers/net/wireless/libertas/assoc.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* Copyright (C) 2006, Red Hat, Inc. */ | 1 | /* Copyright (C) 2006, Red Hat, Inc. */ |
2 | 2 | ||
3 | #include <linux/types.h> | 3 | #include <linux/types.h> |
4 | #include <linux/kernel.h> | ||
4 | #include <linux/etherdevice.h> | 5 | #include <linux/etherdevice.h> |
5 | #include <linux/ieee80211.h> | 6 | #include <linux/ieee80211.h> |
6 | #include <linux/if_arp.h> | 7 | #include <linux/if_arp.h> |
@@ -43,21 +44,21 @@ static int get_common_rates(struct lbs_private *priv, | |||
43 | u16 *rates_size) | 44 | u16 *rates_size) |
44 | { | 45 | { |
45 | u8 *card_rates = lbs_bg_rates; | 46 | u8 *card_rates = lbs_bg_rates; |
46 | size_t num_card_rates = sizeof(lbs_bg_rates); | ||
47 | int ret = 0, i, j; | 47 | int ret = 0, i, j; |
48 | u8 tmp[30]; | 48 | u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)]; |
49 | size_t tmp_size = 0; | 49 | size_t tmp_size = 0; |
50 | 50 | ||
51 | /* For each rate in card_rates that exists in rate1, copy to tmp */ | 51 | /* For each rate in card_rates that exists in rate1, copy to tmp */ |
52 | for (i = 0; card_rates[i] && (i < num_card_rates); i++) { | 52 | for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) { |
53 | for (j = 0; rates[j] && (j < *rates_size); j++) { | 53 | for (j = 0; j < *rates_size && rates[j]; j++) { |
54 | if (rates[j] == card_rates[i]) | 54 | if (rates[j] == card_rates[i]) |
55 | tmp[tmp_size++] = card_rates[i]; | 55 | tmp[tmp_size++] = card_rates[i]; |
56 | } | 56 | } |
57 | } | 57 | } |
58 | 58 | ||
59 | lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); | 59 | lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); |
60 | lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates); | 60 | lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, |
61 | ARRAY_SIZE(lbs_bg_rates)); | ||
61 | lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); | 62 | lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); |
62 | lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); | 63 | lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); |
63 | 64 | ||
@@ -69,10 +70,7 @@ static int get_common_rates(struct lbs_private *priv, | |||
69 | lbs_pr_alert("Previously set fixed data rate %#x isn't " | 70 | lbs_pr_alert("Previously set fixed data rate %#x isn't " |
70 | "compatible with the network.\n", priv->cur_rate); | 71 | "compatible with the network.\n", priv->cur_rate); |
71 | ret = -1; | 72 | ret = -1; |
72 | goto done; | ||
73 | } | 73 | } |
74 | ret = 0; | ||
75 | |||
76 | done: | 74 | done: |
77 | memset(rates, 0, *rates_size); | 75 | memset(rates, 0, *rates_size); |
78 | *rates_size = min_t(int, tmp_size, *rates_size); | 76 | *rates_size = min_t(int, tmp_size, *rates_size); |
@@ -322,7 +320,7 @@ static int lbs_associate(struct lbs_private *priv, | |||
322 | rates = (struct mrvl_ie_rates_param_set *) pos; | 320 | rates = (struct mrvl_ie_rates_param_set *) pos; |
323 | rates->header.type = cpu_to_le16(TLV_TYPE_RATES); | 321 | rates->header.type = cpu_to_le16(TLV_TYPE_RATES); |
324 | memcpy(&rates->rates, &bss->rates, MAX_RATES); | 322 | memcpy(&rates->rates, &bss->rates, MAX_RATES); |
325 | tmplen = MAX_RATES; | 323 | tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES); |
326 | if (get_common_rates(priv, rates->rates, &tmplen)) { | 324 | if (get_common_rates(priv, rates->rates, &tmplen)) { |
327 | ret = -1; | 325 | ret = -1; |
328 | goto done; | 326 | goto done; |
@@ -598,7 +596,7 @@ static int lbs_adhoc_join(struct lbs_private *priv, | |||
598 | 596 | ||
599 | /* Copy Data rates from the rates recorded in scan response */ | 597 | /* Copy Data rates from the rates recorded in scan response */ |
600 | memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); | 598 | memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); |
601 | ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES); | 599 | ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES); |
602 | memcpy(cmd.bss.rates, bss->rates, ratesize); | 600 | memcpy(cmd.bss.rates, bss->rates, ratesize); |
603 | if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { | 601 | if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { |
604 | lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n"); | 602 | lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n"); |
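Aside (not part of the commit): the assoc.c hunks above replace sizeof()-derived byte counts with ARRAY_SIZE() element counts. The sketch below, using made-up rates[] data, shows why ARRAY_SIZE() has to be applied to the real array and not to a pointer alias of it:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static unsigned char rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x24, 0x30, 0x48, 0x6c };

int main(void)
{
        unsigned char *p = rates;

        /* ARRAY_SIZE() only works on true arrays; applied to the pointer
         * alias it would silently report sizeof(unsigned char *) elements. */
        printf("elements: %zu, sizeof pointer: %zu\n",
               ARRAY_SIZE(rates), sizeof(p));
        return 0;
}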
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index 601b54249677..6c95af3023cc 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * for sending scan commands to the firmware. | 5 | * for sending scan commands to the firmware. |
6 | */ | 6 | */ |
7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
8 | #include <linux/kernel.h> | ||
8 | #include <linux/etherdevice.h> | 9 | #include <linux/etherdevice.h> |
9 | #include <linux/if_arp.h> | 10 | #include <linux/if_arp.h> |
10 | #include <asm/unaligned.h> | 11 | #include <asm/unaligned.h> |
@@ -876,7 +877,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv, | |||
876 | iwe.u.bitrate.disabled = 0; | 877 | iwe.u.bitrate.disabled = 0; |
877 | iwe.u.bitrate.value = 0; | 878 | iwe.u.bitrate.value = 0; |
878 | 879 | ||
879 | for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { | 880 | for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) { |
880 | /* Bit rate given in 500 kb/s units */ | 881 | /* Bit rate given in 500 kb/s units */ |
881 | iwe.u.bitrate.value = bss->rates[j] * 500000; | 882 | iwe.u.bitrate.value = bss->rates[j] * 500000; |
882 | current_val = iwe_stream_add_value(info, start, current_val, | 883 | current_val = iwe_stream_add_value(info, start, current_val, |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 40b07b988224..3bd3c779fff3 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -698,7 +698,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) | |||
698 | && !mac->pass_ctrl) | 698 | && !mac->pass_ctrl) |
699 | return 0; | 699 | return 0; |
700 | 700 | ||
701 | fc = *(__le16 *)buffer; | 701 | fc = get_unaligned((__le16*)buffer); |
702 | need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); | 702 | need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); |
703 | 703 | ||
704 | skb = dev_alloc_skb(length + (need_padding ? 2 : 0)); | 704 | skb = dev_alloc_skb(length + (need_padding ? 2 : 0)); |
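Aside (not part of the commit): zd_mac.c now reads the frame-control field with get_unaligned() because the buffer is not guaranteed to be 2-byte aligned. A portable user-space stand-in; read_le16_unaligned is an assumed helper name and the sketch assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's get_unaligned() on a little-endian 16-bit
 * field: memcpy() avoids a potentially trapping misaligned load. */
static uint16_t read_le16_unaligned(const uint8_t *p)
{
        uint16_t v;

        memcpy(&v, p, sizeof(v));
        return v; /* assumes a little-endian host for this sketch */
}

int main(void)
{
        /* first two bytes are the frame-control field */
        uint8_t frame[4] = { 0x88, 0x00, 0x3a, 0x01 };

        printf("frame_control = 0x%04x\n", read_le16_unaligned(frame));
        return 0;
}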
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 0f0e0b919ef4..a45b0c0d574e 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -70,7 +70,6 @@ | |||
70 | #undef CCIO_COLLECT_STATS | 70 | #undef CCIO_COLLECT_STATS |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | #include <linux/proc_fs.h> | ||
74 | #include <asm/runway.h> /* for proc_runway_root */ | 73 | #include <asm/runway.h> /* for proc_runway_root */ |
75 | 74 | ||
76 | #ifdef DEBUG_CCIO_INIT | 75 | #ifdef DEBUG_CCIO_INIT |
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index c590974e9815..d69bde6a2343 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c | |||
@@ -614,7 +614,7 @@ dino_fixup_bus(struct pci_bus *bus) | |||
614 | dev_name(&bus->self->dev), i, | 614 | dev_name(&bus->self->dev), i, |
615 | bus->self->resource[i].start, | 615 | bus->self->resource[i].start, |
616 | bus->self->resource[i].end); | 616 | bus->self->resource[i].end); |
617 | pci_assign_resource(bus->self, i); | 617 | WARN_ON(pci_assign_resource(bus->self, i)); |
618 | DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", | 618 | DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", |
619 | dev_name(&bus->self->dev), i, | 619 | dev_name(&bus->self->dev), i, |
620 | bus->self->resource[i].start, | 620 | bus->self->resource[i].start, |
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c index 685d94e69d44..8c0b26e9b98a 100644 --- a/drivers/parisc/eisa_eeprom.c +++ b/drivers/parisc/eisa_eeprom.c | |||
@@ -55,7 +55,7 @@ static ssize_t eisa_eeprom_read(struct file * file, | |||
55 | ssize_t ret; | 55 | ssize_t ret; |
56 | int i; | 56 | int i; |
57 | 57 | ||
58 | if (*ppos >= HPEE_MAX_LENGTH) | 58 | if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH) |
59 | return 0; | 59 | return 0; |
60 | 60 | ||
61 | count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos; | 61 | count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos; |
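Aside (not part of the commit): the eisa_eeprom.c fix rejects a negative *ppos before it enters the length computation on the next line. A standalone sketch of the clamping logic; clamp_read and the sample offsets are illustrative:

#include <stdio.h>

#define HPEE_MAX_LENGTH 8192

/* Reject a negative or out-of-range offset first, then clamp the request
 * so it never runs past the end of the EEPROM window. */
static size_t clamp_read(long long ppos, size_t count)
{
        if (ppos < 0 || ppos >= HPEE_MAX_LENGTH)
                return 0;
        if (ppos + count > HPEE_MAX_LENGTH)
                count = HPEE_MAX_LENGTH - ppos;
        return count;
}

int main(void)
{
        printf("%zu %zu %zu\n",
               clamp_read(-1, 16), clamp_read(8000, 400), clamp_read(0, 16));
        return 0;
}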
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c index 13856415b432..815db175d427 100644 --- a/drivers/parisc/hppb.c +++ b/drivers/parisc/hppb.c | |||
@@ -62,7 +62,8 @@ static int hppb_probe(struct parisc_device *dev) | |||
62 | } | 62 | } |
63 | card = card->next; | 63 | card = card->next; |
64 | } | 64 | } |
65 | printk(KERN_INFO "Found GeckoBoa at 0x%x\n", dev->hpa.start); | 65 | printk(KERN_INFO "Found GeckoBoa at 0x%llx\n", |
66 | (unsigned long long) dev->hpa.start); | ||
66 | 67 | ||
67 | card->hpa = dev->hpa.start; | 68 | card->hpa = dev->hpa.start; |
68 | card->mmio_region.name = "HP-PB Bus"; | 69 | card->mmio_region.name = "HP-PB Bus"; |
@@ -73,8 +74,10 @@ static int hppb_probe(struct parisc_device *dev) | |||
73 | 74 | ||
74 | status = ccio_request_resource(dev, &card->mmio_region); | 75 | status = ccio_request_resource(dev, &card->mmio_region); |
75 | if(status < 0) { | 76 | if(status < 0) { |
76 | printk(KERN_ERR "%s: failed to claim HP-PB bus space (%08x, %08x)\n", | 77 | printk(KERN_ERR "%s: failed to claim HP-PB " |
77 | __FILE__, card->mmio_region.start, card->mmio_region.end); | 78 | "bus space (0x%08llx, 0x%08llx)\n", |
79 | __FILE__, (unsigned long long) card->mmio_region.start, | ||
80 | (unsigned long long) card->mmio_region.end); | ||
78 | } | 81 | } |
79 | 82 | ||
80 | return 0; | 83 | return 0; |
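Aside (not part of the commit): the hppb.c hunks print resource addresses with %llx plus an explicit cast because the underlying type can be 32 or 64 bits wide depending on configuration. A compilable illustration; the local resource_size_t typedef is an assumption made only for this sketch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t resource_size_t; /* may be 32-bit on some configurations */

int main(void)
{
        resource_size_t start = 0xf4000000ULL;

        /* Cast to unsigned long long so one format string works whether
         * resource_size_t is 32 or 64 bits wide. */
        printf("Found GeckoBoa at 0x%llx\n", (unsigned long long) start);
        return 0;
}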
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index ede614616f8e..3aeb3279c92a 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c | |||
@@ -992,7 +992,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) | |||
992 | return; | 992 | return; |
993 | 993 | ||
994 | io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL); | 994 | io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL); |
995 | if (!pa_pdc_cell) { | 995 | if (!io_pdc_cell) { |
996 | kfree(pa_pdc_cell); | 996 | kfree(pa_pdc_cell); |
997 | return; | 997 | return; |
998 | } | 998 | } |
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index f9f9a5f1bbd0..13a64bc081b6 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c | |||
@@ -370,7 +370,7 @@ pdcspath_layer_read(struct pdcspath_entry *entry, char *buf) | |||
370 | if (!i) /* entry is not ready */ | 370 | if (!i) /* entry is not ready */ |
371 | return -ENODATA; | 371 | return -ENODATA; |
372 | 372 | ||
373 | for (i = 0; devpath->layers[i] && (likely(i < 6)); i++) | 373 | for (i = 0; i < 6 && devpath->layers[i]; i++) |
374 | out += sprintf(out, "%u ", devpath->layers[i]); | 374 | out += sprintf(out, "%u ", devpath->layers[i]); |
375 | 375 | ||
376 | out += sprintf(out, "\n"); | 376 | out += sprintf(out, "\n"); |
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index a4494d78e7c2..8aebe1e9d3d6 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
@@ -90,11 +90,10 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = { | |||
90 | 90 | ||
91 | static DEFINE_MUTEX(sn_hotplug_mutex); | 91 | static DEFINE_MUTEX(sn_hotplug_mutex); |
92 | 92 | ||
93 | static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, | 93 | static ssize_t path_show(struct pci_slot *pci_slot, char *buf) |
94 | char *buf) | ||
95 | { | 94 | { |
96 | int retval = -ENOENT; | 95 | int retval = -ENOENT; |
97 | struct slot *slot = bss_hotplug_slot->private; | 96 | struct slot *slot = pci_slot->hotplug->private; |
98 | 97 | ||
99 | if (!slot) | 98 | if (!slot) |
100 | return retval; | 99 | return retval; |
@@ -103,7 +102,7 @@ static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, | |||
103 | return retval; | 102 | return retval; |
104 | } | 103 | } |
105 | 104 | ||
106 | static struct hotplug_slot_attribute sn_slot_path_attr = __ATTR_RO(path); | 105 | static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path); |
107 | 106 | ||
108 | static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device) | 107 | static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device) |
109 | { | 108 | { |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index ebc9b8dca881..2314ad7ee5fe 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | |||
1505 | } | 1505 | } |
1506 | 1506 | ||
1507 | set_bit(num, iommu->domain_ids); | 1507 | set_bit(num, iommu->domain_ids); |
1508 | set_bit(iommu->seq_id, &domain->iommu_bmp); | ||
1509 | iommu->domains[num] = domain; | 1508 | iommu->domains[num] = domain; |
1510 | id = num; | 1509 | id = num; |
1511 | } | 1510 | } |
@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev) | |||
1648 | tmp->devfn); | 1647 | tmp->devfn); |
1649 | } | 1648 | } |
1650 | 1649 | ||
1650 | /* Returns a number of VTD pages, but aligned to MM page size */ | ||
1651 | static inline unsigned long aligned_nrpages(unsigned long host_addr, | ||
1652 | size_t size) | ||
1653 | { | ||
1654 | host_addr &= ~PAGE_MASK; | ||
1655 | return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; | ||
1656 | } | ||
1657 | |||
1651 | static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | 1658 | static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
1652 | struct scatterlist *sg, unsigned long phys_pfn, | 1659 | struct scatterlist *sg, unsigned long phys_pfn, |
1653 | unsigned long nr_pages, int prot) | 1660 | unsigned long nr_pages, int prot) |
@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
1675 | uint64_t tmp; | 1682 | uint64_t tmp; |
1676 | 1683 | ||
1677 | if (!sg_res) { | 1684 | if (!sg_res) { |
1678 | sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT; | 1685 | sg_res = aligned_nrpages(sg->offset, sg->length); |
1679 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; | 1686 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; |
1680 | sg->dma_length = sg->length; | 1687 | sg->dma_length = sg->length; |
1681 | pteval = page_to_phys(sg_page(sg)) | prot; | 1688 | pteval = page_to_phys(sg_page(sg)) | prot; |
@@ -2415,14 +2422,6 @@ error: | |||
2415 | return ret; | 2422 | return ret; |
2416 | } | 2423 | } |
2417 | 2424 | ||
2418 | /* Returns a number of VTD pages, but aligned to MM page size */ | ||
2419 | static inline unsigned long aligned_nrpages(unsigned long host_addr, | ||
2420 | size_t size) | ||
2421 | { | ||
2422 | host_addr &= ~PAGE_MASK; | ||
2423 | return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; | ||
2424 | } | ||
2425 | |||
2426 | /* This takes a number of _MM_ pages, not VTD pages */ | 2425 | /* This takes a number of _MM_ pages, not VTD pages */ |
2427 | static struct iova *intel_alloc_iova(struct device *dev, | 2426 | static struct iova *intel_alloc_iova(struct device *dev, |
2428 | struct dmar_domain *domain, | 2427 | struct dmar_domain *domain, |
@@ -2551,6 +2550,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2551 | int prot = 0; | 2550 | int prot = 0; |
2552 | int ret; | 2551 | int ret; |
2553 | struct intel_iommu *iommu; | 2552 | struct intel_iommu *iommu; |
2553 | unsigned long paddr_pfn = paddr >> PAGE_SHIFT; | ||
2554 | 2554 | ||
2555 | BUG_ON(dir == DMA_NONE); | 2555 | BUG_ON(dir == DMA_NONE); |
2556 | 2556 | ||
@@ -2585,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, | |||
2585 | * is not a big problem | 2585 | * is not a big problem |
2586 | */ | 2586 | */ |
2587 | ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), | 2587 | ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), |
2588 | paddr >> VTD_PAGE_SHIFT, size, prot); | 2588 | mm_to_dma_pfn(paddr_pfn), size, prot); |
2589 | if (ret) | 2589 | if (ret) |
2590 | goto error; | 2590 | goto error; |
2591 | 2591 | ||
@@ -2875,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne | |||
2875 | 2875 | ||
2876 | start_vpfn = mm_to_dma_pfn(iova->pfn_lo); | 2876 | start_vpfn = mm_to_dma_pfn(iova->pfn_lo); |
2877 | 2877 | ||
2878 | ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot); | 2878 | ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); |
2879 | if (unlikely(ret)) { | 2879 | if (unlikely(ret)) { |
2880 | /* clear the page */ | 2880 | /* clear the page */ |
2881 | dma_pte_clear_range(domain, start_vpfn, | 2881 | dma_pte_clear_range(domain, start_vpfn, |
@@ -3408,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
3408 | 3408 | ||
3409 | domain->iommu_count = 0; | 3409 | domain->iommu_count = 0; |
3410 | domain->iommu_coherency = 0; | 3410 | domain->iommu_coherency = 0; |
3411 | domain->iommu_snooping = 0; | ||
3411 | domain->max_addr = 0; | 3412 | domain->max_addr = 0; |
3412 | 3413 | ||
3413 | /* always allocate the top pgd */ | 3414 | /* always allocate the top pgd */ |
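Aside (not part of the commit): the intel-iommu.c change moves aligned_nrpages() above its new caller in __domain_mapping(). The helper's arithmetic is easy to check in isolation; the sketch below reproduces it with an assumed 4 KiB page size on both the MM and VT-d side:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT  12

/* Mirrors the helper: keep only the offset within the MM page, round the
 * end of the buffer up to a page boundary, count VT-d pages. */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
        /* A 100-byte buffer starting 8 bytes before a page boundary still
         * needs two pages of mapping. */
        printf("%lu\n", aligned_nrpages(0x1ff8, 100)); /* prints 2 */
        return 0;
}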
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index b711fb7181e2..1898c7b47907 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -100,16 +100,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource) | |||
100 | { | 100 | { |
101 | struct resource *res = &dev->resource[resource]; | 101 | struct resource *res = &dev->resource[resource]; |
102 | struct resource *root; | 102 | struct resource *root; |
103 | char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; | ||
104 | int err; | 103 | int err; |
105 | 104 | ||
106 | root = pci_find_parent_resource(dev, res); | 105 | root = pci_find_parent_resource(dev, res); |
107 | 106 | ||
108 | err = -EINVAL; | 107 | err = -EINVAL; |
109 | if (root != NULL) | 108 | if (root != NULL) |
110 | err = insert_resource(root, res); | 109 | err = request_resource(root, res); |
111 | 110 | ||
112 | if (err) { | 111 | if (err) { |
112 | const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; | ||
113 | dev_err(&dev->dev, "BAR %d: %s of %s %pR\n", | 113 | dev_err(&dev->dev, "BAR %d: %s of %s %pR\n", |
114 | resource, | 114 | resource, |
115 | root ? "address space collision on" : | 115 | root ? "address space collision on" : |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 46dad12f952f..77c6097ced80 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -277,31 +277,6 @@ config THINKPAD_ACPI_UNSAFE_LEDS | |||
277 | Say N here, unless you are building a kernel for your own | 277 | Say N here, unless you are building a kernel for your own |
278 | use, and need to control the important firmware LEDs. | 278 | use, and need to control the important firmware LEDs. |
279 | 279 | ||
280 | config THINKPAD_ACPI_DOCK | ||
281 | bool "Legacy Docking Station Support" | ||
282 | depends on THINKPAD_ACPI | ||
283 | depends on ACPI_DOCK=n | ||
284 | default n | ||
285 | ---help--- | ||
286 | Allows the thinkpad_acpi driver to handle docking station events. | ||
287 | This support was made obsolete by the generic ACPI docking station | ||
288 | support (CONFIG_ACPI_DOCK). It will allow locking and removing the | ||
289 | laptop from the docking station, but will not properly connect PCI | ||
290 | devices. | ||
291 | |||
292 | If you are not sure, say N here. | ||
293 | |||
294 | config THINKPAD_ACPI_BAY | ||
295 | bool "Legacy Removable Bay Support" | ||
296 | depends on THINKPAD_ACPI | ||
297 | default y | ||
298 | ---help--- | ||
299 | Allows the thinkpad_acpi driver to handle removable bays. It will | ||
300 | electrically disable the device in the bay, and also generate | ||
301 | notifications when the bay lever is ejected or inserted. | ||
302 | |||
303 | If you are not sure, say Y here. | ||
304 | |||
305 | config THINKPAD_ACPI_VIDEO | 280 | config THINKPAD_ACPI_VIDEO |
306 | bool "Video output control support" | 281 | bool "Video output control support" |
307 | depends on THINKPAD_ACPI | 282 | depends on THINKPAD_ACPI |
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index ec560f16d720..222ffb892f22 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c | |||
@@ -143,6 +143,7 @@ struct eeepc_hotk { | |||
143 | struct rfkill *bluetooth_rfkill; | 143 | struct rfkill *bluetooth_rfkill; |
144 | struct rfkill *wwan3g_rfkill; | 144 | struct rfkill *wwan3g_rfkill; |
145 | struct hotplug_slot *hotplug_slot; | 145 | struct hotplug_slot *hotplug_slot; |
146 | struct work_struct hotplug_work; | ||
146 | }; | 147 | }; |
147 | 148 | ||
148 | /* The actual device the driver binds to */ | 149 | /* The actual device the driver binds to */ |
@@ -660,7 +661,7 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot, | |||
660 | return 0; | 661 | return 0; |
661 | } | 662 | } |
662 | 663 | ||
663 | static void eeepc_rfkill_hotplug(void) | 664 | static void eeepc_hotplug_work(struct work_struct *work) |
664 | { | 665 | { |
665 | struct pci_dev *dev; | 666 | struct pci_dev *dev; |
666 | struct pci_bus *bus = pci_find_bus(0, 1); | 667 | struct pci_bus *bus = pci_find_bus(0, 1); |
@@ -701,7 +702,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) | |||
701 | if (event != ACPI_NOTIFY_BUS_CHECK) | 702 | if (event != ACPI_NOTIFY_BUS_CHECK) |
702 | return; | 703 | return; |
703 | 704 | ||
704 | eeepc_rfkill_hotplug(); | 705 | schedule_work(&ehotk->hotplug_work); |
705 | } | 706 | } |
706 | 707 | ||
707 | static void eeepc_hotk_notify(struct acpi_device *device, u32 event) | 708 | static void eeepc_hotk_notify(struct acpi_device *device, u32 event) |
@@ -892,7 +893,7 @@ static int eeepc_hotk_resume(struct acpi_device *device) | |||
892 | 893 | ||
893 | rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1); | 894 | rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1); |
894 | 895 | ||
895 | eeepc_rfkill_hotplug(); | 896 | schedule_work(&ehotk->hotplug_work); |
896 | } | 897 | } |
897 | 898 | ||
898 | if (ehotk->bluetooth_rfkill) | 899 | if (ehotk->bluetooth_rfkill) |
@@ -1093,6 +1094,8 @@ static int eeepc_rfkill_init(struct device *dev) | |||
1093 | { | 1094 | { |
1094 | int result = 0; | 1095 | int result = 0; |
1095 | 1096 | ||
1097 | INIT_WORK(&ehotk->hotplug_work, eeepc_hotplug_work); | ||
1098 | |||
1096 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); | 1099 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); |
1097 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); | 1100 | eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); |
1098 | 1101 | ||
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index ca508564a181..a2ad53e15874 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -520,11 +520,13 @@ static int hp_wmi_resume_handler(struct platform_device *device) | |||
520 | * the input layer will only actually pass it on if the state | 520 | * the input layer will only actually pass it on if the state |
521 | * changed. | 521 | * changed. |
522 | */ | 522 | */ |
523 | 523 | if (hp_wmi_input_dev) { | |
524 | input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); | 524 | input_report_switch(hp_wmi_input_dev, SW_DOCK, |
525 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, | 525 | hp_wmi_dock_state()); |
526 | hp_wmi_tablet_state()); | 526 | input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, |
527 | input_sync(hp_wmi_input_dev); | 527 | hp_wmi_tablet_state()); |
528 | input_sync(hp_wmi_input_dev); | ||
529 | } | ||
528 | 530 | ||
529 | return 0; | 531 | return 0; |
530 | } | 532 | } |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a463fd72c495..e85600852502 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -239,12 +239,6 @@ struct ibm_init_struct { | |||
239 | }; | 239 | }; |
240 | 240 | ||
241 | static struct { | 241 | static struct { |
242 | #ifdef CONFIG_THINKPAD_ACPI_BAY | ||
243 | u32 bay_status:1; | ||
244 | u32 bay_eject:1; | ||
245 | u32 bay_status2:1; | ||
246 | u32 bay_eject2:1; | ||
247 | #endif | ||
248 | u32 bluetooth:1; | 242 | u32 bluetooth:1; |
249 | u32 hotkey:1; | 243 | u32 hotkey:1; |
250 | u32 hotkey_mask:1; | 244 | u32 hotkey_mask:1; |
@@ -589,18 +583,6 @@ static int acpi_ec_write(int i, u8 v) | |||
589 | return 1; | 583 | return 1; |
590 | } | 584 | } |
591 | 585 | ||
592 | #if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY) | ||
593 | static int _sta(acpi_handle handle) | ||
594 | { | ||
595 | int status; | ||
596 | |||
597 | if (!handle || !acpi_evalf(handle, &status, "_STA", "d")) | ||
598 | status = 0; | ||
599 | |||
600 | return status; | ||
601 | } | ||
602 | #endif | ||
603 | |||
604 | static int issue_thinkpad_cmos_command(int cmos_cmd) | 586 | static int issue_thinkpad_cmos_command(int cmos_cmd) |
605 | { | 587 | { |
606 | if (!cmos_handle) | 588 | if (!cmos_handle) |
@@ -784,6 +766,8 @@ static int dispatch_procfs_write(struct file *file, | |||
784 | 766 | ||
785 | if (!ibm || !ibm->write) | 767 | if (!ibm || !ibm->write) |
786 | return -EINVAL; | 768 | return -EINVAL; |
769 | if (count > PAGE_SIZE - 2) | ||
770 | return -EINVAL; | ||
787 | 771 | ||
788 | kernbuf = kmalloc(count + 2, GFP_KERNEL); | 772 | kernbuf = kmalloc(count + 2, GFP_KERNEL); |
789 | if (!kernbuf) | 773 | if (!kernbuf) |
@@ -4442,293 +4426,6 @@ static struct ibm_struct light_driver_data = { | |||
4442 | }; | 4426 | }; |
4443 | 4427 | ||
4444 | /************************************************************************* | 4428 | /************************************************************************* |
4445 | * Dock subdriver | ||
4446 | */ | ||
4447 | |||
4448 | #ifdef CONFIG_THINKPAD_ACPI_DOCK | ||
4449 | |||
4450 | static void dock_notify(struct ibm_struct *ibm, u32 event); | ||
4451 | static int dock_read(char *p); | ||
4452 | static int dock_write(char *buf); | ||
4453 | |||
4454 | TPACPI_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */ | ||
4455 | "\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */ | ||
4456 | "\\_SB.PCI0.PCI1.DOCK", /* all others */ | ||
4457 | "\\_SB.PCI.ISA.SLCE", /* 570 */ | ||
4458 | ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */ | ||
4459 | |||
4460 | /* don't list other alternatives as we install a notify handler on the 570 */ | ||
4461 | TPACPI_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */ | ||
4462 | |||
4463 | static const struct acpi_device_id ibm_pci_device_ids[] = { | ||
4464 | {PCI_ROOT_HID_STRING, 0}, | ||
4465 | {"", 0}, | ||
4466 | }; | ||
4467 | |||
4468 | static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = { | ||
4469 | { | ||
4470 | .notify = dock_notify, | ||
4471 | .handle = &dock_handle, | ||
4472 | .type = ACPI_SYSTEM_NOTIFY, | ||
4473 | }, | ||
4474 | { | ||
4475 | /* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING. | ||
4476 | * We just use it to get notifications of dock hotplug | ||
4477 | * in very old thinkpads */ | ||
4478 | .hid = ibm_pci_device_ids, | ||
4479 | .notify = dock_notify, | ||
4480 | .handle = &pci_handle, | ||
4481 | .type = ACPI_SYSTEM_NOTIFY, | ||
4482 | }, | ||
4483 | }; | ||
4484 | |||
4485 | static struct ibm_struct dock_driver_data[2] = { | ||
4486 | { | ||
4487 | .name = "dock", | ||
4488 | .read = dock_read, | ||
4489 | .write = dock_write, | ||
4490 | .acpi = &ibm_dock_acpidriver[0], | ||
4491 | }, | ||
4492 | { | ||
4493 | .name = "dock", | ||
4494 | .acpi = &ibm_dock_acpidriver[1], | ||
4495 | }, | ||
4496 | }; | ||
4497 | |||
4498 | #define dock_docked() (_sta(dock_handle) & 1) | ||
4499 | |||
4500 | static int __init dock_init(struct ibm_init_struct *iibm) | ||
4501 | { | ||
4502 | vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n"); | ||
4503 | |||
4504 | TPACPI_ACPIHANDLE_INIT(dock); | ||
4505 | |||
4506 | vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n", | ||
4507 | str_supported(dock_handle != NULL)); | ||
4508 | |||
4509 | return (dock_handle)? 0 : 1; | ||
4510 | } | ||
4511 | |||
4512 | static int __init dock_init2(struct ibm_init_struct *iibm) | ||
4513 | { | ||
4514 | int dock2_needed; | ||
4515 | |||
4516 | vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver part 2\n"); | ||
4517 | |||
4518 | if (dock_driver_data[0].flags.acpi_driver_registered && | ||
4519 | dock_driver_data[0].flags.acpi_notify_installed) { | ||
4520 | TPACPI_ACPIHANDLE_INIT(pci); | ||
4521 | dock2_needed = (pci_handle != NULL); | ||
4522 | vdbg_printk(TPACPI_DBG_INIT, | ||
4523 | "dock PCI handler for the TP 570 is %s\n", | ||
4524 | str_supported(dock2_needed)); | ||
4525 | } else { | ||
4526 | vdbg_printk(TPACPI_DBG_INIT, | ||
4527 | "dock subdriver part 2 not required\n"); | ||
4528 | dock2_needed = 0; | ||
4529 | } | ||
4530 | |||
4531 | return (dock2_needed)? 0 : 1; | ||
4532 | } | ||
4533 | |||
4534 | static void dock_notify(struct ibm_struct *ibm, u32 event) | ||
4535 | { | ||
4536 | int docked = dock_docked(); | ||
4537 | int pci = ibm->acpi->hid && ibm->acpi->device && | ||
4538 | acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids); | ||
4539 | int data; | ||
4540 | |||
4541 | if (event == 1 && !pci) /* 570 */ | ||
4542 | data = 1; /* button */ | ||
4543 | else if (event == 1 && pci) /* 570 */ | ||
4544 | data = 3; /* dock */ | ||
4545 | else if (event == 3 && docked) | ||
4546 | data = 1; /* button */ | ||
4547 | else if (event == 3 && !docked) | ||
4548 | data = 2; /* undock */ | ||
4549 | else if (event == 0 && docked) | ||
4550 | data = 3; /* dock */ | ||
4551 | else { | ||
4552 | printk(TPACPI_ERR "unknown dock event %d, status %d\n", | ||
4553 | event, _sta(dock_handle)); | ||
4554 | data = 0; /* unknown */ | ||
4555 | } | ||
4556 | acpi_bus_generate_proc_event(ibm->acpi->device, event, data); | ||
4557 | acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, | ||
4558 | dev_name(&ibm->acpi->device->dev), | ||
4559 | event, data); | ||
4560 | } | ||
4561 | |||
4562 | static int dock_read(char *p) | ||
4563 | { | ||
4564 | int len = 0; | ||
4565 | int docked = dock_docked(); | ||
4566 | |||
4567 | if (!dock_handle) | ||
4568 | len += sprintf(p + len, "status:\t\tnot supported\n"); | ||
4569 | else if (!docked) | ||
4570 | len += sprintf(p + len, "status:\t\tundocked\n"); | ||
4571 | else { | ||
4572 | len += sprintf(p + len, "status:\t\tdocked\n"); | ||
4573 | len += sprintf(p + len, "commands:\tdock, undock\n"); | ||
4574 | } | ||
4575 | |||
4576 | return len; | ||
4577 | } | ||
4578 | |||
4579 | static int dock_write(char *buf) | ||
4580 | { | ||
4581 | char *cmd; | ||
4582 | |||
4583 | if (!dock_docked()) | ||
4584 | return -ENODEV; | ||
4585 | |||
4586 | while ((cmd = next_cmd(&buf))) { | ||
4587 | if (strlencmp(cmd, "undock") == 0) { | ||
4588 | if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 0) || | ||
4589 | !acpi_evalf(dock_handle, NULL, "_EJ0", "vd", 1)) | ||
4590 | return -EIO; | ||
4591 | } else if (strlencmp(cmd, "dock") == 0) { | ||
4592 | if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 1)) | ||
4593 | return -EIO; | ||
4594 | } else | ||
4595 | return -EINVAL; | ||
4596 | } | ||
4597 | |||
4598 | return 0; | ||
4599 | } | ||
4600 | |||
4601 | #endif /* CONFIG_THINKPAD_ACPI_DOCK */ | ||
4602 | |||
4603 | /************************************************************************* | ||
4604 | * Bay subdriver | ||
4605 | */ | ||
4606 | |||
4607 | #ifdef CONFIG_THINKPAD_ACPI_BAY | ||
4608 | |||
4609 | TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */ | ||
4610 | "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */ | ||
4611 | "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */ | ||
4612 | "\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */ | ||
4613 | ); /* A21e, R30, R31 */ | ||
4614 | TPACPI_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */ | ||
4615 | "_EJ0", /* all others */ | ||
4616 | ); /* 570,A21e,G4x,R30,R31,R32,R40e,R50e */ | ||
4617 | TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */ | ||
4618 | "\\_SB.PCI0.IDE0.IDEP.IDPS", /* 600e/x, 770e, 770x */ | ||
4619 | ); /* all others */ | ||
4620 | TPACPI_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */ | ||
4621 | "_EJ0", /* 770x */ | ||
4622 | ); /* all others */ | ||
4623 | |||
4624 | static int __init bay_init(struct ibm_init_struct *iibm) | ||
4625 | { | ||
4626 | vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n"); | ||
4627 | |||
4628 | TPACPI_ACPIHANDLE_INIT(bay); | ||
4629 | if (bay_handle) | ||
4630 | TPACPI_ACPIHANDLE_INIT(bay_ej); | ||
4631 | TPACPI_ACPIHANDLE_INIT(bay2); | ||
4632 | if (bay2_handle) | ||
4633 | TPACPI_ACPIHANDLE_INIT(bay2_ej); | ||
4634 | |||
4635 | tp_features.bay_status = bay_handle && | ||
4636 | acpi_evalf(bay_handle, NULL, "_STA", "qv"); | ||
4637 | tp_features.bay_status2 = bay2_handle && | ||
4638 | acpi_evalf(bay2_handle, NULL, "_STA", "qv"); | ||
4639 | |||
4640 | tp_features.bay_eject = bay_handle && bay_ej_handle && | ||
4641 | (strlencmp(bay_ej_path, "_EJ0") == 0 || experimental); | ||
4642 | tp_features.bay_eject2 = bay2_handle && bay2_ej_handle && | ||
4643 | (strlencmp(bay2_ej_path, "_EJ0") == 0 || experimental); | ||
4644 | |||
4645 | vdbg_printk(TPACPI_DBG_INIT, | ||
4646 | "bay 1: status %s, eject %s; bay 2: status %s, eject %s\n", | ||
4647 | str_supported(tp_features.bay_status), | ||
4648 | str_supported(tp_features.bay_eject), | ||
4649 | str_supported(tp_features.bay_status2), | ||
4650 | str_supported(tp_features.bay_eject2)); | ||
4651 | |||
4652 | return (tp_features.bay_status || tp_features.bay_eject || | ||
4653 | tp_features.bay_status2 || tp_features.bay_eject2)? 0 : 1; | ||
4654 | } | ||
4655 | |||
4656 | static void bay_notify(struct ibm_struct *ibm, u32 event) | ||
4657 | { | ||
4658 | acpi_bus_generate_proc_event(ibm->acpi->device, event, 0); | ||
4659 | acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, | ||
4660 | dev_name(&ibm->acpi->device->dev), | ||
4661 | event, 0); | ||
4662 | } | ||
4663 | |||
4664 | #define bay_occupied(b) (_sta(b##_handle) & 1) | ||
4665 | |||
4666 | static int bay_read(char *p) | ||
4667 | { | ||
4668 | int len = 0; | ||
4669 | int occupied = bay_occupied(bay); | ||
4670 | int occupied2 = bay_occupied(bay2); | ||
4671 | int eject, eject2; | ||
4672 | |||
4673 | len += sprintf(p + len, "status:\t\t%s\n", | ||
4674 | tp_features.bay_status ? | ||
4675 | (occupied ? "occupied" : "unoccupied") : | ||
4676 | "not supported"); | ||
4677 | if (tp_features.bay_status2) | ||
4678 | len += sprintf(p + len, "status2:\t%s\n", occupied2 ? | ||
4679 | "occupied" : "unoccupied"); | ||
4680 | |||
4681 | eject = tp_features.bay_eject && occupied; | ||
4682 | eject2 = tp_features.bay_eject2 && occupied2; | ||
4683 | |||
4684 | if (eject && eject2) | ||
4685 | len += sprintf(p + len, "commands:\teject, eject2\n"); | ||
4686 | else if (eject) | ||
4687 | len += sprintf(p + len, "commands:\teject\n"); | ||
4688 | else if (eject2) | ||
4689 | len += sprintf(p + len, "commands:\teject2\n"); | ||
4690 | |||
4691 | return len; | ||
4692 | } | ||
4693 | |||
4694 | static int bay_write(char *buf) | ||
4695 | { | ||
4696 | char *cmd; | ||
4697 | |||
4698 | if (!tp_features.bay_eject && !tp_features.bay_eject2) | ||
4699 | return -ENODEV; | ||
4700 | |||
4701 | while ((cmd = next_cmd(&buf))) { | ||
4702 | if (tp_features.bay_eject && strlencmp(cmd, "eject") == 0) { | ||
4703 | if (!acpi_evalf(bay_ej_handle, NULL, NULL, "vd", 1)) | ||
4704 | return -EIO; | ||
4705 | } else if (tp_features.bay_eject2 && | ||
4706 | strlencmp(cmd, "eject2") == 0) { | ||
4707 | if (!acpi_evalf(bay2_ej_handle, NULL, NULL, "vd", 1)) | ||
4708 | return -EIO; | ||
4709 | } else | ||
4710 | return -EINVAL; | ||
4711 | } | ||
4712 | |||
4713 | return 0; | ||
4714 | } | ||
4715 | |||
4716 | static struct tp_acpi_drv_struct ibm_bay_acpidriver = { | ||
4717 | .notify = bay_notify, | ||
4718 | .handle = &bay_handle, | ||
4719 | .type = ACPI_SYSTEM_NOTIFY, | ||
4720 | }; | ||
4721 | |||
4722 | static struct ibm_struct bay_driver_data = { | ||
4723 | .name = "bay", | ||
4724 | .read = bay_read, | ||
4725 | .write = bay_write, | ||
4726 | .acpi = &ibm_bay_acpidriver, | ||
4727 | }; | ||
4728 | |||
4729 | #endif /* CONFIG_THINKPAD_ACPI_BAY */ | ||
4730 | |||
4731 | /************************************************************************* | ||
4732 | * CMOS subdriver | 4429 | * CMOS subdriver |
4733 | */ | 4430 | */ |
4734 | 4431 | ||
@@ -5945,14 +5642,48 @@ static struct backlight_ops ibm_backlight_data = { | |||
5945 | 5642 | ||
5946 | /* --------------------------------------------------------------------- */ | 5643 | /* --------------------------------------------------------------------- */ |
5947 | 5644 | ||
5645 | /* | ||
5646 | * These are only useful for models that have only one possibility | ||
5647 | * of GPU. If the BIOS model handles both ATI and Intel, don't use | ||
5648 | * these quirks. | ||
5649 | */ | ||
5650 | #define TPACPI_BRGHT_Q_NOEC 0x0001 /* Must NOT use EC HBRV */ | ||
5651 | #define TPACPI_BRGHT_Q_EC 0x0002 /* Should or must use EC HBRV */ | ||
5652 | #define TPACPI_BRGHT_Q_ASK 0x8000 /* Ask for user report */ | ||
5653 | |||
5654 | static const struct tpacpi_quirk brightness_quirk_table[] __initconst = { | ||
5655 | /* Models with ATI GPUs known to require ECNVRAM mode */ | ||
5656 | TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */ | ||
5657 | |||
5658 | /* Models with ATI GPUs (waiting confirmation) */ | ||
5659 | TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), | ||
5660 | TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), | ||
5661 | TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), | ||
5662 | TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), | ||
5663 | |||
5664 | /* Models with Intel Extreme Graphics 2 (waiting confirmation) */ | ||
5665 | TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), | ||
5666 | TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), | ||
5667 | TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), | ||
5668 | |||
5669 | /* Models with Intel GMA900 */ | ||
5670 | TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */ | ||
5671 | TPACPI_Q_IBM('7', '4', TPACPI_BRGHT_Q_NOEC), /* X41 */ | ||
5672 | TPACPI_Q_IBM('7', '5', TPACPI_BRGHT_Q_NOEC), /* X41 Tablet */ | ||
5673 | }; | ||
5674 | |||
5948 | static int __init brightness_init(struct ibm_init_struct *iibm) | 5675 | static int __init brightness_init(struct ibm_init_struct *iibm) |
5949 | { | 5676 | { |
5950 | int b; | 5677 | int b; |
5678 | unsigned long quirks; | ||
5951 | 5679 | ||
5952 | vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n"); | 5680 | vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n"); |
5953 | 5681 | ||
5954 | mutex_init(&brightness_mutex); | 5682 | mutex_init(&brightness_mutex); |
5955 | 5683 | ||
5684 | quirks = tpacpi_check_quirks(brightness_quirk_table, | ||
5685 | ARRAY_SIZE(brightness_quirk_table)); | ||
5686 | |||
5956 | /* | 5687 | /* |
5957 | * We always attempt to detect acpi support, so as to switch | 5688 | * We always attempt to detect acpi support, so as to switch |
5958 | * Lenovo Vista BIOS to ACPI brightness mode even if we are not | 5689 | * Lenovo Vista BIOS to ACPI brightness mode even if we are not |
@@ -6009,23 +5740,13 @@ static int __init brightness_init(struct ibm_init_struct *iibm) | |||
6009 | /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */ | 5740 | /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */ |
6010 | if (brightness_mode == TPACPI_BRGHT_MODE_AUTO || | 5741 | if (brightness_mode == TPACPI_BRGHT_MODE_AUTO || |
6011 | brightness_mode == TPACPI_BRGHT_MODE_MAX) { | 5742 | brightness_mode == TPACPI_BRGHT_MODE_MAX) { |
6012 | if (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) { | 5743 | if (quirks & TPACPI_BRGHT_Q_EC) |
6013 | /* | 5744 | brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM; |
6014 | * IBM models that define HBRV probably have | 5745 | else |
6015 | * EC-based backlight level control | ||
6016 | */ | ||
6017 | if (acpi_evalf(ec_handle, NULL, "HBRV", "qd")) | ||
6018 | /* T40-T43, R50-R52, R50e, R51e, X31-X41 */ | ||
6019 | brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM; | ||
6020 | else | ||
6021 | /* all other IBM ThinkPads */ | ||
6022 | brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP; | ||
6023 | } else | ||
6024 | /* All Lenovo ThinkPads */ | ||
6025 | brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP; | 5746 | brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP; |
6026 | 5747 | ||
6027 | dbg_printk(TPACPI_DBG_BRGHT, | 5748 | dbg_printk(TPACPI_DBG_BRGHT, |
6028 | "selected brightness_mode=%d\n", | 5749 | "driver auto-selected brightness_mode=%d\n", |
6029 | brightness_mode); | 5750 | brightness_mode); |
6030 | } | 5751 | } |
6031 | 5752 | ||
@@ -6052,6 +5773,15 @@ static int __init brightness_init(struct ibm_init_struct *iibm) | |||
6052 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, | 5773 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, |
6053 | "brightness is supported\n"); | 5774 | "brightness is supported\n"); |
6054 | 5775 | ||
5776 | if (quirks & TPACPI_BRGHT_Q_ASK) { | ||
5777 | printk(TPACPI_NOTICE | ||
5778 | "brightness: will use unverified default: " | ||
5779 | "brightness_mode=%d\n", brightness_mode); | ||
5780 | printk(TPACPI_NOTICE | ||
5781 | "brightness: please report to %s whether it works well " | ||
5782 | "or not on your ThinkPad\n", TPACPI_MAIL); | ||
5783 | } | ||
5784 | |||
6055 | ibm_backlight_device->props.max_brightness = | 5785 | ibm_backlight_device->props.max_brightness = |
6056 | (tp_features.bright_16levels)? 15 : 7; | 5786 | (tp_features.bright_16levels)? 15 : 7; |
6057 | ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; | 5787 | ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; |
@@ -7854,22 +7584,6 @@ static struct ibm_init_struct ibms_init[] __initdata = { | |||
7854 | .init = light_init, | 7584 | .init = light_init, |
7855 | .data = &light_driver_data, | 7585 | .data = &light_driver_data, |
7856 | }, | 7586 | }, |
7857 | #ifdef CONFIG_THINKPAD_ACPI_DOCK | ||
7858 | { | ||
7859 | .init = dock_init, | ||
7860 | .data = &dock_driver_data[0], | ||
7861 | }, | ||
7862 | { | ||
7863 | .init = dock_init2, | ||
7864 | .data = &dock_driver_data[1], | ||
7865 | }, | ||
7866 | #endif | ||
7867 | #ifdef CONFIG_THINKPAD_ACPI_BAY | ||
7868 | { | ||
7869 | .init = bay_init, | ||
7870 | .data = &bay_driver_data, | ||
7871 | }, | ||
7872 | #endif | ||
7873 | { | 7587 | { |
7874 | .init = cmos_init, | 7588 | .init = cmos_init, |
7875 | .data = &cmos_driver_data, | 7589 | .data = &cmos_driver_data, |
@@ -7968,12 +7682,6 @@ TPACPI_PARAM(hotkey); | |||
7968 | TPACPI_PARAM(bluetooth); | 7682 | TPACPI_PARAM(bluetooth); |
7969 | TPACPI_PARAM(video); | 7683 | TPACPI_PARAM(video); |
7970 | TPACPI_PARAM(light); | 7684 | TPACPI_PARAM(light); |
7971 | #ifdef CONFIG_THINKPAD_ACPI_DOCK | ||
7972 | TPACPI_PARAM(dock); | ||
7973 | #endif | ||
7974 | #ifdef CONFIG_THINKPAD_ACPI_BAY | ||
7975 | TPACPI_PARAM(bay); | ||
7976 | #endif /* CONFIG_THINKPAD_ACPI_BAY */ | ||
7977 | TPACPI_PARAM(cmos); | 7685 | TPACPI_PARAM(cmos); |
7978 | TPACPI_PARAM(led); | 7686 | TPACPI_PARAM(led); |
7979 | TPACPI_PARAM(beep); | 7687 | TPACPI_PARAM(beep); |
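Aside (not part of the commit): among the thinkpad_acpi.c changes, dispatch_procfs_write() now rejects count > PAGE_SIZE - 2 before allocating count + 2 bytes. A user-space sketch of that guard; copy_cmd and the terminator handling are illustrative, not the driver's exact behaviour:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Capping count before the "count + 2" allocation prevents both an
 * oversized allocation and any wrap of the size computation. */
static char *copy_cmd(const char *buf, size_t count)
{
        char *kernbuf;

        if (count > PAGE_SIZE - 2)
                return NULL;

        kernbuf = malloc(count + 2);
        if (!kernbuf)
                return NULL;

        memcpy(kernbuf, buf, count);
        kernbuf[count] = '\0';
        return kernbuf;
}

int main(void)
{
        char *cmd = copy_cmd("enable", 6);

        free(cmd);
        return 0;
}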
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 8030e25152fb..c75d6f35cb5f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -553,40 +553,35 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, | |||
553 | _zfcp_erp_unit_reopen(unit, clear, id, ref); | 553 | _zfcp_erp_unit_reopen(unit, clear, id, ref); |
554 | } | 554 | } |
555 | 555 | ||
556 | static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act) | 556 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) |
557 | { | 557 | { |
558 | struct zfcp_adapter *adapter = act->adapter; | ||
559 | struct zfcp_port *port = act->port; | ||
560 | struct zfcp_unit *unit = act->unit; | ||
561 | u32 status = act->status; | ||
562 | |||
563 | /* initiate follow-up actions depending on success of finished action */ | ||
564 | switch (act->action) { | 558 | switch (act->action) { |
565 | |||
566 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 559 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
567 | if (status == ZFCP_ERP_SUCCEEDED) | 560 | _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL); |
568 | _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL); | ||
569 | else | ||
570 | _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL); | ||
571 | break; | 561 | break; |
572 | |||
573 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 562 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
574 | if (status == ZFCP_ERP_SUCCEEDED) | 563 | _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL); |
575 | _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL); | ||
576 | else | ||
577 | _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL); | ||
578 | break; | 564 | break; |
579 | |||
580 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 565 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
581 | if (status == ZFCP_ERP_SUCCEEDED) | 566 | _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); |
582 | _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL); | ||
583 | else | ||
584 | _zfcp_erp_port_forced_reopen(port, 0, "ersfa_6", NULL); | ||
585 | break; | 567 | break; |
586 | |||
587 | case ZFCP_ERP_ACTION_REOPEN_UNIT: | 568 | case ZFCP_ERP_ACTION_REOPEN_UNIT: |
588 | if (status != ZFCP_ERP_SUCCEEDED) | 569 | _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); |
589 | _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL); | 570 | break; |
571 | } | ||
572 | } | ||
573 | |||
574 | static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) | ||
575 | { | ||
576 | switch (act->action) { | ||
577 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | ||
578 | _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL); | ||
579 | break; | ||
580 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | ||
581 | _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); | ||
582 | break; | ||
583 | case ZFCP_ERP_ACTION_REOPEN_PORT: | ||
584 | _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); | ||
590 | break; | 585 | break; |
591 | } | 586 | } |
592 | } | 587 | } |
@@ -801,7 +796,7 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) | |||
801 | return ZFCP_ERP_FAILED; | 796 | return ZFCP_ERP_FAILED; |
802 | 797 | ||
803 | case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: | 798 | case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: |
804 | if (status & ZFCP_STATUS_PORT_PHYS_OPEN) | 799 | if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN)) |
805 | return ZFCP_ERP_SUCCEEDED; | 800 | return ZFCP_ERP_SUCCEEDED; |
806 | } | 801 | } |
807 | return ZFCP_ERP_FAILED; | 802 | return ZFCP_ERP_FAILED; |
@@ -853,11 +848,17 @@ void zfcp_erp_port_strategy_open_lookup(struct work_struct *work) | |||
853 | gid_pn_work); | 848 | gid_pn_work); |
854 | 849 | ||
855 | retval = zfcp_fc_ns_gid_pn(&port->erp_action); | 850 | retval = zfcp_fc_ns_gid_pn(&port->erp_action); |
856 | if (retval == -ENOMEM) | 851 | if (!retval) { |
857 | zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM); | 852 | port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; |
858 | port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; | 853 | goto out; |
859 | if (retval) | 854 | } |
860 | zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED); | 855 | if (retval == -ENOMEM) { |
856 | zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM); | ||
857 | goto out; | ||
858 | } | ||
859 | /* all other error conditions */ | ||
860 | zfcp_erp_notify(&port->erp_action, 0); | ||
861 | out: | ||
861 | zfcp_port_put(port); | 862 | zfcp_port_put(port); |
862 | } | 863 | } |
863 | 864 | ||
@@ -1289,7 +1290,10 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1289 | retval = zfcp_erp_strategy_statechange(erp_action, retval); | 1290 | retval = zfcp_erp_strategy_statechange(erp_action, retval); |
1290 | if (retval == ZFCP_ERP_EXIT) | 1291 | if (retval == ZFCP_ERP_EXIT) |
1291 | goto unlock; | 1292 | goto unlock; |
1292 | zfcp_erp_strategy_followup_actions(erp_action); | 1293 | if (retval == ZFCP_ERP_SUCCEEDED) |
1294 | zfcp_erp_strategy_followup_success(erp_action); | ||
1295 | if (retval == ZFCP_ERP_FAILED) | ||
1296 | zfcp_erp_strategy_followup_failed(erp_action); | ||
1293 | 1297 | ||
1294 | unlock: | 1298 | unlock: |
1295 | write_unlock(&adapter->erp_lock); | 1299 | write_unlock(&adapter->erp_lock); |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 2f0705d76b72..47daebfa7e59 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -79,11 +79,9 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) | |||
79 | 79 | ||
80 | mutex_unlock(&wka_port->mutex); | 80 | mutex_unlock(&wka_port->mutex); |
81 | 81 | ||
82 | wait_event_timeout( | 82 | wait_event(wka_port->completion_wq, |
83 | wka_port->completion_wq, | 83 | wka_port->status == ZFCP_WKA_PORT_ONLINE || |
84 | wka_port->status == ZFCP_WKA_PORT_ONLINE || | 84 | wka_port->status == ZFCP_WKA_PORT_OFFLINE); |
85 | wka_port->status == ZFCP_WKA_PORT_OFFLINE, | ||
86 | HZ >> 1); | ||
87 | 85 | ||
88 | if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { | 86 | if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { |
89 | atomic_inc(&wka_port->refcount); | 87 | atomic_inc(&wka_port->refcount); |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c57658f3d34f..47795fbf081f 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -670,8 +670,11 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) | |||
670 | zfcp_fsf_sbal_check(adapter), 5 * HZ); | 670 | zfcp_fsf_sbal_check(adapter), 5 * HZ); |
671 | if (ret > 0) | 671 | if (ret > 0) |
672 | return 0; | 672 | return 0; |
673 | if (!ret) | 673 | if (!ret) { |
674 | atomic_inc(&adapter->qdio_outb_full); | 674 | atomic_inc(&adapter->qdio_outb_full); |
675 | /* assume hanging outbound queue, try queue recovery */ | ||
676 | zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); | ||
677 | } | ||
675 | 678 | ||
676 | spin_lock_bh(&adapter->req_q_lock); | 679 | spin_lock_bh(&adapter->req_q_lock); |
677 | return -EIO; | 680 | return -EIO; |
@@ -722,7 +725,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, | |||
722 | req = zfcp_fsf_alloc_qtcb(pool); | 725 | req = zfcp_fsf_alloc_qtcb(pool); |
723 | 726 | ||
724 | if (unlikely(!req)) | 727 | if (unlikely(!req)) |
725 | return ERR_PTR(-EIO); | 728 | return ERR_PTR(-ENOMEM); |
726 | 729 | ||
727 | if (adapter->req_no == 0) | 730 | if (adapter->req_no == 0) |
728 | adapter->req_no++; | 731 | adapter->req_no++; |
@@ -1010,6 +1013,23 @@ skip_fsfstatus: | |||
1010 | send_ct->handler(send_ct->handler_data); | 1013 | send_ct->handler(send_ct->handler_data); |
1011 | } | 1014 | } |
1012 | 1015 | ||
1016 | static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale, | ||
1017 | struct scatterlist *sg_req, | ||
1018 | struct scatterlist *sg_resp) | ||
1019 | { | ||
1020 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; | ||
1021 | sbale[2].addr = sg_virt(sg_req); | ||
1022 | sbale[2].length = sg_req->length; | ||
1023 | sbale[3].addr = sg_virt(sg_resp); | ||
1024 | sbale[3].length = sg_resp->length; | ||
1025 | sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; | ||
1026 | } | ||
1027 | |||
1028 | static int zfcp_fsf_one_sbal(struct scatterlist *sg) | ||
1029 | { | ||
1030 | return sg_is_last(sg) && sg->length <= PAGE_SIZE; | ||
1031 | } | ||
1032 | |||
1013 | static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | 1033 | static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, |
1014 | struct scatterlist *sg_req, | 1034 | struct scatterlist *sg_req, |
1015 | struct scatterlist *sg_resp, | 1035 | struct scatterlist *sg_resp, |
@@ -1020,30 +1040,30 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, | |||
1020 | int bytes; | 1040 | int bytes; |
1021 | 1041 | ||
1022 | if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { | 1042 | if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { |
1023 | if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE || | 1043 | if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp)) |
1024 | !sg_is_last(sg_req) || !sg_is_last(sg_resp)) | ||
1025 | return -EOPNOTSUPP; | 1044 | return -EOPNOTSUPP; |
1026 | 1045 | ||
1027 | sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; | 1046 | zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp); |
1028 | sbale[2].addr = sg_virt(sg_req); | 1047 | return 0; |
1029 | sbale[2].length = sg_req->length; | 1048 | } |
1030 | sbale[3].addr = sg_virt(sg_resp); | 1049 | |
1031 | sbale[3].length = sg_resp->length; | 1050 | /* use single, unchained SBAL if it can hold the request */ |
1032 | sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; | 1051 | if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) { |
1052 | zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp); | ||
1033 | return 0; | 1053 | return 0; |
1034 | } | 1054 | } |
1035 | 1055 | ||
1036 | bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, | 1056 | bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, |
1037 | sg_req, max_sbals); | 1057 | sg_req, max_sbals); |
1038 | if (bytes <= 0) | 1058 | if (bytes <= 0) |
1039 | return -ENOMEM; | 1059 | return -EIO; |
1040 | req->qtcb->bottom.support.req_buf_length = bytes; | 1060 | req->qtcb->bottom.support.req_buf_length = bytes; |
1041 | req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; | 1061 | req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; |
1042 | 1062 | ||
1043 | bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, | 1063 | bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, |
1044 | sg_resp, max_sbals); | 1064 | sg_resp, max_sbals); |
1045 | if (bytes <= 0) | 1065 | if (bytes <= 0) |
1046 | return -ENOMEM; | 1066 | return -EIO; |
1047 | req->qtcb->bottom.support.resp_buf_length = bytes; | 1067 | req->qtcb->bottom.support.resp_buf_length = bytes; |
1048 | 1068 | ||
1049 | return 0; | 1069 | return 0; |
@@ -1607,10 +1627,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) | |||
1607 | case FSF_ACCESS_DENIED: | 1627 | case FSF_ACCESS_DENIED: |
1608 | wka_port->status = ZFCP_WKA_PORT_OFFLINE; | 1628 | wka_port->status = ZFCP_WKA_PORT_OFFLINE; |
1609 | break; | 1629 | break; |
1610 | case FSF_PORT_ALREADY_OPEN: | ||
1611 | break; | ||
1612 | case FSF_GOOD: | 1630 | case FSF_GOOD: |
1613 | wka_port->handle = header->port_handle; | 1631 | wka_port->handle = header->port_handle; |
1632 | /* fall through */ | ||
1633 | case FSF_PORT_ALREADY_OPEN: | ||
1614 | wka_port->status = ZFCP_WKA_PORT_ONLINE; | 1634 | wka_port->status = ZFCP_WKA_PORT_ONLINE; |
1615 | } | 1635 | } |
1616 | out: | 1636 | out: |
@@ -1731,15 +1751,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) | |||
1731 | zfcp_fsf_access_denied_port(req, port); | 1751 | zfcp_fsf_access_denied_port(req, port); |
1732 | break; | 1752 | break; |
1733 | case FSF_PORT_BOXED: | 1753 | case FSF_PORT_BOXED: |
1734 | zfcp_erp_port_boxed(port, "fscpph2", req); | ||
1735 | req->status |= ZFCP_STATUS_FSFREQ_ERROR | | ||
1736 | ZFCP_STATUS_FSFREQ_RETRY; | ||
1737 | /* can't use generic zfcp_erp_modify_port_status because | 1754 | /* can't use generic zfcp_erp_modify_port_status because |
1738 | * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ | 1755 | * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ |
1739 | atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); | 1756 | atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); |
1740 | list_for_each_entry(unit, &port->unit_list_head, list) | 1757 | list_for_each_entry(unit, &port->unit_list_head, list) |
1741 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, | 1758 | atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, |
1742 | &unit->status); | 1759 | &unit->status); |
1760 | zfcp_erp_port_boxed(port, "fscpph2", req); | ||
1761 | req->status |= ZFCP_STATUS_FSFREQ_ERROR | | ||
1762 | ZFCP_STATUS_FSFREQ_RETRY; | ||
1763 | |||
1743 | break; | 1764 | break; |
1744 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1765 | case FSF_ADAPTER_STATUS_AVAILABLE: |
1745 | switch (header->fsf_status_qual.word[0]) { | 1766 | switch (header->fsf_status_qual.word[0]) { |
@@ -2541,7 +2562,6 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, | |||
2541 | bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, | 2562 | bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, |
2542 | FSF_MAX_SBALS_PER_REQ); | 2563 | FSF_MAX_SBALS_PER_REQ); |
2543 | if (bytes != ZFCP_CFDC_MAX_SIZE) { | 2564 | if (bytes != ZFCP_CFDC_MAX_SIZE) { |
2544 | retval = -ENOMEM; | ||
2545 | zfcp_fsf_req_free(req); | 2565 | zfcp_fsf_req_free(req); |
2546 | goto out; | 2566 | goto out; |
2547 | } | 2567 | } |
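Aside (not part of the commit): in zfcp_fsf_open_wka_port_handler() the FSF_GOOD case now stores the port handle and deliberately falls through to FSF_PORT_ALREADY_OPEN, so both outcomes mark the WKA port online. A standalone sketch of that switch structure with placeholder status values:

#include <stdio.h>

enum { FSF_GOOD, FSF_PORT_ALREADY_OPEN, FSF_ACCESS_DENIED }; /* placeholders */
enum { PORT_OFFLINE, PORT_ONLINE };

static int wka_port_status(int fsf_status, unsigned int *handle)
{
        int status = PORT_OFFLINE;

        switch (fsf_status) {
        case FSF_GOOD:
                *handle = 0x42; /* placeholder for header->port_handle */
                /* fall through */
        case FSF_PORT_ALREADY_OPEN:
                status = PORT_ONLINE;
                break;
        case FSF_ACCESS_DENIED:
                status = PORT_OFFLINE;
                break;
        }
        return status;
}

int main(void)
{
        unsigned int h = 0;

        printf("%d %d\n", wka_port_status(FSF_GOOD, &h),
               wka_port_status(FSF_PORT_ALREADY_OPEN, &h));
        return 0;
}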
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 967ede73f4c5..6925a1784682 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -167,20 +167,21 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
167 | struct zfcp_unit *unit = scpnt->device->hostdata; | 167 | struct zfcp_unit *unit = scpnt->device->hostdata; |
168 | struct zfcp_fsf_req *old_req, *abrt_req; | 168 | struct zfcp_fsf_req *old_req, *abrt_req; |
169 | unsigned long flags; | 169 | unsigned long flags; |
170 | unsigned long old_req_id = (unsigned long) scpnt->host_scribble; | 170 | unsigned long old_reqid = (unsigned long) scpnt->host_scribble; |
171 | int retval = SUCCESS; | 171 | int retval = SUCCESS; |
172 | int retry = 3; | 172 | int retry = 3; |
173 | char *dbf_tag; | ||
173 | 174 | ||
174 | /* avoid race condition between late normal completion and abort */ | 175 | /* avoid race condition between late normal completion and abort */ |
175 | write_lock_irqsave(&adapter->abort_lock, flags); | 176 | write_lock_irqsave(&adapter->abort_lock, flags); |
176 | 177 | ||
177 | spin_lock(&adapter->req_list_lock); | 178 | spin_lock(&adapter->req_list_lock); |
178 | old_req = zfcp_reqlist_find(adapter, old_req_id); | 179 | old_req = zfcp_reqlist_find(adapter, old_reqid); |
179 | spin_unlock(&adapter->req_list_lock); | 180 | spin_unlock(&adapter->req_list_lock); |
180 | if (!old_req) { | 181 | if (!old_req) { |
181 | write_unlock_irqrestore(&adapter->abort_lock, flags); | 182 | write_unlock_irqrestore(&adapter->abort_lock, flags); |
182 | zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, | 183 | zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, |
183 | old_req_id); | 184 | old_reqid); |
184 | return FAILED; /* completion could be in progress */ | 185 | return FAILED; /* completion could be in progress */ |
185 | } | 186 | } |
186 | old_req->data = NULL; | 187 | old_req->data = NULL; |
@@ -189,7 +190,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
189 | write_unlock_irqrestore(&adapter->abort_lock, flags); | 190 | write_unlock_irqrestore(&adapter->abort_lock, flags); |
190 | 191 | ||
191 | while (retry--) { | 192 | while (retry--) { |
192 | abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit); | 193 | abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit); |
193 | if (abrt_req) | 194 | if (abrt_req) |
194 | break; | 195 | break; |
195 | 196 | ||
@@ -197,7 +198,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
197 | if (!(atomic_read(&adapter->status) & | 198 | if (!(atomic_read(&adapter->status) & |
198 | ZFCP_STATUS_COMMON_RUNNING)) { | 199 | ZFCP_STATUS_COMMON_RUNNING)) { |
199 | zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, | 200 | zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, |
200 | old_req_id); | 201 | old_reqid); |
201 | return SUCCESS; | 202 | return SUCCESS; |
202 | } | 203 | } |
203 | } | 204 | } |
@@ -208,13 +209,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
208 | abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); | 209 | abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); |
209 | 210 | ||
210 | if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) | 211 | if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) |
211 | zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0); | 212 | dbf_tag = "okay"; |
212 | else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) | 213 | else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) |
213 | zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0); | 214 | dbf_tag = "lte2"; |
214 | else { | 215 | else { |
215 | zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0); | 216 | dbf_tag = "fail"; |
216 | retval = FAILED; | 217 | retval = FAILED; |
217 | } | 218 | } |
219 | zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid); | ||
218 | zfcp_fsf_req_free(abrt_req); | 220 | zfcp_fsf_req_free(abrt_req); |
219 | return retval; | 221 | return retval; |
220 | } | 222 | } |
@@ -534,6 +536,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) | |||
534 | struct fc_rport_identifiers ids; | 536 | struct fc_rport_identifiers ids; |
535 | struct fc_rport *rport; | 537 | struct fc_rport *rport; |
536 | 538 | ||
539 | if (port->rport) | ||
540 | return; | ||
541 | |||
537 | ids.node_name = port->wwnn; | 542 | ids.node_name = port->wwnn; |
538 | ids.port_name = port->wwpn; | 543 | ids.port_name = port->wwpn; |
539 | ids.port_id = port->d_id; | 544 | ids.port_id = port->d_id; |
@@ -557,8 +562,10 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) | |||
557 | { | 562 | { |
558 | struct fc_rport *rport = port->rport; | 563 | struct fc_rport *rport = port->rport; |
559 | 564 | ||
560 | if (rport) | 565 | if (rport) { |
561 | fc_remote_port_delete(rport); | 566 | fc_remote_port_delete(rport); |
567 | port->rport = NULL; | ||
568 | } | ||
562 | } | 569 | } |
563 | 570 | ||
564 | void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) | 571 | void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) |
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 3e51e64d1108..0fe5cce818cb 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -494,9 +494,14 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, | |||
494 | struct Scsi_Host *scsi_host = class_to_shost(dev); | 494 | struct Scsi_Host *scsi_host = class_to_shost(dev); |
495 | struct zfcp_adapter *adapter = | 495 | struct zfcp_adapter *adapter = |
496 | (struct zfcp_adapter *) scsi_host->hostdata[0]; | 496 | (struct zfcp_adapter *) scsi_host->hostdata[0]; |
497 | u64 util; | ||
498 | |||
499 | spin_lock_bh(&adapter->qdio_stat_lock); | ||
500 | util = adapter->req_q_util; | ||
501 | spin_unlock_bh(&adapter->qdio_stat_lock); | ||
497 | 502 | ||
498 | return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), | 503 | return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), |
499 | (unsigned long long)adapter->req_q_util); | 504 | (unsigned long long)util); |
500 | } | 505 | } |
501 | static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); | 506 | static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); |
502 | 507 | ||
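The zfcp_sysfs change above exists so the 64-bit utilisation counter is sampled under the same lock the interrupt path uses to update it. A minimal stand-alone sketch of that snapshot pattern, using a plain pthread mutex and a hypothetical sketch_stats structure in place of the adapter's qdio_stat_lock:

#include <pthread.h>
#include <stdint.h>

/* Simplified stand-in for the adapter's qdio statistics. */
struct sketch_stats {
	pthread_mutex_t lock;	/* stands in for qdio_stat_lock */
	uint64_t req_q_util;
};

/*
 * Take a consistent snapshot of the 64-bit counter before formatting it.
 * Without the lock, a 32-bit system could read a torn value while the
 * interrupt path is updating the counter.
 */
static uint64_t sketch_read_util(struct sketch_stats *st)
{
	uint64_t util;

	pthread_mutex_lock(&st->lock);
	util = st->req_q_util;
	pthread_mutex_unlock(&st->lock);
	return util;
}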
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 2bc22be5f849..145ab9ba55ea 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -415,9 +415,9 @@ static void fc_exch_timeout(struct work_struct *work) | |||
415 | e_stat = ep->esb_stat; | 415 | e_stat = ep->esb_stat; |
416 | if (e_stat & ESB_ST_COMPLETE) { | 416 | if (e_stat & ESB_ST_COMPLETE) { |
417 | ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL; | 417 | ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL; |
418 | spin_unlock_bh(&ep->ex_lock); | ||
418 | if (e_stat & ESB_ST_REC_QUAL) | 419 | if (e_stat & ESB_ST_REC_QUAL) |
419 | fc_exch_rrq(ep); | 420 | fc_exch_rrq(ep); |
420 | spin_unlock_bh(&ep->ex_lock); | ||
421 | goto done; | 421 | goto done; |
422 | } else { | 422 | } else { |
423 | resp = ep->resp; | 423 | resp = ep->resp; |
@@ -1624,14 +1624,14 @@ static void fc_exch_rrq(struct fc_exch *ep) | |||
1624 | struct fc_lport *lp; | 1624 | struct fc_lport *lp; |
1625 | struct fc_els_rrq *rrq; | 1625 | struct fc_els_rrq *rrq; |
1626 | struct fc_frame *fp; | 1626 | struct fc_frame *fp; |
1627 | struct fc_seq *rrq_sp; | ||
1628 | u32 did; | 1627 | u32 did; |
1629 | 1628 | ||
1630 | lp = ep->lp; | 1629 | lp = ep->lp; |
1631 | 1630 | ||
1632 | fp = fc_frame_alloc(lp, sizeof(*rrq)); | 1631 | fp = fc_frame_alloc(lp, sizeof(*rrq)); |
1633 | if (!fp) | 1632 | if (!fp) |
1634 | return; | 1633 | goto retry; |
1634 | |||
1635 | rrq = fc_frame_payload_get(fp, sizeof(*rrq)); | 1635 | rrq = fc_frame_payload_get(fp, sizeof(*rrq)); |
1636 | memset(rrq, 0, sizeof(*rrq)); | 1636 | memset(rrq, 0, sizeof(*rrq)); |
1637 | rrq->rrq_cmd = ELS_RRQ; | 1637 | rrq->rrq_cmd = ELS_RRQ; |
@@ -1647,13 +1647,20 @@ static void fc_exch_rrq(struct fc_exch *ep) | |||
1647 | fc_host_port_id(lp->host), FC_TYPE_ELS, | 1647 | fc_host_port_id(lp->host), FC_TYPE_ELS, |
1648 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | 1648 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); |
1649 | 1649 | ||
1650 | rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, | 1650 | if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov)) |
1651 | lp->e_d_tov); | 1651 | return; |
1652 | if (!rrq_sp) { | 1652 | |
1653 | ep->esb_stat |= ESB_ST_REC_QUAL; | 1653 | retry: |
1654 | fc_exch_timer_set_locked(ep, ep->r_a_tov); | 1654 | spin_lock_bh(&ep->ex_lock); |
1655 | if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) { | ||
1656 | spin_unlock_bh(&ep->ex_lock); | ||
1657 | /* drop hold for rec qual */ | ||
1658 | fc_exch_release(ep); | ||
1655 | return; | 1659 | return; |
1656 | } | 1660 | } |
1661 | ep->esb_stat |= ESB_ST_REC_QUAL; | ||
1662 | fc_exch_timer_set_locked(ep, ep->r_a_tov); | ||
1663 | spin_unlock_bh(&ep->ex_lock); | ||
1657 | } | 1664 | } |
1658 | 1665 | ||
1659 | 1666 | ||
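The retry path added to fc_exch_rrq follows a common pattern: attempt the send outside the exchange lock, and on failure re-take the lock, bail out if the exchange is already being torn down, otherwise mark it and re-arm the timer. A hedged sketch of that control flow, with a pthread mutex and hypothetical sketch_* helpers standing in for the exchange lock, frame send, and timer:

#include <pthread.h>
#include <stdbool.h>

struct sketch_exch {
	pthread_mutex_t lock;
	bool tearing_down;	/* stands in for FC_EX_RST_CLEANUP | FC_EX_DONE */
	bool rec_qual;		/* stands in for ESB_ST_REC_QUAL */
};

/* Hypothetical helpers; the real driver sends an RRQ frame and arms a timer. */
static bool sketch_try_send(struct sketch_exch *ep) { (void)ep; return false; }
static void sketch_arm_timer(struct sketch_exch *ep) { (void)ep; }
static void sketch_release(struct sketch_exch *ep) { (void)ep; }

static void sketch_rrq(struct sketch_exch *ep)
{
	if (sketch_try_send(ep))
		return;		/* sent; the response handler cleans up */

	/* Retry later: decide under the lock. */
	pthread_mutex_lock(&ep->lock);
	if (ep->tearing_down) {
		pthread_mutex_unlock(&ep->lock);
		sketch_release(ep);	/* drop the hold taken for REC_QUAL */
		return;
	}
	ep->rec_qual = true;
	sketch_arm_timer(ep);
	pthread_mutex_unlock(&ep->lock);
}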
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 716cc344c5df..a751f6230c22 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1974,10 +1974,10 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
1974 | * good and have never sent us a successful tmf response | 1974 | * good and have never sent us a successful tmf response |
1975 | * then sent more data for the cmd. | 1975 | * then sent more data for the cmd. |
1976 | */ | 1976 | */ |
1977 | spin_lock(&session->lock); | 1977 | spin_lock_bh(&session->lock); |
1978 | fail_scsi_task(task, DID_ABORT); | 1978 | fail_scsi_task(task, DID_ABORT); |
1979 | conn->tmf_state = TMF_INITIAL; | 1979 | conn->tmf_state = TMF_INITIAL; |
1980 | spin_unlock(&session->lock); | 1980 | spin_unlock_bh(&session->lock); |
1981 | iscsi_start_tx(conn); | 1981 | iscsi_start_tx(conn); |
1982 | goto success_unlocked; | 1982 | goto success_unlocked; |
1983 | case TMF_TIMEDOUT: | 1983 | case TMF_TIMEDOUT: |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 54fa1e42dc4d..b3381959acce 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -766,6 +766,7 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) | |||
766 | if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, | 766 | if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, |
767 | SAS_ADDR_SIZE) && ephy->port) { | 767 | SAS_ADDR_SIZE) && ephy->port) { |
768 | sas_port_add_phy(ephy->port, phy->phy); | 768 | sas_port_add_phy(ephy->port, phy->phy); |
769 | phy->port = ephy->port; | ||
769 | phy->phy_state = PHY_DEVICE_DISCOVERED; | 770 | phy->phy_state = PHY_DEVICE_DISCOVERED; |
770 | return 0; | 771 | return 0; |
771 | } | 772 | } |
@@ -945,11 +946,21 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) | |||
945 | if (ex->ex_phy[i].phy_state == PHY_VACANT || | 946 | if (ex->ex_phy[i].phy_state == PHY_VACANT || |
946 | ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) | 947 | ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) |
947 | continue; | 948 | continue; |
948 | 949 | /* | |
950 | * Due to races, the phy might not get added to the | ||
951 | * wide port, so we add the phy to the wide port here. | ||
952 | */ | ||
949 | if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == | 953 | if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == |
950 | SAS_ADDR(child->sas_addr)) | 954 | SAS_ADDR(child->sas_addr)) { |
951 | ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; | 955 | ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; |
956 | res = sas_ex_join_wide_port(dev, i); | ||
957 | if (!res) | ||
958 | SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", | ||
959 | i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); | ||
960 | |||
961 | } | ||
952 | } | 962 | } |
963 | res = 0; | ||
953 | } | 964 | } |
954 | 965 | ||
955 | return res; | 966 | return res; |
@@ -1598,7 +1609,7 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev, | |||
1598 | } | 1609 | } |
1599 | 1610 | ||
1600 | static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, | 1611 | static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, |
1601 | int from_phy) | 1612 | int from_phy, bool update) |
1602 | { | 1613 | { |
1603 | struct expander_device *ex = &dev->ex_dev; | 1614 | struct expander_device *ex = &dev->ex_dev; |
1604 | int res = 0; | 1615 | int res = 0; |
@@ -1611,7 +1622,9 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, | |||
1611 | if (res) | 1622 | if (res) |
1612 | goto out; | 1623 | goto out; |
1613 | else if (phy_change_count != ex->ex_phy[i].phy_change_count) { | 1624 | else if (phy_change_count != ex->ex_phy[i].phy_change_count) { |
1614 | ex->ex_phy[i].phy_change_count = phy_change_count; | 1625 | if (update) |
1626 | ex->ex_phy[i].phy_change_count = | ||
1627 | phy_change_count; | ||
1615 | *phy_id = i; | 1628 | *phy_id = i; |
1616 | return 0; | 1629 | return 0; |
1617 | } | 1630 | } |
@@ -1653,31 +1666,52 @@ out: | |||
1653 | kfree(rg_req); | 1666 | kfree(rg_req); |
1654 | return res; | 1667 | return res; |
1655 | } | 1668 | } |
1669 | /** | ||

1670 | * sas_find_bcast_dev - find the device that issued BROADCAST(CHANGE). | ||
1671 | * @dev: domain device to be checked. | ||
1672 | * @src_dev: the device which originated BROADCAST(CHANGE). | ||
1673 | * | ||
1674 | * Add support for self-configuring expanders. Suppose two expanders are | ||
1675 | * cascaded: while the first-level expander is self-configuring, hot-plugging | ||
1676 | * disks into the second-level expander causes BROADCAST(CHANGE) to be | ||
1677 | * originated not only in the second-level expander but also in the first- | ||
1678 | * level expander (see SAS protocol SAS 2r-14, 7.11 for details). That is, | ||
1679 | * the expander change count in both expanders increments at least once, | ||
1680 | * but the expander whose phy change count has changed is the source | ||
1681 | * device we are interested in. | ||
1682 | */ | ||
1656 | 1683 | ||
1657 | static int sas_find_bcast_dev(struct domain_device *dev, | 1684 | static int sas_find_bcast_dev(struct domain_device *dev, |
1658 | struct domain_device **src_dev) | 1685 | struct domain_device **src_dev) |
1659 | { | 1686 | { |
1660 | struct expander_device *ex = &dev->ex_dev; | 1687 | struct expander_device *ex = &dev->ex_dev; |
1661 | int ex_change_count = -1; | 1688 | int ex_change_count = -1; |
1689 | int phy_id = -1; | ||
1662 | int res; | 1690 | int res; |
1691 | struct domain_device *ch; | ||
1663 | 1692 | ||
1664 | res = sas_get_ex_change_count(dev, &ex_change_count); | 1693 | res = sas_get_ex_change_count(dev, &ex_change_count); |
1665 | if (res) | 1694 | if (res) |
1666 | goto out; | 1695 | goto out; |
1667 | if (ex_change_count != -1 && | 1696 | if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) { |
1668 | ex_change_count != ex->ex_change_count) { | 1697 | /* Just detect if this expander phys phy change count changed, |
1669 | *src_dev = dev; | 1698 | * in order to determine if this expander originate BROADCAST, |
1670 | ex->ex_change_count = ex_change_count; | 1699 | * and do not update phy change count field in our structure. |
1671 | } else { | 1700 | */ |
1672 | struct domain_device *ch; | 1701 | res = sas_find_bcast_phy(dev, &phy_id, 0, false); |
1673 | 1702 | if (phy_id != -1) { | |
1674 | list_for_each_entry(ch, &ex->children, siblings) { | 1703 | *src_dev = dev; |
1675 | if (ch->dev_type == EDGE_DEV || | 1704 | ex->ex_change_count = ex_change_count; |
1676 | ch->dev_type == FANOUT_DEV) { | 1705 | SAS_DPRINTK("Expander phy change count has changed\n"); |
1677 | res = sas_find_bcast_dev(ch, src_dev); | 1706 | return res; |
1678 | if (src_dev) | 1707 | } else |
1679 | return res; | 1708 | SAS_DPRINTK("Expander phys DID NOT change\n"); |
1680 | } | 1709 | } |
1710 | list_for_each_entry(ch, &ex->children, siblings) { | ||
1711 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { | ||
1712 | res = sas_find_bcast_dev(ch, src_dev); | ||
1713 | if (src_dev) | ||
1714 | return res; | ||
1681 | } | 1715 | } |
1682 | } | 1716 | } |
1683 | out: | 1717 | out: |
@@ -1700,24 +1734,26 @@ static void sas_unregister_ex_tree(struct domain_device *dev) | |||
1700 | } | 1734 | } |
1701 | 1735 | ||
1702 | static void sas_unregister_devs_sas_addr(struct domain_device *parent, | 1736 | static void sas_unregister_devs_sas_addr(struct domain_device *parent, |
1703 | int phy_id) | 1737 | int phy_id, bool last) |
1704 | { | 1738 | { |
1705 | struct expander_device *ex_dev = &parent->ex_dev; | 1739 | struct expander_device *ex_dev = &parent->ex_dev; |
1706 | struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; | 1740 | struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; |
1707 | struct domain_device *child, *n; | 1741 | struct domain_device *child, *n; |
1708 | 1742 | if (last) { | |
1709 | list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { | 1743 | list_for_each_entry_safe(child, n, |
1710 | if (SAS_ADDR(child->sas_addr) == | 1744 | &ex_dev->children, siblings) { |
1711 | SAS_ADDR(phy->attached_sas_addr)) { | 1745 | if (SAS_ADDR(child->sas_addr) == |
1712 | if (child->dev_type == EDGE_DEV || | 1746 | SAS_ADDR(phy->attached_sas_addr)) { |
1713 | child->dev_type == FANOUT_DEV) | 1747 | if (child->dev_type == EDGE_DEV || |
1714 | sas_unregister_ex_tree(child); | 1748 | child->dev_type == FANOUT_DEV) |
1715 | else | 1749 | sas_unregister_ex_tree(child); |
1716 | sas_unregister_dev(child); | 1750 | else |
1717 | break; | 1751 | sas_unregister_dev(child); |
1752 | break; | ||
1753 | } | ||
1718 | } | 1754 | } |
1755 | sas_disable_routing(parent, phy->attached_sas_addr); | ||
1719 | } | 1756 | } |
1720 | sas_disable_routing(parent, phy->attached_sas_addr); | ||
1721 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | 1757 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); |
1722 | sas_port_delete_phy(phy->port, phy->phy); | 1758 | sas_port_delete_phy(phy->port, phy->phy); |
1723 | if (phy->port->num_phys == 0) | 1759 | if (phy->port->num_phys == 0) |
@@ -1770,15 +1806,31 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) | |||
1770 | { | 1806 | { |
1771 | struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; | 1807 | struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; |
1772 | struct domain_device *child; | 1808 | struct domain_device *child; |
1773 | int res; | 1809 | bool found = false; |
1810 | int res, i; | ||
1774 | 1811 | ||
1775 | SAS_DPRINTK("ex %016llx phy%d new device attached\n", | 1812 | SAS_DPRINTK("ex %016llx phy%d new device attached\n", |
1776 | SAS_ADDR(dev->sas_addr), phy_id); | 1813 | SAS_ADDR(dev->sas_addr), phy_id); |
1777 | res = sas_ex_phy_discover(dev, phy_id); | 1814 | res = sas_ex_phy_discover(dev, phy_id); |
1778 | if (res) | 1815 | if (res) |
1779 | goto out; | 1816 | goto out; |
1817 | /* to support a phy inserted into an existing wide port */ | ||
1818 | for (i = 0; i < dev->ex_dev.num_phys; i++) { | ||
1819 | struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i]; | ||
1820 | if (i == phy_id) | ||
1821 | continue; | ||
1822 | if (SAS_ADDR(ex_phy_temp->attached_sas_addr) == | ||
1823 | SAS_ADDR(ex_phy->attached_sas_addr)) { | ||
1824 | found = true; | ||
1825 | break; | ||
1826 | } | ||
1827 | } | ||
1828 | if (found) { | ||
1829 | sas_ex_join_wide_port(dev, phy_id); | ||
1830 | return 0; | ||
1831 | } | ||
1780 | res = sas_ex_discover_devices(dev, phy_id); | 1832 | res = sas_ex_discover_devices(dev, phy_id); |
1781 | if (res) | 1833 | if (!res) |
1782 | goto out; | 1834 | goto out; |
1783 | list_for_each_entry(child, &dev->ex_dev.children, siblings) { | 1835 | list_for_each_entry(child, &dev->ex_dev.children, siblings) { |
1784 | if (SAS_ADDR(child->sas_addr) == | 1836 | if (SAS_ADDR(child->sas_addr) == |
@@ -1793,7 +1845,7 @@ out: | |||
1793 | return res; | 1845 | return res; |
1794 | } | 1846 | } |
1795 | 1847 | ||
1796 | static int sas_rediscover_dev(struct domain_device *dev, int phy_id) | 1848 | static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) |
1797 | { | 1849 | { |
1798 | struct expander_device *ex = &dev->ex_dev; | 1850 | struct expander_device *ex = &dev->ex_dev; |
1799 | struct ex_phy *phy = &ex->ex_phy[phy_id]; | 1851 | struct ex_phy *phy = &ex->ex_phy[phy_id]; |
@@ -1804,11 +1856,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id) | |||
1804 | switch (res) { | 1856 | switch (res) { |
1805 | case SMP_RESP_NO_PHY: | 1857 | case SMP_RESP_NO_PHY: |
1806 | phy->phy_state = PHY_NOT_PRESENT; | 1858 | phy->phy_state = PHY_NOT_PRESENT; |
1807 | sas_unregister_devs_sas_addr(dev, phy_id); | 1859 | sas_unregister_devs_sas_addr(dev, phy_id, last); |
1808 | goto out; break; | 1860 | goto out; break; |
1809 | case SMP_RESP_PHY_VACANT: | 1861 | case SMP_RESP_PHY_VACANT: |
1810 | phy->phy_state = PHY_VACANT; | 1862 | phy->phy_state = PHY_VACANT; |
1811 | sas_unregister_devs_sas_addr(dev, phy_id); | 1863 | sas_unregister_devs_sas_addr(dev, phy_id, last); |
1812 | goto out; break; | 1864 | goto out; break; |
1813 | case SMP_RESP_FUNC_ACC: | 1865 | case SMP_RESP_FUNC_ACC: |
1814 | break; | 1866 | break; |
@@ -1816,7 +1868,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id) | |||
1816 | 1868 | ||
1817 | if (SAS_ADDR(attached_sas_addr) == 0) { | 1869 | if (SAS_ADDR(attached_sas_addr) == 0) { |
1818 | phy->phy_state = PHY_EMPTY; | 1870 | phy->phy_state = PHY_EMPTY; |
1819 | sas_unregister_devs_sas_addr(dev, phy_id); | 1871 | sas_unregister_devs_sas_addr(dev, phy_id, last); |
1820 | } else if (SAS_ADDR(attached_sas_addr) == | 1872 | } else if (SAS_ADDR(attached_sas_addr) == |
1821 | SAS_ADDR(phy->attached_sas_addr)) { | 1873 | SAS_ADDR(phy->attached_sas_addr)) { |
1822 | SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", | 1874 | SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", |
@@ -1828,12 +1880,27 @@ out: | |||
1828 | return res; | 1880 | return res; |
1829 | } | 1881 | } |
1830 | 1882 | ||
1883 | /** | ||
1884 | * sas_rediscover - revalidate the domain. | ||
1885 | * @dev: domain device to be revalidated. | ||
1886 | * @phy_id: the phy id to be checked. | ||
1887 | * | ||
1888 | * NOTE: this process _must_ quit (return) as soon as any connection | ||
1889 | * errors are encountered. Connection recovery is done elsewhere. | ||
1890 | * The discovery process only interrogates devices in order to discover the | ||
1891 | * domain. For unplugging, we un-register the device only when it is the | ||
1892 | * last phy in the port; for the other phys in the port, we just delete them | ||
1893 | * from the port. For hot-plugging, we do discovery only when it is the | ||
1894 | * first phy; for the other phys in the port, we add them to the port to | ||
1895 | * form the wide port. | ||
1896 | */ | ||
1831 | static int sas_rediscover(struct domain_device *dev, const int phy_id) | 1897 | static int sas_rediscover(struct domain_device *dev, const int phy_id) |
1832 | { | 1898 | { |
1833 | struct expander_device *ex = &dev->ex_dev; | 1899 | struct expander_device *ex = &dev->ex_dev; |
1834 | struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; | 1900 | struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; |
1835 | int res = 0; | 1901 | int res = 0; |
1836 | int i; | 1902 | int i; |
1903 | bool last = true; /* is this the last phy of the port */ | ||
1837 | 1904 | ||
1838 | SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", | 1905 | SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", |
1839 | SAS_ADDR(dev->sas_addr), phy_id); | 1906 | SAS_ADDR(dev->sas_addr), phy_id); |
@@ -1848,13 +1915,13 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id) | |||
1848 | SAS_ADDR(changed_phy->attached_sas_addr)) { | 1915 | SAS_ADDR(changed_phy->attached_sas_addr)) { |
1849 | SAS_DPRINTK("phy%d part of wide port with " | 1916 | SAS_DPRINTK("phy%d part of wide port with " |
1850 | "phy%d\n", phy_id, i); | 1917 | "phy%d\n", phy_id, i); |
1851 | goto out; | 1918 | last = false; |
1919 | break; | ||
1852 | } | 1920 | } |
1853 | } | 1921 | } |
1854 | res = sas_rediscover_dev(dev, phy_id); | 1922 | res = sas_rediscover_dev(dev, phy_id, last); |
1855 | } else | 1923 | } else |
1856 | res = sas_discover_new(dev, phy_id); | 1924 | res = sas_discover_new(dev, phy_id); |
1857 | out: | ||
1858 | return res; | 1925 | return res; |
1859 | } | 1926 | } |
1860 | 1927 | ||
@@ -1881,7 +1948,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) | |||
1881 | 1948 | ||
1882 | do { | 1949 | do { |
1883 | phy_id = -1; | 1950 | phy_id = -1; |
1884 | res = sas_find_bcast_phy(dev, &phy_id, i); | 1951 | res = sas_find_bcast_phy(dev, &phy_id, i, true); |
1885 | if (phy_id == -1) | 1952 | if (phy_id == -1) |
1886 | break; | 1953 | break; |
1887 | res = sas_rediscover(dev, phy_id); | 1954 | res = sas_rediscover(dev, phy_id); |
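The wide-port handling above hinges on whether the changed phy is the last phy in its port still attached to the same SAS address; only in that case is the attached device unregistered. A minimal, self-contained sketch of that decision, using simplified stand-in structures rather than the real libsas types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_MAX_PHYS 128
#define SKETCH_ADDR_SIZE 8	/* like SAS_ADDR_SIZE */

struct sketch_phy {
	uint8_t attached_sas_addr[SKETCH_ADDR_SIZE];
};

struct sketch_expander {
	int num_phys;
	struct sketch_phy phy[SKETCH_MAX_PHYS];
};

/*
 * Return true if no other phy of the expander is still attached to the
 * same SAS address as the changed phy.  In that case the attached device
 * must be unregistered; otherwise the phy is merely removed from the
 * wide port and the device stays registered.
 */
static bool sketch_phy_is_last_in_port(const struct sketch_expander *ex,
				       int changed_phy_id)
{
	const struct sketch_phy *changed = &ex->phy[changed_phy_id];
	int i;

	for (i = 0; i < ex->num_phys; i++) {
		if (i == changed_phy_id)
			continue;
		if (!memcmp(ex->phy[i].attached_sas_addr,
			    changed->attached_sas_addr, SKETCH_ADDR_SIZE))
			return false;	/* another phy still serves this address */
	}
	return true;
}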
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index e6ac59c023f1..fe8b74c706d2 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
@@ -56,7 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
56 | } | 56 | } |
57 | } | 57 | } |
58 | 58 | ||
59 | /* find a port */ | 59 | /* see if the phy should be part of a wide port */ |
60 | spin_lock_irqsave(&sas_ha->phy_port_lock, flags); | 60 | spin_lock_irqsave(&sas_ha->phy_port_lock, flags); |
61 | for (i = 0; i < sas_ha->num_phys; i++) { | 61 | for (i = 0; i < sas_ha->num_phys; i++) { |
62 | port = sas_ha->sas_port[i]; | 62 | port = sas_ha->sas_port[i]; |
@@ -69,12 +69,23 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
69 | SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, | 69 | SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, |
70 | port->id); | 70 | port->id); |
71 | break; | 71 | break; |
72 | } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) { | ||
73 | memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE); | ||
74 | break; | ||
75 | } | 72 | } |
76 | spin_unlock(&port->phy_list_lock); | 73 | spin_unlock(&port->phy_list_lock); |
77 | } | 74 | } |
75 | /* The phy does not match any existing port, create a new one */ | ||
76 | if (i == sas_ha->num_phys) { | ||
77 | for (i = 0; i < sas_ha->num_phys; i++) { | ||
78 | port = sas_ha->sas_port[i]; | ||
79 | spin_lock(&port->phy_list_lock); | ||
80 | if (*(u64 *)port->sas_addr == 0 | ||
81 | && port->num_phys == 0) { | ||
82 | memcpy(port->sas_addr, phy->sas_addr, | ||
83 | SAS_ADDR_SIZE); | ||
84 | break; | ||
85 | } | ||
86 | spin_unlock(&port->phy_list_lock); | ||
87 | } | ||
88 | } | ||
78 | 89 | ||
79 | if (i >= sas_ha->num_phys) { | 90 | if (i >= sas_ha->num_phys) { |
80 | printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", | 91 | printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", |
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c index fcc184cd066d..cbceb0ebabf7 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.c +++ b/drivers/scsi/qla4xxx/ql4_dbg.c | |||
@@ -15,19 +15,18 @@ void qla4xxx_dump_buffer(void *b, uint32_t size) | |||
15 | uint32_t cnt; | 15 | uint32_t cnt; |
16 | uint8_t *c = b; | 16 | uint8_t *c = b; |
17 | 17 | ||
18 | printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " | 18 | printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " |
19 | "Fh\n"); | 19 | "Fh\n"); |
20 | printk("------------------------------------------------------------" | 20 | printk("------------------------------------------------------------" |
21 | "--\n"); | 21 | "--\n"); |
22 | for (cnt = 0; cnt < size; cnt++, c++) { | 22 | for (cnt = 0; cnt < size; c++) { |
23 | printk(KERN_DEBUG "%02x", *c); | 23 | printk(KERN_INFO "%02x", *c); |
24 | if (!(cnt % 16)) | 24 | if (!(++cnt % 16)) |
25 | printk(KERN_DEBUG "\n"); | 25 | printk(KERN_INFO "\n"); |
26 | 26 | ||
27 | else | 27 | else |
28 | printk(KERN_DEBUG " "); | 28 | printk(KERN_INFO " "); |
29 | } | 29 | } |
30 | if (cnt % 16) | 30 | printk(KERN_INFO "\n"); |
31 | printk(KERN_DEBUG "\n"); | ||
32 | } | 31 | } |
33 | 32 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index b586f27c3bd4..81b5f29254e2 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -100,7 +100,6 @@ | |||
100 | #define MAX_SRBS MAX_CMDS_TO_RISC | 100 | #define MAX_SRBS MAX_CMDS_TO_RISC |
101 | #define MBOX_AEN_REG_COUNT 5 | 101 | #define MBOX_AEN_REG_COUNT 5 |
102 | #define MAX_INIT_RETRIES 5 | 102 | #define MAX_INIT_RETRIES 5 |
103 | #define IOCB_HIWAT_CUSHION 16 | ||
104 | 103 | ||
105 | /* | 104 | /* |
106 | * Buffer sizes | 105 | * Buffer sizes |
@@ -184,6 +183,11 @@ struct srb { | |||
184 | uint16_t cc_stat; | 183 | uint16_t cc_stat; |
185 | u_long r_start; /* Time we receive a cmd from OS */ | 184 | u_long r_start; /* Time we receive a cmd from OS */ |
186 | u_long u_start; /* Time when we handed the cmd to F/W */ | 185 | u_long u_start; /* Time when we handed the cmd to F/W */ |
186 | |||
187 | /* Used for extended sense / status continuation */ | ||
188 | uint8_t *req_sense_ptr; | ||
189 | uint16_t req_sense_len; | ||
190 | uint16_t reserved2; | ||
187 | }; | 191 | }; |
188 | 192 | ||
189 | /* | 193 | /* |
@@ -302,7 +306,6 @@ struct scsi_qla_host { | |||
302 | uint32_t tot_ddbs; | 306 | uint32_t tot_ddbs; |
303 | 307 | ||
304 | uint16_t iocb_cnt; | 308 | uint16_t iocb_cnt; |
305 | uint16_t iocb_hiwat; | ||
306 | 309 | ||
307 | /* SRB cache. */ | 310 | /* SRB cache. */ |
308 | #define SRB_MIN_REQ 128 | 311 | #define SRB_MIN_REQ 128 |
@@ -436,6 +439,8 @@ struct scsi_qla_host { | |||
436 | /* Map ddb_list entry by FW ddb index */ | 439 | /* Map ddb_list entry by FW ddb index */ |
437 | struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; | 440 | struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; |
438 | 441 | ||
442 | /* Saved srb for status continuation entry processing */ | ||
443 | struct srb *status_srb; | ||
439 | }; | 444 | }; |
440 | 445 | ||
441 | static inline int is_qla4010(struct scsi_qla_host *ha) | 446 | static inline int is_qla4010(struct scsi_qla_host *ha) |
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 1b667a70cffa..9cd7a608df38 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h | |||
@@ -572,6 +572,7 @@ struct conn_event_log_entry { | |||
572 | *************************************************************************/ | 572 | *************************************************************************/ |
573 | #define IOCB_MAX_CDB_LEN 16 /* Bytes in a CDB */ | 573 | #define IOCB_MAX_CDB_LEN 16 /* Bytes in a CDB */ |
574 | #define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */ | 574 | #define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */ |
575 | #define IOCB_MAX_EXT_SENSEDATA_LEN 60 /* Bytes of extended sense data */ | ||
575 | 576 | ||
576 | /* IOCB header structure */ | 577 | /* IOCB header structure */ |
577 | struct qla4_header { | 578 | struct qla4_header { |
@@ -733,6 +734,12 @@ struct status_entry { | |||
733 | 734 | ||
734 | }; | 735 | }; |
735 | 736 | ||
737 | /* Status Continuation entry */ | ||
738 | struct status_cont_entry { | ||
739 | struct qla4_header hdr; /* 00-03 */ | ||
740 | uint8_t ext_sense_data[IOCB_MAX_EXT_SENSEDATA_LEN]; /* 04-63 */ | ||
741 | }; | ||
742 | |||
736 | struct passthru0 { | 743 | struct passthru0 { |
737 | struct qla4_header hdr; /* 00-03 */ | 744 | struct qla4_header hdr; /* 00-03 */ |
738 | uint32_t handle; /* 04-07 */ | 745 | uint32_t handle; /* 04-07 */ |
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 912a67494adf..e0c32159749c 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c | |||
@@ -10,9 +10,42 @@ | |||
10 | #include "ql4_dbg.h" | 10 | #include "ql4_dbg.h" |
11 | #include "ql4_inline.h" | 11 | #include "ql4_inline.h" |
12 | 12 | ||
13 | |||
14 | #include <scsi/scsi_tcq.h> | 13 | #include <scsi/scsi_tcq.h> |
15 | 14 | ||
15 | static int | ||
16 | qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt) | ||
17 | { | ||
18 | uint16_t cnt; | ||
19 | |||
20 | /* Calculate number of free request entries. */ | ||
21 | if ((req_cnt + 2) >= ha->req_q_count) { | ||
22 | cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); | ||
23 | if (ha->request_in < cnt) | ||
24 | ha->req_q_count = cnt - ha->request_in; | ||
25 | else | ||
26 | ha->req_q_count = REQUEST_QUEUE_DEPTH - | ||
27 | (ha->request_in - cnt); | ||
28 | } | ||
29 | |||
30 | /* Check if room for request in request ring. */ | ||
31 | if ((req_cnt + 2) < ha->req_q_count) | ||
32 | return 1; | ||
33 | else | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha) | ||
38 | { | ||
39 | /* Advance request queue pointer */ | ||
40 | if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) { | ||
41 | ha->request_in = 0; | ||
42 | ha->request_ptr = ha->request_ring; | ||
43 | } else { | ||
44 | ha->request_in++; | ||
45 | ha->request_ptr++; | ||
46 | } | ||
47 | } | ||
48 | |||
16 | /** | 49 | /** |
17 | * qla4xxx_get_req_pkt - returns a valid entry in request queue. | 50 | * qla4xxx_get_req_pkt - returns a valid entry in request queue. |
18 | * @ha: Pointer to host adapter structure. | 51 | * @ha: Pointer to host adapter structure. |
@@ -26,35 +59,18 @@ | |||
26 | static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, | 59 | static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, |
27 | struct queue_entry **queue_entry) | 60 | struct queue_entry **queue_entry) |
28 | { | 61 | { |
29 | uint16_t request_in; | 62 | uint16_t req_cnt = 1; |
30 | uint8_t status = QLA_SUCCESS; | ||
31 | |||
32 | *queue_entry = ha->request_ptr; | ||
33 | 63 | ||
34 | /* get the latest request_in and request_out index */ | 64 | if (qla4xxx_space_in_req_ring(ha, req_cnt)) { |
35 | request_in = ha->request_in; | 65 | *queue_entry = ha->request_ptr; |
36 | ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); | ||
37 | |||
38 | /* Advance request queue pointer and check for queue full */ | ||
39 | if (request_in == (REQUEST_QUEUE_DEPTH - 1)) { | ||
40 | request_in = 0; | ||
41 | ha->request_ptr = ha->request_ring; | ||
42 | } else { | ||
43 | request_in++; | ||
44 | ha->request_ptr++; | ||
45 | } | ||
46 | |||
47 | /* request queue is full, try again later */ | ||
48 | if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) { | ||
49 | /* restore request pointer */ | ||
50 | ha->request_ptr = *queue_entry; | ||
51 | status = QLA_ERROR; | ||
52 | } else { | ||
53 | ha->request_in = request_in; | ||
54 | memset(*queue_entry, 0, sizeof(**queue_entry)); | 66 | memset(*queue_entry, 0, sizeof(**queue_entry)); |
67 | |||
68 | qla4xxx_advance_req_ring_ptr(ha); | ||
69 | ha->req_q_count -= req_cnt; | ||
70 | return QLA_SUCCESS; | ||
55 | } | 71 | } |
56 | 72 | ||
57 | return status; | 73 | return QLA_ERROR; |
58 | } | 74 | } |
59 | 75 | ||
60 | /** | 76 | /** |
@@ -100,21 +116,14 @@ exit_send_marker: | |||
100 | return status; | 116 | return status; |
101 | } | 117 | } |
102 | 118 | ||
103 | static struct continuation_t1_entry* qla4xxx_alloc_cont_entry( | 119 | static struct continuation_t1_entry * |
104 | struct scsi_qla_host *ha) | 120 | qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha) |
105 | { | 121 | { |
106 | struct continuation_t1_entry *cont_entry; | 122 | struct continuation_t1_entry *cont_entry; |
107 | 123 | ||
108 | cont_entry = (struct continuation_t1_entry *)ha->request_ptr; | 124 | cont_entry = (struct continuation_t1_entry *)ha->request_ptr; |
109 | 125 | ||
110 | /* Advance request queue pointer */ | 126 | qla4xxx_advance_req_ring_ptr(ha); |
111 | if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) { | ||
112 | ha->request_in = 0; | ||
113 | ha->request_ptr = ha->request_ring; | ||
114 | } else { | ||
115 | ha->request_in++; | ||
116 | ha->request_ptr++; | ||
117 | } | ||
118 | 127 | ||
119 | /* Load packet defaults */ | 128 | /* Load packet defaults */ |
120 | cont_entry->hdr.entryType = ET_CONTINUE; | 129 | cont_entry->hdr.entryType = ET_CONTINUE; |
@@ -197,13 +206,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) | |||
197 | struct scsi_cmnd *cmd = srb->cmd; | 206 | struct scsi_cmnd *cmd = srb->cmd; |
198 | struct ddb_entry *ddb_entry; | 207 | struct ddb_entry *ddb_entry; |
199 | struct command_t3_entry *cmd_entry; | 208 | struct command_t3_entry *cmd_entry; |
200 | |||
201 | int nseg; | 209 | int nseg; |
202 | uint16_t tot_dsds; | 210 | uint16_t tot_dsds; |
203 | uint16_t req_cnt; | 211 | uint16_t req_cnt; |
204 | |||
205 | unsigned long flags; | 212 | unsigned long flags; |
206 | uint16_t cnt; | ||
207 | uint32_t index; | 213 | uint32_t index; |
208 | char tag[2]; | 214 | char tag[2]; |
209 | 215 | ||
@@ -217,6 +223,19 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) | |||
217 | 223 | ||
218 | index = (uint32_t)cmd->request->tag; | 224 | index = (uint32_t)cmd->request->tag; |
219 | 225 | ||
226 | /* | ||
227 | * Check to see if adapter is online before placing request on | ||
228 | * request queue. If a reset occurs and a request is in the queue, | ||
229 | * the firmware will still attempt to process the request, retrieving | ||
230 | * garbage for pointers. | ||
231 | */ | ||
232 | if (!test_bit(AF_ONLINE, &ha->flags)) { | ||
233 | DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! " | ||
234 | "Do not issue command.\n", | ||
235 | ha->host_no, __func__)); | ||
236 | goto queuing_error; | ||
237 | } | ||
238 | |||
220 | /* Calculate the number of request entries needed. */ | 239 | /* Calculate the number of request entries needed. */ |
221 | nseg = scsi_dma_map(cmd); | 240 | nseg = scsi_dma_map(cmd); |
222 | if (nseg < 0) | 241 | if (nseg < 0) |
@@ -224,17 +243,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) | |||
224 | tot_dsds = nseg; | 243 | tot_dsds = nseg; |
225 | 244 | ||
226 | req_cnt = qla4xxx_calc_request_entries(tot_dsds); | 245 | req_cnt = qla4xxx_calc_request_entries(tot_dsds); |
227 | 246 | if (!qla4xxx_space_in_req_ring(ha, req_cnt)) | |
228 | if (ha->req_q_count < (req_cnt + 2)) { | ||
229 | cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); | ||
230 | if (ha->request_in < cnt) | ||
231 | ha->req_q_count = cnt - ha->request_in; | ||
232 | else | ||
233 | ha->req_q_count = REQUEST_QUEUE_DEPTH - | ||
234 | (ha->request_in - cnt); | ||
235 | } | ||
236 | |||
237 | if (ha->req_q_count < (req_cnt + 2)) | ||
238 | goto queuing_error; | 247 | goto queuing_error; |
239 | 248 | ||
240 | /* total iocbs active */ | 249 | /* total iocbs active */ |
@@ -286,32 +295,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) | |||
286 | break; | 295 | break; |
287 | } | 296 | } |
288 | 297 | ||
289 | 298 | qla4xxx_advance_req_ring_ptr(ha); | |
290 | /* Advance request queue pointer */ | ||
291 | ha->request_in++; | ||
292 | if (ha->request_in == REQUEST_QUEUE_DEPTH) { | ||
293 | ha->request_in = 0; | ||
294 | ha->request_ptr = ha->request_ring; | ||
295 | } else | ||
296 | ha->request_ptr++; | ||
297 | |||
298 | |||
299 | qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); | 299 | qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); |
300 | wmb(); | 300 | wmb(); |
301 | 301 | ||
302 | /* | ||
303 | * Check to see if adapter is online before placing request on | ||
304 | * request queue. If a reset occurs and a request is in the queue, | ||
305 | * the firmware will still attempt to process the request, retrieving | ||
306 | * garbage for pointers. | ||
307 | */ | ||
308 | if (!test_bit(AF_ONLINE, &ha->flags)) { | ||
309 | DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! " | ||
310 | "Do not issue command.\n", | ||
311 | ha->host_no, __func__)); | ||
312 | goto queuing_error; | ||
313 | } | ||
314 | |||
315 | srb->cmd->host_scribble = (unsigned char *)srb; | 302 | srb->cmd->host_scribble = (unsigned char *)srb; |
316 | 303 | ||
317 | /* update counters */ | 304 | /* update counters */ |
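The request-ring bookkeeping factored into qla4xxx_space_in_req_ring above is standard circular-buffer arithmetic between the driver's producer index and the firmware's consumer index, with a two-entry safety cushion. A small stand-alone sketch of the same calculation, using a hypothetical queue depth in place of the driver's REQUEST_QUEUE_DEPTH:

#include <stdint.h>

#define SKETCH_QUEUE_DEPTH 512	/* stands in for REQUEST_QUEUE_DEPTH */

/*
 * Number of free entries in a circular request ring, given the producer
 * index (next slot the driver will write) and the consumer index (last
 * slot the firmware reported as fetched, via the shadow registers).
 */
static uint16_t sketch_ring_free(uint16_t producer, uint16_t consumer)
{
	if (producer < consumer)
		return consumer - producer;
	return SKETCH_QUEUE_DEPTH - (producer - consumer);
}

/* A request fits only if it leaves a two-entry cushion behind it. */
static int sketch_space_in_ring(uint16_t producer, uint16_t consumer,
				uint16_t req_cnt)
{
	return (req_cnt + 2) < sketch_ring_free(producer, consumer);
}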
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 799120fcb9be..8025ee16588e 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -11,6 +11,98 @@ | |||
11 | #include "ql4_inline.h" | 11 | #include "ql4_inline.h" |
12 | 12 | ||
13 | /** | 13 | /** |
14 | * qla4xxx_copy_sense - copy sense data into cmd sense buffer | ||
15 | * @ha: Pointer to host adapter structure. | ||
16 | * @sts_entry: Pointer to status entry structure. | ||
17 | * @srb: Pointer to srb structure. | ||
18 | **/ | ||
19 | static void qla4xxx_copy_sense(struct scsi_qla_host *ha, | ||
20 | struct status_entry *sts_entry, | ||
21 | struct srb *srb) | ||
22 | { | ||
23 | struct scsi_cmnd *cmd = srb->cmd; | ||
24 | uint16_t sense_len; | ||
25 | |||
26 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | ||
27 | sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); | ||
28 | if (sense_len == 0) | ||
29 | return; | ||
30 | |||
31 | /* Save total available sense length, | ||
32 | * not to exceed cmd's sense buffer size */ | ||
33 | sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE); | ||
34 | srb->req_sense_ptr = cmd->sense_buffer; | ||
35 | srb->req_sense_len = sense_len; | ||
36 | |||
37 | /* Copy sense from sts_entry pkt */ | ||
38 | sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN); | ||
39 | memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len); | ||
40 | |||
41 | DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, " | ||
42 | "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, | ||
43 | cmd->device->channel, cmd->device->id, | ||
44 | cmd->device->lun, __func__, | ||
45 | sts_entry->senseData[2] & 0x0f, | ||
46 | sts_entry->senseData[7], | ||
47 | sts_entry->senseData[12], | ||
48 | sts_entry->senseData[13])); | ||
49 | |||
50 | DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len)); | ||
51 | srb->flags |= SRB_GOT_SENSE; | ||
52 | |||
53 | /* Update srb, in case a sts_cont pkt follows */ | ||
54 | srb->req_sense_ptr += sense_len; | ||
55 | srb->req_sense_len -= sense_len; | ||
56 | if (srb->req_sense_len != 0) | ||
57 | ha->status_srb = srb; | ||
58 | else | ||
59 | ha->status_srb = NULL; | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * qla4xxx_status_cont_entry - Process a Status Continuation entry. | ||
64 | * @ha: SCSI driver HA context | ||
65 | * @sts_cont: Entry pointer | ||
66 | * | ||
67 | * Extended sense data. | ||
68 | */ | ||
69 | static void | ||
70 | qla4xxx_status_cont_entry(struct scsi_qla_host *ha, | ||
71 | struct status_cont_entry *sts_cont) | ||
72 | { | ||
73 | struct srb *srb = ha->status_srb; | ||
74 | struct scsi_cmnd *cmd; | ||
75 | uint8_t sense_len; | ||
76 | |||
77 | if (srb == NULL) | ||
78 | return; | ||
79 | |||
80 | cmd = srb->cmd; | ||
81 | if (cmd == NULL) { | ||
82 | DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned " | ||
83 | "back to OS srb=%p srb->state:%d\n", ha->host_no, | ||
84 | __func__, srb, srb->state)); | ||
85 | ha->status_srb = NULL; | ||
86 | return; | ||
87 | } | ||
88 | |||
89 | /* Copy sense data. */ | ||
90 | sense_len = min_t(uint16_t, srb->req_sense_len, | ||
91 | IOCB_MAX_EXT_SENSEDATA_LEN); | ||
92 | memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len); | ||
93 | DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len)); | ||
94 | |||
95 | srb->req_sense_ptr += sense_len; | ||
96 | srb->req_sense_len -= sense_len; | ||
97 | |||
98 | /* Place command on done queue. */ | ||
99 | if (srb->req_sense_len == 0) { | ||
100 | qla4xxx_srb_compl(ha, srb); | ||
101 | ha->status_srb = NULL; | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /** | ||
14 | * qla4xxx_status_entry - processes status IOCBs | 106 | * qla4xxx_status_entry - processes status IOCBs |
15 | * @ha: Pointer to host adapter structure. | 107 | * @ha: Pointer to host adapter structure. |
16 | * @sts_entry: Pointer to status entry structure. | 108 | * @sts_entry: Pointer to status entry structure. |
@@ -23,7 +115,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
23 | struct srb *srb; | 115 | struct srb *srb; |
24 | struct ddb_entry *ddb_entry; | 116 | struct ddb_entry *ddb_entry; |
25 | uint32_t residual; | 117 | uint32_t residual; |
26 | uint16_t sensebytecnt; | ||
27 | 118 | ||
28 | srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); | 119 | srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); |
29 | if (!srb) { | 120 | if (!srb) { |
@@ -92,24 +183,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
92 | break; | 183 | break; |
93 | 184 | ||
94 | /* Copy Sense Data into sense buffer. */ | 185 | /* Copy Sense Data into sense buffer. */ |
95 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | 186 | qla4xxx_copy_sense(ha, sts_entry, srb); |
96 | |||
97 | sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt); | ||
98 | if (sensebytecnt == 0) | ||
99 | break; | ||
100 | |||
101 | memcpy(cmd->sense_buffer, sts_entry->senseData, | ||
102 | min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE)); | ||
103 | |||
104 | DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, " | ||
105 | "ASC/ASCQ = %02x/%02x\n", ha->host_no, | ||
106 | cmd->device->channel, cmd->device->id, | ||
107 | cmd->device->lun, __func__, | ||
108 | sts_entry->senseData[2] & 0x0f, | ||
109 | sts_entry->senseData[12], | ||
110 | sts_entry->senseData[13])); | ||
111 | |||
112 | srb->flags |= SRB_GOT_SENSE; | ||
113 | break; | 187 | break; |
114 | 188 | ||
115 | case SCS_INCOMPLETE: | 189 | case SCS_INCOMPLETE: |
@@ -176,23 +250,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
176 | break; | 250 | break; |
177 | 251 | ||
178 | /* Copy Sense Data into sense buffer. */ | 252 | /* Copy Sense Data into sense buffer. */ |
179 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | 253 | qla4xxx_copy_sense(ha, sts_entry, srb); |
180 | |||
181 | sensebytecnt = | ||
182 | le16_to_cpu(sts_entry->senseDataByteCnt); | ||
183 | if (sensebytecnt == 0) | ||
184 | break; | ||
185 | |||
186 | memcpy(cmd->sense_buffer, sts_entry->senseData, | ||
187 | min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE)); | ||
188 | |||
189 | DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, " | ||
190 | "ASC/ASCQ = %02x/%02x\n", ha->host_no, | ||
191 | cmd->device->channel, cmd->device->id, | ||
192 | cmd->device->lun, __func__, | ||
193 | sts_entry->senseData[2] & 0x0f, | ||
194 | sts_entry->senseData[12], | ||
195 | sts_entry->senseData[13])); | ||
196 | } else { | 254 | } else { |
197 | /* | 255 | /* |
198 | * If RISC reports underrun and target does not | 256 | * If RISC reports underrun and target does not |
@@ -268,9 +326,10 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
268 | 326 | ||
269 | status_entry_exit: | 327 | status_entry_exit: |
270 | 328 | ||
271 | /* complete the request */ | 329 | /* complete the request, if not waiting for status_continuation pkt */ |
272 | srb->cc_stat = sts_entry->completionStatus; | 330 | srb->cc_stat = sts_entry->completionStatus; |
273 | qla4xxx_srb_compl(ha, srb); | 331 | if (ha->status_srb == NULL) |
332 | qla4xxx_srb_compl(ha, srb); | ||
274 | } | 333 | } |
275 | 334 | ||
276 | /** | 335 | /** |
@@ -305,10 +364,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha) | |||
305 | /* process entry */ | 364 | /* process entry */ |
306 | switch (sts_entry->hdr.entryType) { | 365 | switch (sts_entry->hdr.entryType) { |
307 | case ET_STATUS: | 366 | case ET_STATUS: |
308 | /* | 367 | /* Common status */ |
309 | * Common status - Single completion posted in single | ||
310 | * IOSB. | ||
311 | */ | ||
312 | qla4xxx_status_entry(ha, sts_entry); | 368 | qla4xxx_status_entry(ha, sts_entry); |
313 | break; | 369 | break; |
314 | 370 | ||
@@ -316,9 +372,8 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha) | |||
316 | break; | 372 | break; |
317 | 373 | ||
318 | case ET_STATUS_CONTINUATION: | 374 | case ET_STATUS_CONTINUATION: |
319 | /* Just throw away the status continuation entries */ | 375 | qla4xxx_status_cont_entry(ha, |
320 | DEBUG2(printk("scsi%ld: %s: Status Continuation entry " | 376 | (struct status_cont_entry *) sts_entry); |
321 | "- ignoring\n", ha->host_no, __func__)); | ||
322 | break; | 377 | break; |
323 | 378 | ||
324 | case ET_COMMAND: | 379 | case ET_COMMAND: |
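The new sense handling above splits one logical sense buffer across a status entry (up to 32 bytes) and zero or more status continuation entries (up to 60 bytes each), tracking the cursor in the srb until the buffer is complete. A simplified, self-contained sketch of that reassembly, with chunk sizes mirroring IOCB_MAX_SENSEDATA_LEN and IOCB_MAX_EXT_SENSEDATA_LEN and sketch_* names that are illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_FIRST_CHUNK 32	/* like IOCB_MAX_SENSEDATA_LEN */
#define SKETCH_CONT_CHUNK  60	/* like IOCB_MAX_EXT_SENSEDATA_LEN */

struct sketch_sense_state {
	uint8_t *dst;		/* where the next chunk goes */
	size_t remaining;	/* bytes still expected */
};

/* Copy the first chunk from the status entry and record what is left. */
static void sketch_sense_start(struct sketch_sense_state *st,
			       uint8_t *sense_buf, size_t total_len,
			       const uint8_t *first_chunk)
{
	size_t n = total_len < SKETCH_FIRST_CHUNK ? total_len : SKETCH_FIRST_CHUNK;

	memcpy(sense_buf, first_chunk, n);
	st->dst = sense_buf + n;
	st->remaining = total_len - n;
}

/* Append one continuation chunk; return nonzero once the buffer is complete. */
static int sketch_sense_continue(struct sketch_sense_state *st,
				 const uint8_t *cont_chunk)
{
	size_t n = st->remaining < SKETCH_CONT_CHUNK ? st->remaining : SKETCH_CONT_CHUNK;

	memcpy(st->dst, cont_chunk, n);
	st->dst += n;
	st->remaining -= n;
	return st->remaining == 0;
}

Only once remaining reaches zero is the command completed, which is why the status entry path above defers qla4xxx_srb_compl while ha->status_srb is set.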
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 051b0f5e8c8e..09d6d4b76f39 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -385,16 +385,6 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha) | |||
385 | mbox_sts[0])); | 385 | mbox_sts[0])); |
386 | return QLA_ERROR; | 386 | return QLA_ERROR; |
387 | } | 387 | } |
388 | |||
389 | /* High-water mark of IOCBs */ | ||
390 | ha->iocb_hiwat = mbox_sts[2]; | ||
391 | if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION) | ||
392 | ha->iocb_hiwat -= IOCB_HIWAT_CUSHION; | ||
393 | else | ||
394 | dev_info(&ha->pdev->dev, "WARNING!!! You have less than %d " | ||
395 | "firmware IOCBs available (%d).\n", | ||
396 | IOCB_HIWAT_CUSHION, ha->iocb_hiwat); | ||
397 | |||
398 | return QLA_SUCCESS; | 388 | return QLA_SUCCESS; |
399 | } | 389 | } |
400 | 390 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index ec9da6ce8489..40e3cafb3a9c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -66,6 +66,7 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, | |||
66 | static int qla4xxx_host_get_param(struct Scsi_Host *shost, | 66 | static int qla4xxx_host_get_param(struct Scsi_Host *shost, |
67 | enum iscsi_host_param param, char *buf); | 67 | enum iscsi_host_param param, char *buf); |
68 | static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); | 68 | static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); |
69 | static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); | ||
69 | 70 | ||
70 | /* | 71 | /* |
71 | * SCSI host template entry points | 72 | * SCSI host template entry points |
@@ -89,6 +90,7 @@ static struct scsi_host_template qla4xxx_driver_template = { | |||
89 | .eh_device_reset_handler = qla4xxx_eh_device_reset, | 90 | .eh_device_reset_handler = qla4xxx_eh_device_reset, |
90 | .eh_target_reset_handler = qla4xxx_eh_target_reset, | 91 | .eh_target_reset_handler = qla4xxx_eh_target_reset, |
91 | .eh_host_reset_handler = qla4xxx_eh_host_reset, | 92 | .eh_host_reset_handler = qla4xxx_eh_host_reset, |
93 | .eh_timed_out = qla4xxx_eh_cmd_timed_out, | ||
92 | 94 | ||
93 | .slave_configure = qla4xxx_slave_configure, | 95 | .slave_configure = qla4xxx_slave_configure, |
94 | .slave_alloc = qla4xxx_slave_alloc, | 96 | .slave_alloc = qla4xxx_slave_alloc, |
@@ -124,6 +126,21 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { | |||
124 | 126 | ||
125 | static struct scsi_transport_template *qla4xxx_scsi_transport; | 127 | static struct scsi_transport_template *qla4xxx_scsi_transport; |
126 | 128 | ||
129 | static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) | ||
130 | { | ||
131 | struct iscsi_cls_session *session; | ||
132 | struct ddb_entry *ddb_entry; | ||
133 | |||
134 | session = starget_to_session(scsi_target(sc->device)); | ||
135 | ddb_entry = session->dd_data; | ||
136 | |||
137 | /* if we are not logged in then the LLD is going to clean up the cmd */ | ||
138 | if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) | ||
139 | return BLK_EH_RESET_TIMER; | ||
140 | else | ||
141 | return BLK_EH_NOT_HANDLED; | ||
142 | } | ||
143 | |||
127 | static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) | 144 | static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) |
128 | { | 145 | { |
129 | struct ddb_entry *ddb_entry = session->dd_data; | 146 | struct ddb_entry *ddb_entry = session->dd_data; |
@@ -904,18 +921,17 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | |||
904 | /* Flush any pending ddb changed AENs */ | 921 | /* Flush any pending ddb changed AENs */ |
905 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | 922 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); |
906 | 923 | ||
924 | qla4xxx_flush_active_srbs(ha); | ||
925 | |||
907 | /* Reset the firmware. If successful, function | 926 | /* Reset the firmware. If successful, function |
908 | * returns with ISP interrupts enabled. | 927 | * returns with ISP interrupts enabled. |
909 | */ | 928 | */ |
910 | if (status == QLA_SUCCESS) { | 929 | DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", |
911 | DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", | 930 | ha->host_no, __func__)); |
912 | ha->host_no, __func__)); | 931 | if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) |
913 | qla4xxx_flush_active_srbs(ha); | 932 | status = qla4xxx_soft_reset(ha); |
914 | if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) | 933 | else |
915 | status = qla4xxx_soft_reset(ha); | 934 | status = QLA_ERROR; |
916 | else | ||
917 | status = QLA_ERROR; | ||
918 | } | ||
919 | 935 | ||
920 | /* Flush any pending ddb changed AENs */ | 936 | /* Flush any pending ddb changed AENs */ |
921 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); | 937 | qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); |
@@ -1527,11 +1543,9 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
1527 | { | 1543 | { |
1528 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 1544 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
1529 | struct ddb_entry *ddb_entry = cmd->device->hostdata; | 1545 | struct ddb_entry *ddb_entry = cmd->device->hostdata; |
1530 | struct srb *sp; | ||
1531 | int ret = FAILED, stat; | 1546 | int ret = FAILED, stat; |
1532 | 1547 | ||
1533 | sp = (struct srb *) cmd->SCp.ptr; | 1548 | if (!ddb_entry) |
1534 | if (!sp || !ddb_entry) | ||
1535 | return ret; | 1549 | return ret; |
1536 | 1550 | ||
1537 | dev_info(&ha->pdev->dev, | 1551 | dev_info(&ha->pdev->dev, |
@@ -1644,7 +1658,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
1644 | ha = (struct scsi_qla_host *) cmd->device->host->hostdata; | 1658 | ha = (struct scsi_qla_host *) cmd->device->host->hostdata; |
1645 | 1659 | ||
1646 | dev_info(&ha->pdev->dev, | 1660 | dev_info(&ha->pdev->dev, |
1647 | "scsi(%ld:%d:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, | 1661 | "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no, |
1648 | cmd->device->channel, cmd->device->id, cmd->device->lun); | 1662 | cmd->device->channel, cmd->device->id, cmd->device->lun); |
1649 | 1663 | ||
1650 | if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { | 1664 | if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { |
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index ab984cb89cea..6980cb279c81 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h | |||
@@ -5,5 +5,5 @@ | |||
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define QLA4XXX_DRIVER_VERSION "5.01.00-k8" | 8 | #define QLA4XXX_DRIVER_VERSION "5.01.00-k9" |
9 | 9 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 783e33c65eb7..b47240ca4b19 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -990,7 +990,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, | |||
990 | struct iscsi_uevent *ev; | 990 | struct iscsi_uevent *ev; |
991 | int len = NLMSG_SPACE(sizeof(*ev) + data_size); | 991 | int len = NLMSG_SPACE(sizeof(*ev) + data_size); |
992 | 992 | ||
993 | skb = alloc_skb(len, GFP_NOIO); | 993 | skb = alloc_skb(len, GFP_ATOMIC); |
994 | if (!skb) { | 994 | if (!skb) { |
995 | printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); | 995 | printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); |
996 | return -ENOMEM; | 996 | return -ENOMEM; |
@@ -1012,7 +1012,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, | |||
1012 | 1012 | ||
1013 | memcpy((char *)ev + sizeof(*ev), data, data_size); | 1013 | memcpy((char *)ev + sizeof(*ev), data, data_size); |
1014 | 1014 | ||
1015 | return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO); | 1015 | return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC); |
1016 | } | 1016 | } |
1017 | EXPORT_SYMBOL_GPL(iscsi_offload_mesg); | 1017 | EXPORT_SYMBOL_GPL(iscsi_offload_mesg); |
1018 | 1018 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 5616cd780ff3..b7b9fec67a98 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1840,6 +1840,18 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) | |||
1840 | kfree(buffer); | 1840 | kfree(buffer); |
1841 | } | 1841 | } |
1842 | 1842 | ||
1843 | static int sd_try_extended_inquiry(struct scsi_device *sdp) | ||
1844 | { | ||
1845 | /* | ||
1846 | * Although VPD inquiries can go to SCSI-2 type devices, | ||
1847 | * some USB ones crash on receiving them, and the pages | ||
1848 | * we currently ask for are for SPC-3 and beyond | ||
1849 | */ | ||
1850 | if (sdp->scsi_level > SCSI_SPC_2) | ||
1851 | return 1; | ||
1852 | return 0; | ||
1853 | } | ||
1854 | |||
1843 | /** | 1855 | /** |
1844 | * sd_revalidate_disk - called the first time a new disk is seen, | 1856 | * sd_revalidate_disk - called the first time a new disk is seen, |
1845 | * performs disk spin up, read_capacity, etc. | 1857 | * performs disk spin up, read_capacity, etc. |
@@ -1877,8 +1889,12 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
1877 | */ | 1889 | */ |
1878 | if (sdkp->media_present) { | 1890 | if (sdkp->media_present) { |
1879 | sd_read_capacity(sdkp, buffer); | 1891 | sd_read_capacity(sdkp, buffer); |
1880 | sd_read_block_limits(sdkp); | 1892 | |
1881 | sd_read_block_characteristics(sdkp); | 1893 | if (sd_try_extended_inquiry(sdp)) { |
1894 | sd_read_block_limits(sdkp); | ||
1895 | sd_read_block_characteristics(sdkp); | ||
1896 | } | ||
1897 | |||
1882 | sd_read_write_protect_flag(sdkp, buffer); | 1898 | sd_read_write_protect_flag(sdkp, buffer); |
1883 | sd_read_cache_type(sdkp, buffer); | 1899 | sd_read_cache_type(sdkp, buffer); |
1884 | sd_read_app_tag_own(sdkp, buffer); | 1900 | sd_read_app_tag_own(sdkp, buffer); |
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c index fb00ed5296e6..fed1a9a1ffb4 100644 --- a/drivers/serial/s3c2400.c +++ b/drivers/serial/s3c2400.c | |||
@@ -76,7 +76,7 @@ static int s3c2400_serial_probe(struct platform_device *dev) | |||
76 | return s3c24xx_serial_probe(dev, &s3c2400_uart_inf); | 76 | return s3c24xx_serial_probe(dev, &s3c2400_uart_inf); |
77 | } | 77 | } |
78 | 78 | ||
79 | static struct platform_driver s3c2400_serial_drv = { | 79 | static struct platform_driver s3c2400_serial_driver = { |
80 | .probe = s3c2400_serial_probe, | 80 | .probe = s3c2400_serial_probe, |
81 | .remove = __devexit_p(s3c24xx_serial_remove), | 81 | .remove = __devexit_p(s3c24xx_serial_remove), |
82 | .driver = { | 82 | .driver = { |
@@ -85,16 +85,16 @@ static struct platform_driver s3c2400_serial_drv = { | |||
85 | }, | 85 | }, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | s3c24xx_console_init(&s3c2400_serial_drv, &s3c2400_uart_inf); | 88 | s3c24xx_console_init(&s3c2400_serial_driver, &s3c2400_uart_inf); |
89 | 89 | ||
90 | static inline int s3c2400_serial_init(void) | 90 | static inline int s3c2400_serial_init(void) |
91 | { | 91 | { |
92 | return s3c24xx_serial_init(&s3c2400_serial_drv, &s3c2400_uart_inf); | 92 | return s3c24xx_serial_init(&s3c2400_serial_driver, &s3c2400_uart_inf); |
93 | } | 93 | } |
94 | 94 | ||
95 | static inline void s3c2400_serial_exit(void) | 95 | static inline void s3c2400_serial_exit(void) |
96 | { | 96 | { |
97 | platform_driver_unregister(&s3c2400_serial_drv); | 97 | platform_driver_unregister(&s3c2400_serial_driver); |
98 | } | 98 | } |
99 | 99 | ||
100 | module_init(s3c2400_serial_init); | 100 | module_init(s3c2400_serial_init); |
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c index b5d7cbcba2ae..c99f0821cae3 100644 --- a/drivers/serial/s3c2410.c +++ b/drivers/serial/s3c2410.c | |||
@@ -88,7 +88,7 @@ static int s3c2410_serial_probe(struct platform_device *dev) | |||
88 | return s3c24xx_serial_probe(dev, &s3c2410_uart_inf); | 88 | return s3c24xx_serial_probe(dev, &s3c2410_uart_inf); |
89 | } | 89 | } |
90 | 90 | ||
91 | static struct platform_driver s3c2410_serial_drv = { | 91 | static struct platform_driver s3c2410_serial_driver = { |
92 | .probe = s3c2410_serial_probe, | 92 | .probe = s3c2410_serial_probe, |
93 | .remove = __devexit_p(s3c24xx_serial_remove), | 93 | .remove = __devexit_p(s3c24xx_serial_remove), |
94 | .driver = { | 94 | .driver = { |
@@ -97,16 +97,16 @@ static struct platform_driver s3c2410_serial_drv = { | |||
97 | }, | 97 | }, |
98 | }; | 98 | }; |
99 | 99 | ||
100 | s3c24xx_console_init(&s3c2410_serial_drv, &s3c2410_uart_inf); | 100 | s3c24xx_console_init(&s3c2410_serial_driver, &s3c2410_uart_inf); |
101 | 101 | ||
102 | static int __init s3c2410_serial_init(void) | 102 | static int __init s3c2410_serial_init(void) |
103 | { | 103 | { |
104 | return s3c24xx_serial_init(&s3c2410_serial_drv, &s3c2410_uart_inf); | 104 | return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf); |
105 | } | 105 | } |
106 | 106 | ||
107 | static void __exit s3c2410_serial_exit(void) | 107 | static void __exit s3c2410_serial_exit(void) |
108 | { | 108 | { |
109 | platform_driver_unregister(&s3c2410_serial_drv); | 109 | platform_driver_unregister(&s3c2410_serial_driver); |
110 | } | 110 | } |
111 | 111 | ||
112 | module_init(s3c2410_serial_init); | 112 | module_init(s3c2410_serial_init); |
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c index 11dcb90bdfef..6e057d8809d3 100644 --- a/drivers/serial/s3c2412.c +++ b/drivers/serial/s3c2412.c | |||
@@ -121,7 +121,7 @@ static int s3c2412_serial_probe(struct platform_device *dev) | |||
121 | return s3c24xx_serial_probe(dev, &s3c2412_uart_inf); | 121 | return s3c24xx_serial_probe(dev, &s3c2412_uart_inf); |
122 | } | 122 | } |
123 | 123 | ||
124 | static struct platform_driver s3c2412_serial_drv = { | 124 | static struct platform_driver s3c2412_serial_driver = { |
125 | .probe = s3c2412_serial_probe, | 125 | .probe = s3c2412_serial_probe, |
126 | .remove = __devexit_p(s3c24xx_serial_remove), | 126 | .remove = __devexit_p(s3c24xx_serial_remove), |
127 | .driver = { | 127 | .driver = { |
@@ -130,16 +130,16 @@ static struct platform_driver s3c2412_serial_drv = { | |||
130 | }, | 130 | }, |
131 | }; | 131 | }; |
132 | 132 | ||
133 | s3c24xx_console_init(&s3c2412_serial_drv, &s3c2412_uart_inf); | 133 | s3c24xx_console_init(&s3c2412_serial_driver, &s3c2412_uart_inf); |
134 | 134 | ||
135 | static inline int s3c2412_serial_init(void) | 135 | static inline int s3c2412_serial_init(void) |
136 | { | 136 | { |
137 | return s3c24xx_serial_init(&s3c2412_serial_drv, &s3c2412_uart_inf); | 137 | return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf); |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline void s3c2412_serial_exit(void) | 140 | static inline void s3c2412_serial_exit(void) |
141 | { | 141 | { |
142 | platform_driver_unregister(&s3c2412_serial_drv); | 142 | platform_driver_unregister(&s3c2412_serial_driver); |
143 | } | 143 | } |
144 | 144 | ||
145 | module_init(s3c2412_serial_init); | 145 | module_init(s3c2412_serial_init); |
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c index 06c5b0cc47a3..69ff5d340f04 100644 --- a/drivers/serial/s3c2440.c +++ b/drivers/serial/s3c2440.c | |||
@@ -151,7 +151,7 @@ static int s3c2440_serial_probe(struct platform_device *dev) | |||
151 | return s3c24xx_serial_probe(dev, &s3c2440_uart_inf); | 151 | return s3c24xx_serial_probe(dev, &s3c2440_uart_inf); |
152 | } | 152 | } |
153 | 153 | ||
154 | static struct platform_driver s3c2440_serial_drv = { | 154 | static struct platform_driver s3c2440_serial_driver = { |
155 | .probe = s3c2440_serial_probe, | 155 | .probe = s3c2440_serial_probe, |
156 | .remove = __devexit_p(s3c24xx_serial_remove), | 156 | .remove = __devexit_p(s3c24xx_serial_remove), |
157 | .driver = { | 157 | .driver = { |
@@ -160,16 +160,16 @@ static struct platform_driver s3c2440_serial_drv = { | |||
160 | }, | 160 | }, |
161 | }; | 161 | }; |
162 | 162 | ||
163 | s3c24xx_console_init(&s3c2440_serial_drv, &s3c2440_uart_inf); | 163 | s3c24xx_console_init(&s3c2440_serial_driver, &s3c2440_uart_inf); |
164 | 164 | ||
165 | static int __init s3c2440_serial_init(void) | 165 | static int __init s3c2440_serial_init(void) |
166 | { | 166 | { |
167 | return s3c24xx_serial_init(&s3c2440_serial_drv, &s3c2440_uart_inf); | 167 | return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf); |
168 | } | 168 | } |
169 | 169 | ||
170 | static void __exit s3c2440_serial_exit(void) | 170 | static void __exit s3c2440_serial_exit(void) |
171 | { | 171 | { |
172 | platform_driver_unregister(&s3c2440_serial_drv); | 172 | platform_driver_unregister(&s3c2440_serial_driver); |
173 | } | 173 | } |
174 | 174 | ||
175 | module_init(s3c2440_serial_init); | 175 | module_init(s3c2440_serial_init); |
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c index 786a067d62ac..26c49e18bdd1 100644 --- a/drivers/serial/s3c24a0.c +++ b/drivers/serial/s3c24a0.c | |||
@@ -92,7 +92,7 @@ static int s3c24a0_serial_probe(struct platform_device *dev) | |||
92 | return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf); | 92 | return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf); |
93 | } | 93 | } |
94 | 94 | ||
95 | static struct platform_driver s3c24a0_serial_drv = { | 95 | static struct platform_driver s3c24a0_serial_driver = { |
96 | .probe = s3c24a0_serial_probe, | 96 | .probe = s3c24a0_serial_probe, |
97 | .remove = __devexit_p(s3c24xx_serial_remove), | 97 | .remove = __devexit_p(s3c24xx_serial_remove), |
98 | .driver = { | 98 | .driver = { |
@@ -101,16 +101,16 @@ static struct platform_driver s3c24a0_serial_drv = { | |||
101 | }, | 101 | }, |
102 | }; | 102 | }; |
103 | 103 | ||
104 | s3c24xx_console_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf); | 104 | s3c24xx_console_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf); |
105 | 105 | ||
106 | static int __init s3c24a0_serial_init(void) | 106 | static int __init s3c24a0_serial_init(void) |
107 | { | 107 | { |
108 | return s3c24xx_serial_init(&s3c24a0_serial_drv, &s3c24a0_uart_inf); | 108 | return s3c24xx_serial_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf); |
109 | } | 109 | } |
110 | 110 | ||
111 | static void __exit s3c24a0_serial_exit(void) | 111 | static void __exit s3c24a0_serial_exit(void) |
112 | { | 112 | { |
113 | platform_driver_unregister(&s3c24a0_serial_drv); | 113 | platform_driver_unregister(&s3c24a0_serial_driver); |
114 | } | 114 | } |
115 | 115 | ||
116 | module_init(s3c24a0_serial_init); | 116 | module_init(s3c24a0_serial_init); |
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c index 48f1a3781f0d..4be92ab50058 100644 --- a/drivers/serial/s3c6400.c +++ b/drivers/serial/s3c6400.c | |||
@@ -122,7 +122,7 @@ static int s3c6400_serial_probe(struct platform_device *dev) | |||
122 | return s3c24xx_serial_probe(dev, &s3c6400_uart_inf); | 122 | return s3c24xx_serial_probe(dev, &s3c6400_uart_inf); |
123 | } | 123 | } |
124 | 124 | ||
125 | static struct platform_driver s3c6400_serial_drv = { | 125 | static struct platform_driver s3c6400_serial_driver = { |
126 | .probe = s3c6400_serial_probe, | 126 | .probe = s3c6400_serial_probe, |
127 | .remove = __devexit_p(s3c24xx_serial_remove), | 127 | .remove = __devexit_p(s3c24xx_serial_remove), |
128 | .driver = { | 128 | .driver = { |
@@ -131,16 +131,16 @@ static struct platform_driver s3c6400_serial_drv = { | |||
131 | }, | 131 | }, |
132 | }; | 132 | }; |
133 | 133 | ||
134 | s3c24xx_console_init(&s3c6400_serial_drv, &s3c6400_uart_inf); | 134 | s3c24xx_console_init(&s3c6400_serial_driver, &s3c6400_uart_inf); |
135 | 135 | ||
136 | static int __init s3c6400_serial_init(void) | 136 | static int __init s3c6400_serial_init(void) |
137 | { | 137 | { |
138 | return s3c24xx_serial_init(&s3c6400_serial_drv, &s3c6400_uart_inf); | 138 | return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf); |
139 | } | 139 | } |
140 | 140 | ||
141 | static void __exit s3c6400_serial_exit(void) | 141 | static void __exit s3c6400_serial_exit(void) |
142 | { | 142 | { |
143 | platform_driver_unregister(&s3c6400_serial_drv); | 143 | platform_driver_unregister(&s3c6400_serial_driver); |
144 | } | 144 | } |
145 | 145 | ||
146 | module_init(s3c6400_serial_init); | 146 | module_init(s3c6400_serial_init); |
diff --git a/drivers/serial/serial_ks8695.c b/drivers/serial/serial_ks8695.c index 998e89dc5aaf..e0665630e4da 100644 --- a/drivers/serial/serial_ks8695.c +++ b/drivers/serial/serial_ks8695.c | |||
@@ -549,7 +549,7 @@ static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = { | |||
549 | .mapbase = KS8695_UART_VA, | 549 | .mapbase = KS8695_UART_VA, |
550 | .iotype = SERIAL_IO_MEM, | 550 | .iotype = SERIAL_IO_MEM, |
551 | .irq = KS8695_IRQ_UART_TX, | 551 | .irq = KS8695_IRQ_UART_TX, |
552 | .uartclk = CLOCK_TICK_RATE * 16, | 552 | .uartclk = KS8695_CLOCK_RATE * 16, |
553 | .fifosize = 16, | 553 | .fifosize = 16, |
554 | .ops = &ks8695uart_pops, | 554 | .ops = &ks8695uart_pops, |
555 | .flags = ASYNC_BOOT_AUTOCONF, | 555 | .flags = ASYNC_BOOT_AUTOCONF, |
diff --git a/drivers/staging/b3dfg/Kconfig b/drivers/staging/b3dfg/Kconfig index 524231047de5..9e6573cf97d3 100644 --- a/drivers/staging/b3dfg/Kconfig +++ b/drivers/staging/b3dfg/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config B3DFG | 1 | config B3DFG |
2 | tristate "Brontes 3d Frame Framegrabber" | 2 | tristate "Brontes 3d Frame Framegrabber" |
3 | depends on PCI | ||
3 | default n | 4 | default n |
4 | ---help--- | 5 | ---help--- |
5 | This driver provides support for the Brontes 3d Framegrabber | 6 | This driver provides support for the Brontes 3d Framegrabber |
diff --git a/drivers/staging/heci/Kconfig b/drivers/staging/heci/Kconfig index ae8d588d3a27..c7206f8bcd93 100644 --- a/drivers/staging/heci/Kconfig +++ b/drivers/staging/heci/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config HECI | 1 | config HECI |
2 | tristate "Intel Management Engine Interface (MEI) Support" | 2 | tristate "Intel Management Engine Interface (MEI) Support" |
3 | depends on PCI | ||
3 | ---help--- | 4 | ---help--- |
4 | The Intel Management Engine Interface (Intel MEI) driver allows | 5 | The Intel Management Engine Interface (Intel MEI) driver allows |
5 | applications to access the Active Management Technology | 6 | applications to access the Active Management Technology |
diff --git a/drivers/staging/rspiusb/rspiusb.c b/drivers/staging/rspiusb/rspiusb.c index 2f8155c1968b..04e2f92c0f62 100644 --- a/drivers/staging/rspiusb/rspiusb.c +++ b/drivers/staging/rspiusb/rspiusb.c | |||
@@ -716,6 +716,8 @@ static int MapUserBuffer(struct ioctl_struct *io, struct device_extension *pdx) | |||
716 | pdx->PixelUrb[frameInfo][i]->transfer_flags = | 716 | pdx->PixelUrb[frameInfo][i]->transfer_flags = |
717 | URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT; | 717 | URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT; |
718 | } | 718 | } |
719 | if (i == 0) | ||
720 | return -EINVAL; | ||
719 | /* only interrupt when last URB completes */ | 721 | /* only interrupt when last URB completes */ |
720 | pdx->PixelUrb[frameInfo][--i]->transfer_flags &= ~URB_NO_INTERRUPT; | 722 | pdx->PixelUrb[frameInfo][--i]->transfer_flags &= ~URB_NO_INTERRUPT; |
721 | pdx->pendedPixelUrbs[frameInfo] = | 723 | pdx->pendedPixelUrbs[frameInfo] = |
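The rspiusb hunk adds a guard before the post-loop "--i" index: if MapUserBuffer() mapped zero pages, the URB array is empty and decrementing i would touch element -1. A hedged sketch of the defensive idiom; arm_urbs(), urbs and count are hypothetical names used only for illustration, and only the i == 0 check mirrors the patch:

        /* Hypothetical helper: mark only the last of a batch of URBs as interrupting. */
        static int arm_urbs(struct urb **urbs, int count)
        {
                int i;

                for (i = 0; i < count; i++)
                        urbs[i]->transfer_flags |= URB_NO_INTERRUPT;
                if (i == 0)
                        return -EINVAL;         /* nothing was built, nothing to index */
                /* safe: i >= 1 here, so --i stays inside the array */
                urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
                return 0;
        }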
diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h index 85175c182432..25b53ac3f820 100644 --- a/drivers/staging/rt2860/rt_linux.h +++ b/drivers/staging/rt2860/rt_linux.h | |||
@@ -43,9 +43,6 @@ | |||
43 | #include "rtmp_type.h" | 43 | #include "rtmp_type.h" |
44 | #include <linux/module.h> | 44 | #include <linux/module.h> |
45 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
46 | #if !defined(RT2860) && !defined(RT30xx) | ||
47 | #include <linux/kthread.h> | ||
48 | #endif | ||
49 | 46 | ||
50 | #include <linux/spinlock.h> | 47 | #include <linux/spinlock.h> |
51 | #include <linux/init.h> | 48 | #include <linux/init.h> |
@@ -166,9 +163,7 @@ typedef int (*HARD_START_XMIT_FUNC)(struct sk_buff *skb, struct net_device *net_ | |||
166 | 163 | ||
167 | #ifndef RT30xx | 164 | #ifndef RT30xx |
168 | typedef struct pid * THREAD_PID; | 165 | typedef struct pid * THREAD_PID; |
169 | #ifdef RT2860 | ||
170 | #define THREAD_PID_INIT_VALUE NULL | 166 | #define THREAD_PID_INIT_VALUE NULL |
171 | #endif | ||
172 | #define GET_PID(_v) find_get_pid(_v) | 167 | #define GET_PID(_v) find_get_pid(_v) |
173 | #define GET_PID_NUMBER(_v) pid_nr(_v) | 168 | #define GET_PID_NUMBER(_v) pid_nr(_v) |
174 | #define CHECK_PID_LEGALITY(_pid) if (pid_nr(_pid) >= 0) | 169 | #define CHECK_PID_LEGALITY(_pid) if (pid_nr(_pid) >= 0) |
@@ -188,12 +183,12 @@ struct os_cookie { | |||
188 | dma_addr_t pAd_pa; | 183 | dma_addr_t pAd_pa; |
189 | #endif | 184 | #endif |
190 | #ifdef RT2870 | 185 | #ifdef RT2870 |
191 | struct usb_device *pUsb_Dev; | 186 | struct usb_device *pUsb_Dev; |
192 | 187 | ||
193 | #ifndef RT30xx | 188 | #ifndef RT30xx |
194 | struct task_struct *MLMEThr_task; | 189 | THREAD_PID MLMEThr_pid; |
195 | struct task_struct *RTUSBCmdThr_task; | 190 | THREAD_PID RTUSBCmdThr_pid; |
196 | struct task_struct *TimerQThr_task; | 191 | THREAD_PID TimerQThr_pid; |
197 | #endif | 192 | #endif |
198 | #ifdef RT30xx | 193 | #ifdef RT30xx |
199 | struct pid *MLMEThr_pid; | 194 | struct pid *MLMEThr_pid; |
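For the rt2870 hunks that follow it helps to remember that CHECK_PID_LEGALITY is a statement macro hiding an "if", as defined a few lines above: #define CHECK_PID_LEGALITY(_pid) if (pid_nr(_pid) >= 0). A use such as

        CHECK_PID_LEGALITY(pObj->MLMEThr_pid)
        {
                up(&(pAd->mlme_semaphore));
        }

therefore expands to

        if (pid_nr(pObj->MLMEThr_pid) >= 0)
        {
                up(&(pAd->mlme_semaphore));
        }

so the brace-enclosed block after each CHECK_PID_LEGALITY() in the thread-termination and RTUSBMlmeUp/RTUSBCMDUp hunks only runs when the stored struct pid looks valid.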
diff --git a/drivers/staging/rt2870/2870_main_dev.c b/drivers/staging/rt2870/2870_main_dev.c index dd01c64fbf61..a4e8696ca39c 100644 --- a/drivers/staging/rt2870/2870_main_dev.c +++ b/drivers/staging/rt2870/2870_main_dev.c | |||
@@ -235,7 +235,7 @@ INT MlmeThread( | |||
235 | DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); | 235 | DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); |
236 | 236 | ||
237 | #ifndef RT30xx | 237 | #ifndef RT30xx |
238 | pObj->MLMEThr_task = NULL; | 238 | pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE; |
239 | #endif | 239 | #endif |
240 | #ifdef RT30xx | 240 | #ifdef RT30xx |
241 | pObj->MLMEThr_pid = NULL; | 241 | pObj->MLMEThr_pid = NULL; |
@@ -348,7 +348,7 @@ INT RTUSBCmdThread( | |||
348 | DBGPRINT(RT_DEBUG_TRACE,( "<---RTUSBCmdThread\n")); | 348 | DBGPRINT(RT_DEBUG_TRACE,( "<---RTUSBCmdThread\n")); |
349 | 349 | ||
350 | #ifndef RT30xx | 350 | #ifndef RT30xx |
351 | pObj->RTUSBCmdThr_task = NULL; | 351 | pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE; |
352 | #endif | 352 | #endif |
353 | #ifdef RT30xx | 353 | #ifdef RT30xx |
354 | pObj->RTUSBCmdThr_pid = NULL; | 354 | pObj->RTUSBCmdThr_pid = NULL; |
@@ -447,7 +447,7 @@ INT TimerQThread( | |||
447 | DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); | 447 | DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__)); |
448 | 448 | ||
449 | #ifndef RT30xx | 449 | #ifndef RT30xx |
450 | pObj->TimerQThr_task = NULL; | 450 | pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE; |
451 | #endif | 451 | #endif |
452 | #ifdef RT30xx | 452 | #ifdef RT30xx |
453 | pObj->TimerQThr_pid = NULL; | 453 | pObj->TimerQThr_pid = NULL; |
@@ -883,46 +883,69 @@ VOID RT28xxThreadTerminate( | |||
883 | 883 | ||
884 | // Terminate Threads | 884 | // Terminate Threads |
885 | #ifndef RT30xx | 885 | #ifndef RT30xx |
886 | BUG_ON(pObj->TimerQThr_task == NULL); | 886 | CHECK_PID_LEGALITY(pObj->TimerQThr_pid) |
887 | CHECK_PID_LEGALITY(task_pid(pObj->TimerQThr_task)) | ||
888 | { | 887 | { |
889 | POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie; | 888 | POS_COOKIE pObj = (POS_COOKIE)pAd->OS_Cookie; |
890 | 889 | ||
891 | printk(KERN_DEBUG "Terminate the TimerQThr pid=%d!\n", | 890 | printk("Terminate the TimerQThr_pid=%d!\n", GET_PID_NUMBER(pObj->TimerQThr_pid)); |
892 | pid_nr(task_pid(pObj->TimerQThr_task))); | ||
893 | mb(); | 891 | mb(); |
894 | pAd->TimerFunc_kill = 1; | 892 | pAd->TimerFunc_kill = 1; |
895 | mb(); | 893 | mb(); |
896 | kthread_stop(pObj->TimerQThr_task); | 894 | ret = KILL_THREAD_PID(pObj->TimerQThr_pid, SIGTERM, 1); |
897 | pObj->TimerQThr_task = NULL; | 895 | if (ret) |
896 | { | ||
897 | printk(KERN_WARNING "%s: unable to stop TimerQThread, pid=%d, ret=%d!\n", | ||
898 | pAd->net_dev->name, GET_PID_NUMBER(pObj->TimerQThr_pid), ret); | ||
899 | } | ||
900 | else | ||
901 | { | ||
902 | wait_for_completion(&pAd->TimerQComplete); | ||
903 | pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE; | ||
904 | } | ||
898 | } | 905 | } |
899 | 906 | ||
900 | BUG_ON(pObj->MLMEThr_task == NULL); | 907 | CHECK_PID_LEGALITY(pObj->MLMEThr_pid) |
901 | CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task)) | ||
902 | { | 908 | { |
903 | printk(KERN_DEBUG "Terminate the MLMEThr pid=%d!\n", | 909 | printk("Terminate the MLMEThr_pid=%d!\n", GET_PID_NUMBER(pObj->MLMEThr_pid)); |
904 | pid_nr(task_pid(pObj->MLMEThr_task))); | ||
905 | mb(); | 910 | mb(); |
906 | pAd->mlme_kill = 1; | 911 | pAd->mlme_kill = 1; |
907 | //RT28XX_MLME_HANDLER(pAd); | 912 | //RT28XX_MLME_HANDLER(pAd); |
908 | mb(); | 913 | mb(); |
909 | kthread_stop(pObj->MLMEThr_task); | 914 | ret = KILL_THREAD_PID(pObj->MLMEThr_pid, SIGTERM, 1); |
910 | pObj->MLMEThr_task = NULL; | 915 | if (ret) |
916 | { | ||
917 | printk (KERN_WARNING "%s: unable to Mlme thread, pid=%d, ret=%d!\n", | ||
918 | pAd->net_dev->name, GET_PID_NUMBER(pObj->MLMEThr_pid), ret); | ||
919 | } | ||
920 | else | ||
921 | { | ||
922 | //wait_for_completion (&pAd->notify); | ||
923 | wait_for_completion (&pAd->mlmeComplete); | ||
924 | pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE; | ||
925 | } | ||
911 | } | 926 | } |
912 | 927 | ||
913 | BUG_ON(pObj->RTUSBCmdThr_task == NULL); | 928 | CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) |
914 | CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) | ||
915 | { | 929 | { |
916 | printk(KERN_DEBUG "Terminate the RTUSBCmdThr pid=%d!\n", | 930 | printk("Terminate the RTUSBCmdThr_pid=%d!\n", GET_PID_NUMBER(pObj->RTUSBCmdThr_pid)); |
917 | pid_nr(task_pid(pObj->RTUSBCmdThr_task))); | ||
918 | mb(); | 931 | mb(); |
919 | NdisAcquireSpinLock(&pAd->CmdQLock); | 932 | NdisAcquireSpinLock(&pAd->CmdQLock); |
920 | pAd->CmdQ.CmdQState = RT2870_THREAD_STOPED; | 933 | pAd->CmdQ.CmdQState = RT2870_THREAD_STOPED; |
921 | NdisReleaseSpinLock(&pAd->CmdQLock); | 934 | NdisReleaseSpinLock(&pAd->CmdQLock); |
922 | mb(); | 935 | mb(); |
923 | //RTUSBCMDUp(pAd); | 936 | //RTUSBCMDUp(pAd); |
924 | kthread_stop(pObj->RTUSBCmdThr_task); | 937 | ret = KILL_THREAD_PID(pObj->RTUSBCmdThr_pid, SIGTERM, 1); |
925 | pObj->RTUSBCmdThr_task = NULL; | 938 | if (ret) |
939 | { | ||
940 | printk(KERN_WARNING "%s: unable to RTUSBCmd thread, pid=%d, ret=%d!\n", | ||
941 | pAd->net_dev->name, GET_PID_NUMBER(pObj->RTUSBCmdThr_pid), ret); | ||
942 | } | ||
943 | else | ||
944 | { | ||
945 | //wait_for_completion (&pAd->notify); | ||
946 | wait_for_completion (&pAd->CmdQComplete); | ||
947 | pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE; | ||
948 | } | ||
926 | } | 949 | } |
927 | #endif | 950 | #endif |
928 | #ifdef RT30xx | 951 | #ifdef RT30xx |
@@ -1045,7 +1068,7 @@ BOOLEAN RT28XXChipsetCheck( | |||
1045 | dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct) | 1068 | dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct) |
1046 | { | 1069 | { |
1047 | #ifndef RT30xx | 1070 | #ifndef RT30xx |
1048 | printk(KERN_DEBUG "rt2870: idVendor = 0x%x, idProduct = 0x%x\n", | 1071 | printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n", |
1049 | #endif | 1072 | #endif |
1050 | #ifdef RT30xx | 1073 | #ifdef RT30xx |
1051 | printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n", | 1074 | printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n", |
diff --git a/drivers/staging/rt2870/common/2870_rtmp_init.c b/drivers/staging/rt2870/common/2870_rtmp_init.c index 0f4c8af97e47..80909e9ab5ae 100644 --- a/drivers/staging/rt2870/common/2870_rtmp_init.c +++ b/drivers/staging/rt2870/common/2870_rtmp_init.c | |||
@@ -700,8 +700,8 @@ NDIS_STATUS AdapterBlockAllocateMemory( | |||
700 | usb_dev = pObj->pUsb_Dev; | 700 | usb_dev = pObj->pUsb_Dev; |
701 | 701 | ||
702 | #ifndef RT30xx | 702 | #ifndef RT30xx |
703 | pObj->MLMEThr_task = NULL; | 703 | pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE; |
704 | pObj->RTUSBCmdThr_task = NULL; | 704 | pObj->RTUSBCmdThr_pid = THREAD_PID_INIT_VALUE; |
705 | #endif | 705 | #endif |
706 | #ifdef RT30xx | 706 | #ifdef RT30xx |
707 | pObj->MLMEThr_pid = NULL; | 707 | pObj->MLMEThr_pid = NULL; |
@@ -743,7 +743,7 @@ NDIS_STATUS CreateThreads( | |||
743 | PRTMP_ADAPTER pAd = net_dev->ml_priv; | 743 | PRTMP_ADAPTER pAd = net_dev->ml_priv; |
744 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; | 744 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; |
745 | #ifndef RT30xx | 745 | #ifndef RT30xx |
746 | struct task_struct *tsk; | 746 | pid_t pid_number = -1; |
747 | #endif | 747 | #endif |
748 | #ifdef RT30xx | 748 | #ifdef RT30xx |
749 | pid_t pid_number; | 749 | pid_t pid_number; |
@@ -762,10 +762,10 @@ NDIS_STATUS CreateThreads( | |||
762 | 762 | ||
763 | // Creat MLME Thread | 763 | // Creat MLME Thread |
764 | #ifndef RT30xx | 764 | #ifndef RT30xx |
765 | pObj->MLMEThr_task = NULL; | 765 | pObj->MLMEThr_pid= THREAD_PID_INIT_VALUE; |
766 | tsk = kthread_run(MlmeThread, pAd, "%s", pAd->net_dev->name); | 766 | pid_number = kernel_thread(MlmeThread, pAd, CLONE_VM); |
767 | 767 | if (pid_number < 0) | |
768 | if (IS_ERR(tsk)) { | 768 | { |
769 | #endif | 769 | #endif |
770 | #ifdef RT30xx | 770 | #ifdef RT30xx |
771 | pObj->MLMEThr_pid = NULL; | 771 | pObj->MLMEThr_pid = NULL; |
@@ -778,7 +778,7 @@ NDIS_STATUS CreateThreads( | |||
778 | } | 778 | } |
779 | 779 | ||
780 | #ifndef RT30xx | 780 | #ifndef RT30xx |
781 | pObj->MLMEThr_task = tsk; | 781 | pObj->MLMEThr_pid = GET_PID(pid_number); |
782 | #endif | 782 | #endif |
783 | #ifdef RT30xx | 783 | #ifdef RT30xx |
784 | pObj->MLMEThr_pid = find_get_pid(pid_number); | 784 | pObj->MLMEThr_pid = find_get_pid(pid_number); |
@@ -788,10 +788,9 @@ NDIS_STATUS CreateThreads( | |||
788 | 788 | ||
789 | // Creat Command Thread | 789 | // Creat Command Thread |
790 | #ifndef RT30xx | 790 | #ifndef RT30xx |
791 | pObj->RTUSBCmdThr_task = NULL; | 791 | pObj->RTUSBCmdThr_pid= THREAD_PID_INIT_VALUE; |
792 | tsk = kthread_run(RTUSBCmdThread, pAd, "%s", pAd->net_dev->name); | 792 | pid_number = kernel_thread(RTUSBCmdThread, pAd, CLONE_VM); |
793 | 793 | if (pid_number < 0) | |
794 | if (IS_ERR(tsk) < 0) | ||
795 | #endif | 794 | #endif |
796 | #ifdef RT30xx | 795 | #ifdef RT30xx |
797 | pObj->RTUSBCmdThr_pid = NULL; | 796 | pObj->RTUSBCmdThr_pid = NULL; |
@@ -804,7 +803,7 @@ NDIS_STATUS CreateThreads( | |||
804 | } | 803 | } |
805 | 804 | ||
806 | #ifndef RT30xx | 805 | #ifndef RT30xx |
807 | pObj->RTUSBCmdThr_task = tsk; | 806 | pObj->RTUSBCmdThr_pid = GET_PID(pid_number); |
808 | #endif | 807 | #endif |
809 | #ifdef RT30xx | 808 | #ifdef RT30xx |
810 | pObj->RTUSBCmdThr_pid = find_get_pid(pid_number); | 809 | pObj->RTUSBCmdThr_pid = find_get_pid(pid_number); |
@@ -812,9 +811,9 @@ NDIS_STATUS CreateThreads( | |||
812 | wait_for_completion(&(pAd->CmdQComplete)); | 811 | wait_for_completion(&(pAd->CmdQComplete)); |
813 | 812 | ||
814 | #ifndef RT30xx | 813 | #ifndef RT30xx |
815 | pObj->TimerQThr_task = NULL; | 814 | pObj->TimerQThr_pid= THREAD_PID_INIT_VALUE; |
816 | tsk = kthread_run(TimerQThread, pAd, "%s", pAd->net_dev->name); | 815 | pid_number = kernel_thread(TimerQThread, pAd, CLONE_VM); |
817 | if (IS_ERR(tsk) < 0) | 816 | if (pid_number < 0) |
818 | #endif | 817 | #endif |
819 | #ifdef RT30xx | 818 | #ifdef RT30xx |
820 | pObj->TimerQThr_pid = NULL; | 819 | pObj->TimerQThr_pid = NULL; |
@@ -826,7 +825,7 @@ NDIS_STATUS CreateThreads( | |||
826 | return NDIS_STATUS_FAILURE; | 825 | return NDIS_STATUS_FAILURE; |
827 | } | 826 | } |
828 | #ifndef RT30xx | 827 | #ifndef RT30xx |
829 | pObj->TimerQThr_task = tsk; | 828 | pObj->TimerQThr_pid = GET_PID(pid_number); |
830 | #endif | 829 | #endif |
831 | #ifdef RT30xx | 830 | #ifdef RT30xx |
832 | pObj->TimerQThr_pid = find_get_pid(pid_number); | 831 | pObj->TimerQThr_pid = find_get_pid(pid_number); |
diff --git a/drivers/staging/rt2870/common/rtusb_io.c b/drivers/staging/rt2870/common/rtusb_io.c index fd1b0c18f2a0..704b5c2d5091 100644 --- a/drivers/staging/rt2870/common/rtusb_io.c +++ b/drivers/staging/rt2870/common/rtusb_io.c | |||
@@ -984,8 +984,7 @@ NDIS_STATUS RTUSBEnqueueCmdFromNdis( | |||
984 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; | 984 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; |
985 | 985 | ||
986 | #ifndef RT30xx | 986 | #ifndef RT30xx |
987 | BUG_ON(pObj->RTUSBCmdThr_task == NULL); | 987 | CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) |
988 | CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) | ||
989 | #endif | 988 | #endif |
990 | #ifdef RT30xx | 989 | #ifdef RT30xx |
991 | if (pObj->RTUSBCmdThr_pid < 0) | 990 | if (pObj->RTUSBCmdThr_pid < 0) |
diff --git a/drivers/staging/rt2870/rt2870.h b/drivers/staging/rt2870/rt2870.h index 29e3b53e52a1..2b8872b2fd9d 100644 --- a/drivers/staging/rt2870/rt2870.h +++ b/drivers/staging/rt2870/rt2870.h | |||
@@ -79,6 +79,7 @@ | |||
79 | { \ | 79 | { \ |
80 | {USB_DEVICE(0x148F,0x2770)}, /* Ralink */ \ | 80 | {USB_DEVICE(0x148F,0x2770)}, /* Ralink */ \ |
81 | {USB_DEVICE(0x1737,0x0071)}, /* Linksys WUSB600N */ \ | 81 | {USB_DEVICE(0x1737,0x0071)}, /* Linksys WUSB600N */ \ |
82 | {USB_DEVICE(0x1737,0x0070)}, /* Linksys */ \ | ||
82 | {USB_DEVICE(0x148F,0x2870)}, /* Ralink */ \ | 83 | {USB_DEVICE(0x148F,0x2870)}, /* Ralink */ \ |
83 | {USB_DEVICE(0x148F,0x3070)}, /* Ralink */ \ | 84 | {USB_DEVICE(0x148F,0x3070)}, /* Ralink */ \ |
84 | {USB_DEVICE(0x0B05,0x1731)}, /* Asus */ \ | 85 | {USB_DEVICE(0x0B05,0x1731)}, /* Asus */ \ |
@@ -93,12 +94,14 @@ | |||
93 | {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ | 94 | {USB_DEVICE(0x14B2,0x3C06)}, /* Conceptronic */ \ |
94 | {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ | 95 | {USB_DEVICE(0x14B2,0x3C28)}, /* Conceptronic */ \ |
95 | {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \ | 96 | {USB_DEVICE(0x2019,0xED06)}, /* Planex Communications, Inc. */ \ |
97 | {USB_DEVICE(0x2019,0xED14)}, /* Planex Communications, Inc. */ \ | ||
96 | {USB_DEVICE(0x2019,0xAB25)}, /* Planex Communications, Inc. RT3070 */ \ | 98 | {USB_DEVICE(0x2019,0xAB25)}, /* Planex Communications, Inc. RT3070 */ \ |
97 | {USB_DEVICE(0x07D1,0x3C09)}, /* D-Link */ \ | 99 | {USB_DEVICE(0x07D1,0x3C09)}, /* D-Link */ \ |
98 | {USB_DEVICE(0x07D1,0x3C11)}, /* D-Link */ \ | 100 | {USB_DEVICE(0x07D1,0x3C11)}, /* D-Link */ \ |
99 | {USB_DEVICE(0x14B2,0x3C07)}, /* AL */ \ | 101 | {USB_DEVICE(0x14B2,0x3C07)}, /* AL */ \ |
100 | {USB_DEVICE(0x14B2,0x3C12)}, /* AL */ \ | 102 | {USB_DEVICE(0x14B2,0x3C12)}, /* AL */ \ |
101 | {USB_DEVICE(0x050D,0x8053)}, /* Belkin */ \ | 103 | {USB_DEVICE(0x050D,0x8053)}, /* Belkin */ \ |
104 | {USB_DEVICE(0x050D,0x815C)}, /* Belkin */ \ | ||
102 | {USB_DEVICE(0x14B2,0x3C23)}, /* Airlink */ \ | 105 | {USB_DEVICE(0x14B2,0x3C23)}, /* Airlink */ \ |
103 | {USB_DEVICE(0x14B2,0x3C27)}, /* Airlink */ \ | 106 | {USB_DEVICE(0x14B2,0x3C27)}, /* Airlink */ \ |
104 | {USB_DEVICE(0x07AA,0x002F)}, /* Corega */ \ | 107 | {USB_DEVICE(0x07AA,0x002F)}, /* Corega */ \ |
@@ -587,16 +590,14 @@ VOID RTUSBBulkRxComplete(purbb_t pUrb, struct pt_regs *pt_regs); | |||
587 | #define RTUSBMlmeUp(pAd) \ | 590 | #define RTUSBMlmeUp(pAd) \ |
588 | { \ | 591 | { \ |
589 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ | 592 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ |
590 | BUG_ON(pObj->MLMEThr_task == NULL); \ | 593 | CHECK_PID_LEGALITY(pObj->MLMEThr_pid) \ |
591 | CHECK_PID_LEGALITY(task_pid(pObj->MLMEThr_task)) \ | ||
592 | up(&(pAd->mlme_semaphore)); \ | 594 | up(&(pAd->mlme_semaphore)); \ |
593 | } | 595 | } |
594 | 596 | ||
595 | #define RTUSBCMDUp(pAd) \ | 597 | #define RTUSBCMDUp(pAd) \ |
596 | { \ | 598 | { \ |
597 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ | 599 | POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie; \ |
598 | BUG_ON(pObj->RTUSBCmdThr_task == NULL); \ | 600 | CHECK_PID_LEGALITY(pObj->RTUSBCmdThr_pid) \ |
599 | CHECK_PID_LEGALITY(task_pid(pObj->RTUSBCmdThr_task)) \ | ||
600 | up(&(pAd->RTUSBCmd_semaphore)); \ | 601 | up(&(pAd->RTUSBCmd_semaphore)); \ |
601 | } | 602 | } |
602 | #endif | 603 | #endif |
diff --git a/drivers/staging/rtl8192su/ieee80211.h b/drivers/staging/rtl8192su/ieee80211.h index 0edb09a536f9..ea9739318037 100644 --- a/drivers/staging/rtl8192su/ieee80211.h +++ b/drivers/staging/rtl8192su/ieee80211.h | |||
@@ -2645,7 +2645,7 @@ extern int ieee80211_encrypt_fragment( | |||
2645 | struct sk_buff *frag, | 2645 | struct sk_buff *frag, |
2646 | int hdr_len); | 2646 | int hdr_len); |
2647 | 2647 | ||
2648 | extern int ieee80211_xmit(struct sk_buff *skb, | 2648 | extern int rtl8192_ieee80211_xmit(struct sk_buff *skb, |
2649 | struct net_device *dev); | 2649 | struct net_device *dev); |
2650 | extern void ieee80211_txb_free(struct ieee80211_txb *); | 2650 | extern void ieee80211_txb_free(struct ieee80211_txb *); |
2651 | 2651 | ||
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h index 720bfcbfadc1..5e3a2cbed2b1 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h | |||
@@ -2645,7 +2645,7 @@ extern int ieee80211_encrypt_fragment( | |||
2645 | struct sk_buff *frag, | 2645 | struct sk_buff *frag, |
2646 | int hdr_len); | 2646 | int hdr_len); |
2647 | 2647 | ||
2648 | extern int ieee80211_xmit(struct sk_buff *skb, | 2648 | extern int rtl8192_ieee80211_xmit(struct sk_buff *skb, |
2649 | struct net_device *dev); | 2649 | struct net_device *dev); |
2650 | extern void ieee80211_txb_free(struct ieee80211_txb *); | 2650 | extern void ieee80211_txb_free(struct ieee80211_txb *); |
2651 | 2651 | ||
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c index 7294572b990f..cba12b84be5c 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c | |||
@@ -618,7 +618,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u | |||
618 | } | 618 | } |
619 | } | 619 | } |
620 | 620 | ||
621 | int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | 621 | int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) |
622 | { | 622 | { |
623 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) | 623 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) |
624 | struct ieee80211_device *ieee = netdev_priv(dev); | 624 | struct ieee80211_device *ieee = netdev_priv(dev); |
@@ -943,5 +943,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
943 | return 1; | 943 | return 1; |
944 | 944 | ||
945 | } | 945 | } |
946 | EXPORT_SYMBOL(rtl8192_ieee80211_xmit); | ||
946 | 947 | ||
947 | EXPORT_SYMBOL(ieee80211_txb_free); | 948 | EXPORT_SYMBOL(ieee80211_txb_free); |
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c index 4ab250743e81..70f81a8f1291 100644 --- a/drivers/staging/rtl8192su/r8192U_core.c +++ b/drivers/staging/rtl8192su/r8192U_core.c | |||
@@ -12142,7 +12142,7 @@ static const struct net_device_ops rtl8192_netdev_ops = { | |||
12142 | .ndo_set_mac_address = r8192_set_mac_adr, | 12142 | .ndo_set_mac_address = r8192_set_mac_adr, |
12143 | .ndo_validate_addr = eth_validate_addr, | 12143 | .ndo_validate_addr = eth_validate_addr, |
12144 | .ndo_change_mtu = eth_change_mtu, | 12144 | .ndo_change_mtu = eth_change_mtu, |
12145 | .ndo_start_xmit = ieee80211_xmit, | 12145 | .ndo_start_xmit = rtl8192_ieee80211_xmit, |
12146 | }; | 12146 | }; |
12147 | 12147 | ||
12148 | #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) | 12148 | #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index e1f89416ef8c..2bfc41ece0e1 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -387,7 +387,6 @@ static void acm_rx_tasklet(unsigned long _acm) | |||
387 | struct acm_ru *rcv; | 387 | struct acm_ru *rcv; |
388 | unsigned long flags; | 388 | unsigned long flags; |
389 | unsigned char throttled; | 389 | unsigned char throttled; |
390 | struct usb_host_endpoint *ep; | ||
391 | 390 | ||
392 | dbg("Entering acm_rx_tasklet"); | 391 | dbg("Entering acm_rx_tasklet"); |
393 | 392 | ||
@@ -463,14 +462,12 @@ urbs: | |||
463 | 462 | ||
464 | rcv->buffer = buf; | 463 | rcv->buffer = buf; |
465 | 464 | ||
466 | ep = (usb_pipein(acm->rx_endpoint) ? acm->dev->ep_in : acm->dev->ep_out) | 465 | if (acm->is_int_ep) |
467 | [usb_pipeendpoint(acm->rx_endpoint)]; | ||
468 | if (usb_endpoint_xfer_int(&ep->desc)) | ||
469 | usb_fill_int_urb(rcv->urb, acm->dev, | 466 | usb_fill_int_urb(rcv->urb, acm->dev, |
470 | acm->rx_endpoint, | 467 | acm->rx_endpoint, |
471 | buf->base, | 468 | buf->base, |
472 | acm->readsize, | 469 | acm->readsize, |
473 | acm_read_bulk, rcv, ep->desc.bInterval); | 470 | acm_read_bulk, rcv, acm->bInterval); |
474 | else | 471 | else |
475 | usb_fill_bulk_urb(rcv->urb, acm->dev, | 472 | usb_fill_bulk_urb(rcv->urb, acm->dev, |
476 | acm->rx_endpoint, | 473 | acm->rx_endpoint, |
@@ -1183,6 +1180,9 @@ made_compressed_probe: | |||
1183 | spin_lock_init(&acm->read_lock); | 1180 | spin_lock_init(&acm->read_lock); |
1184 | mutex_init(&acm->mutex); | 1181 | mutex_init(&acm->mutex); |
1185 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); | 1182 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); |
1183 | acm->is_int_ep = usb_endpoint_xfer_int(epread); | ||
1184 | if (acm->is_int_ep) | ||
1185 | acm->bInterval = epread->bInterval; | ||
1186 | tty_port_init(&acm->port); | 1186 | tty_port_init(&acm->port); |
1187 | acm->port.ops = &acm_port_ops; | 1187 | acm->port.ops = &acm_port_ops; |
1188 | 1188 | ||
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 1602324808ba..c4a0ee8ffccf 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
@@ -126,6 +126,8 @@ struct acm { | |||
126 | unsigned int ctrl_caps; /* control capabilities from the class specific header */ | 126 | unsigned int ctrl_caps; /* control capabilities from the class specific header */ |
127 | unsigned int susp_count; /* number of suspended interfaces */ | 127 | unsigned int susp_count; /* number of suspended interfaces */ |
128 | int combined_interfaces:1; /* control and data collapsed */ | 128 | int combined_interfaces:1; /* control and data collapsed */ |
129 | int is_int_ep:1; /* interrupt endpoints contrary to spec used */ | ||
130 | u8 bInterval; | ||
129 | struct acm_wb *delayed_wb; /* write queued for a device about to be woken */ | 131 | struct acm_wb *delayed_wb; /* write queued for a device about to be woken */ |
130 | }; | 132 | }; |
131 | 133 | ||
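The cdc-acm change records at probe time whether the read pipe actually points at an interrupt endpoint (some devices wire it that way, contrary to the CDC spec) and caches that endpoint's bInterval, instead of re-deriving both from dev->ep_in[] on every pass of the rx tasklet. A hedged sketch of the resulting split, using only names from the hunks:

        /* probe: endpoint type and interval are fixed for the device's lifetime */
        acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
        acm->is_int_ep = usb_endpoint_xfer_int(epread);
        if (acm->is_int_ep)
                acm->bInterval = epread->bInterval;

        /* rx tasklet: pick the URB initialiser from the cached flag */
        if (acm->is_int_ep)
                usb_fill_int_urb(rcv->urb, acm->dev, acm->rx_endpoint,
                                 buf->base, acm->readsize,
                                 acm_read_bulk, rcv, acm->bInterval);
        else
                usb_fill_bulk_urb(rcv->urb, acm->dev, acm->rx_endpoint,
                                  buf->base, acm->readsize,
                                  acm_read_bulk, rcv);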
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 38b8bce782d6..4247eccf858c 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -595,7 +595,7 @@ static int usbdev_open(struct inode *inode, struct file *file) | |||
595 | if (!ps) | 595 | if (!ps) |
596 | goto out; | 596 | goto out; |
597 | 597 | ||
598 | ret = -ENOENT; | 598 | ret = -ENODEV; |
599 | 599 | ||
600 | /* usbdev device-node */ | 600 | /* usbdev device-node */ |
601 | if (imajor(inode) == USB_DEVICE_MAJOR) | 601 | if (imajor(inode) == USB_DEVICE_MAJOR) |
@@ -1321,7 +1321,8 @@ static int get_urb32(struct usbdevfs_urb *kurb, | |||
1321 | struct usbdevfs_urb32 __user *uurb) | 1321 | struct usbdevfs_urb32 __user *uurb) |
1322 | { | 1322 | { |
1323 | __u32 uptr; | 1323 | __u32 uptr; |
1324 | if (get_user(kurb->type, &uurb->type) || | 1324 | if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) || |
1325 | __get_user(kurb->type, &uurb->type) || | ||
1325 | __get_user(kurb->endpoint, &uurb->endpoint) || | 1326 | __get_user(kurb->endpoint, &uurb->endpoint) || |
1326 | __get_user(kurb->status, &uurb->status) || | 1327 | __get_user(kurb->status, &uurb->status) || |
1327 | __get_user(kurb->flags, &uurb->flags) || | 1328 | __get_user(kurb->flags, &uurb->flags) || |
@@ -1536,8 +1537,9 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg) | |||
1536 | u32 udata; | 1537 | u32 udata; |
1537 | 1538 | ||
1538 | uioc = compat_ptr((long)arg); | 1539 | uioc = compat_ptr((long)arg); |
1539 | if (get_user(ctrl.ifno, &uioc->ifno) || | 1540 | if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) || |
1540 | get_user(ctrl.ioctl_code, &uioc->ioctl_code) || | 1541 | __get_user(ctrl.ifno, &uioc->ifno) || |
1542 | __get_user(ctrl.ioctl_code, &uioc->ioctl_code) || | ||
1541 | __get_user(udata, &uioc->data)) | 1543 | __get_user(udata, &uioc->data)) |
1542 | return -EFAULT; | 1544 | return -EFAULT; |
1543 | ctrl.data = compat_ptr(udata); | 1545 | ctrl.data = compat_ptr(udata); |
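Both devio.c hunks apply the same rule: get_user() performs its own address check while __get_user() skips it, so a sequence that starts with get_user() and continues with __get_user() only ever validates the first field's address. The fix validates the whole user structure once with access_ok() and then uses the unchecked __get_user() for every field. A minimal sketch of the idiom; the struct shape and field names below are illustrative only:

        struct user_req { u32 a; u32 b; } __user *ureq = ptr;
        u32 a, b;

        if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)) ||
            __get_user(a, &ureq->a) ||
            __get_user(b, &ureq->b))
                return -EFAULT;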
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 7d03549c3339..11c627ce6022 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -903,7 +903,8 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
903 | /* already started */ | 903 | /* already started */ |
904 | break; | 904 | break; |
905 | case QH_STATE_IDLE: | 905 | case QH_STATE_IDLE: |
906 | WARN_ON(1); | 906 | /* QH might be waiting for a Clear-TT-Buffer */ |
907 | qh_completions(ehci, qh); | ||
907 | break; | 908 | break; |
908 | } | 909 | } |
909 | break; | 910 | break; |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 9a1384747f3b..7673554fa64d 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -375,12 +375,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
375 | */ | 375 | */ |
376 | if ((token & QTD_STS_XACT) && | 376 | if ((token & QTD_STS_XACT) && |
377 | QTD_CERR(token) == 0 && | 377 | QTD_CERR(token) == 0 && |
378 | --qh->xacterrs > 0 && | 378 | ++qh->xacterrs < QH_XACTERR_MAX && |
379 | !urb->unlinked) { | 379 | !urb->unlinked) { |
380 | ehci_dbg(ehci, | 380 | ehci_dbg(ehci, |
381 | "detected XactErr len %zu/%zu retry %d\n", | 381 | "detected XactErr len %zu/%zu retry %d\n", |
382 | qtd->length - QTD_LENGTH(token), qtd->length, | 382 | qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); |
383 | QH_XACTERR_MAX - qh->xacterrs); | ||
384 | 383 | ||
385 | /* reset the token in the qtd and the | 384 | /* reset the token in the qtd and the |
386 | * qh overlay (which still contains | 385 | * qh overlay (which still contains |
@@ -494,7 +493,7 @@ halt: | |||
494 | last = qtd; | 493 | last = qtd; |
495 | 494 | ||
496 | /* reinit the xacterr counter for the next qtd */ | 495 | /* reinit the xacterr counter for the next qtd */ |
497 | qh->xacterrs = QH_XACTERR_MAX; | 496 | qh->xacterrs = 0; |
498 | } | 497 | } |
499 | 498 | ||
500 | /* last urb's completion might still need calling */ | 499 | /* last urb's completion might still need calling */ |
@@ -940,7 +939,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
940 | head->qh_next.qh = qh; | 939 | head->qh_next.qh = qh; |
941 | head->hw_next = dma; | 940 | head->hw_next = dma; |
942 | 941 | ||
943 | qh->xacterrs = QH_XACTERR_MAX; | 942 | qh_get(qh); |
943 | qh->xacterrs = 0; | ||
944 | qh->qh_state = QH_STATE_LINKED; | 944 | qh->qh_state = QH_STATE_LINKED; |
945 | /* qtd completions reported later by interrupt */ | 945 | /* qtd completions reported later by interrupt */ |
946 | } | 946 | } |
@@ -1080,7 +1080,7 @@ submit_async ( | |||
1080 | * the HC and TT handle it when the TT has a buffer ready. | 1080 | * the HC and TT handle it when the TT has a buffer ready. |
1081 | */ | 1081 | */ |
1082 | if (likely (qh->qh_state == QH_STATE_IDLE)) | 1082 | if (likely (qh->qh_state == QH_STATE_IDLE)) |
1083 | qh_link_async (ehci, qh_get (qh)); | 1083 | qh_link_async(ehci, qh); |
1084 | done: | 1084 | done: |
1085 | spin_unlock_irqrestore (&ehci->lock, flags); | 1085 | spin_unlock_irqrestore (&ehci->lock, flags); |
1086 | if (unlikely (qh == NULL)) | 1086 | if (unlikely (qh == NULL)) |
@@ -1115,8 +1115,6 @@ static void end_unlink_async (struct ehci_hcd *ehci) | |||
1115 | && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) | 1115 | && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) |
1116 | qh_link_async (ehci, qh); | 1116 | qh_link_async (ehci, qh); |
1117 | else { | 1117 | else { |
1118 | qh_put (qh); // refcount from async list | ||
1119 | |||
1120 | /* it's not free to turn the async schedule on/off; leave it | 1118 | /* it's not free to turn the async schedule on/off; leave it |
1121 | * active but idle for a while once it empties. | 1119 | * active but idle for a while once it empties. |
1122 | */ | 1120 | */ |
@@ -1124,6 +1122,7 @@ static void end_unlink_async (struct ehci_hcd *ehci) | |||
1124 | && ehci->async->qh_next.qh == NULL) | 1122 | && ehci->async->qh_next.qh == NULL) |
1125 | timer_action (ehci, TIMER_ASYNC_OFF); | 1123 | timer_action (ehci, TIMER_ASYNC_OFF); |
1126 | } | 1124 | } |
1125 | qh_put(qh); /* refcount from async list */ | ||
1127 | 1126 | ||
1128 | if (next) { | 1127 | if (next) { |
1129 | ehci->reclaim = NULL; | 1128 | ehci->reclaim = NULL; |
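The qh->xacterrs handling in ehci-q.c (and qh_link_periodic() in the ehci-sched.c hunk that follows) switches the transaction-error retry counter from counting down from QH_XACTERR_MAX to counting up from 0, so every reset site now just zeroes it. The two idioms allow the same number of retries; only the initial value and the comparison change. A side-by-side sketch, with retry() standing in as a placeholder for re-activating the qtd:

        /* old: initialise to the limit, count down */
        qh->xacterrs = QH_XACTERR_MAX;
        ...
        if ((token & QTD_STS_XACT) && QTD_CERR(token) == 0 &&
            --qh->xacterrs > 0 && !urb->unlinked)
                retry();

        /* new: initialise to zero, count up towards the limit */
        qh->xacterrs = 0;
        ...
        if ((token & QTD_STS_XACT) && QTD_CERR(token) == 0 &&
            ++qh->xacterrs < QH_XACTERR_MAX && !urb->unlinked)
                retry();

Counting up also lets the debug message print the counter directly instead of computing QH_XACTERR_MAX - qh->xacterrs.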
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 74f7f83b29ad..edd61ee90323 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -542,6 +542,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
542 | } | 542 | } |
543 | } | 543 | } |
544 | qh->qh_state = QH_STATE_LINKED; | 544 | qh->qh_state = QH_STATE_LINKED; |
545 | qh->xacterrs = 0; | ||
545 | qh_get (qh); | 546 | qh_get (qh); |
546 | 547 | ||
547 | /* update per-qh bandwidth for usbfs */ | 548 | /* update per-qh bandwidth for usbfs */ |
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 70073b157f0a..803adcb5ac1d 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -12,6 +12,7 @@ config USB_MUSB_HDRC | |||
12 | depends on !SUPERH | 12 | depends on !SUPERH |
13 | select NOP_USB_XCEIV if ARCH_DAVINCI | 13 | select NOP_USB_XCEIV if ARCH_DAVINCI |
14 | select TWL4030_USB if MACH_OMAP_3430SDP | 14 | select TWL4030_USB if MACH_OMAP_3430SDP |
15 | select NOP_USB_XCEIV if MACH_OMAP3EVM | ||
15 | select USB_OTG_UTILS | 16 | select USB_OTG_UTILS |
16 | tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' | 17 | tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' |
17 | help | 18 | help |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b574878c78b2..8fec5d4455c9 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -699,6 +699,9 @@ static struct usb_device_id id_table_combined [] = { | |||
699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, | 700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, |
701 | { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, | 701 | { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, |
702 | { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, | ||
703 | { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), | ||
704 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
702 | { }, /* Optional parameter entry */ | 705 | { }, /* Optional parameter entry */ |
703 | { } /* Terminating entry */ | 706 | { } /* Terminating entry */ |
704 | }; | 707 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 24dbd99e87d7..8c92b88166ae 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -954,6 +954,20 @@ | |||
954 | #define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ | 954 | #define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ |
955 | 955 | ||
956 | /* | 956 | /* |
957 | * Bayer Ascensia Contour blood glucose meter USB-converter cable. | ||
958 | * http://winglucofacts.com/cables/ | ||
959 | */ | ||
960 | #define BAYER_VID 0x1A79 | ||
961 | #define BAYER_CONTOUR_CABLE_PID 0x6001 | ||
962 | |||
963 | /* | ||
964 | * Marvell OpenRD Base, Client | ||
965 | * http://www.open-rd.org | ||
966 | * OpenRD Base, Client use VID 0x0403 | ||
967 | */ | ||
968 | #define MARVELL_OPENRD_PID 0x9e90 | ||
969 | |||
970 | /* | ||
957 | * BmRequestType: 1100 0000b | 971 | * BmRequestType: 1100 0000b |
958 | * bRequest: FTDI_E2_READ | 972 | * bRequest: FTDI_E2_READ |
959 | * wValue: 0 | 973 | * wValue: 0 |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 7d15bfa7c2db..3e86815b2705 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -95,6 +95,7 @@ static struct usb_device_id id_table [] = { | |||
95 | { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, | 95 | { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, |
96 | { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, | 96 | { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, |
97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, | 97 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, |
98 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, | ||
98 | { } /* Terminating entry */ | 99 | { } /* Terminating entry */ |
99 | }; | 100 | }; |
100 | 101 | ||
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 12aac7d2462d..ee9505e1dd92 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -126,3 +126,7 @@ | |||
126 | /* Cressi Edy (diving computer) PC interface */ | 126 | /* Cressi Edy (diving computer) PC interface */ |
127 | #define CRESSI_VENDOR_ID 0x04b8 | 127 | #define CRESSI_VENDOR_ID 0x04b8 |
128 | #define CRESSI_EDY_PRODUCT_ID 0x0521 | 128 | #define CRESSI_EDY_PRODUCT_ID 0x0521 |
129 | |||
130 | /* Sony, USB data cable for CMD-Jxx mobile phones */ | ||
131 | #define SONY_VENDOR_ID 0x054c | ||
132 | #define SONY_QN3USB_PRODUCT_ID 0x0437 | ||
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1b9c5dd0fb27..7477d411959f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -838,6 +838,13 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001, | |||
838 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 838 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
839 | US_FL_FIX_CAPACITY ), | 839 | US_FL_FIX_CAPACITY ), |
840 | 840 | ||
841 | /* Reported by Rogerio Brito <rbrito@ime.usp.br> */ | ||
842 | UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001, | ||
843 | "Prolific Technology, Inc.", | ||
844 | "Mass Storage Device", | ||
845 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
846 | US_FL_NOT_LOCKABLE ), | ||
847 | |||
841 | /* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */ | 848 | /* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */ |
842 | /* Change to bcdDeviceMin (0x0100 to 0x0001) reported by | 849 | /* Change to bcdDeviceMin (0x0100 to 0x0001) reported by |
843 | * Thomas Bartosik <tbartdev@gmx-topmail.de> */ | 850 | * Thomas Bartosik <tbartdev@gmx-topmail.de> */ |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 471a9a60376a..3a44695b9c09 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -1082,7 +1082,6 @@ static void fbcon_init(struct vc_data *vc, int init) | |||
1082 | new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); | 1082 | new_rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); |
1083 | new_cols /= vc->vc_font.width; | 1083 | new_cols /= vc->vc_font.width; |
1084 | new_rows /= vc->vc_font.height; | 1084 | new_rows /= vc->vc_font.height; |
1085 | vc_resize(vc, new_cols, new_rows); | ||
1086 | 1085 | ||
1087 | /* | 1086 | /* |
1088 | * We must always set the mode. The mode of the previous console | 1087 | * We must always set the mode. The mode of the previous console |
@@ -1111,10 +1110,11 @@ static void fbcon_init(struct vc_data *vc, int init) | |||
1111 | * vc_{cols,rows}, but we must not set those if we are only | 1110 | * vc_{cols,rows}, but we must not set those if we are only |
1112 | * resizing the console. | 1111 | * resizing the console. |
1113 | */ | 1112 | */ |
1114 | if (!init) { | 1113 | if (init) { |
1115 | vc->vc_cols = new_cols; | 1114 | vc->vc_cols = new_cols; |
1116 | vc->vc_rows = new_rows; | 1115 | vc->vc_rows = new_rows; |
1117 | } | 1116 | } else |
1117 | vc_resize(vc, new_cols, new_rows); | ||
1118 | 1118 | ||
1119 | if (logo) | 1119 | if (logo) |
1120 | fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); | 1120 | fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); |
diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/console/fbcon_rotate.h index 75be5ce53dc5..e233444cda66 100644 --- a/drivers/video/console/fbcon_rotate.h +++ b/drivers/video/console/fbcon_rotate.h | |||
@@ -45,7 +45,7 @@ static inline void rotate_ud(const char *in, char *out, u32 width, u32 height) | |||
45 | width = (width + 7) & ~7; | 45 | width = (width + 7) & ~7; |
46 | 46 | ||
47 | for (i = 0; i < height; i++) { | 47 | for (i = 0; i < height; i++) { |
48 | for (j = 0; j < width; j++) { | 48 | for (j = 0; j < width - shift; j++) { |
49 | if (pattern_test_bit(j, i, width, in)) | 49 | if (pattern_test_bit(j, i, width, in)) |
50 | pattern_set_bit(width - (1 + j + shift), | 50 | pattern_set_bit(width - (1 + j + shift), |
51 | height - (1 + i), | 51 | height - (1 + i), |
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index ef7870f5ea08..857b3668b3ba 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c | |||
@@ -957,9 +957,14 @@ static int __devinit sticore_pci_init(struct pci_dev *pd, | |||
957 | #ifdef CONFIG_PCI | 957 | #ifdef CONFIG_PCI |
958 | unsigned long fb_base, rom_base; | 958 | unsigned long fb_base, rom_base; |
959 | unsigned int fb_len, rom_len; | 959 | unsigned int fb_len, rom_len; |
960 | int err; | ||
960 | struct sti_struct *sti; | 961 | struct sti_struct *sti; |
961 | 962 | ||
962 | pci_enable_device(pd); | 963 | err = pci_enable_device(pd); |
964 | if (err < 0) { | ||
965 | dev_err(&pd->dev, "Cannot enable PCI device\n"); | ||
966 | return err; | ||
967 | } | ||
963 | 968 | ||
964 | fb_base = pci_resource_start(pd, 0); | 969 | fb_base = pci_resource_start(pd, 0); |
965 | fb_len = pci_resource_len(pd, 0); | 970 | fb_len = pci_resource_len(pd, 0); |
@@ -1048,7 +1053,7 @@ static void __devinit sti_init_roms(void) | |||
1048 | 1053 | ||
1049 | /* Register drivers for native & PCI cards */ | 1054 | /* Register drivers for native & PCI cards */ |
1050 | register_parisc_driver(&pa_sti_driver); | 1055 | register_parisc_driver(&pa_sti_driver); |
1051 | pci_register_driver(&pci_sti_driver); | 1056 | WARN_ON(pci_register_driver(&pci_sti_driver)); |
1052 | 1057 | ||
1053 | /* if we didn't find the given default sti, take the first one */ | 1058 | /* if we didn't find the given default sti, take the first one */ |
1054 | if (!default_sti) | 1059 | if (!default_sti) |
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index f8778cde2183..054ef29be479 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c | |||
@@ -669,7 +669,8 @@ static uint32_t bpp_to_pixfmt(int bpp) | |||
669 | } | 669 | } |
670 | 670 | ||
671 | static int mx3fb_blank(int blank, struct fb_info *fbi); | 671 | static int mx3fb_blank(int blank, struct fb_info *fbi); |
672 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len); | 672 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, |
673 | bool lock); | ||
673 | static int mx3fb_unmap_video_memory(struct fb_info *fbi); | 674 | static int mx3fb_unmap_video_memory(struct fb_info *fbi); |
674 | 675 | ||
675 | /** | 676 | /** |
@@ -711,12 +712,7 @@ static void mx3fb_dma_done(void *arg) | |||
711 | complete(&mx3_fbi->flip_cmpl); | 712 | complete(&mx3_fbi->flip_cmpl); |
712 | } | 713 | } |
713 | 714 | ||
714 | /** | 715 | static int __set_par(struct fb_info *fbi, bool lock) |
715 | * mx3fb_set_par() - set framebuffer parameters and change the operating mode. | ||
716 | * @fbi: framebuffer information pointer. | ||
717 | * @return: 0 on success or negative error code on failure. | ||
718 | */ | ||
719 | static int mx3fb_set_par(struct fb_info *fbi) | ||
720 | { | 716 | { |
721 | u32 mem_len; | 717 | u32 mem_len; |
722 | struct ipu_di_signal_cfg sig_cfg; | 718 | struct ipu_di_signal_cfg sig_cfg; |
@@ -727,10 +723,6 @@ static int mx3fb_set_par(struct fb_info *fbi) | |||
727 | struct idmac_video_param *video = &ichan->params.video; | 723 | struct idmac_video_param *video = &ichan->params.video; |
728 | struct scatterlist *sg = mx3_fbi->sg; | 724 | struct scatterlist *sg = mx3_fbi->sg; |
729 | 725 | ||
730 | dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+'); | ||
731 | |||
732 | mutex_lock(&mx3_fbi->mutex); | ||
733 | |||
734 | /* Total cleanup */ | 726 | /* Total cleanup */ |
735 | if (mx3_fbi->txd) | 727 | if (mx3_fbi->txd) |
736 | sdc_disable_channel(mx3_fbi); | 728 | sdc_disable_channel(mx3_fbi); |
@@ -742,10 +734,8 @@ static int mx3fb_set_par(struct fb_info *fbi) | |||
742 | if (fbi->fix.smem_start) | 734 | if (fbi->fix.smem_start) |
743 | mx3fb_unmap_video_memory(fbi); | 735 | mx3fb_unmap_video_memory(fbi); |
744 | 736 | ||
745 | if (mx3fb_map_video_memory(fbi, mem_len) < 0) { | 737 | if (mx3fb_map_video_memory(fbi, mem_len, lock) < 0) |
746 | mutex_unlock(&mx3_fbi->mutex); | ||
747 | return -ENOMEM; | 738 | return -ENOMEM; |
748 | } | ||
749 | } | 739 | } |
750 | 740 | ||
751 | sg_init_table(&sg[0], 1); | 741 | sg_init_table(&sg[0], 1); |
@@ -791,7 +781,6 @@ static int mx3fb_set_par(struct fb_info *fbi) | |||
791 | fbi->var.vsync_len, | 781 | fbi->var.vsync_len, |
792 | fbi->var.lower_margin + | 782 | fbi->var.lower_margin + |
793 | fbi->var.vsync_len, sig_cfg) != 0) { | 783 | fbi->var.vsync_len, sig_cfg) != 0) { |
794 | mutex_unlock(&mx3_fbi->mutex); | ||
795 | dev_err(fbi->device, | 784 | dev_err(fbi->device, |
796 | "mx3fb: Error initializing panel.\n"); | 785 | "mx3fb: Error initializing panel.\n"); |
797 | return -EINVAL; | 786 | return -EINVAL; |
@@ -810,9 +799,30 @@ static int mx3fb_set_par(struct fb_info *fbi) | |||
810 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) | 799 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) |
811 | sdc_enable_channel(mx3_fbi); | 800 | sdc_enable_channel(mx3_fbi); |
812 | 801 | ||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | /** | ||
806 | * mx3fb_set_par() - set framebuffer parameters and change the operating mode. | ||
807 | * @fbi: framebuffer information pointer. | ||
808 | * @return: 0 on success or negative error code on failure. | ||
809 | */ | ||
810 | static int mx3fb_set_par(struct fb_info *fbi) | ||
811 | { | ||
812 | struct mx3fb_info *mx3_fbi = fbi->par; | ||
813 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; | ||
814 | struct idmac_channel *ichan = mx3_fbi->idmac_channel; | ||
815 | int ret; | ||
816 | |||
817 | dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+'); | ||
818 | |||
819 | mutex_lock(&mx3_fbi->mutex); | ||
820 | |||
821 | ret = __set_par(fbi, true); | ||
822 | |||
813 | mutex_unlock(&mx3_fbi->mutex); | 823 | mutex_unlock(&mx3_fbi->mutex); |
814 | 824 | ||
815 | return 0; | 825 | return ret; |
816 | } | 826 | } |
817 | 827 | ||
818 | /** | 828 | /** |
@@ -966,21 +976,11 @@ static int mx3fb_setcolreg(unsigned int regno, unsigned int red, | |||
966 | return ret; | 976 | return ret; |
967 | } | 977 | } |
968 | 978 | ||
969 | /** | 979 | static void __blank(int blank, struct fb_info *fbi) |
970 | * mx3fb_blank() - blank the display. | ||
971 | */ | ||
972 | static int mx3fb_blank(int blank, struct fb_info *fbi) | ||
973 | { | 980 | { |
974 | struct mx3fb_info *mx3_fbi = fbi->par; | 981 | struct mx3fb_info *mx3_fbi = fbi->par; |
975 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; | 982 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; |
976 | 983 | ||
977 | dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__, | ||
978 | blank, fbi->screen_base, fbi->fix.smem_len); | ||
979 | |||
980 | if (mx3_fbi->blank == blank) | ||
981 | return 0; | ||
982 | |||
983 | mutex_lock(&mx3_fbi->mutex); | ||
984 | mx3_fbi->blank = blank; | 984 | mx3_fbi->blank = blank; |
985 | 985 | ||
986 | switch (blank) { | 986 | switch (blank) { |
@@ -999,6 +999,23 @@ static int mx3fb_blank(int blank, struct fb_info *fbi) | |||
999 | sdc_set_brightness(mx3fb, mx3fb->backlight_level); | 999 | sdc_set_brightness(mx3fb, mx3fb->backlight_level); |
1000 | break; | 1000 | break; |
1001 | } | 1001 | } |
1002 | } | ||
1003 | |||
1004 | /** | ||
1005 | * mx3fb_blank() - blank the display. | ||
1006 | */ | ||
1007 | static int mx3fb_blank(int blank, struct fb_info *fbi) | ||
1008 | { | ||
1009 | struct mx3fb_info *mx3_fbi = fbi->par; | ||
1010 | |||
1011 | dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__, | ||
1012 | blank, fbi->screen_base, fbi->fix.smem_len); | ||
1013 | |||
1014 | if (mx3_fbi->blank == blank) | ||
1015 | return 0; | ||
1016 | |||
1017 | mutex_lock(&mx3_fbi->mutex); | ||
1018 | __blank(blank, fbi); | ||
1002 | mutex_unlock(&mx3_fbi->mutex); | 1019 | mutex_unlock(&mx3_fbi->mutex); |
1003 | 1020 | ||
1004 | return 0; | 1021 | return 0; |
@@ -1198,6 +1215,7 @@ static int mx3fb_resume(struct platform_device *pdev) | |||
1198 | * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. | 1215 | * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. |
1199 | * @fbi: framebuffer information pointer | 1216 | * @fbi: framebuffer information pointer |
1200 | * @mem_len: length of mapped memory | 1217 | * @mem_len: length of mapped memory |
1218 | * @lock: do not lock during initialisation | ||
1201 | * @return: Error code indicating success or failure | 1219 | * @return: Error code indicating success or failure |
1202 | * | 1220 | * |
1203 | * This buffer is remapped into a non-cached, non-buffered, memory region to | 1221 | * This buffer is remapped into a non-cached, non-buffered, memory region to |
@@ -1205,7 +1223,8 @@ static int mx3fb_resume(struct platform_device *pdev) | |||
1205 | * area is remapped, all virtual memory access to the video memory should occur | 1223 | * area is remapped, all virtual memory access to the video memory should occur |
1206 | * at the new region. | 1224 | * at the new region. |
1207 | */ | 1225 | */ |
1208 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len) | 1226 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, |
1227 | bool lock) | ||
1209 | { | 1228 | { |
1210 | int retval = 0; | 1229 | int retval = 0; |
1211 | dma_addr_t addr; | 1230 | dma_addr_t addr; |
@@ -1221,10 +1240,12 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len) | |||
1221 | goto err0; | 1240 | goto err0; |
1222 | } | 1241 | } |
1223 | 1242 | ||
1224 | mutex_lock(&fbi->mm_lock); | 1243 | if (lock) |
1244 | mutex_lock(&fbi->mm_lock); | ||
1225 | fbi->fix.smem_start = addr; | 1245 | fbi->fix.smem_start = addr; |
1226 | fbi->fix.smem_len = mem_len; | 1246 | fbi->fix.smem_len = mem_len; |
1227 | mutex_unlock(&fbi->mm_lock); | 1247 | if (lock) |
1248 | mutex_unlock(&fbi->mm_lock); | ||
1228 | 1249 | ||
1229 | dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", | 1250 | dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", |
1230 | (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); | 1251 | (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); |
@@ -1365,6 +1386,11 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) | |||
1365 | init_completion(&mx3fbi->flip_cmpl); | 1386 | init_completion(&mx3fbi->flip_cmpl); |
1366 | disable_irq(ichan->eof_irq); | 1387 | disable_irq(ichan->eof_irq); |
1367 | dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); | 1388 | dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); |
1389 | ret = __set_par(fbi, false); | ||
1390 | if (ret < 0) | ||
1391 | goto esetpar; | ||
1392 | |||
1393 | __blank(FB_BLANK_UNBLANK, fbi); | ||
1368 | 1394 | ||
1369 | dev_info(dev, "registered, using mode %s\n", fb_mode); | 1395 | dev_info(dev, "registered, using mode %s\n", fb_mode); |
1370 | 1396 | ||
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c index fcd53ceb88fa..c8960003f47d 100644 --- a/drivers/video/via/hw.c +++ b/drivers/video/via/hw.c | |||
@@ -2407,14 +2407,14 @@ int viafb_setmode(int vmode_index, int hor_res, int ver_res, int video_bpp, | |||
2407 | viafb_dvi_set_mode(viafb_get_mode_index | 2407 | viafb_dvi_set_mode(viafb_get_mode_index |
2408 | (viaparinfo->tmds_setting_info->h_active, | 2408 | (viaparinfo->tmds_setting_info->h_active, |
2409 | viaparinfo->tmds_setting_info-> | 2409 | viaparinfo->tmds_setting_info-> |
2410 | v_active, 1), | 2410 | v_active), |
2411 | video_bpp1, viaparinfo-> | 2411 | video_bpp1, viaparinfo-> |
2412 | tmds_setting_info->iga_path); | 2412 | tmds_setting_info->iga_path); |
2413 | } else { | 2413 | } else { |
2414 | viafb_dvi_set_mode(viafb_get_mode_index | 2414 | viafb_dvi_set_mode(viafb_get_mode_index |
2415 | (viaparinfo->tmds_setting_info->h_active, | 2415 | (viaparinfo->tmds_setting_info->h_active, |
2416 | viaparinfo-> | 2416 | viaparinfo-> |
2417 | tmds_setting_info->v_active, 0), | 2417 | tmds_setting_info->v_active), |
2418 | video_bpp, viaparinfo-> | 2418 | video_bpp, viaparinfo-> |
2419 | tmds_setting_info->iga_path); | 2419 | tmds_setting_info->iga_path); |
2420 | } | 2420 | } |
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c index 6c7290a6a447..78c6b3387947 100644 --- a/drivers/video/via/lcd.c +++ b/drivers/video/via/lcd.c | |||
@@ -580,10 +580,7 @@ static void load_lcd_k400_patch_tbl(int set_hres, int set_vres, | |||
580 | int reg_num = 0; | 580 | int reg_num = 0; |
581 | struct io_reg *lcd_patch_reg = NULL; | 581 | struct io_reg *lcd_patch_reg = NULL; |
582 | 582 | ||
583 | if (viaparinfo->lvds_setting_info->iga_path == IGA2) | 583 | vmode_index = viafb_get_mode_index(set_hres, set_vres); |
584 | vmode_index = viafb_get_mode_index(set_hres, set_vres, 1); | ||
585 | else | ||
586 | vmode_index = viafb_get_mode_index(set_hres, set_vres, 0); | ||
587 | switch (panel_id) { | 584 | switch (panel_id) { |
588 | /* LCD 800x600 */ | 585 | /* LCD 800x600 */ |
589 | case LCD_PANEL_ID1_800X600: | 586 | case LCD_PANEL_ID1_800X600: |
@@ -761,10 +758,7 @@ static void load_lcd_p880_patch_tbl(int set_hres, int set_vres, | |||
761 | int reg_num = 0; | 758 | int reg_num = 0; |
762 | struct io_reg *lcd_patch_reg = NULL; | 759 | struct io_reg *lcd_patch_reg = NULL; |
763 | 760 | ||
764 | if (viaparinfo->lvds_setting_info->iga_path == IGA2) | 761 | vmode_index = viafb_get_mode_index(set_hres, set_vres); |
765 | vmode_index = viafb_get_mode_index(set_hres, set_vres, 1); | ||
766 | else | ||
767 | vmode_index = viafb_get_mode_index(set_hres, set_vres, 0); | ||
768 | 762 | ||
769 | switch (panel_id) { | 763 | switch (panel_id) { |
770 | case LCD_PANEL_ID5_1400X1050: | 764 | case LCD_PANEL_ID5_1400X1050: |
@@ -832,10 +826,7 @@ static void load_lcd_patch_regs(int set_hres, int set_vres, | |||
832 | { | 826 | { |
833 | int vmode_index; | 827 | int vmode_index; |
834 | 828 | ||
835 | if (viaparinfo->lvds_setting_info->iga_path == IGA2) | 829 | vmode_index = viafb_get_mode_index(set_hres, set_vres); |
836 | vmode_index = viafb_get_mode_index(set_hres, set_vres, 1); | ||
837 | else | ||
838 | vmode_index = viafb_get_mode_index(set_hres, set_vres, 0); | ||
839 | 830 | ||
840 | viafb_unlock_crt(); | 831 | viafb_unlock_crt(); |
841 | 832 | ||
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c index a0fec298216e..72833f3334b5 100644 --- a/drivers/video/via/viafbdev.c +++ b/drivers/video/via/viafbdev.c | |||
@@ -32,7 +32,6 @@ static u32 pseudo_pal[17]; | |||
32 | /* video mode */ | 32 | /* video mode */ |
33 | static char *viafb_mode = "640x480"; | 33 | static char *viafb_mode = "640x480"; |
34 | static char *viafb_mode1 = "640x480"; | 34 | static char *viafb_mode1 = "640x480"; |
35 | static int viafb_resMode = VIA_RES_640X480; | ||
36 | 35 | ||
37 | /* Added for specifying active devices.*/ | 36 | /* Added for specifying active devices.*/ |
38 | char *viafb_active_dev = ""; | 37 | char *viafb_active_dev = ""; |
@@ -56,47 +55,47 @@ static void viafb_get_video_device(u32 *video_dev_info); | |||
56 | 55 | ||
57 | /* Mode information */ | 56 | /* Mode information */ |
58 | static const struct viafb_modeinfo viafb_modentry[] = { | 57 | static const struct viafb_modeinfo viafb_modentry[] = { |
59 | {480, 640, VIA_RES_480X640, "480x640"}, | 58 | {480, 640, VIA_RES_480X640}, |
60 | {640, 480, VIA_RES_640X480, "640x480"}, | 59 | {640, 480, VIA_RES_640X480}, |
61 | {800, 480, VIA_RES_800X480, "800x480"}, | 60 | {800, 480, VIA_RES_800X480}, |
62 | {800, 600, VIA_RES_800X600, "800x600"}, | 61 | {800, 600, VIA_RES_800X600}, |
63 | {1024, 768, VIA_RES_1024X768, "1024x768"}, | 62 | {1024, 768, VIA_RES_1024X768}, |
64 | {1152, 864, VIA_RES_1152X864, "1152x864"}, | 63 | {1152, 864, VIA_RES_1152X864}, |
65 | {1280, 1024, VIA_RES_1280X1024, "1280x1024"}, | 64 | {1280, 1024, VIA_RES_1280X1024}, |
66 | {1600, 1200, VIA_RES_1600X1200, "1600x1200"}, | 65 | {1600, 1200, VIA_RES_1600X1200}, |
67 | {1440, 1050, VIA_RES_1440X1050, "1440x1050"}, | 66 | {1440, 1050, VIA_RES_1440X1050}, |
68 | {1280, 768, VIA_RES_1280X768, "1280x768"}, | 67 | {1280, 768, VIA_RES_1280X768,}, |
69 | {1280, 800, VIA_RES_1280X800, "1280x800"}, | 68 | {1280, 800, VIA_RES_1280X800}, |
70 | {1280, 960, VIA_RES_1280X960, "1280x960"}, | 69 | {1280, 960, VIA_RES_1280X960}, |
71 | {1920, 1440, VIA_RES_1920X1440, "1920x1440"}, | 70 | {1920, 1440, VIA_RES_1920X1440}, |
72 | {848, 480, VIA_RES_848X480, "848x480"}, | 71 | {848, 480, VIA_RES_848X480}, |
73 | {1400, 1050, VIA_RES_1400X1050, "1400x1050"}, | 72 | {1400, 1050, VIA_RES_1400X1050}, |
74 | {720, 480, VIA_RES_720X480, "720x480"}, | 73 | {720, 480, VIA_RES_720X480}, |
75 | {720, 576, VIA_RES_720X576, "720x576"}, | 74 | {720, 576, VIA_RES_720X576}, |
76 | {1024, 512, VIA_RES_1024X512, "1024x512"}, | 75 | {1024, 512, VIA_RES_1024X512}, |
77 | {1024, 576, VIA_RES_1024X576, "1024x576"}, | 76 | {1024, 576, VIA_RES_1024X576}, |
78 | {1024, 600, VIA_RES_1024X600, "1024x600"}, | 77 | {1024, 600, VIA_RES_1024X600}, |
79 | {1280, 720, VIA_RES_1280X720, "1280x720"}, | 78 | {1280, 720, VIA_RES_1280X720}, |
80 | {1920, 1080, VIA_RES_1920X1080, "1920x1080"}, | 79 | {1920, 1080, VIA_RES_1920X1080}, |
81 | {1366, 768, VIA_RES_1368X768, "1368x768"}, | 80 | {1366, 768, VIA_RES_1368X768}, |
82 | {1680, 1050, VIA_RES_1680X1050, "1680x1050"}, | 81 | {1680, 1050, VIA_RES_1680X1050}, |
83 | {960, 600, VIA_RES_960X600, "960x600"}, | 82 | {960, 600, VIA_RES_960X600}, |
84 | {1000, 600, VIA_RES_1000X600, "1000x600"}, | 83 | {1000, 600, VIA_RES_1000X600}, |
85 | {1024, 576, VIA_RES_1024X576, "1024x576"}, | 84 | {1024, 576, VIA_RES_1024X576}, |
86 | {1024, 600, VIA_RES_1024X600, "1024x600"}, | 85 | {1024, 600, VIA_RES_1024X600}, |
87 | {1088, 612, VIA_RES_1088X612, "1088x612"}, | 86 | {1088, 612, VIA_RES_1088X612}, |
88 | {1152, 720, VIA_RES_1152X720, "1152x720"}, | 87 | {1152, 720, VIA_RES_1152X720}, |
89 | {1200, 720, VIA_RES_1200X720, "1200x720"}, | 88 | {1200, 720, VIA_RES_1200X720}, |
90 | {1280, 600, VIA_RES_1280X600, "1280x600"}, | 89 | {1280, 600, VIA_RES_1280X600}, |
91 | {1360, 768, VIA_RES_1360X768, "1360x768"}, | 90 | {1360, 768, VIA_RES_1360X768}, |
92 | {1440, 900, VIA_RES_1440X900, "1440x900"}, | 91 | {1440, 900, VIA_RES_1440X900}, |
93 | {1600, 900, VIA_RES_1600X900, "1600x900"}, | 92 | {1600, 900, VIA_RES_1600X900}, |
94 | {1600, 1024, VIA_RES_1600X1024, "1600x1024"}, | 93 | {1600, 1024, VIA_RES_1600X1024}, |
95 | {1792, 1344, VIA_RES_1792X1344, "1792x1344"}, | 94 | {1792, 1344, VIA_RES_1792X1344}, |
96 | {1856, 1392, VIA_RES_1856X1392, "1856x1392"}, | 95 | {1856, 1392, VIA_RES_1856X1392}, |
97 | {1920, 1200, VIA_RES_1920X1200, "1920x1200"}, | 96 | {1920, 1200, VIA_RES_1920X1200}, |
98 | {2048, 1536, VIA_RES_2048X1536, "2048x1536"}, | 97 | {2048, 1536, VIA_RES_2048X1536}, |
99 | {0, 0, VIA_RES_INVALID, "640x480"} | 98 | {0, 0, VIA_RES_INVALID} |
100 | }; | 99 | }; |
101 | 100 | ||
102 | static struct fb_ops viafb_ops; | 101 | static struct fb_ops viafb_ops; |
@@ -177,7 +176,7 @@ static int viafb_check_var(struct fb_var_screeninfo *var, | |||
177 | if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE) | 176 | if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE) |
178 | return -EINVAL; | 177 | return -EINVAL; |
179 | 178 | ||
180 | vmode_index = viafb_get_mode_index(var->xres, var->yres, 0); | 179 | vmode_index = viafb_get_mode_index(var->xres, var->yres); |
181 | if (vmode_index == VIA_RES_INVALID) { | 180 | if (vmode_index == VIA_RES_INVALID) { |
182 | DEBUG_MSG(KERN_INFO | 181 | DEBUG_MSG(KERN_INFO |
183 | "viafb: Mode %dx%dx%d not supported!!\n", | 182 | "viafb: Mode %dx%dx%d not supported!!\n", |
@@ -233,14 +232,14 @@ static int viafb_set_par(struct fb_info *info) | |||
233 | viafb_update_device_setting(info->var.xres, info->var.yres, | 232 | viafb_update_device_setting(info->var.xres, info->var.yres, |
234 | info->var.bits_per_pixel, viafb_refresh, 0); | 233 | info->var.bits_per_pixel, viafb_refresh, 0); |
235 | 234 | ||
236 | vmode_index = viafb_get_mode_index(info->var.xres, info->var.yres, 0); | 235 | vmode_index = viafb_get_mode_index(info->var.xres, info->var.yres); |
237 | 236 | ||
238 | if (viafb_SAMM_ON == 1) { | 237 | if (viafb_SAMM_ON == 1) { |
239 | DEBUG_MSG(KERN_INFO | 238 | DEBUG_MSG(KERN_INFO |
240 | "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n", | 239 | "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n", |
241 | viafb_second_xres, viafb_second_yres, viafb_bpp1); | 240 | viafb_second_xres, viafb_second_yres, viafb_bpp1); |
242 | vmode_index1 = viafb_get_mode_index(viafb_second_xres, | 241 | vmode_index1 = viafb_get_mode_index(viafb_second_xres, |
243 | viafb_second_yres, 1); | 242 | viafb_second_yres); |
244 | DEBUG_MSG(KERN_INFO "->viafb_SAMM_ON: index=%d\n", | 243 | DEBUG_MSG(KERN_INFO "->viafb_SAMM_ON: index=%d\n", |
245 | vmode_index1); | 244 | vmode_index1); |
246 | 245 | ||
@@ -1262,7 +1261,7 @@ static int viafb_sync(struct fb_info *info) | |||
1262 | return 0; | 1261 | return 0; |
1263 | } | 1262 | } |
1264 | 1263 | ||
1265 | int viafb_get_mode_index(int hres, int vres, int flag) | 1264 | int viafb_get_mode_index(int hres, int vres) |
1266 | { | 1265 | { |
1267 | u32 i; | 1266 | u32 i; |
1268 | DEBUG_MSG(KERN_INFO "viafb_get_mode_index!\n"); | 1267 | DEBUG_MSG(KERN_INFO "viafb_get_mode_index!\n"); |
@@ -1272,13 +1271,7 @@ int viafb_get_mode_index(int hres, int vres, int flag) | |||
1272 | viafb_modentry[i].yres == vres) | 1271 | viafb_modentry[i].yres == vres) |
1273 | break; | 1272 | break; |
1274 | 1273 | ||
1275 | viafb_resMode = viafb_modentry[i].mode_index; | 1274 | return viafb_modentry[i].mode_index; |
1276 | if (flag) | ||
1277 | viafb_mode1 = viafb_modentry[i].mode_res; | ||
1278 | else | ||
1279 | viafb_mode = viafb_modentry[i].mode_res; | ||
1280 | |||
1281 | return viafb_resMode; | ||
1282 | } | 1275 | } |
1283 | 1276 | ||
1284 | static void check_available_device_to_enable(int device_id) | 1277 | static void check_available_device_to_enable(int device_id) |
@@ -2199,7 +2192,7 @@ static int __devinit via_pci_probe(void) | |||
2199 | strict_strtoul(tmpc, 0, &default_xres); | 2192 | strict_strtoul(tmpc, 0, &default_xres); |
2200 | strict_strtoul(tmpm, 0, &default_yres); | 2193 | strict_strtoul(tmpm, 0, &default_yres); |
2201 | 2194 | ||
2202 | vmode_index = viafb_get_mode_index(default_xres, default_yres, 0); | 2195 | vmode_index = viafb_get_mode_index(default_xres, default_yres); |
2203 | DEBUG_MSG(KERN_INFO "0->index=%d\n", vmode_index); | 2196 | DEBUG_MSG(KERN_INFO "0->index=%d\n", vmode_index); |
2204 | 2197 | ||
2205 | if (viafb_SAMM_ON == 1) { | 2198 | if (viafb_SAMM_ON == 1) { |
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h index a4158e872878..227b000feb38 100644 --- a/drivers/video/via/viafbdev.h +++ b/drivers/video/via/viafbdev.h | |||
@@ -81,7 +81,6 @@ struct viafb_modeinfo { | |||
81 | u32 xres; | 81 | u32 xres; |
82 | u32 yres; | 82 | u32 yres; |
83 | int mode_index; | 83 | int mode_index; |
84 | char *mode_res; | ||
85 | }; | 84 | }; |
86 | extern unsigned int viafb_second_virtual_yres; | 85 | extern unsigned int viafb_second_virtual_yres; |
87 | extern unsigned int viafb_second_virtual_xres; | 86 | extern unsigned int viafb_second_virtual_xres; |
@@ -102,7 +101,7 @@ extern int strict_strtoul(const char *cp, unsigned int base, | |||
102 | void viafb_memory_pitch_patch(struct fb_info *info); | 101 | void viafb_memory_pitch_patch(struct fb_info *info); |
103 | void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh, | 102 | void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh, |
104 | int mode_index); | 103 | int mode_index); |
105 | int viafb_get_mode_index(int hres, int vres, int flag); | 104 | int viafb_get_mode_index(int hres, int vres); |
106 | u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information | 105 | u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information |
107 | *plvds_setting_info, struct lvds_chip_information | 106 | *plvds_setting_info, struct lvds_chip_information |
108 | *plvds_chip_info, u8 index); | 107 | *plvds_chip_info, u8 index); |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index a7e3b706b9d3..0d92969404c3 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
@@ -687,6 +687,7 @@ static int omap_hdq_remove(struct platform_device *pdev) | |||
687 | 687 | ||
688 | if (hdq_data->hdq_usecount) { | 688 | if (hdq_data->hdq_usecount) { |
689 | dev_dbg(&pdev->dev, "removed when use count is not zero\n"); | 689 | dev_dbg(&pdev->dev, "removed when use count is not zero\n"); |
690 | mutex_unlock(&hdq_data->hdq_mutex); | ||
690 | return -EBUSY; | 691 | return -EBUSY; |
691 | } | 692 | } |
692 | 693 | ||
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c index fecb307d28e9..aec7cefdef21 100644 --- a/drivers/watchdog/coh901327_wdt.c +++ b/drivers/watchdog/coh901327_wdt.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
19 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
21 | #include <linux/delay.h> | ||
21 | 22 | ||
22 | #define DRV_NAME "WDOG COH 901 327" | 23 | #define DRV_NAME "WDOG COH 901 327" |
23 | 24 | ||
@@ -92,6 +93,8 @@ static struct clk *clk; | |||
92 | static void coh901327_enable(u16 timeout) | 93 | static void coh901327_enable(u16 timeout) |
93 | { | 94 | { |
94 | u16 val; | 95 | u16 val; |
96 | unsigned long freq; | ||
97 | unsigned long delay_ns; | ||
95 | 98 | ||
96 | clk_enable(clk); | 99 | clk_enable(clk); |
97 | /* Restart timer if it is disabled */ | 100 | /* Restart timer if it is disabled */ |
@@ -102,6 +105,14 @@ static void coh901327_enable(u16 timeout) | |||
102 | /* Acknowledge any pending interrupt so it doesn't just fire off */ | 105 | /* Acknowledge any pending interrupt so it doesn't just fire off */ |
103 | writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, | 106 | writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, |
104 | virtbase + U300_WDOG_IER); | 107 | virtbase + U300_WDOG_IER); |
108 | /* | ||
109 | * The interrupt is cleared in the 32 kHz clock domain. | ||
110 | * Wait 3 32 kHz cycles for it to take effect | ||
111 | */ | ||
112 | freq = clk_get_rate(clk); | ||
113 | delay_ns = (1000000000 + freq - 1) / freq; /* Freq to ns and round up */ | ||
114 | delay_ns = 3 * delay_ns; /* Wait 3 cycles */ | ||
115 | ndelay(delay_ns); | ||
105 | /* Enable the watchdog interrupt */ | 116 | /* Enable the watchdog interrupt */ |
106 | writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR); | 117 | writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR); |
107 | /* Activate the watchdog timer */ | 118 | /* Activate the watchdog timer */ |
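Note on the coh901327 delay computation above: assuming a nominal 32768 Hz watchdog clock (clk_get_rate() supplies the real figure at runtime), the rounded-up per-cycle delay is (1000000000 + 32768 - 1) / 32768 = 30518 ns, so the driver busy-waits 3 * 30518 = 91554 ns, roughly 92 microseconds, before re-enabling the bark interrupt.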
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index 00b03eb43bf0..e1c82769b08e 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c | |||
@@ -66,7 +66,7 @@ static inline void ks8695_wdt_stop(void) | |||
66 | static inline void ks8695_wdt_start(void) | 66 | static inline void ks8695_wdt_start(void) |
67 | { | 67 | { |
68 | unsigned long tmcon; | 68 | unsigned long tmcon; |
69 | unsigned long tval = wdt_time * CLOCK_TICK_RATE; | 69 | unsigned long tval = wdt_time * KS8695_CLOCK_RATE; |
70 | 70 | ||
71 | spin_lock(&ks8695_lock); | 71 | spin_lock(&ks8695_lock); |
72 | /* disable timer0 */ | 72 | /* disable timer0 */ |
@@ -103,7 +103,7 @@ static inline void ks8695_wdt_reload(void) | |||
103 | static int ks8695_wdt_settimeout(int new_time) | 103 | static int ks8695_wdt_settimeout(int new_time) |
104 | { | 104 | { |
105 | /* | 105 | /* |
106 | * All counting occurs at SLOW_CLOCK / 128 = 0.256 Hz | 106 | * All counting occurs at KS8695_CLOCK_RATE / 128 = 0.256 Hz |
107 | * | 107 | * |
108 | * Since WDV is a 16-bit counter, the maximum period is | 108 | * Since WDV is a 16-bit counter, the maximum period is |
109 | * 65536 / 0.256 = 256 seconds. | 109 | * 65536 / 0.256 = 256 seconds. |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 697f6b5f1313..e92f229e3c6e 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -828,15 +828,22 @@ static int load_flat_shared_library(int id, struct lib_info *libs) | |||
828 | if (IS_ERR(bprm.file)) | 828 | if (IS_ERR(bprm.file)) |
829 | return res; | 829 | return res; |
830 | 830 | ||
831 | bprm.cred = prepare_exec_creds(); | ||
832 | res = -ENOMEM; | ||
833 | if (!bprm.cred) | ||
834 | goto out; | ||
835 | |||
831 | res = prepare_binprm(&bprm); | 836 | res = prepare_binprm(&bprm); |
832 | 837 | ||
833 | if (res <= (unsigned long)-4096) | 838 | if (res <= (unsigned long)-4096) |
834 | res = load_flat_file(&bprm, libs, id, NULL); | 839 | res = load_flat_file(&bprm, libs, id, NULL); |
835 | if (bprm.file) { | 840 | |
836 | allow_write_access(bprm.file); | 841 | abort_creds(bprm.cred); |
837 | fput(bprm.file); | 842 | |
838 | bprm.file = NULL; | 843 | out: |
839 | } | 844 | allow_write_access(bprm.file); |
845 | fput(bprm.file); | ||
846 | |||
840 | return(res); | 847 | return(res); |
841 | } | 848 | } |
842 | 849 | ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index dc84daee6bc4..72a2b9c28e9f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -265,10 +265,6 @@ static int caching_kthread(void *data) | |||
265 | 265 | ||
266 | atomic_inc(&block_group->space_info->caching_threads); | 266 | atomic_inc(&block_group->space_info->caching_threads); |
267 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); | 267 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); |
268 | again: | ||
269 | /* need to make sure the commit_root doesn't disappear */ | ||
270 | down_read(&fs_info->extent_commit_sem); | ||
271 | |||
272 | /* | 268 | /* |
273 | * We don't want to deadlock with somebody trying to allocate a new | 269 | * We don't want to deadlock with somebody trying to allocate a new |
274 | * extent for the extent root while also trying to search the extent | 270 | * extent for the extent root while also trying to search the extent |
@@ -282,6 +278,10 @@ again: | |||
282 | key.objectid = last; | 278 | key.objectid = last; |
283 | key.offset = 0; | 279 | key.offset = 0; |
284 | btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); | 280 | btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); |
281 | again: | ||
282 | /* need to make sure the commit_root doesn't disappear */ | ||
283 | down_read(&fs_info->extent_commit_sem); | ||
284 | |||
285 | ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); | 285 | ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); |
286 | if (ret < 0) | 286 | if (ret < 0) |
287 | goto err; | 287 | goto err; |
@@ -304,6 +304,19 @@ again: | |||
304 | 304 | ||
305 | if (need_resched() || | 305 | if (need_resched() || |
306 | btrfs_transaction_in_commit(fs_info)) { | 306 | btrfs_transaction_in_commit(fs_info)) { |
307 | leaf = path->nodes[0]; | ||
308 | |||
309 | /* this shouldn't happen, but if the | ||
310 | * leaf is empty just move on. | ||
311 | */ | ||
312 | if (btrfs_header_nritems(leaf) == 0) | ||
313 | break; | ||
314 | /* | ||
315 | * we need to copy the key out so that | ||
316 | * we are sure the next search advances | ||
317 | * us forward in the btree. | ||
318 | */ | ||
319 | btrfs_item_key_to_cpu(leaf, &key, 0); | ||
307 | btrfs_release_path(fs_info->extent_root, path); | 320 | btrfs_release_path(fs_info->extent_root, path); |
308 | up_read(&fs_info->extent_commit_sem); | 321 | up_read(&fs_info->extent_commit_sem); |
309 | schedule_timeout(1); | 322 | schedule_timeout(1); |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index af99b78b288e..5edcee3a617f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -414,11 +414,29 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro | |||
414 | u64 *offset, u64 *bytes) | 414 | u64 *offset, u64 *bytes) |
415 | { | 415 | { |
416 | u64 end; | 416 | u64 end; |
417 | u64 search_start, search_bytes; | ||
418 | int ret; | ||
417 | 419 | ||
418 | again: | 420 | again: |
419 | end = bitmap_info->offset + | 421 | end = bitmap_info->offset + |
420 | (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; | 422 | (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; |
421 | 423 | ||
424 | /* | ||
425 | * XXX - this can go away after a few releases. | ||
426 | * | ||
427 | * since the only user of btrfs_remove_free_space is the tree logging | ||
428 | * stuff, and the only way to test that is under crash conditions, we | ||
429 | * want to have this debug stuff here just in case somethings not | ||
430 | * working. Search the bitmap for the space we are trying to use to | ||
431 | * make sure its actually there. If its not there then we need to stop | ||
432 | * because something has gone wrong. | ||
433 | */ | ||
434 | search_start = *offset; | ||
435 | search_bytes = *bytes; | ||
436 | ret = search_bitmap(block_group, bitmap_info, &search_start, | ||
437 | &search_bytes); | ||
438 | BUG_ON(ret < 0 || search_start != *offset); | ||
439 | |||
422 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { | 440 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { |
423 | bitmap_clear_bits(block_group, bitmap_info, *offset, | 441 | bitmap_clear_bits(block_group, bitmap_info, *offset, |
424 | end - *offset + 1); | 442 | end - *offset + 1); |
@@ -430,6 +448,7 @@ again: | |||
430 | } | 448 | } |
431 | 449 | ||
432 | if (*bytes) { | 450 | if (*bytes) { |
451 | struct rb_node *next = rb_next(&bitmap_info->offset_index); | ||
433 | if (!bitmap_info->bytes) { | 452 | if (!bitmap_info->bytes) { |
434 | unlink_free_space(block_group, bitmap_info); | 453 | unlink_free_space(block_group, bitmap_info); |
435 | kfree(bitmap_info->bitmap); | 454 | kfree(bitmap_info->bitmap); |
@@ -438,16 +457,36 @@ again: | |||
438 | recalculate_thresholds(block_group); | 457 | recalculate_thresholds(block_group); |
439 | } | 458 | } |
440 | 459 | ||
441 | bitmap_info = tree_search_offset(block_group, | 460 | /* |
442 | offset_to_bitmap(block_group, | 461 | * no entry after this bitmap, but we still have bytes to |
443 | *offset), | 462 | * remove, so something has gone wrong. |
444 | 1, 0); | 463 | */ |
445 | if (!bitmap_info) | 464 | if (!next) |
446 | return -EINVAL; | 465 | return -EINVAL; |
447 | 466 | ||
467 | bitmap_info = rb_entry(next, struct btrfs_free_space, | ||
468 | offset_index); | ||
469 | |||
470 | /* | ||
471 | * if the next entry isn't a bitmap we need to return to let the | ||
472 | * extent stuff do its work. | ||
473 | */ | ||
448 | if (!bitmap_info->bitmap) | 474 | if (!bitmap_info->bitmap) |
449 | return -EAGAIN; | 475 | return -EAGAIN; |
450 | 476 | ||
477 | /* | ||
478 | * Ok the next item is a bitmap, but it may not actually hold | ||
479 | * the information for the rest of this free space stuff, so | ||
480 | * look for it, and if we don't find it return so we can try | ||
481 | * everything over again. | ||
482 | */ | ||
483 | search_start = *offset; | ||
484 | search_bytes = *bytes; | ||
485 | ret = search_bitmap(block_group, bitmap_info, &search_start, | ||
486 | &search_bytes); | ||
487 | if (ret < 0 || search_start != *offset) | ||
488 | return -EAGAIN; | ||
489 | |||
451 | goto again; | 490 | goto again; |
452 | } else if (!bitmap_info->bytes) { | 491 | } else if (!bitmap_info->bytes) { |
453 | unlink_free_space(block_group, bitmap_info); | 492 | unlink_free_space(block_group, bitmap_info); |
@@ -644,8 +683,17 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | |||
644 | again: | 683 | again: |
645 | info = tree_search_offset(block_group, offset, 0, 0); | 684 | info = tree_search_offset(block_group, offset, 0, 0); |
646 | if (!info) { | 685 | if (!info) { |
647 | WARN_ON(1); | 686 | /* |
648 | goto out_lock; | 687 | * oops didn't find an extent that matched the space we wanted |
688 | * to remove, look for a bitmap instead | ||
689 | */ | ||
690 | info = tree_search_offset(block_group, | ||
691 | offset_to_bitmap(block_group, offset), | ||
692 | 1, 0); | ||
693 | if (!info) { | ||
694 | WARN_ON(1); | ||
695 | goto out_lock; | ||
696 | } | ||
649 | } | 697 | } |
650 | 698 | ||
651 | if (info->bytes < bytes && rb_next(&info->offset_index)) { | 699 | if (info->bytes < bytes && rb_next(&info->offset_index)) { |
@@ -957,8 +1005,15 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
957 | if (cluster->block_group != block_group) | 1005 | if (cluster->block_group != block_group) |
958 | goto out; | 1006 | goto out; |
959 | 1007 | ||
960 | entry = tree_search_offset(block_group, search_start, 0, 0); | 1008 | /* |
961 | 1009 | * search_start is the beginning of the bitmap, but at some point it may | |
1010 | * be a good idea to point to the actual start of the free area in the | ||
1011 | * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only | ||
1012 | * to 1 to make sure we get the bitmap entry | ||
1013 | */ | ||
1014 | entry = tree_search_offset(block_group, | ||
1015 | offset_to_bitmap(block_group, search_start), | ||
1016 | 1, 0); | ||
962 | if (!entry || !entry->bitmap) | 1017 | if (!entry || !entry->bitmap) |
963 | goto out; | 1018 | goto out; |
964 | 1019 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 56fe83fa60c4..272b9b2bea86 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -4785,8 +4785,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
4785 | * and the replacement file is large. Start IO on it now so | 4785 | * and the replacement file is large. Start IO on it now so |
4786 | * we don't add too much work to the end of the transaction | 4786 | * we don't add too much work to the end of the transaction |
4787 | */ | 4787 | */ |
4788 | if (new_inode && old_inode && S_ISREG(old_inode->i_mode) && | 4788 | if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && |
4789 | new_inode->i_size && | ||
4790 | old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) | 4789 | old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) |
4791 | filemap_flush(old_inode->i_mapping); | 4790 | filemap_flush(old_inode->i_mapping); |
4792 | 4791 | ||
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e71264d1c2c9..c04f7f212602 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -2553,8 +2553,13 @@ int relocate_inode_pages(struct inode *inode, u64 start, u64 len) | |||
2553 | last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; | 2553 | last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; |
2554 | 2554 | ||
2555 | /* make sure the dirty trick played by the caller work */ | 2555 | /* make sure the dirty trick played by the caller work */ |
2556 | ret = invalidate_inode_pages2_range(inode->i_mapping, | 2556 | while (1) { |
2557 | first_index, last_index); | 2557 | ret = invalidate_inode_pages2_range(inode->i_mapping, |
2558 | first_index, last_index); | ||
2559 | if (ret != -EBUSY) | ||
2560 | break; | ||
2561 | schedule_timeout(HZ/10); | ||
2562 | } | ||
2558 | if (ret) | 2563 | if (ret) |
2559 | goto out_unlock; | 2564 | goto out_unlock; |
2560 | 2565 | ||
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index ecfbce836d32..3e2b90eaa239 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c | |||
@@ -208,7 +208,7 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, | |||
208 | *total_in = 0; | 208 | *total_in = 0; |
209 | 209 | ||
210 | workspace = find_zlib_workspace(); | 210 | workspace = find_zlib_workspace(); |
211 | if (!workspace) | 211 | if (IS_ERR(workspace)) |
212 | return -1; | 212 | return -1; |
213 | 213 | ||
214 | if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { | 214 | if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { |
@@ -366,7 +366,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, | |||
366 | char *kaddr; | 366 | char *kaddr; |
367 | 367 | ||
368 | workspace = find_zlib_workspace(); | 368 | workspace = find_zlib_workspace(); |
369 | if (!workspace) | 369 | if (IS_ERR(workspace)) |
370 | return -ENOMEM; | 370 | return -ENOMEM; |
371 | 371 | ||
372 | data_in = kmap(pages_in[page_in_index]); | 372 | data_in = kmap(pages_in[page_in_index]); |
@@ -547,7 +547,7 @@ int btrfs_zlib_decompress(unsigned char *data_in, | |||
547 | return -ENOMEM; | 547 | return -ENOMEM; |
548 | 548 | ||
549 | workspace = find_zlib_workspace(); | 549 | workspace = find_zlib_workspace(); |
550 | if (!workspace) | 550 | if (IS_ERR(workspace)) |
551 | return -ENOMEM; | 551 | return -ENOMEM; |
552 | 552 | ||
553 | workspace->inf_strm.next_in = data_in; | 553 | workspace->inf_strm.next_in = data_in; |
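The three zlib hunks above, like the cifs_get_share_name() change later in this series, rely on the kernel's error-pointer convention: a helper that can fail returns an errno encoded with ERR_PTR() rather than NULL, so callers must test the result with IS_ERR()/PTR_ERR() instead of a NULL check. A minimal sketch of the idiom with hypothetical names (not code from this patch):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct example_workspace {
		void *buf;
	};

	/* hypothetical helper: failure is reported as an encoded error pointer */
	static struct example_workspace *example_find_workspace(void)
	{
		struct example_workspace *ws = kzalloc(sizeof(*ws), GFP_NOFS);

		if (!ws)
			return ERR_PTR(-ENOMEM);
		return ws;
	}

	static int example_caller(void)
	{
		struct example_workspace *ws = example_find_workspace();

		/* a plain NULL test would let the error pointer through */
		if (IS_ERR(ws))
			return PTR_ERR(ws);

		/* ... use ws->buf ... */
		kfree(ws);
		return 0;
	}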
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 92888aa90749..e85b1e4389e0 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES | |||
@@ -1,3 +1,10 @@ | |||
1 | Version 1.60 | ||
2 | ------------- | ||
3 | Fix memory leak in reconnect. Fix oops in DFS mount error path. | ||
4 | Set s_maxbytes to smaller (the max that vfs can handle) so that | ||
5 | sendfile will now work over cifs mounts again. Add noforcegid | ||
6 | and noforceuid mount parameters. | ||
7 | |||
1 | Version 1.59 | 8 | Version 1.59 |
2 | ------------ | 9 | ------------ |
3 | Client uses server inode numbers (which are persistent) rather than | 10 | Client uses server inode numbers (which are persistent) rather than |
diff --git a/fs/cifs/README b/fs/cifs/README index ad92921dbde4..79c1a93400be 100644 --- a/fs/cifs/README +++ b/fs/cifs/README | |||
@@ -262,11 +262,11 @@ A partial list of the supported mount options follows: | |||
262 | mount. | 262 | mount. |
263 | domain Set the SMB/CIFS workgroup name prepended to the | 263 | domain Set the SMB/CIFS workgroup name prepended to the |
264 | username during CIFS session establishment | 264 | username during CIFS session establishment |
265 | forceuid Set the default uid for inodes based on the uid | 265 | forceuid Set the default uid for inodes to the uid |
266 | passed in. For mounts to servers | 266 | passed in on mount. For mounts to servers |
267 | which do support the CIFS Unix extensions, such as a | 267 | which do support the CIFS Unix extensions, such as a |
268 | properly configured Samba server, the server provides | 268 | properly configured Samba server, the server provides |
269 | the uid, gid and mode so this parameter should not be | 269 | the uid, gid and mode so this parameter should not be |
270 | specified unless the server and clients uid and gid | 270 | specified unless the server and clients uid and gid |
271 | numbering differ. If the server and client are in the | 271 | numbering differ. If the server and client are in the |
272 | same domain (e.g. running winbind or nss_ldap) and | 272 | same domain (e.g. running winbind or nss_ldap) and |
@@ -278,11 +278,7 @@ A partial list of the supported mount options follows: | |||
278 | of existing files will be the uid (gid) of the person | 278 | of existing files will be the uid (gid) of the person |
279 | who executed the mount (root, except when mount.cifs | 279 | who executed the mount (root, except when mount.cifs |
280 | is configured setuid for user mounts) unless the "uid=" | 280 | is configured setuid for user mounts) unless the "uid=" |
281 | (gid) mount option is specified. For the uid (gid) of newly | 281 | (gid) mount option is specified. Also note that permission |
282 | created files and directories, ie files created since | ||
283 | the last mount of the server share, the expected uid | ||
284 | (gid) is cached as long as the inode remains in | ||
285 | memory on the client. Also note that permission | ||
286 | checks (authorization checks) on accesses to a file occur | 282 | checks (authorization checks) on accesses to a file occur |
287 | at the server, but there are cases in which an administrator | 283 | at the server, but there are cases in which an administrator |
288 | may want to restrict at the client as well. For those | 284 | may want to restrict at the client as well. For those |
@@ -290,12 +286,15 @@ A partial list of the supported mount options follows: | |||
290 | (such as Windows), permissions can also be checked at the | 286 | (such as Windows), permissions can also be checked at the |
291 | client, and a crude form of client side permission checking | 287 | client, and a crude form of client side permission checking |
292 | can be enabled by specifying file_mode and dir_mode on | 288 | can be enabled by specifying file_mode and dir_mode on |
293 | the client. Note that the mount.cifs helper must be | 289 | the client. (default) |
294 | at version 1.10 or higher to support specifying the uid | 290 | forcegid (similar to above but for the groupid instead of uid) (default) |
295 | (or gid) in non-numeric form. | 291 | noforceuid Fill in file owner information (uid) by requesting it from |
296 | forcegid (similar to above but for the groupid instead of uid) | 292 | the server if possible. With this option, the value given in |
293 | the uid= option (on mount) will only be used if the server | ||
294 | can not support returning uids on inodes. | ||
295 | noforcegid (similar to above but for the group owner, gid, instead of uid) | ||
297 | uid Set the default uid for inodes, and indicate to the | 296 | uid Set the default uid for inodes, and indicate to the |
298 | cifs kernel driver which local user mounted . If the server | 297 | cifs kernel driver which local user mounted. If the server |
299 | supports the unix extensions the default uid is | 298 | supports the unix extensions the default uid is |
300 | not used to fill in the owner fields of inodes (files) | 299 | not used to fill in the owner fields of inodes (files) |
301 | unless the "forceuid" parameter is specified. | 300 | unless the "forceuid" parameter is specified. |
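As a usage illustration of the options described above (hypothetical server, share and id values):

	mount -t cifs //server/share /mnt/cifs -o user=jdoe,uid=1000,gid=1000,forceuid,forcegid
	mount -t cifs //server/share /mnt/cifs -o user=jdoe,uid=1000,gid=1000,noforceuid,noforcegid

The first form pins the ownership of every inode to the uid/gid given at mount time even when the server can supply Unix ownership; the second asks the client to use server-provided owners and fall back to the given ids only when the server cannot report them. As the connect.c hunk later in this patch shows, forceuid/forcegid are only honoured together with uid=/gid=.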
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 3bb11be8b6a8..606912d8f2a8 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c | |||
@@ -55,7 +55,7 @@ void cifs_dfs_release_automount_timer(void) | |||
55 | * i.e. strips from UNC trailing path that is not part of share | 55 | * i.e. strips from UNC trailing path that is not part of share |
56 | * name and fixup missing '\' in the begining of DFS node refferal | 56 | * name and fixup missing '\' in the begining of DFS node refferal |
57 | * if neccessary. | 57 | * if neccessary. |
58 | * Returns pointer to share name on success or NULL on error. | 58 | * Returns pointer to share name on success or ERR_PTR on error. |
59 | * Caller is responsible for freeing returned string. | 59 | * Caller is responsible for freeing returned string. |
60 | */ | 60 | */ |
61 | static char *cifs_get_share_name(const char *node_name) | 61 | static char *cifs_get_share_name(const char *node_name) |
@@ -68,7 +68,7 @@ static char *cifs_get_share_name(const char *node_name) | |||
68 | UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, | 68 | UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, |
69 | GFP_KERNEL); | 69 | GFP_KERNEL); |
70 | if (!UNC) | 70 | if (!UNC) |
71 | return NULL; | 71 | return ERR_PTR(-ENOMEM); |
72 | 72 | ||
73 | /* get share name and server name */ | 73 | /* get share name and server name */ |
74 | if (node_name[1] != '\\') { | 74 | if (node_name[1] != '\\') { |
@@ -87,7 +87,7 @@ static char *cifs_get_share_name(const char *node_name) | |||
87 | cERROR(1, ("%s: no server name end in node name: %s", | 87 | cERROR(1, ("%s: no server name end in node name: %s", |
88 | __func__, node_name)); | 88 | __func__, node_name)); |
89 | kfree(UNC); | 89 | kfree(UNC); |
90 | return NULL; | 90 | return ERR_PTR(-EINVAL); |
91 | } | 91 | } |
92 | 92 | ||
93 | /* find sharename end */ | 93 | /* find sharename end */ |
@@ -133,6 +133,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata, | |||
133 | return ERR_PTR(-EINVAL); | 133 | return ERR_PTR(-EINVAL); |
134 | 134 | ||
135 | *devname = cifs_get_share_name(ref->node_name); | 135 | *devname = cifs_get_share_name(ref->node_name); |
136 | if (IS_ERR(*devname)) { | ||
137 | rc = PTR_ERR(*devname); | ||
138 | *devname = NULL; | ||
139 | goto compose_mount_options_err; | ||
140 | } | ||
141 | |||
136 | rc = dns_resolve_server_name_to_ip(*devname, &srvIP); | 142 | rc = dns_resolve_server_name_to_ip(*devname, &srvIP); |
137 | if (rc != 0) { | 143 | if (rc != 0) { |
138 | cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", | 144 | cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 60e3c4253de0..714a542cbafc 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
@@ -44,7 +44,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes, | |||
44 | int maxwords = maxbytes / 2; | 44 | int maxwords = maxbytes / 2; |
45 | char tmp[NLS_MAX_CHARSET_SIZE]; | 45 | char tmp[NLS_MAX_CHARSET_SIZE]; |
46 | 46 | ||
47 | for (i = 0; from[i] && i < maxwords; i++) { | 47 | for (i = 0; i < maxwords && from[i]; i++) { |
48 | charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp, | 48 | charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp, |
49 | NLS_MAX_CHARSET_SIZE); | 49 | NLS_MAX_CHARSET_SIZE); |
50 | if (charlen > 0) | 50 | if (charlen > 0) |
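The one-line cifs_unicode.c change above swaps the order of the loop conditions so that the bounds check runs before from[i] is dereferenced: with the old ordering, a source buffer of exactly maxwords UTF-16 code units and no NUL terminator would be read one word past its end before the loop stopped.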
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 44f30504b82d..84b75253b05a 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -376,10 +376,14 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) | |||
376 | seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); | 376 | seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); |
377 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) | 377 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) |
378 | seq_printf(s, ",forceuid"); | 378 | seq_printf(s, ",forceuid"); |
379 | else | ||
380 | seq_printf(s, ",noforceuid"); | ||
379 | 381 | ||
380 | seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); | 382 | seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); |
381 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) | 383 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) |
382 | seq_printf(s, ",forcegid"); | 384 | seq_printf(s, ",forcegid"); |
385 | else | ||
386 | seq_printf(s, ",noforcegid"); | ||
383 | 387 | ||
384 | cifs_show_address(s, tcon->ses->server); | 388 | cifs_show_address(s, tcon->ses->server); |
385 | 389 | ||
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fc44d316d0bb..1f3345d7fa79 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -803,6 +803,10 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
803 | char *data; | 803 | char *data; |
804 | unsigned int temp_len, i, j; | 804 | unsigned int temp_len, i, j; |
805 | char separator[2]; | 805 | char separator[2]; |
806 | short int override_uid = -1; | ||
807 | short int override_gid = -1; | ||
808 | bool uid_specified = false; | ||
809 | bool gid_specified = false; | ||
806 | 810 | ||
807 | separator[0] = ','; | 811 | separator[0] = ','; |
808 | separator[1] = 0; | 812 | separator[1] = 0; |
@@ -1093,18 +1097,20 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1093 | "too long.\n"); | 1097 | "too long.\n"); |
1094 | return 1; | 1098 | return 1; |
1095 | } | 1099 | } |
1096 | } else if (strnicmp(data, "uid", 3) == 0) { | 1100 | } else if (!strnicmp(data, "uid", 3) && value && *value) { |
1097 | if (value && *value) | 1101 | vol->linux_uid = simple_strtoul(value, &value, 0); |
1098 | vol->linux_uid = | 1102 | uid_specified = true; |
1099 | simple_strtoul(value, &value, 0); | 1103 | } else if (!strnicmp(data, "forceuid", 8)) { |
1100 | } else if (strnicmp(data, "forceuid", 8) == 0) { | 1104 | override_uid = 1; |
1101 | vol->override_uid = 1; | 1105 | } else if (!strnicmp(data, "noforceuid", 10)) { |
1102 | } else if (strnicmp(data, "gid", 3) == 0) { | 1106 | override_uid = 0; |
1103 | if (value && *value) | 1107 | } else if (!strnicmp(data, "gid", 3) && value && *value) { |
1104 | vol->linux_gid = | 1108 | vol->linux_gid = simple_strtoul(value, &value, 0); |
1105 | simple_strtoul(value, &value, 0); | 1109 | gid_specified = true; |
1106 | } else if (strnicmp(data, "forcegid", 8) == 0) { | 1110 | } else if (!strnicmp(data, "forcegid", 8)) { |
1107 | vol->override_gid = 1; | 1111 | override_gid = 1; |
1112 | } else if (!strnicmp(data, "noforcegid", 10)) { | ||
1113 | override_gid = 0; | ||
1108 | } else if (strnicmp(data, "file_mode", 4) == 0) { | 1114 | } else if (strnicmp(data, "file_mode", 4) == 0) { |
1109 | if (value && *value) { | 1115 | if (value && *value) { |
1110 | vol->file_mode = | 1116 | vol->file_mode = |
@@ -1355,6 +1361,18 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1355 | if (vol->UNCip == NULL) | 1361 | if (vol->UNCip == NULL) |
1356 | vol->UNCip = &vol->UNC[2]; | 1362 | vol->UNCip = &vol->UNC[2]; |
1357 | 1363 | ||
1364 | if (uid_specified) | ||
1365 | vol->override_uid = override_uid; | ||
1366 | else if (override_uid == 1) | ||
1367 | printk(KERN_NOTICE "CIFS: ignoring forceuid mount option " | ||
1368 | "specified with no uid= option.\n"); | ||
1369 | |||
1370 | if (gid_specified) | ||
1371 | vol->override_gid = override_gid; | ||
1372 | else if (override_gid == 1) | ||
1373 | printk(KERN_NOTICE "CIFS: ignoring forcegid mount option " | ||
1374 | "specified with no gid= option.\n"); | ||
1375 | |||
1358 | return 0; | 1376 | return 0; |
1359 | } | 1377 | } |
1360 | 1378 | ||
@@ -2544,11 +2562,20 @@ remote_path_check: | |||
2544 | 2562 | ||
2545 | if (mount_data != mount_data_global) | 2563 | if (mount_data != mount_data_global) |
2546 | kfree(mount_data); | 2564 | kfree(mount_data); |
2565 | |||
2547 | mount_data = cifs_compose_mount_options( | 2566 | mount_data = cifs_compose_mount_options( |
2548 | cifs_sb->mountdata, full_path + 1, | 2567 | cifs_sb->mountdata, full_path + 1, |
2549 | referrals, &fake_devname); | 2568 | referrals, &fake_devname); |
2550 | kfree(fake_devname); | 2569 | |
2551 | free_dfs_info_array(referrals, num_referrals); | 2570 | free_dfs_info_array(referrals, num_referrals); |
2571 | kfree(fake_devname); | ||
2572 | kfree(full_path); | ||
2573 | |||
2574 | if (IS_ERR(mount_data)) { | ||
2575 | rc = PTR_ERR(mount_data); | ||
2576 | mount_data = NULL; | ||
2577 | goto mount_fail_check; | ||
2578 | } | ||
2552 | 2579 | ||
2553 | if (tcon) | 2580 | if (tcon) |
2554 | cifs_put_tcon(tcon); | 2581 | cifs_put_tcon(tcon); |
@@ -2556,8 +2583,6 @@ remote_path_check: | |||
2556 | cifs_put_smb_ses(pSesInfo); | 2583 | cifs_put_smb_ses(pSesInfo); |
2557 | 2584 | ||
2558 | cleanup_volume_info(&volume_info); | 2585 | cleanup_volume_info(&volume_info); |
2559 | FreeXid(xid); | ||
2560 | kfree(full_path); | ||
2561 | referral_walks_count++; | 2586 | referral_walks_count++; |
2562 | goto try_mount_again; | 2587 | goto try_mount_again; |
2563 | } | 2588 | } |
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index f28f070a60fc..f91fd51b32e3 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
@@ -1905,6 +1905,7 @@ COMPATIBLE_IOCTL(FIONCLEX) | |||
1905 | COMPATIBLE_IOCTL(FIOASYNC) | 1905 | COMPATIBLE_IOCTL(FIOASYNC) |
1906 | COMPATIBLE_IOCTL(FIONBIO) | 1906 | COMPATIBLE_IOCTL(FIONBIO) |
1907 | COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */ | 1907 | COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */ |
1908 | COMPATIBLE_IOCTL(FS_IOC_FIEMAP) | ||
1908 | /* 0x00 */ | 1909 | /* 0x00 */ |
1909 | COMPATIBLE_IOCTL(FIBMAP) | 1910 | COMPATIBLE_IOCTL(FIBMAP) |
1910 | COMPATIBLE_IOCTL(FIGETBSZ) | 1911 | COMPATIBLE_IOCTL(FIGETBSZ) |
diff --git a/fs/inode.c b/fs/inode.c index 901bad1e5f12..ae7b67e48661 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -120,12 +120,11 @@ static void wake_up_inode(struct inode *inode) | |||
120 | * These are initializations that need to be done on every inode | 120 | * These are initializations that need to be done on every inode |
121 | * allocation as the fields are not initialised by slab allocation. | 121 | * allocation as the fields are not initialised by slab allocation. |
122 | */ | 122 | */ |
123 | struct inode *inode_init_always(struct super_block *sb, struct inode *inode) | 123 | int inode_init_always(struct super_block *sb, struct inode *inode) |
124 | { | 124 | { |
125 | static const struct address_space_operations empty_aops; | 125 | static const struct address_space_operations empty_aops; |
126 | static struct inode_operations empty_iops; | 126 | static struct inode_operations empty_iops; |
127 | static const struct file_operations empty_fops; | 127 | static const struct file_operations empty_fops; |
128 | |||
129 | struct address_space *const mapping = &inode->i_data; | 128 | struct address_space *const mapping = &inode->i_data; |
130 | 129 | ||
131 | inode->i_sb = sb; | 130 | inode->i_sb = sb; |
@@ -152,7 +151,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) | |||
152 | inode->dirtied_when = 0; | 151 | inode->dirtied_when = 0; |
153 | 152 | ||
154 | if (security_inode_alloc(inode)) | 153 | if (security_inode_alloc(inode)) |
155 | goto out_free_inode; | 154 | goto out; |
156 | 155 | ||
157 | /* allocate and initialize an i_integrity */ | 156 | /* allocate and initialize an i_integrity */ |
158 | if (ima_inode_alloc(inode)) | 157 | if (ima_inode_alloc(inode)) |
@@ -198,16 +197,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) | |||
198 | inode->i_fsnotify_mask = 0; | 197 | inode->i_fsnotify_mask = 0; |
199 | #endif | 198 | #endif |
200 | 199 | ||
201 | return inode; | 200 | return 0; |
202 | 201 | ||
203 | out_free_security: | 202 | out_free_security: |
204 | security_inode_free(inode); | 203 | security_inode_free(inode); |
205 | out_free_inode: | 204 | out: |
206 | if (inode->i_sb->s_op->destroy_inode) | 205 | return -ENOMEM; |
207 | inode->i_sb->s_op->destroy_inode(inode); | ||
208 | else | ||
209 | kmem_cache_free(inode_cachep, (inode)); | ||
210 | return NULL; | ||
211 | } | 206 | } |
212 | EXPORT_SYMBOL(inode_init_always); | 207 | EXPORT_SYMBOL(inode_init_always); |
213 | 208 | ||
@@ -220,12 +215,21 @@ static struct inode *alloc_inode(struct super_block *sb) | |||
220 | else | 215 | else |
221 | inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); | 216 | inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); |
222 | 217 | ||
223 | if (inode) | 218 | if (!inode) |
224 | return inode_init_always(sb, inode); | 219 | return NULL; |
225 | return NULL; | 220 | |
221 | if (unlikely(inode_init_always(sb, inode))) { | ||
222 | if (inode->i_sb->s_op->destroy_inode) | ||
223 | inode->i_sb->s_op->destroy_inode(inode); | ||
224 | else | ||
225 | kmem_cache_free(inode_cachep, inode); | ||
226 | return NULL; | ||
227 | } | ||
228 | |||
229 | return inode; | ||
226 | } | 230 | } |
227 | 231 | ||
228 | void destroy_inode(struct inode *inode) | 232 | void __destroy_inode(struct inode *inode) |
229 | { | 233 | { |
230 | BUG_ON(inode_has_buffers(inode)); | 234 | BUG_ON(inode_has_buffers(inode)); |
231 | ima_inode_free(inode); | 235 | ima_inode_free(inode); |
@@ -237,13 +241,17 @@ void destroy_inode(struct inode *inode) | |||
237 | if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) | 241 | if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) |
238 | posix_acl_release(inode->i_default_acl); | 242 | posix_acl_release(inode->i_default_acl); |
239 | #endif | 243 | #endif |
244 | } | ||
245 | EXPORT_SYMBOL(__destroy_inode); | ||
246 | |||
247 | void destroy_inode(struct inode *inode) | ||
248 | { | ||
249 | __destroy_inode(inode); | ||
240 | if (inode->i_sb->s_op->destroy_inode) | 250 | if (inode->i_sb->s_op->destroy_inode) |
241 | inode->i_sb->s_op->destroy_inode(inode); | 251 | inode->i_sb->s_op->destroy_inode(inode); |
242 | else | 252 | else |
243 | kmem_cache_free(inode_cachep, (inode)); | 253 | kmem_cache_free(inode_cachep, (inode)); |
244 | } | 254 | } |
245 | EXPORT_SYMBOL(destroy_inode); | ||
246 | |||
247 | 255 | ||
248 | /* | 256 | /* |
249 | * These are initializations that only need to be done | 257 | * These are initializations that only need to be done |
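The fs/inode.c rework above changes the calling convention: inode_init_always() now returns 0 or -ENOMEM instead of a pointer, the generic alloc_inode() handles its own failure cleanup, and __destroy_inode() is split out and exported so the per-inode teardown can be run without freeing the containing object. A minimal sketch of a caller that sets up VFS inodes outside the generic alloc_inode() path (all examplefs names are hypothetical, not part of this patch):

	#include <linux/fs.h>
	#include <linux/slab.h>

	struct examplefs_inode {
		/* filesystem-private fields would go here */
		struct inode	vfs_inode;
	};

	static struct kmem_cache *examplefs_inode_cachep;

	/* private allocator, not the ->alloc_inode super operation */
	static struct examplefs_inode *examplefs_inode_alloc(struct super_block *sb)
	{
		struct examplefs_inode *ei;

		ei = kmem_cache_alloc(examplefs_inode_cachep, GFP_KERNEL);
		if (!ei)
			return NULL;

		/* now returns an error code; the caller frees its own object */
		if (inode_init_always(sb, &ei->vfs_inode)) {
			kmem_cache_free(examplefs_inode_cachep, ei);
			return NULL;
		}

		return ei;
	}

A caller like this that later has to discard an inode without going through the normal destroy_inode() path can use the newly exported __destroy_inode() to release what inode_init_always() attached before returning the object to its cache.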
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 5edc2bf20581..23c947539864 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c | |||
@@ -99,7 +99,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) | |||
99 | kunmap(pg); | 99 | kunmap(pg); |
100 | 100 | ||
101 | D2(printk(KERN_DEBUG "readpage finished\n")); | 101 | D2(printk(KERN_DEBUG "readpage finished\n")); |
102 | return 0; | 102 | return ret; |
103 | } | 103 | } |
104 | 104 | ||
105 | int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg) | 105 | int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg) |
diff --git a/fs/namespace.c b/fs/namespace.c index 277c28a63ead..7230787d18b0 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -316,7 +316,8 @@ EXPORT_SYMBOL_GPL(mnt_clone_write); | |||
316 | */ | 316 | */ |
317 | int mnt_want_write_file(struct file *file) | 317 | int mnt_want_write_file(struct file *file) |
318 | { | 318 | { |
319 | if (!(file->f_mode & FMODE_WRITE)) | 319 | struct inode *inode = file->f_dentry->d_inode; |
320 | if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode)) | ||
320 | return mnt_want_write(file->f_path.mnt); | 321 | return mnt_want_write(file->f_path.mnt); |
321 | else | 322 | else |
322 | return mnt_clone_write(file->f_path.mnt); | 323 | return mnt_clone_write(file->f_path.mnt); |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 489fc01a3204..e4e089a8f294 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -255,7 +255,7 @@ static void nfs_direct_read_release(void *calldata) | |||
255 | 255 | ||
256 | if (put_dreq(dreq)) | 256 | if (put_dreq(dreq)) |
257 | nfs_direct_complete(dreq); | 257 | nfs_direct_complete(dreq); |
258 | nfs_readdata_release(calldata); | 258 | nfs_readdata_free(data); |
259 | } | 259 | } |
260 | 260 | ||
261 | static const struct rpc_call_ops nfs_read_direct_ops = { | 261 | static const struct rpc_call_ops nfs_read_direct_ops = { |
@@ -314,14 +314,14 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, | |||
314 | data->npages, 1, 0, data->pagevec, NULL); | 314 | data->npages, 1, 0, data->pagevec, NULL); |
315 | up_read(¤t->mm->mmap_sem); | 315 | up_read(¤t->mm->mmap_sem); |
316 | if (result < 0) { | 316 | if (result < 0) { |
317 | nfs_readdata_release(data); | 317 | nfs_readdata_free(data); |
318 | break; | 318 | break; |
319 | } | 319 | } |
320 | if ((unsigned)result < data->npages) { | 320 | if ((unsigned)result < data->npages) { |
321 | bytes = result * PAGE_SIZE; | 321 | bytes = result * PAGE_SIZE; |
322 | if (bytes <= pgbase) { | 322 | if (bytes <= pgbase) { |
323 | nfs_direct_release_pages(data->pagevec, result); | 323 | nfs_direct_release_pages(data->pagevec, result); |
324 | nfs_readdata_release(data); | 324 | nfs_readdata_free(data); |
325 | break; | 325 | break; |
326 | } | 326 | } |
327 | bytes -= pgbase; | 327 | bytes -= pgbase; |
@@ -334,7 +334,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, | |||
334 | data->inode = inode; | 334 | data->inode = inode; |
335 | data->cred = msg.rpc_cred; | 335 | data->cred = msg.rpc_cred; |
336 | data->args.fh = NFS_FH(inode); | 336 | data->args.fh = NFS_FH(inode); |
337 | data->args.context = get_nfs_open_context(ctx); | 337 | data->args.context = ctx; |
338 | data->args.offset = pos; | 338 | data->args.offset = pos; |
339 | data->args.pgbase = pgbase; | 339 | data->args.pgbase = pgbase; |
340 | data->args.pages = data->pagevec; | 340 | data->args.pages = data->pagevec; |
@@ -441,7 +441,7 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq) | |||
441 | struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages); | 441 | struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages); |
442 | list_del(&data->pages); | 442 | list_del(&data->pages); |
443 | nfs_direct_release_pages(data->pagevec, data->npages); | 443 | nfs_direct_release_pages(data->pagevec, data->npages); |
444 | nfs_writedata_release(data); | 444 | nfs_writedata_free(data); |
445 | } | 445 | } |
446 | } | 446 | } |
447 | 447 | ||
@@ -534,7 +534,7 @@ static void nfs_direct_commit_release(void *calldata) | |||
534 | 534 | ||
535 | dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status); | 535 | dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status); |
536 | nfs_direct_write_complete(dreq, data->inode); | 536 | nfs_direct_write_complete(dreq, data->inode); |
537 | nfs_commitdata_release(calldata); | 537 | nfs_commit_free(data); |
538 | } | 538 | } |
539 | 539 | ||
540 | static const struct rpc_call_ops nfs_commit_direct_ops = { | 540 | static const struct rpc_call_ops nfs_commit_direct_ops = { |
@@ -570,7 +570,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) | |||
570 | data->args.fh = NFS_FH(data->inode); | 570 | data->args.fh = NFS_FH(data->inode); |
571 | data->args.offset = 0; | 571 | data->args.offset = 0; |
572 | data->args.count = 0; | 572 | data->args.count = 0; |
573 | data->args.context = get_nfs_open_context(dreq->ctx); | 573 | data->args.context = dreq->ctx; |
574 | data->res.count = 0; | 574 | data->res.count = 0; |
575 | data->res.fattr = &data->fattr; | 575 | data->res.fattr = &data->fattr; |
576 | data->res.verf = &data->verf; | 576 | data->res.verf = &data->verf; |
@@ -734,14 +734,14 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, | |||
734 | data->npages, 0, 0, data->pagevec, NULL); | 734 | data->npages, 0, 0, data->pagevec, NULL); |
735 | up_read(&current->mm->mmap_sem); | 735 | up_read(&current->mm->mmap_sem); |
736 | if (result < 0) { | 736 | if (result < 0) { |
737 | nfs_writedata_release(data); | 737 | nfs_writedata_free(data); |
738 | break; | 738 | break; |
739 | } | 739 | } |
740 | if ((unsigned)result < data->npages) { | 740 | if ((unsigned)result < data->npages) { |
741 | bytes = result * PAGE_SIZE; | 741 | bytes = result * PAGE_SIZE; |
742 | if (bytes <= pgbase) { | 742 | if (bytes <= pgbase) { |
743 | nfs_direct_release_pages(data->pagevec, result); | 743 | nfs_direct_release_pages(data->pagevec, result); |
744 | nfs_writedata_release(data); | 744 | nfs_writedata_free(data); |
745 | break; | 745 | break; |
746 | } | 746 | } |
747 | bytes -= pgbase; | 747 | bytes -= pgbase; |
@@ -756,7 +756,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, | |||
756 | data->inode = inode; | 756 | data->inode = inode; |
757 | data->cred = msg.rpc_cred; | 757 | data->cred = msg.rpc_cred; |
758 | data->args.fh = NFS_FH(inode); | 758 | data->args.fh = NFS_FH(inode); |
759 | data->args.context = get_nfs_open_context(ctx); | 759 | data->args.context = ctx; |
760 | data->args.offset = pos; | 760 | data->args.offset = pos; |
761 | data->args.pgbase = pgbase; | 761 | data->args.pgbase = pgbase; |
762 | data->args.pages = data->pagevec; | 762 | data->args.pages = data->pagevec; |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 73ea5e8d66ce..12c9e66d3f1d 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -60,17 +60,15 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) | |||
60 | return p; | 60 | return p; |
61 | } | 61 | } |
62 | 62 | ||
63 | static void nfs_readdata_free(struct nfs_read_data *p) | 63 | void nfs_readdata_free(struct nfs_read_data *p) |
64 | { | 64 | { |
65 | if (p && (p->pagevec != &p->page_array[0])) | 65 | if (p && (p->pagevec != &p->page_array[0])) |
66 | kfree(p->pagevec); | 66 | kfree(p->pagevec); |
67 | mempool_free(p, nfs_rdata_mempool); | 67 | mempool_free(p, nfs_rdata_mempool); |
68 | } | 68 | } |
69 | 69 | ||
70 | void nfs_readdata_release(void *data) | 70 | static void nfs_readdata_release(struct nfs_read_data *rdata) |
71 | { | 71 | { |
72 | struct nfs_read_data *rdata = data; | ||
73 | |||
74 | put_nfs_open_context(rdata->args.context); | 72 | put_nfs_open_context(rdata->args.context); |
75 | nfs_readdata_free(rdata); | 73 | nfs_readdata_free(rdata); |
76 | } | 74 | } |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 0a0a2ff767c3..a34fae21fe10 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -87,17 +87,15 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) | |||
87 | return p; | 87 | return p; |
88 | } | 88 | } |
89 | 89 | ||
90 | static void nfs_writedata_free(struct nfs_write_data *p) | 90 | void nfs_writedata_free(struct nfs_write_data *p) |
91 | { | 91 | { |
92 | if (p && (p->pagevec != &p->page_array[0])) | 92 | if (p && (p->pagevec != &p->page_array[0])) |
93 | kfree(p->pagevec); | 93 | kfree(p->pagevec); |
94 | mempool_free(p, nfs_wdata_mempool); | 94 | mempool_free(p, nfs_wdata_mempool); |
95 | } | 95 | } |
96 | 96 | ||
97 | void nfs_writedata_release(void *data) | 97 | static void nfs_writedata_release(struct nfs_write_data *wdata) |
98 | { | 98 | { |
99 | struct nfs_write_data *wdata = data; | ||
100 | |||
101 | put_nfs_open_context(wdata->args.context); | 99 | put_nfs_open_context(wdata->args.context); |
102 | nfs_writedata_free(wdata); | 100 | nfs_writedata_free(wdata); |
103 | } | 101 | } |
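[Editor's sketch] The NFS hunks above pair the helpers with how the open context is handled: the page-cache paths take their own reference and use the release helper that also drops it, while the direct-I/O paths now borrow the caller's context (args.context = ctx) and therefore call the plain free helpers. A rough standalone C analogy of that pairing, not the NFS API, is shown below.

#include <stdio.h>
#include <stdlib.h>

struct open_ctx { int refcount; };
struct req { struct open_ctx *ctx; };

static struct open_ctx *get_ctx(struct open_ctx *c) { c->refcount++; return c; }
static void put_ctx(struct open_ctx *c) { c->refcount--; }

static void req_free(struct req *r)        /* like nfs_readdata_free()    */
{
    free(r);
}

static void req_release(struct req *r)     /* like nfs_readdata_release() */
{
    put_ctx(r->ctx);                       /* drops the reference it owns */
    req_free(r);
}

int main(void)
{
    struct open_ctx ctx = { .refcount = 1 };

    struct req *pagecache_req = malloc(sizeof(*pagecache_req));
    pagecache_req->ctx = get_ctx(&ctx);    /* took a reference ...        */
    req_release(pagecache_req);            /* ... so release must drop it */

    struct req *direct_req = malloc(sizeof(*direct_req));
    direct_req->ctx = &ctx;                /* borrowed, no extra reference */
    req_free(direct_req);                  /* so only free, never release  */

    printf("refcount=%d\n", ctx.refcount); /* stays balanced at 1 */
    return 0;
}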
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 3d3ddb3f5177..2dfd47714ae5 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -412,8 +412,10 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) | |||
412 | return 0; /* Do not request flush for shadow page cache */ | 412 | return 0; /* Do not request flush for shadow page cache */ |
413 | if (!sb) { | 413 | if (!sb) { |
414 | writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs); | 414 | writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs); |
415 | if (!writer) | 415 | if (!writer) { |
416 | nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs); | ||
416 | return -EROFS; | 417 | return -EROFS; |
418 | } | ||
417 | sb = writer->s_super; | 419 | sb = writer->s_super; |
418 | } | 420 | } |
419 | 421 | ||
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 8b5e4778cf28..51ff3d0a4ee2 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -1859,12 +1859,26 @@ static void nilfs_end_page_io(struct page *page, int err) | |||
1859 | if (!page) | 1859 | if (!page) |
1860 | return; | 1860 | return; |
1861 | 1861 | ||
1862 | if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) | 1862 | if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) { |
1863 | /* | 1863 | /* |
1864 | * For b-tree node pages, this function may be called twice | 1864 | * For b-tree node pages, this function may be called twice |
1865 | * or more because they might be split in a segment. | 1865 | * or more because they might be split in a segment. |
1866 | */ | 1866 | */ |
1867 | if (PageDirty(page)) { | ||
1868 | /* | ||
1869 | * For pages holding split b-tree node buffers, dirty | ||
1870 | * flag on the buffers may be cleared discretely. | ||
1871 | * In that case, the page is once redirtied for | ||
1872 | * remaining buffers, and it must be cancelled if | ||
1873 | * all the buffers get cleaned later. | ||
1874 | */ | ||
1875 | lock_page(page); | ||
1876 | if (nilfs_page_buffers_clean(page)) | ||
1877 | __nilfs_clear_page_dirty(page); | ||
1878 | unlock_page(page); | ||
1879 | } | ||
1867 | return; | 1880 | return; |
1881 | } | ||
1868 | 1882 | ||
1869 | __nilfs_end_page_io(page, err); | 1883 | __nilfs_end_page_io(page, err); |
1870 | } | 1884 | } |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 9edcde4974aa..f9a3e8942669 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -1914,7 +1914,8 @@ static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec, | |||
1914 | * immediately to their right. | 1914 | * immediately to their right. |
1915 | */ | 1915 | */ |
1916 | left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos); | 1916 | left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos); |
1917 | if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) { | 1917 | if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) { |
1918 | BUG_ON(right_child_el->l_tree_depth); | ||
1918 | BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1); | 1919 | BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1); |
1919 | left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos); | 1920 | left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos); |
1920 | } | 1921 | } |
@@ -2476,15 +2477,37 @@ out_ret_path: | |||
2476 | return ret; | 2477 | return ret; |
2477 | } | 2478 | } |
2478 | 2479 | ||
2479 | static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, | 2480 | static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, |
2480 | struct ocfs2_path *path) | 2481 | int subtree_index, struct ocfs2_path *path) |
2481 | { | 2482 | { |
2482 | int i, idx; | 2483 | int i, idx, ret; |
2483 | struct ocfs2_extent_rec *rec; | 2484 | struct ocfs2_extent_rec *rec; |
2484 | struct ocfs2_extent_list *el; | 2485 | struct ocfs2_extent_list *el; |
2485 | struct ocfs2_extent_block *eb; | 2486 | struct ocfs2_extent_block *eb; |
2486 | u32 range; | 2487 | u32 range; |
2487 | 2488 | ||
2489 | /* | ||
2490 | * In normal tree rotation process, we will never touch the | ||
2491 | * tree branch above subtree_index and ocfs2_extend_rotate_transaction | ||
2492 | * doesn't reserve the credits for them either. | ||
2493 | * | ||
2494 | * But we do have a special case here which will update the rightmost | ||
2495 | * records for all the bh in the path. | ||
2496 | * So we have to allocate extra credits and access them. | ||
2497 | */ | ||
2498 | ret = ocfs2_extend_trans(handle, | ||
2499 | handle->h_buffer_credits + subtree_index); | ||
2500 | if (ret) { | ||
2501 | mlog_errno(ret); | ||
2502 | goto out; | ||
2503 | } | ||
2504 | |||
2505 | ret = ocfs2_journal_access_path(inode, handle, path); | ||
2506 | if (ret) { | ||
2507 | mlog_errno(ret); | ||
2508 | goto out; | ||
2509 | } | ||
2510 | |||
2488 | /* Path should always be rightmost. */ | 2511 | /* Path should always be rightmost. */ |
2489 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; | 2512 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; |
2490 | BUG_ON(eb->h_next_leaf_blk != 0ULL); | 2513 | BUG_ON(eb->h_next_leaf_blk != 0ULL); |
@@ -2505,6 +2528,8 @@ static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, | |||
2505 | 2528 | ||
2506 | ocfs2_journal_dirty(handle, path->p_node[i].bh); | 2529 | ocfs2_journal_dirty(handle, path->p_node[i].bh); |
2507 | } | 2530 | } |
2531 | out: | ||
2532 | return ret; | ||
2508 | } | 2533 | } |
2509 | 2534 | ||
2510 | static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, | 2535 | static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, |
@@ -2717,7 +2742,12 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, | |||
2717 | if (del_right_subtree) { | 2742 | if (del_right_subtree) { |
2718 | ocfs2_unlink_subtree(inode, handle, left_path, right_path, | 2743 | ocfs2_unlink_subtree(inode, handle, left_path, right_path, |
2719 | subtree_index, dealloc); | 2744 | subtree_index, dealloc); |
2720 | ocfs2_update_edge_lengths(inode, handle, left_path); | 2745 | ret = ocfs2_update_edge_lengths(inode, handle, subtree_index, |
2746 | left_path); | ||
2747 | if (ret) { | ||
2748 | mlog_errno(ret); | ||
2749 | goto out; | ||
2750 | } | ||
2721 | 2751 | ||
2722 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | 2752 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; |
2723 | ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); | 2753 | ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); |
@@ -3034,7 +3064,12 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, | |||
3034 | 3064 | ||
3035 | ocfs2_unlink_subtree(inode, handle, left_path, path, | 3065 | ocfs2_unlink_subtree(inode, handle, left_path, path, |
3036 | subtree_index, dealloc); | 3066 | subtree_index, dealloc); |
3037 | ocfs2_update_edge_lengths(inode, handle, left_path); | 3067 | ret = ocfs2_update_edge_lengths(inode, handle, subtree_index, |
3068 | left_path); | ||
3069 | if (ret) { | ||
3070 | mlog_errno(ret); | ||
3071 | goto out; | ||
3072 | } | ||
3038 | 3073 | ||
3039 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | 3074 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; |
3040 | ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); | 3075 | ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index b2c52b3a1484..b401654011a2 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -193,6 +193,7 @@ static int ocfs2_get_block(struct inode *inode, sector_t iblock, | |||
193 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | 193 | (unsigned long long)OCFS2_I(inode)->ip_blkno); |
194 | mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters); | 194 | mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters); |
195 | dump_stack(); | 195 | dump_stack(); |
196 | goto bail; | ||
196 | } | 197 | } |
197 | 198 | ||
198 | past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); | 199 | past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); |
@@ -894,18 +895,17 @@ struct ocfs2_write_cluster_desc { | |||
894 | */ | 895 | */ |
895 | unsigned c_new; | 896 | unsigned c_new; |
896 | unsigned c_unwritten; | 897 | unsigned c_unwritten; |
898 | unsigned c_needs_zero; | ||
897 | }; | 899 | }; |
898 | 900 | ||
899 | static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d) | ||
900 | { | ||
901 | return d->c_new || d->c_unwritten; | ||
902 | } | ||
903 | |||
904 | struct ocfs2_write_ctxt { | 901 | struct ocfs2_write_ctxt { |
905 | /* Logical cluster position / len of write */ | 902 | /* Logical cluster position / len of write */ |
906 | u32 w_cpos; | 903 | u32 w_cpos; |
907 | u32 w_clen; | 904 | u32 w_clen; |
908 | 905 | ||
906 | /* First cluster allocated in a nonsparse extend */ | ||
907 | u32 w_first_new_cpos; | ||
908 | |||
909 | struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; | 909 | struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; |
910 | 910 | ||
911 | /* | 911 | /* |
@@ -983,6 +983,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, | |||
983 | return -ENOMEM; | 983 | return -ENOMEM; |
984 | 984 | ||
985 | wc->w_cpos = pos >> osb->s_clustersize_bits; | 985 | wc->w_cpos = pos >> osb->s_clustersize_bits; |
986 | wc->w_first_new_cpos = UINT_MAX; | ||
986 | cend = (pos + len - 1) >> osb->s_clustersize_bits; | 987 | cend = (pos + len - 1) >> osb->s_clustersize_bits; |
987 | wc->w_clen = cend - wc->w_cpos + 1; | 988 | wc->w_clen = cend - wc->w_cpos + 1; |
988 | get_bh(di_bh); | 989 | get_bh(di_bh); |
@@ -1217,20 +1218,18 @@ out: | |||
1217 | */ | 1218 | */ |
1218 | static int ocfs2_write_cluster(struct address_space *mapping, | 1219 | static int ocfs2_write_cluster(struct address_space *mapping, |
1219 | u32 phys, unsigned int unwritten, | 1220 | u32 phys, unsigned int unwritten, |
1221 | unsigned int should_zero, | ||
1220 | struct ocfs2_alloc_context *data_ac, | 1222 | struct ocfs2_alloc_context *data_ac, |
1221 | struct ocfs2_alloc_context *meta_ac, | 1223 | struct ocfs2_alloc_context *meta_ac, |
1222 | struct ocfs2_write_ctxt *wc, u32 cpos, | 1224 | struct ocfs2_write_ctxt *wc, u32 cpos, |
1223 | loff_t user_pos, unsigned user_len) | 1225 | loff_t user_pos, unsigned user_len) |
1224 | { | 1226 | { |
1225 | int ret, i, new, should_zero = 0; | 1227 | int ret, i, new; |
1226 | u64 v_blkno, p_blkno; | 1228 | u64 v_blkno, p_blkno; |
1227 | struct inode *inode = mapping->host; | 1229 | struct inode *inode = mapping->host; |
1228 | struct ocfs2_extent_tree et; | 1230 | struct ocfs2_extent_tree et; |
1229 | 1231 | ||
1230 | new = phys == 0 ? 1 : 0; | 1232 | new = phys == 0 ? 1 : 0; |
1231 | if (new || unwritten) | ||
1232 | should_zero = 1; | ||
1233 | |||
1234 | if (new) { | 1233 | if (new) { |
1235 | u32 tmp_pos; | 1234 | u32 tmp_pos; |
1236 | 1235 | ||
@@ -1301,7 +1300,7 @@ static int ocfs2_write_cluster(struct address_space *mapping, | |||
1301 | if (tmpret) { | 1300 | if (tmpret) { |
1302 | mlog_errno(tmpret); | 1301 | mlog_errno(tmpret); |
1303 | if (ret == 0) | 1302 | if (ret == 0) |
1304 | tmpret = ret; | 1303 | ret = tmpret; |
1305 | } | 1304 | } |
1306 | } | 1305 | } |
1307 | 1306 | ||
@@ -1341,7 +1340,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping, | |||
1341 | local_len = osb->s_clustersize - cluster_off; | 1340 | local_len = osb->s_clustersize - cluster_off; |
1342 | 1341 | ||
1343 | ret = ocfs2_write_cluster(mapping, desc->c_phys, | 1342 | ret = ocfs2_write_cluster(mapping, desc->c_phys, |
1344 | desc->c_unwritten, data_ac, meta_ac, | 1343 | desc->c_unwritten, |
1344 | desc->c_needs_zero, | ||
1345 | data_ac, meta_ac, | ||
1345 | wc, desc->c_cpos, pos, local_len); | 1346 | wc, desc->c_cpos, pos, local_len); |
1346 | if (ret) { | 1347 | if (ret) { |
1347 | mlog_errno(ret); | 1348 | mlog_errno(ret); |
@@ -1391,14 +1392,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, | |||
1391 | * newly allocated cluster. | 1392 | * newly allocated cluster. |
1392 | */ | 1393 | */ |
1393 | desc = &wc->w_desc[0]; | 1394 | desc = &wc->w_desc[0]; |
1394 | if (ocfs2_should_zero_cluster(desc)) | 1395 | if (desc->c_needs_zero) |
1395 | ocfs2_figure_cluster_boundaries(osb, | 1396 | ocfs2_figure_cluster_boundaries(osb, |
1396 | desc->c_cpos, | 1397 | desc->c_cpos, |
1397 | &wc->w_target_from, | 1398 | &wc->w_target_from, |
1398 | NULL); | 1399 | NULL); |
1399 | 1400 | ||
1400 | desc = &wc->w_desc[wc->w_clen - 1]; | 1401 | desc = &wc->w_desc[wc->w_clen - 1]; |
1401 | if (ocfs2_should_zero_cluster(desc)) | 1402 | if (desc->c_needs_zero) |
1402 | ocfs2_figure_cluster_boundaries(osb, | 1403 | ocfs2_figure_cluster_boundaries(osb, |
1403 | desc->c_cpos, | 1404 | desc->c_cpos, |
1404 | NULL, | 1405 | NULL, |
@@ -1466,13 +1467,28 @@ static int ocfs2_populate_write_desc(struct inode *inode, | |||
1466 | phys++; | 1467 | phys++; |
1467 | } | 1468 | } |
1468 | 1469 | ||
1470 | /* | ||
1471 | * If w_first_new_cpos is < UINT_MAX, we have a non-sparse | ||
1472 | * file that got extended. w_first_new_cpos tells us | ||
1473 | * where the newly allocated clusters are so we can | ||
1474 | * zero them. | ||
1475 | */ | ||
1476 | if (desc->c_cpos >= wc->w_first_new_cpos) { | ||
1477 | BUG_ON(phys == 0); | ||
1478 | desc->c_needs_zero = 1; | ||
1479 | } | ||
1480 | |||
1469 | desc->c_phys = phys; | 1481 | desc->c_phys = phys; |
1470 | if (phys == 0) { | 1482 | if (phys == 0) { |
1471 | desc->c_new = 1; | 1483 | desc->c_new = 1; |
1484 | desc->c_needs_zero = 1; | ||
1472 | *clusters_to_alloc = *clusters_to_alloc + 1; | 1485 | *clusters_to_alloc = *clusters_to_alloc + 1; |
1473 | } | 1486 | } |
1474 | if (ext_flags & OCFS2_EXT_UNWRITTEN) | 1487 | |
1488 | if (ext_flags & OCFS2_EXT_UNWRITTEN) { | ||
1475 | desc->c_unwritten = 1; | 1489 | desc->c_unwritten = 1; |
1490 | desc->c_needs_zero = 1; | ||
1491 | } | ||
1476 | 1492 | ||
1477 | num_clusters--; | 1493 | num_clusters--; |
1478 | } | 1494 | } |
@@ -1632,10 +1648,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos, | |||
1632 | if (newsize <= i_size_read(inode)) | 1648 | if (newsize <= i_size_read(inode)) |
1633 | return 0; | 1649 | return 0; |
1634 | 1650 | ||
1635 | ret = ocfs2_extend_no_holes(inode, newsize, newsize - len); | 1651 | ret = ocfs2_extend_no_holes(inode, newsize, pos); |
1636 | if (ret) | 1652 | if (ret) |
1637 | mlog_errno(ret); | 1653 | mlog_errno(ret); |
1638 | 1654 | ||
1655 | wc->w_first_new_cpos = | ||
1656 | ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); | ||
1657 | |||
1639 | return ret; | 1658 | return ret; |
1640 | } | 1659 | } |
1641 | 1660 | ||
@@ -1644,7 +1663,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, | |||
1644 | struct page **pagep, void **fsdata, | 1663 | struct page **pagep, void **fsdata, |
1645 | struct buffer_head *di_bh, struct page *mmap_page) | 1664 | struct buffer_head *di_bh, struct page *mmap_page) |
1646 | { | 1665 | { |
1647 | int ret, credits = OCFS2_INODE_UPDATE_CREDITS; | 1666 | int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; |
1648 | unsigned int clusters_to_alloc, extents_to_split; | 1667 | unsigned int clusters_to_alloc, extents_to_split; |
1649 | struct ocfs2_write_ctxt *wc; | 1668 | struct ocfs2_write_ctxt *wc; |
1650 | struct inode *inode = mapping->host; | 1669 | struct inode *inode = mapping->host; |
@@ -1722,8 +1741,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, | |||
1722 | 1741 | ||
1723 | } | 1742 | } |
1724 | 1743 | ||
1725 | ocfs2_set_target_boundaries(osb, wc, pos, len, | 1744 | /* |
1726 | clusters_to_alloc + extents_to_split); | 1745 | * We have to zero sparse allocated clusters, unwritten extent clusters, |
1746 | * and non-sparse clusters we just extended. For non-sparse writes, | ||
1747 | * we know zeros will only be needed in the first and/or last cluster. | ||
1748 | */ | ||
1749 | if (clusters_to_alloc || extents_to_split || | ||
1750 | wc->w_desc[0].c_needs_zero || | ||
1751 | wc->w_desc[wc->w_clen - 1].c_needs_zero) | ||
1752 | cluster_of_pages = 1; | ||
1753 | else | ||
1754 | cluster_of_pages = 0; | ||
1755 | |||
1756 | ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages); | ||
1727 | 1757 | ||
1728 | handle = ocfs2_start_trans(osb, credits); | 1758 | handle = ocfs2_start_trans(osb, credits); |
1729 | if (IS_ERR(handle)) { | 1759 | if (IS_ERR(handle)) { |
@@ -1756,8 +1786,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, | |||
1756 | * extent. | 1786 | * extent. |
1757 | */ | 1787 | */ |
1758 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, | 1788 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, |
1759 | clusters_to_alloc + extents_to_split, | 1789 | cluster_of_pages, mmap_page); |
1760 | mmap_page); | ||
1761 | if (ret) { | 1790 | if (ret) { |
1762 | mlog_errno(ret); | 1791 | mlog_errno(ret); |
1763 | goto out_quota; | 1792 | goto out_quota; |
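[Editor's sketch] The aops.c hunks above replace the derived "new || unwritten" test with an explicit c_needs_zero flag that is also set for clusters at or past the first cluster added by a non-sparse extend. The small standalone C sketch below restates that rule with made-up descriptor fields so the three cases are easy to check.

#include <stdio.h>
#include <limits.h>

struct cluster_desc {
    unsigned int cpos;       /* logical cluster position        */
    unsigned int phys;       /* 0 means not yet allocated       */
    int unwritten;           /* part of an unwritten extent     */
    int needs_zero;
};

static void mark_needs_zero(struct cluster_desc *d, unsigned int first_new_cpos)
{
    d->needs_zero = 0;
    if (d->cpos >= first_new_cpos)  /* tail of a non-sparse extension */
        d->needs_zero = 1;
    if (d->phys == 0)               /* freshly allocated cluster      */
        d->needs_zero = 1;
    if (d->unwritten)               /* unwritten extent cluster       */
        d->needs_zero = 1;
}

int main(void)
{
    struct cluster_desc d[3] = {
        { .cpos = 10, .phys = 500, .unwritten = 0 },  /* plain overwrite */
        { .cpos = 11, .phys = 0,   .unwritten = 0 },  /* new allocation  */
        { .cpos = 12, .phys = 501, .unwritten = 1 },  /* unwritten       */
    };
    unsigned int first_new_cpos = UINT_MAX;           /* sparse-file case */

    for (int i = 0; i < 3; i++) {
        mark_needs_zero(&d[i], first_new_cpos);
        printf("cpos %u needs_zero=%d\n", d[i].cpos, d[i].needs_zero);
    }
    return 0;
}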
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index b574431a031d..2f28b7de2c8d 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c | |||
@@ -310,22 +310,19 @@ out_attach: | |||
310 | return ret; | 310 | return ret; |
311 | } | 311 | } |
312 | 312 | ||
313 | static DEFINE_SPINLOCK(dentry_list_lock); | 313 | DEFINE_SPINLOCK(dentry_list_lock); |
314 | 314 | ||
315 | /* We limit the number of dentry locks to drop in one go. We have | 315 | /* We limit the number of dentry locks to drop in one go. We have |
316 | * this limit so that we don't starve other users of ocfs2_wq. */ | 316 | * this limit so that we don't starve other users of ocfs2_wq. */ |
317 | #define DL_INODE_DROP_COUNT 64 | 317 | #define DL_INODE_DROP_COUNT 64 |
318 | 318 | ||
319 | /* Drop inode references from dentry locks */ | 319 | /* Drop inode references from dentry locks */ |
320 | void ocfs2_drop_dl_inodes(struct work_struct *work) | 320 | static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count) |
321 | { | 321 | { |
322 | struct ocfs2_super *osb = container_of(work, struct ocfs2_super, | ||
323 | dentry_lock_work); | ||
324 | struct ocfs2_dentry_lock *dl; | 322 | struct ocfs2_dentry_lock *dl; |
325 | int drop_count = DL_INODE_DROP_COUNT; | ||
326 | 323 | ||
327 | spin_lock(&dentry_list_lock); | 324 | spin_lock(&dentry_list_lock); |
328 | while (osb->dentry_lock_list && drop_count--) { | 325 | while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) { |
329 | dl = osb->dentry_lock_list; | 326 | dl = osb->dentry_lock_list; |
330 | osb->dentry_lock_list = dl->dl_next; | 327 | osb->dentry_lock_list = dl->dl_next; |
331 | spin_unlock(&dentry_list_lock); | 328 | spin_unlock(&dentry_list_lock); |
@@ -333,11 +330,32 @@ void ocfs2_drop_dl_inodes(struct work_struct *work) | |||
333 | kfree(dl); | 330 | kfree(dl); |
334 | spin_lock(&dentry_list_lock); | 331 | spin_lock(&dentry_list_lock); |
335 | } | 332 | } |
336 | if (osb->dentry_lock_list) | 333 | spin_unlock(&dentry_list_lock); |
334 | } | ||
335 | |||
336 | void ocfs2_drop_dl_inodes(struct work_struct *work) | ||
337 | { | ||
338 | struct ocfs2_super *osb = container_of(work, struct ocfs2_super, | ||
339 | dentry_lock_work); | ||
340 | |||
341 | __ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT); | ||
342 | /* | ||
343 | * Don't queue dropping if umount is in progress. We flush the | ||
344 | * list in ocfs2_dismount_volume | ||
345 | */ | ||
346 | spin_lock(&dentry_list_lock); | ||
347 | if (osb->dentry_lock_list && | ||
348 | !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED)) | ||
337 | queue_work(ocfs2_wq, &osb->dentry_lock_work); | 349 | queue_work(ocfs2_wq, &osb->dentry_lock_work); |
338 | spin_unlock(&dentry_list_lock); | 350 | spin_unlock(&dentry_list_lock); |
339 | } | 351 | } |
340 | 352 | ||
353 | /* Flush the whole work queue */ | ||
354 | void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb) | ||
355 | { | ||
356 | __ocfs2_drop_dl_inodes(osb, -1); | ||
357 | } | ||
358 | |||
341 | /* | 359 | /* |
342 | * ocfs2_dentry_iput() and friends. | 360 | * ocfs2_dentry_iput() and friends. |
343 | * | 361 | * |
@@ -368,7 +386,8 @@ static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb, | |||
368 | /* We leave dropping of inode reference to ocfs2_wq as that can | 386 | /* We leave dropping of inode reference to ocfs2_wq as that can |
369 | * possibly lead to inode deletion which gets tricky */ | 387 | * possibly lead to inode deletion which gets tricky */ |
370 | spin_lock(&dentry_list_lock); | 388 | spin_lock(&dentry_list_lock); |
371 | if (!osb->dentry_lock_list) | 389 | if (!osb->dentry_lock_list && |
390 | !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED)) | ||
372 | queue_work(ocfs2_wq, &osb->dentry_lock_work); | 391 | queue_work(ocfs2_wq, &osb->dentry_lock_work); |
373 | dl->dl_next = osb->dentry_lock_list; | 392 | dl->dl_next = osb->dentry_lock_list; |
374 | osb->dentry_lock_list = dl; | 393 | osb->dentry_lock_list = dl; |
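[Editor's sketch] The dcache.c hunk above gives __ocfs2_drop_dl_inodes() a drop budget where a negative count means "drain everything", which is what ocfs2_drop_all_dl_inodes() uses at dismount. The standalone C sketch below (with a made-up list type) shows only that loop condition and the two ways it is used.

#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; };

static struct entry *drop_entries(struct entry *head, int drop_count)
{
    /* negative budget: drain everything; otherwise drop at most drop_count */
    while (head && (drop_count < 0 || drop_count--)) {
        struct entry *e = head;
        head = e->next;
        free(e);
    }
    return head;                  /* whatever survived the budget */
}

static struct entry *build_list(int n)
{
    struct entry *head = NULL;
    while (n--) {
        struct entry *e = malloc(sizeof(*e));
        e->next = head;
        head = e;
    }
    return head;
}

int main(void)
{
    struct entry *l = build_list(100);
    l = drop_entries(l, 64);      /* batched pass, like DL_INODE_DROP_COUNT */
    l = drop_entries(l, -1);      /* drain the rest, like the dismount flush */
    printf("remaining: %s\n", l ? "some" : "none");
    return 0;
}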
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h index faa12e75f98d..f5dd1789acf1 100644 --- a/fs/ocfs2/dcache.h +++ b/fs/ocfs2/dcache.h | |||
@@ -49,10 +49,13 @@ struct ocfs2_dentry_lock { | |||
49 | int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, | 49 | int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, |
50 | u64 parent_blkno); | 50 | u64 parent_blkno); |
51 | 51 | ||
52 | extern spinlock_t dentry_list_lock; | ||
53 | |||
52 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, | 54 | void ocfs2_dentry_lock_put(struct ocfs2_super *osb, |
53 | struct ocfs2_dentry_lock *dl); | 55 | struct ocfs2_dentry_lock *dl); |
54 | 56 | ||
55 | void ocfs2_drop_dl_inodes(struct work_struct *work); | 57 | void ocfs2_drop_dl_inodes(struct work_struct *work); |
58 | void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb); | ||
56 | 59 | ||
57 | struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, | 60 | struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, |
58 | int skip_unhashed); | 61 | int skip_unhashed); |
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index d07ddbe4b283..81eff8e58322 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c | |||
@@ -103,7 +103,6 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | |||
103 | lock->ast_pending, lock->ml.type); | 103 | lock->ast_pending, lock->ml.type); |
104 | BUG(); | 104 | BUG(); |
105 | } | 105 | } |
106 | BUG_ON(!list_empty(&lock->ast_list)); | ||
107 | if (lock->ast_pending) | 106 | if (lock->ast_pending) |
108 | mlog(0, "lock has an ast getting flushed right now\n"); | 107 | mlog(0, "lock has an ast getting flushed right now\n"); |
109 | 108 | ||
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index bcb9260c3735..43e6e3280569 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -1118,7 +1118,7 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | |||
1118 | 1118 | ||
1119 | mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", | 1119 | mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", |
1120 | dlm->name, res->lockname.len, res->lockname.name, | 1120 | dlm->name, res->lockname.len, res->lockname.name, |
1121 | orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery", | 1121 | orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery", |
1122 | send_to); | 1122 | send_to); |
1123 | 1123 | ||
1124 | /* send it */ | 1124 | /* send it */ |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 62442e413a00..aa501d3f93f1 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -1851,6 +1851,7 @@ relock: | |||
1851 | if (ret) | 1851 | if (ret) |
1852 | goto out_dio; | 1852 | goto out_dio; |
1853 | 1853 | ||
1854 | count = ocount; | ||
1854 | ret = generic_write_checks(file, ppos, &count, | 1855 | ret = generic_write_checks(file, ppos, &count, |
1855 | S_ISBLK(inode->i_mode)); | 1856 | S_ISBLK(inode->i_mode)); |
1856 | if (ret) | 1857 | if (ret) |
@@ -1918,8 +1919,10 @@ out_sems: | |||
1918 | 1919 | ||
1919 | mutex_unlock(&inode->i_mutex); | 1920 | mutex_unlock(&inode->i_mutex); |
1920 | 1921 | ||
1922 | if (written) | ||
1923 | ret = written; | ||
1921 | mlog_exit(ret); | 1924 | mlog_exit(ret); |
1922 | return written ? written : ret; | 1925 | return ret; |
1923 | } | 1926 | } |
1924 | 1927 | ||
1925 | static int ocfs2_splice_to_file(struct pipe_inode_info *pipe, | 1928 | static int ocfs2_splice_to_file(struct pipe_inode_info *pipe, |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index f033760ecbea..c48b93ac6b65 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -1954,10 +1954,16 @@ void ocfs2_orphan_scan_init(struct ocfs2_super *osb) | |||
1954 | os->os_osb = osb; | 1954 | os->os_osb = osb; |
1955 | os->os_count = 0; | 1955 | os->os_count = 0; |
1956 | os->os_seqno = 0; | 1956 | os->os_seqno = 0; |
1957 | os->os_scantime = CURRENT_TIME; | ||
1958 | mutex_init(&os->os_lock); | 1957 | mutex_init(&os->os_lock); |
1959 | INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work); | 1958 | INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work); |
1959 | } | ||
1960 | 1960 | ||
1961 | void ocfs2_orphan_scan_start(struct ocfs2_super *osb) | ||
1962 | { | ||
1963 | struct ocfs2_orphan_scan *os; | ||
1964 | |||
1965 | os = &osb->osb_orphan_scan; | ||
1966 | os->os_scantime = CURRENT_TIME; | ||
1961 | if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) | 1967 | if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb)) |
1962 | atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); | 1968 | atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); |
1963 | else { | 1969 | else { |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 5432c7f79cc6..2c3222aec622 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -145,6 +145,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb, | |||
145 | 145 | ||
146 | /* Exported only for the journal struct init code in super.c. Do not call. */ | 146 | /* Exported only for the journal struct init code in super.c. Do not call. */ |
147 | void ocfs2_orphan_scan_init(struct ocfs2_super *osb); | 147 | void ocfs2_orphan_scan_init(struct ocfs2_super *osb); |
148 | void ocfs2_orphan_scan_start(struct ocfs2_super *osb); | ||
148 | void ocfs2_orphan_scan_stop(struct ocfs2_super *osb); | 149 | void ocfs2_orphan_scan_stop(struct ocfs2_super *osb); |
149 | void ocfs2_orphan_scan_exit(struct ocfs2_super *osb); | 150 | void ocfs2_orphan_scan_exit(struct ocfs2_super *osb); |
150 | 151 | ||
@@ -329,20 +330,27 @@ int ocfs2_journal_dirty(handle_t *handle, | |||
329 | /* extended attribute block update */ | 330 | /* extended attribute block update */ |
330 | #define OCFS2_XATTR_BLOCK_UPDATE_CREDITS 1 | 331 | #define OCFS2_XATTR_BLOCK_UPDATE_CREDITS 1 |
331 | 332 | ||
333 | /* Update of a single quota block */ | ||
334 | #define OCFS2_QUOTA_BLOCK_UPDATE_CREDITS 1 | ||
335 | |||
332 | /* global quotafile inode update, data block */ | 336 | /* global quotafile inode update, data block */ |
333 | #define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) | 337 | #define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + \ |
338 | OCFS2_QUOTA_BLOCK_UPDATE_CREDITS) | ||
334 | 339 | ||
340 | #define OCFS2_LOCAL_QINFO_WRITE_CREDITS OCFS2_QUOTA_BLOCK_UPDATE_CREDITS | ||
335 | /* | 341 | /* |
336 | * The two writes below can accidentally see global info dirty due | 342 | * The two writes below can accidentally see global info dirty due |
337 | * to set_info() quotactl so make them prepared for the writes. | 343 | * to set_info() quotactl so make them prepared for the writes. |
338 | */ | 344 | */ |
339 | /* quota data block, global info */ | 345 | /* quota data block, global info */ |
340 | /* Write to local quota file */ | 346 | /* Write to local quota file */ |
341 | #define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + 1) | 347 | #define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \ |
348 | OCFS2_QUOTA_BLOCK_UPDATE_CREDITS) | ||
342 | 349 | ||
343 | /* global quota data block, local quota data block, global quota inode, | 350 | /* global quota data block, local quota data block, global quota inode, |
344 | * global quota info */ | 351 | * global quota info */ |
345 | #define OCFS2_QSYNC_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 3) | 352 | #define OCFS2_QSYNC_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \ |
353 | 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS) | ||
346 | 354 | ||
347 | static inline int ocfs2_quota_trans_credits(struct super_block *sb) | 355 | static inline int ocfs2_quota_trans_credits(struct super_block *sb) |
348 | { | 356 | { |
@@ -355,11 +363,6 @@ static inline int ocfs2_quota_trans_credits(struct super_block *sb) | |||
355 | return credits; | 363 | return credits; |
356 | } | 364 | } |
357 | 365 | ||
358 | /* Number of credits needed for removing quota structure from file */ | ||
359 | int ocfs2_calc_qdel_credits(struct super_block *sb, int type); | ||
360 | /* Number of credits needed for initialization of new quota structure */ | ||
361 | int ocfs2_calc_qinit_credits(struct super_block *sb, int type); | ||
362 | |||
363 | /* group extend. inode update and last group update. */ | 366 | /* group extend. inode update and last group update. */ |
364 | #define OCFS2_GROUP_EXTEND_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) | 367 | #define OCFS2_GROUP_EXTEND_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) |
365 | 368 | ||
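[Editor's sketch] The journal.h hunk above rebuilds the quota credit macros around a single OCFS2_QUOTA_BLOCK_UPDATE_CREDITS unit. The plain C below just works the arithmetic through, assuming OCFS2_INODE_UPDATE_CREDITS is 1 (its usual value, stated here as an assumption, not taken from this diff).

#include <stdio.h>

#define OCFS2_INODE_UPDATE_CREDITS        1   /* assumed for this sketch */
#define OCFS2_QUOTA_BLOCK_UPDATE_CREDITS  1

#define OCFS2_QINFO_WRITE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + \
                                   OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
#define OCFS2_LOCAL_QINFO_WRITE_CREDITS OCFS2_QUOTA_BLOCK_UPDATE_CREDITS
#define OCFS2_QWRITE_CREDITS (OCFS2_QINFO_WRITE_CREDITS + \
                              OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)
#define OCFS2_QSYNC_CREDITS  (OCFS2_QINFO_WRITE_CREDITS + \
                              2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS)

int main(void)
{
    /* With the assumed base values: QINFO_WRITE=2, QWRITE=3, QSYNC=4. */
    printf("QINFO_WRITE=%d QWRITE=%d QSYNC=%d\n",
           OCFS2_QINFO_WRITE_CREDITS, OCFS2_QWRITE_CREDITS,
           OCFS2_QSYNC_CREDITS);
    return 0;
}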
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index c9345ebb8493..39e1d5a39505 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -224,10 +224,12 @@ enum ocfs2_mount_options | |||
224 | OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */ | 224 | OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */ |
225 | }; | 225 | }; |
226 | 226 | ||
227 | #define OCFS2_OSB_SOFT_RO 0x0001 | 227 | #define OCFS2_OSB_SOFT_RO 0x0001 |
228 | #define OCFS2_OSB_HARD_RO 0x0002 | 228 | #define OCFS2_OSB_HARD_RO 0x0002 |
229 | #define OCFS2_OSB_ERROR_FS 0x0004 | 229 | #define OCFS2_OSB_ERROR_FS 0x0004 |
230 | #define OCFS2_DEFAULT_ATIME_QUANTUM 60 | 230 | #define OCFS2_OSB_DROP_DENTRY_LOCK_IMMED 0x0008 |
231 | |||
232 | #define OCFS2_DEFAULT_ATIME_QUANTUM 60 | ||
231 | 233 | ||
232 | struct ocfs2_journal; | 234 | struct ocfs2_journal; |
233 | struct ocfs2_slot_info; | 235 | struct ocfs2_slot_info; |
@@ -490,6 +492,18 @@ static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb, | |||
490 | spin_unlock(&osb->osb_lock); | 492 | spin_unlock(&osb->osb_lock); |
491 | } | 493 | } |
492 | 494 | ||
495 | |||
496 | static inline unsigned long ocfs2_test_osb_flag(struct ocfs2_super *osb, | ||
497 | unsigned long flag) | ||
498 | { | ||
499 | unsigned long ret; | ||
500 | |||
501 | spin_lock(&osb->osb_lock); | ||
502 | ret = osb->osb_flags & flag; | ||
503 | spin_unlock(&osb->osb_lock); | ||
504 | return ret; | ||
505 | } | ||
506 | |||
493 | static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb, | 507 | static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb, |
494 | int hard) | 508 | int hard) |
495 | { | 509 | { |
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index 7365e2e08706..3fb96fcd4c81 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h | |||
@@ -50,7 +50,6 @@ struct ocfs2_mem_dqinfo { | |||
50 | unsigned int dqi_chunks; /* Number of chunks in local quota file */ | 50 | unsigned int dqi_chunks; /* Number of chunks in local quota file */ |
51 | unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */ | 51 | unsigned int dqi_blocks; /* Number of blocks allocated for local quota file */ |
52 | unsigned int dqi_syncms; /* How often should we sync with other nodes */ | 52 | unsigned int dqi_syncms; /* How often should we sync with other nodes */ |
53 | unsigned int dqi_syncjiff; /* Precomputed dqi_syncms in jiffies */ | ||
54 | struct list_head dqi_chunk; /* List of chunks */ | 53 | struct list_head dqi_chunk; /* List of chunks */ |
55 | struct inode *dqi_gqinode; /* Global quota file inode */ | 54 | struct inode *dqi_gqinode; /* Global quota file inode */ |
56 | struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */ | 55 | struct ocfs2_lock_res dqi_gqlock; /* Lock protecting quota information structure */ |
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index edfa60cd155c..bf7742d0ee3b 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
@@ -69,6 +69,7 @@ static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot) | |||
69 | d->dqb_curspace = cpu_to_le64(m->dqb_curspace); | 69 | d->dqb_curspace = cpu_to_le64(m->dqb_curspace); |
70 | d->dqb_btime = cpu_to_le64(m->dqb_btime); | 70 | d->dqb_btime = cpu_to_le64(m->dqb_btime); |
71 | d->dqb_itime = cpu_to_le64(m->dqb_itime); | 71 | d->dqb_itime = cpu_to_le64(m->dqb_itime); |
72 | d->dqb_pad1 = d->dqb_pad2 = 0; | ||
72 | } | 73 | } |
73 | 74 | ||
74 | static int ocfs2_global_is_id(void *dp, struct dquot *dquot) | 75 | static int ocfs2_global_is_id(void *dp, struct dquot *dquot) |
@@ -211,14 +212,13 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, | |||
211 | 212 | ||
212 | mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA); | 213 | mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA); |
213 | if (gqinode->i_size < off + len) { | 214 | if (gqinode->i_size < off + len) { |
214 | down_write(&OCFS2_I(gqinode)->ip_alloc_sem); | 215 | loff_t rounded_end = |
215 | err = ocfs2_extend_no_holes(gqinode, off + len, off); | 216 | ocfs2_align_bytes_to_blocks(sb, off + len); |
216 | up_write(&OCFS2_I(gqinode)->ip_alloc_sem); | 217 | |
217 | if (err < 0) | 218 | /* Space is already allocated in ocfs2_global_read_dquot() */ |
218 | goto out; | ||
219 | err = ocfs2_simple_size_update(gqinode, | 219 | err = ocfs2_simple_size_update(gqinode, |
220 | oinfo->dqi_gqi_bh, | 220 | oinfo->dqi_gqi_bh, |
221 | off + len); | 221 | rounded_end); |
222 | if (err < 0) | 222 | if (err < 0) |
223 | goto out; | 223 | goto out; |
224 | new = 1; | 224 | new = 1; |
@@ -234,7 +234,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, | |||
234 | } | 234 | } |
235 | if (err) { | 235 | if (err) { |
236 | mlog_errno(err); | 236 | mlog_errno(err); |
237 | return err; | 237 | goto out; |
238 | } | 238 | } |
239 | lock_buffer(bh); | 239 | lock_buffer(bh); |
240 | if (new) | 240 | if (new) |
@@ -342,7 +342,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type) | |||
342 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); | 342 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); |
343 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); | 343 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); |
344 | oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms); | 344 | oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms); |
345 | oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms); | ||
346 | oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); | 345 | oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); |
347 | oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); | 346 | oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); |
348 | oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); | 347 | oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); |
@@ -352,7 +351,7 @@ int ocfs2_global_read_info(struct super_block *sb, int type) | |||
352 | oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); | 351 | oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); |
353 | INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn); | 352 | INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn); |
354 | queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, | 353 | queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, |
355 | oinfo->dqi_syncjiff); | 354 | msecs_to_jiffies(oinfo->dqi_syncms)); |
356 | 355 | ||
357 | out_err: | 356 | out_err: |
358 | mlog_exit(status); | 357 | mlog_exit(status); |
@@ -402,13 +401,36 @@ int ocfs2_global_write_info(struct super_block *sb, int type) | |||
402 | return err; | 401 | return err; |
403 | } | 402 | } |
404 | 403 | ||
404 | static int ocfs2_global_qinit_alloc(struct super_block *sb, int type) | ||
405 | { | ||
406 | struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; | ||
407 | |||
408 | /* | ||
409 | * We may need to allocate tree blocks and a leaf block but not the | ||
410 | * root block | ||
411 | */ | ||
412 | return oinfo->dqi_gi.dqi_qtree_depth; | ||
413 | } | ||
414 | |||
415 | static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type) | ||
416 | { | ||
417 | /* We modify all the allocated blocks, tree root, and info block */ | ||
418 | return (ocfs2_global_qinit_alloc(sb, type) + 2) * | ||
419 | OCFS2_QUOTA_BLOCK_UPDATE_CREDITS; | ||
420 | } | ||
421 | |||
405 | /* Read in information from global quota file and acquire a reference to it. | 422 | /* Read in information from global quota file and acquire a reference to it. |
406 | * dquot_acquire() has already started the transaction and locked quota file */ | 423 | * dquot_acquire() has already started the transaction and locked quota file */ |
407 | int ocfs2_global_read_dquot(struct dquot *dquot) | 424 | int ocfs2_global_read_dquot(struct dquot *dquot) |
408 | { | 425 | { |
409 | int err, err2, ex = 0; | 426 | int err, err2, ex = 0; |
410 | struct ocfs2_mem_dqinfo *info = | 427 | struct super_block *sb = dquot->dq_sb; |
411 | sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; | 428 | int type = dquot->dq_type; |
429 | struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv; | ||
430 | struct ocfs2_super *osb = OCFS2_SB(sb); | ||
431 | struct inode *gqinode = info->dqi_gqinode; | ||
432 | int need_alloc = ocfs2_global_qinit_alloc(sb, type); | ||
433 | handle_t *handle = NULL; | ||
412 | 434 | ||
413 | err = ocfs2_qinfo_lock(info, 0); | 435 | err = ocfs2_qinfo_lock(info, 0); |
414 | if (err < 0) | 436 | if (err < 0) |
@@ -419,14 +441,33 @@ int ocfs2_global_read_dquot(struct dquot *dquot) | |||
419 | OCFS2_DQUOT(dquot)->dq_use_count++; | 441 | OCFS2_DQUOT(dquot)->dq_use_count++; |
420 | OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; | 442 | OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; |
421 | OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes; | 443 | OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes; |
444 | ocfs2_qinfo_unlock(info, 0); | ||
445 | |||
422 | if (!dquot->dq_off) { /* No real quota entry? */ | 446 | if (!dquot->dq_off) { /* No real quota entry? */ |
423 | /* Upgrade to exclusive lock for allocation */ | ||
424 | ocfs2_qinfo_unlock(info, 0); | ||
425 | err = ocfs2_qinfo_lock(info, 1); | ||
426 | if (err < 0) | ||
427 | goto out_qlock; | ||
428 | ex = 1; | 447 | ex = 1; |
448 | /* | ||
449 | * Add blocks to quota file before we start a transaction since | ||
450 | * locking allocators ranks above a transaction start | ||
451 | */ | ||
452 | WARN_ON(journal_current_handle()); | ||
453 | down_write(&OCFS2_I(gqinode)->ip_alloc_sem); | ||
454 | err = ocfs2_extend_no_holes(gqinode, | ||
455 | gqinode->i_size + (need_alloc << sb->s_blocksize_bits), | ||
456 | gqinode->i_size); | ||
457 | up_write(&OCFS2_I(gqinode)->ip_alloc_sem); | ||
458 | if (err < 0) | ||
459 | goto out; | ||
429 | } | 460 | } |
461 | |||
462 | handle = ocfs2_start_trans(osb, | ||
463 | ocfs2_calc_global_qinit_credits(sb, type)); | ||
464 | if (IS_ERR(handle)) { | ||
465 | err = PTR_ERR(handle); | ||
466 | goto out; | ||
467 | } | ||
468 | err = ocfs2_qinfo_lock(info, ex); | ||
469 | if (err < 0) | ||
470 | goto out_trans; | ||
430 | err = qtree_write_dquot(&info->dqi_gi, dquot); | 471 | err = qtree_write_dquot(&info->dqi_gi, dquot); |
431 | if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) { | 472 | if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) { |
432 | err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type); | 473 | err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type); |
@@ -438,6 +479,9 @@ out_qlock: | |||
438 | ocfs2_qinfo_unlock(info, 1); | 479 | ocfs2_qinfo_unlock(info, 1); |
439 | else | 480 | else |
440 | ocfs2_qinfo_unlock(info, 0); | 481 | ocfs2_qinfo_unlock(info, 0); |
482 | out_trans: | ||
483 | if (handle) | ||
484 | ocfs2_commit_trans(osb, handle); | ||
441 | out: | 485 | out: |
442 | if (err < 0) | 486 | if (err < 0) |
443 | mlog_errno(err); | 487 | mlog_errno(err); |
@@ -607,7 +651,7 @@ static void qsync_work_fn(struct work_struct *work) | |||
607 | 651 | ||
608 | dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type); | 652 | dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type); |
609 | queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, | 653 | queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, |
610 | oinfo->dqi_syncjiff); | 654 | msecs_to_jiffies(oinfo->dqi_syncms)); |
611 | } | 655 | } |
612 | 656 | ||
613 | /* | 657 | /* |
@@ -635,20 +679,18 @@ out: | |||
635 | return status; | 679 | return status; |
636 | } | 680 | } |
637 | 681 | ||
638 | int ocfs2_calc_qdel_credits(struct super_block *sb, int type) | 682 | static int ocfs2_calc_qdel_credits(struct super_block *sb, int type) |
639 | { | 683 | { |
640 | struct ocfs2_mem_dqinfo *oinfo; | 684 | struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; |
641 | int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, | 685 | /* |
642 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA }; | 686 | * We modify tree, leaf block, global info, local chunk header, |
643 | 687 | * global and local inode; OCFS2_QINFO_WRITE_CREDITS already | |
644 | if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type])) | 688 | * accounts for inode update |
645 | return 0; | 689 | */ |
646 | 690 | return (oinfo->dqi_gi.dqi_qtree_depth + 2) * | |
647 | oinfo = sb_dqinfo(sb, type)->dqi_priv; | 691 | OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + |
648 | /* We modify tree, leaf block, global info, local chunk header, | 692 | OCFS2_QINFO_WRITE_CREDITS + |
649 | * global and local inode */ | 693 | OCFS2_INODE_UPDATE_CREDITS; |
650 | return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 + | ||
651 | 2 * OCFS2_INODE_UPDATE_CREDITS; | ||
652 | } | 694 | } |
653 | 695 | ||
654 | static int ocfs2_release_dquot(struct dquot *dquot) | 696 | static int ocfs2_release_dquot(struct dquot *dquot) |
@@ -680,33 +722,10 @@ out: | |||
680 | return status; | 722 | return status; |
681 | } | 723 | } |
682 | 724 | ||
683 | int ocfs2_calc_qinit_credits(struct super_block *sb, int type) | ||
684 | { | ||
685 | struct ocfs2_mem_dqinfo *oinfo; | ||
686 | int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, | ||
687 | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA }; | ||
688 | struct ocfs2_dinode *lfe, *gfe; | ||
689 | |||
690 | if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type])) | ||
691 | return 0; | ||
692 | |||
693 | oinfo = sb_dqinfo(sb, type)->dqi_priv; | ||
694 | gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data; | ||
695 | lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data; | ||
696 | /* We can extend local file + global file. In local file we | ||
697 | * can modify info, chunk header block and dquot block. In | ||
698 | * global file we can modify info, tree and leaf block */ | ||
699 | return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) + | ||
700 | ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) + | ||
701 | 3 + oinfo->dqi_gi.dqi_qtree_depth + 2; | ||
702 | } | ||
703 | |||
704 | static int ocfs2_acquire_dquot(struct dquot *dquot) | 725 | static int ocfs2_acquire_dquot(struct dquot *dquot) |
705 | { | 726 | { |
706 | handle_t *handle; | ||
707 | struct ocfs2_mem_dqinfo *oinfo = | 727 | struct ocfs2_mem_dqinfo *oinfo = |
708 | sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; | 728 | sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; |
709 | struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); | ||
710 | int status = 0; | 729 | int status = 0; |
711 | 730 | ||
712 | mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); | 731 | mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); |
@@ -715,16 +734,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot) | |||
715 | status = ocfs2_lock_global_qf(oinfo, 1); | 734 | status = ocfs2_lock_global_qf(oinfo, 1); |
716 | if (status < 0) | 735 | if (status < 0) |
717 | goto out; | 736 | goto out; |
718 | handle = ocfs2_start_trans(osb, | ||
719 | ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type)); | ||
720 | if (IS_ERR(handle)) { | ||
721 | status = PTR_ERR(handle); | ||
722 | mlog_errno(status); | ||
723 | goto out_ilock; | ||
724 | } | ||
725 | status = dquot_acquire(dquot); | 737 | status = dquot_acquire(dquot); |
726 | ocfs2_commit_trans(osb, handle); | ||
727 | out_ilock: | ||
728 | ocfs2_unlock_global_qf(oinfo, 1); | 738 | ocfs2_unlock_global_qf(oinfo, 1); |
729 | out: | 739 | out: |
730 | mlog_exit(status); | 740 | mlog_exit(status); |
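[Editor's sketch] The quota_global.c hunks above move the file extension in front of the transaction start because, per the patch comment, locking allocators ranks above a transaction start. The tiny standalone sketch below (pthread mutexes, not the ocfs2 locks) only illustrates that fixed acquisition order.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t allocator_lock   = PTHREAD_MUTEX_INITIALIZER; /* rank 1 */
static pthread_mutex_t transaction_lock = PTHREAD_MUTEX_INITIALIZER; /* rank 2 */

static void extend_quota_file(void)
{
    pthread_mutex_lock(&allocator_lock);     /* allocate blocks first ...  */
    /* ... grow the quota file here ... */
    pthread_mutex_unlock(&allocator_lock);

    pthread_mutex_lock(&transaction_lock);   /* ... then start the handle  */
    /* ... journal the quota structure ... */
    pthread_mutex_unlock(&transaction_lock);
}

int main(void)
{
    extend_quota_file();
    puts("ordering respected: allocator before transaction");
    return 0;
}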
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 5a460fa82553..bdb09cb6e1fe 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "sysfile.h" | 20 | #include "sysfile.h" |
21 | #include "dlmglue.h" | 21 | #include "dlmglue.h" |
22 | #include "quota.h" | 22 | #include "quota.h" |
23 | #include "uptodate.h" | ||
23 | 24 | ||
24 | /* Number of local quota structures per block */ | 25 | /* Number of local quota structures per block */ |
25 | static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) | 26 | static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) |
@@ -100,7 +101,8 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh, | |||
100 | handle_t *handle; | 101 | handle_t *handle; |
101 | int status; | 102 | int status; |
102 | 103 | ||
103 | handle = ocfs2_start_trans(OCFS2_SB(sb), 1); | 104 | handle = ocfs2_start_trans(OCFS2_SB(sb), |
105 | OCFS2_QUOTA_BLOCK_UPDATE_CREDITS); | ||
104 | if (IS_ERR(handle)) { | 106 | if (IS_ERR(handle)) { |
105 | status = PTR_ERR(handle); | 107 | status = PTR_ERR(handle); |
106 | mlog_errno(status); | 108 | mlog_errno(status); |
@@ -610,7 +612,8 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, | |||
610 | goto out_bh; | 612 | goto out_bh; |
611 | /* Mark quota file as clean if we are recovering quota file of | 613 | /* Mark quota file as clean if we are recovering quota file of |
612 | * some other node. */ | 614 | * some other node. */ |
613 | handle = ocfs2_start_trans(osb, 1); | 615 | handle = ocfs2_start_trans(osb, |
616 | OCFS2_LOCAL_QINFO_WRITE_CREDITS); | ||
614 | if (IS_ERR(handle)) { | 617 | if (IS_ERR(handle)) { |
615 | status = PTR_ERR(handle); | 618 | status = PTR_ERR(handle); |
616 | mlog_errno(status); | 619 | mlog_errno(status); |
@@ -940,7 +943,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( | |||
940 | struct ocfs2_local_disk_chunk *dchunk; | 943 | struct ocfs2_local_disk_chunk *dchunk; |
941 | int status; | 944 | int status; |
942 | handle_t *handle; | 945 | handle_t *handle; |
943 | struct buffer_head *bh = NULL; | 946 | struct buffer_head *bh = NULL, *dbh = NULL; |
944 | u64 p_blkno; | 947 | u64 p_blkno; |
945 | 948 | ||
946 | /* We are protected by dqio_sem so no locking needed */ | 949 | /* We are protected by dqio_sem so no locking needed */ |
@@ -964,32 +967,35 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( | |||
964 | mlog_errno(status); | 967 | mlog_errno(status); |
965 | goto out; | 968 | goto out; |
966 | } | 969 | } |
970 | /* Local quota info and two new blocks we initialize */ | ||
971 | handle = ocfs2_start_trans(OCFS2_SB(sb), | ||
972 | OCFS2_LOCAL_QINFO_WRITE_CREDITS + | ||
973 | 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS); | ||
974 | if (IS_ERR(handle)) { | ||
975 | status = PTR_ERR(handle); | ||
976 | mlog_errno(status); | ||
977 | goto out; | ||
978 | } | ||
967 | 979 | ||
980 | /* Initialize chunk header */ | ||
968 | down_read(&OCFS2_I(lqinode)->ip_alloc_sem); | 981 | down_read(&OCFS2_I(lqinode)->ip_alloc_sem); |
969 | status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks, | 982 | status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks, |
970 | &p_blkno, NULL, NULL); | 983 | &p_blkno, NULL, NULL); |
971 | up_read(&OCFS2_I(lqinode)->ip_alloc_sem); | 984 | up_read(&OCFS2_I(lqinode)->ip_alloc_sem); |
972 | if (status < 0) { | 985 | if (status < 0) { |
973 | mlog_errno(status); | 986 | mlog_errno(status); |
974 | goto out; | 987 | goto out_trans; |
975 | } | 988 | } |
976 | bh = sb_getblk(sb, p_blkno); | 989 | bh = sb_getblk(sb, p_blkno); |
977 | if (!bh) { | 990 | if (!bh) { |
978 | status = -ENOMEM; | 991 | status = -ENOMEM; |
979 | mlog_errno(status); | 992 | mlog_errno(status); |
980 | goto out; | 993 | goto out_trans; |
981 | } | 994 | } |
982 | dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; | 995 | dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; |
983 | 996 | ocfs2_set_new_buffer_uptodate(lqinode, bh); | |
984 | handle = ocfs2_start_trans(OCFS2_SB(sb), 2); | ||
985 | if (IS_ERR(handle)) { | ||
986 | status = PTR_ERR(handle); | ||
987 | mlog_errno(status); | ||
988 | goto out; | ||
989 | } | ||
990 | |||
991 | status = ocfs2_journal_access_dq(handle, lqinode, bh, | 997 | status = ocfs2_journal_access_dq(handle, lqinode, bh, |
992 | OCFS2_JOURNAL_ACCESS_WRITE); | 998 | OCFS2_JOURNAL_ACCESS_CREATE); |
993 | if (status < 0) { | 999 | if (status < 0) { |
994 | mlog_errno(status); | 1000 | mlog_errno(status); |
995 | goto out_trans; | 1001 | goto out_trans; |
@@ -999,7 +1005,6 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( | |||
999 | memset(dchunk->dqc_bitmap, 0, | 1005 | memset(dchunk->dqc_bitmap, 0, |
1000 | sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) - | 1006 | sb->s_blocksize - sizeof(struct ocfs2_local_disk_chunk) - |
1001 | OCFS2_QBLK_RESERVED_SPACE); | 1007 | OCFS2_QBLK_RESERVED_SPACE); |
1002 | set_buffer_uptodate(bh); | ||
1003 | unlock_buffer(bh); | 1008 | unlock_buffer(bh); |
1004 | status = ocfs2_journal_dirty(handle, bh); | 1009 | status = ocfs2_journal_dirty(handle, bh); |
1005 | if (status < 0) { | 1010 | if (status < 0) { |
@@ -1007,6 +1012,38 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( | |||
1007 | goto out_trans; | 1012 | goto out_trans; |
1008 | } | 1013 | } |
1009 | 1014 | ||
1015 | /* Initialize new block with structures */ | ||
1016 | down_read(&OCFS2_I(lqinode)->ip_alloc_sem); | ||
1017 | status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks + 1, | ||
1018 | &p_blkno, NULL, NULL); | ||
1019 | up_read(&OCFS2_I(lqinode)->ip_alloc_sem); | ||
1020 | if (status < 0) { | ||
1021 | mlog_errno(status); | ||
1022 | goto out_trans; | ||
1023 | } | ||
1024 | dbh = sb_getblk(sb, p_blkno); | ||
1025 | if (!dbh) { | ||
1026 | status = -ENOMEM; | ||
1027 | mlog_errno(status); | ||
1028 | goto out_trans; | ||
1029 | } | ||
1030 | ocfs2_set_new_buffer_uptodate(lqinode, dbh); | ||
1031 | status = ocfs2_journal_access_dq(handle, lqinode, dbh, | ||
1032 | OCFS2_JOURNAL_ACCESS_CREATE); | ||
1033 | if (status < 0) { | ||
1034 | mlog_errno(status); | ||
1035 | goto out_trans; | ||
1036 | } | ||
1037 | lock_buffer(dbh); | ||
1038 | memset(dbh->b_data, 0, sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE); | ||
1039 | unlock_buffer(dbh); | ||
1040 | status = ocfs2_journal_dirty(handle, dbh); | ||
1041 | if (status < 0) { | ||
1042 | mlog_errno(status); | ||
1043 | goto out_trans; | ||
1044 | } | ||
1045 | |||
1046 | /* Update local quotafile info */ | ||
1010 | oinfo->dqi_blocks += 2; | 1047 | oinfo->dqi_blocks += 2; |
1011 | oinfo->dqi_chunks++; | 1048 | oinfo->dqi_chunks++; |
1012 | status = ocfs2_local_write_info(sb, type); | 1049 | status = ocfs2_local_write_info(sb, type); |
@@ -1031,6 +1068,7 @@ out_trans: | |||
1031 | ocfs2_commit_trans(OCFS2_SB(sb), handle); | 1068 | ocfs2_commit_trans(OCFS2_SB(sb), handle); |
1032 | out: | 1069 | out: |
1033 | brelse(bh); | 1070 | brelse(bh); |
1071 | brelse(dbh); | ||
1034 | kmem_cache_free(ocfs2_qf_chunk_cachep, chunk); | 1072 | kmem_cache_free(ocfs2_qf_chunk_cachep, chunk); |
1035 | return ERR_PTR(status); | 1073 | return ERR_PTR(status); |
1036 | } | 1074 | } |
@@ -1048,6 +1086,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( | |||
1048 | struct ocfs2_local_disk_chunk *dchunk; | 1086 | struct ocfs2_local_disk_chunk *dchunk; |
1049 | int epb = ol_quota_entries_per_block(sb); | 1087 | int epb = ol_quota_entries_per_block(sb); |
1050 | unsigned int chunk_blocks; | 1088 | unsigned int chunk_blocks; |
1089 | struct buffer_head *bh; | ||
1090 | u64 p_blkno; | ||
1051 | int status; | 1091 | int status; |
1052 | handle_t *handle; | 1092 | handle_t *handle; |
1053 | 1093 | ||
@@ -1075,12 +1115,49 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( | |||
1075 | mlog_errno(status); | 1115 | mlog_errno(status); |
1076 | goto out; | 1116 | goto out; |
1077 | } | 1117 | } |
1078 | handle = ocfs2_start_trans(OCFS2_SB(sb), 2); | 1118 | |
1119 | /* Get buffer from the just added block */ | ||
1120 | down_read(&OCFS2_I(lqinode)->ip_alloc_sem); | ||
1121 | status = ocfs2_extent_map_get_blocks(lqinode, oinfo->dqi_blocks, | ||
1122 | &p_blkno, NULL, NULL); | ||
1123 | up_read(&OCFS2_I(lqinode)->ip_alloc_sem); | ||
1124 | if (status < 0) { | ||
1125 | mlog_errno(status); | ||
1126 | goto out; | ||
1127 | } | ||
1128 | bh = sb_getblk(sb, p_blkno); | ||
1129 | if (!bh) { | ||
1130 | status = -ENOMEM; | ||
1131 | mlog_errno(status); | ||
1132 | goto out; | ||
1133 | } | ||
1134 | ocfs2_set_new_buffer_uptodate(lqinode, bh); | ||
1135 | |||
1136 | /* Local quota info, chunk header and the new block we initialize */ | ||
1137 | handle = ocfs2_start_trans(OCFS2_SB(sb), | ||
1138 | OCFS2_LOCAL_QINFO_WRITE_CREDITS + | ||
1139 | 2 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS); | ||
1079 | if (IS_ERR(handle)) { | 1140 | if (IS_ERR(handle)) { |
1080 | status = PTR_ERR(handle); | 1141 | status = PTR_ERR(handle); |
1081 | mlog_errno(status); | 1142 | mlog_errno(status); |
1082 | goto out; | 1143 | goto out; |
1083 | } | 1144 | } |
1145 | /* Zero created block */ | ||
1146 | status = ocfs2_journal_access_dq(handle, lqinode, bh, | ||
1147 | OCFS2_JOURNAL_ACCESS_CREATE); | ||
1148 | if (status < 0) { | ||
1149 | mlog_errno(status); | ||
1150 | goto out_trans; | ||
1151 | } | ||
1152 | lock_buffer(bh); | ||
1153 | memset(bh->b_data, 0, sb->s_blocksize); | ||
1154 | unlock_buffer(bh); | ||
1155 | status = ocfs2_journal_dirty(handle, bh); | ||
1156 | if (status < 0) { | ||
1157 | mlog_errno(status); | ||
1158 | goto out_trans; | ||
1159 | } | ||
1160 | /* Update chunk header */ | ||
1084 | status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh, | 1161 | status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh, |
1085 | OCFS2_JOURNAL_ACCESS_WRITE); | 1162 | OCFS2_JOURNAL_ACCESS_WRITE); |
1086 | if (status < 0) { | 1163 | if (status < 0) { |
@@ -1097,6 +1174,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( | |||
1097 | mlog_errno(status); | 1174 | mlog_errno(status); |
1098 | goto out_trans; | 1175 | goto out_trans; |
1099 | } | 1176 | } |
1177 | /* Update file header */ | ||
1100 | oinfo->dqi_blocks++; | 1178 | oinfo->dqi_blocks++; |
1101 | status = ocfs2_local_write_info(sb, type); | 1179 | status = ocfs2_local_write_info(sb, type); |
1102 | if (status < 0) { | 1180 | if (status < 0) { |
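The local quota hunks above converge on one journaling pattern for freshly allocated quota blocks: reserve every credit up front (local quota info plus each block to be initialized), mark the new buffer uptodate with ocfs2_set_new_buffer_uptodate(), request OCFS2_JOURNAL_ACCESS_CREATE rather than ACCESS_WRITE, and only then zero and dirty the block inside the already-running transaction. A minimal sketch of that pattern follows; the helper name is illustrative and only the ocfs2 calls visible in the hunks are assumed to exist.

    /* Sketch only: initialize one newly allocated quota block inside a
     * transaction that was started with enough credits for it. */
    static int ocfs2_init_new_quota_block(handle_t *handle,
                                          struct inode *lqinode,
                                          struct buffer_head *bh)
    {
            int status;

            /* New block: there are no valid on-disk contents to read back */
            ocfs2_set_new_buffer_uptodate(lqinode, bh);

            /* CREATE access tells the journal the old contents are garbage */
            status = ocfs2_journal_access_dq(handle, lqinode, bh,
                                             OCFS2_JOURNAL_ACCESS_CREATE);
            if (status < 0)
                    return status;

            lock_buffer(bh);
            memset(bh->b_data, 0, bh->b_size);
            unlock_buffer(bh);

            return ocfs2_journal_dirty(handle, bh);
    }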
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 3f661376a2de..e49c41050264 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c | |||
@@ -17,6 +17,7 @@ | |||
17 | * General Public License for more details. | 17 | * General Public License for more details. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/kernel.h> | ||
20 | #include <linux/crc32.h> | 21 | #include <linux/crc32.h> |
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | 23 | ||
@@ -153,7 +154,7 @@ static int status_map[] = { | |||
153 | 154 | ||
154 | static int dlm_status_to_errno(enum dlm_status status) | 155 | static int dlm_status_to_errno(enum dlm_status status) |
155 | { | 156 | { |
156 | BUG_ON(status > (sizeof(status_map) / sizeof(status_map[0]))); | 157 | BUG_ON(status < 0 || status >= ARRAY_SIZE(status_map)); |
157 | 158 | ||
158 | return status_map[status]; | 159 | return status_map[status]; |
159 | } | 160 | } |
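The o2cb fix replaces an off-by-one, sign-unaware bounds check with ARRAY_SIZE() from <linux/kernel.h> (hence the new include). The old test let status equal the number of table entries and accepted negative values; the new one rejects both. A reduced example of the corrected idiom, with a hypothetical lookup table:

    #include <linux/kernel.h>       /* ARRAY_SIZE() */
    #include <linux/bug.h>          /* BUG_ON() */

    static const int status_map[] = { 0, -1, -2 };  /* illustrative values */

    static int map_status(int status)
    {
            /* '>=' (not '>') plus a lower bound; both were missing before */
            BUG_ON(status < 0 || status >= ARRAY_SIZE(status_map));
            return status_map[status];
    }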
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 7efb349fb9bd..b0ee0fdf799a 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -777,6 +777,7 @@ static int ocfs2_sb_probe(struct super_block *sb, | |||
777 | } | 777 | } |
778 | di = (struct ocfs2_dinode *) (*bh)->b_data; | 778 | di = (struct ocfs2_dinode *) (*bh)->b_data; |
779 | memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats)); | 779 | memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats)); |
780 | spin_lock_init(&stats->b_lock); | ||
780 | status = ocfs2_verify_volume(di, *bh, blksize, stats); | 781 | status = ocfs2_verify_volume(di, *bh, blksize, stats); |
781 | if (status >= 0) | 782 | if (status >= 0) |
782 | goto bail; | 783 | goto bail; |
@@ -1182,7 +1183,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) | |||
1182 | wake_up(&osb->osb_mount_event); | 1183 | wake_up(&osb->osb_mount_event); |
1183 | 1184 | ||
1184 | /* Start this when the mount is almost sure of being successful */ | 1185 | /* Start this when the mount is almost sure of being successful */ |
1185 | ocfs2_orphan_scan_init(osb); | 1186 | ocfs2_orphan_scan_start(osb); |
1186 | 1187 | ||
1187 | mlog_exit(status); | 1188 | mlog_exit(status); |
1188 | return status; | 1189 | return status; |
@@ -1213,14 +1214,27 @@ static int ocfs2_get_sb(struct file_system_type *fs_type, | |||
1213 | mnt); | 1214 | mnt); |
1214 | } | 1215 | } |
1215 | 1216 | ||
1217 | static void ocfs2_kill_sb(struct super_block *sb) | ||
1218 | { | ||
1219 | struct ocfs2_super *osb = OCFS2_SB(sb); | ||
1220 | |||
1221 | /* Prevent further queueing of inode drop events */ | ||
1222 | spin_lock(&dentry_list_lock); | ||
1223 | ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED); | ||
1224 | spin_unlock(&dentry_list_lock); | ||
1225 | /* Wait for work to finish and/or remove it */ | ||
1226 | cancel_work_sync(&osb->dentry_lock_work); | ||
1227 | |||
1228 | kill_block_super(sb); | ||
1229 | } | ||
1230 | |||
1216 | static struct file_system_type ocfs2_fs_type = { | 1231 | static struct file_system_type ocfs2_fs_type = { |
1217 | .owner = THIS_MODULE, | 1232 | .owner = THIS_MODULE, |
1218 | .name = "ocfs2", | 1233 | .name = "ocfs2", |
1219 | .get_sb = ocfs2_get_sb, /* is this called when we mount | 1234 | .get_sb = ocfs2_get_sb, /* is this called when we mount |
1220 | * the fs? */ | 1235 | * the fs? */ |
1221 | .kill_sb = kill_block_super, /* set to the generic one | 1236 | .kill_sb = ocfs2_kill_sb, |
1222 | * right now, but do we | 1237 | |
1223 | * need to change that? */ | ||
1224 | .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, | 1238 | .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, |
1225 | .next = NULL | 1239 | .next = NULL |
1226 | }; | 1240 | }; |
@@ -1819,6 +1833,12 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) | |||
1819 | 1833 | ||
1820 | debugfs_remove(osb->osb_ctxt); | 1834 | debugfs_remove(osb->osb_ctxt); |
1821 | 1835 | ||
1836 | /* | ||
1837 | * Flush inode dropping work queue so that deletes are | ||
1838 | * performed while the filesystem is still working | ||
1839 | */ | ||
1840 | ocfs2_drop_all_dl_inodes(osb); | ||
1841 | |||
1822 | /* Orphan scan should be stopped as early as possible */ | 1842 | /* Orphan scan should be stopped as early as possible */ |
1823 | ocfs2_orphan_scan_stop(osb); | 1843 | ocfs2_orphan_scan_stop(osb); |
1824 | 1844 | ||
@@ -1981,6 +2001,8 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
1981 | snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u", | 2001 | snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u", |
1982 | MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); | 2002 | MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); |
1983 | 2003 | ||
2004 | ocfs2_orphan_scan_init(osb); | ||
2005 | |||
1984 | status = ocfs2_recovery_init(osb); | 2006 | status = ocfs2_recovery_init(osb); |
1985 | if (status) { | 2007 | if (status) { |
1986 | mlog(ML_ERROR, "Unable to initialize recovery state\n"); | 2008 | mlog(ML_ERROR, "Unable to initialize recovery state\n"); |
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index ba320e250747..d1a27cda984f 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -1052,7 +1052,8 @@ static int ocfs2_xattr_block_get(struct inode *inode, | |||
1052 | struct ocfs2_xattr_block *xb; | 1052 | struct ocfs2_xattr_block *xb; |
1053 | struct ocfs2_xattr_value_root *xv; | 1053 | struct ocfs2_xattr_value_root *xv; |
1054 | size_t size; | 1054 | size_t size; |
1055 | int ret = -ENODATA, name_offset, name_len, block_off, i; | 1055 | int ret = -ENODATA, name_offset, name_len, i; |
1056 | int uninitialized_var(block_off); | ||
1056 | 1057 | ||
1057 | xs->bucket = ocfs2_xattr_bucket_new(inode); | 1058 | xs->bucket = ocfs2_xattr_bucket_new(inode); |
1058 | if (!xs->bucket) { | 1059 | if (!xs->bucket) { |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 3ce5ae9e3d2d..175db258942f 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -234,23 +234,20 @@ static int check_mem_permission(struct task_struct *task) | |||
234 | 234 | ||
235 | struct mm_struct *mm_for_maps(struct task_struct *task) | 235 | struct mm_struct *mm_for_maps(struct task_struct *task) |
236 | { | 236 | { |
237 | struct mm_struct *mm = get_task_mm(task); | 237 | struct mm_struct *mm; |
238 | if (!mm) | 238 | |
239 | if (mutex_lock_killable(&task->cred_guard_mutex)) | ||
239 | return NULL; | 240 | return NULL; |
240 | down_read(&mm->mmap_sem); | 241 | |
241 | task_lock(task); | 242 | mm = get_task_mm(task); |
242 | if (task->mm != mm) | 243 | if (mm && mm != current->mm && |
243 | goto out; | 244 | !ptrace_may_access(task, PTRACE_MODE_READ)) { |
244 | if (task->mm != current->mm && | 245 | mmput(mm); |
245 | __ptrace_may_access(task, PTRACE_MODE_READ) < 0) | 246 | mm = NULL; |
246 | goto out; | 247 | } |
247 | task_unlock(task); | 248 | mutex_unlock(&task->cred_guard_mutex); |
249 | |||
248 | return mm; | 250 | return mm; |
249 | out: | ||
250 | task_unlock(task); | ||
251 | up_read(&mm->mmap_sem); | ||
252 | mmput(mm); | ||
253 | return NULL; | ||
254 | } | 251 | } |
255 | 252 | ||
256 | static int proc_pid_cmdline(struct task_struct *task, char * buffer) | 253 | static int proc_pid_cmdline(struct task_struct *task, char * buffer) |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 6f61b7cc32e0..9bd8be1d235c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -119,6 +119,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) | |||
119 | mm = mm_for_maps(priv->task); | 119 | mm = mm_for_maps(priv->task); |
120 | if (!mm) | 120 | if (!mm) |
121 | return NULL; | 121 | return NULL; |
122 | down_read(&mm->mmap_sem); | ||
122 | 123 | ||
123 | tail_vma = get_gate_vma(priv->task); | 124 | tail_vma = get_gate_vma(priv->task); |
124 | priv->tail_vma = tail_vma; | 125 | priv->tail_vma = tail_vma; |
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 64a72e2e7650..8f5c05d3dbd3 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -189,6 +189,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) | |||
189 | priv->task = NULL; | 189 | priv->task = NULL; |
190 | return NULL; | 190 | return NULL; |
191 | } | 191 | } |
192 | down_read(&mm->mmap_sem); | ||
192 | 193 | ||
193 | /* start from the Nth VMA */ | 194 | /* start from the Nth VMA */ |
194 | for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) | 195 | for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) |
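mm_for_maps() now serializes against exec via cred_guard_mutex and performs the ptrace_may_access() check without holding mmap_sem; taking mmap_sem becomes the caller's job, which is exactly what the two m_start() hunks add. A sketch of how a /proc reader is expected to use the helper after this change (error handling trimmed, names taken from the hunks):

    struct mm_struct *mm;

    mm = mm_for_maps(priv->task);   /* may return NULL: no mm or no permission */
    if (!mm)
            return NULL;

    down_read(&mm->mmap_sem);       /* caller, not mm_for_maps(), takes mmap_sem */
    /* ... walk mm->mmap or mm->mm_rb here ... */
    up_read(&mm->mmap_sem);
    mmput(mm);                      /* drop the reference taken by get_task_mm() */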
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 0c93c7ef3d18..965df1227d64 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -770,7 +770,7 @@ xfs_buf_associate_memory( | |||
770 | bp->b_pages = NULL; | 770 | bp->b_pages = NULL; |
771 | bp->b_addr = mem; | 771 | bp->b_addr = mem; |
772 | 772 | ||
773 | rval = _xfs_buf_get_pages(bp, page_count, 0); | 773 | rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK); |
774 | if (rval) | 774 | if (rval) |
775 | return rval; | 775 | return rval; |
776 | 776 | ||
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index db15feb906ff..4ece1906bd41 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -2010,7 +2010,9 @@ xfs_attr_rmtval_get(xfs_da_args_t *args) | |||
2010 | dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); | 2010 | dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); |
2011 | blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); | 2011 | blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); |
2012 | error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, | 2012 | error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, |
2013 | blkcnt, XFS_BUF_LOCK, &bp); | 2013 | blkcnt, |
2014 | XFS_BUF_LOCK | XBF_DONT_BLOCK, | ||
2015 | &bp); | ||
2014 | if (error) | 2016 | if (error) |
2015 | return(error); | 2017 | return(error); |
2016 | 2018 | ||
@@ -2141,8 +2143,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
2141 | dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), | 2143 | dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), |
2142 | blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); | 2144 | blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); |
2143 | 2145 | ||
2144 | bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, | 2146 | bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, blkcnt, |
2145 | blkcnt, XFS_BUF_LOCK); | 2147 | XFS_BUF_LOCK | XBF_DONT_BLOCK); |
2146 | ASSERT(bp); | 2148 | ASSERT(bp); |
2147 | ASSERT(!XFS_BUF_GETERROR(bp)); | 2149 | ASSERT(!XFS_BUF_GETERROR(bp)); |
2148 | 2150 | ||
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 7928b9983c1d..8ee5b5a76a2a 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -6009,7 +6009,7 @@ xfs_getbmap( | |||
6009 | */ | 6009 | */ |
6010 | error = ENOMEM; | 6010 | error = ENOMEM; |
6011 | subnex = 16; | 6011 | subnex = 16; |
6012 | map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL); | 6012 | map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS); |
6013 | if (!map) | 6013 | if (!map) |
6014 | goto out_unlock_ilock; | 6014 | goto out_unlock_ilock; |
6015 | 6015 | ||
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index e9df99574829..26717388acf5 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
@@ -120,8 +120,8 @@ xfs_btree_check_sblock( | |||
120 | XFS_RANDOM_BTREE_CHECK_SBLOCK))) { | 120 | XFS_RANDOM_BTREE_CHECK_SBLOCK))) { |
121 | if (bp) | 121 | if (bp) |
122 | xfs_buftrace("SBTREE ERROR", bp); | 122 | xfs_buftrace("SBTREE ERROR", bp); |
123 | XFS_ERROR_REPORT("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW, | 123 | XFS_CORRUPTION_ERROR("xfs_btree_check_sblock", |
124 | cur->bc_mp); | 124 | XFS_ERRLEVEL_LOW, cur->bc_mp, block); |
125 | return XFS_ERROR(EFSCORRUPTED); | 125 | return XFS_ERROR(EFSCORRUPTED); |
126 | } | 126 | } |
127 | return 0; | 127 | return 0; |
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 9ff6e57a5075..2847bbc1c534 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c | |||
@@ -2201,7 +2201,7 @@ kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */ | |||
2201 | xfs_da_state_t * | 2201 | xfs_da_state_t * |
2202 | xfs_da_state_alloc(void) | 2202 | xfs_da_state_alloc(void) |
2203 | { | 2203 | { |
2204 | return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP); | 2204 | return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS); |
2205 | } | 2205 | } |
2206 | 2206 | ||
2207 | /* | 2207 | /* |
@@ -2261,9 +2261,9 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) | |||
2261 | int off; | 2261 | int off; |
2262 | 2262 | ||
2263 | if (nbuf == 1) | 2263 | if (nbuf == 1) |
2264 | dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP); | 2264 | dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS); |
2265 | else | 2265 | else |
2266 | dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP); | 2266 | dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS); |
2267 | dabuf->dirty = 0; | 2267 | dabuf->dirty = 0; |
2268 | #ifdef XFS_DABUF_DEBUG | 2268 | #ifdef XFS_DABUF_DEBUG |
2269 | dabuf->ra = ra; | 2269 | dabuf->ra = ra; |
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index c657bec6d951..bb1d58eb3982 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c | |||
@@ -256,7 +256,7 @@ xfs_dir_cilookup_result( | |||
256 | !(args->op_flags & XFS_DA_OP_CILOOKUP)) | 256 | !(args->op_flags & XFS_DA_OP_CILOOKUP)) |
257 | return EEXIST; | 257 | return EEXIST; |
258 | 258 | ||
259 | args->value = kmem_alloc(len, KM_MAYFAIL); | 259 | args->value = kmem_alloc(len, KM_NOFS | KM_MAYFAIL); |
260 | if (!args->value) | 260 | if (!args->value) |
261 | return ENOMEM; | 261 | return ENOMEM; |
262 | 262 | ||
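The XBF_DONT_BLOCK and KM_NOFS changes in the xfs hunks above all target the same deadlock class: memory allocated while a transaction or directory/attr operation is in progress must not recurse back into the filesystem through direct reclaim. A fragment showing the allocation side of that rule; the variables are those of the surrounding hunks, not a self-contained function:

    /* Allocation from transaction context: never let reclaim re-enter the fs */
    map = kmem_alloc(subnex * sizeof(*map), KM_NOFS | KM_MAYFAIL);
    if (!map)
            goto out_unlock_ilock;          /* fail the call instead of reclaiming */

    /* Metadata buffer reads issued in the same context get the same treatment */
    error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, blkcnt,
                         XFS_BUF_LOCK | XBF_DONT_BLOCK, &bp);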
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index cbd451bb4848..2d0b3e1da9e6 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -167,17 +167,25 @@ xfs_growfs_data_private( | |||
167 | new = nb - mp->m_sb.sb_dblocks; | 167 | new = nb - mp->m_sb.sb_dblocks; |
168 | oagcount = mp->m_sb.sb_agcount; | 168 | oagcount = mp->m_sb.sb_agcount; |
169 | if (nagcount > oagcount) { | 169 | if (nagcount > oagcount) { |
170 | void *new_perag, *old_perag; | ||
171 | |||
170 | xfs_filestream_flush(mp); | 172 | xfs_filestream_flush(mp); |
173 | |||
174 | new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount, | ||
175 | KM_MAYFAIL); | ||
176 | if (!new_perag) | ||
177 | return XFS_ERROR(ENOMEM); | ||
178 | |||
171 | down_write(&mp->m_peraglock); | 179 | down_write(&mp->m_peraglock); |
172 | mp->m_perag = kmem_realloc(mp->m_perag, | 180 | memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount); |
173 | sizeof(xfs_perag_t) * nagcount, | 181 | old_perag = mp->m_perag; |
174 | sizeof(xfs_perag_t) * oagcount, | 182 | mp->m_perag = new_perag; |
175 | KM_SLEEP); | 183 | |
176 | memset(&mp->m_perag[oagcount], 0, | ||
177 | (nagcount - oagcount) * sizeof(xfs_perag_t)); | ||
178 | mp->m_flags |= XFS_MOUNT_32BITINODES; | 184 | mp->m_flags |= XFS_MOUNT_32BITINODES; |
179 | nagimax = xfs_initialize_perag(mp, nagcount); | 185 | nagimax = xfs_initialize_perag(mp, nagcount); |
180 | up_write(&mp->m_peraglock); | 186 | up_write(&mp->m_peraglock); |
187 | |||
188 | kmem_free(old_perag); | ||
181 | } | 189 | } |
182 | tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); | 190 | tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); |
183 | tp->t_flags |= XFS_TRANS_RESERVE; | 191 | tp->t_flags |= XFS_TRANS_RESERVE; |
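The growfs change swaps kmem_realloc() under m_peraglock for an allocate-copy-publish sequence: the larger per-AG array is allocated (failably) before the write lock is taken, the old contents are copied and the pointer switched while the lock is held, and the old array is freed only after the lock is dropped. The general shape, with hypothetical variable names:

    new = kmem_zalloc(new_count * sizeof(*new), KM_MAYFAIL);
    if (!new)
            return XFS_ERROR(ENOMEM);       /* fail before touching shared state */

    down_write(&mp->m_peraglock);
    memcpy(new, mp->m_perag, old_count * sizeof(*new));
    old = mp->m_perag;
    mp->m_perag = new;                      /* publish under the write lock */
    up_write(&mp->m_peraglock);

    kmem_free(old);                         /* old array is unreachable by now */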
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 5fcec6f020a7..34ec86923f7e 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -64,6 +64,10 @@ xfs_inode_alloc( | |||
64 | ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); | 64 | ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); |
65 | if (!ip) | 65 | if (!ip) |
66 | return NULL; | 66 | return NULL; |
67 | if (inode_init_always(mp->m_super, VFS_I(ip))) { | ||
68 | kmem_zone_free(xfs_inode_zone, ip); | ||
69 | return NULL; | ||
70 | } | ||
67 | 71 | ||
68 | ASSERT(atomic_read(&ip->i_iocount) == 0); | 72 | ASSERT(atomic_read(&ip->i_iocount) == 0); |
69 | ASSERT(atomic_read(&ip->i_pincount) == 0); | 73 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
@@ -105,17 +109,6 @@ xfs_inode_alloc( | |||
105 | #ifdef XFS_DIR2_TRACE | 109 | #ifdef XFS_DIR2_TRACE |
106 | ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); | 110 | ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); |
107 | #endif | 111 | #endif |
108 | /* | ||
109 | * Now initialise the VFS inode. We do this after the xfs_inode | ||
110 | * initialisation as internal failures will result in ->destroy_inode | ||
111 | * being called and that will pass down through the reclaim path and | ||
112 | * free the XFS inode. This path requires the XFS inode to already be | ||
113 | * initialised. Hence if this call fails, the xfs_inode has already | ||
114 | * been freed and we should not reference it at all in the error | ||
115 | * handling. | ||
116 | */ | ||
117 | if (!inode_init_always(mp->m_super, VFS_I(ip))) | ||
118 | return NULL; | ||
119 | 112 | ||
120 | /* prevent anyone from using this yet */ | 113 | /* prevent anyone from using this yet */ |
121 | VFS_I(ip)->i_state = I_NEW|I_LOCK; | 114 | VFS_I(ip)->i_state = I_NEW|I_LOCK; |
@@ -123,6 +116,71 @@ xfs_inode_alloc( | |||
123 | return ip; | 116 | return ip; |
124 | } | 117 | } |
125 | 118 | ||
119 | STATIC void | ||
120 | xfs_inode_free( | ||
121 | struct xfs_inode *ip) | ||
122 | { | ||
123 | switch (ip->i_d.di_mode & S_IFMT) { | ||
124 | case S_IFREG: | ||
125 | case S_IFDIR: | ||
126 | case S_IFLNK: | ||
127 | xfs_idestroy_fork(ip, XFS_DATA_FORK); | ||
128 | break; | ||
129 | } | ||
130 | |||
131 | if (ip->i_afp) | ||
132 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | ||
133 | |||
134 | #ifdef XFS_INODE_TRACE | ||
135 | ktrace_free(ip->i_trace); | ||
136 | #endif | ||
137 | #ifdef XFS_BMAP_TRACE | ||
138 | ktrace_free(ip->i_xtrace); | ||
139 | #endif | ||
140 | #ifdef XFS_BTREE_TRACE | ||
141 | ktrace_free(ip->i_btrace); | ||
142 | #endif | ||
143 | #ifdef XFS_RW_TRACE | ||
144 | ktrace_free(ip->i_rwtrace); | ||
145 | #endif | ||
146 | #ifdef XFS_ILOCK_TRACE | ||
147 | ktrace_free(ip->i_lock_trace); | ||
148 | #endif | ||
149 | #ifdef XFS_DIR2_TRACE | ||
150 | ktrace_free(ip->i_dir_trace); | ||
151 | #endif | ||
152 | |||
153 | if (ip->i_itemp) { | ||
154 | /* | ||
155 | * Only if we are shutting down the fs will we see an | ||
156 | * inode still in the AIL. If it is there, we should remove | ||
157 | * it to prevent a use-after-free from occurring. | ||
158 | */ | ||
159 | xfs_log_item_t *lip = &ip->i_itemp->ili_item; | ||
160 | struct xfs_ail *ailp = lip->li_ailp; | ||
161 | |||
162 | ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || | ||
163 | XFS_FORCED_SHUTDOWN(ip->i_mount)); | ||
164 | if (lip->li_flags & XFS_LI_IN_AIL) { | ||
165 | spin_lock(&ailp->xa_lock); | ||
166 | if (lip->li_flags & XFS_LI_IN_AIL) | ||
167 | xfs_trans_ail_delete(ailp, lip); | ||
168 | else | ||
169 | spin_unlock(&ailp->xa_lock); | ||
170 | } | ||
171 | xfs_inode_item_destroy(ip); | ||
172 | ip->i_itemp = NULL; | ||
173 | } | ||
174 | |||
175 | /* asserts to verify all state is correct here */ | ||
176 | ASSERT(atomic_read(&ip->i_iocount) == 0); | ||
177 | ASSERT(atomic_read(&ip->i_pincount) == 0); | ||
178 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); | ||
179 | ASSERT(completion_done(&ip->i_flush)); | ||
180 | |||
181 | kmem_zone_free(xfs_inode_zone, ip); | ||
182 | } | ||
183 | |||
126 | /* | 184 | /* |
127 | * Check the validity of the inode we just found it the cache | 185 | * Check the validity of the inode we just found it the cache |
128 | */ | 186 | */ |
@@ -167,7 +225,7 @@ xfs_iget_cache_hit( | |||
167 | * errors cleanly, then tag it so it can be set up correctly | 225 | * errors cleanly, then tag it so it can be set up correctly |
168 | * later. | 226 | * later. |
169 | */ | 227 | */ |
170 | if (!inode_init_always(mp->m_super, VFS_I(ip))) { | 228 | if (inode_init_always(mp->m_super, VFS_I(ip))) { |
171 | error = ENOMEM; | 229 | error = ENOMEM; |
172 | goto out_error; | 230 | goto out_error; |
173 | } | 231 | } |
@@ -299,7 +357,8 @@ out_preload_end: | |||
299 | if (lock_flags) | 357 | if (lock_flags) |
300 | xfs_iunlock(ip, lock_flags); | 358 | xfs_iunlock(ip, lock_flags); |
301 | out_destroy: | 359 | out_destroy: |
302 | xfs_destroy_inode(ip); | 360 | __destroy_inode(VFS_I(ip)); |
361 | xfs_inode_free(ip); | ||
303 | return error; | 362 | return error; |
304 | } | 363 | } |
305 | 364 | ||
@@ -504,62 +563,7 @@ xfs_ireclaim( | |||
504 | xfs_qm_dqdetach(ip); | 563 | xfs_qm_dqdetach(ip); |
505 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 564 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); |
506 | 565 | ||
507 | switch (ip->i_d.di_mode & S_IFMT) { | 566 | xfs_inode_free(ip); |
508 | case S_IFREG: | ||
509 | case S_IFDIR: | ||
510 | case S_IFLNK: | ||
511 | xfs_idestroy_fork(ip, XFS_DATA_FORK); | ||
512 | break; | ||
513 | } | ||
514 | |||
515 | if (ip->i_afp) | ||
516 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | ||
517 | |||
518 | #ifdef XFS_INODE_TRACE | ||
519 | ktrace_free(ip->i_trace); | ||
520 | #endif | ||
521 | #ifdef XFS_BMAP_TRACE | ||
522 | ktrace_free(ip->i_xtrace); | ||
523 | #endif | ||
524 | #ifdef XFS_BTREE_TRACE | ||
525 | ktrace_free(ip->i_btrace); | ||
526 | #endif | ||
527 | #ifdef XFS_RW_TRACE | ||
528 | ktrace_free(ip->i_rwtrace); | ||
529 | #endif | ||
530 | #ifdef XFS_ILOCK_TRACE | ||
531 | ktrace_free(ip->i_lock_trace); | ||
532 | #endif | ||
533 | #ifdef XFS_DIR2_TRACE | ||
534 | ktrace_free(ip->i_dir_trace); | ||
535 | #endif | ||
536 | if (ip->i_itemp) { | ||
537 | /* | ||
538 | * Only if we are shutting down the fs will we see an | ||
539 | * inode still in the AIL. If it is there, we should remove | ||
540 | * it to prevent a use-after-free from occurring. | ||
541 | */ | ||
542 | xfs_log_item_t *lip = &ip->i_itemp->ili_item; | ||
543 | struct xfs_ail *ailp = lip->li_ailp; | ||
544 | |||
545 | ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || | ||
546 | XFS_FORCED_SHUTDOWN(ip->i_mount)); | ||
547 | if (lip->li_flags & XFS_LI_IN_AIL) { | ||
548 | spin_lock(&ailp->xa_lock); | ||
549 | if (lip->li_flags & XFS_LI_IN_AIL) | ||
550 | xfs_trans_ail_delete(ailp, lip); | ||
551 | else | ||
552 | spin_unlock(&ailp->xa_lock); | ||
553 | } | ||
554 | xfs_inode_item_destroy(ip); | ||
555 | ip->i_itemp = NULL; | ||
556 | } | ||
557 | /* asserts to verify all state is correct here */ | ||
558 | ASSERT(atomic_read(&ip->i_iocount) == 0); | ||
559 | ASSERT(atomic_read(&ip->i_pincount) == 0); | ||
560 | ASSERT(!spin_is_locked(&ip->i_flags_lock)); | ||
561 | ASSERT(completion_done(&ip->i_flush)); | ||
562 | kmem_zone_free(xfs_inode_zone, ip); | ||
563 | } | 567 | } |
564 | 568 | ||
565 | /* | 569 | /* |
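With inode_init_always() failures now handled right after the zone allocation, the teardown previously open-coded in xfs_ireclaim() is factored into xfs_inode_free() and shared with the iget error path. The two call sequences differ only in whether the Linux inode was ever initialized; roughly:

    /* iget error path: the VFS inode was initialized, so undo that first */
    __destroy_inode(VFS_I(ip));
    xfs_inode_free(ip);

    /* reclaim path: the VFS side is already torn down, just free the XFS inode */
    xfs_inode_free(ip);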
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1f22d65fed0a..da428b3fe0f5 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -343,6 +343,16 @@ xfs_iformat( | |||
343 | return XFS_ERROR(EFSCORRUPTED); | 343 | return XFS_ERROR(EFSCORRUPTED); |
344 | } | 344 | } |
345 | 345 | ||
346 | if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && | ||
347 | !ip->i_mount->m_rtdev_targp)) { | ||
348 | xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, | ||
349 | "corrupt dinode %Lu, has realtime flag set.", | ||
350 | ip->i_ino); | ||
351 | XFS_CORRUPTION_ERROR("xfs_iformat(realtime)", | ||
352 | XFS_ERRLEVEL_LOW, ip->i_mount, dip); | ||
353 | return XFS_ERROR(EFSCORRUPTED); | ||
354 | } | ||
355 | |||
346 | switch (ip->i_d.di_mode & S_IFMT) { | 356 | switch (ip->i_d.di_mode & S_IFMT) { |
347 | case S_IFIFO: | 357 | case S_IFIFO: |
348 | case S_IFCHR: | 358 | case S_IFCHR: |
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 1804f866a71d..65f24a3cc992 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -310,23 +310,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip) | |||
310 | } | 310 | } |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * Get rid of a partially initialized inode. | ||
314 | * | ||
315 | * We have to go through destroy_inode to make sure allocations | ||
316 | * from init_inode_always like the security data are undone. | ||
317 | * | ||
318 | * We mark the inode bad so that it takes the short cut in | ||
319 | * the reclaim path instead of going through the flush path | ||
320 | * which doesn't make sense for an inode that has never seen the | ||
321 | * light of day. | ||
322 | */ | ||
323 | static inline void xfs_destroy_inode(struct xfs_inode *ip) | ||
324 | { | ||
325 | make_bad_inode(VFS_I(ip)); | ||
326 | return destroy_inode(VFS_I(ip)); | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * i_flags helper functions | 313 | * i_flags helper functions |
331 | */ | 314 | */ |
332 | static inline void | 315 | static inline void |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 3750f04ede0b..9dbdff3ea484 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -3180,7 +3180,7 @@ try_again: | |||
3180 | STATIC void | 3180 | STATIC void |
3181 | xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) | 3181 | xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) |
3182 | { | 3182 | { |
3183 | ASSERT(spin_is_locked(&log->l_icloglock)); | 3183 | assert_spin_locked(&log->l_icloglock); |
3184 | 3184 | ||
3185 | if (iclog->ic_state == XLOG_STATE_ACTIVE) { | 3185 | if (iclog->ic_state == XLOG_STATE_ACTIVE) { |
3186 | xlog_state_switch_iclogs(log, iclog, 0); | 3186 | xlog_state_switch_iclogs(log, iclog, 0); |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index c4eca5ed5dab..492d75bae2bf 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -538,7 +538,9 @@ xfs_readlink_bmap( | |||
538 | d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); | 538 | d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); |
539 | byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); | 539 | byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); |
540 | 540 | ||
541 | bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0); | 541 | bp = xfs_buf_read_flags(mp->m_ddev_targp, d, BTOBB(byte_cnt), |
542 | XBF_LOCK | XBF_MAPPED | | ||
543 | XBF_DONT_BLOCK); | ||
542 | error = XFS_BUF_GETERROR(bp); | 544 | error = XFS_BUF_GETERROR(bp); |
543 | if (error) { | 545 | if (error) { |
544 | xfs_ioerror_alert("xfs_readlink", | 546 | xfs_ioerror_alert("xfs_readlink", |
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 3e798593b17b..ab0b85cf21f3 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h | |||
@@ -242,6 +242,10 @@ acpi_os_derive_pci_id(acpi_handle rhandle, | |||
242 | acpi_status acpi_os_validate_interface(char *interface); | 242 | acpi_status acpi_os_validate_interface(char *interface); |
243 | acpi_status acpi_osi_invalidate(char* interface); | 243 | acpi_status acpi_osi_invalidate(char* interface); |
244 | 244 | ||
245 | acpi_status | ||
246 | acpi_os_validate_address(u8 space_id, acpi_physical_address address, | ||
247 | acpi_size length, char *name); | ||
248 | |||
245 | u64 acpi_os_get_timer(void); | 249 | u64 acpi_os_get_timer(void); |
246 | 250 | ||
247 | acpi_status acpi_os_signal(u32 function, void *info); | 251 | acpi_status acpi_os_signal(u32 function, void *info); |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 7174818c2c13..853508499d20 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -257,9 +257,12 @@ | |||
257 | {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ | 257 | {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ |
258 | {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 258 | {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
259 | {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 259 | {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
260 | {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
260 | {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ | 261 | {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ |
261 | {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ | 262 | {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ |
263 | {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ | ||
262 | {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ | 264 | {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ |
265 | {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
263 | {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ | 266 | {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ |
264 | {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ | 267 | {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ |
265 | {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ | 268 | {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ |
@@ -288,6 +291,7 @@ | |||
288 | {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ | 291 | {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ |
289 | {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ | 292 | {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ |
290 | {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 293 | {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
294 | {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ | ||
291 | {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ | 295 | {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ |
292 | {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ | 296 | {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ |
293 | {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ | 297 | {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ |
@@ -325,6 +329,7 @@ | |||
325 | {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 329 | {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
326 | {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 330 | {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
327 | {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 331 | {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
332 | {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | ||
328 | {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ | 333 | {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ |
329 | {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 334 | {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
330 | {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 335 | {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
@@ -365,6 +370,11 @@ | |||
365 | {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 370 | {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
366 | {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 371 | {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
367 | {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 372 | {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
373 | {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
374 | {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
375 | {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
376 | {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
377 | {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
368 | {0, 0, 0} | 378 | {0, 0, 0} |
369 | 379 | ||
370 | #define r128_PCI_IDS \ | 380 | #define r128_PCI_IDS \ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e7cb5dbf6c26..69103e053c92 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -913,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short) | |||
913 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | 913 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); |
914 | extern void blk_queue_alignment_offset(struct request_queue *q, | 914 | extern void blk_queue_alignment_offset(struct request_queue *q, |
915 | unsigned int alignment); | 915 | unsigned int alignment); |
916 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | ||
916 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); | 917 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); |
917 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | 918 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); |
918 | extern void blk_set_default_limits(struct queue_limits *lim); | 919 | extern void blk_set_default_limits(struct queue_limits *lim); |
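blk_limits_io_min() lets code that only has a bare struct queue_limits (for example a stacking driver assembling limits before a request_queue exists) set the minimum I/O size, while blk_queue_io_min() remains the wrapper for drivers that already own a queue. A hedged sketch of the intended split; q and limits are assumed to be in scope and the values are illustrative:

    /* driver with a live request queue */
    blk_queue_io_min(q, 4096);

    /* stacking code that only has a struct queue_limits so far */
    blk_limits_io_min(&limits, 64 * 1024);      /* e.g. a RAID chunk size */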
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 1219be4fb42e..83d2fbd81b93 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/cache.h> | 15 | #include <linux/cache.h> |
16 | #include <linux/timer.h> | 16 | #include <linux/timer.h> |
17 | #include <linux/init.h> | ||
17 | #include <asm/div64.h> | 18 | #include <asm/div64.h> |
18 | #include <asm/io.h> | 19 | #include <asm/io.h> |
19 | 20 | ||
@@ -148,14 +149,11 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, | |||
148 | * @disable: optional function to disable the clocksource | 149 | * @disable: optional function to disable the clocksource |
149 | * @mask: bitmask for two's complement | 150 | * @mask: bitmask for two's complement |
150 | * subtraction of non 64 bit counters | 151 | * subtraction of non 64 bit counters |
151 | * @mult: cycle to nanosecond multiplier (adjusted by NTP) | 152 | * @mult: cycle to nanosecond multiplier |
152 | * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP) | ||
153 | * @shift: cycle to nanosecond divisor (power of two) | 153 | * @shift: cycle to nanosecond divisor (power of two) |
154 | * @flags: flags describing special properties | 154 | * @flags: flags describing special properties |
155 | * @vread: vsyscall based read | 155 | * @vread: vsyscall based read |
156 | * @resume: resume function for the clocksource, if necessary | 156 | * @resume: resume function for the clocksource, if necessary |
157 | * @cycle_interval: Used internally by timekeeping core, please ignore. | ||
158 | * @xtime_interval: Used internally by timekeeping core, please ignore. | ||
159 | */ | 157 | */ |
160 | struct clocksource { | 158 | struct clocksource { |
161 | /* | 159 | /* |
@@ -169,7 +167,6 @@ struct clocksource { | |||
169 | void (*disable)(struct clocksource *cs); | 167 | void (*disable)(struct clocksource *cs); |
170 | cycle_t mask; | 168 | cycle_t mask; |
171 | u32 mult; | 169 | u32 mult; |
172 | u32 mult_orig; | ||
173 | u32 shift; | 170 | u32 shift; |
174 | unsigned long flags; | 171 | unsigned long flags; |
175 | cycle_t (*vread)(void); | 172 | cycle_t (*vread)(void); |
@@ -181,19 +178,12 @@ struct clocksource { | |||
181 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) | 178 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) |
182 | #endif | 179 | #endif |
183 | 180 | ||
184 | /* timekeeping specific data, ignore */ | ||
185 | cycle_t cycle_interval; | ||
186 | u64 xtime_interval; | ||
187 | u32 raw_interval; | ||
188 | /* | 181 | /* |
189 | * Second part is written at each timer interrupt | 182 | * Second part is written at each timer interrupt |
190 | * Keep it in a different cache line to dirty no | 183 | * Keep it in a different cache line to dirty no |
191 | * more than one cache line. | 184 | * more than one cache line. |
192 | */ | 185 | */ |
193 | cycle_t cycle_last ____cacheline_aligned_in_smp; | 186 | cycle_t cycle_last ____cacheline_aligned_in_smp; |
194 | u64 xtime_nsec; | ||
195 | s64 error; | ||
196 | struct timespec raw_time; | ||
197 | 187 | ||
198 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | 188 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
199 | /* Watchdog related data, used by the framework */ | 189 | /* Watchdog related data, used by the framework */ |
@@ -202,8 +192,6 @@ struct clocksource { | |||
202 | #endif | 192 | #endif |
203 | }; | 193 | }; |
204 | 194 | ||
205 | extern struct clocksource *clock; /* current clocksource */ | ||
206 | |||
207 | /* | 195 | /* |
208 | * Clock source flags bits:: | 196 | * Clock source flags bits:: |
209 | */ | 197 | */ |
@@ -212,6 +200,7 @@ extern struct clocksource *clock; /* current clocksource */ | |||
212 | 200 | ||
213 | #define CLOCK_SOURCE_WATCHDOG 0x10 | 201 | #define CLOCK_SOURCE_WATCHDOG 0x10 |
214 | #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 | 202 | #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 |
203 | #define CLOCK_SOURCE_UNSTABLE 0x40 | ||
215 | 204 | ||
216 | /* simplify initialization of mask field */ | 205 | /* simplify initialization of mask field */ |
217 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) | 206 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
@@ -268,108 +257,15 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) | |||
268 | } | 257 | } |
269 | 258 | ||
270 | /** | 259 | /** |
271 | * clocksource_read: - Access the clocksource's current cycle value | 260 | * clocksource_cyc2ns - converts clocksource cycles to nanoseconds |
272 | * @cs: pointer to clocksource being read | ||
273 | * | ||
274 | * Uses the clocksource to return the current cycle_t value | ||
275 | */ | ||
276 | static inline cycle_t clocksource_read(struct clocksource *cs) | ||
277 | { | ||
278 | return cs->read(cs); | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * clocksource_enable: - enable clocksource | ||
283 | * @cs: pointer to clocksource | ||
284 | * | ||
285 | * Enables the specified clocksource. The clocksource callback | ||
286 | * function should start up the hardware and setup mult and field | ||
287 | * members of struct clocksource to reflect hardware capabilities. | ||
288 | */ | ||
289 | static inline int clocksource_enable(struct clocksource *cs) | ||
290 | { | ||
291 | int ret = 0; | ||
292 | |||
293 | if (cs->enable) | ||
294 | ret = cs->enable(cs); | ||
295 | |||
296 | /* | ||
297 | * The frequency may have changed while the clocksource | ||
298 | * was disabled. If so the code in ->enable() must update | ||
299 | * the mult value to reflect the new frequency. Make sure | ||
300 | * mult_orig follows this change. | ||
301 | */ | ||
302 | cs->mult_orig = cs->mult; | ||
303 | |||
304 | return ret; | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * clocksource_disable: - disable clocksource | ||
309 | * @cs: pointer to clocksource | ||
310 | * | ||
311 | * Disables the specified clocksource. The clocksource callback | ||
312 | * function should power down the now unused hardware block to | ||
313 | * save power. | ||
314 | */ | ||
315 | static inline void clocksource_disable(struct clocksource *cs) | ||
316 | { | ||
317 | /* | ||
318 | * Save mult_orig in mult so clocksource_enable() can | ||
319 | * restore the value regardless if ->enable() updates | ||
320 | * the value of mult or not. | ||
321 | */ | ||
322 | cs->mult = cs->mult_orig; | ||
323 | |||
324 | if (cs->disable) | ||
325 | cs->disable(cs); | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * cyc2ns - converts clocksource cycles to nanoseconds | ||
330 | * @cs: Pointer to clocksource | ||
331 | * @cycles: Cycles | ||
332 | * | 261 | * |
333 | * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds. | 262 | * Converts cycles to nanoseconds, using the given mult and shift. |
334 | * | 263 | * |
335 | * XXX - This could use some mult_lxl_ll() asm optimization | 264 | * XXX - This could use some mult_lxl_ll() asm optimization |
336 | */ | 265 | */ |
337 | static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) | 266 | static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) |
338 | { | ||
339 | u64 ret = (u64)cycles; | ||
340 | ret = (ret * cs->mult) >> cs->shift; | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * clocksource_calculate_interval - Calculates a clocksource interval struct | ||
346 | * | ||
347 | * @c: Pointer to clocksource. | ||
348 | * @length_nsec: Desired interval length in nanoseconds. | ||
349 | * | ||
350 | * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment | ||
351 | * pair and interval request. | ||
352 | * | ||
353 | * Unless you're the timekeeping code, you should not be using this! | ||
354 | */ | ||
355 | static inline void clocksource_calculate_interval(struct clocksource *c, | ||
356 | unsigned long length_nsec) | ||
357 | { | 267 | { |
358 | u64 tmp; | 268 | return ((u64) cycles * mult) >> shift; |
359 | |||
360 | /* Do the ns -> cycle conversion first, using original mult */ | ||
361 | tmp = length_nsec; | ||
362 | tmp <<= c->shift; | ||
363 | tmp += c->mult_orig/2; | ||
364 | do_div(tmp, c->mult_orig); | ||
365 | |||
366 | c->cycle_interval = (cycle_t)tmp; | ||
367 | if (c->cycle_interval == 0) | ||
368 | c->cycle_interval = 1; | ||
369 | |||
370 | /* Go back from cycles -> shifted ns, this time use ntp adjused mult */ | ||
371 | c->xtime_interval = (u64)c->cycle_interval * c->mult; | ||
372 | c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift; | ||
373 | } | 269 | } |
374 | 270 | ||
375 | 271 | ||
@@ -380,6 +276,8 @@ extern void clocksource_touch_watchdog(void); | |||
380 | extern struct clocksource* clocksource_get_next(void); | 276 | extern struct clocksource* clocksource_get_next(void); |
381 | extern void clocksource_change_rating(struct clocksource *cs, int rating); | 277 | extern void clocksource_change_rating(struct clocksource *cs, int rating); |
382 | extern void clocksource_resume(void); | 278 | extern void clocksource_resume(void); |
279 | extern struct clocksource * __init __weak clocksource_default_clock(void); | ||
280 | extern void clocksource_mark_unstable(struct clocksource *cs); | ||
383 | 281 | ||
384 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL | 282 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL |
385 | extern void update_vsyscall(struct timespec *ts, struct clocksource *c); | 283 | extern void update_vsyscall(struct timespec *ts, struct clocksource *c); |
@@ -394,4 +292,6 @@ static inline void update_vsyscall_tz(void) | |||
394 | } | 292 | } |
395 | #endif | 293 | #endif |
396 | 294 | ||
295 | extern void timekeeping_notify(struct clocksource *clock); | ||
296 | |||
397 | #endif /* _LINUX_CLOCKSOURCE_H */ | 297 | #endif /* _LINUX_CLOCKSOURCE_H */ |
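With the NTP-adjusted fields gone from struct clocksource, cycle-to-nanosecond conversion becomes a pure function of a (mult, shift) pair and the timekeeping core keeps its own adjusted copy. A sketch of how the new helper is used to turn a cycle delta into nanoseconds; cs is assumed to be a registered clocksource and only fields visible in the hunk are used:

    cycle_t cycle_now, cycle_delta;
    s64 nsec;

    cycle_now = cs->read(cs);
    cycle_delta = (cycle_now - cs->cycle_last) & cs->mask;   /* handle counter wrap */
    nsec = clocksource_cyc2ns(cycle_delta, cs->mult, cs->shift);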
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h index 6dfb856327bb..0c7111a55a1a 100644 --- a/include/linux/decompress/generic.h +++ b/include/linux/decompress/generic.h | |||
@@ -1,31 +1,37 @@ | |||
1 | #ifndef DECOMPRESS_GENERIC_H | 1 | #ifndef DECOMPRESS_GENERIC_H |
2 | #define DECOMPRESS_GENERIC_H | 2 | #define DECOMPRESS_GENERIC_H |
3 | 3 | ||
4 | /* Minimal chunksize to be read. | ||
5 | *Bzip2 prefers at least 4096 | ||
6 | *Lzma prefers 0x10000 */ | ||
7 | #define COMPR_IOBUF_SIZE 4096 | ||
8 | |||
9 | typedef int (*decompress_fn) (unsigned char *inbuf, int len, | 4 | typedef int (*decompress_fn) (unsigned char *inbuf, int len, |
10 | int(*fill)(void*, unsigned int), | 5 | int(*fill)(void*, unsigned int), |
11 | int(*writebb)(void*, unsigned int), | 6 | int(*flush)(void*, unsigned int), |
12 | unsigned char *output, | 7 | unsigned char *outbuf, |
13 | int *posp, | 8 | int *posp, |
14 | void(*error)(char *x)); | 9 | void(*error)(char *x)); |
15 | 10 | ||
16 | /* inbuf - input buffer | 11 | /* inbuf - input buffer |
17 | *len - len of pre-read data in inbuf | 12 | *len - len of pre-read data in inbuf |
18 | *fill - function to fill inbuf if empty | 13 | *fill - function to fill inbuf when empty |
19 | *writebb - function to write out outbug | 14 | *flush - function to write out outbuf |
15 | *outbuf - output buffer | ||
20 | *posp - if non-null, input position (number of bytes read) will be | 16 | *posp - if non-null, input position (number of bytes read) will be |
21 | * returned here | 17 | * returned here |
22 | * | 18 | * |
23 | *If len != 0, the inbuf is initialized (with as much data), and fill | 19 | *If len != 0, inbuf should contain all the necessary input data, and fill |
24 | *should not be called | 20 | *should be NULL |
25 | *If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE | 21 | *If len = 0, inbuf can be NULL, in which case the decompressor will allocate |
26 | *fill should be called (repeatedly...) to read data, at most IOBUF_SIZE | 22 | *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes. |
23 | *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE | ||
24 | *bytes should be read per call. Replace XXX with the appropriate decompressor | ||
25 | *name, i.e. LZMA_IOBUF_SIZE. | ||
26 | * | ||
27 | *If flush = NULL, outbuf must be large enough to buffer all the expected | ||
28 | *output. If flush != NULL, the output buffer will be allocated by the | ||
29 | *decompressor (outbuf = NULL), and the flush function will be called to | ||
30 | *flush the output buffer at the appropriate time (decompressor and stream | ||
31 | *dependent). | ||
27 | */ | 32 | */ |
28 | 33 | ||
34 | |||
29 | /* Utility routine to detect the decompression method */ | 35 | /* Utility routine to detect the decompression method */ |
30 | decompress_fn decompress_method(const unsigned char *inbuf, int len, | 36 | decompress_fn decompress_method(const unsigned char *inbuf, int len, |
31 | const char **name); | 37 | const char **name); |
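The reworked comment spells out the common calling convention for the in-kernel decompressors: pass a complete input buffer (fill == NULL) or a fill callback, and a preallocated output buffer (flush == NULL) or a flush callback. A sketch of the fully streaming variant; the callback bodies and the hdr/hdr_len variables are placeholders, only the interfaces shown in the hunk are assumed:

    static int my_fill(void *buf, unsigned int len)
    {
            /* read up to len bytes of compressed input into buf, return count */
            return 0;
    }

    static int my_flush(void *buf, unsigned int len)
    {
            /* consume len bytes of decompressed output from buf */
            return len;
    }

    static void my_error(char *msg)
    {
            printk(KERN_ERR "decompress: %s\n", msg);
    }

    /* pick a decompressor from the stream magic, then run it in streaming mode */
    const char *name;
    decompress_fn fn = decompress_method(hdr, hdr_len, &name);
    if (fn)
            ret = fn(NULL, 0, my_fill, my_flush, NULL, NULL, my_error);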
diff --git a/include/linux/fs.h b/include/linux/fs.h index a36ffa5a77a4..67888a9e0655 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -2137,7 +2137,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin); | |||
2137 | 2137 | ||
2138 | extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); | 2138 | extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); |
2139 | 2139 | ||
2140 | extern struct inode * inode_init_always(struct super_block *, struct inode *); | 2140 | extern int inode_init_always(struct super_block *, struct inode *); |
2141 | extern void inode_init_once(struct inode *); | 2141 | extern void inode_init_once(struct inode *); |
2142 | extern void inode_add_to_lists(struct super_block *, struct inode *); | 2142 | extern void inode_add_to_lists(struct super_block *, struct inode *); |
2143 | extern void iput(struct inode *); | 2143 | extern void iput(struct inode *); |
@@ -2164,6 +2164,7 @@ extern void __iget(struct inode * inode); | |||
2164 | extern void iget_failed(struct inode *); | 2164 | extern void iget_failed(struct inode *); |
2165 | extern void clear_inode(struct inode *); | 2165 | extern void clear_inode(struct inode *); |
2166 | extern void destroy_inode(struct inode *); | 2166 | extern void destroy_inode(struct inode *); |
2167 | extern void __destroy_inode(struct inode *); | ||
2167 | extern struct inode *new_inode(struct super_block *); | 2168 | extern struct inode *new_inode(struct super_block *); |
2168 | extern int should_remove_suid(struct dentry *); | 2169 | extern int should_remove_suid(struct dentry *); |
2169 | extern int file_remove_suid(struct file *); | 2170 | extern int file_remove_suid(struct file *); |
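The inode_init_always() prototype change (an int error code instead of a struct inode pointer) is what the earlier xfs_iget.c hunks adapt to: callers now test a nonzero return rather than NULL. A minimal sketch of the new contract; sb, inode and the cleanup label are illustrative:

    int err;

    err = inode_init_always(sb, inode);
    if (err)                        /* 0 on success, -ENOMEM on failure */
            goto out_free;          /* caller still owns its fs-private part */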
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 5c093ffc655b..a81170de7f6b 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -89,7 +89,9 @@ enum print_line_t { | |||
89 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | 89 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ |
90 | }; | 90 | }; |
91 | 91 | ||
92 | 92 | void tracing_generic_entry_update(struct trace_entry *entry, | |
93 | unsigned long flags, | ||
94 | int pc); | ||
93 | struct ring_buffer_event * | 95 | struct ring_buffer_event * |
94 | trace_current_buffer_lock_reserve(int type, unsigned long len, | 96 | trace_current_buffer_lock_reserve(int type, unsigned long len, |
95 | unsigned long flags, int pc); | 97 | unsigned long flags, int pc); |
@@ -119,11 +121,9 @@ struct ftrace_event_call { | |||
119 | void *filter; | 121 | void *filter; |
120 | void *mod; | 122 | void *mod; |
121 | 123 | ||
122 | #ifdef CONFIG_EVENT_PROFILE | 124 | atomic_t profile_count; |
123 | atomic_t profile_count; | 125 | int (*profile_enable)(struct ftrace_event_call *); |
124 | int (*profile_enable)(struct ftrace_event_call *); | 126 | void (*profile_disable)(struct ftrace_event_call *); |
125 | void (*profile_disable)(struct ftrace_event_call *); | ||
126 | #endif | ||
127 | }; | 127 | }; |
128 | 128 | ||
129 | #define MAX_FILTER_PRED 32 | 129 | #define MAX_FILTER_PRED 32 |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 4759917adc71..ff037f0b1b4e 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -91,7 +91,6 @@ enum hrtimer_restart { | |||
91 | * @function: timer expiry callback function | 91 | * @function: timer expiry callback function |
92 | * @base: pointer to the timer base (per cpu and per clock) | 92 | * @base: pointer to the timer base (per cpu and per clock) |
93 | * @state: state information (See bit values above) | 93 | * @state: state information (See bit values above) |
94 | * @cb_entry: list head to enqueue an expired timer into the callback list | ||
95 | * @start_site: timer statistics field to store the site where the timer | 94 | * @start_site: timer statistics field to store the site where the timer |
96 | * was started | 95 | * was started |
97 | * @start_comm: timer statistics field to store the name of the process which | 96 | * @start_comm: timer statistics field to store the name of the process which |
@@ -108,7 +107,6 @@ struct hrtimer { | |||
108 | enum hrtimer_restart (*function)(struct hrtimer *); | 107 | enum hrtimer_restart (*function)(struct hrtimer *); |
109 | struct hrtimer_clock_base *base; | 108 | struct hrtimer_clock_base *base; |
110 | unsigned long state; | 109 | unsigned long state; |
111 | struct list_head cb_entry; | ||
112 | #ifdef CONFIG_TIMER_STATS | 110 | #ifdef CONFIG_TIMER_STATS |
113 | int start_pid; | 111 | int start_pid; |
114 | void *start_site; | 112 | void *start_site; |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index acef2a770b6b..ad27c7da8798 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) | |||
82 | 82 | ||
83 | #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) | 83 | #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) |
84 | #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) | 84 | #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) |
85 | #define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER) | 85 | #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) |
86 | #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ | 86 | #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ |
87 | ACCEPT_SOURCE_ROUTE) | 87 | ACCEPT_SOURCE_ROUTE) |
88 | #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) | 88 | #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) |
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 7964516c6954..15d5903af2dd 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h | |||
@@ -15,12 +15,13 @@ | |||
15 | #define KEY_COL(k) (((k) >> 16) & 0xff) | 15 | #define KEY_COL(k) (((k) >> 16) & 0xff) |
16 | #define KEY_VAL(k) ((k) & 0xffff) | 16 | #define KEY_VAL(k) ((k) & 0xffff) |
17 | 17 | ||
18 | #define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col)) | ||
19 | |||
18 | /** | 20 | /** |
19 | * struct matrix_keymap_data - keymap for matrix keyboards | 21 | * struct matrix_keymap_data - keymap for matrix keyboards |
20 | * @keymap: pointer to array of uint32 values encoded with KEY() macro | 22 | * @keymap: pointer to array of uint32 values encoded with KEY() macro |
21 | * representing keymap | 23 | * representing keymap |
22 | * @keymap_size: number of entries (initialized) in this keymap | 24 | * @keymap_size: number of entries (initialized) in this keymap |
23 | * @max_keymap_size: maximum size of keymap supported by the device | ||
24 | * | 25 | * |
25 | * This structure is supposed to be used by platform code to supply | 26 | * This structure is supposed to be used by platform code to supply |
26 | * keymaps to drivers that implement matrix-like keypads/keyboards. | 27 | * keymaps to drivers that implement matrix-like keypads/keyboards. |
@@ -28,14 +29,13 @@ | |||
28 | struct matrix_keymap_data { | 29 | struct matrix_keymap_data { |
29 | const uint32_t *keymap; | 30 | const uint32_t *keymap; |
30 | unsigned int keymap_size; | 31 | unsigned int keymap_size; |
31 | unsigned int max_keymap_size; | ||
32 | }; | 32 | }; |
33 | 33 | ||
34 | /** | 34 | /** |
35 | * struct matrix_keypad_platform_data - platform-dependent keypad data | 35 | * struct matrix_keypad_platform_data - platform-dependent keypad data |
36 | * @keymap_data: pointer to &matrix_keymap_data | 36 | * @keymap_data: pointer to &matrix_keymap_data |
37 | * @row_gpios: array of gpio numbers reporesenting rows | 37 | * @row_gpios: pointer to array of gpio numbers representing rows |
38 | * @col_gpios: array of gpio numbers reporesenting colums | 38 | * @col_gpios: pointer to array of gpio numbers representing columns |
39 | * @num_row_gpios: actual number of row gpios used by device | 39 | * @num_row_gpios: actual number of row gpios used by device |
40 | * @num_col_gpios: actual number of col gpios used by device | 40 | * @num_col_gpios: actual number of col gpios used by device |
41 | * @col_scan_delay_us: delay, measured in microseconds, that is | 41 | * @col_scan_delay_us: delay, measured in microseconds, that is |
@@ -48,8 +48,9 @@ struct matrix_keymap_data { | |||
48 | struct matrix_keypad_platform_data { | 48 | struct matrix_keypad_platform_data { |
49 | const struct matrix_keymap_data *keymap_data; | 49 | const struct matrix_keymap_data *keymap_data; |
50 | 50 | ||
51 | unsigned int row_gpios[MATRIX_MAX_ROWS]; | 51 | const unsigned int *row_gpios; |
52 | unsigned int col_gpios[MATRIX_MAX_COLS]; | 52 | const unsigned int *col_gpios; |
53 | |||
53 | unsigned int num_row_gpios; | 54 | unsigned int num_row_gpios; |
54 | unsigned int num_col_gpios; | 55 | unsigned int num_col_gpios; |
55 | 56 | ||
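With row_gpios/col_gpios now passed as const pointers, board code owns the GPIO arrays and reports their sizes separately. A minimal, hypothetical board-file sketch of filling the platform data (the GPIO numbers, key codes and all example_* names are illustrative, not part of this patch; the keymap uses the KEY() encoding mentioned in the struct comment):

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

static const unsigned int example_row_gpios[] = { 64, 65, 66 };
static const unsigned int example_col_gpios[] = { 70, 71 };

static const uint32_t example_keymap[] = {
	KEY(0, 0, KEY_A),	/* row 0, column 0 */
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_C),
	KEY(1, 1, KEY_D),
	KEY(2, 0, KEY_E),
	KEY(2, 1, KEY_F),
};

static const struct matrix_keymap_data example_keymap_data = {
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static struct matrix_keypad_platform_data example_keypad_pdata = {
	.keymap_data	= &example_keymap_data,
	.row_gpios	= example_row_gpios,
	.col_gpios	= example_col_gpios,
	.num_row_gpios	= ARRAY_SIZE(example_row_gpios),
	.num_col_gpios	= ARRAY_SIZE(example_col_gpios),
	.col_scan_delay_us = 10,
};

The new MATRIX_SCAN_CODE(row, col, row_shift) macro is what a driver uses internally to turn a (row, col) position into the scancode it reports to the input core.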
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 16713dc672e4..3060bdc35ffe 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -110,6 +110,7 @@ struct kvm_memory_slot { | |||
110 | 110 | ||
111 | struct kvm_kernel_irq_routing_entry { | 111 | struct kvm_kernel_irq_routing_entry { |
112 | u32 gsi; | 112 | u32 gsi; |
113 | u32 type; | ||
113 | int (*set)(struct kvm_kernel_irq_routing_entry *e, | 114 | int (*set)(struct kvm_kernel_irq_routing_entry *e, |
114 | struct kvm *kvm, int level); | 115 | struct kvm *kvm, int level); |
115 | union { | 116 | union { |
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 5675b63a0631..0f32a9b6ff55 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -251,7 +251,7 @@ struct mtd_info { | |||
251 | 251 | ||
252 | static inline struct mtd_info *dev_to_mtd(struct device *dev) | 252 | static inline struct mtd_info *dev_to_mtd(struct device *dev) |
253 | { | 253 | { |
254 | return dev ? container_of(dev, struct mtd_info, dev) : NULL; | 254 | return dev ? dev_get_drvdata(dev) : NULL; |
255 | } | 255 | } |
256 | 256 | ||
257 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) | 257 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) |
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index af6dcb992bc3..b70313d33ff8 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h | |||
@@ -47,6 +47,8 @@ struct mtd_partition { | |||
47 | #define MTDPART_SIZ_FULL (0) | 47 | #define MTDPART_SIZ_FULL (0) |
48 | 48 | ||
49 | 49 | ||
50 | struct mtd_info; | ||
51 | |||
50 | int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); | 52 | int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); |
51 | int del_mtd_partitions(struct mtd_info *); | 53 | int del_mtd_partitions(struct mtd_info *); |
52 | 54 | ||
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index fdffb413b192..f6b90240dd41 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -473,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *); | |||
473 | extern int nfs_flush_incompatible(struct file *file, struct page *page); | 473 | extern int nfs_flush_incompatible(struct file *file, struct page *page); |
474 | extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); | 474 | extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); |
475 | extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); | 475 | extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); |
476 | extern void nfs_writedata_release(void *); | ||
477 | 476 | ||
478 | /* | 477 | /* |
479 | * Try to write back everything synchronously (but check the | 478 | * Try to write back everything synchronously (but check the |
@@ -488,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); | |||
488 | extern int nfs_commit_inode(struct inode *, int); | 487 | extern int nfs_commit_inode(struct inode *, int); |
489 | extern struct nfs_write_data *nfs_commitdata_alloc(void); | 488 | extern struct nfs_write_data *nfs_commitdata_alloc(void); |
490 | extern void nfs_commit_free(struct nfs_write_data *wdata); | 489 | extern void nfs_commit_free(struct nfs_write_data *wdata); |
491 | extern void nfs_commitdata_release(void *wdata); | ||
492 | #else | 490 | #else |
493 | static inline int | 491 | static inline int |
494 | nfs_commit_inode(struct inode *inode, int how) | 492 | nfs_commit_inode(struct inode *inode, int how) |
@@ -507,6 +505,7 @@ nfs_have_writebacks(struct inode *inode) | |||
507 | * Allocate nfs_write_data structures | 505 | * Allocate nfs_write_data structures |
508 | */ | 506 | */ |
509 | extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages); | 507 | extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages); |
508 | extern void nfs_writedata_free(struct nfs_write_data *); | ||
510 | 509 | ||
511 | /* | 510 | /* |
512 | * linux/fs/nfs/read.c | 511 | * linux/fs/nfs/read.c |
@@ -515,7 +514,6 @@ extern int nfs_readpage(struct file *, struct page *); | |||
515 | extern int nfs_readpages(struct file *, struct address_space *, | 514 | extern int nfs_readpages(struct file *, struct address_space *, |
516 | struct list_head *, unsigned); | 515 | struct list_head *, unsigned); |
517 | extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); | 516 | extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); |
518 | extern void nfs_readdata_release(void *data); | ||
519 | extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, | 517 | extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, |
520 | struct page *); | 518 | struct page *); |
521 | 519 | ||
@@ -523,6 +521,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, | |||
523 | * Allocate nfs_read_data structures | 521 | * Allocate nfs_read_data structures |
524 | */ | 522 | */ |
525 | extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages); | 523 | extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages); |
524 | extern void nfs_readdata_free(struct nfs_read_data *); | ||
526 | 525 | ||
527 | /* | 526 | /* |
528 | * linux/fs/nfs3proc.c | 527 | * linux/fs/nfs3proc.c |
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 829b94b156f2..b359c4a9ec9e 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h | |||
@@ -82,6 +82,12 @@ | |||
82 | * to generate slightly worse code. So use a simple one-line #define | 82 | * to generate slightly worse code. So use a simple one-line #define |
83 | * for node_isset(), instead of wrapping an inline inside a macro, the | 83 | * for node_isset(), instead of wrapping an inline inside a macro, the |
84 | * way we do the other calls. | 84 | * way we do the other calls. |
85 | * | ||
86 | * NODEMASK_SCRATCH | ||
87 | * When doing above logical AND, OR, XOR, Remap operations the callers tend to | ||
88 | * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large, | ||
89 | * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper | ||
90 | * for such situations. See below and CPUMASK_ALLOC also. | ||
85 | */ | 91 | */ |
86 | 92 | ||
87 | #include <linux/kernel.h> | 93 | #include <linux/kernel.h> |
@@ -473,4 +479,26 @@ static inline int num_node_state(enum node_states state) | |||
473 | #define for_each_node(node) for_each_node_state(node, N_POSSIBLE) | 479 | #define for_each_node(node) for_each_node_state(node, N_POSSIBLE) |
474 | #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) | 480 | #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) |
475 | 481 | ||
482 | /* | ||
483 | * For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h) | ||
484 | */ | ||
485 | |||
486 | #if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ | ||
487 | #define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL) | ||
488 | #define NODEMASK_FREE(m) kfree(m) | ||
489 | #else | ||
490 | #define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m | ||
491 | #define NODEMASK_FREE(m) | ||
492 | #endif | ||
493 | |||
494 | /* An example structure for using NODEMASK_ALLOC, used in mempolicy. */ | ||
495 | struct nodemask_scratch { | ||
496 | nodemask_t mask1; | ||
497 | nodemask_t mask2; | ||
498 | }; | ||
499 | |||
500 | #define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x) | ||
501 | #define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) | ||
502 | |||
503 | |||
476 | #endif /* __LINUX_NODEMASK_H */ | 504 | #endif /* __LINUX_NODEMASK_H */ |
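NODEMASK_SCRATCH keeps large nodemask temporaries off the stack when NODES_SHIFT > 8 and degrades to an ordinary local otherwise. A minimal sketch of the intended calling pattern, assuming a caller that only needs a temporary mask (the function name and the use of nodes_and() are illustrative):

#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int example_masks_intersect(const nodemask_t *a, const nodemask_t *b)
{
	/* Either a kmalloc'd struct nodemask_scratch or an on-stack one,
	 * depending on NODES_SHIFT. */
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	nodes_and(scratch->mask1, *a, *b);	/* temporary lives in the scratch area */
	ret = !nodes_empty(scratch->mask1);

	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}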
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index bd15d7a5f5ce..b53f7006cc4e 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h | |||
@@ -115,27 +115,44 @@ enum perf_counter_sample_format { | |||
115 | PERF_SAMPLE_TID = 1U << 1, | 115 | PERF_SAMPLE_TID = 1U << 1, |
116 | PERF_SAMPLE_TIME = 1U << 2, | 116 | PERF_SAMPLE_TIME = 1U << 2, |
117 | PERF_SAMPLE_ADDR = 1U << 3, | 117 | PERF_SAMPLE_ADDR = 1U << 3, |
118 | PERF_SAMPLE_GROUP = 1U << 4, | 118 | PERF_SAMPLE_READ = 1U << 4, |
119 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | 119 | PERF_SAMPLE_CALLCHAIN = 1U << 5, |
120 | PERF_SAMPLE_ID = 1U << 6, | 120 | PERF_SAMPLE_ID = 1U << 6, |
121 | PERF_SAMPLE_CPU = 1U << 7, | 121 | PERF_SAMPLE_CPU = 1U << 7, |
122 | PERF_SAMPLE_PERIOD = 1U << 8, | 122 | PERF_SAMPLE_PERIOD = 1U << 8, |
123 | PERF_SAMPLE_STREAM_ID = 1U << 9, | 123 | PERF_SAMPLE_STREAM_ID = 1U << 9, |
124 | PERF_SAMPLE_RAW = 1U << 10, | ||
124 | 125 | ||
125 | PERF_SAMPLE_MAX = 1U << 10, /* non-ABI */ | 126 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ |
126 | }; | 127 | }; |
127 | 128 | ||
128 | /* | 129 | /* |
129 | * Bits that can be set in attr.read_format to request that | 130 | * The format of the data returned by read() on a perf counter fd, |
130 | * reads on the counter should return the indicated quantities, | 131 | * as specified by attr.read_format: |
131 | * in increasing order of bit value, after the counter value. | 132 | * |
133 | * struct read_format { | ||
134 | * { u64 value; | ||
135 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
136 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
137 | * { u64 id; } && PERF_FORMAT_ID | ||
138 | * } && !PERF_FORMAT_GROUP | ||
139 | * | ||
140 | * { u64 nr; | ||
141 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
142 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
143 | * { u64 value; | ||
144 | * { u64 id; } && PERF_FORMAT_ID | ||
145 | * } cntr[nr]; | ||
146 | * } && PERF_FORMAT_GROUP | ||
147 | * }; | ||
132 | */ | 148 | */ |
133 | enum perf_counter_read_format { | 149 | enum perf_counter_read_format { |
134 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | 150 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, |
135 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | 151 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, |
136 | PERF_FORMAT_ID = 1U << 2, | 152 | PERF_FORMAT_ID = 1U << 2, |
153 | PERF_FORMAT_GROUP = 1U << 3, | ||
137 | 154 | ||
138 | PERF_FORMAT_MAX = 1U << 3, /* non-ABI */ | 155 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ |
139 | }; | 156 | }; |
140 | 157 | ||
141 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ | 158 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ |
@@ -181,8 +198,9 @@ struct perf_counter_attr { | |||
181 | freq : 1, /* use freq, not period */ | 198 | freq : 1, /* use freq, not period */ |
182 | inherit_stat : 1, /* per task counts */ | 199 | inherit_stat : 1, /* per task counts */ |
183 | enable_on_exec : 1, /* next exec enables */ | 200 | enable_on_exec : 1, /* next exec enables */ |
201 | task : 1, /* trace fork/exit */ | ||
184 | 202 | ||
185 | __reserved_1 : 51; | 203 | __reserved_1 : 50; |
186 | 204 | ||
187 | __u32 wakeup_events; /* wakeup every n events */ | 205 | __u32 wakeup_events; /* wakeup every n events */ |
188 | __u32 __reserved_2; | 206 | __u32 __reserved_2; |
@@ -311,6 +329,15 @@ enum perf_event_type { | |||
311 | /* | 329 | /* |
312 | * struct { | 330 | * struct { |
313 | * struct perf_event_header header; | 331 | * struct perf_event_header header; |
332 | * u32 pid, ppid; | ||
333 | * u32 tid, ptid; | ||
334 | * }; | ||
335 | */ | ||
336 | PERF_EVENT_EXIT = 4, | ||
337 | |||
338 | /* | ||
339 | * struct { | ||
340 | * struct perf_event_header header; | ||
314 | * u64 time; | 341 | * u64 time; |
315 | * u64 id; | 342 | * u64 id; |
316 | * u64 stream_id; | 343 | * u64 stream_id; |
@@ -323,6 +350,7 @@ enum perf_event_type { | |||
323 | * struct { | 350 | * struct { |
324 | * struct perf_event_header header; | 351 | * struct perf_event_header header; |
325 | * u32 pid, ppid; | 352 | * u32 pid, ppid; |
353 | * u32 tid, ptid; | ||
326 | * }; | 354 | * }; |
327 | */ | 355 | */ |
328 | PERF_EVENT_FORK = 7, | 356 | PERF_EVENT_FORK = 7, |
@@ -331,10 +359,8 @@ enum perf_event_type { | |||
331 | * struct { | 359 | * struct { |
332 | * struct perf_event_header header; | 360 | * struct perf_event_header header; |
333 | * u32 pid, tid; | 361 | * u32 pid, tid; |
334 | * u64 value; | 362 | * |
335 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | 363 | * struct read_format values; |
336 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
337 | * { u64 parent_id; } && PERF_FORMAT_ID | ||
338 | * }; | 364 | * }; |
339 | */ | 365 | */ |
340 | PERF_EVENT_READ = 8, | 366 | PERF_EVENT_READ = 8, |
@@ -352,11 +378,24 @@ enum perf_event_type { | |||
352 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | 378 | * { u32 cpu, res; } && PERF_SAMPLE_CPU |
353 | * { u64 period; } && PERF_SAMPLE_PERIOD | 379 | * { u64 period; } && PERF_SAMPLE_PERIOD |
354 | * | 380 | * |
355 | * { u64 nr; | 381 | * { struct read_format values; } && PERF_SAMPLE_READ |
356 | * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP | ||
357 | * | 382 | * |
358 | * { u64 nr, | 383 | * { u64 nr, |
359 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | 384 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN |
385 | * | ||
386 | * # | ||
387 | * # The RAW record below is opaque data wrt the ABI | ||
388 | * # | ||
389 | * # That is, the ABI doesn't make any promises wrt to | ||
390 | * # the stability of its content, it may vary depending | ||
391 | * # on event, hardware, kernel version and phase of | ||
392 | * # the moon. | ||
393 | * # | ||
394 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. | ||
395 | * # | ||
396 | * | ||
397 | * { u32 size; | ||
398 | * char data[size];}&& PERF_SAMPLE_RAW | ||
360 | * }; | 399 | * }; |
361 | */ | 400 | */ |
362 | PERF_EVENT_SAMPLE = 9, | 401 | PERF_EVENT_SAMPLE = 9, |
@@ -402,6 +441,11 @@ struct perf_callchain_entry { | |||
402 | __u64 ip[PERF_MAX_STACK_DEPTH]; | 441 | __u64 ip[PERF_MAX_STACK_DEPTH]; |
403 | }; | 442 | }; |
404 | 443 | ||
444 | struct perf_raw_record { | ||
445 | u32 size; | ||
446 | void *data; | ||
447 | }; | ||
448 | |||
405 | struct task_struct; | 449 | struct task_struct; |
406 | 450 | ||
407 | /** | 451 | /** |
@@ -670,10 +714,13 @@ struct perf_sample_data { | |||
670 | struct pt_regs *regs; | 714 | struct pt_regs *regs; |
671 | u64 addr; | 715 | u64 addr; |
672 | u64 period; | 716 | u64 period; |
717 | struct perf_raw_record *raw; | ||
673 | }; | 718 | }; |
674 | 719 | ||
675 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, | 720 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, |
676 | struct perf_sample_data *data); | 721 | struct perf_sample_data *data); |
722 | extern void perf_counter_output(struct perf_counter *counter, int nmi, | ||
723 | struct perf_sample_data *data); | ||
677 | 724 | ||
678 | /* | 725 | /* |
679 | * Return 1 for a software counter, 0 for a hardware counter | 726 | * Return 1 for a software counter, 0 for a hardware counter |
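The struct read_format comment above fixes the layout userspace sees on read(). A minimal userspace sketch of parsing a group read, assuming a group-leader fd opened elsewhere with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID (the fd handling and the print_group name are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Layout for PERF_FORMAT_GROUP | PERF_FORMAT_ID (no time_enabled/running):
 *   u64 nr;  then nr entries of { u64 value; u64 id; } */
static void print_group(int group_fd)
{
	uint64_t buf[1 + 2 * 64];	/* room for up to 64 counters */
	ssize_t len = read(group_fd, buf, sizeof(buf));
	uint64_t nr, i;

	if (len < (ssize_t)sizeof(uint64_t))
		return;

	nr = buf[0];
	if ((1 + 2 * nr) * sizeof(uint64_t) > (size_t)len)
		return;		/* short read, bail out */

	for (i = 0; i < nr; i++)
		printf("id %llu: %llu\n",
		       (unsigned long long)buf[2 + 2 * i],
		       (unsigned long long)buf[1 + 2 * i]);
}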
diff --git a/include/linux/time.h b/include/linux/time.h index ea16c1a01d51..256232f7e5e6 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -101,7 +101,8 @@ extern struct timespec xtime; | |||
101 | extern struct timespec wall_to_monotonic; | 101 | extern struct timespec wall_to_monotonic; |
102 | extern seqlock_t xtime_lock; | 102 | extern seqlock_t xtime_lock; |
103 | 103 | ||
104 | extern unsigned long read_persistent_clock(void); | 104 | extern void read_persistent_clock(struct timespec *ts); |
105 | extern void read_boot_clock(struct timespec *ts); | ||
105 | extern int update_persistent_clock(struct timespec now); | 106 | extern int update_persistent_clock(struct timespec now); |
106 | extern int no_sync_cmos_clock __read_mostly; | 107 | extern int no_sync_cmos_clock __read_mostly; |
107 | void timekeeping_init(void); | 108 | void timekeeping_init(void); |
@@ -109,6 +110,8 @@ extern int timekeeping_suspended; | |||
109 | 110 | ||
110 | unsigned long get_seconds(void); | 111 | unsigned long get_seconds(void); |
111 | struct timespec current_kernel_time(void); | 112 | struct timespec current_kernel_time(void); |
113 | struct timespec __current_kernel_time(void); /* does not hold xtime_lock */ | ||
114 | struct timespec get_monotonic_coarse(void); | ||
112 | 115 | ||
113 | #define CURRENT_TIME (current_kernel_time()) | 116 | #define CURRENT_TIME (current_kernel_time()) |
114 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) | 117 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
@@ -147,6 +150,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran); | |||
147 | extern int timekeeping_valid_for_hres(void); | 150 | extern int timekeeping_valid_for_hres(void); |
148 | extern void update_wall_time(void); | 151 | extern void update_wall_time(void); |
149 | extern void update_xtime_cache(u64 nsec); | 152 | extern void update_xtime_cache(u64 nsec); |
153 | extern void timekeeping_leap_insert(int leapsecond); | ||
150 | 154 | ||
151 | struct tms; | 155 | struct tms; |
152 | extern void do_sys_times(struct tms *); | 156 | extern void do_sys_times(struct tms *); |
@@ -241,6 +245,8 @@ struct itimerval { | |||
241 | #define CLOCK_PROCESS_CPUTIME_ID 2 | 245 | #define CLOCK_PROCESS_CPUTIME_ID 2 |
242 | #define CLOCK_THREAD_CPUTIME_ID 3 | 246 | #define CLOCK_THREAD_CPUTIME_ID 3 |
243 | #define CLOCK_MONOTONIC_RAW 4 | 247 | #define CLOCK_MONOTONIC_RAW 4 |
248 | #define CLOCK_REALTIME_COARSE 5 | ||
249 | #define CLOCK_MONOTONIC_COARSE 6 | ||
244 | 250 | ||
245 | /* | 251 | /* |
246 | * The IDs of various hardware clocks: | 252 | * The IDs of various hardware clocks: |
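CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE are new clock ids, so a libc built before this patch will not define them; the fallback #define below mirrors the value added above. A hedged userspace sketch:

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_MONOTONIC_COARSE
#define CLOCK_MONOTONIC_COARSE 6	/* value introduced by this patch */
#endif

int main(void)
{
	struct timespec ts;

	/* The coarse clocks return the tick-granular cached time without a
	 * full clocksource read: lower resolution, cheaper to query. */
	if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}
	printf("coarse monotonic: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

On older toolchains clock_gettime() typically needs linking with -lrt.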
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 40f38d896777..0c4ee9b88f85 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h | |||
@@ -144,7 +144,7 @@ struct tty_ldisc_ops { | |||
144 | 144 | ||
145 | struct tty_ldisc { | 145 | struct tty_ldisc { |
146 | struct tty_ldisc_ops *ops; | 146 | struct tty_ldisc_ops *ops; |
147 | int refcount; | 147 | atomic_t users; |
148 | }; | 148 | }; |
149 | 149 | ||
150 | #define TTY_LDISC_MAGIC 0x5403 | 150 | #define TTY_LDISC_MAGIC 0x5403 |
diff --git a/include/linux/wait.h b/include/linux/wait.h index 6788e1a4d4ca..cf3c2f5dba51 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -77,7 +77,14 @@ struct task_struct; | |||
77 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ | 77 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ |
78 | { .flags = word, .bit_nr = bit, } | 78 | { .flags = word, .bit_nr = bit, } |
79 | 79 | ||
80 | extern void init_waitqueue_head(wait_queue_head_t *q); | 80 | extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *); |
81 | |||
82 | #define init_waitqueue_head(q) \ | ||
83 | do { \ | ||
84 | static struct lock_class_key __key; \ | ||
85 | \ | ||
86 | __init_waitqueue_head((q), &__key); \ | ||
87 | } while (0) | ||
81 | 88 | ||
82 | #ifdef CONFIG_LOCKDEP | 89 | #ifdef CONFIG_LOCKDEP |
83 | # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ | 90 | # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ |
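Callers of init_waitqueue_head() are unchanged; the macro form just plants a static struct lock_class_key at each call site so lockdep can give every wait queue's internal spinlock a distinct class. A small driver-style sketch (the struct and function names are made up):

#include <linux/wait.h>
#include <linux/sched.h>

struct example_dev {
	wait_queue_head_t wq;
	int ready;
};

static void example_dev_init(struct example_dev *dev)
{
	/* Expands to __init_waitqueue_head(&dev->wq, &__key), where __key is
	 * a static lock_class_key unique to this call site. */
	init_waitqueue_head(&dev->wq);
	dev->ready = 0;
}

static int example_dev_wait(struct example_dev *dev)
{
	/* 0 once dev->ready is set, -ERESTARTSYS if interrupted */
	return wait_event_interruptible(dev->wq, dev->ready);
}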
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index 80072611d26a..c274993234e3 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h | |||
@@ -355,7 +355,17 @@ struct rfcomm_dev_list_req { | |||
355 | }; | 355 | }; |
356 | 356 | ||
357 | int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); | 357 | int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); |
358 | |||
359 | #ifdef CONFIG_BT_RFCOMM_TTY | ||
358 | int rfcomm_init_ttys(void); | 360 | int rfcomm_init_ttys(void); |
359 | void rfcomm_cleanup_ttys(void); | 361 | void rfcomm_cleanup_ttys(void); |
360 | 362 | #else | |
363 | static inline int rfcomm_init_ttys(void) | ||
364 | { | ||
365 | return 0; | ||
366 | } | ||
367 | static inline void rfcomm_cleanup_ttys(void) | ||
368 | { | ||
369 | } | ||
370 | #endif | ||
361 | #endif /* __RFCOMM_H */ | 371 | #endif /* __RFCOMM_H */ |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1a21895b732b..d1892d66701a 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -979,6 +979,10 @@ struct cfg80211_ops { | |||
979 | * channels at a later time. This can be used for devices which do not | 979 | * channels at a later time. This can be used for devices which do not |
980 | * have calibration information gauranteed for frequencies or settings | 980 | * have calibration information gauranteed for frequencies or settings |
981 | * outside of its regulatory domain. | 981 | * outside of its regulatory domain. |
982 | * @disable_beacon_hints: enable this if your driver needs to ensure that | ||
983 | * passive scan flags and beaconing flags may not be lifted by cfg80211 | ||
984 | * due to regulatory beacon hints. For more information on beacon | ||
985 | * hints read the documentation for regulatory_hint_found_beacon() | ||
982 | * @reg_notifier: the driver's regulatory notification callback | 986 | * @reg_notifier: the driver's regulatory notification callback |
983 | * @regd: the driver's regulatory domain, if one was requested via | 987 | * @regd: the driver's regulatory domain, if one was requested via |
984 | * the regulatory_hint() API. This can be used by the driver | 988 | * the regulatory_hint() API. This can be used by the driver |
@@ -1004,6 +1008,7 @@ struct wiphy { | |||
1004 | 1008 | ||
1005 | bool custom_regulatory; | 1009 | bool custom_regulatory; |
1006 | bool strict_regulatory; | 1010 | bool strict_regulatory; |
1011 | bool disable_beacon_hints; | ||
1007 | 1012 | ||
1008 | enum cfg80211_signal_type signal_type; | 1013 | enum cfg80211_signal_type signal_type; |
1009 | 1014 | ||
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 1867553c61e5..f64fbaae781a 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -144,6 +144,9 @@ | |||
144 | #undef TP_fast_assign | 144 | #undef TP_fast_assign |
145 | #define TP_fast_assign(args...) args | 145 | #define TP_fast_assign(args...) args |
146 | 146 | ||
147 | #undef TP_perf_assign | ||
148 | #define TP_perf_assign(args...) | ||
149 | |||
147 | #undef TRACE_EVENT | 150 | #undef TRACE_EVENT |
148 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | 151 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ |
149 | static int \ | 152 | static int \ |
@@ -345,6 +348,56 @@ static inline int ftrace_get_offsets_##call( \ | |||
345 | 348 | ||
346 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 349 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
347 | 350 | ||
351 | #ifdef CONFIG_EVENT_PROFILE | ||
352 | |||
353 | /* | ||
354 | * Generate the functions needed for tracepoint perf_counter support. | ||
355 | * | ||
356 | * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later | ||
357 | * | ||
358 | * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call) | ||
359 | * { | ||
360 | * int ret = 0; | ||
361 | * | ||
362 | * if (!atomic_inc_return(&event_call->profile_count)) | ||
363 | * ret = register_trace_<call>(ftrace_profile_<call>); | ||
364 | * | ||
365 | * return ret; | ||
366 | * } | ||
367 | * | ||
368 | * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call) | ||
369 | * { | ||
370 | * if (atomic_add_negative(-1, &event_call->profile_count)) | ||
371 | * unregister_trace_<call>(ftrace_profile_<call>); | ||
372 | * } | ||
373 | * | ||
374 | */ | ||
375 | |||
376 | #undef TRACE_EVENT | ||
377 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
378 | \ | ||
379 | static void ftrace_profile_##call(proto); \ | ||
380 | \ | ||
381 | static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \ | ||
382 | { \ | ||
383 | int ret = 0; \ | ||
384 | \ | ||
385 | if (!atomic_inc_return(&event_call->profile_count)) \ | ||
386 | ret = register_trace_##call(ftrace_profile_##call); \ | ||
387 | \ | ||
388 | return ret; \ | ||
389 | } \ | ||
390 | \ | ||
391 | static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | ||
392 | { \ | ||
393 | if (atomic_add_negative(-1, &event_call->profile_count)) \ | ||
394 | unregister_trace_##call(ftrace_profile_##call); \ | ||
395 | } | ||
396 | |||
397 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
398 | |||
399 | #endif | ||
400 | |||
348 | /* | 401 | /* |
349 | * Stage 4 of the trace events. | 402 | * Stage 4 of the trace events. |
350 | * | 403 | * |
@@ -447,28 +500,6 @@ static inline int ftrace_get_offsets_##call( \ | |||
447 | #define TP_FMT(fmt, args...) fmt "\n", ##args | 500 | #define TP_FMT(fmt, args...) fmt "\n", ##args |
448 | 501 | ||
449 | #ifdef CONFIG_EVENT_PROFILE | 502 | #ifdef CONFIG_EVENT_PROFILE |
450 | #define _TRACE_PROFILE(call, proto, args) \ | ||
451 | static void ftrace_profile_##call(proto) \ | ||
452 | { \ | ||
453 | extern void perf_tpcounter_event(int); \ | ||
454 | perf_tpcounter_event(event_##call.id); \ | ||
455 | } \ | ||
456 | \ | ||
457 | static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \ | ||
458 | { \ | ||
459 | int ret = 0; \ | ||
460 | \ | ||
461 | if (!atomic_inc_return(&event_call->profile_count)) \ | ||
462 | ret = register_trace_##call(ftrace_profile_##call); \ | ||
463 | \ | ||
464 | return ret; \ | ||
465 | } \ | ||
466 | \ | ||
467 | static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | ||
468 | { \ | ||
469 | if (atomic_add_negative(-1, &event_call->profile_count)) \ | ||
470 | unregister_trace_##call(ftrace_profile_##call); \ | ||
471 | } | ||
472 | 503 | ||
473 | #define _TRACE_PROFILE_INIT(call) \ | 504 | #define _TRACE_PROFILE_INIT(call) \ |
474 | .profile_count = ATOMIC_INIT(-1), \ | 505 | .profile_count = ATOMIC_INIT(-1), \ |
@@ -476,7 +507,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | |||
476 | .profile_disable = ftrace_profile_disable_##call, | 507 | .profile_disable = ftrace_profile_disable_##call, |
477 | 508 | ||
478 | #else | 509 | #else |
479 | #define _TRACE_PROFILE(call, proto, args) | ||
480 | #define _TRACE_PROFILE_INIT(call) | 510 | #define _TRACE_PROFILE_INIT(call) |
481 | #endif | 511 | #endif |
482 | 512 | ||
@@ -502,7 +532,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | |||
502 | 532 | ||
503 | #undef TRACE_EVENT | 533 | #undef TRACE_EVENT |
504 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 534 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ |
505 | _TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ | ||
506 | \ | 535 | \ |
507 | static struct ftrace_event_call event_##call; \ | 536 | static struct ftrace_event_call event_##call; \ |
508 | \ | 537 | \ |
@@ -586,6 +615,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
586 | 615 | ||
587 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 616 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
588 | 617 | ||
589 | #undef _TRACE_PROFILE | 618 | /* |
619 | * Define the insertion callback to profile events | ||
620 | * | ||
621 | * The job is very similar to ftrace_raw_event_<call> except that we don't | ||
622 | * insert in the ring buffer but in a perf counter. | ||
623 | * | ||
624 | * static void ftrace_profile_<call>(proto) | ||
625 | * { | ||
626 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | ||
627 | * struct ftrace_event_call *event_call = &event_<call>; | ||
628 | * extern void perf_tpcounter_event(int, u64, u64, void *, int); | ||
629 | * struct ftrace_raw_##call *entry; | ||
630 | * u64 __addr = 0, __count = 1; | ||
631 | * unsigned long irq_flags; | ||
632 | * int __entry_size; | ||
633 | * int __data_size; | ||
634 | * int pc; | ||
635 | * | ||
636 | * local_save_flags(irq_flags); | ||
637 | * pc = preempt_count(); | ||
638 | * | ||
639 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | ||
640 | * | ||
641 | * // Below we want to get the aligned size by taking into account | ||
642 | * // the u32 field that will later store the buffer size | ||
643 | * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), | ||
644 | * sizeof(u64)); | ||
645 | * __entry_size -= sizeof(u32); | ||
646 | * | ||
647 | * do { | ||
648 | * char raw_data[__entry_size]; <- allocate our sample in the stack | ||
649 | * struct trace_entry *ent; | ||
650 | * | ||
651 | * zero dead bytes from alignment to avoid stack leak to userspace: | ||
652 | * | ||
653 | * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; | ||
654 | * entry = (struct ftrace_raw_<call> *)raw_data; | ||
655 | * ent = &entry->ent; | ||
656 | * tracing_generic_entry_update(ent, irq_flags, pc); | ||
657 | * ent->type = event_call->id; | ||
658 | * | ||
659 | * <tstruct> <- do some jobs with dynamic arrays | ||
660 | * | ||
661 | * <assign> <- affect our values | ||
662 | * | ||
663 | * perf_tpcounter_event(event_call->id, __addr, __count, entry, | ||
664 | * __entry_size); <- submit them to perf counter | ||
665 | * } while (0); | ||
666 | * | ||
667 | * } | ||
668 | */ | ||
669 | |||
670 | #ifdef CONFIG_EVENT_PROFILE | ||
671 | |||
672 | #undef __perf_addr | ||
673 | #define __perf_addr(a) __addr = (a) | ||
674 | |||
675 | #undef __perf_count | ||
676 | #define __perf_count(c) __count = (c) | ||
677 | |||
678 | #undef TRACE_EVENT | ||
679 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
680 | static void ftrace_profile_##call(proto) \ | ||
681 | { \ | ||
682 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | ||
683 | struct ftrace_event_call *event_call = &event_##call; \ | ||
684 | extern void perf_tpcounter_event(int, u64, u64, void *, int); \ | ||
685 | struct ftrace_raw_##call *entry; \ | ||
686 | u64 __addr = 0, __count = 1; \ | ||
687 | unsigned long irq_flags; \ | ||
688 | int __entry_size; \ | ||
689 | int __data_size; \ | ||
690 | int pc; \ | ||
691 | \ | ||
692 | local_save_flags(irq_flags); \ | ||
693 | pc = preempt_count(); \ | ||
694 | \ | ||
695 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | ||
696 | __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ | ||
697 | sizeof(u64)); \ | ||
698 | __entry_size -= sizeof(u32); \ | ||
699 | \ | ||
700 | do { \ | ||
701 | char raw_data[__entry_size]; \ | ||
702 | struct trace_entry *ent; \ | ||
703 | \ | ||
704 | *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ | ||
705 | entry = (struct ftrace_raw_##call *)raw_data; \ | ||
706 | ent = &entry->ent; \ | ||
707 | tracing_generic_entry_update(ent, irq_flags, pc); \ | ||
708 | ent->type = event_call->id; \ | ||
709 | \ | ||
710 | tstruct \ | ||
711 | \ | ||
712 | { assign; } \ | ||
713 | \ | ||
714 | perf_tpcounter_event(event_call->id, __addr, __count, entry,\ | ||
715 | __entry_size); \ | ||
716 | } while (0); \ | ||
717 | \ | ||
718 | } | ||
719 | |||
720 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
721 | #endif /* CONFIG_EVENT_PROFILE */ | ||
722 | |||
590 | #undef _TRACE_PROFILE_INIT | 723 | #undef _TRACE_PROFILE_INIT |
591 | 724 | ||
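Events opt into the profile path from their TRACE_EVENT() definition: TP_perf_assign() is discarded in the normal stages and only expanded when building the profile callback, where __perf_count()/__perf_addr() set the extra arguments handed to perf_tpcounter_event(). A hypothetical event showing the pattern (the event name, fields and the idea of weighting by a runtime value are illustrative, loosely modelled on the scheduler statistics events):

TRACE_EVENT(example_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime),

	TP_ARGS(tsk, runtime),

	TP_STRUCT__entry(
		__field(pid_t,	pid)
		__field(u64,	runtime)
	),

	TP_fast_assign(
		__entry->pid	 = tsk->pid;
		__entry->runtime = runtime;
	)
	TP_perf_assign(
		/* count the sample with weight 'runtime' instead of 1 */
		__perf_count(runtime);
	),

	TP_printk("pid=%d runtime=%llu",
		  __entry->pid, (unsigned long long)__entry->runtime)
);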
diff --git a/init/Kconfig b/init/Kconfig index cb2c09270226..3f7e60995c80 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -940,6 +940,7 @@ menu "Performance Counters" | |||
940 | 940 | ||
941 | config PERF_COUNTERS | 941 | config PERF_COUNTERS |
942 | bool "Kernel Performance Counters" | 942 | bool "Kernel Performance Counters" |
943 | default y if PROFILING | ||
943 | depends on HAVE_PERF_COUNTERS | 944 | depends on HAVE_PERF_COUNTERS |
944 | select ANON_INODES | 945 | select ANON_INODES |
945 | help | 946 | help |
@@ -961,9 +962,17 @@ config PERF_COUNTERS | |||
961 | Say Y if unsure. | 962 | Say Y if unsure. |
962 | 963 | ||
963 | config EVENT_PROFILE | 964 | config EVENT_PROFILE |
964 | bool "Tracepoint profile sources" | 965 | bool "Tracepoint profiling sources" |
965 | depends on PERF_COUNTERS && EVENT_TRACING | 966 | depends on PERF_COUNTERS && EVENT_TRACING |
966 | default y | 967 | default y |
968 | help | ||
969 | Allow the use of tracepoints as software performance counters. | ||
970 | |||
971 | When this is enabled, you can create perf counters based on | ||
972 | tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID | ||
973 | found in debugfs://tracing/events/*/*/id. (The -e/--events | ||
974 | option to the perf tool can parse and interpret symbolic | ||
975 | tracepoints, in the subsystem:tracepoint_name format.) | ||
967 | 976 | ||
968 | endmenu | 977 | endmenu |
969 | 978 | ||
diff --git a/kernel/fork.c b/kernel/fork.c index 893ab0bf5e39..14cf79f14237 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -569,18 +569,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) | |||
569 | * the value intact in a core dump, and to save the unnecessary | 569 | * the value intact in a core dump, and to save the unnecessary |
570 | * trouble otherwise. Userland only wants this done for a sys_exit. | 570 | * trouble otherwise. Userland only wants this done for a sys_exit. |
571 | */ | 571 | */ |
572 | if (tsk->clear_child_tid | 572 | if (tsk->clear_child_tid) { |
573 | && !(tsk->flags & PF_SIGNALED) | 573 | if (!(tsk->flags & PF_SIGNALED) && |
574 | && atomic_read(&mm->mm_users) > 1) { | 574 | atomic_read(&mm->mm_users) > 1) { |
575 | u32 __user * tidptr = tsk->clear_child_tid; | 575 | /* |
576 | * We don't check the error code - if userspace has | ||
577 | * not set up a proper pointer then tough luck. | ||
578 | */ | ||
579 | put_user(0, tsk->clear_child_tid); | ||
580 | sys_futex(tsk->clear_child_tid, FUTEX_WAKE, | ||
581 | 1, NULL, NULL, 0); | ||
582 | } | ||
576 | tsk->clear_child_tid = NULL; | 583 | tsk->clear_child_tid = NULL; |
577 | |||
578 | /* | ||
579 | * We don't check the error code - if userspace has | ||
580 | * not set up a proper pointer then tough luck. | ||
581 | */ | ||
582 | put_user(0, tidptr); | ||
583 | sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0); | ||
584 | } | 584 | } |
585 | } | 585 | } |
586 | 586 | ||
@@ -1270,6 +1270,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1270 | write_unlock_irq(&tasklist_lock); | 1270 | write_unlock_irq(&tasklist_lock); |
1271 | proc_fork_connector(p); | 1271 | proc_fork_connector(p); |
1272 | cgroup_post_fork(p); | 1272 | cgroup_post_fork(p); |
1273 | perf_counter_fork(p); | ||
1273 | return p; | 1274 | return p; |
1274 | 1275 | ||
1275 | bad_fork_free_pid: | 1276 | bad_fork_free_pid: |
@@ -1411,9 +1412,6 @@ long do_fork(unsigned long clone_flags, | |||
1411 | init_completion(&vfork); | 1412 | init_completion(&vfork); |
1412 | } | 1413 | } |
1413 | 1414 | ||
1414 | if (!(clone_flags & CLONE_THREAD)) | ||
1415 | perf_counter_fork(p); | ||
1416 | |||
1417 | audit_finish_fork(p); | 1415 | audit_finish_fork(p); |
1418 | tracehook_report_clone(regs, clone_flags, nr, p); | 1416 | tracehook_report_clone(regs, clone_flags, nr, p); |
1419 | 1417 | ||
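The mm_release() rework above is the code behind CLONE_CHILD_CLEARTID: on a normal exit, while other users of the mm remain, the kernel stores 0 to the registered tid word and FUTEX_WAKEs it, which is how pthread_join() sleeps. A hedged userspace sketch of that contract (the names, the 64 KiB stack and the -1 sentinel are illustrative):

#define _GNU_SOURCE
#include <sched.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>

static volatile pid_t child_tid = -1;	/* stays nonzero until the kernel clears it */

static int child_fn(void *arg)
{
	return 0;	/* exiting triggers mm_release() -> put_user(0, ...) + FUTEX_WAKE */
}

int main(void)
{
	char *stack = malloc(64 * 1024);
	pid_t tid;

	if (!stack)
		return 1;

	tid = clone(child_fn, stack + 64 * 1024,
		    CLONE_VM | CLONE_CHILD_CLEARTID | SIGCHLD,
		    NULL, NULL, NULL, (pid_t *)&child_tid);
	if (tid < 0)
		return 1;

	/* Re-read and re-arm the futex until the kernel has stored 0. */
	while (child_tid != 0) {
		pid_t val = child_tid;
		syscall(SYS_futex, &child_tid, FUTEX_WAIT, val, NULL, NULL, 0);
	}
	printf("child %d has exited\n", tid);
	return 0;
}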
diff --git a/kernel/futex.c b/kernel/futex.c index 0672ff88f159..e18cfbdc7190 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -1010,15 +1010,19 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, | |||
1010 | * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue | 1010 | * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue |
1011 | * q: the futex_q | 1011 | * q: the futex_q |
1012 | * key: the key of the requeue target futex | 1012 | * key: the key of the requeue target futex |
1013 | * hb: the hash_bucket of the requeue target futex | ||
1013 | * | 1014 | * |
1014 | * During futex_requeue, with requeue_pi=1, it is possible to acquire the | 1015 | * During futex_requeue, with requeue_pi=1, it is possible to acquire the |
1015 | * target futex if it is uncontended or via a lock steal. Set the futex_q key | 1016 | * target futex if it is uncontended or via a lock steal. Set the futex_q key |
1016 | * to the requeue target futex so the waiter can detect the wakeup on the right | 1017 | * to the requeue target futex so the waiter can detect the wakeup on the right |
1017 | * futex, but remove it from the hb and NULL the rt_waiter so it can detect | 1018 | * futex, but remove it from the hb and NULL the rt_waiter so it can detect |
1018 | * atomic lock acquisition. Must be called with the q->lock_ptr held. | 1019 | * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock |
1020 | * to protect access to the pi_state to fixup the owner later. Must be called | ||
1021 | * with both q->lock_ptr and hb->lock held. | ||
1019 | */ | 1022 | */ |
1020 | static inline | 1023 | static inline |
1021 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key) | 1024 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
1025 | struct futex_hash_bucket *hb) | ||
1022 | { | 1026 | { |
1023 | drop_futex_key_refs(&q->key); | 1027 | drop_futex_key_refs(&q->key); |
1024 | get_futex_key_refs(key); | 1028 | get_futex_key_refs(key); |
@@ -1030,6 +1034,11 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key) | |||
1030 | WARN_ON(!q->rt_waiter); | 1034 | WARN_ON(!q->rt_waiter); |
1031 | q->rt_waiter = NULL; | 1035 | q->rt_waiter = NULL; |
1032 | 1036 | ||
1037 | q->lock_ptr = &hb->lock; | ||
1038 | #ifdef CONFIG_DEBUG_PI_LIST | ||
1039 | q->list.plist.lock = &hb->lock; | ||
1040 | #endif | ||
1041 | |||
1033 | wake_up_state(q->task, TASK_NORMAL); | 1042 | wake_up_state(q->task, TASK_NORMAL); |
1034 | } | 1043 | } |
1035 | 1044 | ||
@@ -1088,7 +1097,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, | |||
1088 | ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, | 1097 | ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, |
1089 | set_waiters); | 1098 | set_waiters); |
1090 | if (ret == 1) | 1099 | if (ret == 1) |
1091 | requeue_pi_wake_futex(top_waiter, key2); | 1100 | requeue_pi_wake_futex(top_waiter, key2, hb2); |
1092 | 1101 | ||
1093 | return ret; | 1102 | return ret; |
1094 | } | 1103 | } |
@@ -1247,8 +1256,15 @@ retry_private: | |||
1247 | if (!match_futex(&this->key, &key1)) | 1256 | if (!match_futex(&this->key, &key1)) |
1248 | continue; | 1257 | continue; |
1249 | 1258 | ||
1250 | WARN_ON(!requeue_pi && this->rt_waiter); | 1259 | /* |
1251 | WARN_ON(requeue_pi && !this->rt_waiter); | 1260 | * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always |
1261 | * be paired with each other and no other futex ops. | ||
1262 | */ | ||
1263 | if ((requeue_pi && !this->rt_waiter) || | ||
1264 | (!requeue_pi && this->rt_waiter)) { | ||
1265 | ret = -EINVAL; | ||
1266 | break; | ||
1267 | } | ||
1252 | 1268 | ||
1253 | /* | 1269 | /* |
1254 | * Wake nr_wake waiters. For requeue_pi, if we acquired the | 1270 | * Wake nr_wake waiters. For requeue_pi, if we acquired the |
@@ -1273,7 +1289,7 @@ retry_private: | |||
1273 | this->task, 1); | 1289 | this->task, 1); |
1274 | if (ret == 1) { | 1290 | if (ret == 1) { |
1275 | /* We got the lock. */ | 1291 | /* We got the lock. */ |
1276 | requeue_pi_wake_futex(this, &key2); | 1292 | requeue_pi_wake_futex(this, &key2, hb2); |
1277 | continue; | 1293 | continue; |
1278 | } else if (ret) { | 1294 | } else if (ret) { |
1279 | /* -EDEADLK */ | 1295 | /* -EDEADLK */ |
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index d607a5b9ee29..235716556bf1 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -180,7 +180,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, | |||
180 | int cmd = op & FUTEX_CMD_MASK; | 180 | int cmd = op & FUTEX_CMD_MASK; |
181 | 181 | ||
182 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || | 182 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
183 | cmd == FUTEX_WAIT_BITSET)) { | 183 | cmd == FUTEX_WAIT_BITSET || |
184 | cmd == FUTEX_WAIT_REQUEUE_PI)) { | ||
184 | if (get_compat_timespec(&ts, utime)) | 185 | if (get_compat_timespec(&ts, utime)) |
185 | return -EFAULT; | 186 | return -EFAULT; |
186 | if (!timespec_valid(&ts)) | 187 | if (!timespec_valid(&ts)) |
@@ -191,7 +192,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, | |||
191 | t = ktime_add_safe(ktime_get(), t); | 192 | t = ktime_add_safe(ktime_get(), t); |
192 | tp = &t; | 193 | tp = &t; |
193 | } | 194 | } |
194 | if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE) | 195 | if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || |
196 | cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) | ||
195 | val2 = (int) (unsigned long) utime; | 197 | val2 = (int) (unsigned long) utime; |
196 | 198 | ||
197 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); | 199 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 49da79ab8486..e2f91ecc01a8 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -48,37 +48,6 @@ | |||
48 | 48 | ||
49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
50 | 50 | ||
51 | /** | ||
52 | * ktime_get - get the monotonic time in ktime_t format | ||
53 | * | ||
54 | * returns the time in ktime_t format | ||
55 | */ | ||
56 | ktime_t ktime_get(void) | ||
57 | { | ||
58 | struct timespec now; | ||
59 | |||
60 | ktime_get_ts(&now); | ||
61 | |||
62 | return timespec_to_ktime(now); | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(ktime_get); | ||
65 | |||
66 | /** | ||
67 | * ktime_get_real - get the real (wall-) time in ktime_t format | ||
68 | * | ||
69 | * returns the time in ktime_t format | ||
70 | */ | ||
71 | ktime_t ktime_get_real(void) | ||
72 | { | ||
73 | struct timespec now; | ||
74 | |||
75 | getnstimeofday(&now); | ||
76 | |||
77 | return timespec_to_ktime(now); | ||
78 | } | ||
79 | |||
80 | EXPORT_SYMBOL_GPL(ktime_get_real); | ||
81 | |||
82 | /* | 51 | /* |
83 | * The timer bases: | 52 | * The timer bases: |
84 | * | 53 | * |
@@ -106,31 +75,6 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | |||
106 | } | 75 | } |
107 | }; | 76 | }; |
108 | 77 | ||
109 | /** | ||
110 | * ktime_get_ts - get the monotonic clock in timespec format | ||
111 | * @ts: pointer to timespec variable | ||
112 | * | ||
113 | * The function calculates the monotonic clock from the realtime | ||
114 | * clock and the wall_to_monotonic offset and stores the result | ||
115 | * in normalized timespec format in the variable pointed to by @ts. | ||
116 | */ | ||
117 | void ktime_get_ts(struct timespec *ts) | ||
118 | { | ||
119 | struct timespec tomono; | ||
120 | unsigned long seq; | ||
121 | |||
122 | do { | ||
123 | seq = read_seqbegin(&xtime_lock); | ||
124 | getnstimeofday(ts); | ||
125 | tomono = wall_to_monotonic; | ||
126 | |||
127 | } while (read_seqretry(&xtime_lock, seq)); | ||
128 | |||
129 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, | ||
130 | ts->tv_nsec + tomono.tv_nsec); | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(ktime_get_ts); | ||
133 | |||
134 | /* | 78 | /* |
135 | * Get the coarse grained time at the softirq based on xtime and | 79 | * Get the coarse grained time at the softirq based on xtime and |
136 | * wall_to_monotonic. | 80 | * wall_to_monotonic. |
@@ -1154,7 +1098,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |||
1154 | clock_id = CLOCK_MONOTONIC; | 1098 | clock_id = CLOCK_MONOTONIC; |
1155 | 1099 | ||
1156 | timer->base = &cpu_base->clock_base[clock_id]; | 1100 | timer->base = &cpu_base->clock_base[clock_id]; |
1157 | INIT_LIST_HEAD(&timer->cb_entry); | ||
1158 | hrtimer_init_timer_hres(timer); | 1101 | hrtimer_init_timer_hres(timer); |
1159 | 1102 | ||
1160 | #ifdef CONFIG_TIMER_STATS | 1103 | #ifdef CONFIG_TIMER_STATS |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 61c679db4687..d222515a5a06 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -761,7 +761,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
761 | { | 761 | { |
762 | struct irq_desc *desc = irq_to_desc(irq); | 762 | struct irq_desc *desc = irq_to_desc(irq); |
763 | struct irqaction *action, **action_ptr; | 763 | struct irqaction *action, **action_ptr; |
764 | struct task_struct *irqthread; | ||
765 | unsigned long flags; | 764 | unsigned long flags; |
766 | 765 | ||
767 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | 766 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
@@ -809,9 +808,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
809 | desc->chip->disable(irq); | 808 | desc->chip->disable(irq); |
810 | } | 809 | } |
811 | 810 | ||
812 | irqthread = action->thread; | ||
813 | action->thread = NULL; | ||
814 | |||
815 | spin_unlock_irqrestore(&desc->lock, flags); | 811 | spin_unlock_irqrestore(&desc->lock, flags); |
816 | 812 | ||
817 | unregister_handler_proc(irq, action); | 813 | unregister_handler_proc(irq, action); |
@@ -819,12 +815,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
819 | /* Make sure it's not being used on another CPU: */ | 815 | /* Make sure it's not being used on another CPU: */ |
820 | synchronize_irq(irq); | 816 | synchronize_irq(irq); |
821 | 817 | ||
822 | if (irqthread) { | ||
823 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | ||
824 | kthread_stop(irqthread); | ||
825 | put_task_struct(irqthread); | ||
826 | } | ||
827 | |||
828 | #ifdef CONFIG_DEBUG_SHIRQ | 818 | #ifdef CONFIG_DEBUG_SHIRQ |
829 | /* | 819 | /* |
830 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | 820 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ |
@@ -840,6 +830,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
840 | local_irq_restore(flags); | 830 | local_irq_restore(flags); |
841 | } | 831 | } |
842 | #endif | 832 | #endif |
833 | |||
834 | if (action->thread) { | ||
835 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | ||
836 | kthread_stop(action->thread); | ||
837 | put_task_struct(action->thread); | ||
838 | } | ||
839 | |||
843 | return action; | 840 | return action; |
844 | } | 841 | } |
845 | 842 | ||
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 2f69bee57bf2..3fd30197da2e 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c | |||
@@ -107,8 +107,8 @@ out_unlock: | |||
107 | 107 | ||
108 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | 108 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) |
109 | { | 109 | { |
110 | /* those all static, do move them */ | 110 | /* do not move those that are static or whose target node is -1 */ |
111 | if (desc->irq < NR_IRQS_LEGACY) | 111 | if (desc->irq < NR_IRQS_LEGACY || node == -1) |
112 | return desc; | 112 | return desc; |
113 | 113 | ||
114 | if (desc->node != node) | 114 | if (desc->node != node) |
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index d7135aa2d2c4..e94caa666dba 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c | |||
@@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void) | |||
758 | &proc_lockdep_stats_operations); | 758 | &proc_lockdep_stats_operations); |
759 | 759 | ||
760 | #ifdef CONFIG_LOCK_STAT | 760 | #ifdef CONFIG_LOCK_STAT |
761 | proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations); | 761 | proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, |
762 | &proc_lock_stat_operations); | ||
762 | #endif | 763 | #endif |
763 | 764 | ||
764 | return 0; | 765 | return 0; |
diff --git a/kernel/panic.c b/kernel/panic.c index 984b3ecbd72c..512ab73b0ca3 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -301,6 +301,7 @@ int oops_may_print(void) | |||
301 | */ | 301 | */ |
302 | void oops_enter(void) | 302 | void oops_enter(void) |
303 | { | 303 | { |
304 | tracing_off(); | ||
304 | /* can't trust the integrity of the kernel anymore: */ | 305 | /* can't trust the integrity of the kernel anymore: */ |
305 | debug_locks_off(); | 306 | debug_locks_off(); |
306 | do_oops_enter_exit(); | 307 | do_oops_enter_exit(); |
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 950931041954..534e20d14d63 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1; | |||
42 | static atomic_t nr_counters __read_mostly; | 42 | static atomic_t nr_counters __read_mostly; |
43 | static atomic_t nr_mmap_counters __read_mostly; | 43 | static atomic_t nr_mmap_counters __read_mostly; |
44 | static atomic_t nr_comm_counters __read_mostly; | 44 | static atomic_t nr_comm_counters __read_mostly; |
45 | static atomic_t nr_task_counters __read_mostly; | ||
45 | 46 | ||
46 | /* | 47 | /* |
47 | * perf counter paranoia level: | 48 | * perf counter paranoia level: |
@@ -87,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); } | |||
87 | void __weak hw_perf_enable(void) { barrier(); } | 88 | void __weak hw_perf_enable(void) { barrier(); } |
88 | 89 | ||
89 | void __weak hw_perf_counter_setup(int cpu) { barrier(); } | 90 | void __weak hw_perf_counter_setup(int cpu) { barrier(); } |
91 | void __weak hw_perf_counter_setup_online(int cpu) { barrier(); } | ||
90 | 92 | ||
91 | int __weak | 93 | int __weak |
92 | hw_perf_group_sched_in(struct perf_counter *group_leader, | 94 | hw_perf_group_sched_in(struct perf_counter *group_leader, |
@@ -305,6 +307,10 @@ counter_sched_out(struct perf_counter *counter, | |||
305 | return; | 307 | return; |
306 | 308 | ||
307 | counter->state = PERF_COUNTER_STATE_INACTIVE; | 309 | counter->state = PERF_COUNTER_STATE_INACTIVE; |
310 | if (counter->pending_disable) { | ||
311 | counter->pending_disable = 0; | ||
312 | counter->state = PERF_COUNTER_STATE_OFF; | ||
313 | } | ||
308 | counter->tstamp_stopped = ctx->time; | 314 | counter->tstamp_stopped = ctx->time; |
309 | counter->pmu->disable(counter); | 315 | counter->pmu->disable(counter); |
310 | counter->oncpu = -1; | 316 | counter->oncpu = -1; |
@@ -1103,7 +1109,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx, | |||
1103 | __perf_counter_sync_stat(counter, next_counter); | 1109 | __perf_counter_sync_stat(counter, next_counter); |
1104 | 1110 | ||
1105 | counter = list_next_entry(counter, event_entry); | 1111 | counter = list_next_entry(counter, event_entry); |
1106 | next_counter = list_next_entry(counter, event_entry); | 1112 | next_counter = list_next_entry(next_counter, event_entry); |
1107 | } | 1113 | } |
1108 | } | 1114 | } |
1109 | 1115 | ||
@@ -1654,6 +1660,8 @@ static void free_counter(struct perf_counter *counter) | |||
1654 | atomic_dec(&nr_mmap_counters); | 1660 | atomic_dec(&nr_mmap_counters); |
1655 | if (counter->attr.comm) | 1661 | if (counter->attr.comm) |
1656 | atomic_dec(&nr_comm_counters); | 1662 | atomic_dec(&nr_comm_counters); |
1663 | if (counter->attr.task) | ||
1664 | atomic_dec(&nr_task_counters); | ||
1657 | } | 1665 | } |
1658 | 1666 | ||
1659 | if (counter->destroy) | 1667 | if (counter->destroy) |
@@ -1688,14 +1696,133 @@ static int perf_release(struct inode *inode, struct file *file) | |||
1688 | return 0; | 1696 | return 0; |
1689 | } | 1697 | } |
1690 | 1698 | ||
1699 | static int perf_counter_read_size(struct perf_counter *counter) | ||
1700 | { | ||
1701 | int entry = sizeof(u64); /* value */ | ||
1702 | int size = 0; | ||
1703 | int nr = 1; | ||
1704 | |||
1705 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
1706 | size += sizeof(u64); | ||
1707 | |||
1708 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
1709 | size += sizeof(u64); | ||
1710 | |||
1711 | if (counter->attr.read_format & PERF_FORMAT_ID) | ||
1712 | entry += sizeof(u64); | ||
1713 | |||
1714 | if (counter->attr.read_format & PERF_FORMAT_GROUP) { | ||
1715 | nr += counter->group_leader->nr_siblings; | ||
1716 | size += sizeof(u64); | ||
1717 | } | ||
1718 | |||
1719 | size += entry * nr; | ||
1720 | |||
1721 | return size; | ||
1722 | } | ||
1723 | |||
1724 | static u64 perf_counter_read_value(struct perf_counter *counter) | ||
1725 | { | ||
1726 | struct perf_counter *child; | ||
1727 | u64 total = 0; | ||
1728 | |||
1729 | total += perf_counter_read(counter); | ||
1730 | list_for_each_entry(child, &counter->child_list, child_list) | ||
1731 | total += perf_counter_read(child); | ||
1732 | |||
1733 | return total; | ||
1734 | } | ||
1735 | |||
1736 | static int perf_counter_read_entry(struct perf_counter *counter, | ||
1737 | u64 read_format, char __user *buf) | ||
1738 | { | ||
1739 | int n = 0, count = 0; | ||
1740 | u64 values[2]; | ||
1741 | |||
1742 | values[n++] = perf_counter_read_value(counter); | ||
1743 | if (read_format & PERF_FORMAT_ID) | ||
1744 | values[n++] = primary_counter_id(counter); | ||
1745 | |||
1746 | count = n * sizeof(u64); | ||
1747 | |||
1748 | if (copy_to_user(buf, values, count)) | ||
1749 | return -EFAULT; | ||
1750 | |||
1751 | return count; | ||
1752 | } | ||
1753 | |||
1754 | static int perf_counter_read_group(struct perf_counter *counter, | ||
1755 | u64 read_format, char __user *buf) | ||
1756 | { | ||
1757 | struct perf_counter *leader = counter->group_leader, *sub; | ||
1758 | int n = 0, size = 0, err = -EFAULT; | ||
1759 | u64 values[3]; | ||
1760 | |||
1761 | values[n++] = 1 + leader->nr_siblings; | ||
1762 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1763 | values[n++] = leader->total_time_enabled + | ||
1764 | atomic64_read(&leader->child_total_time_enabled); | ||
1765 | } | ||
1766 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1767 | values[n++] = leader->total_time_running + | ||
1768 | atomic64_read(&leader->child_total_time_running); | ||
1769 | } | ||
1770 | |||
1771 | size = n * sizeof(u64); | ||
1772 | |||
1773 | if (copy_to_user(buf, values, size)) | ||
1774 | return -EFAULT; | ||
1775 | |||
1776 | err = perf_counter_read_entry(leader, read_format, buf + size); | ||
1777 | if (err < 0) | ||
1778 | return err; | ||
1779 | |||
1780 | size += err; | ||
1781 | |||
1782 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | ||
1783 | 		err = perf_counter_read_entry(sub, read_format, | ||
1784 | buf + size); | ||
1785 | if (err < 0) | ||
1786 | return err; | ||
1787 | |||
1788 | size += err; | ||
1789 | } | ||
1790 | |||
1791 | return size; | ||
1792 | } | ||
1793 | |||
1794 | static int perf_counter_read_one(struct perf_counter *counter, | ||
1795 | u64 read_format, char __user *buf) | ||
1796 | { | ||
1797 | u64 values[4]; | ||
1798 | int n = 0; | ||
1799 | |||
1800 | values[n++] = perf_counter_read_value(counter); | ||
1801 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1802 | values[n++] = counter->total_time_enabled + | ||
1803 | atomic64_read(&counter->child_total_time_enabled); | ||
1804 | } | ||
1805 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1806 | values[n++] = counter->total_time_running + | ||
1807 | atomic64_read(&counter->child_total_time_running); | ||
1808 | } | ||
1809 | if (read_format & PERF_FORMAT_ID) | ||
1810 | values[n++] = primary_counter_id(counter); | ||
1811 | |||
1812 | if (copy_to_user(buf, values, n * sizeof(u64))) | ||
1813 | return -EFAULT; | ||
1814 | |||
1815 | return n * sizeof(u64); | ||
1816 | } | ||
1817 | |||
1691 | /* | 1818 | /* |
1692 | * Read the performance counter - simple non blocking version for now | 1819 | * Read the performance counter - simple non blocking version for now |
1693 | */ | 1820 | */ |
1694 | static ssize_t | 1821 | static ssize_t |
1695 | perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) | 1822 | perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) |
1696 | { | 1823 | { |
1697 | u64 values[4]; | 1824 | u64 read_format = counter->attr.read_format; |
1698 | int n; | 1825 | int ret; |
1699 | 1826 | ||
1700 | /* | 1827 | /* |
1701 | * Return end-of-file for a read on a counter that is in | 1828 | * Return end-of-file for a read on a counter that is in |
@@ -1705,28 +1832,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) | |||
1705 | if (counter->state == PERF_COUNTER_STATE_ERROR) | 1832 | if (counter->state == PERF_COUNTER_STATE_ERROR) |
1706 | return 0; | 1833 | return 0; |
1707 | 1834 | ||
1835 | if (count < perf_counter_read_size(counter)) | ||
1836 | return -ENOSPC; | ||
1837 | |||
1708 | WARN_ON_ONCE(counter->ctx->parent_ctx); | 1838 | WARN_ON_ONCE(counter->ctx->parent_ctx); |
1709 | mutex_lock(&counter->child_mutex); | 1839 | mutex_lock(&counter->child_mutex); |
1710 | values[0] = perf_counter_read(counter); | 1840 | if (read_format & PERF_FORMAT_GROUP) |
1711 | n = 1; | 1841 | ret = perf_counter_read_group(counter, read_format, buf); |
1712 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 1842 | else |
1713 | values[n++] = counter->total_time_enabled + | 1843 | ret = perf_counter_read_one(counter, read_format, buf); |
1714 | atomic64_read(&counter->child_total_time_enabled); | ||
1715 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
1716 | values[n++] = counter->total_time_running + | ||
1717 | atomic64_read(&counter->child_total_time_running); | ||
1718 | if (counter->attr.read_format & PERF_FORMAT_ID) | ||
1719 | values[n++] = primary_counter_id(counter); | ||
1720 | mutex_unlock(&counter->child_mutex); | 1844 | mutex_unlock(&counter->child_mutex); |
1721 | 1845 | ||
1722 | if (count < n * sizeof(u64)) | 1846 | return ret; |
1723 | return -EINVAL; | ||
1724 | count = n * sizeof(u64); | ||
1725 | |||
1726 | if (copy_to_user(buf, values, count)) | ||
1727 | return -EFAULT; | ||
1728 | |||
1729 | return count; | ||
1730 | } | 1847 | } |
1731 | 1848 | ||
1732 | static ssize_t | 1849 | static ssize_t |
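The perf_counter_read_group()/perf_counter_read_one() split above defines the buffer layout a read() on a counter fd now returns. A minimal user-space sketch of walking the PERF_FORMAT_GROUP layout (nr, optional time_enabled/time_running, then one value[, id] pair per group member) follows; the fd is assumed to have been opened with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, and the helper name is illustrative only.

	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* fd: counter file descriptor opened with the read_format flags above */
	static int read_group_counters(int fd)
	{
		uint64_t buf[64];	/* large enough for a small group */
		ssize_t n = read(fd, buf, sizeof(buf));
		uint64_t i, nr, *p = buf;

		if (n < 0)
			return -1;

		nr = *p++;			/* 1 + nr_siblings */
		printf("enabled %llu running %llu\n",
		       (unsigned long long)p[0], (unsigned long long)p[1]);
		p += 2;				/* TOTAL_TIME_ENABLED, TOTAL_TIME_RUNNING */

		for (i = 0; i < nr; i++) {
			uint64_t value = *p++;
			uint64_t id    = *p++;	/* present because of PERF_FORMAT_ID */
			printf("counter %llu: %llu\n",
			       (unsigned long long)id, (unsigned long long)value);
		}
		return 0;
	}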
@@ -2230,7 +2347,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry) | |||
2230 | 2347 | ||
2231 | if (counter->pending_disable) { | 2348 | if (counter->pending_disable) { |
2232 | counter->pending_disable = 0; | 2349 | counter->pending_disable = 0; |
2233 | perf_counter_disable(counter); | 2350 | __perf_counter_disable(counter); |
2234 | } | 2351 | } |
2235 | 2352 | ||
2236 | if (counter->pending_wakeup) { | 2353 | if (counter->pending_wakeup) { |
@@ -2615,7 +2732,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) | |||
2615 | return task_pid_nr_ns(p, counter->ns); | 2732 | return task_pid_nr_ns(p, counter->ns); |
2616 | } | 2733 | } |
2617 | 2734 | ||
2618 | static void perf_counter_output(struct perf_counter *counter, int nmi, | 2735 | static void perf_output_read_one(struct perf_output_handle *handle, |
2736 | struct perf_counter *counter) | ||
2737 | { | ||
2738 | u64 read_format = counter->attr.read_format; | ||
2739 | u64 values[4]; | ||
2740 | int n = 0; | ||
2741 | |||
2742 | values[n++] = atomic64_read(&counter->count); | ||
2743 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
2744 | values[n++] = counter->total_time_enabled + | ||
2745 | atomic64_read(&counter->child_total_time_enabled); | ||
2746 | } | ||
2747 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
2748 | values[n++] = counter->total_time_running + | ||
2749 | atomic64_read(&counter->child_total_time_running); | ||
2750 | } | ||
2751 | if (read_format & PERF_FORMAT_ID) | ||
2752 | values[n++] = primary_counter_id(counter); | ||
2753 | |||
2754 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2755 | } | ||
2756 | |||
2757 | /* | ||
2758 | * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. | ||
2759 | */ | ||
2760 | static void perf_output_read_group(struct perf_output_handle *handle, | ||
2761 | struct perf_counter *counter) | ||
2762 | { | ||
2763 | struct perf_counter *leader = counter->group_leader, *sub; | ||
2764 | u64 read_format = counter->attr.read_format; | ||
2765 | u64 values[5]; | ||
2766 | int n = 0; | ||
2767 | |||
2768 | values[n++] = 1 + leader->nr_siblings; | ||
2769 | |||
2770 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
2771 | values[n++] = leader->total_time_enabled; | ||
2772 | |||
2773 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
2774 | values[n++] = leader->total_time_running; | ||
2775 | |||
2776 | if (leader != counter) | ||
2777 | leader->pmu->read(leader); | ||
2778 | |||
2779 | values[n++] = atomic64_read(&leader->count); | ||
2780 | if (read_format & PERF_FORMAT_ID) | ||
2781 | values[n++] = primary_counter_id(leader); | ||
2782 | |||
2783 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2784 | |||
2785 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | ||
2786 | n = 0; | ||
2787 | |||
2788 | if (sub != counter) | ||
2789 | sub->pmu->read(sub); | ||
2790 | |||
2791 | values[n++] = atomic64_read(&sub->count); | ||
2792 | if (read_format & PERF_FORMAT_ID) | ||
2793 | values[n++] = primary_counter_id(sub); | ||
2794 | |||
2795 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2796 | } | ||
2797 | } | ||
2798 | |||
2799 | static void perf_output_read(struct perf_output_handle *handle, | ||
2800 | struct perf_counter *counter) | ||
2801 | { | ||
2802 | if (counter->attr.read_format & PERF_FORMAT_GROUP) | ||
2803 | perf_output_read_group(handle, counter); | ||
2804 | else | ||
2805 | perf_output_read_one(handle, counter); | ||
2806 | } | ||
2807 | |||
2808 | void perf_counter_output(struct perf_counter *counter, int nmi, | ||
2619 | struct perf_sample_data *data) | 2809 | struct perf_sample_data *data) |
2620 | { | 2810 | { |
2621 | int ret; | 2811 | int ret; |
@@ -2626,10 +2816,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2626 | struct { | 2816 | struct { |
2627 | u32 pid, tid; | 2817 | u32 pid, tid; |
2628 | } tid_entry; | 2818 | } tid_entry; |
2629 | struct { | ||
2630 | u64 id; | ||
2631 | u64 counter; | ||
2632 | } group_entry; | ||
2633 | struct perf_callchain_entry *callchain = NULL; | 2819 | struct perf_callchain_entry *callchain = NULL; |
2634 | int callchain_size = 0; | 2820 | int callchain_size = 0; |
2635 | u64 time; | 2821 | u64 time; |
@@ -2684,10 +2870,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2684 | if (sample_type & PERF_SAMPLE_PERIOD) | 2870 | if (sample_type & PERF_SAMPLE_PERIOD) |
2685 | header.size += sizeof(u64); | 2871 | header.size += sizeof(u64); |
2686 | 2872 | ||
2687 | if (sample_type & PERF_SAMPLE_GROUP) { | 2873 | if (sample_type & PERF_SAMPLE_READ) |
2688 | header.size += sizeof(u64) + | 2874 | header.size += perf_counter_read_size(counter); |
2689 | counter->nr_siblings * sizeof(group_entry); | ||
2690 | } | ||
2691 | 2875 | ||
2692 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 2876 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
2693 | callchain = perf_callchain(data->regs); | 2877 | callchain = perf_callchain(data->regs); |
@@ -2699,6 +2883,18 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2699 | header.size += sizeof(u64); | 2883 | header.size += sizeof(u64); |
2700 | } | 2884 | } |
2701 | 2885 | ||
2886 | if (sample_type & PERF_SAMPLE_RAW) { | ||
2887 | int size = sizeof(u32); | ||
2888 | |||
2889 | if (data->raw) | ||
2890 | size += data->raw->size; | ||
2891 | else | ||
2892 | size += sizeof(u32); | ||
2893 | |||
2894 | WARN_ON_ONCE(size & (sizeof(u64)-1)); | ||
2895 | header.size += size; | ||
2896 | } | ||
2897 | |||
2702 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); | 2898 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); |
2703 | if (ret) | 2899 | if (ret) |
2704 | return; | 2900 | return; |
@@ -2732,26 +2928,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2732 | if (sample_type & PERF_SAMPLE_PERIOD) | 2928 | if (sample_type & PERF_SAMPLE_PERIOD) |
2733 | perf_output_put(&handle, data->period); | 2929 | perf_output_put(&handle, data->period); |
2734 | 2930 | ||
2735 | /* | 2931 | if (sample_type & PERF_SAMPLE_READ) |
2736 | * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. | 2932 | perf_output_read(&handle, counter); |
2737 | */ | ||
2738 | if (sample_type & PERF_SAMPLE_GROUP) { | ||
2739 | struct perf_counter *leader, *sub; | ||
2740 | u64 nr = counter->nr_siblings; | ||
2741 | |||
2742 | perf_output_put(&handle, nr); | ||
2743 | |||
2744 | leader = counter->group_leader; | ||
2745 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | ||
2746 | if (sub != counter) | ||
2747 | sub->pmu->read(sub); | ||
2748 | |||
2749 | group_entry.id = primary_counter_id(sub); | ||
2750 | group_entry.counter = atomic64_read(&sub->count); | ||
2751 | |||
2752 | perf_output_put(&handle, group_entry); | ||
2753 | } | ||
2754 | } | ||
2755 | 2933 | ||
2756 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 2934 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
2757 | if (callchain) | 2935 | if (callchain) |
@@ -2762,6 +2940,22 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
2762 | } | 2940 | } |
2763 | } | 2941 | } |
2764 | 2942 | ||
2943 | if (sample_type & PERF_SAMPLE_RAW) { | ||
2944 | if (data->raw) { | ||
2945 | perf_output_put(&handle, data->raw->size); | ||
2946 | perf_output_copy(&handle, data->raw->data, data->raw->size); | ||
2947 | } else { | ||
2948 | struct { | ||
2949 | u32 size; | ||
2950 | u32 data; | ||
2951 | } raw = { | ||
2952 | .size = sizeof(u32), | ||
2953 | .data = 0, | ||
2954 | }; | ||
2955 | perf_output_put(&handle, raw); | ||
2956 | } | ||
2957 | } | ||
2958 | |||
2765 | perf_output_end(&handle); | 2959 | perf_output_end(&handle); |
2766 | } | 2960 | } |
2767 | 2961 | ||
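With PERF_SAMPLE_RAW the sample record gains a u32 length followed by the payload, or, when no raw data is attached, a 4-byte length followed by a zero word; the WARN_ON_ONCE above documents that the combined length is expected to stay u64-aligned. A hedged sketch of decoding that field from a mapped sample, assuming the cursor already points at the raw record:

	#include <stdint.h>
	#include <string.h>

	/* p points at the PERF_SAMPLE_RAW field inside a sample record */
	static const void *decode_sample_raw(const void *p,
					     const void **data, uint32_t *size)
	{
		uint32_t sz;

		memcpy(&sz, p, sizeof(sz));		/* u32 length prefix */
		*size = sz;
		*data = (const char *)p + sizeof(uint32_t);

		/* the kernel warns unless sizeof(u32) + sz is a multiple of
		 * sizeof(u64), so stepping by that amount keeps the cursor
		 * aligned for the next field */
		return (const char *)p + sizeof(uint32_t) + sz;
	}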
@@ -2774,8 +2968,6 @@ struct perf_read_event { | |||
2774 | 2968 | ||
2775 | u32 pid; | 2969 | u32 pid; |
2776 | u32 tid; | 2970 | u32 tid; |
2777 | u64 value; | ||
2778 | u64 format[3]; | ||
2779 | }; | 2971 | }; |
2780 | 2972 | ||
2781 | static void | 2973 | static void |
@@ -2787,80 +2979,74 @@ perf_counter_read_event(struct perf_counter *counter, | |||
2787 | .header = { | 2979 | .header = { |
2788 | .type = PERF_EVENT_READ, | 2980 | .type = PERF_EVENT_READ, |
2789 | .misc = 0, | 2981 | .misc = 0, |
2790 | .size = sizeof(event) - sizeof(event.format), | 2982 | .size = sizeof(event) + perf_counter_read_size(counter), |
2791 | }, | 2983 | }, |
2792 | .pid = perf_counter_pid(counter, task), | 2984 | .pid = perf_counter_pid(counter, task), |
2793 | .tid = perf_counter_tid(counter, task), | 2985 | .tid = perf_counter_tid(counter, task), |
2794 | .value = atomic64_read(&counter->count), | ||
2795 | }; | 2986 | }; |
2796 | int ret, i = 0; | 2987 | int ret; |
2797 | |||
2798 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
2799 | event.header.size += sizeof(u64); | ||
2800 | event.format[i++] = counter->total_time_enabled; | ||
2801 | } | ||
2802 | |||
2803 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
2804 | event.header.size += sizeof(u64); | ||
2805 | event.format[i++] = counter->total_time_running; | ||
2806 | } | ||
2807 | |||
2808 | if (counter->attr.read_format & PERF_FORMAT_ID) { | ||
2809 | event.header.size += sizeof(u64); | ||
2810 | event.format[i++] = primary_counter_id(counter); | ||
2811 | } | ||
2812 | 2988 | ||
2813 | ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); | 2989 | ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); |
2814 | if (ret) | 2990 | if (ret) |
2815 | return; | 2991 | return; |
2816 | 2992 | ||
2817 | perf_output_copy(&handle, &event, event.header.size); | 2993 | perf_output_put(&handle, event); |
2994 | perf_output_read(&handle, counter); | ||
2995 | |||
2818 | perf_output_end(&handle); | 2996 | perf_output_end(&handle); |
2819 | } | 2997 | } |
2820 | 2998 | ||
2821 | /* | 2999 | /* |
2822 | * fork tracking | 3000 | * task tracking -- fork/exit |
3001 | * | ||
3002 | * enabled by: attr.comm | attr.mmap | attr.task | ||
2823 | */ | 3003 | */ |
2824 | 3004 | ||
2825 | struct perf_fork_event { | 3005 | struct perf_task_event { |
2826 | struct task_struct *task; | 3006 | struct task_struct *task; |
3007 | struct perf_counter_context *task_ctx; | ||
2827 | 3008 | ||
2828 | struct { | 3009 | struct { |
2829 | struct perf_event_header header; | 3010 | struct perf_event_header header; |
2830 | 3011 | ||
2831 | u32 pid; | 3012 | u32 pid; |
2832 | u32 ppid; | 3013 | u32 ppid; |
3014 | u32 tid; | ||
3015 | u32 ptid; | ||
2833 | } event; | 3016 | } event; |
2834 | }; | 3017 | }; |
2835 | 3018 | ||
2836 | static void perf_counter_fork_output(struct perf_counter *counter, | 3019 | static void perf_counter_task_output(struct perf_counter *counter, |
2837 | struct perf_fork_event *fork_event) | 3020 | struct perf_task_event *task_event) |
2838 | { | 3021 | { |
2839 | struct perf_output_handle handle; | 3022 | struct perf_output_handle handle; |
2840 | int size = fork_event->event.header.size; | 3023 | int size = task_event->event.header.size; |
2841 | struct task_struct *task = fork_event->task; | 3024 | struct task_struct *task = task_event->task; |
2842 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | 3025 | int ret = perf_output_begin(&handle, counter, size, 0, 0); |
2843 | 3026 | ||
2844 | if (ret) | 3027 | if (ret) |
2845 | return; | 3028 | return; |
2846 | 3029 | ||
2847 | fork_event->event.pid = perf_counter_pid(counter, task); | 3030 | task_event->event.pid = perf_counter_pid(counter, task); |
2848 | fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); | 3031 | task_event->event.ppid = perf_counter_pid(counter, current); |
3032 | |||
3033 | task_event->event.tid = perf_counter_tid(counter, task); | ||
3034 | task_event->event.ptid = perf_counter_tid(counter, current); | ||
2849 | 3035 | ||
2850 | perf_output_put(&handle, fork_event->event); | 3036 | perf_output_put(&handle, task_event->event); |
2851 | perf_output_end(&handle); | 3037 | perf_output_end(&handle); |
2852 | } | 3038 | } |
2853 | 3039 | ||
2854 | static int perf_counter_fork_match(struct perf_counter *counter) | 3040 | static int perf_counter_task_match(struct perf_counter *counter) |
2855 | { | 3041 | { |
2856 | if (counter->attr.comm || counter->attr.mmap) | 3042 | if (counter->attr.comm || counter->attr.mmap || counter->attr.task) |
2857 | return 1; | 3043 | return 1; |
2858 | 3044 | ||
2859 | return 0; | 3045 | return 0; |
2860 | } | 3046 | } |
2861 | 3047 | ||
2862 | static void perf_counter_fork_ctx(struct perf_counter_context *ctx, | 3048 | static void perf_counter_task_ctx(struct perf_counter_context *ctx, |
2863 | struct perf_fork_event *fork_event) | 3049 | struct perf_task_event *task_event) |
2864 | { | 3050 | { |
2865 | struct perf_counter *counter; | 3051 | struct perf_counter *counter; |
2866 | 3052 | ||
@@ -2869,54 +3055,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx, | |||
2869 | 3055 | ||
2870 | rcu_read_lock(); | 3056 | rcu_read_lock(); |
2871 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | 3057 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { |
2872 | if (perf_counter_fork_match(counter)) | 3058 | if (perf_counter_task_match(counter)) |
2873 | perf_counter_fork_output(counter, fork_event); | 3059 | perf_counter_task_output(counter, task_event); |
2874 | } | 3060 | } |
2875 | rcu_read_unlock(); | 3061 | rcu_read_unlock(); |
2876 | } | 3062 | } |
2877 | 3063 | ||
2878 | static void perf_counter_fork_event(struct perf_fork_event *fork_event) | 3064 | static void perf_counter_task_event(struct perf_task_event *task_event) |
2879 | { | 3065 | { |
2880 | struct perf_cpu_context *cpuctx; | 3066 | struct perf_cpu_context *cpuctx; |
2881 | struct perf_counter_context *ctx; | 3067 | struct perf_counter_context *ctx = task_event->task_ctx; |
2882 | 3068 | ||
2883 | cpuctx = &get_cpu_var(perf_cpu_context); | 3069 | cpuctx = &get_cpu_var(perf_cpu_context); |
2884 | perf_counter_fork_ctx(&cpuctx->ctx, fork_event); | 3070 | perf_counter_task_ctx(&cpuctx->ctx, task_event); |
2885 | put_cpu_var(perf_cpu_context); | 3071 | put_cpu_var(perf_cpu_context); |
2886 | 3072 | ||
2887 | rcu_read_lock(); | 3073 | rcu_read_lock(); |
2888 | /* | 3074 | if (!ctx) |
2889 | * doesn't really matter which of the child contexts the | 3075 | ctx = rcu_dereference(task_event->task->perf_counter_ctxp); |
2890 | * events ends up in. | ||
2891 | */ | ||
2892 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
2893 | if (ctx) | 3076 | if (ctx) |
2894 | perf_counter_fork_ctx(ctx, fork_event); | 3077 | perf_counter_task_ctx(ctx, task_event); |
2895 | rcu_read_unlock(); | 3078 | rcu_read_unlock(); |
2896 | } | 3079 | } |
2897 | 3080 | ||
2898 | void perf_counter_fork(struct task_struct *task) | 3081 | static void perf_counter_task(struct task_struct *task, |
3082 | struct perf_counter_context *task_ctx, | ||
3083 | int new) | ||
2899 | { | 3084 | { |
2900 | struct perf_fork_event fork_event; | 3085 | struct perf_task_event task_event; |
2901 | 3086 | ||
2902 | if (!atomic_read(&nr_comm_counters) && | 3087 | if (!atomic_read(&nr_comm_counters) && |
2903 | !atomic_read(&nr_mmap_counters)) | 3088 | !atomic_read(&nr_mmap_counters) && |
3089 | !atomic_read(&nr_task_counters)) | ||
2904 | return; | 3090 | return; |
2905 | 3091 | ||
2906 | fork_event = (struct perf_fork_event){ | 3092 | task_event = (struct perf_task_event){ |
2907 | .task = task, | 3093 | .task = task, |
2908 | .event = { | 3094 | .task_ctx = task_ctx, |
3095 | .event = { | ||
2909 | .header = { | 3096 | .header = { |
2910 | .type = PERF_EVENT_FORK, | 3097 | .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, |
2911 | .misc = 0, | 3098 | .misc = 0, |
2912 | .size = sizeof(fork_event.event), | 3099 | .size = sizeof(task_event.event), |
2913 | }, | 3100 | }, |
2914 | /* .pid */ | 3101 | /* .pid */ |
2915 | /* .ppid */ | 3102 | /* .ppid */ |
3103 | /* .tid */ | ||
3104 | /* .ptid */ | ||
2916 | }, | 3105 | }, |
2917 | }; | 3106 | }; |
2918 | 3107 | ||
2919 | perf_counter_fork_event(&fork_event); | 3108 | perf_counter_task_event(&task_event); |
3109 | } | ||
3110 | |||
3111 | void perf_counter_fork(struct task_struct *task) | ||
3112 | { | ||
3113 | perf_counter_task(task, NULL, 1); | ||
2920 | } | 3114 | } |
2921 | 3115 | ||
2922 | /* | 3116 | /* |
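The perf_task_event introduced above is what shows up in the mmap()'ed ring buffer as PERF_EVENT_FORK or PERF_EVENT_EXIT. A user-space mirror of the record, assuming the usual perf_event_header layout (u32 type, u16 misc, u16 size), might look like this; the struct name is illustrative:

	#include <stdint.h>

	/* illustrative view of a PERF_EVENT_FORK / PERF_EVENT_EXIT record */
	struct perf_task_record {
		uint32_t type;		/* PERF_EVENT_FORK or PERF_EVENT_EXIT */
		uint16_t misc;
		uint16_t size;		/* total record size */
		uint32_t pid, ppid;	/* pids in the counter's pid namespace */
		uint32_t tid, ptid;	/* thread ids, added by this patch */
	};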
@@ -3305,125 +3499,111 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi, | |||
3305 | * Generic software counter infrastructure | 3499 | * Generic software counter infrastructure |
3306 | */ | 3500 | */ |
3307 | 3501 | ||
3308 | static void perf_swcounter_update(struct perf_counter *counter) | 3502 | /* |
3503 | * We directly increment counter->count and keep a second value in | ||
3504 | * counter->hw.period_left to count intervals. This period counter | ||
3505 | * is kept in the range [-sample_period, 0] so that we can use the | ||
3506 | * sign as trigger. | ||
3507 | */ | ||
3508 | |||
3509 | static u64 perf_swcounter_set_period(struct perf_counter *counter) | ||
3309 | { | 3510 | { |
3310 | struct hw_perf_counter *hwc = &counter->hw; | 3511 | struct hw_perf_counter *hwc = &counter->hw; |
3311 | u64 prev, now; | 3512 | u64 period = hwc->last_period; |
3312 | s64 delta; | 3513 | u64 nr, offset; |
3514 | s64 old, val; | ||
3515 | |||
3516 | hwc->last_period = hwc->sample_period; | ||
3313 | 3517 | ||
3314 | again: | 3518 | again: |
3315 | prev = atomic64_read(&hwc->prev_count); | 3519 | old = val = atomic64_read(&hwc->period_left); |
3316 | now = atomic64_read(&hwc->count); | 3520 | if (val < 0) |
3317 | if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev) | 3521 | return 0; |
3318 | goto again; | ||
3319 | 3522 | ||
3320 | delta = now - prev; | 3523 | nr = div64_u64(period + val, period); |
3524 | offset = nr * period; | ||
3525 | val -= offset; | ||
3526 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | ||
3527 | goto again; | ||
3321 | 3528 | ||
3322 | atomic64_add(delta, &counter->count); | 3529 | return nr; |
3323 | atomic64_sub(delta, &hwc->period_left); | ||
3324 | } | 3530 | } |
3325 | 3531 | ||
3326 | static void perf_swcounter_set_period(struct perf_counter *counter) | 3532 | static void perf_swcounter_overflow(struct perf_counter *counter, |
3533 | int nmi, struct perf_sample_data *data) | ||
3327 | { | 3534 | { |
3328 | struct hw_perf_counter *hwc = &counter->hw; | 3535 | struct hw_perf_counter *hwc = &counter->hw; |
3329 | s64 left = atomic64_read(&hwc->period_left); | 3536 | u64 overflow; |
3330 | s64 period = hwc->sample_period; | ||
3331 | 3537 | ||
3332 | if (unlikely(left <= -period)) { | 3538 | data->period = counter->hw.last_period; |
3333 | left = period; | 3539 | overflow = perf_swcounter_set_period(counter); |
3334 | atomic64_set(&hwc->period_left, left); | ||
3335 | hwc->last_period = period; | ||
3336 | } | ||
3337 | 3540 | ||
3338 | if (unlikely(left <= 0)) { | 3541 | if (hwc->interrupts == MAX_INTERRUPTS) |
3339 | left += period; | 3542 | return; |
3340 | atomic64_add(period, &hwc->period_left); | ||
3341 | hwc->last_period = period; | ||
3342 | } | ||
3343 | 3543 | ||
3344 | atomic64_set(&hwc->prev_count, -left); | 3544 | for (; overflow; overflow--) { |
3345 | atomic64_set(&hwc->count, -left); | 3545 | if (perf_counter_overflow(counter, nmi, data)) { |
3546 | /* | ||
3547 | * We inhibit the overflow from happening when | ||
3548 | * hwc->interrupts == MAX_INTERRUPTS. | ||
3549 | */ | ||
3550 | break; | ||
3551 | } | ||
3552 | } | ||
3346 | } | 3553 | } |
3347 | 3554 | ||
3348 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | 3555 | static void perf_swcounter_unthrottle(struct perf_counter *counter) |
3349 | { | 3556 | { |
3350 | enum hrtimer_restart ret = HRTIMER_RESTART; | ||
3351 | struct perf_sample_data data; | ||
3352 | struct perf_counter *counter; | ||
3353 | u64 period; | ||
3354 | |||
3355 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); | ||
3356 | counter->pmu->read(counter); | ||
3357 | |||
3358 | data.addr = 0; | ||
3359 | data.regs = get_irq_regs(); | ||
3360 | /* | 3557 | /* |
3361 | * In case we exclude kernel IPs or are somehow not in interrupt | 3558 | * Nothing to do, we already reset hwc->interrupts. |
3362 | * context, provide the next best thing, the user IP. | ||
3363 | */ | 3559 | */ |
3364 | if ((counter->attr.exclude_kernel || !data.regs) && | 3560 | } |
3365 | !counter->attr.exclude_user) | ||
3366 | data.regs = task_pt_regs(current); | ||
3367 | 3561 | ||
3368 | if (data.regs) { | 3562 | static void perf_swcounter_add(struct perf_counter *counter, u64 nr, |
3369 | if (perf_counter_overflow(counter, 0, &data)) | 3563 | int nmi, struct perf_sample_data *data) |
3370 | ret = HRTIMER_NORESTART; | 3564 | { |
3371 | } | 3565 | struct hw_perf_counter *hwc = &counter->hw; |
3372 | 3566 | ||
3373 | period = max_t(u64, 10000, counter->hw.sample_period); | 3567 | atomic64_add(nr, &counter->count); |
3374 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
3375 | 3568 | ||
3376 | return ret; | 3569 | if (!hwc->sample_period) |
3377 | } | 3570 | return; |
3378 | 3571 | ||
3379 | static void perf_swcounter_overflow(struct perf_counter *counter, | 3572 | if (!data->regs) |
3380 | int nmi, struct perf_sample_data *data) | 3573 | return; |
3381 | { | ||
3382 | data->period = counter->hw.last_period; | ||
3383 | 3574 | ||
3384 | perf_swcounter_update(counter); | 3575 | if (!atomic64_add_negative(nr, &hwc->period_left)) |
3385 | perf_swcounter_set_period(counter); | 3576 | perf_swcounter_overflow(counter, nmi, data); |
3386 | if (perf_counter_overflow(counter, nmi, data)) | ||
3387 | /* soft-disable the counter */ | ||
3388 | ; | ||
3389 | } | 3577 | } |
3390 | 3578 | ||
3391 | static int perf_swcounter_is_counting(struct perf_counter *counter) | 3579 | static int perf_swcounter_is_counting(struct perf_counter *counter) |
3392 | { | 3580 | { |
3393 | struct perf_counter_context *ctx; | 3581 | /* |
3394 | unsigned long flags; | 3582 | * The counter is active, we're good! |
3395 | int count; | 3583 | */ |
3396 | |||
3397 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | 3584 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) |
3398 | return 1; | 3585 | return 1; |
3399 | 3586 | ||
3587 | /* | ||
3588 | * The counter is off/error, not counting. | ||
3589 | */ | ||
3400 | if (counter->state != PERF_COUNTER_STATE_INACTIVE) | 3590 | if (counter->state != PERF_COUNTER_STATE_INACTIVE) |
3401 | return 0; | 3591 | return 0; |
3402 | 3592 | ||
3403 | /* | 3593 | /* |
3404 | * If the counter is inactive, it could be just because | 3594 | * The counter is inactive, if the context is active |
3405 | * its task is scheduled out, or because it's in a group | 3595 | * we're part of a group that didn't make it on the 'pmu', |
3406 | * which could not go on the PMU. We want to count in | 3596 | * not counting. |
3407 | * the first case but not the second. If the context is | ||
3408 | * currently active then an inactive software counter must | ||
3409 | * be the second case. If it's not currently active then | ||
3410 | * we need to know whether the counter was active when the | ||
3411 | * context was last active, which we can determine by | ||
3412 | * comparing counter->tstamp_stopped with ctx->time. | ||
3413 | * | ||
3414 | * We are within an RCU read-side critical section, | ||
3415 | * which protects the existence of *ctx. | ||
3416 | */ | 3597 | */ |
3417 | ctx = counter->ctx; | 3598 | if (counter->ctx->is_active) |
3418 | spin_lock_irqsave(&ctx->lock, flags); | 3599 | return 0; |
3419 | count = 1; | 3600 | |
3420 | /* Re-check state now we have the lock */ | 3601 | /* |
3421 | if (counter->state < PERF_COUNTER_STATE_INACTIVE || | 3602 | * We're inactive and the context is too, this means the |
3422 | counter->ctx->is_active || | 3603 | * task is scheduled out, we're counting events that happen |
3423 | counter->tstamp_stopped < ctx->time) | 3604 | * to us, like migration events. |
3424 | count = 0; | 3605 | */ |
3425 | spin_unlock_irqrestore(&ctx->lock, flags); | 3606 | return 1; |
3426 | return count; | ||
3427 | } | 3607 | } |
3428 | 3608 | ||
3429 | static int perf_swcounter_match(struct perf_counter *counter, | 3609 | static int perf_swcounter_match(struct perf_counter *counter, |
@@ -3449,15 +3629,6 @@ static int perf_swcounter_match(struct perf_counter *counter, | |||
3449 | return 1; | 3629 | return 1; |
3450 | } | 3630 | } |
3451 | 3631 | ||
3452 | static void perf_swcounter_add(struct perf_counter *counter, u64 nr, | ||
3453 | int nmi, struct perf_sample_data *data) | ||
3454 | { | ||
3455 | int neg = atomic64_add_negative(nr, &counter->hw.count); | ||
3456 | |||
3457 | if (counter->hw.sample_period && !neg && data->regs) | ||
3458 | perf_swcounter_overflow(counter, nmi, data); | ||
3459 | } | ||
3460 | |||
3461 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, | 3632 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, |
3462 | enum perf_type_id type, | 3633 | enum perf_type_id type, |
3463 | u32 event, u64 nr, int nmi, | 3634 | u32 event, u64 nr, int nmi, |
@@ -3536,27 +3707,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi, | |||
3536 | 3707 | ||
3537 | static void perf_swcounter_read(struct perf_counter *counter) | 3708 | static void perf_swcounter_read(struct perf_counter *counter) |
3538 | { | 3709 | { |
3539 | perf_swcounter_update(counter); | ||
3540 | } | 3710 | } |
3541 | 3711 | ||
3542 | static int perf_swcounter_enable(struct perf_counter *counter) | 3712 | static int perf_swcounter_enable(struct perf_counter *counter) |
3543 | { | 3713 | { |
3544 | perf_swcounter_set_period(counter); | 3714 | struct hw_perf_counter *hwc = &counter->hw; |
3715 | |||
3716 | if (hwc->sample_period) { | ||
3717 | hwc->last_period = hwc->sample_period; | ||
3718 | perf_swcounter_set_period(counter); | ||
3719 | } | ||
3545 | return 0; | 3720 | return 0; |
3546 | } | 3721 | } |
3547 | 3722 | ||
3548 | static void perf_swcounter_disable(struct perf_counter *counter) | 3723 | static void perf_swcounter_disable(struct perf_counter *counter) |
3549 | { | 3724 | { |
3550 | perf_swcounter_update(counter); | ||
3551 | } | 3725 | } |
3552 | 3726 | ||
3553 | static const struct pmu perf_ops_generic = { | 3727 | static const struct pmu perf_ops_generic = { |
3554 | .enable = perf_swcounter_enable, | 3728 | .enable = perf_swcounter_enable, |
3555 | .disable = perf_swcounter_disable, | 3729 | .disable = perf_swcounter_disable, |
3556 | .read = perf_swcounter_read, | 3730 | .read = perf_swcounter_read, |
3731 | .unthrottle = perf_swcounter_unthrottle, | ||
3557 | }; | 3732 | }; |
3558 | 3733 | ||
3559 | /* | 3734 | /* |
3735 | * hrtimer based swcounter callback | ||
3736 | */ | ||
3737 | |||
3738 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | ||
3739 | { | ||
3740 | enum hrtimer_restart ret = HRTIMER_RESTART; | ||
3741 | struct perf_sample_data data; | ||
3742 | struct perf_counter *counter; | ||
3743 | u64 period; | ||
3744 | |||
3745 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); | ||
3746 | counter->pmu->read(counter); | ||
3747 | |||
3748 | data.addr = 0; | ||
3749 | data.regs = get_irq_regs(); | ||
3750 | /* | ||
3751 | * In case we exclude kernel IPs or are somehow not in interrupt | ||
3752 | * context, provide the next best thing, the user IP. | ||
3753 | */ | ||
3754 | if ((counter->attr.exclude_kernel || !data.regs) && | ||
3755 | !counter->attr.exclude_user) | ||
3756 | data.regs = task_pt_regs(current); | ||
3757 | |||
3758 | if (data.regs) { | ||
3759 | if (perf_counter_overflow(counter, 0, &data)) | ||
3760 | ret = HRTIMER_NORESTART; | ||
3761 | } | ||
3762 | |||
3763 | period = max_t(u64, 10000, counter->hw.sample_period); | ||
3764 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
3765 | |||
3766 | return ret; | ||
3767 | } | ||
3768 | |||
3769 | /* | ||
3560 | * Software counter: cpu wall time clock | 3770 | * Software counter: cpu wall time clock |
3561 | */ | 3771 | */ |
3562 | 3772 | ||
@@ -3673,17 +3883,24 @@ static const struct pmu perf_ops_task_clock = { | |||
3673 | }; | 3883 | }; |
3674 | 3884 | ||
3675 | #ifdef CONFIG_EVENT_PROFILE | 3885 | #ifdef CONFIG_EVENT_PROFILE |
3676 | void perf_tpcounter_event(int event_id) | 3886 | void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, |
3887 | int entry_size) | ||
3677 | { | 3888 | { |
3889 | struct perf_raw_record raw = { | ||
3890 | .size = entry_size, | ||
3891 | .data = record, | ||
3892 | }; | ||
3893 | |||
3678 | struct perf_sample_data data = { | 3894 | struct perf_sample_data data = { |
3679 | .regs = get_irq_regs(), | 3895 | .regs = get_irq_regs(), |
3680 | .addr = 0, | 3896 | .addr = addr, |
3897 | .raw = &raw, | ||
3681 | }; | 3898 | }; |
3682 | 3899 | ||
3683 | if (!data.regs) | 3900 | if (!data.regs) |
3684 | data.regs = task_pt_regs(current); | 3901 | data.regs = task_pt_regs(current); |
3685 | 3902 | ||
3686 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data); | 3903 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data); |
3687 | } | 3904 | } |
3688 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); | 3905 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); |
3689 | 3906 | ||
@@ -3697,6 +3914,14 @@ static void tp_perf_counter_destroy(struct perf_counter *counter) | |||
3697 | 3914 | ||
3698 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | 3915 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) |
3699 | { | 3916 | { |
3917 | /* | ||
3918 | * Raw tracepoint data is a severe data leak, only allow root to | ||
3919 | * have these. | ||
3920 | */ | ||
3921 | if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && | ||
3922 | !capable(CAP_SYS_ADMIN)) | ||
3923 | return ERR_PTR(-EPERM); | ||
3924 | |||
3700 | if (ftrace_profile_enable(counter->attr.config)) | 3925 | if (ftrace_profile_enable(counter->attr.config)) |
3701 | return NULL; | 3926 | return NULL; |
3702 | 3927 | ||
@@ -3830,9 +4055,9 @@ perf_counter_alloc(struct perf_counter_attr *attr, | |||
3830 | atomic64_set(&hwc->period_left, hwc->sample_period); | 4055 | atomic64_set(&hwc->period_left, hwc->sample_period); |
3831 | 4056 | ||
3832 | /* | 4057 | /* |
3833 | * we currently do not support PERF_SAMPLE_GROUP on inherited counters | 4058 | * we currently do not support PERF_FORMAT_GROUP on inherited counters |
3834 | */ | 4059 | */ |
3835 | if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) | 4060 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) |
3836 | goto done; | 4061 | goto done; |
3837 | 4062 | ||
3838 | switch (attr->type) { | 4063 | switch (attr->type) { |
@@ -3875,6 +4100,8 @@ done: | |||
3875 | atomic_inc(&nr_mmap_counters); | 4100 | atomic_inc(&nr_mmap_counters); |
3876 | if (counter->attr.comm) | 4101 | if (counter->attr.comm) |
3877 | atomic_inc(&nr_comm_counters); | 4102 | atomic_inc(&nr_comm_counters); |
4103 | if (counter->attr.task) | ||
4104 | atomic_inc(&nr_task_counters); | ||
3878 | } | 4105 | } |
3879 | 4106 | ||
3880 | return counter; | 4107 | return counter; |
@@ -4236,8 +4463,10 @@ void perf_counter_exit_task(struct task_struct *child) | |||
4236 | struct perf_counter_context *child_ctx; | 4463 | struct perf_counter_context *child_ctx; |
4237 | unsigned long flags; | 4464 | unsigned long flags; |
4238 | 4465 | ||
4239 | if (likely(!child->perf_counter_ctxp)) | 4466 | if (likely(!child->perf_counter_ctxp)) { |
4467 | perf_counter_task(child, NULL, 0); | ||
4240 | return; | 4468 | return; |
4469 | } | ||
4241 | 4470 | ||
4242 | local_irq_save(flags); | 4471 | local_irq_save(flags); |
4243 | /* | 4472 | /* |
@@ -4262,8 +4491,14 @@ void perf_counter_exit_task(struct task_struct *child) | |||
4262 | * the counters from it. | 4491 | * the counters from it. |
4263 | */ | 4492 | */ |
4264 | unclone_ctx(child_ctx); | 4493 | unclone_ctx(child_ctx); |
4265 | spin_unlock(&child_ctx->lock); | 4494 | spin_unlock_irqrestore(&child_ctx->lock, flags); |
4266 | local_irq_restore(flags); | 4495 | |
4496 | /* | ||
4497 | * Report the task dead after unscheduling the counters so that we | ||
4498 | * won't get any samples after PERF_EVENT_EXIT. We can however still | ||
4499 | * get a few PERF_EVENT_READ events. | ||
4500 | */ | ||
4501 | perf_counter_task(child, child_ctx, 0); | ||
4267 | 4502 | ||
4268 | /* | 4503 | /* |
4269 | * We can recurse on the same lock type through: | 4504 | * We can recurse on the same lock type through: |
@@ -4484,6 +4719,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
4484 | perf_counter_init_cpu(cpu); | 4719 | perf_counter_init_cpu(cpu); |
4485 | break; | 4720 | break; |
4486 | 4721 | ||
4722 | case CPU_ONLINE: | ||
4723 | case CPU_ONLINE_FROZEN: | ||
4724 | hw_perf_counter_setup_online(cpu); | ||
4725 | break; | ||
4726 | |||
4487 | case CPU_DOWN_PREPARE: | 4727 | case CPU_DOWN_PREPARE: |
4488 | case CPU_DOWN_PREPARE_FROZEN: | 4728 | case CPU_DOWN_PREPARE_FROZEN: |
4489 | perf_counter_exit_cpu(cpu); | 4729 | perf_counter_exit_cpu(cpu); |
@@ -4508,6 +4748,8 @@ void __init perf_counter_init(void) | |||
4508 | { | 4748 | { |
4509 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | 4749 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, |
4510 | (void *)(long)smp_processor_id()); | 4750 | (void *)(long)smp_processor_id()); |
4751 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, | ||
4752 | (void *)(long)smp_processor_id()); | ||
4511 | register_cpu_notifier(&perf_cpu_nb); | 4753 | register_cpu_notifier(&perf_cpu_nb); |
4512 | } | 4754 | } |
4513 | 4755 | ||
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 18bdde6f676f..12161f74744e 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk) | |||
521 | } | 521 | } |
522 | void posix_cpu_timers_exit_group(struct task_struct *tsk) | 522 | void posix_cpu_timers_exit_group(struct task_struct *tsk) |
523 | { | 523 | { |
524 | struct task_cputime cputime; | 524 | struct signal_struct *const sig = tsk->signal; |
525 | 525 | ||
526 | thread_group_cputimer(tsk, &cputime); | ||
527 | cleanup_timers(tsk->signal->cpu_timers, | 526 | cleanup_timers(tsk->signal->cpu_timers, |
528 | cputime.utime, cputime.stime, cputime.sum_exec_runtime); | 527 | cputime_add(tsk->utime, sig->utime), |
528 | cputime_add(tsk->stime, sig->stime), | ||
529 | tsk->se.sum_exec_runtime + sig->sum_sched_runtime); | ||
529 | } | 530 | } |
530 | 531 | ||
531 | static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) | 532 | static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) |
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 052ec4d195c7..495440779ce3 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer) | |||
202 | return -EOPNOTSUPP; | 202 | return -EOPNOTSUPP; |
203 | } | 203 | } |
204 | 204 | ||
205 | static int no_nsleep(const clockid_t which_clock, int flags, | ||
206 | struct timespec *tsave, struct timespec __user *rmtp) | ||
207 | { | ||
208 | return -EOPNOTSUPP; | ||
209 | } | ||
210 | |||
205 | /* | 211 | /* |
206 | * Return nonzero if we know a priori this clockid_t value is bogus. | 212 | * Return nonzero if we know a priori this clockid_t value is bogus. |
207 | */ | 213 | */ |
@@ -236,6 +242,25 @@ static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp) | |||
236 | return 0; | 242 | return 0; |
237 | } | 243 | } |
238 | 244 | ||
245 | |||
246 | static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec *tp) | ||
247 | { | ||
248 | *tp = current_kernel_time(); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static int posix_get_monotonic_coarse(clockid_t which_clock, | ||
253 | struct timespec *tp) | ||
254 | { | ||
255 | *tp = get_monotonic_coarse(); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp) | ||
260 | { | ||
261 | *tp = ktime_to_timespec(KTIME_LOW_RES); | ||
262 | return 0; | ||
263 | } | ||
239 | /* | 264 | /* |
240 | * Initialize everything, well, just everything in Posix clocks/timers ;) | 265 | * Initialize everything, well, just everything in Posix clocks/timers ;) |
241 | */ | 266 | */ |
@@ -254,11 +279,28 @@ static __init int init_posix_timers(void) | |||
254 | .clock_get = posix_get_monotonic_raw, | 279 | .clock_get = posix_get_monotonic_raw, |
255 | .clock_set = do_posix_clock_nosettime, | 280 | .clock_set = do_posix_clock_nosettime, |
256 | .timer_create = no_timer_create, | 281 | .timer_create = no_timer_create, |
282 | .nsleep = no_nsleep, | ||
283 | }; | ||
284 | struct k_clock clock_realtime_coarse = { | ||
285 | .clock_getres = posix_get_coarse_res, | ||
286 | .clock_get = posix_get_realtime_coarse, | ||
287 | .clock_set = do_posix_clock_nosettime, | ||
288 | .timer_create = no_timer_create, | ||
289 | .nsleep = no_nsleep, | ||
290 | }; | ||
291 | struct k_clock clock_monotonic_coarse = { | ||
292 | .clock_getres = posix_get_coarse_res, | ||
293 | .clock_get = posix_get_monotonic_coarse, | ||
294 | .clock_set = do_posix_clock_nosettime, | ||
295 | .timer_create = no_timer_create, | ||
296 | .nsleep = no_nsleep, | ||
257 | }; | 297 | }; |
258 | 298 | ||
259 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); | 299 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); |
260 | register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); | 300 | register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); |
261 | register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw); | 301 | register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw); |
302 | register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse); | ||
303 | register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse); | ||
262 | 304 | ||
263 | posix_timers_cache = kmem_cache_create("posix_timers_cache", | 305 | posix_timers_cache = kmem_cache_create("posix_timers_cache", |
264 | sizeof (struct k_itimer), 0, SLAB_PANIC, | 306 | sizeof (struct k_itimer), 0, SLAB_PANIC, |
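The two coarse clocks registered above trade resolution (one tick, KTIME_LOW_RES) for a gettime path that reads the tick-updated time instead of the clocksource. A minimal user-space check, assuming a libc that already exposes the CLOCK_*_COARSE constants:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec res, now;

		/* resolution is the tick length, not the clocksource resolution */
		if (clock_getres(CLOCK_MONOTONIC_COARSE, &res) ||
		    clock_gettime(CLOCK_MONOTONIC_COARSE, &now)) {
			perror("coarse clock");
			return 1;
		}
		printf("resolution %ld ns, now %lld.%09ld\n",
		       res.tv_nsec, (long long)now.tv_sec, now.tv_nsec);
		return 0;
	}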
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index fcd107a78c5a..29bd4baf9e75 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c | |||
@@ -1039,16 +1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
1039 | if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { | 1039 | if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { |
1040 | /* We got the lock for task. */ | 1040 | /* We got the lock for task. */ |
1041 | debug_rt_mutex_lock(lock); | 1041 | debug_rt_mutex_lock(lock); |
1042 | |||
1043 | rt_mutex_set_owner(lock, task, 0); | 1042 | rt_mutex_set_owner(lock, task, 0); |
1044 | 1043 | spin_unlock(&lock->wait_lock); | |
1045 | rt_mutex_deadlock_account_lock(lock, task); | 1044 | rt_mutex_deadlock_account_lock(lock, task); |
1046 | return 1; | 1045 | return 1; |
1047 | } | 1046 | } |
1048 | 1047 | ||
1049 | ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); | 1048 | ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); |
1050 | 1049 | ||
1051 | |||
1052 | if (ret && !waiter->task) { | 1050 | if (ret && !waiter->task) { |
1053 | /* | 1051 | /* |
1054 | * Reset the return value. We might have | 1052 | * Reset the return value. We might have |
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index e6c251790dde..d014efbf947a 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c | |||
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, | |||
81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) | 81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) |
82 | continue; | 82 | continue; |
83 | 83 | ||
84 | if (lowest_mask) | 84 | if (lowest_mask) { |
85 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); | 85 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); |
86 | |||
87 | /* | ||
88 | * We have to ensure that we have at least one bit | ||
89 | * still set in the array, since the map could have | ||
90 | * been concurrently emptied between the first and | ||
91 | * second reads of vec->mask. If we hit this | ||
92 | * condition, simply act as though we never hit this | ||
93 | * priority level and continue on. | ||
94 | */ | ||
95 | if (cpumask_any(lowest_mask) >= nr_cpu_ids) | ||
96 | continue; | ||
97 | } | ||
98 | |||
86 | return 1; | 99 | return 1; |
87 | } | 100 | } |
88 | 101 | ||
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 9ffb2b2ceba4..652e8bdef9aa 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -611,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
611 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | 611 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) |
612 | { | 612 | { |
613 | #ifdef CONFIG_SCHEDSTATS | 613 | #ifdef CONFIG_SCHEDSTATS |
614 | struct task_struct *tsk = NULL; | ||
615 | |||
616 | if (entity_is_task(se)) | ||
617 | tsk = task_of(se); | ||
618 | |||
614 | if (se->sleep_start) { | 619 | if (se->sleep_start) { |
615 | u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; | 620 | u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; |
616 | struct task_struct *tsk = task_of(se); | ||
617 | 621 | ||
618 | if ((s64)delta < 0) | 622 | if ((s64)delta < 0) |
619 | delta = 0; | 623 | delta = 0; |
@@ -624,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
624 | se->sleep_start = 0; | 628 | se->sleep_start = 0; |
625 | se->sum_sleep_runtime += delta; | 629 | se->sum_sleep_runtime += delta; |
626 | 630 | ||
627 | account_scheduler_latency(tsk, delta >> 10, 1); | 631 | if (tsk) |
632 | account_scheduler_latency(tsk, delta >> 10, 1); | ||
628 | } | 633 | } |
629 | if (se->block_start) { | 634 | if (se->block_start) { |
630 | u64 delta = rq_of(cfs_rq)->clock - se->block_start; | 635 | u64 delta = rq_of(cfs_rq)->clock - se->block_start; |
631 | struct task_struct *tsk = task_of(se); | ||
632 | 636 | ||
633 | if ((s64)delta < 0) | 637 | if ((s64)delta < 0) |
634 | delta = 0; | 638 | delta = 0; |
@@ -639,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
639 | se->block_start = 0; | 643 | se->block_start = 0; |
640 | se->sum_sleep_runtime += delta; | 644 | se->sum_sleep_runtime += delta; |
641 | 645 | ||
642 | /* | 646 | if (tsk) { |
643 | * Blocking time is in units of nanosecs, so shift by 20 to | 647 | /* |
644 | * get a milliseconds-range estimation of the amount of | 648 | * Blocking time is in units of nanosecs, so shift by |
645 | * time that the task spent sleeping: | 649 | * 20 to get a milliseconds-range estimation of the |
646 | */ | 650 | * amount of time that the task spent sleeping: |
647 | if (unlikely(prof_on == SLEEP_PROFILING)) { | 651 | */ |
648 | 652 | if (unlikely(prof_on == SLEEP_PROFILING)) { | |
649 | profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), | 653 | profile_hits(SLEEP_PROFILING, |
650 | delta >> 20); | 654 | (void *)get_wchan(tsk), |
655 | delta >> 20); | ||
656 | } | ||
657 | account_scheduler_latency(tsk, delta >> 10, 0); | ||
651 | } | 658 | } |
652 | account_scheduler_latency(tsk, delta >> 10, 0); | ||
653 | } | 659 | } |
654 | #endif | 660 | #endif |
655 | } | 661 | } |
diff --git a/kernel/signal.c b/kernel/signal.c index ccf1ceedaebe..64c5deeaca5d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s | |||
2454 | stack_t oss; | 2454 | stack_t oss; |
2455 | int error; | 2455 | int error; |
2456 | 2456 | ||
2457 | if (uoss) { | 2457 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
2458 | oss.ss_sp = (void __user *) current->sas_ss_sp; | 2458 | oss.ss_size = current->sas_ss_size; |
2459 | oss.ss_size = current->sas_ss_size; | 2459 | oss.ss_flags = sas_ss_flags(sp); |
2460 | oss.ss_flags = sas_ss_flags(sp); | ||
2461 | } | ||
2462 | 2460 | ||
2463 | if (uss) { | 2461 | if (uss) { |
2464 | void __user *ss_sp; | 2462 | void __user *ss_sp; |
@@ -2466,10 +2464,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s | |||
2466 | int ss_flags; | 2464 | int ss_flags; |
2467 | 2465 | ||
2468 | error = -EFAULT; | 2466 | error = -EFAULT; |
2469 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) | 2467 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
2470 | || __get_user(ss_sp, &uss->ss_sp) | 2468 | goto out; |
2471 | || __get_user(ss_flags, &uss->ss_flags) | 2469 | error = __get_user(ss_sp, &uss->ss_sp) | |
2472 | || __get_user(ss_size, &uss->ss_size)) | 2470 | __get_user(ss_flags, &uss->ss_flags) | |
2471 | __get_user(ss_size, &uss->ss_size); | ||
2472 | if (error) | ||
2473 | goto out; | 2473 | goto out; |
2474 | 2474 | ||
2475 | error = -EPERM; | 2475 | error = -EPERM; |
@@ -2501,13 +2501,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s | |||
2501 | current->sas_ss_size = ss_size; | 2501 | current->sas_ss_size = ss_size; |
2502 | } | 2502 | } |
2503 | 2503 | ||
2504 | error = 0; | ||
2504 | if (uoss) { | 2505 | if (uoss) { |
2505 | error = -EFAULT; | 2506 | error = -EFAULT; |
2506 | if (copy_to_user(uoss, &oss, sizeof(oss))) | 2507 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
2507 | goto out; | 2508 | goto out; |
2509 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | | ||
2510 | __put_user(oss.ss_size, &uoss->ss_size) | | ||
2511 | __put_user(oss.ss_flags, &uoss->ss_flags); | ||
2508 | } | 2512 | } |
2509 | 2513 | ||
2510 | error = 0; | ||
2511 | out: | 2514 | out: |
2512 | return error; | 2515 | return error; |
2513 | } | 2516 | } |
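The do_sigaltstack() rework above changes how the old settings are copied back to uoss; the user-visible contract stays the same. A short sketch of the round trip, installing an alternate stack and reading the previous one back in a single call:

	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		stack_t new_ss, old_ss;

		new_ss.ss_sp = malloc(SIGSTKSZ);
		new_ss.ss_size = SIGSTKSZ;
		new_ss.ss_flags = 0;
		if (!new_ss.ss_sp)
			return 1;

		/* uss installs the new stack, uoss receives the previous
		 * settings via the __put_user() sequence added above */
		if (sigaltstack(&new_ss, &old_ss) == -1) {
			perror("sigaltstack");
			return 1;
		}
		printf("previous ss_flags: %d (SS_DISABLE is %d)\n",
		       old_ss.ss_flags, SS_DISABLE);
		return 0;
	}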
diff --git a/kernel/smp.c b/kernel/smp.c index ad63d8501207..94188b8ecc33 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -57,7 +57,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
57 | return NOTIFY_BAD; | 57 | return NOTIFY_BAD; |
58 | break; | 58 | break; |
59 | 59 | ||
60 | #ifdef CONFIG_CPU_HOTPLUG | 60 | #ifdef CONFIG_HOTPLUG_CPU |
61 | case CPU_UP_CANCELED: | 61 | case CPU_UP_CANCELED: |
62 | case CPU_UP_CANCELED_FROZEN: | 62 | case CPU_UP_CANCELED_FROZEN: |
63 | 63 | ||
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 7466cb811251..a0af4ffcb6e5 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -21,7 +21,6 @@ | |||
21 | * | 21 | * |
22 | * TODO WishList: | 22 | * TODO WishList: |
23 | * o Allow clocksource drivers to be unregistered | 23 | * o Allow clocksource drivers to be unregistered |
24 | * o get rid of clocksource_jiffies extern | ||
25 | */ | 24 | */ |
26 | 25 | ||
27 | #include <linux/clocksource.h> | 26 | #include <linux/clocksource.h> |
@@ -30,6 +29,7 @@ | |||
30 | #include <linux/module.h> | 29 | #include <linux/module.h> |
31 | #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ | 30 | #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ |
32 | #include <linux/tick.h> | 31 | #include <linux/tick.h> |
32 | #include <linux/kthread.h> | ||
33 | 33 | ||
34 | void timecounter_init(struct timecounter *tc, | 34 | void timecounter_init(struct timecounter *tc, |
35 | const struct cyclecounter *cc, | 35 | const struct cyclecounter *cc, |
@@ -107,50 +107,32 @@ u64 timecounter_cyc2time(struct timecounter *tc, | |||
107 | } | 107 | } |
108 | EXPORT_SYMBOL(timecounter_cyc2time); | 108 | EXPORT_SYMBOL(timecounter_cyc2time); |
109 | 109 | ||
110 | /* XXX - Would like a better way for initializing curr_clocksource */ | ||
111 | extern struct clocksource clocksource_jiffies; | ||
112 | |||
113 | /*[Clocksource internal variables]--------- | 110 | /*[Clocksource internal variables]--------- |
114 | * curr_clocksource: | 111 | * curr_clocksource: |
115 | * currently selected clocksource. Initialized to clocksource_jiffies. | 112 | * currently selected clocksource. |
116 | * next_clocksource: | ||
117 | * pending next selected clocksource. | ||
118 | * clocksource_list: | 113 | * clocksource_list: |
119 | * linked list with the registered clocksources | 114 | * linked list with the registered clocksources |
120 | * clocksource_lock: | 115 | * clocksource_mutex: |
121 | * protects manipulations to curr_clocksource and next_clocksource | 116 | * protects manipulations to curr_clocksource and the clocksource_list |
122 | * and the clocksource_list | ||
123 | * override_name: | 117 | * override_name: |
124 | * Name of the user-specified clocksource. | 118 | * Name of the user-specified clocksource. |
125 | */ | 119 | */ |
126 | static struct clocksource *curr_clocksource = &clocksource_jiffies; | 120 | static struct clocksource *curr_clocksource; |
127 | static struct clocksource *next_clocksource; | ||
128 | static struct clocksource *clocksource_override; | ||
129 | static LIST_HEAD(clocksource_list); | 121 | static LIST_HEAD(clocksource_list); |
130 | static DEFINE_SPINLOCK(clocksource_lock); | 122 | static DEFINE_MUTEX(clocksource_mutex); |
131 | static char override_name[32]; | 123 | static char override_name[32]; |
132 | static int finished_booting; | ||
133 | |||
134 | /* clocksource_done_booting - Called near the end of core bootup | ||
135 | * | ||
136 | * Hack to avoid lots of clocksource churn at boot time. | ||
137 | * We use fs_initcall because we want this to start before | ||
138 | * device_initcall but after subsys_initcall. | ||
139 | */ | ||
140 | static int __init clocksource_done_booting(void) | ||
141 | { | ||
142 | finished_booting = 1; | ||
143 | return 0; | ||
144 | } | ||
145 | fs_initcall(clocksource_done_booting); | ||
146 | 124 | ||
147 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | 125 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG |
148 | static LIST_HEAD(watchdog_list); | 126 | static LIST_HEAD(watchdog_list); |
149 | static struct clocksource *watchdog; | 127 | static struct clocksource *watchdog; |
150 | static struct timer_list watchdog_timer; | 128 | static struct timer_list watchdog_timer; |
129 | static struct work_struct watchdog_work; | ||
151 | static DEFINE_SPINLOCK(watchdog_lock); | 130 | static DEFINE_SPINLOCK(watchdog_lock); |
152 | static cycle_t watchdog_last; | 131 | static cycle_t watchdog_last; |
153 | static unsigned long watchdog_resumed; | 132 | static int watchdog_running; |
133 | |||
134 | static int clocksource_watchdog_kthread(void *data); | ||
135 | static void __clocksource_change_rating(struct clocksource *cs, int rating); | ||
154 | 136 | ||
155 | /* | 137 | /* |
156 | * Interval: 0.5sec Threshold: 0.0625s | 138 | * Interval: 0.5sec Threshold: 0.0625s |
@@ -158,135 +140,247 @@ static unsigned long watchdog_resumed; | |||
158 | #define WATCHDOG_INTERVAL (HZ >> 1) | 140 | #define WATCHDOG_INTERVAL (HZ >> 1) |
159 | #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) | 141 | #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) |
160 | 142 | ||
161 | static void clocksource_ratewd(struct clocksource *cs, int64_t delta) | 143 | static void clocksource_watchdog_work(struct work_struct *work) |
162 | { | 144 | { |
163 | if (delta > -WATCHDOG_THRESHOLD && delta < WATCHDOG_THRESHOLD) | 145 | /* |
164 | return; | 146 | * If kthread_run fails the next watchdog scan over the |
147 | * watchdog_list will find the unstable clock again. | ||
148 | */ | ||
149 | kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); | ||
150 | } | ||
165 | 151 | ||
152 | static void __clocksource_unstable(struct clocksource *cs) | ||
153 | { | ||
154 | cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); | ||
155 | cs->flags |= CLOCK_SOURCE_UNSTABLE; | ||
156 | schedule_work(&watchdog_work); | ||
157 | } | ||
158 | |||
159 | static void clocksource_unstable(struct clocksource *cs, int64_t delta) | ||
160 | { | ||
166 | printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", | 161 | printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", |
167 | cs->name, delta); | 162 | cs->name, delta); |
168 | cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); | 163 | __clocksource_unstable(cs); |
169 | clocksource_change_rating(cs, 0); | 164 | } |
170 | list_del(&cs->wd_list); | 165 | |
166 | /** | ||
167 | * clocksource_mark_unstable - mark clocksource unstable via watchdog | ||
168 | * @cs: clocksource to be marked unstable | ||
169 | * | ||
170 | * This function is called instead of clocksource_change_rating from | ||
171 | * cpu hotplug code to avoid a deadlock between the clocksource mutex | ||
172 | * and the cpu hotplug mutex. It defers the update of the clocksource | ||
173 | * to the watchdog thread. | ||
174 | */ | ||
175 | void clocksource_mark_unstable(struct clocksource *cs) | ||
176 | { | ||
177 | unsigned long flags; | ||
178 | |||
179 | spin_lock_irqsave(&watchdog_lock, flags); | ||
180 | if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { | ||
181 | if (list_empty(&cs->wd_list)) | ||
182 | list_add(&cs->wd_list, &watchdog_list); | ||
183 | __clocksource_unstable(cs); | ||
184 | } | ||
185 | spin_unlock_irqrestore(&watchdog_lock, flags); | ||
171 | } | 186 | } |
172 | 187 | ||
173 | static void clocksource_watchdog(unsigned long data) | 188 | static void clocksource_watchdog(unsigned long data) |
174 | { | 189 | { |
175 | struct clocksource *cs, *tmp; | 190 | struct clocksource *cs; |
176 | cycle_t csnow, wdnow; | 191 | cycle_t csnow, wdnow; |
177 | int64_t wd_nsec, cs_nsec; | 192 | int64_t wd_nsec, cs_nsec; |
178 | int resumed; | 193 | int next_cpu; |
179 | 194 | ||
180 | spin_lock(&watchdog_lock); | 195 | spin_lock(&watchdog_lock); |
181 | 196 | if (!watchdog_running) | |
182 | resumed = test_and_clear_bit(0, &watchdog_resumed); | 197 | goto out; |
183 | 198 | ||
184 | wdnow = watchdog->read(watchdog); | 199 | wdnow = watchdog->read(watchdog); |
185 | wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); | 200 | wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask, |
201 | watchdog->mult, watchdog->shift); | ||
186 | watchdog_last = wdnow; | 202 | watchdog_last = wdnow; |
187 | 203 | ||
188 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { | 204 | list_for_each_entry(cs, &watchdog_list, wd_list) { |
189 | csnow = cs->read(cs); | ||
190 | 205 | ||
191 | if (unlikely(resumed)) { | 206 | /* Clocksource already marked unstable? */ |
192 | cs->wd_last = csnow; | 207 | if (cs->flags & CLOCK_SOURCE_UNSTABLE) { |
208 | schedule_work(&watchdog_work); | ||
193 | continue; | 209 | continue; |
194 | } | 210 | } |
195 | 211 | ||
196 | /* Initialized ? */ | 212 | csnow = cs->read(cs); |
213 | |||
214 | /* Clocksource initialized ? */ | ||
197 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { | 215 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { |
198 | if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && | ||
199 | (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) { | ||
200 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | ||
201 | /* | ||
202 | * We just marked the clocksource as | ||
203 | * highres-capable, notify the rest of the | ||
204 | * system as well so that we transition | ||
205 | * into high-res mode: | ||
206 | */ | ||
207 | tick_clock_notify(); | ||
208 | } | ||
209 | cs->flags |= CLOCK_SOURCE_WATCHDOG; | 216 | cs->flags |= CLOCK_SOURCE_WATCHDOG; |
210 | cs->wd_last = csnow; | 217 | cs->wd_last = csnow; |
211 | } else { | 218 | continue; |
212 | cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask); | ||
213 | cs->wd_last = csnow; | ||
214 | /* Check the delta. Might remove from the list ! */ | ||
215 | clocksource_ratewd(cs, cs_nsec - wd_nsec); | ||
216 | } | 219 | } |
217 | } | ||
218 | 220 | ||
219 | if (!list_empty(&watchdog_list)) { | 221 | /* Check the deviation from the watchdog clocksource. */ |
220 | /* | 222 | cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) & |
221 | * Cycle through CPUs to check if the CPUs stay | 223 | cs->mask, cs->mult, cs->shift); |
222 | * synchronized to each other. | 224 | cs->wd_last = csnow; |
223 | */ | 225 | if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { |
224 | int next_cpu = cpumask_next(raw_smp_processor_id(), | 226 | clocksource_unstable(cs, cs_nsec - wd_nsec); |
225 | cpu_online_mask); | 227 | continue; |
228 | } | ||
226 | 229 | ||
227 | if (next_cpu >= nr_cpu_ids) | 230 | if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && |
228 | next_cpu = cpumask_first(cpu_online_mask); | 231 | (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && |
229 | watchdog_timer.expires += WATCHDOG_INTERVAL; | 232 | (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) { |
230 | add_timer_on(&watchdog_timer, next_cpu); | 233 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
234 | /* | ||
235 | * We just marked the clocksource as highres-capable, | ||
236 | * notify the rest of the system as well so that we | ||
237 | * transition into high-res mode: | ||
238 | */ | ||
239 | tick_clock_notify(); | ||
240 | } | ||
231 | } | 241 | } |
242 | |||
243 | /* | ||
244 | * Cycle through CPUs to check if the CPUs stay synchronized | ||
245 | * to each other. | ||
246 | */ | ||
247 | next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); | ||
248 | if (next_cpu >= nr_cpu_ids) | ||
249 | next_cpu = cpumask_first(cpu_online_mask); | ||
250 | watchdog_timer.expires += WATCHDOG_INTERVAL; | ||
251 | add_timer_on(&watchdog_timer, next_cpu); | ||
252 | out: | ||
232 | spin_unlock(&watchdog_lock); | 253 | spin_unlock(&watchdog_lock); |
233 | } | 254 | } |
255 | |||
256 | static inline void clocksource_start_watchdog(void) | ||
257 | { | ||
258 | if (watchdog_running || !watchdog || list_empty(&watchdog_list)) | ||
259 | return; | ||
260 | INIT_WORK(&watchdog_work, clocksource_watchdog_work); | ||
261 | init_timer(&watchdog_timer); | ||
262 | watchdog_timer.function = clocksource_watchdog; | ||
263 | watchdog_last = watchdog->read(watchdog); | ||
264 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; | ||
265 | add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); | ||
266 | watchdog_running = 1; | ||
267 | } | ||
268 | |||
269 | static inline void clocksource_stop_watchdog(void) | ||
270 | { | ||
271 | if (!watchdog_running || (watchdog && !list_empty(&watchdog_list))) | ||
272 | return; | ||
273 | del_timer(&watchdog_timer); | ||
274 | watchdog_running = 0; | ||
275 | } | ||
276 | |||
277 | static inline void clocksource_reset_watchdog(void) | ||
278 | { | ||
279 | struct clocksource *cs; | ||
280 | |||
281 | list_for_each_entry(cs, &watchdog_list, wd_list) | ||
282 | cs->flags &= ~CLOCK_SOURCE_WATCHDOG; | ||
283 | } | ||
284 | |||
234 | static void clocksource_resume_watchdog(void) | 285 | static void clocksource_resume_watchdog(void) |
235 | { | 286 | { |
236 | set_bit(0, &watchdog_resumed); | 287 | unsigned long flags; |
288 | |||
289 | spin_lock_irqsave(&watchdog_lock, flags); | ||
290 | clocksource_reset_watchdog(); | ||
291 | spin_unlock_irqrestore(&watchdog_lock, flags); | ||
237 | } | 292 | } |
238 | 293 | ||
239 | static void clocksource_check_watchdog(struct clocksource *cs) | 294 | static void clocksource_enqueue_watchdog(struct clocksource *cs) |
240 | { | 295 | { |
241 | struct clocksource *cse; | ||
242 | unsigned long flags; | 296 | unsigned long flags; |
243 | 297 | ||
244 | spin_lock_irqsave(&watchdog_lock, flags); | 298 | spin_lock_irqsave(&watchdog_lock, flags); |
245 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { | 299 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { |
246 | int started = !list_empty(&watchdog_list); | 300 | /* cs is a clocksource to be watched. */ |
247 | |||
248 | list_add(&cs->wd_list, &watchdog_list); | 301 | list_add(&cs->wd_list, &watchdog_list); |
249 | if (!started && watchdog) { | 302 | cs->flags &= ~CLOCK_SOURCE_WATCHDOG; |
250 | watchdog_last = watchdog->read(watchdog); | ||
251 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; | ||
252 | add_timer_on(&watchdog_timer, | ||
253 | cpumask_first(cpu_online_mask)); | ||
254 | } | ||
255 | } else { | 303 | } else { |
304 | /* cs is a watchdog. */ | ||
256 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | 305 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
257 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | 306 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
258 | 307 | /* Pick the best watchdog. */ | |
259 | if (!watchdog || cs->rating > watchdog->rating) { | 308 | if (!watchdog || cs->rating > watchdog->rating) { |
260 | if (watchdog) | ||
261 | del_timer(&watchdog_timer); | ||
262 | watchdog = cs; | 309 | watchdog = cs; |
263 | init_timer(&watchdog_timer); | ||
264 | watchdog_timer.function = clocksource_watchdog; | ||
265 | |||
266 | /* Reset watchdog cycles */ | 310 | /* Reset watchdog cycles */ |
267 | list_for_each_entry(cse, &watchdog_list, wd_list) | 311 | clocksource_reset_watchdog(); |
268 | cse->flags &= ~CLOCK_SOURCE_WATCHDOG; | 312 | } |
269 | /* Start if list is not empty */ | 313 | } |
270 | if (!list_empty(&watchdog_list)) { | 314 | /* Check if the watchdog timer needs to be started. */ |
271 | watchdog_last = watchdog->read(watchdog); | 315 | clocksource_start_watchdog(); |
272 | watchdog_timer.expires = | 316 | spin_unlock_irqrestore(&watchdog_lock, flags); |
273 | jiffies + WATCHDOG_INTERVAL; | 317 | } |
274 | add_timer_on(&watchdog_timer, | 318 | |
275 | cpumask_first(cpu_online_mask)); | 319 | static void clocksource_dequeue_watchdog(struct clocksource *cs) |
276 | } | 320 | { |
321 | struct clocksource *tmp; | ||
322 | unsigned long flags; | ||
323 | |||
324 | spin_lock_irqsave(&watchdog_lock, flags); | ||
325 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { | ||
326 | /* cs is a watched clocksource. */ | ||
327 | list_del_init(&cs->wd_list); | ||
328 | } else if (cs == watchdog) { | ||
329 | /* Reset watchdog cycles */ | ||
330 | clocksource_reset_watchdog(); | ||
331 | /* Current watchdog is removed. Find an alternative. */ | ||
332 | watchdog = NULL; | ||
333 | list_for_each_entry(tmp, &clocksource_list, list) { | ||
334 | if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY) | ||
335 | continue; | ||
336 | if (!watchdog || tmp->rating > watchdog->rating) | ||
337 | watchdog = tmp; | ||
277 | } | 338 | } |
278 | } | 339 | } |
340 | cs->flags &= ~CLOCK_SOURCE_WATCHDOG; | ||
341 | /* Check if the watchdog timer needs to be stopped. */ | ||
342 | clocksource_stop_watchdog(); | ||
343 | spin_unlock_irqrestore(&watchdog_lock, flags); | ||
344 | } | ||
345 | |||
346 | static int clocksource_watchdog_kthread(void *data) | ||
347 | { | ||
348 | struct clocksource *cs, *tmp; | ||
349 | unsigned long flags; | ||
350 | LIST_HEAD(unstable); | ||
351 | |||
352 | mutex_lock(&clocksource_mutex); | ||
353 | spin_lock_irqsave(&watchdog_lock, flags); | ||
354 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) | ||
355 | if (cs->flags & CLOCK_SOURCE_UNSTABLE) { | ||
356 | list_del_init(&cs->wd_list); | ||
357 | list_add(&cs->wd_list, &unstable); | ||
358 | } | ||
359 | /* Check if the watchdog timer needs to be stopped. */ | ||
360 | clocksource_stop_watchdog(); | ||
279 | spin_unlock_irqrestore(&watchdog_lock, flags); | 361 | spin_unlock_irqrestore(&watchdog_lock, flags); |
362 | |||
363 | /* Needs to be done outside of watchdog lock */ | ||
364 | list_for_each_entry_safe(cs, tmp, &unstable, wd_list) { | ||
365 | list_del_init(&cs->wd_list); | ||
366 | __clocksource_change_rating(cs, 0); | ||
367 | } | ||
368 | mutex_unlock(&clocksource_mutex); | ||
369 | return 0; | ||
280 | } | 370 | } |
281 | #else | 371 | |
282 | static void clocksource_check_watchdog(struct clocksource *cs) | 372 | #else /* CONFIG_CLOCKSOURCE_WATCHDOG */ |
373 | |||
374 | static void clocksource_enqueue_watchdog(struct clocksource *cs) | ||
283 | { | 375 | { |
284 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | 376 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
285 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | 377 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
286 | } | 378 | } |
287 | 379 | ||
380 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } | ||
288 | static inline void clocksource_resume_watchdog(void) { } | 381 | static inline void clocksource_resume_watchdog(void) { } |
289 | #endif | 382 | |
383 | #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ | ||
290 | 384 | ||
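
The watchdog arithmetic above amounts to converting both cycle deltas to nanoseconds and comparing the difference against the 62.5 ms threshold named in the patch. A minimal userspace sketch of that check follows; the mult/shift and delta values are made-up examples, not numbers from this patch.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC        1000000000LL
#define WATCHDOG_THRESHOLD  (NSEC_PER_SEC >> 4)   /* 62.5 ms, as in the patch */

/* Same math as clocksource_cyc2ns(): (cycles * mult) >> shift */
static int64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (int64_t)((cycles * mult) >> shift);
}

int main(void)
{
	/* Assume a 1 MHz watchdog (1000 ns/cycle) and a ~2.86 GHz clock. */
	uint64_t wd_delta = 500000;		/* 0.5 s worth of watchdog cycles */
	uint64_t cs_delta = 1400000000;		/* cycles seen on the watched clock */
	int64_t wd_nsec = cyc2ns(wd_delta, 1000 << 10, 10);
	int64_t cs_nsec = cyc2ns(cs_delta, 358, 10);
	int64_t delta = cs_nsec - wd_nsec;

	if (delta < 0)
		delta = -delta;
	if (delta > WATCHDOG_THRESHOLD)
		printf("clocksource unstable (delta = %lld ns)\n",
		       (long long)(cs_nsec - wd_nsec));
	else
		printf("clocksource within threshold (%lld ns)\n",
		       (long long)(cs_nsec - wd_nsec));
	return 0;
}
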
291 | /** | 385 | /** |
292 | * clocksource_resume - resume the clocksource(s) | 386 | * clocksource_resume - resume the clocksource(s) |
@@ -294,18 +388,16 @@ static inline void clocksource_resume_watchdog(void) { } | |||
294 | void clocksource_resume(void) | 388 | void clocksource_resume(void) |
295 | { | 389 | { |
296 | struct clocksource *cs; | 390 | struct clocksource *cs; |
297 | unsigned long flags; | ||
298 | 391 | ||
299 | spin_lock_irqsave(&clocksource_lock, flags); | 392 | mutex_lock(&clocksource_mutex); |
300 | 393 | ||
301 | list_for_each_entry(cs, &clocksource_list, list) { | 394 | list_for_each_entry(cs, &clocksource_list, list) |
302 | if (cs->resume) | 395 | if (cs->resume) |
303 | cs->resume(); | 396 | cs->resume(); |
304 | } | ||
305 | 397 | ||
306 | clocksource_resume_watchdog(); | 398 | clocksource_resume_watchdog(); |
307 | 399 | ||
308 | spin_unlock_irqrestore(&clocksource_lock, flags); | 400 | mutex_unlock(&clocksource_mutex); |
309 | } | 401 | } |
310 | 402 | ||
311 | /** | 403 | /** |
@@ -320,75 +412,88 @@ void clocksource_touch_watchdog(void) | |||
320 | clocksource_resume_watchdog(); | 412 | clocksource_resume_watchdog(); |
321 | } | 413 | } |
322 | 414 | ||
415 | #ifdef CONFIG_GENERIC_TIME | ||
416 | |||
417 | static int finished_booting; | ||
418 | |||
323 | /** | 419 | /** |
324 | * clocksource_get_next - Returns the selected clocksource | 420 | * clocksource_select - Select the best clocksource available |
421 | * | ||
422 | * Private function. Must hold clocksource_mutex when called. | ||
325 | * | 423 | * |
424 | * Select the clocksource with the best rating, or the clocksource | ||
425 | * selected by the userspace override. | ||
326 | */ | 426 | */ |
327 | struct clocksource *clocksource_get_next(void) | 427 | static void clocksource_select(void) |
328 | { | 428 | { |
329 | unsigned long flags; | 429 | struct clocksource *best, *cs; |
330 | 430 | ||
331 | spin_lock_irqsave(&clocksource_lock, flags); | 431 | if (!finished_booting || list_empty(&clocksource_list)) |
332 | if (next_clocksource && finished_booting) { | 432 | return; |
333 | curr_clocksource = next_clocksource; | 433 | /* First clocksource on the list has the best rating. */ |
334 | next_clocksource = NULL; | 434 | best = list_first_entry(&clocksource_list, struct clocksource, list); |
435 | /* Check for the override clocksource. */ | ||
436 | list_for_each_entry(cs, &clocksource_list, list) { | ||
437 | if (strcmp(cs->name, override_name) != 0) | ||
438 | continue; | ||
439 | /* | ||
440 | * Check to make sure we don't switch to a non-highres | ||
441 | * capable clocksource if the tick code is in oneshot | ||
442 | * mode (highres or nohz) | ||
443 | */ | ||
444 | if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && | ||
445 | tick_oneshot_mode_active()) { | ||
446 | /* Override clocksource cannot be used. */ | ||
447 | printk(KERN_WARNING "Override clocksource %s is not " | ||
448 | "HRT compatible. Cannot switch while in " | ||
449 | "HRT/NOHZ mode\n", cs->name); | ||
450 | override_name[0] = 0; | ||
451 | } else | ||
452 | /* Override clocksource can be used. */ | ||
453 | best = cs; | ||
454 | break; | ||
455 | } | ||
456 | if (curr_clocksource != best) { | ||
457 | printk(KERN_INFO "Switching to clocksource %s\n", best->name); | ||
458 | curr_clocksource = best; | ||
459 | timekeeping_notify(curr_clocksource); | ||
335 | } | 460 | } |
336 | spin_unlock_irqrestore(&clocksource_lock, flags); | ||
337 | |||
338 | return curr_clocksource; | ||
339 | } | 461 | } |
340 | 462 | ||
341 | /** | 463 | /* |
342 | * select_clocksource - Selects the best registered clocksource. | 464 | * clocksource_done_booting - Called near the end of core bootup |
343 | * | ||
344 | * Private function. Must hold clocksource_lock when called. | ||
345 | * | 465 | * |
346 | * Select the clocksource with the best rating, or the clocksource, | 466 | * Hack to avoid lots of clocksource churn at boot time. |
347 | * which is selected by userspace override. | 467 | * We use fs_initcall because we want this to start before |
468 | * device_initcall but after subsys_initcall. | ||
348 | */ | 469 | */ |
349 | static struct clocksource *select_clocksource(void) | 470 | static int __init clocksource_done_booting(void) |
350 | { | 471 | { |
351 | struct clocksource *next; | 472 | finished_booting = 1; |
352 | 473 | clocksource_select(); | |
353 | if (list_empty(&clocksource_list)) | 474 | return 0; |
354 | return NULL; | 475 | } |
476 | fs_initcall(clocksource_done_booting); | ||
355 | 477 | ||
356 | if (clocksource_override) | 478 | #else /* CONFIG_GENERIC_TIME */ |
357 | next = clocksource_override; | ||
358 | else | ||
359 | next = list_entry(clocksource_list.next, struct clocksource, | ||
360 | list); | ||
361 | 479 | ||
362 | if (next == curr_clocksource) | 480 | static inline void clocksource_select(void) { } |
363 | return NULL; | ||
364 | 481 | ||
365 | return next; | 482 | #endif |
366 | } | ||
367 | 483 | ||
368 | /* | 484 | /* |
369 | * Enqueue the clocksource sorted by rating | 485 | * Enqueue the clocksource sorted by rating |
370 | */ | 486 | */ |
371 | static int clocksource_enqueue(struct clocksource *c) | 487 | static void clocksource_enqueue(struct clocksource *cs) |
372 | { | 488 | { |
373 | struct list_head *tmp, *entry = &clocksource_list; | 489 | struct list_head *entry = &clocksource_list; |
374 | 490 | struct clocksource *tmp; | |
375 | list_for_each(tmp, &clocksource_list) { | ||
376 | struct clocksource *cs; | ||
377 | 491 | ||
378 | cs = list_entry(tmp, struct clocksource, list); | 492 | list_for_each_entry(tmp, &clocksource_list, list) |
379 | if (cs == c) | ||
380 | return -EBUSY; | ||
381 | /* Keep track of the place, where to insert */ | 493 | /* Keep track of the place, where to insert */ |
382 | if (cs->rating >= c->rating) | 494 | if (tmp->rating >= cs->rating) |
383 | entry = tmp; | 495 | entry = &tmp->list; |
384 | } | 496 | list_add(&cs->list, entry); |
385 | list_add(&c->list, entry); | ||
386 | |||
387 | if (strlen(c->name) == strlen(override_name) && | ||
388 | !strcmp(c->name, override_name)) | ||
389 | clocksource_override = c; | ||
390 | |||
391 | return 0; | ||
392 | } | 497 | } |
393 | 498 | ||
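
clocksource_enqueue() keeps the list sorted so that clocksource_select() can simply take the first entry as the best-rated one. A small userspace sketch of the same insertion rule, with illustrative names and ratings rather than real registrations:

#include <stdio.h>
#include <string.h>

struct cs { const char *name; int rating; };

int main(void)
{
	struct cs list[8];
	int n = 0;
	const struct cs input[] = {
		{ "jiffies", 1 }, { "hpet", 250 }, { "acpi_pm", 200 }, { "tsc", 300 },
	};

	for (unsigned i = 0; i < sizeof(input) / sizeof(input[0]); i++) {
		int pos = 0;
		/* Insert after the last entry whose rating is >= the new one. */
		while (pos < n && list[pos].rating >= input[i].rating)
			pos++;
		memmove(&list[pos + 1], &list[pos], (n - pos) * sizeof(list[0]));
		list[pos] = input[i];
		n++;
	}

	/* First entry has the best rating, as clocksource_select() assumes. */
	for (int i = 0; i < n; i++)
		printf("%-8s rating %d\n", list[i].name, list[i].rating);
	return 0;
}
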
394 | /** | 499 | /** |
@@ -397,52 +502,48 @@ static int clocksource_enqueue(struct clocksource *c) | |||
397 | * | 502 | * |
398 | * Returns -EBUSY if registration fails, zero otherwise. | 503 | * Returns -EBUSY if registration fails, zero otherwise. |
399 | */ | 504 | */ |
400 | int clocksource_register(struct clocksource *c) | 505 | int clocksource_register(struct clocksource *cs) |
401 | { | 506 | { |
402 | unsigned long flags; | 507 | mutex_lock(&clocksource_mutex); |
403 | int ret; | 508 | clocksource_enqueue(cs); |
404 | 509 | clocksource_select(); | |
405 | spin_lock_irqsave(&clocksource_lock, flags); | 510 | clocksource_enqueue_watchdog(cs); |
406 | ret = clocksource_enqueue(c); | 511 | mutex_unlock(&clocksource_mutex); |
407 | if (!ret) | 512 | return 0; |
408 | next_clocksource = select_clocksource(); | ||
409 | spin_unlock_irqrestore(&clocksource_lock, flags); | ||
410 | if (!ret) | ||
411 | clocksource_check_watchdog(c); | ||
412 | return ret; | ||
413 | } | 513 | } |
414 | EXPORT_SYMBOL(clocksource_register); | 514 | EXPORT_SYMBOL(clocksource_register); |
415 | 515 | ||
516 | static void __clocksource_change_rating(struct clocksource *cs, int rating) | ||
517 | { | ||
518 | list_del(&cs->list); | ||
519 | cs->rating = rating; | ||
520 | clocksource_enqueue(cs); | ||
521 | clocksource_select(); | ||
522 | } | ||
523 | |||
416 | /** | 524 | /** |
417 | * clocksource_change_rating - Change the rating of a registered clocksource | 525 | * clocksource_change_rating - Change the rating of a registered clocksource |
418 | * | ||
419 | */ | 526 | */ |
420 | void clocksource_change_rating(struct clocksource *cs, int rating) | 527 | void clocksource_change_rating(struct clocksource *cs, int rating) |
421 | { | 528 | { |
422 | unsigned long flags; | 529 | mutex_lock(&clocksource_mutex); |
423 | 530 | __clocksource_change_rating(cs, rating); | |
424 | spin_lock_irqsave(&clocksource_lock, flags); | 531 | mutex_unlock(&clocksource_mutex); |
425 | list_del(&cs->list); | ||
426 | cs->rating = rating; | ||
427 | clocksource_enqueue(cs); | ||
428 | next_clocksource = select_clocksource(); | ||
429 | spin_unlock_irqrestore(&clocksource_lock, flags); | ||
430 | } | 532 | } |
533 | EXPORT_SYMBOL(clocksource_change_rating); | ||
431 | 534 | ||
432 | /** | 535 | /** |
433 | * clocksource_unregister - remove a registered clocksource | 536 | * clocksource_unregister - remove a registered clocksource |
434 | */ | 537 | */ |
435 | void clocksource_unregister(struct clocksource *cs) | 538 | void clocksource_unregister(struct clocksource *cs) |
436 | { | 539 | { |
437 | unsigned long flags; | 540 | mutex_lock(&clocksource_mutex); |
438 | 541 | clocksource_dequeue_watchdog(cs); | |
439 | spin_lock_irqsave(&clocksource_lock, flags); | ||
440 | list_del(&cs->list); | 542 | list_del(&cs->list); |
441 | if (clocksource_override == cs) | 543 | clocksource_select(); |
442 | clocksource_override = NULL; | 544 | mutex_unlock(&clocksource_mutex); |
443 | next_clocksource = select_clocksource(); | ||
444 | spin_unlock_irqrestore(&clocksource_lock, flags); | ||
445 | } | 545 | } |
546 | EXPORT_SYMBOL(clocksource_unregister); | ||
446 | 547 | ||
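
For context on what a driver hands to clocksource_register(): the mult/shift pair is chosen so that mult / 2^shift equals the counter's nanoseconds per cycle. A hedged sketch of one common way to derive it; the 24 MHz frequency and shift of 20 are arbitrary examples, not values from this patch, and a real registration also supplies read(), mask, rating and flags.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t freq_hz = 24000000;	/* example counter frequency */
	uint32_t shift = 20;
	/* mult / 2^shift must equal ns per cycle = 1e9 / freq (rounded) */
	uint32_t mult = (uint32_t)(((1000000000ULL << shift) + freq_hz / 2) / freq_hz);

	printf("mult=%u shift=%u -> %.3f ns/cycle\n",
	       mult, shift, (double)mult / (1u << shift));
	return 0;
}
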
447 | #ifdef CONFIG_SYSFS | 548 | #ifdef CONFIG_SYSFS |
448 | /** | 549 | /** |
@@ -458,9 +559,9 @@ sysfs_show_current_clocksources(struct sys_device *dev, | |||
458 | { | 559 | { |
459 | ssize_t count = 0; | 560 | ssize_t count = 0; |
460 | 561 | ||
461 | spin_lock_irq(&clocksource_lock); | 562 | mutex_lock(&clocksource_mutex); |
462 | count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); | 563 | count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); |
463 | spin_unlock_irq(&clocksource_lock); | 564 | mutex_unlock(&clocksource_mutex); |
464 | 565 | ||
465 | return count; | 566 | return count; |
466 | } | 567 | } |
@@ -478,9 +579,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, | |||
478 | struct sysdev_attribute *attr, | 579 | struct sysdev_attribute *attr, |
479 | const char *buf, size_t count) | 580 | const char *buf, size_t count) |
480 | { | 581 | { |
481 | struct clocksource *ovr = NULL; | ||
482 | size_t ret = count; | 582 | size_t ret = count; |
483 | int len; | ||
484 | 583 | ||
485 | /* strings from sysfs write are not 0 terminated! */ | 584 | /* strings from sysfs write are not 0 terminated! */ |
486 | if (count >= sizeof(override_name)) | 585 | if (count >= sizeof(override_name)) |
@@ -490,44 +589,14 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, | |||
490 | if (buf[count-1] == '\n') | 589 | if (buf[count-1] == '\n') |
491 | count--; | 590 | count--; |
492 | 591 | ||
493 | spin_lock_irq(&clocksource_lock); | 592 | mutex_lock(&clocksource_mutex); |
494 | 593 | ||
495 | if (count > 0) | 594 | if (count > 0) |
496 | memcpy(override_name, buf, count); | 595 | memcpy(override_name, buf, count); |
497 | override_name[count] = 0; | 596 | override_name[count] = 0; |
597 | clocksource_select(); | ||
498 | 598 | ||
499 | len = strlen(override_name); | 599 | mutex_unlock(&clocksource_mutex); |
500 | if (len) { | ||
501 | struct clocksource *cs; | ||
502 | |||
503 | ovr = clocksource_override; | ||
504 | /* try to select it: */ | ||
505 | list_for_each_entry(cs, &clocksource_list, list) { | ||
506 | if (strlen(cs->name) == len && | ||
507 | !strcmp(cs->name, override_name)) | ||
508 | ovr = cs; | ||
509 | } | ||
510 | } | ||
511 | |||
512 | /* | ||
513 | * Check to make sure we don't switch to a non-highres capable | ||
514 | * clocksource if the tick code is in oneshot mode (highres or nohz) | ||
515 | */ | ||
516 | if (tick_oneshot_mode_active() && ovr && | ||
517 | !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) { | ||
518 | printk(KERN_WARNING "%s clocksource is not HRT compatible. " | ||
519 | "Cannot switch while in HRT/NOHZ mode\n", ovr->name); | ||
520 | ovr = NULL; | ||
521 | override_name[0] = 0; | ||
522 | } | ||
523 | |||
524 | /* Reselect, when the override name has changed */ | ||
525 | if (ovr != clocksource_override) { | ||
526 | clocksource_override = ovr; | ||
527 | next_clocksource = select_clocksource(); | ||
528 | } | ||
529 | |||
530 | spin_unlock_irq(&clocksource_lock); | ||
531 | 600 | ||
532 | return ret; | 601 | return ret; |
533 | } | 602 | } |
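
The store path above has to cope with sysfs buffers that are not NUL terminated and may carry a trailing newline before clocksource_select() is rerun. A standalone sketch of the same parsing; override_name and the error return here model the kernel objects, they are not the kernel code itself.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static char override_name[32];

static ssize_t store_clocksource(const char *buf, size_t count)
{
	if (count >= sizeof(override_name))
		return -1;			/* -EINVAL in the kernel  */
	if (count > 0 && buf[count - 1] == '\n')
		count--;			/* strip trailing newline */
	memcpy(override_name, buf, count);
	override_name[count] = 0;
	return (ssize_t)count;
}

int main(void)
{
	const char *input = "acpi_pm\n";	/* what `echo acpi_pm > ...` writes */
	store_clocksource(input, strlen(input));
	printf("override_name = \"%s\"\n", override_name);
	return 0;
}
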
@@ -547,7 +616,7 @@ sysfs_show_available_clocksources(struct sys_device *dev, | |||
547 | struct clocksource *src; | 616 | struct clocksource *src; |
548 | ssize_t count = 0; | 617 | ssize_t count = 0; |
549 | 618 | ||
550 | spin_lock_irq(&clocksource_lock); | 619 | mutex_lock(&clocksource_mutex); |
551 | list_for_each_entry(src, &clocksource_list, list) { | 620 | list_for_each_entry(src, &clocksource_list, list) { |
552 | /* | 621 | /* |
553 | * Don't show non-HRES clocksource if the tick code is | 622 | * Don't show non-HRES clocksource if the tick code is |
@@ -559,7 +628,7 @@ sysfs_show_available_clocksources(struct sys_device *dev, | |||
559 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), | 628 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), |
560 | "%s ", src->name); | 629 | "%s ", src->name); |
561 | } | 630 | } |
562 | spin_unlock_irq(&clocksource_lock); | 631 | mutex_unlock(&clocksource_mutex); |
563 | 632 | ||
564 | count += snprintf(buf + count, | 633 | count += snprintf(buf + count, |
565 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); | 634 | max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); |
@@ -614,11 +683,10 @@ device_initcall(init_clocksource_sysfs); | |||
614 | */ | 683 | */ |
615 | static int __init boot_override_clocksource(char* str) | 684 | static int __init boot_override_clocksource(char* str) |
616 | { | 685 | { |
617 | unsigned long flags; | 686 | mutex_lock(&clocksource_mutex); |
618 | spin_lock_irqsave(&clocksource_lock, flags); | ||
619 | if (str) | 687 | if (str) |
620 | strlcpy(override_name, str, sizeof(override_name)); | 688 | strlcpy(override_name, str, sizeof(override_name)); |
621 | spin_unlock_irqrestore(&clocksource_lock, flags); | 689 | mutex_unlock(&clocksource_mutex); |
622 | return 1; | 690 | return 1; |
623 | } | 691 | } |
624 | 692 | ||
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index c3f6c30816e3..5404a8456909 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
@@ -61,7 +61,6 @@ struct clocksource clocksource_jiffies = { | |||
61 | .read = jiffies_read, | 61 | .read = jiffies_read, |
62 | .mask = 0xffffffff, /*32bits*/ | 62 | .mask = 0xffffffff, /*32bits*/ |
63 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ | 63 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ |
64 | .mult_orig = NSEC_PER_JIFFY << JIFFIES_SHIFT, | ||
65 | .shift = JIFFIES_SHIFT, | 64 | .shift = JIFFIES_SHIFT, |
66 | }; | 65 | }; |
67 | 66 | ||
@@ -71,3 +70,8 @@ static int __init init_jiffies_clocksource(void) | |||
71 | } | 70 | } |
72 | 71 | ||
73 | core_initcall(init_jiffies_clocksource); | 72 | core_initcall(init_jiffies_clocksource); |
73 | |||
74 | struct clocksource * __init __weak clocksource_default_clock(void) | ||
75 | { | ||
76 | return &clocksource_jiffies; | ||
77 | } | ||
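
clocksource_default_clock() is defined __weak so that an architecture can substitute its own default clock while everyone else falls back to jiffies. A minimal userspace illustration of the weak-symbol pattern; the function and strings below are invented for the demo.

#include <stdio.h>

/* Weak default, analogous to the jiffies fallback above. */
__attribute__((weak)) const char *default_clock(void)
{
	return "jiffies";
}

/*
 * A second translation unit could provide the strong override, e.g.:
 *   const char *default_clock(void) { return "arch_counter"; }
 * and linking both together would make the strong version win.
 */

int main(void)
{
	printf("default clock: %s\n", default_clock());
	return 0;
}
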
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 7fc64375ff43..4800f933910e 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -194,8 +194,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) | |||
194 | case TIME_OK: | 194 | case TIME_OK: |
195 | break; | 195 | break; |
196 | case TIME_INS: | 196 | case TIME_INS: |
197 | xtime.tv_sec--; | 197 | timekeeping_leap_insert(-1); |
198 | wall_to_monotonic.tv_sec++; | ||
199 | time_state = TIME_OOP; | 198 | time_state = TIME_OOP; |
200 | printk(KERN_NOTICE | 199 | printk(KERN_NOTICE |
201 | "Clock: inserting leap second 23:59:60 UTC\n"); | 200 | "Clock: inserting leap second 23:59:60 UTC\n"); |
@@ -203,9 +202,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) | |||
203 | res = HRTIMER_RESTART; | 202 | res = HRTIMER_RESTART; |
204 | break; | 203 | break; |
205 | case TIME_DEL: | 204 | case TIME_DEL: |
206 | xtime.tv_sec++; | 205 | timekeeping_leap_insert(1); |
207 | time_tai--; | 206 | time_tai--; |
208 | wall_to_monotonic.tv_sec--; | ||
209 | time_state = TIME_WAIT; | 207 | time_state = TIME_WAIT; |
210 | printk(KERN_NOTICE | 208 | printk(KERN_NOTICE |
211 | "Clock: deleting leap second 23:59:59 UTC\n"); | 209 | "Clock: deleting leap second 23:59:59 UTC\n"); |
@@ -219,7 +217,6 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) | |||
219 | time_state = TIME_OK; | 217 | time_state = TIME_OK; |
220 | break; | 218 | break; |
221 | } | 219 | } |
222 | update_vsyscall(&xtime, clock); | ||
223 | 220 | ||
224 | write_sequnlock(&xtime_lock); | 221 | write_sequnlock(&xtime_lock); |
225 | 222 | ||
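
The new timekeeping_leap_insert() helper is passed -1 when a leap second is inserted and +1 when one is deleted; it moves xtime and wall_to_monotonic in opposite directions, so the monotonic clock is unaffected. A tiny sketch with made-up values:

#include <stdio.h>

static long xtime_sec = 1000000;
static long wall_to_monotonic_sec = -999000;

static void leap_insert(int leapsecond)
{
	xtime_sec += leapsecond;
	wall_to_monotonic_sec -= leapsecond;
}

int main(void)
{
	long mono_before = xtime_sec + wall_to_monotonic_sec;

	leap_insert(-1);	/* TIME_INS: 23:59:60 inserted, wall time repeats a second */
	printf("monotonic unchanged: %ld -> %ld\n",
	       mono_before, xtime_sec + wall_to_monotonic_sec);
	return 0;
}
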
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e8c77d9c633a..fb0f46fa1ecd 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -18,7 +18,117 @@ | |||
18 | #include <linux/jiffies.h> | 18 | #include <linux/jiffies.h> |
19 | #include <linux/time.h> | 19 | #include <linux/time.h> |
20 | #include <linux/tick.h> | 20 | #include <linux/tick.h> |
21 | #include <linux/stop_machine.h> | ||
22 | |||
23 | /* Structure holding internal timekeeping values. */ | ||
24 | struct timekeeper { | ||
25 | /* Current clocksource used for timekeeping. */ | ||
26 | struct clocksource *clock; | ||
27 | /* The shift value of the current clocksource. */ | ||
28 | int shift; | ||
29 | |||
30 | /* Number of clock cycles in one NTP interval. */ | ||
31 | cycle_t cycle_interval; | ||
32 | /* Number of clock shifted nano seconds in one NTP interval. */ | ||
33 | u64 xtime_interval; | ||
34 | /* Raw nano seconds accumulated per NTP interval. */ | ||
35 | u32 raw_interval; | ||
36 | |||
37 | /* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */ | ||
38 | u64 xtime_nsec; | ||
39 | /* Difference between accumulated time and NTP time in ntp | ||
40 | * shifted nano seconds. */ | ||
41 | s64 ntp_error; | ||
42 | /* Shift conversion between clock shifted nano seconds and | ||
43 | * ntp shifted nano seconds. */ | ||
44 | int ntp_error_shift; | ||
45 | /* NTP adjusted clock multiplier */ | ||
46 | u32 mult; | ||
47 | }; | ||
48 | |||
49 | struct timekeeper timekeeper; | ||
50 | |||
51 | /** | ||
52 | * timekeeper_setup_internals - Set up internals to use clocksource clock. | ||
53 | * | ||
54 | * @clock: Pointer to clocksource. | ||
55 | * | ||
56 | * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment | ||
57 | * pair and interval request. | ||
58 | * | ||
59 | * Unless you're the timekeeping code, you should not be using this! | ||
60 | */ | ||
61 | static void timekeeper_setup_internals(struct clocksource *clock) | ||
62 | { | ||
63 | cycle_t interval; | ||
64 | u64 tmp; | ||
65 | |||
66 | timekeeper.clock = clock; | ||
67 | clock->cycle_last = clock->read(clock); | ||
21 | 68 | ||
69 | /* Do the ns -> cycle conversion first, using original mult */ | ||
70 | tmp = NTP_INTERVAL_LENGTH; | ||
71 | tmp <<= clock->shift; | ||
72 | tmp += clock->mult/2; | ||
73 | do_div(tmp, clock->mult); | ||
74 | if (tmp == 0) | ||
75 | tmp = 1; | ||
76 | |||
77 | interval = (cycle_t) tmp; | ||
78 | timekeeper.cycle_interval = interval; | ||
79 | |||
80 | /* Go back from cycles -> shifted ns */ | ||
81 | timekeeper.xtime_interval = (u64) interval * clock->mult; | ||
82 | timekeeper.raw_interval = | ||
83 | ((u64) interval * clock->mult) >> clock->shift; | ||
84 | |||
85 | timekeeper.xtime_nsec = 0; | ||
86 | timekeeper.shift = clock->shift; | ||
87 | |||
88 | timekeeper.ntp_error = 0; | ||
89 | timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; | ||
90 | |||
91 | /* | ||
92 | * The timekeeper keeps its own mult values for the currently | ||
93 | * active clocksource. These values will be adjusted via NTP | ||
94 | * to counteract clock drift. | ||
95 | */ | ||
96 | timekeeper.mult = clock->mult; | ||
97 | } | ||
98 | |||
99 | /* Timekeeper helper functions. */ | ||
100 | static inline s64 timekeeping_get_ns(void) | ||
101 | { | ||
102 | cycle_t cycle_now, cycle_delta; | ||
103 | struct clocksource *clock; | ||
104 | |||
105 | /* read clocksource: */ | ||
106 | clock = timekeeper.clock; | ||
107 | cycle_now = clock->read(clock); | ||
108 | |||
109 | /* calculate the delta since the last update_wall_time: */ | ||
110 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
111 | |||
112 | /* return delta converted to nanoseconds using the NTP adjusted mult. */ | ||
113 | return clocksource_cyc2ns(cycle_delta, timekeeper.mult, | ||
114 | timekeeper.shift); | ||
115 | } | ||
116 | |||
117 | static inline s64 timekeeping_get_ns_raw(void) | ||
118 | { | ||
119 | cycle_t cycle_now, cycle_delta; | ||
120 | struct clocksource *clock; | ||
121 | |||
122 | /* read clocksource: */ | ||
123 | clock = timekeeper.clock; | ||
124 | cycle_now = clock->read(clock); | ||
125 | |||
126 | /* calculate the delta since the last update_wall_time: */ | ||
127 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
128 | |||
129 | /* return delta converted to nanoseconds using the original (non NTP adjusted) mult. */ | ||
130 | return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); | ||
131 | } | ||
22 | 132 | ||
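
timekeeper_setup_internals() converts the NTP interval length from nanoseconds to whole clock cycles with the rounding shown above, then derives the shifted-nanosecond interval back from that cycle count. The same arithmetic in a standalone sketch; HZ=250 and the 1 MHz mult/shift pair are example assumptions, not values from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ntp_interval_length = 4000000;	/* ns, i.e. roughly 1/HZ at HZ=250 */
	uint32_t mult = 1024000, shift = 10;	/* models 1000 ns per cycle        */

	/* ns -> cycles, rounded, as in timekeeper_setup_internals() */
	uint64_t tmp = (ntp_interval_length << shift) + mult / 2;
	tmp /= mult;				/* do_div() in the kernel          */
	if (tmp == 0)
		tmp = 1;

	uint64_t cycle_interval = tmp;
	uint64_t xtime_interval = cycle_interval * mult;   /* back to shifted ns */

	printf("cycle_interval=%llu cycles, xtime_interval=%llu (%.0f ns)\n",
	       (unsigned long long)cycle_interval,
	       (unsigned long long)xtime_interval,
	       (double)xtime_interval / (1u << shift));
	return 0;
}
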
23 | /* | 133 | /* |
24 | * This read-write spinlock protects us from races in SMP while | 134 | * This read-write spinlock protects us from races in SMP while |
@@ -44,7 +154,12 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); | |||
44 | */ | 154 | */ |
45 | struct timespec xtime __attribute__ ((aligned (16))); | 155 | struct timespec xtime __attribute__ ((aligned (16))); |
46 | struct timespec wall_to_monotonic __attribute__ ((aligned (16))); | 156 | struct timespec wall_to_monotonic __attribute__ ((aligned (16))); |
47 | static unsigned long total_sleep_time; /* seconds */ | 157 | static struct timespec total_sleep_time; |
158 | |||
159 | /* | ||
160 | * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. | ||
161 | */ | ||
162 | struct timespec raw_time; | ||
48 | 163 | ||
49 | /* flag for if timekeeping is suspended */ | 164 | /* flag for if timekeeping is suspended */ |
50 | int __read_mostly timekeeping_suspended; | 165 | int __read_mostly timekeeping_suspended; |
@@ -56,35 +171,44 @@ void update_xtime_cache(u64 nsec) | |||
56 | timespec_add_ns(&xtime_cache, nsec); | 171 | timespec_add_ns(&xtime_cache, nsec); |
57 | } | 172 | } |
58 | 173 | ||
59 | struct clocksource *clock; | 174 | /* must hold xtime_lock */ |
60 | 175 | void timekeeping_leap_insert(int leapsecond) | |
176 | { | ||
177 | xtime.tv_sec += leapsecond; | ||
178 | wall_to_monotonic.tv_sec -= leapsecond; | ||
179 | update_vsyscall(&xtime, timekeeper.clock); | ||
180 | } | ||
61 | 181 | ||
62 | #ifdef CONFIG_GENERIC_TIME | 182 | #ifdef CONFIG_GENERIC_TIME |
183 | |||
63 | /** | 184 | /** |
64 | * clocksource_forward_now - update clock to the current time | 185 | * timekeeping_forward_now - update clock to the current time |
65 | * | 186 | * |
66 | * Forward the current clock to update its state since the last call to | 187 | * Forward the current clock to update its state since the last call to |
67 | * update_wall_time(). This is useful before significant clock changes, | 188 | * update_wall_time(). This is useful before significant clock changes, |
68 | * as it avoids having to deal with this time offset explicitly. | 189 | * as it avoids having to deal with this time offset explicitly. |
69 | */ | 190 | */ |
70 | static void clocksource_forward_now(void) | 191 | static void timekeeping_forward_now(void) |
71 | { | 192 | { |
72 | cycle_t cycle_now, cycle_delta; | 193 | cycle_t cycle_now, cycle_delta; |
194 | struct clocksource *clock; | ||
73 | s64 nsec; | 195 | s64 nsec; |
74 | 196 | ||
75 | cycle_now = clocksource_read(clock); | 197 | clock = timekeeper.clock; |
198 | cycle_now = clock->read(clock); | ||
76 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 199 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; |
77 | clock->cycle_last = cycle_now; | 200 | clock->cycle_last = cycle_now; |
78 | 201 | ||
79 | nsec = cyc2ns(clock, cycle_delta); | 202 | nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult, |
203 | timekeeper.shift); | ||
80 | 204 | ||
81 | /* If arch requires, add in gettimeoffset() */ | 205 | /* If arch requires, add in gettimeoffset() */ |
82 | nsec += arch_gettimeoffset(); | 206 | nsec += arch_gettimeoffset(); |
83 | 207 | ||
84 | timespec_add_ns(&xtime, nsec); | 208 | timespec_add_ns(&xtime, nsec); |
85 | 209 | ||
86 | nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift; | 210 | nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); |
87 | clock->raw_time.tv_nsec += nsec; | 211 | timespec_add_ns(&raw_time, nsec); |
88 | } | 212 | } |
89 | 213 | ||
90 | /** | 214 | /** |
@@ -95,7 +219,6 @@ static void clocksource_forward_now(void) | |||
95 | */ | 219 | */ |
96 | void getnstimeofday(struct timespec *ts) | 220 | void getnstimeofday(struct timespec *ts) |
97 | { | 221 | { |
98 | cycle_t cycle_now, cycle_delta; | ||
99 | unsigned long seq; | 222 | unsigned long seq; |
100 | s64 nsecs; | 223 | s64 nsecs; |
101 | 224 | ||
@@ -105,15 +228,7 @@ void getnstimeofday(struct timespec *ts) | |||
105 | seq = read_seqbegin(&xtime_lock); | 228 | seq = read_seqbegin(&xtime_lock); |
106 | 229 | ||
107 | *ts = xtime; | 230 | *ts = xtime; |
108 | 231 | nsecs = timekeeping_get_ns(); | |
109 | /* read clocksource: */ | ||
110 | cycle_now = clocksource_read(clock); | ||
111 | |||
112 | /* calculate the delta since the last update_wall_time: */ | ||
113 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
114 | |||
115 | /* convert to nanoseconds: */ | ||
116 | nsecs = cyc2ns(clock, cycle_delta); | ||
117 | 232 | ||
118 | /* If arch requires, add in gettimeoffset() */ | 233 | /* If arch requires, add in gettimeoffset() */ |
119 | nsecs += arch_gettimeoffset(); | 234 | nsecs += arch_gettimeoffset(); |
@@ -125,6 +240,57 @@ void getnstimeofday(struct timespec *ts) | |||
125 | 240 | ||
126 | EXPORT_SYMBOL(getnstimeofday); | 241 | EXPORT_SYMBOL(getnstimeofday); |
127 | 242 | ||
243 | ktime_t ktime_get(void) | ||
244 | { | ||
245 | unsigned int seq; | ||
246 | s64 secs, nsecs; | ||
247 | |||
248 | WARN_ON(timekeeping_suspended); | ||
249 | |||
250 | do { | ||
251 | seq = read_seqbegin(&xtime_lock); | ||
252 | secs = xtime.tv_sec + wall_to_monotonic.tv_sec; | ||
253 | nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec; | ||
254 | nsecs += timekeeping_get_ns(); | ||
255 | |||
256 | } while (read_seqretry(&xtime_lock, seq)); | ||
257 | /* | ||
258 | * Use ktime_set/ktime_add_ns to create a proper ktime on | ||
259 | * 32-bit architectures without CONFIG_KTIME_SCALAR. | ||
260 | */ | ||
261 | return ktime_add_ns(ktime_set(secs, 0), nsecs); | ||
262 | } | ||
263 | EXPORT_SYMBOL_GPL(ktime_get); | ||
264 | |||
265 | /** | ||
266 | * ktime_get_ts - get the monotonic clock in timespec format | ||
267 | * @ts: pointer to timespec variable | ||
268 | * | ||
269 | * The function calculates the monotonic clock from the realtime | ||
270 | * clock and the wall_to_monotonic offset and stores the result | ||
271 | * in normalized timespec format in the variable pointed to by @ts. | ||
272 | */ | ||
273 | void ktime_get_ts(struct timespec *ts) | ||
274 | { | ||
275 | struct timespec tomono; | ||
276 | unsigned int seq; | ||
277 | s64 nsecs; | ||
278 | |||
279 | WARN_ON(timekeeping_suspended); | ||
280 | |||
281 | do { | ||
282 | seq = read_seqbegin(&xtime_lock); | ||
283 | *ts = xtime; | ||
284 | tomono = wall_to_monotonic; | ||
285 | nsecs = timekeeping_get_ns(); | ||
286 | |||
287 | } while (read_seqretry(&xtime_lock, seq)); | ||
288 | |||
289 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, | ||
290 | ts->tv_nsec + tomono.tv_nsec + nsecs); | ||
291 | } | ||
292 | EXPORT_SYMBOL_GPL(ktime_get_ts); | ||
293 | |||
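
ktime_get(), ktime_get_ts() and getnstimeofday() all use the same xtime_lock read pattern: snapshot the sequence, read the data, and retry if a writer got in between (the real seqlock also spins while a write is in progress). A simplified single-threaded sketch of the retry logic, not the kernel implementation; the writer is invoked inline just to force one retry.

#include <stdio.h>

static unsigned seq;
static long xtime_sec = 100, xtime_nsec = 500;

static unsigned read_seqbegin(void) { return seq; }
static int read_seqretry(unsigned start) { return seq != start; }

static void simulated_writer(void)
{
	seq++;			/* write_seqlock()   */
	xtime_sec = 101;
	xtime_nsec = 250;
	seq++;			/* write_sequnlock() */
}

int main(void)
{
	int fired = 0;
	long sec, nsec;
	unsigned start;

	do {
		start = read_seqbegin();
		sec = xtime_sec;
		if (!fired) {		/* writer races with the first read */
			simulated_writer();
			fired = 1;
		}
		nsec = xtime_nsec;
	} while (read_seqretry(start));

	printf("consistent snapshot: %ld.%09ld\n", sec, nsec);
	return 0;
}
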
128 | /** | 294 | /** |
129 | * do_gettimeofday - Returns the time of day in a timeval | 295 | * do_gettimeofday - Returns the time of day in a timeval |
130 | * @tv: pointer to the timeval to be set | 296 | * @tv: pointer to the timeval to be set |
@@ -157,7 +323,7 @@ int do_settimeofday(struct timespec *tv) | |||
157 | 323 | ||
158 | write_seqlock_irqsave(&xtime_lock, flags); | 324 | write_seqlock_irqsave(&xtime_lock, flags); |
159 | 325 | ||
160 | clocksource_forward_now(); | 326 | timekeeping_forward_now(); |
161 | 327 | ||
162 | ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec; | 328 | ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec; |
163 | ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec; | 329 | ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec; |
@@ -167,10 +333,10 @@ int do_settimeofday(struct timespec *tv) | |||
167 | 333 | ||
168 | update_xtime_cache(0); | 334 | update_xtime_cache(0); |
169 | 335 | ||
170 | clock->error = 0; | 336 | timekeeper.ntp_error = 0; |
171 | ntp_clear(); | 337 | ntp_clear(); |
172 | 338 | ||
173 | update_vsyscall(&xtime, clock); | 339 | update_vsyscall(&xtime, timekeeper.clock); |
174 | 340 | ||
175 | write_sequnlock_irqrestore(&xtime_lock, flags); | 341 | write_sequnlock_irqrestore(&xtime_lock, flags); |
176 | 342 | ||
@@ -187,44 +353,97 @@ EXPORT_SYMBOL(do_settimeofday); | |||
187 | * | 353 | * |
188 | * Accumulates current time interval and initializes new clocksource | 354 | * Accumulates current time interval and initializes new clocksource |
189 | */ | 355 | */ |
190 | static void change_clocksource(void) | 356 | static int change_clocksource(void *data) |
191 | { | 357 | { |
192 | struct clocksource *new, *old; | 358 | struct clocksource *new, *old; |
193 | 359 | ||
194 | new = clocksource_get_next(); | 360 | new = (struct clocksource *) data; |
361 | |||
362 | timekeeping_forward_now(); | ||
363 | if (!new->enable || new->enable(new) == 0) { | ||
364 | old = timekeeper.clock; | ||
365 | timekeeper_setup_internals(new); | ||
366 | if (old->disable) | ||
367 | old->disable(old); | ||
368 | } | ||
369 | return 0; | ||
370 | } | ||
195 | 371 | ||
196 | if (clock == new) | 372 | /** |
373 | * timekeeping_notify - Install a new clock source | ||
374 | * @clock: pointer to the clock source | ||
375 | * | ||
376 | * This function is called from clocksource.c after a new, better clock | ||
377 | * source has been registered. The caller holds the clocksource_mutex. | ||
378 | */ | ||
379 | void timekeeping_notify(struct clocksource *clock) | ||
380 | { | ||
381 | if (timekeeper.clock == clock) | ||
197 | return; | 382 | return; |
383 | stop_machine(change_clocksource, clock, NULL); | ||
384 | tick_clock_notify(); | ||
385 | } | ||
198 | 386 | ||
199 | clocksource_forward_now(); | 387 | #else /* GENERIC_TIME */ |
200 | 388 | ||
201 | if (clocksource_enable(new)) | 389 | static inline void timekeeping_forward_now(void) { } |
202 | return; | ||
203 | 390 | ||
204 | new->raw_time = clock->raw_time; | 391 | /** |
205 | old = clock; | 392 | * ktime_get - get the monotonic time in ktime_t format |
206 | clock = new; | 393 | * |
207 | clocksource_disable(old); | 394 | * returns the time in ktime_t format |
395 | */ | ||
396 | ktime_t ktime_get(void) | ||
397 | { | ||
398 | struct timespec now; | ||
208 | 399 | ||
209 | clock->cycle_last = 0; | 400 | ktime_get_ts(&now); |
210 | clock->cycle_last = clocksource_read(clock); | ||
211 | clock->error = 0; | ||
212 | clock->xtime_nsec = 0; | ||
213 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | ||
214 | 401 | ||
215 | tick_clock_notify(); | 402 | return timespec_to_ktime(now); |
403 | } | ||
404 | EXPORT_SYMBOL_GPL(ktime_get); | ||
216 | 405 | ||
217 | /* | 406 | /** |
218 | * We're holding xtime lock and waking up klogd would deadlock | 407 | * ktime_get_ts - get the monotonic clock in timespec format |
219 | * us on enqueue. So no printing! | 408 | * @ts: pointer to timespec variable |
220 | printk(KERN_INFO "Time: %s clocksource has been installed.\n", | 409 | * |
221 | clock->name); | 410 | * The function calculates the monotonic clock from the realtime |
222 | */ | 411 | * clock and the wall_to_monotonic offset and stores the result |
412 | * in normalized timespec format in the variable pointed to by @ts. | ||
413 | */ | ||
414 | void ktime_get_ts(struct timespec *ts) | ||
415 | { | ||
416 | struct timespec tomono; | ||
417 | unsigned long seq; | ||
418 | |||
419 | do { | ||
420 | seq = read_seqbegin(&xtime_lock); | ||
421 | getnstimeofday(ts); | ||
422 | tomono = wall_to_monotonic; | ||
423 | |||
424 | } while (read_seqretry(&xtime_lock, seq)); | ||
425 | |||
426 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, | ||
427 | ts->tv_nsec + tomono.tv_nsec); | ||
223 | } | 428 | } |
224 | #else | 429 | EXPORT_SYMBOL_GPL(ktime_get_ts); |
225 | static inline void clocksource_forward_now(void) { } | 430 | |
226 | static inline void change_clocksource(void) { } | 431 | #endif /* !GENERIC_TIME */ |
227 | #endif | 432 | |
433 | /** | ||
434 | * ktime_get_real - get the real (wall-) time in ktime_t format | ||
435 | * | ||
436 | * returns the time in ktime_t format | ||
437 | */ | ||
438 | ktime_t ktime_get_real(void) | ||
439 | { | ||
440 | struct timespec now; | ||
441 | |||
442 | getnstimeofday(&now); | ||
443 | |||
444 | return timespec_to_ktime(now); | ||
445 | } | ||
446 | EXPORT_SYMBOL_GPL(ktime_get_real); | ||
228 | 447 | ||
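
Both ktime_get_ts() variants fold the wall_to_monotonic offset and the leftover nanoseconds into a normalized timespec. A sketch of what set_normalized_timespec() accomplishes, with arbitrary example values:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long sec; long nsec; };

/* Fold excess or negative nanoseconds into the seconds field. */
static struct ts normalize(long sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
	while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
	return (struct ts){ sec, nsec };
}

int main(void)
{
	struct ts xtime  = { 1000, 900000000 };	   /* realtime snapshot       */
	struct ts tomono = { -1000, 100000000 };   /* wall_to_monotonic       */
	long extra_ns = 400000000;		   /* timekeeping_get_ns()    */

	struct ts mono = normalize(xtime.sec + tomono.sec,
				   xtime.nsec + tomono.nsec + extra_ns);
	printf("monotonic = %ld.%09ld\n", mono.sec, mono.nsec);
	return 0;
}
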
229 | /** | 448 | /** |
230 | * getrawmonotonic - Returns the raw monotonic time in a timespec | 449 | * getrawmonotonic - Returns the raw monotonic time in a timespec |
@@ -236,21 +455,11 @@ void getrawmonotonic(struct timespec *ts) | |||
236 | { | 455 | { |
237 | unsigned long seq; | 456 | unsigned long seq; |
238 | s64 nsecs; | 457 | s64 nsecs; |
239 | cycle_t cycle_now, cycle_delta; | ||
240 | 458 | ||
241 | do { | 459 | do { |
242 | seq = read_seqbegin(&xtime_lock); | 460 | seq = read_seqbegin(&xtime_lock); |
243 | 461 | nsecs = timekeeping_get_ns_raw(); | |
244 | /* read clocksource: */ | 462 | *ts = raw_time; |
245 | cycle_now = clocksource_read(clock); | ||
246 | |||
247 | /* calculate the delta since the last update_wall_time: */ | ||
248 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | ||
249 | |||
250 | /* convert to nanoseconds: */ | ||
251 | nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift; | ||
252 | |||
253 | *ts = clock->raw_time; | ||
254 | 463 | ||
255 | } while (read_seqretry(&xtime_lock, seq)); | 464 | } while (read_seqretry(&xtime_lock, seq)); |
256 | 465 | ||
@@ -270,7 +479,7 @@ int timekeeping_valid_for_hres(void) | |||
270 | do { | 479 | do { |
271 | seq = read_seqbegin(&xtime_lock); | 480 | seq = read_seqbegin(&xtime_lock); |
272 | 481 | ||
273 | ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; | 482 | ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; |
274 | 483 | ||
275 | } while (read_seqretry(&xtime_lock, seq)); | 484 | } while (read_seqretry(&xtime_lock, seq)); |
276 | 485 | ||
@@ -278,17 +487,33 @@ int timekeeping_valid_for_hres(void) | |||
278 | } | 487 | } |
279 | 488 | ||
280 | /** | 489 | /** |
281 | * read_persistent_clock - Return time in seconds from the persistent clock. | 490 | * read_persistent_clock - Return time from the persistent clock. |
282 | * | 491 | * |
283 | * Weak dummy function for arches that do not yet support it. | 492 | * Weak dummy function for arches that do not yet support it. |
284 | * Returns seconds from epoch using the battery backed persistent clock. | 493 | * Reads the time from the battery backed persistent clock. |
285 | * Returns zero if unsupported. | 494 | * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported. |
286 | * | 495 | * |
287 | * XXX - Do be sure to remove it once all arches implement it. | 496 | * XXX - Do be sure to remove it once all arches implement it. |
288 | */ | 497 | */ |
289 | unsigned long __attribute__((weak)) read_persistent_clock(void) | 498 | void __attribute__((weak)) read_persistent_clock(struct timespec *ts) |
290 | { | 499 | { |
291 | return 0; | 500 | ts->tv_sec = 0; |
501 | ts->tv_nsec = 0; | ||
502 | } | ||
503 | |||
504 | /** | ||
505 | * read_boot_clock - Return time of the system start. | ||
506 | * | ||
507 | * Weak dummy function for arches that do not yet support it. | ||
508 | * Function to read the exact time the system has been started. | ||
509 | * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported. | ||
510 | * | ||
511 | * XXX - Do be sure to remove it once all arches implement it. | ||
512 | */ | ||
513 | void __attribute__((weak)) read_boot_clock(struct timespec *ts) | ||
514 | { | ||
515 | ts->tv_sec = 0; | ||
516 | ts->tv_nsec = 0; | ||
292 | } | 517 | } |
293 | 518 | ||
294 | /* | 519 | /* |
@@ -296,29 +521,40 @@ unsigned long __attribute__((weak)) read_persistent_clock(void) | |||
296 | */ | 521 | */ |
297 | void __init timekeeping_init(void) | 522 | void __init timekeeping_init(void) |
298 | { | 523 | { |
524 | struct clocksource *clock; | ||
299 | unsigned long flags; | 525 | unsigned long flags; |
300 | unsigned long sec = read_persistent_clock(); | 526 | struct timespec now, boot; |
527 | |||
528 | read_persistent_clock(&now); | ||
529 | read_boot_clock(&boot); | ||
301 | 530 | ||
302 | write_seqlock_irqsave(&xtime_lock, flags); | 531 | write_seqlock_irqsave(&xtime_lock, flags); |
303 | 532 | ||
304 | ntp_init(); | 533 | ntp_init(); |
305 | 534 | ||
306 | clock = clocksource_get_next(); | 535 | clock = clocksource_default_clock(); |
307 | clocksource_enable(clock); | 536 | if (clock->enable) |
308 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 537 | clock->enable(clock); |
309 | clock->cycle_last = clocksource_read(clock); | 538 | timekeeper_setup_internals(clock); |
310 | 539 | ||
311 | xtime.tv_sec = sec; | 540 | xtime.tv_sec = now.tv_sec; |
312 | xtime.tv_nsec = 0; | 541 | xtime.tv_nsec = now.tv_nsec; |
542 | raw_time.tv_sec = 0; | ||
543 | raw_time.tv_nsec = 0; | ||
544 | if (boot.tv_sec == 0 && boot.tv_nsec == 0) { | ||
545 | boot.tv_sec = xtime.tv_sec; | ||
546 | boot.tv_nsec = xtime.tv_nsec; | ||
547 | } | ||
313 | set_normalized_timespec(&wall_to_monotonic, | 548 | set_normalized_timespec(&wall_to_monotonic, |
314 | -xtime.tv_sec, -xtime.tv_nsec); | 549 | -boot.tv_sec, -boot.tv_nsec); |
315 | update_xtime_cache(0); | 550 | update_xtime_cache(0); |
316 | total_sleep_time = 0; | 551 | total_sleep_time.tv_sec = 0; |
552 | total_sleep_time.tv_nsec = 0; | ||
317 | write_sequnlock_irqrestore(&xtime_lock, flags); | 553 | write_sequnlock_irqrestore(&xtime_lock, flags); |
318 | } | 554 | } |
319 | 555 | ||
320 | /* time in seconds when suspend began */ | 556 | /* time in seconds when suspend began */ |
321 | static unsigned long timekeeping_suspend_time; | 557 | static struct timespec timekeeping_suspend_time; |
322 | 558 | ||
323 | /** | 559 | /** |
324 | * timekeeping_resume - Resumes the generic timekeeping subsystem. | 560 | * timekeeping_resume - Resumes the generic timekeeping subsystem. |
@@ -331,24 +567,24 @@ static unsigned long timekeeping_suspend_time; | |||
331 | static int timekeeping_resume(struct sys_device *dev) | 567 | static int timekeeping_resume(struct sys_device *dev) |
332 | { | 568 | { |
333 | unsigned long flags; | 569 | unsigned long flags; |
334 | unsigned long now = read_persistent_clock(); | 570 | struct timespec ts; |
571 | |||
572 | read_persistent_clock(&ts); | ||
335 | 573 | ||
336 | clocksource_resume(); | 574 | clocksource_resume(); |
337 | 575 | ||
338 | write_seqlock_irqsave(&xtime_lock, flags); | 576 | write_seqlock_irqsave(&xtime_lock, flags); |
339 | 577 | ||
340 | if (now && (now > timekeeping_suspend_time)) { | 578 | if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { |
341 | unsigned long sleep_length = now - timekeeping_suspend_time; | 579 | ts = timespec_sub(ts, timekeeping_suspend_time); |
342 | 580 | xtime = timespec_add_safe(xtime, ts); | |
343 | xtime.tv_sec += sleep_length; | 581 | wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); |
344 | wall_to_monotonic.tv_sec -= sleep_length; | 582 | total_sleep_time = timespec_add_safe(total_sleep_time, ts); |
345 | total_sleep_time += sleep_length; | ||
346 | } | 583 | } |
347 | update_xtime_cache(0); | 584 | update_xtime_cache(0); |
348 | /* re-base the last cycle value */ | 585 | /* re-base the last cycle value */ |
349 | clock->cycle_last = 0; | 586 | timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); |
350 | clock->cycle_last = clocksource_read(clock); | 587 | timekeeper.ntp_error = 0; |
351 | clock->error = 0; | ||
352 | timekeeping_suspended = 0; | 588 | timekeeping_suspended = 0; |
353 | write_sequnlock_irqrestore(&xtime_lock, flags); | 589 | write_sequnlock_irqrestore(&xtime_lock, flags); |
354 | 590 | ||
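
On resume, the persistent-clock delta is now applied with timespec arithmetic instead of the old whole-second adjustment. A userspace sketch of the subtract and add-with-normalization steps, using invented timestamps:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long sec; long nsec; };

static struct ts ts_sub(struct ts a, struct ts b)
{
	struct ts r = { a.sec - b.sec, a.nsec - b.nsec };
	if (r.nsec < 0) { r.nsec += NSEC_PER_SEC; r.sec--; }
	return r;
}

static struct ts ts_add(struct ts a, struct ts b)
{
	struct ts r = { a.sec + b.sec, a.nsec + b.nsec };
	if (r.nsec >= NSEC_PER_SEC) { r.nsec -= NSEC_PER_SEC; r.sec++; }
	return r;
}

int main(void)
{
	struct ts suspend_time = { 5000, 250000000 };	/* read at suspend */
	struct ts resume_time  = { 5030, 100000000 };	/* read at resume  */
	struct ts xtime        = { 1234, 0 };

	struct ts slept = ts_sub(resume_time, suspend_time);
	xtime = ts_add(xtime, slept);
	printf("slept %ld.%09ld s, xtime now %ld.%09ld\n",
	       slept.sec, slept.nsec, xtime.sec, xtime.nsec);
	return 0;
}
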
@@ -366,10 +602,10 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) | |||
366 | { | 602 | { |
367 | unsigned long flags; | 603 | unsigned long flags; |
368 | 604 | ||
369 | timekeeping_suspend_time = read_persistent_clock(); | 605 | read_persistent_clock(&timekeeping_suspend_time); |
370 | 606 | ||
371 | write_seqlock_irqsave(&xtime_lock, flags); | 607 | write_seqlock_irqsave(&xtime_lock, flags); |
372 | clocksource_forward_now(); | 608 | timekeeping_forward_now(); |
373 | timekeeping_suspended = 1; | 609 | timekeeping_suspended = 1; |
374 | write_sequnlock_irqrestore(&xtime_lock, flags); | 610 | write_sequnlock_irqrestore(&xtime_lock, flags); |
375 | 611 | ||
@@ -404,7 +640,7 @@ device_initcall(timekeeping_init_device); | |||
404 | * If the error is already larger, we look ahead even further | 640 | * If the error is already larger, we look ahead even further |
405 | * to compensate for late or lost adjustments. | 641 | * to compensate for late or lost adjustments. |
406 | */ | 642 | */ |
407 | static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, | 643 | static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval, |
408 | s64 *offset) | 644 | s64 *offset) |
409 | { | 645 | { |
410 | s64 tick_error, i; | 646 | s64 tick_error, i; |
@@ -420,7 +656,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, | |||
420 | * here. This is tuned so that an error of about 1 msec is adjusted | 656 | * here. This is tuned so that an error of about 1 msec is adjusted |
421 | * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). | 657 | * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). |
422 | */ | 658 | */ |
423 | error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); | 659 | error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); |
424 | error2 = abs(error2); | 660 | error2 = abs(error2); |
425 | for (look_ahead = 0; error2 > 0; look_ahead++) | 661 | for (look_ahead = 0; error2 > 0; look_ahead++) |
426 | error2 >>= 2; | 662 | error2 >>= 2; |
@@ -429,8 +665,8 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, | |||
429 | * Now calculate the error in (1 << look_ahead) ticks, but first | 665 | * Now calculate the error in (1 << look_ahead) ticks, but first |
430 | * remove the single look ahead already included in the error. | 666 | * remove the single look ahead already included in the error. |
431 | */ | 667 | */ |
432 | tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1); | 668 | tick_error = tick_length >> (timekeeper.ntp_error_shift + 1); |
433 | tick_error -= clock->xtime_interval >> 1; | 669 | tick_error -= timekeeper.xtime_interval >> 1; |
434 | error = ((error - tick_error) >> look_ahead) + tick_error; | 670 | error = ((error - tick_error) >> look_ahead) + tick_error; |
435 | 671 | ||
436 | /* Finally calculate the adjustment shift value. */ | 672 | /* Finally calculate the adjustment shift value. */ |
@@ -455,18 +691,18 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, | |||
455 | * this is optimized for the most common adjustments of -1,0,1, | 691 | * this is optimized for the most common adjustments of -1,0,1, |
456 | * for other values we can do a bit more work. | 692 | * for other values we can do a bit more work. |
457 | */ | 693 | */ |
458 | static void clocksource_adjust(s64 offset) | 694 | static void timekeeping_adjust(s64 offset) |
459 | { | 695 | { |
460 | s64 error, interval = clock->cycle_interval; | 696 | s64 error, interval = timekeeper.cycle_interval; |
461 | int adj; | 697 | int adj; |
462 | 698 | ||
463 | error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1); | 699 | error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1); |
464 | if (error > interval) { | 700 | if (error > interval) { |
465 | error >>= 2; | 701 | error >>= 2; |
466 | if (likely(error <= interval)) | 702 | if (likely(error <= interval)) |
467 | adj = 1; | 703 | adj = 1; |
468 | else | 704 | else |
469 | adj = clocksource_bigadjust(error, &interval, &offset); | 705 | adj = timekeeping_bigadjust(error, &interval, &offset); |
470 | } else if (error < -interval) { | 706 | } else if (error < -interval) { |
471 | error >>= 2; | 707 | error >>= 2; |
472 | if (likely(error >= -interval)) { | 708 | if (likely(error >= -interval)) { |
@@ -474,15 +710,15 @@ static void clocksource_adjust(s64 offset) | |||
474 | interval = -interval; | 710 | interval = -interval; |
475 | offset = -offset; | 711 | offset = -offset; |
476 | } else | 712 | } else |
477 | adj = clocksource_bigadjust(error, &interval, &offset); | 713 | adj = timekeeping_bigadjust(error, &interval, &offset); |
478 | } else | 714 | } else |
479 | return; | 715 | return; |
480 | 716 | ||
481 | clock->mult += adj; | 717 | timekeeper.mult += adj; |
482 | clock->xtime_interval += interval; | 718 | timekeeper.xtime_interval += interval; |
483 | clock->xtime_nsec -= offset; | 719 | timekeeper.xtime_nsec -= offset; |
484 | clock->error -= (interval - offset) << | 720 | timekeeper.ntp_error -= (interval - offset) << |
485 | (NTP_SCALE_SHIFT - clock->shift); | 721 | timekeeper.ntp_error_shift; |
486 | } | 722 | } |
487 | 723 | ||
488 | /** | 724 | /** |
@@ -492,53 +728,59 @@ static void clocksource_adjust(s64 offset) | |||
492 | */ | 728 | */ |
493 | void update_wall_time(void) | 729 | void update_wall_time(void) |
494 | { | 730 | { |
731 | struct clocksource *clock; | ||
495 | cycle_t offset; | 732 | cycle_t offset; |
733 | u64 nsecs; | ||
496 | 734 | ||
497 | /* Make sure we're fully resumed: */ | 735 | /* Make sure we're fully resumed: */ |
498 | if (unlikely(timekeeping_suspended)) | 736 | if (unlikely(timekeeping_suspended)) |
499 | return; | 737 | return; |
500 | 738 | ||
739 | clock = timekeeper.clock; | ||
501 | #ifdef CONFIG_GENERIC_TIME | 740 | #ifdef CONFIG_GENERIC_TIME |
502 | offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask; | 741 | offset = (clock->read(clock) - clock->cycle_last) & clock->mask; |
503 | #else | 742 | #else |
504 | offset = clock->cycle_interval; | 743 | offset = timekeeper.cycle_interval; |
505 | #endif | 744 | #endif |
506 | clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift; | 745 | timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift; |
507 | 746 | ||
508 | /* normally this loop will run just once, however in the | 747 | /* normally this loop will run just once, however in the |
509 | * case of lost or late ticks, it will accumulate correctly. | 748 | * case of lost or late ticks, it will accumulate correctly. |
510 | */ | 749 | */ |
511 | while (offset >= clock->cycle_interval) { | 750 | while (offset >= timekeeper.cycle_interval) { |
751 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; | ||
752 | |||
512 | /* accumulate one interval */ | 753 | /* accumulate one interval */ |
513 | offset -= clock->cycle_interval; | 754 | offset -= timekeeper.cycle_interval; |
514 | clock->cycle_last += clock->cycle_interval; | 755 | clock->cycle_last += timekeeper.cycle_interval; |
515 | 756 | ||
516 | clock->xtime_nsec += clock->xtime_interval; | 757 | timekeeper.xtime_nsec += timekeeper.xtime_interval; |
517 | if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) { | 758 | if (timekeeper.xtime_nsec >= nsecps) { |
518 | clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift; | 759 | timekeeper.xtime_nsec -= nsecps; |
519 | xtime.tv_sec++; | 760 | xtime.tv_sec++; |
520 | second_overflow(); | 761 | second_overflow(); |
521 | } | 762 | } |
522 | 763 | ||
523 | clock->raw_time.tv_nsec += clock->raw_interval; | 764 | raw_time.tv_nsec += timekeeper.raw_interval; |
524 | if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) { | 765 | if (raw_time.tv_nsec >= NSEC_PER_SEC) { |
525 | clock->raw_time.tv_nsec -= NSEC_PER_SEC; | 766 | raw_time.tv_nsec -= NSEC_PER_SEC; |
526 | clock->raw_time.tv_sec++; | 767 | raw_time.tv_sec++; |
527 | } | 768 | } |
528 | 769 | ||
529 | /* accumulate error between NTP and clock interval */ | 770 | /* accumulate error between NTP and clock interval */ |
530 | clock->error += tick_length; | 771 | timekeeper.ntp_error += tick_length; |
531 | clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift); | 772 | timekeeper.ntp_error -= timekeeper.xtime_interval << |
773 | timekeeper.ntp_error_shift; | ||
532 | } | 774 | } |
533 | 775 | ||
534 | /* correct the clock when NTP error is too big */ | 776 | /* correct the clock when NTP error is too big */ |
535 | clocksource_adjust(offset); | 777 | timekeeping_adjust(offset); |
536 | 778 | ||
537 | /* | 779 | /* |
538 | * Since in the loop above, we accumulate any amount of time | 780 | * Since in the loop above, we accumulate any amount of time |
539 | * in xtime_nsec over a second into xtime.tv_sec, its possible for | 781 | * in xtime_nsec over a second into xtime.tv_sec, its possible for |
540 | * xtime_nsec to be fairly small after the loop. Further, if we're | 782 | * xtime_nsec to be fairly small after the loop. Further, if we're |
541 | * slightly speeding the clocksource up in clocksource_adjust(), | 783 | * slightly speeding the clocksource up in timekeeping_adjust(), |
542 | * its possible the required corrective factor to xtime_nsec could | 784 | * its possible the required corrective factor to xtime_nsec could |
543 | * cause it to underflow. | 785 | * cause it to underflow. |
544 | * | 786 | * |
@@ -550,24 +792,25 @@ void update_wall_time(void) | |||
550 | * We'll correct this error next time through this function, when | 792 | * We'll correct this error next time through this function, when |
551 | * xtime_nsec is not as small. | 793 | * xtime_nsec is not as small. |
552 | */ | 794 | */ |
553 | if (unlikely((s64)clock->xtime_nsec < 0)) { | 795 | if (unlikely((s64)timekeeper.xtime_nsec < 0)) { |
554 | s64 neg = -(s64)clock->xtime_nsec; | 796 | s64 neg = -(s64)timekeeper.xtime_nsec; |
555 | clock->xtime_nsec = 0; | 797 | timekeeper.xtime_nsec = 0; |
556 | clock->error += neg << (NTP_SCALE_SHIFT - clock->shift); | 798 | timekeeper.ntp_error += neg << timekeeper.ntp_error_shift; |
557 | } | 799 | } |
558 | 800 | ||
559 | /* store full nanoseconds into xtime after rounding it up and | 801 | /* store full nanoseconds into xtime after rounding it up and |
560 | * add the remainder to the error difference. | 802 | * add the remainder to the error difference. |
561 | */ | 803 | */ |
562 | xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1; | 804 | xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1; |
563 | clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; | 805 | timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift; |
564 | clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift); | 806 | timekeeper.ntp_error += timekeeper.xtime_nsec << |
807 | timekeeper.ntp_error_shift; | ||
565 | 808 | ||
566 | update_xtime_cache(cyc2ns(clock, offset)); | 809 | nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift); |
810 | update_xtime_cache(nsecs); | ||
567 | 811 | ||
568 | /* check to see if there is a new clocksource to use */ | 812 | /* check to see if there is a new clocksource to use */ |
569 | change_clocksource(); | 813 | update_vsyscall(&xtime, timekeeper.clock); |
570 | update_vsyscall(&xtime, clock); | ||
571 | } | 814 | } |
572 | 815 | ||
573 | /** | 816 | /** |
@@ -583,9 +826,12 @@ void update_wall_time(void) | |||
583 | */ | 826 | */ |
584 | void getboottime(struct timespec *ts) | 827 | void getboottime(struct timespec *ts) |
585 | { | 828 | { |
586 | set_normalized_timespec(ts, | 829 | struct timespec boottime = { |
587 | - (wall_to_monotonic.tv_sec + total_sleep_time), | 830 | .tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec, |
588 | - wall_to_monotonic.tv_nsec); | 831 | .tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec |
832 | }; | ||
833 | |||
834 | set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); | ||
589 | } | 835 | } |
590 | 836 | ||
591 | /** | 837 | /** |
@@ -594,7 +840,7 @@ void getboottime(struct timespec *ts) | |||
594 | */ | 840 | */ |
595 | void monotonic_to_bootbased(struct timespec *ts) | 841 | void monotonic_to_bootbased(struct timespec *ts) |
596 | { | 842 | { |
597 | ts->tv_sec += total_sleep_time; | 843 | *ts = timespec_add_safe(*ts, total_sleep_time); |
598 | } | 844 | } |
599 | 845 | ||
600 | unsigned long get_seconds(void) | 846 | unsigned long get_seconds(void) |
@@ -603,6 +849,10 @@ unsigned long get_seconds(void) | |||
603 | } | 849 | } |
604 | EXPORT_SYMBOL(get_seconds); | 850 | EXPORT_SYMBOL(get_seconds); |
605 | 851 | ||
852 | struct timespec __current_kernel_time(void) | ||
853 | { | ||
854 | return xtime_cache; | ||
855 | } | ||
606 | 856 | ||
607 | struct timespec current_kernel_time(void) | 857 | struct timespec current_kernel_time(void) |
608 | { | 858 | { |
@@ -618,3 +868,20 @@ struct timespec current_kernel_time(void) | |||
618 | return now; | 868 | return now; |
619 | } | 869 | } |
620 | EXPORT_SYMBOL(current_kernel_time); | 870 | EXPORT_SYMBOL(current_kernel_time); |
871 | |||
872 | struct timespec get_monotonic_coarse(void) | ||
873 | { | ||
874 | struct timespec now, mono; | ||
875 | unsigned long seq; | ||
876 | |||
877 | do { | ||
878 | seq = read_seqbegin(&xtime_lock); | ||
879 | |||
880 | now = xtime_cache; | ||
881 | mono = wall_to_monotonic; | ||
882 | } while (read_seqretry(&xtime_lock, seq)); | ||
883 | |||
884 | set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, | ||
885 | now.tv_nsec + mono.tv_nsec); | ||
886 | return now; | ||
887 | } | ||
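The get_monotonic_coarse() helper added above is a standard seqlock reader: snapshot xtime_cache and wall_to_monotonic, retry if a writer raced in, then normalize the summed timespec. Below is a minimal userspace sketch of that retry-and-normalize idea; seq_read_begin()/seq_read_retry() are illustrative stand-ins built on C11 atomics, not the kernel's xtime_lock API.

```c
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the kernel's seqlock: an even counter means
 * "no writer active"; a reader retries if the counter moved under it. */
static _Atomic unsigned int seq;
static struct timespec wall, mono_offset;

static unsigned int seq_read_begin(void)
{
	unsigned int s;
	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
	} while (s & 1);		/* writer in progress, spin */
	return s;
}

static int seq_read_retry(unsigned int s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

/* Keep tv_nsec in [0, 1e9), carrying overflow into tv_sec. */
static void normalize(struct timespec *ts)
{
	while (ts->tv_nsec >= 1000000000L) {
		ts->tv_nsec -= 1000000000L;
		ts->tv_sec++;
	}
}

static struct timespec get_monotonic_coarse_like(void)
{
	struct timespec now, mono;
	unsigned int s;

	do {
		s = seq_read_begin();
		now = wall;		/* snapshot both values consistently */
		mono = mono_offset;
	} while (seq_read_retry(s));

	now.tv_sec += mono.tv_sec;
	now.tv_nsec += mono.tv_nsec;
	normalize(&now);
	return now;
}

int main(void)
{
	wall.tv_sec = 1000; wall.tv_nsec = 900000000L;
	mono_offset.tv_sec = 1; mono_offset.tv_nsec = 200000000L;
	struct timespec t = get_monotonic_coarse_like();
	printf("%ld.%09ld\n", (long)t.tv_sec, t.tv_nsec);	/* 1002.100000000 */
	return 0;
}
```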
diff --git a/kernel/timer.c b/kernel/timer.c index a7f07d5a6241..8e92be654dad 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -72,6 +72,7 @@ struct tvec_base { | |||
72 | spinlock_t lock; | 72 | spinlock_t lock; |
73 | struct timer_list *running_timer; | 73 | struct timer_list *running_timer; |
74 | unsigned long timer_jiffies; | 74 | unsigned long timer_jiffies; |
75 | unsigned long next_timer; | ||
75 | struct tvec_root tv1; | 76 | struct tvec_root tv1; |
76 | struct tvec tv2; | 77 | struct tvec tv2; |
77 | struct tvec tv3; | 78 | struct tvec tv3; |
@@ -622,6 +623,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
622 | 623 | ||
623 | if (timer_pending(timer)) { | 624 | if (timer_pending(timer)) { |
624 | detach_timer(timer, 0); | 625 | detach_timer(timer, 0); |
626 | if (timer->expires == base->next_timer && | ||
627 | !tbase_get_deferrable(timer->base)) | ||
628 | base->next_timer = base->timer_jiffies; | ||
625 | ret = 1; | 629 | ret = 1; |
626 | } else { | 630 | } else { |
627 | if (pending_only) | 631 | if (pending_only) |
@@ -663,6 +667,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
663 | } | 667 | } |
664 | 668 | ||
665 | timer->expires = expires; | 669 | timer->expires = expires; |
670 | if (time_before(timer->expires, base->next_timer) && | ||
671 | !tbase_get_deferrable(timer->base)) | ||
672 | base->next_timer = timer->expires; | ||
666 | internal_add_timer(base, timer); | 673 | internal_add_timer(base, timer); |
667 | 674 | ||
668 | out_unlock: | 675 | out_unlock: |
@@ -781,6 +788,9 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
781 | spin_lock_irqsave(&base->lock, flags); | 788 | spin_lock_irqsave(&base->lock, flags); |
782 | timer_set_base(timer, base); | 789 | timer_set_base(timer, base); |
783 | debug_timer_activate(timer); | 790 | debug_timer_activate(timer); |
791 | if (time_before(timer->expires, base->next_timer) && | ||
792 | !tbase_get_deferrable(timer->base)) | ||
793 | base->next_timer = timer->expires; | ||
784 | internal_add_timer(base, timer); | 794 | internal_add_timer(base, timer); |
785 | /* | 795 | /* |
786 | * Check whether the other CPU is idle and needs to be | 796 | * Check whether the other CPU is idle and needs to be |
@@ -817,6 +827,9 @@ int del_timer(struct timer_list *timer) | |||
817 | base = lock_timer_base(timer, &flags); | 827 | base = lock_timer_base(timer, &flags); |
818 | if (timer_pending(timer)) { | 828 | if (timer_pending(timer)) { |
819 | detach_timer(timer, 1); | 829 | detach_timer(timer, 1); |
830 | if (timer->expires == base->next_timer && | ||
831 | !tbase_get_deferrable(timer->base)) | ||
832 | base->next_timer = base->timer_jiffies; | ||
820 | ret = 1; | 833 | ret = 1; |
821 | } | 834 | } |
822 | spin_unlock_irqrestore(&base->lock, flags); | 835 | spin_unlock_irqrestore(&base->lock, flags); |
@@ -850,6 +863,9 @@ int try_to_del_timer_sync(struct timer_list *timer) | |||
850 | ret = 0; | 863 | ret = 0; |
851 | if (timer_pending(timer)) { | 864 | if (timer_pending(timer)) { |
852 | detach_timer(timer, 1); | 865 | detach_timer(timer, 1); |
866 | if (timer->expires == base->next_timer && | ||
867 | !tbase_get_deferrable(timer->base)) | ||
868 | base->next_timer = base->timer_jiffies; | ||
853 | ret = 1; | 869 | ret = 1; |
854 | } | 870 | } |
855 | out: | 871 | out: |
@@ -1007,8 +1023,8 @@ static inline void __run_timers(struct tvec_base *base) | |||
1007 | #ifdef CONFIG_NO_HZ | 1023 | #ifdef CONFIG_NO_HZ |
1008 | /* | 1024 | /* |
1009 | * Find out when the next timer event is due to happen. This | 1025 | * Find out when the next timer event is due to happen. This |
1010 | * is used on S/390 to stop all activity when a cpus is idle. | 1026 | * is used on S/390 to stop all activity when a CPU is idle. |
1011 | * This functions needs to be called disabled. | 1027 | * This function needs to be called with interrupts disabled. |
1012 | */ | 1028 | */ |
1013 | static unsigned long __next_timer_interrupt(struct tvec_base *base) | 1029 | static unsigned long __next_timer_interrupt(struct tvec_base *base) |
1014 | { | 1030 | { |
@@ -1134,7 +1150,9 @@ unsigned long get_next_timer_interrupt(unsigned long now) | |||
1134 | unsigned long expires; | 1150 | unsigned long expires; |
1135 | 1151 | ||
1136 | spin_lock(&base->lock); | 1152 | spin_lock(&base->lock); |
1137 | expires = __next_timer_interrupt(base); | 1153 | if (time_before_eq(base->next_timer, base->timer_jiffies)) |
1154 | base->next_timer = __next_timer_interrupt(base); | ||
1155 | expires = base->next_timer; | ||
1138 | spin_unlock(&base->lock); | 1156 | spin_unlock(&base->lock); |
1139 | 1157 | ||
1140 | if (time_before_eq(expires, now)) | 1158 | if (time_before_eq(expires, now)) |
@@ -1523,6 +1541,7 @@ static int __cpuinit init_timers_cpu(int cpu) | |||
1523 | INIT_LIST_HEAD(base->tv1.vec + j); | 1541 | INIT_LIST_HEAD(base->tv1.vec + j); |
1524 | 1542 | ||
1525 | base->timer_jiffies = jiffies; | 1543 | base->timer_jiffies = jiffies; |
1544 | base->next_timer = base->timer_jiffies; | ||
1526 | return 0; | 1545 | return 0; |
1527 | } | 1546 | } |
1528 | 1547 | ||
@@ -1535,6 +1554,9 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea | |||
1535 | timer = list_first_entry(head, struct timer_list, entry); | 1554 | timer = list_first_entry(head, struct timer_list, entry); |
1536 | detach_timer(timer, 0); | 1555 | detach_timer(timer, 0); |
1537 | timer_set_base(timer, new_base); | 1556 | timer_set_base(timer, new_base); |
1557 | if (time_before(timer->expires, new_base->next_timer) && | ||
1558 | !tbase_get_deferrable(timer->base)) | ||
1559 | new_base->next_timer = timer->expires; | ||
1538 | internal_add_timer(new_base, timer); | 1560 | internal_add_timer(new_base, timer); |
1539 | } | 1561 | } |
1540 | } | 1562 | } |
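The new next_timer field caches the earliest pending expiry so get_next_timer_interrupt() can skip rescanning the wheel, and every comparison goes through time_before(), which stays correct across jiffies wraparound. A small standalone sketch of both ideas; ticks_before() and the simplified timer_base are illustrative names, not the kernel macros.

```c
#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "a happens before b", same idea as the kernel's
 * time_before(): the unsigned difference is reinterpreted as signed, so
 * the answer stays right after the counter wraps, as long as the two
 * values are less than half the counter range apart. */
static int ticks_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

struct timer_base {
	unsigned long timer_jiffies;	/* "now" for this base */
	unsigned long next_timer;	/* cached earliest pending expiry */
};

/* Add path: only ever pull the cached expiry earlier. */
static void cache_on_add(struct timer_base *base, unsigned long expires)
{
	if (ticks_before(expires, base->next_timer))
		base->next_timer = expires;
}

/* Remove path: if the removed timer owned the cache, mark it stale by
 * resetting it to "now"; the next query recomputes it from the wheel. */
static void cache_on_remove(struct timer_base *base, unsigned long expires)
{
	if (expires == base->next_timer)
		base->next_timer = base->timer_jiffies;
}

int main(void)
{
	unsigned long now = ULONG_MAX - 10;	/* counter about to wrap */
	struct timer_base base = { .timer_jiffies = now, .next_timer = now + 1000 };

	cache_on_add(&base, now + 20);		/* expiry wraps to a small value */
	printf("next in %lu ticks\n", base.next_timer - base.timer_jiffies);	/* 20 */

	cache_on_remove(&base, base.next_timer);
	printf("stale: %d\n", base.next_timer == base.timer_jiffies);		/* 1 */
	return 0;
}
```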
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 1090b0aed9ba..7a34cb563fec 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -267,8 +267,8 @@ static void blk_trace_free(struct blk_trace *bt) | |||
267 | { | 267 | { |
268 | debugfs_remove(bt->msg_file); | 268 | debugfs_remove(bt->msg_file); |
269 | debugfs_remove(bt->dropped_file); | 269 | debugfs_remove(bt->dropped_file); |
270 | debugfs_remove(bt->dir); | ||
271 | relay_close(bt->rchan); | 270 | relay_close(bt->rchan); |
271 | debugfs_remove(bt->dir); | ||
272 | free_percpu(bt->sequence); | 272 | free_percpu(bt->sequence); |
273 | free_percpu(bt->msg_data); | 273 | free_percpu(bt->msg_data); |
274 | kfree(bt); | 274 | kfree(bt); |
@@ -378,18 +378,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, | |||
378 | 378 | ||
379 | static int blk_remove_buf_file_callback(struct dentry *dentry) | 379 | static int blk_remove_buf_file_callback(struct dentry *dentry) |
380 | { | 380 | { |
381 | struct dentry *parent = dentry->d_parent; | ||
382 | debugfs_remove(dentry); | 381 | debugfs_remove(dentry); |
383 | 382 | ||
384 | /* | ||
385 | * this will fail for all but the last file, but that is ok. what we | ||
386 | * care about is the top level buts->name directory going away, when | ||
387 | * the last trace file is gone. Then we don't have to rmdir() that | ||
388 | * manually on trace stop, so it nicely solves the issue with | ||
389 | * force killing of running traces. | ||
390 | */ | ||
391 | |||
392 | debugfs_remove(parent); | ||
393 | return 0; | 383 | return 0; |
394 | } | 384 | } |
395 | 385 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1f3ec2afa511..1e1d23c26308 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1662,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
1662 | 1662 | ||
1663 | mutex_lock(&ftrace_regex_lock); | 1663 | mutex_lock(&ftrace_regex_lock); |
1664 | if ((file->f_mode & FMODE_WRITE) && | 1664 | if ((file->f_mode & FMODE_WRITE) && |
1665 | !(file->f_flags & O_APPEND)) | 1665 | (file->f_flags & O_TRUNC)) |
1666 | ftrace_filter_reset(enable); | 1666 | ftrace_filter_reset(enable); |
1667 | 1667 | ||
1668 | if (file->f_mode & FMODE_READ) { | 1668 | if (file->f_mode & FMODE_READ) { |
@@ -2577,7 +2577,7 @@ ftrace_graph_open(struct inode *inode, struct file *file) | |||
2577 | 2577 | ||
2578 | mutex_lock(&graph_lock); | 2578 | mutex_lock(&graph_lock); |
2579 | if ((file->f_mode & FMODE_WRITE) && | 2579 | if ((file->f_mode & FMODE_WRITE) && |
2580 | !(file->f_flags & O_APPEND)) { | 2580 | (file->f_flags & O_TRUNC)) { |
2581 | ftrace_graph_count = 0; | 2581 | ftrace_graph_count = 0; |
2582 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); | 2582 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); |
2583 | } | 2583 | } |
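The O_APPEND test is replaced by an O_TRUNC test so the filter and graph files are only cleared when the caller explicitly opened them for truncation ("echo ... > file"), not on every non-append write open. A hedged userspace sketch of the before/after predicates; should_reset_old()/should_reset_new() are illustrative helpers, not ftrace code.

```c
#include <fcntl.h>
#include <stdio.h>

/* Decide whether opening a control file should wipe existing state.
 * The old test (!O_APPEND) also wiped on plain write opens; the new
 * test only wipes when the caller asked to truncate, which is what
 * "echo ... > file" does and "echo ... >> file" does not. */
static int should_reset_old(int writable, int f_flags)
{
	return writable && !(f_flags & O_APPEND);
}

static int should_reset_new(int writable, int f_flags)
{
	return writable && (f_flags & O_TRUNC);
}

int main(void)
{
	int cases[] = { O_WRONLY,		/* plain write open */
			O_WRONLY | O_TRUNC,	/* "> file"         */
			O_WRONLY | O_APPEND };	/* ">> file"        */
	const char *name[] = { "plain", "trunc", "append" };

	for (int i = 0; i < 3; i++)
		printf("%-6s old=%d new=%d\n", name[i],
		       should_reset_old(1, cases[i]),
		       should_reset_new(1, cases[i]));
	return 0;
}
```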
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bf27bb7a63e2..a330513d96ce 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
735 | 735 | ||
736 | put_online_cpus(); | 736 | put_online_cpus(); |
737 | 737 | ||
738 | kfree(buffer->buffers); | ||
738 | free_cpumask_var(buffer->cpumask); | 739 | free_cpumask_var(buffer->cpumask); |
739 | 740 | ||
740 | kfree(buffer); | 741 | kfree(buffer); |
@@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer, | |||
1785 | */ | 1786 | */ |
1786 | RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); | 1787 | RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); |
1787 | 1788 | ||
1788 | if (!rb_try_to_discard(cpu_buffer, event)) | 1789 | if (rb_try_to_discard(cpu_buffer, event)) |
1789 | goto out; | 1790 | goto out; |
1790 | 1791 | ||
1791 | /* | 1792 | /* |
@@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
2383 | * the box. Return the padding, and we will release | 2384 | * the box. Return the padding, and we will release |
2384 | * the current locks, and try again. | 2385 | * the current locks, and try again. |
2385 | */ | 2386 | */ |
2386 | rb_advance_reader(cpu_buffer); | ||
2387 | return event; | 2387 | return event; |
2388 | 2388 | ||
2389 | case RINGBUF_TYPE_TIME_EXTEND: | 2389 | case RINGBUF_TYPE_TIME_EXTEND: |
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void) | |||
2486 | * buffer too. A one time deal is all you get from reading | 2486 | * buffer too. A one time deal is all you get from reading |
2487 | * the ring buffer from an NMI. | 2487 | * the ring buffer from an NMI. |
2488 | */ | 2488 | */ |
2489 | if (likely(!in_nmi() && !oops_in_progress)) | 2489 | if (likely(!in_nmi())) |
2490 | return 1; | 2490 | return 1; |
2491 | 2491 | ||
2492 | tracing_off_permanent(); | 2492 | tracing_off_permanent(); |
@@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
2519 | if (dolock) | 2519 | if (dolock) |
2520 | spin_lock(&cpu_buffer->reader_lock); | 2520 | spin_lock(&cpu_buffer->reader_lock); |
2521 | event = rb_buffer_peek(buffer, cpu, ts); | 2521 | event = rb_buffer_peek(buffer, cpu, ts); |
2522 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | ||
2523 | rb_advance_reader(cpu_buffer); | ||
2522 | if (dolock) | 2524 | if (dolock) |
2523 | spin_unlock(&cpu_buffer->reader_lock); | 2525 | spin_unlock(&cpu_buffer->reader_lock); |
2524 | local_irq_restore(flags); | 2526 | local_irq_restore(flags); |
@@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
2590 | spin_lock(&cpu_buffer->reader_lock); | 2592 | spin_lock(&cpu_buffer->reader_lock); |
2591 | 2593 | ||
2592 | event = rb_buffer_peek(buffer, cpu, ts); | 2594 | event = rb_buffer_peek(buffer, cpu, ts); |
2593 | if (!event) | 2595 | if (event) |
2594 | goto out_unlock; | 2596 | rb_advance_reader(cpu_buffer); |
2595 | |||
2596 | rb_advance_reader(cpu_buffer); | ||
2597 | 2597 | ||
2598 | out_unlock: | ||
2599 | if (dolock) | 2598 | if (dolock) |
2600 | spin_unlock(&cpu_buffer->reader_lock); | 2599 | spin_unlock(&cpu_buffer->reader_lock); |
2601 | local_irq_restore(flags); | 2600 | local_irq_restore(flags); |
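The ring_buffer_free() hunk plugs a leak: the per-CPU buffer pointer array allocated at creation time was freed with neither the buffers nor the container. A minimal sketch of the teardown order (elements, then the pointer array, then the container); the structures are illustrative, not the kernel's.

```c
#include <stdio.h>
#include <stdlib.h>

struct per_cpu_buf { char data[64]; };

struct ring_buf {
	int cpus;
	struct per_cpu_buf **buffers;	/* per-CPU buffer pointer array */
};

static void rb_free(struct ring_buf *rb)
{
	for (int i = 0; i < rb->cpus; i++)
		free(rb->buffers[i]);
	free(rb->buffers);	/* the fix: the pointer array itself was leaked */
	free(rb);
}

int main(void)
{
	int cpus = 4;
	struct ring_buf *rb = calloc(1, sizeof(*rb));

	rb->cpus = cpus;
	rb->buffers = calloc(cpus, sizeof(*rb->buffers));
	for (int i = 0; i < cpus; i++)
		rb->buffers[i] = calloc(1, sizeof(struct per_cpu_buf));

	rb_free(rb);
	puts("freed elements, pointer array, and container");
	return 0;
}
```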
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8bc8d8afea6a..c22b40f8f576 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
848 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 848 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | |
849 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 849 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); |
850 | } | 850 | } |
851 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); | ||
851 | 852 | ||
852 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | 853 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, |
853 | int type, | 854 | int type, |
@@ -2031,7 +2032,7 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
2031 | 2032 | ||
2032 | /* If this file was open for write, then erase contents */ | 2033 | /* If this file was open for write, then erase contents */ |
2033 | if ((file->f_mode & FMODE_WRITE) && | 2034 | if ((file->f_mode & FMODE_WRITE) && |
2034 | !(file->f_flags & O_APPEND)) { | 2035 | (file->f_flags & O_TRUNC)) { |
2035 | long cpu = (long) inode->i_private; | 2036 | long cpu = (long) inode->i_private; |
2036 | 2037 | ||
2037 | if (cpu == TRACE_PIPE_ALL_CPU) | 2038 | if (cpu == TRACE_PIPE_ALL_CPU) |
@@ -3085,7 +3086,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | |||
3085 | break; | 3086 | break; |
3086 | } | 3087 | } |
3087 | 3088 | ||
3088 | trace_consume(iter); | 3089 | if (ret != TRACE_TYPE_NO_CONSUME) |
3090 | trace_consume(iter); | ||
3089 | rem -= count; | 3091 | rem -= count; |
3090 | if (!find_next_entry_inc(iter)) { | 3092 | if (!find_next_entry_inc(iter)) { |
3091 | rem = 0; | 3093 | rem = 0; |
@@ -4233,8 +4235,11 @@ static void __ftrace_dump(bool disable_tracing) | |||
4233 | iter.pos = -1; | 4235 | iter.pos = -1; |
4234 | 4236 | ||
4235 | if (find_next_entry_inc(&iter) != NULL) { | 4237 | if (find_next_entry_inc(&iter) != NULL) { |
4236 | print_trace_line(&iter); | 4238 | int ret; |
4237 | trace_consume(&iter); | 4239 | |
4240 | ret = print_trace_line(&iter); | ||
4241 | if (ret != TRACE_TYPE_NO_CONSUME) | ||
4242 | trace_consume(&iter); | ||
4238 | } | 4243 | } |
4239 | 4244 | ||
4240 | trace_printk_seq(&iter.seq); | 4245 | trace_printk_seq(&iter.seq); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 3548ae5cc780..8b9f4f6e9559 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | |||
438 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 438 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
439 | int *ent_cpu, u64 *ent_ts); | 439 | int *ent_cpu, u64 *ent_ts); |
440 | 440 | ||
441 | void tracing_generic_entry_update(struct trace_entry *entry, | ||
442 | unsigned long flags, | ||
443 | int pc); | ||
444 | |||
445 | void default_wait_pipe(struct trace_iterator *iter); | 441 | void default_wait_pipe(struct trace_iterator *iter); |
446 | void poll_wait_pipe(struct trace_iterator *iter); | 442 | void poll_wait_pipe(struct trace_iterator *iter); |
447 | 443 | ||
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 5b5895afecfe..11ba5bb4ed0a 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id) | |||
14 | 14 | ||
15 | mutex_lock(&event_mutex); | 15 | mutex_lock(&event_mutex); |
16 | list_for_each_entry(event, &ftrace_events, list) { | 16 | list_for_each_entry(event, &ftrace_events, list) { |
17 | if (event->id == event_id) { | 17 | if (event->id == event_id && event->profile_enable) { |
18 | ret = event->profile_enable(event); | 18 | ret = event->profile_enable(event); |
19 | break; | 19 | break; |
20 | } | 20 | } |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 53c8fd376a88..e75276a49cf5 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -376,7 +376,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file) | |||
376 | const struct seq_operations *seq_ops; | 376 | const struct seq_operations *seq_ops; |
377 | 377 | ||
378 | if ((file->f_mode & FMODE_WRITE) && | 378 | if ((file->f_mode & FMODE_WRITE) && |
379 | !(file->f_flags & O_APPEND)) | 379 | (file->f_flags & O_TRUNC)) |
380 | ftrace_clear_events(); | 380 | ftrace_clear_events(); |
381 | 381 | ||
382 | seq_ops = inode->i_private; | 382 | seq_ops = inode->i_private; |
@@ -940,7 +940,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
940 | entry = trace_create_file("enable", 0644, call->dir, call, | 940 | entry = trace_create_file("enable", 0644, call->dir, call, |
941 | enable); | 941 | enable); |
942 | 942 | ||
943 | if (call->id) | 943 | if (call->id && call->profile_enable) |
944 | entry = trace_create_file("id", 0444, call->dir, call, | 944 | entry = trace_create_file("id", 0444, call->dir, call, |
945 | id); | 945 | id); |
946 | 946 | ||
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 936c621bbf46..f32dc9d1ea7b 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
624 | return -ENOSPC; | 624 | return -ENOSPC; |
625 | } | 625 | } |
626 | 626 | ||
627 | filter->preds[filter->n_preds] = pred; | ||
628 | filter->n_preds++; | ||
629 | |||
630 | list_for_each_entry(call, &ftrace_events, list) { | 627 | list_for_each_entry(call, &ftrace_events, list) { |
631 | 628 | ||
632 | if (!call->define_fields) | 629 | if (!call->define_fields) |
@@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
643 | } | 640 | } |
644 | replace_filter_string(call->filter, filter_string); | 641 | replace_filter_string(call->filter, filter_string); |
645 | } | 642 | } |
643 | |||
644 | filter->preds[filter->n_preds] = pred; | ||
645 | filter->n_preds++; | ||
646 | out: | 646 | out: |
647 | return err; | 647 | return err; |
648 | } | 648 | } |
@@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system, | |||
1029 | 1029 | ||
1030 | if (elt->op == OP_AND || elt->op == OP_OR) { | 1030 | if (elt->op == OP_AND || elt->op == OP_OR) { |
1031 | pred = create_logical_pred(elt->op); | 1031 | pred = create_logical_pred(elt->op); |
1032 | if (!pred) | ||
1033 | return -ENOMEM; | ||
1032 | if (call) { | 1034 | if (call) { |
1033 | err = filter_add_pred(ps, call, pred); | 1035 | err = filter_add_pred(ps, call, pred); |
1034 | filter_free_pred(pred); | 1036 | filter_free_pred(pred); |
1035 | } else | 1037 | } else { |
1036 | err = filter_add_subsystem_pred(ps, system, | 1038 | err = filter_add_subsystem_pred(ps, system, |
1037 | pred, filter_string); | 1039 | pred, filter_string); |
1040 | if (err) | ||
1041 | filter_free_pred(pred); | ||
1042 | } | ||
1038 | if (err) | 1043 | if (err) |
1039 | return err; | 1044 | return err; |
1040 | 1045 | ||
@@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system, | |||
1048 | } | 1053 | } |
1049 | 1054 | ||
1050 | pred = create_pred(elt->op, operand1, operand2); | 1055 | pred = create_pred(elt->op, operand1, operand2); |
1056 | if (!pred) | ||
1057 | return -ENOMEM; | ||
1051 | if (call) { | 1058 | if (call) { |
1052 | err = filter_add_pred(ps, call, pred); | 1059 | err = filter_add_pred(ps, call, pred); |
1053 | filter_free_pred(pred); | 1060 | filter_free_pred(pred); |
1054 | } else | 1061 | } else { |
1055 | err = filter_add_subsystem_pred(ps, system, pred, | 1062 | err = filter_add_subsystem_pred(ps, system, pred, |
1056 | filter_string); | 1063 | filter_string); |
1064 | if (err) | ||
1065 | filter_free_pred(pred); | ||
1066 | } | ||
1057 | if (err) | 1067 | if (err) |
1058 | return err; | 1068 | return err; |
1059 | 1069 | ||
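The filter changes add two defensive steps: check that create_pred()/create_logical_pred() actually returned a predicate before using it, and free the predicate when filter_add_subsystem_pred() fails, since ownership was never handed over. A short userspace sketch of that allocate, check, and free-on-failed-hand-off pattern; make_pred() and add_pred() are hypothetical stand-ins.

```c
#include <stdio.h>
#include <stdlib.h>

struct pred { char field[32]; int value; };

/* Illustrative allocator, standing in for create_pred(). */
static struct pred *make_pred(const char *field, int value)
{
	struct pred *p = malloc(sizeof(*p));
	if (!p)
		return NULL;
	snprintf(p->field, sizeof(p->field), "%s", field);
	p->value = value;
	return p;
}

/* Consumer that may reject the predicate; it does NOT take ownership on
 * failure, so the caller must free it, mirroring the pattern the patch
 * adds around filter_add_subsystem_pred(). */
static int add_pred(struct pred *p)
{
	return p->value < 0 ? -1 : 0;	/* pretend negative values are invalid */
}

int main(void)
{
	struct pred *p = make_pred("common_pid", -1);
	if (!p)
		return 1;		/* allocation failure: bail out early */

	if (add_pred(p) != 0) {
		free(p);		/* failed hand-off: release it here */
		fprintf(stderr, "predicate rejected\n");
		return 1;
	}
	free(p);
	return 0;
}
```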
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index d2249abafb53..420ec3487579 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -843,9 +843,16 @@ print_graph_function(struct trace_iterator *iter) | |||
843 | 843 | ||
844 | switch (entry->type) { | 844 | switch (entry->type) { |
845 | case TRACE_GRAPH_ENT: { | 845 | case TRACE_GRAPH_ENT: { |
846 | struct ftrace_graph_ent_entry *field; | 846 | /* |
847 | * print_graph_entry() may consume the current event, | ||
848 | * thus @field may become invalid, so we need to save it. | ||
849 | * sizeof(struct ftrace_graph_ent_entry) is very small, | ||
850 | * so it can safely be saved on the stack. | ||
851 | */ | ||
852 | struct ftrace_graph_ent_entry *field, saved; | ||
847 | trace_assign_type(field, entry); | 853 | trace_assign_type(field, entry); |
848 | return print_graph_entry(field, s, iter); | 854 | saved = *field; |
855 | return print_graph_entry(&saved, s, iter); | ||
849 | } | 856 | } |
850 | case TRACE_GRAPH_RET: { | 857 | case TRACE_GRAPH_RET: { |
851 | struct ftrace_graph_ret_entry *field; | 858 | struct ftrace_graph_ret_entry *field; |
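print_graph_entry() may consume the event, so the patch copies the small ftrace_graph_ent_entry onto the stack before the backing ring-buffer page can be recycled. The sketch below shows the same copy-by-value-before-reuse idea with a hypothetical one-slot buffer.

```c
#include <stdio.h>
#include <string.h>

struct ent { unsigned long ip; int depth; };

/* A one-slot "ring buffer" slot that later events will overwrite. */
static struct ent slot;

static struct ent *peek_event(void)
{
	return &slot;			/* pointer into reusable storage */
}

static void consume_event(void)
{
	memset(&slot, 0, sizeof(slot));	/* storage recycled for the next event */
}

int main(void)
{
	slot.ip = 0xc0ffee;
	slot.depth = 3;

	struct ent *field = peek_event();
	struct ent saved = *field;	/* small struct: copy it before consuming */

	consume_event();		/* would invalidate *field */
	printf("ip=%#lx depth=%d\n", saved.ip, saved.depth);	/* still intact */
	return 0;
}
```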
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 7b6278110827..687699d365ae 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -176,7 +176,7 @@ static int t_show(struct seq_file *m, void *v) | |||
176 | const char *str = *fmt; | 176 | const char *str = *fmt; |
177 | int i; | 177 | int i; |
178 | 178 | ||
179 | seq_printf(m, "0x%lx : \"", (unsigned long)fmt); | 179 | seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt); |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * Tabs and new lines need to be converted. | 182 | * Tabs and new lines need to be converted. |
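In t_show() the iterator position is a pointer to the slot that holds the format-string pointer, so printing the format's address needs one more dereference; the old code printed the slot's own address. A tiny sketch of the difference (variable names are illustrative).

```c
#include <stdio.h>

int main(void)
{
	const char *fmt = "hello %d\n";	/* the format string itself */
	const char **slot = &fmt;	/* the iterator points at this slot */

	/* The old code printed the slot's address; the fix prints the value
	 * stored in the slot, i.e. the format string's address (the kernel
	 * does it by casting the slot pointer and dereferencing). */
	printf("slot address : %#lx\n", (unsigned long)slot);
	printf("stored value : %#lx\n", (unsigned long)*slot);
	printf("same as fmt  : %d\n", (unsigned long)*slot == (unsigned long)fmt);
	return 0;
}
```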
diff --git a/kernel/wait.c b/kernel/wait.c index ea7c3b4275cf..c4bd3d825f35 100644 --- a/kernel/wait.c +++ b/kernel/wait.c | |||
@@ -10,13 +10,14 @@ | |||
10 | #include <linux/wait.h> | 10 | #include <linux/wait.h> |
11 | #include <linux/hash.h> | 11 | #include <linux/hash.h> |
12 | 12 | ||
13 | void init_waitqueue_head(wait_queue_head_t *q) | 13 | void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key) |
14 | { | 14 | { |
15 | spin_lock_init(&q->lock); | 15 | spin_lock_init(&q->lock); |
16 | lockdep_set_class(&q->lock, key); | ||
16 | INIT_LIST_HEAD(&q->task_list); | 17 | INIT_LIST_HEAD(&q->task_list); |
17 | } | 18 | } |
18 | 19 | ||
19 | EXPORT_SYMBOL(init_waitqueue_head); | 20 | EXPORT_SYMBOL(__init_waitqueue_head); |
20 | 21 | ||
21 | void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) | 22 | void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) |
22 | { | 23 | { |
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 708e2a86d87b..600f473a5610 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c | |||
@@ -45,12 +45,14 @@ | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | 47 | ||
48 | #ifndef STATIC | 48 | #ifdef STATIC |
49 | #define PREBOOT | ||
50 | #else | ||
49 | #include <linux/decompress/bunzip2.h> | 51 | #include <linux/decompress/bunzip2.h> |
50 | #endif /* !STATIC */ | 52 | #include <linux/slab.h> |
53 | #endif /* STATIC */ | ||
51 | 54 | ||
52 | #include <linux/decompress/mm.h> | 55 | #include <linux/decompress/mm.h> |
53 | #include <linux/slab.h> | ||
54 | 56 | ||
55 | #ifndef INT_MAX | 57 | #ifndef INT_MAX |
56 | #define INT_MAX 0x7fffffff | 58 | #define INT_MAX 0x7fffffff |
@@ -681,9 +683,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
681 | set_error_fn(error_fn); | 683 | set_error_fn(error_fn); |
682 | if (flush) | 684 | if (flush) |
683 | outbuf = malloc(BZIP2_IOBUF_SIZE); | 685 | outbuf = malloc(BZIP2_IOBUF_SIZE); |
684 | else | 686 | |
685 | len -= 4; /* Uncompressed size hack active in pre-boot | ||
686 | environment */ | ||
687 | if (!outbuf) { | 687 | if (!outbuf) { |
688 | error("Could not allocate output bufer"); | 688 | error("Could not allocate output bufer"); |
689 | return -1; | 689 | return -1; |
@@ -733,4 +733,14 @@ exit_0: | |||
733 | return i; | 733 | return i; |
734 | } | 734 | } |
735 | 735 | ||
736 | #define decompress bunzip2 | 736 | #ifdef PREBOOT |
737 | STATIC int INIT decompress(unsigned char *buf, int len, | ||
738 | int(*fill)(void*, unsigned int), | ||
739 | int(*flush)(void*, unsigned int), | ||
740 | unsigned char *outbuf, | ||
741 | int *pos, | ||
742 | void(*error_fn)(char *x)) | ||
743 | { | ||
744 | return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn); | ||
745 | } | ||
746 | #endif | ||
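Instead of having bunzip2() subtract the 4-byte uncompressed-size trailer whenever flush is NULL, the patch adds a PREBOOT-only decompress() wrapper that strips it before calling the real routine (the unlzma change further down does the same). A sketch of that thin-wrapper idea; bunzip2_like() is a stand-in for the decompressor, and the trailer size is the only real detail carried over.

```c
#include <stdio.h>

/* Stand-in for the real decompressor: it only reports how many input
 * bytes it was told to consume. */
static int bunzip2_like(const unsigned char *buf, int len)
{
	(void)buf;
	printf("decompressing %d payload bytes\n", len);
	return 0;
}

/*
 * Pre-boot images carry a 4-byte "uncompressed size" trailer appended by
 * the build; it is not part of the compressed stream. Keeping the
 * adjustment in a thin wrapper means the core decompressor no longer has
 * to infer it from whether a flush callback was supplied.
 */
static int decompress_preboot(const unsigned char *buf, int len)
{
	return bunzip2_like(buf, len - 4);
}

int main(void)
{
	unsigned char image[132] = { 0 };	/* 128 stream bytes + 4-byte trailer */
	return decompress_preboot(image, (int)sizeof(image));
}
```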
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index e36b296fc9f8..68dfce59c1b8 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c | |||
@@ -19,13 +19,13 @@ | |||
19 | #include "zlib_inflate/inflate.h" | 19 | #include "zlib_inflate/inflate.h" |
20 | 20 | ||
21 | #include "zlib_inflate/infutil.h" | 21 | #include "zlib_inflate/infutil.h" |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #endif /* STATIC */ | 24 | #endif /* STATIC */ |
24 | 25 | ||
25 | #include <linux/decompress/mm.h> | 26 | #include <linux/decompress/mm.h> |
26 | #include <linux/slab.h> | ||
27 | 27 | ||
28 | #define INBUF_LEN (16*1024) | 28 | #define GZIP_IOBUF_SIZE (16*1024) |
29 | 29 | ||
30 | /* Included from initramfs et al code */ | 30 | /* Included from initramfs et al code */ |
31 | STATIC int INIT gunzip(unsigned char *buf, int len, | 31 | STATIC int INIT gunzip(unsigned char *buf, int len, |
@@ -55,7 +55,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len, | |||
55 | if (buf) | 55 | if (buf) |
56 | zbuf = buf; | 56 | zbuf = buf; |
57 | else { | 57 | else { |
58 | zbuf = malloc(INBUF_LEN); | 58 | zbuf = malloc(GZIP_IOBUF_SIZE); |
59 | len = 0; | 59 | len = 0; |
60 | } | 60 | } |
61 | if (!zbuf) { | 61 | if (!zbuf) { |
@@ -77,7 +77,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len, | |||
77 | } | 77 | } |
78 | 78 | ||
79 | if (len == 0) | 79 | if (len == 0) |
80 | len = fill(zbuf, INBUF_LEN); | 80 | len = fill(zbuf, GZIP_IOBUF_SIZE); |
81 | 81 | ||
82 | /* verify the gzip header */ | 82 | /* verify the gzip header */ |
83 | if (len < 10 || | 83 | if (len < 10 || |
@@ -113,7 +113,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len, | |||
113 | while (rc == Z_OK) { | 113 | while (rc == Z_OK) { |
114 | if (strm->avail_in == 0) { | 114 | if (strm->avail_in == 0) { |
115 | /* TODO: handle case where both pos and fill are set */ | 115 | /* TODO: handle case where both pos and fill are set */ |
116 | len = fill(zbuf, INBUF_LEN); | 116 | len = fill(zbuf, GZIP_IOBUF_SIZE); |
117 | if (len < 0) { | 117 | if (len < 0) { |
118 | rc = -1; | 118 | rc = -1; |
119 | error("read error"); | 119 | error("read error"); |
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c index 32123a1340e6..0b954e04bd30 100644 --- a/lib/decompress_unlzma.c +++ b/lib/decompress_unlzma.c | |||
@@ -29,12 +29,14 @@ | |||
29 | *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 29 | *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #ifndef STATIC | 32 | #ifdef STATIC |
33 | #define PREBOOT | ||
34 | #else | ||
33 | #include <linux/decompress/unlzma.h> | 35 | #include <linux/decompress/unlzma.h> |
36 | #include <linux/slab.h> | ||
34 | #endif /* STATIC */ | 37 | #endif /* STATIC */ |
35 | 38 | ||
36 | #include <linux/decompress/mm.h> | 39 | #include <linux/decompress/mm.h> |
37 | #include <linux/slab.h> | ||
38 | 40 | ||
39 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) | 41 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) |
40 | 42 | ||
@@ -543,9 +545,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len, | |||
543 | int ret = -1; | 545 | int ret = -1; |
544 | 546 | ||
545 | set_error_fn(error_fn); | 547 | set_error_fn(error_fn); |
546 | if (!flush) | 548 | |
547 | in_len -= 4; /* Uncompressed size hack active in pre-boot | ||
548 | environment */ | ||
549 | if (buf) | 549 | if (buf) |
550 | inbuf = buf; | 550 | inbuf = buf; |
551 | else | 551 | else |
@@ -645,4 +645,15 @@ exit_0: | |||
645 | return ret; | 645 | return ret; |
646 | } | 646 | } |
647 | 647 | ||
648 | #define decompress unlzma | 648 | #ifdef PREBOOT |
649 | STATIC int INIT decompress(unsigned char *buf, int in_len, | ||
650 | int(*fill)(void*, unsigned int), | ||
651 | int(*flush)(void*, unsigned int), | ||
652 | unsigned char *output, | ||
653 | int *posp, | ||
654 | void(*error_fn)(char *x) | ||
655 | ) | ||
656 | { | ||
657 | return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn); | ||
658 | } | ||
659 | #endif | ||
diff --git a/lib/flex_array.c b/lib/flex_array.c index 0e7894ce8882..08f1636d296a 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c | |||
@@ -254,7 +254,6 @@ void *flex_array_get(struct flex_array *fa, int element_nr) | |||
254 | { | 254 | { |
255 | int part_nr = fa_element_to_part_nr(fa, element_nr); | 255 | int part_nr = fa_element_to_part_nr(fa, element_nr); |
256 | struct flex_array_part *part; | 256 | struct flex_array_part *part; |
257 | int index; | ||
258 | 257 | ||
259 | if (element_nr >= fa->total_nr_elements) | 258 | if (element_nr >= fa->total_nr_elements) |
260 | return NULL; | 259 | return NULL; |
@@ -264,6 +263,5 @@ void *flex_array_get(struct flex_array *fa, int element_nr) | |||
264 | part = (struct flex_array_part *)&fa->parts[0]; | 263 | part = (struct flex_array_part *)&fa->parts[0]; |
265 | else | 264 | else |
266 | part = fa->parts[part_nr]; | 265 | part = fa->parts[part_nr]; |
267 | index = index_inside_part(fa, element_nr); | ||
268 | return &part->elements[index_inside_part(fa, element_nr)]; | 266 | return &part->elements[index_inside_part(fa, element_nr)]; |
269 | } | 267 | } |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index e08e2c4da63a..7dd9d9f80694 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -191,25 +191,27 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes) | |||
191 | * Must be called holding task's alloc_lock to protect task's mems_allowed | 191 | * Must be called holding task's alloc_lock to protect task's mems_allowed |
192 | * and mempolicy. May also be called holding the mmap_semaphore for write. | 192 | * and mempolicy. May also be called holding the mmap_semaphore for write. |
193 | */ | 193 | */ |
194 | static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes) | 194 | static int mpol_set_nodemask(struct mempolicy *pol, |
195 | const nodemask_t *nodes, struct nodemask_scratch *nsc) | ||
195 | { | 196 | { |
196 | nodemask_t cpuset_context_nmask; | ||
197 | int ret; | 197 | int ret; |
198 | 198 | ||
199 | /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ | 199 | /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ |
200 | if (pol == NULL) | 200 | if (pol == NULL) |
201 | return 0; | 201 | return 0; |
202 | /* Check N_HIGH_MEMORY */ | ||
203 | nodes_and(nsc->mask1, | ||
204 | cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]); | ||
202 | 205 | ||
203 | VM_BUG_ON(!nodes); | 206 | VM_BUG_ON(!nodes); |
204 | if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) | 207 | if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) |
205 | nodes = NULL; /* explicit local allocation */ | 208 | nodes = NULL; /* explicit local allocation */ |
206 | else { | 209 | else { |
207 | if (pol->flags & MPOL_F_RELATIVE_NODES) | 210 | if (pol->flags & MPOL_F_RELATIVE_NODES) |
208 | mpol_relative_nodemask(&cpuset_context_nmask, nodes, | 211 | mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1); |
209 | &cpuset_current_mems_allowed); | ||
210 | else | 212 | else |
211 | nodes_and(cpuset_context_nmask, *nodes, | 213 | nodes_and(nsc->mask2, *nodes, nsc->mask1); |
212 | cpuset_current_mems_allowed); | 214 | |
213 | if (mpol_store_user_nodemask(pol)) | 215 | if (mpol_store_user_nodemask(pol)) |
214 | pol->w.user_nodemask = *nodes; | 216 | pol->w.user_nodemask = *nodes; |
215 | else | 217 | else |
@@ -217,8 +219,10 @@ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes) | |||
217 | cpuset_current_mems_allowed; | 219 | cpuset_current_mems_allowed; |
218 | } | 220 | } |
219 | 221 | ||
220 | ret = mpol_ops[pol->mode].create(pol, | 222 | if (nodes) |
221 | nodes ? &cpuset_context_nmask : NULL); | 223 | ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); |
224 | else | ||
225 | ret = mpol_ops[pol->mode].create(pol, NULL); | ||
222 | return ret; | 226 | return ret; |
223 | } | 227 | } |
224 | 228 | ||
@@ -620,12 +624,17 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, | |||
620 | { | 624 | { |
621 | struct mempolicy *new, *old; | 625 | struct mempolicy *new, *old; |
622 | struct mm_struct *mm = current->mm; | 626 | struct mm_struct *mm = current->mm; |
627 | NODEMASK_SCRATCH(scratch); | ||
623 | int ret; | 628 | int ret; |
624 | 629 | ||
625 | new = mpol_new(mode, flags, nodes); | 630 | if (!scratch) |
626 | if (IS_ERR(new)) | 631 | return -ENOMEM; |
627 | return PTR_ERR(new); | ||
628 | 632 | ||
633 | new = mpol_new(mode, flags, nodes); | ||
634 | if (IS_ERR(new)) { | ||
635 | ret = PTR_ERR(new); | ||
636 | goto out; | ||
637 | } | ||
629 | /* | 638 | /* |
630 | * prevent changing our mempolicy while show_numa_maps() | 639 | * prevent changing our mempolicy while show_numa_maps() |
631 | * is using it. | 640 | * is using it. |
@@ -635,13 +644,13 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, | |||
635 | if (mm) | 644 | if (mm) |
636 | down_write(&mm->mmap_sem); | 645 | down_write(&mm->mmap_sem); |
637 | task_lock(current); | 646 | task_lock(current); |
638 | ret = mpol_set_nodemask(new, nodes); | 647 | ret = mpol_set_nodemask(new, nodes, scratch); |
639 | if (ret) { | 648 | if (ret) { |
640 | task_unlock(current); | 649 | task_unlock(current); |
641 | if (mm) | 650 | if (mm) |
642 | up_write(&mm->mmap_sem); | 651 | up_write(&mm->mmap_sem); |
643 | mpol_put(new); | 652 | mpol_put(new); |
644 | return ret; | 653 | goto out; |
645 | } | 654 | } |
646 | old = current->mempolicy; | 655 | old = current->mempolicy; |
647 | current->mempolicy = new; | 656 | current->mempolicy = new; |
@@ -654,7 +663,10 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags, | |||
654 | up_write(&mm->mmap_sem); | 663 | up_write(&mm->mmap_sem); |
655 | 664 | ||
656 | mpol_put(old); | 665 | mpol_put(old); |
657 | return 0; | 666 | ret = 0; |
667 | out: | ||
668 | NODEMASK_SCRATCH_FREE(scratch); | ||
669 | return ret; | ||
658 | } | 670 | } |
659 | 671 | ||
660 | /* | 672 | /* |
@@ -1014,12 +1026,20 @@ static long do_mbind(unsigned long start, unsigned long len, | |||
1014 | if (err) | 1026 | if (err) |
1015 | return err; | 1027 | return err; |
1016 | } | 1028 | } |
1017 | down_write(&mm->mmap_sem); | 1029 | { |
1018 | task_lock(current); | 1030 | NODEMASK_SCRATCH(scratch); |
1019 | err = mpol_set_nodemask(new, nmask); | 1031 | if (scratch) { |
1020 | task_unlock(current); | 1032 | down_write(&mm->mmap_sem); |
1033 | task_lock(current); | ||
1034 | err = mpol_set_nodemask(new, nmask, scratch); | ||
1035 | task_unlock(current); | ||
1036 | if (err) | ||
1037 | up_write(&mm->mmap_sem); | ||
1038 | } else | ||
1039 | err = -ENOMEM; | ||
1040 | NODEMASK_SCRATCH_FREE(scratch); | ||
1041 | } | ||
1021 | if (err) { | 1042 | if (err) { |
1022 | up_write(&mm->mmap_sem); | ||
1023 | mpol_put(new); | 1043 | mpol_put(new); |
1024 | return err; | 1044 | return err; |
1025 | } | 1045 | } |
@@ -1891,6 +1911,7 @@ restart: | |||
1891 | * Install non-NULL @mpol in inode's shared policy rb-tree. | 1911 | * Install non-NULL @mpol in inode's shared policy rb-tree. |
1892 | * On entry, the current task has a reference on a non-NULL @mpol. | 1912 | * On entry, the current task has a reference on a non-NULL @mpol. |
1893 | * This must be released on exit. | 1913 | * This must be released on exit. |
1914 | * This is called at get_inode() calls and we can use GFP_KERNEL. | ||
1914 | * This is called at get_inode() time, so we can use GFP_KERNEL. | ||
1894 | */ | 1915 | */ |
1895 | void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) | 1916 | void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) |
1896 | { | 1917 | { |
@@ -1902,19 +1923,24 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) | |||
1902 | if (mpol) { | 1923 | if (mpol) { |
1903 | struct vm_area_struct pvma; | 1924 | struct vm_area_struct pvma; |
1904 | struct mempolicy *new; | 1925 | struct mempolicy *new; |
1926 | NODEMASK_SCRATCH(scratch); | ||
1905 | 1927 | ||
1928 | if (!scratch) | ||
1929 | return; | ||
1906 | /* contextualize the tmpfs mount point mempolicy */ | 1930 | /* contextualize the tmpfs mount point mempolicy */ |
1907 | new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); | 1931 | new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); |
1908 | if (IS_ERR(new)) { | 1932 | if (IS_ERR(new)) { |
1909 | mpol_put(mpol); /* drop our ref on sb mpol */ | 1933 | mpol_put(mpol); /* drop our ref on sb mpol */ |
1934 | NODEMASK_SCRATCH_FREE(scratch); | ||
1910 | return; /* no valid nodemask intersection */ | 1935 | return; /* no valid nodemask intersection */ |
1911 | } | 1936 | } |
1912 | 1937 | ||
1913 | task_lock(current); | 1938 | task_lock(current); |
1914 | ret = mpol_set_nodemask(new, &mpol->w.user_nodemask); | 1939 | ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); |
1915 | task_unlock(current); | 1940 | task_unlock(current); |
1916 | mpol_put(mpol); /* drop our ref on sb mpol */ | 1941 | mpol_put(mpol); /* drop our ref on sb mpol */ |
1917 | if (ret) { | 1942 | if (ret) { |
1943 | NODEMASK_SCRATCH_FREE(scratch); | ||
1918 | mpol_put(new); | 1944 | mpol_put(new); |
1919 | return; | 1945 | return; |
1920 | } | 1946 | } |
@@ -1924,6 +1950,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) | |||
1924 | pvma.vm_end = TASK_SIZE; /* policy covers entire file */ | 1950 | pvma.vm_end = TASK_SIZE; /* policy covers entire file */ |
1925 | mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ | 1951 | mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ |
1926 | mpol_put(new); /* drop initial ref */ | 1952 | mpol_put(new); /* drop initial ref */ |
1953 | NODEMASK_SCRATCH_FREE(scratch); | ||
1927 | } | 1954 | } |
1928 | } | 1955 | } |
1929 | 1956 | ||
@@ -2140,13 +2167,18 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) | |||
2140 | err = 1; | 2167 | err = 1; |
2141 | else { | 2168 | else { |
2142 | int ret; | 2169 | int ret; |
2143 | 2170 | NODEMASK_SCRATCH(scratch); | |
2144 | task_lock(current); | 2171 | if (scratch) { |
2145 | ret = mpol_set_nodemask(new, &nodes); | 2172 | task_lock(current); |
2146 | task_unlock(current); | 2173 | ret = mpol_set_nodemask(new, &nodes, scratch); |
2147 | if (ret) | 2174 | task_unlock(current); |
2175 | } else | ||
2176 | ret = -ENOMEM; | ||
2177 | NODEMASK_SCRATCH_FREE(scratch); | ||
2178 | if (ret) { | ||
2148 | err = 1; | 2179 | err = 1; |
2149 | else if (no_context) { | 2180 | mpol_put(new); |
2181 | } else if (no_context) { | ||
2150 | /* save for contextualization */ | 2182 | /* save for contextualization */ |
2151 | new->w.user_nodemask = nodes; | 2183 | new->w.user_nodemask = nodes; |
2152 | } | 2184 | } |
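mpol_set_nodemask() now works in a NODEMASK_SCRATCH area because, with a large number of possible nodes, a couple of nodemask_t temporaries can overflow the kernel stack; every caller allocates the scratch, passes it down, and frees it on all exit paths. A userspace sketch of the allocate-scratch-instead-of-stack pattern, with sizes and helper names picked purely for illustration.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* With thousands of possible nodes, a pair of bitmasks can be too big
 * for a small stack frame, so they are allocated instead. */
#define MAX_NODES 4096

struct nodemask { unsigned long bits[MAX_NODES / (8 * sizeof(unsigned long))]; };

struct nodemask_scratch {
	struct nodemask mask1;
	struct nodemask mask2;
};

static int set_nodemask(const struct nodemask *nodes, struct nodemask_scratch *nsc)
{
	/* work in the heap-allocated scratch area instead of local variables */
	memcpy(&nsc->mask1, nodes, sizeof(nsc->mask1));
	memcpy(&nsc->mask2, nodes, sizeof(nsc->mask2));
	return 0;
}

int main(void)
{
	struct nodemask nodes = { .bits = { 0x5 } };	/* nodes 0 and 2 */
	struct nodemask_scratch *scratch = malloc(sizeof(*scratch));
	int ret;

	if (!scratch)
		return 1;		/* like the -ENOMEM path in the patch */

	ret = set_nodemask(&nodes, scratch);

	free(scratch);			/* freed on every exit path */
	printf("ret=%d\n", ret);
	return ret ? 1 : 0;
}
```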
diff --git a/mm/mempool.c b/mm/mempool.c index a46eb1b4bb66..32e75d400503 100644 --- a/mm/mempool.c +++ b/mm/mempool.c | |||
@@ -303,14 +303,14 @@ EXPORT_SYMBOL(mempool_free_slab); | |||
303 | */ | 303 | */ |
304 | void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) | 304 | void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) |
305 | { | 305 | { |
306 | size_t size = (size_t)(long)pool_data; | 306 | size_t size = (size_t)pool_data; |
307 | return kmalloc(size, gfp_mask); | 307 | return kmalloc(size, gfp_mask); |
308 | } | 308 | } |
309 | EXPORT_SYMBOL(mempool_kmalloc); | 309 | EXPORT_SYMBOL(mempool_kmalloc); |
310 | 310 | ||
311 | void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) | 311 | void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) |
312 | { | 312 | { |
313 | size_t size = (size_t) pool_data; | 313 | size_t size = (size_t)pool_data; |
314 | return kzalloc(size, gfp_mask); | 314 | return kzalloc(size, gfp_mask); |
315 | } | 315 | } |
316 | EXPORT_SYMBOL(mempool_kzalloc); | 316 | EXPORT_SYMBOL(mempool_kzalloc); |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index e50566ebf9f9..94b3388c188b 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -2080,28 +2080,41 @@ static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); | |||
2080 | /* ---- Initialization ---- */ | 2080 | /* ---- Initialization ---- */ |
2081 | static int __init rfcomm_init(void) | 2081 | static int __init rfcomm_init(void) |
2082 | { | 2082 | { |
2083 | int ret; | ||
2084 | |||
2083 | l2cap_load(); | 2085 | l2cap_load(); |
2084 | 2086 | ||
2085 | hci_register_cb(&rfcomm_cb); | 2087 | hci_register_cb(&rfcomm_cb); |
2086 | 2088 | ||
2087 | rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); | 2089 | rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); |
2088 | if (IS_ERR(rfcomm_thread)) { | 2090 | if (IS_ERR(rfcomm_thread)) { |
2089 | hci_unregister_cb(&rfcomm_cb); | 2091 | ret = PTR_ERR(rfcomm_thread); |
2090 | return PTR_ERR(rfcomm_thread); | 2092 | goto out_thread; |
2091 | } | 2093 | } |
2092 | 2094 | ||
2093 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) | 2095 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) |
2094 | BT_ERR("Failed to create RFCOMM info file"); | 2096 | BT_ERR("Failed to create RFCOMM info file"); |
2095 | 2097 | ||
2096 | rfcomm_init_sockets(); | 2098 | ret = rfcomm_init_ttys(); |
2099 | if (ret) | ||
2100 | goto out_tty; | ||
2097 | 2101 | ||
2098 | #ifdef CONFIG_BT_RFCOMM_TTY | 2102 | ret = rfcomm_init_sockets(); |
2099 | rfcomm_init_ttys(); | 2103 | if (ret) |
2100 | #endif | 2104 | goto out_sock; |
2101 | 2105 | ||
2102 | BT_INFO("RFCOMM ver %s", VERSION); | 2106 | BT_INFO("RFCOMM ver %s", VERSION); |
2103 | 2107 | ||
2104 | return 0; | 2108 | return 0; |
2109 | |||
2110 | out_sock: | ||
2111 | rfcomm_cleanup_ttys(); | ||
2112 | out_tty: | ||
2113 | kthread_stop(rfcomm_thread); | ||
2114 | out_thread: | ||
2115 | hci_unregister_cb(&rfcomm_cb); | ||
2116 | |||
2117 | return ret; | ||
2105 | } | 2118 | } |
2106 | 2119 | ||
2107 | static void __exit rfcomm_exit(void) | 2120 | static void __exit rfcomm_exit(void) |
@@ -2112,9 +2125,7 @@ static void __exit rfcomm_exit(void) | |||
2112 | 2125 | ||
2113 | kthread_stop(rfcomm_thread); | 2126 | kthread_stop(rfcomm_thread); |
2114 | 2127 | ||
2115 | #ifdef CONFIG_BT_RFCOMM_TTY | ||
2116 | rfcomm_cleanup_ttys(); | 2128 | rfcomm_cleanup_ttys(); |
2117 | #endif | ||
2118 | 2129 | ||
2119 | rfcomm_cleanup_sockets(); | 2130 | rfcomm_cleanup_sockets(); |
2120 | } | 2131 | } |
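rfcomm_init() is reworked into the usual kernel error-unwinding shape: each setup step that can fail jumps to a label that undoes only what was already registered, in reverse order. A compact standalone sketch of that goto-unwind pattern, with init_sockets() forced to fail so the unwind path runs; all functions here are illustrative.

```c
#include <stdio.h>

static int start_thread(void)  { puts("thread started");      return 0; }
static void stop_thread(void)  { puts("thread stopped");                }
static int init_ttys(void)     { puts("ttys ready");          return 0; }
static void cleanup_ttys(void) { puts("ttys cleaned up");               }
static int init_sockets(void)  { puts("socket setup failed"); return -1; } /* force the unwind */

/* Classic kernel-style init: each failure jumps to a label that undoes
 * only what was already set up, in reverse order. */
static int module_init_like(void)
{
	int ret;

	ret = start_thread();
	if (ret)
		goto out;

	ret = init_ttys();
	if (ret)
		goto out_thread;

	ret = init_sockets();
	if (ret)
		goto out_tty;

	return 0;

out_tty:
	cleanup_ttys();
out_thread:
	stop_thread();
out:
	return ret;
}

int main(void)
{
	return module_init_like() ? 1 : 0;
}
```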
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7f482784e9f7..0b85e8116859 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -1132,7 +1132,7 @@ error: | |||
1132 | return err; | 1132 | return err; |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | void __exit rfcomm_cleanup_sockets(void) | 1135 | void rfcomm_cleanup_sockets(void) |
1136 | { | 1136 | { |
1137 | class_remove_file(bt_class, &class_attr_rfcomm); | 1137 | class_remove_file(bt_class, &class_attr_rfcomm); |
1138 | 1138 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 70c27e0c7c32..6a94475aee85 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3865,10 +3865,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr) | |||
3865 | 3865 | ||
3866 | ASSERT_RTNL(); | 3866 | ASSERT_RTNL(); |
3867 | 3867 | ||
3868 | netif_addr_lock_bh(dev); | ||
3868 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, | 3869 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, |
3869 | NETDEV_HW_ADDR_T_UNICAST); | 3870 | NETDEV_HW_ADDR_T_UNICAST); |
3870 | if (!err) | 3871 | if (!err) |
3871 | __dev_set_rx_mode(dev); | 3872 | __dev_set_rx_mode(dev); |
3873 | netif_addr_unlock_bh(dev); | ||
3872 | return err; | 3874 | return err; |
3873 | } | 3875 | } |
3874 | EXPORT_SYMBOL(dev_unicast_delete); | 3876 | EXPORT_SYMBOL(dev_unicast_delete); |
@@ -3889,10 +3891,12 @@ int dev_unicast_add(struct net_device *dev, void *addr) | |||
3889 | 3891 | ||
3890 | ASSERT_RTNL(); | 3892 | ASSERT_RTNL(); |
3891 | 3893 | ||
3894 | netif_addr_lock_bh(dev); | ||
3892 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, | 3895 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, |
3893 | NETDEV_HW_ADDR_T_UNICAST); | 3896 | NETDEV_HW_ADDR_T_UNICAST); |
3894 | if (!err) | 3897 | if (!err) |
3895 | __dev_set_rx_mode(dev); | 3898 | __dev_set_rx_mode(dev); |
3899 | netif_addr_unlock_bh(dev); | ||
3896 | return err; | 3900 | return err; |
3897 | } | 3901 | } |
3898 | EXPORT_SYMBOL(dev_unicast_add); | 3902 | EXPORT_SYMBOL(dev_unicast_add); |
@@ -3949,7 +3953,8 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | |||
3949 | * @from: source device | 3953 | * @from: source device |
3950 | * | 3954 | * |
3951 | * Add newly added addresses to the destination device and release | 3955 | * Add newly added addresses to the destination device and release |
3952 | * addresses that have no users left. | 3956 | * addresses that have no users left. The source device must be |
3957 | * locked by netif_tx_lock_bh. | ||
3953 | * | 3958 | * |
3954 | * This function is intended to be called from the dev->set_rx_mode | 3959 | * This function is intended to be called from the dev->set_rx_mode |
3955 | * function of layered software devices. | 3960 | * function of layered software devices. |
@@ -3958,14 +3963,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from) | |||
3958 | { | 3963 | { |
3959 | int err = 0; | 3964 | int err = 0; |
3960 | 3965 | ||
3961 | ASSERT_RTNL(); | ||
3962 | |||
3963 | if (to->addr_len != from->addr_len) | 3966 | if (to->addr_len != from->addr_len) |
3964 | return -EINVAL; | 3967 | return -EINVAL; |
3965 | 3968 | ||
3969 | netif_addr_lock_bh(to); | ||
3966 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); | 3970 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); |
3967 | if (!err) | 3971 | if (!err) |
3968 | __dev_set_rx_mode(to); | 3972 | __dev_set_rx_mode(to); |
3973 | netif_addr_unlock_bh(to); | ||
3969 | return err; | 3974 | return err; |
3970 | } | 3975 | } |
3971 | EXPORT_SYMBOL(dev_unicast_sync); | 3976 | EXPORT_SYMBOL(dev_unicast_sync); |
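
The kerneldoc above only states the calling convention in prose. As an illustration (not taken from this patch; the driver, its private structure and the lower_dev field are hypothetical), a layered device's set_rx_mode handler would use dev_unicast_sync() roughly as follows:

    /*
     * Hypothetical layered driver, sketched for illustration only.  The
     * networking core invokes ndo_set_rx_mode() while holding the upper
     * device's address lock, so this sketch only hands the lower device to
     * dev_unicast_sync()/dev_mc_sync(), which take the lower device's
     * address lock themselves (dev_unicast_sync() as changed above).
     */
    struct example_priv {
            struct net_device *lower_dev;   /* assumed field name */
    };

    static void example_set_rx_mode(struct net_device *dev)
    {
            struct example_priv *priv = netdev_priv(dev);

            /* push new unicast addresses down, drop the ones with no users */
            dev_unicast_sync(priv->lower_dev, dev);
            dev_mc_sync(priv->lower_dev, dev);
    }
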
@@ -3981,27 +3986,27 @@ EXPORT_SYMBOL(dev_unicast_sync); | |||
3981 | */ | 3986 | */ |
3982 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) | 3987 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) |
3983 | { | 3988 | { |
3984 | ASSERT_RTNL(); | ||
3985 | |||
3986 | if (to->addr_len != from->addr_len) | 3989 | if (to->addr_len != from->addr_len) |
3987 | return; | 3990 | return; |
3988 | 3991 | ||
3992 | netif_addr_lock_bh(from); | ||
3993 | netif_addr_lock(to); | ||
3989 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); | 3994 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); |
3990 | __dev_set_rx_mode(to); | 3995 | __dev_set_rx_mode(to); |
3996 | netif_addr_unlock(to); | ||
3997 | netif_addr_unlock_bh(from); | ||
3991 | } | 3998 | } |
3992 | EXPORT_SYMBOL(dev_unicast_unsync); | 3999 | EXPORT_SYMBOL(dev_unicast_unsync); |
3993 | 4000 | ||
3994 | static void dev_unicast_flush(struct net_device *dev) | 4001 | static void dev_unicast_flush(struct net_device *dev) |
3995 | { | 4002 | { |
3996 | /* rtnl_mutex must be held here */ | 4003 | netif_addr_lock_bh(dev); |
3997 | |||
3998 | __hw_addr_flush(&dev->uc); | 4004 | __hw_addr_flush(&dev->uc); |
4005 | netif_addr_unlock_bh(dev); | ||
3999 | } | 4006 | } |
4000 | 4007 | ||
4001 | static void dev_unicast_init(struct net_device *dev) | 4008 | static void dev_unicast_init(struct net_device *dev) |
4002 | { | 4009 | { |
4003 | /* rtnl_mutex must be held here */ | ||
4004 | |||
4005 | __hw_addr_init(&dev->uc); | 4010 | __hw_addr_init(&dev->uc); |
4006 | } | 4011 | } |
4007 | 4012 | ||
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index b7292a2719dc..197283072cc8 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -488,7 +488,7 @@ int net_assign_generic(struct net *net, int id, void *data) | |||
488 | */ | 488 | */ |
489 | 489 | ||
490 | ng->len = id; | 490 | ng->len = id; |
491 | memcpy(&ng->ptr, &old_ng->ptr, old_ng->len); | 491 | memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); |
492 | 492 | ||
493 | rcu_assign_pointer(net->gen, ng); | 493 | rcu_assign_pointer(net->gen, ng); |
494 | call_rcu(&old_ng->rcu, net_generic_release); | 494 | call_rcu(&old_ng->rcu, net_generic_release); |
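
The one-line fix above is easier to see against the approximate layout of the structure being copied: ng->len counts pointer slots, so the byte count has to be scaled by sizeof(void *). The sketch below is illustrative, not quoted from include/net/netns/generic.h:

    /* Approximate layout, for illustration only. */
    struct net_generic_example {
            unsigned int    len;      /* number of pointer slots, not bytes */
            struct rcu_head rcu;
            void            *ptr[0];  /* the array that memcpy() duplicates */
    };

    /*
     * Copying old_ng->len slots therefore needs old_ng->len * sizeof(void *)
     * bytes; passing old_ng->len alone copied only len bytes, i.e. roughly
     * an eighth of the pointers on a 64-bit kernel.
     */
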
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index c29d75d8f1b1..090e9991ac2a 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -1304,7 +1304,9 @@ static void arp_format_neigh_entry(struct seq_file *seq, | |||
1304 | hbuffer[k++] = hex_asc_lo(n->ha[j]); | 1304 | hbuffer[k++] = hex_asc_lo(n->ha[j]); |
1305 | hbuffer[k++] = ':'; | 1305 | hbuffer[k++] = ':'; |
1306 | } | 1306 | } |
1307 | hbuffer[--k] = 0; | 1307 | if (k != 0) |
1308 | --k; | ||
1309 | hbuffer[k] = 0; | ||
1308 | #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) | 1310 | #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) |
1309 | } | 1311 | } |
1310 | #endif | 1312 | #endif |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index aca22b00b6a3..07e7e41816be 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -721,7 +721,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data) | |||
721 | { | 721 | { |
722 | struct ieee80211_local *local = (void *) data; | 722 | struct ieee80211_local *local = (void *) data; |
723 | 723 | ||
724 | if (local->quiescing) | 724 | if (local->quiescing || local->suspended) |
725 | return; | 725 | return; |
726 | 726 | ||
727 | queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work); | 727 | queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work); |
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 7a549f9deb96..5e3d476972f9 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -55,15 +55,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
55 | 55 | ||
56 | rcu_read_unlock(); | 56 | rcu_read_unlock(); |
57 | 57 | ||
58 | /* flush again, in case driver queued work */ | ||
59 | flush_workqueue(local->hw.workqueue); | ||
60 | |||
61 | /* stop hardware - this must stop RX */ | ||
62 | if (local->open_count) { | ||
63 | ieee80211_led_radio(local, false); | ||
64 | drv_stop(local); | ||
65 | } | ||
66 | |||
67 | /* remove STAs */ | 58 | /* remove STAs */ |
68 | spin_lock_irqsave(&local->sta_lock, flags); | 59 | spin_lock_irqsave(&local->sta_lock, flags); |
69 | list_for_each_entry(sta, &local->sta_list, list) { | 60 | list_for_each_entry(sta, &local->sta_list, list) { |
@@ -111,7 +102,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
111 | drv_remove_interface(local, &conf); | 102 | drv_remove_interface(local, &conf); |
112 | } | 103 | } |
113 | 104 | ||
105 | /* stop hardware - this must stop RX */ | ||
106 | if (local->open_count) { | ||
107 | ieee80211_led_radio(local, false); | ||
108 | drv_stop(local); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * flush again, in case the driver queued work -- it | ||
113 | * shouldn't be doing that (or should cancel everything | ||
114 | * in the stop callback), but better safe than sorry. | ||
115 | */ | ||
116 | flush_workqueue(local->hw.workqueue); | ||
117 | |||
114 | local->suspended = true; | 118 | local->suspended = true; |
119 | /* need suspended to be visible before quiescing is false */ | ||
120 | barrier(); | ||
115 | local->quiescing = false; | 121 | local->quiescing = false; |
116 | 122 | ||
117 | return 0; | 123 | return 0; |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index de5bba7f910a..0936fc24942d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2453,6 +2453,18 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2453 | return; | 2453 | return; |
2454 | } | 2454 | } |
2455 | 2455 | ||
2456 | /* | ||
2457 | * If we're suspending, it is possible although not too likely | ||
2458 | * that we'd be receiving frames after having already partially | ||
2459 | * quiesced the stack. We can't process such frames then since | ||
2460 | * that might, for example, cause stations to be added or other | ||
2461 | * driver callbacks to be invoked. | ||
2462 | */ | ||
2463 | if (unlikely(local->quiescing || local->suspended)) { | ||
2464 | kfree_skb(skb); | ||
2465 | return; | ||
2466 | } | ||
2467 | |||
2456 | if (status->flag & RX_FLAG_HT) { | 2468 | if (status->flag & RX_FLAG_HT) { |
2457 | /* rate_idx is MCS index */ | 2469 | /* rate_idx is MCS index */ |
2458 | if (WARN_ON(status->rate_idx < 0 || | 2470 | if (WARN_ON(status->rate_idx < 0 || |
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index b0e582f2d37a..16e6c4378ff1 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -151,7 +151,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
151 | addr6 = addr; | 151 | addr6 = addr; |
152 | mask6 = mask; | 152 | mask6 = mask; |
153 | map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); | 153 | map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); |
154 | if (map4 == NULL) | 154 | if (map6 == NULL) |
155 | goto cfg_unlbl_map_add_failure; | 155 | goto cfg_unlbl_map_add_failure; |
156 | map6->type = NETLBL_NLTYPE_UNLABELED; | 156 | map6->type = NETLBL_NLTYPE_UNLABELED; |
157 | ipv6_addr_copy(&map6->list.addr, addr6); | 157 | ipv6_addr_copy(&map6->list.addr, addr6); |
diff --git a/net/socket.c b/net/socket.c index 791d71a36a93..6d4716559047 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -736,7 +736,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page, | |||
736 | if (more) | 736 | if (more) |
737 | flags |= MSG_MORE; | 737 | flags |= MSG_MORE; |
738 | 738 | ||
739 | return sock->ops->sendpage(sock, page, offset, size, flags); | 739 | return kernel_sendpage(sock, page, offset, size, flags); |
740 | } | 740 | } |
741 | 741 | ||
742 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | 742 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 5e14371cda70..75a406d33619 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1089,17 +1089,18 @@ static void handle_reg_beacon(struct wiphy *wiphy, | |||
1089 | 1089 | ||
1090 | chan->beacon_found = true; | 1090 | chan->beacon_found = true; |
1091 | 1091 | ||
1092 | if (wiphy->disable_beacon_hints) | ||
1093 | return; | ||
1094 | |||
1092 | chan_before.center_freq = chan->center_freq; | 1095 | chan_before.center_freq = chan->center_freq; |
1093 | chan_before.flags = chan->flags; | 1096 | chan_before.flags = chan->flags; |
1094 | 1097 | ||
1095 | if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) && | 1098 | if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) { |
1096 | !(chan->orig_flags & IEEE80211_CHAN_PASSIVE_SCAN)) { | ||
1097 | chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; | 1099 | chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; |
1098 | channel_changed = true; | 1100 | channel_changed = true; |
1099 | } | 1101 | } |
1100 | 1102 | ||
1101 | if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && | 1103 | if (chan->flags & IEEE80211_CHAN_NO_IBSS) { |
1102 | !(chan->orig_flags & IEEE80211_CHAN_NO_IBSS)) { | ||
1103 | chan->flags &= ~IEEE80211_CHAN_NO_IBSS; | 1104 | chan->flags &= ~IEEE80211_CHAN_NO_IBSS; |
1104 | channel_changed = true; | 1105 | channel_changed = true; |
1105 | } | 1106 | } |
diff --git a/net/wireless/reg.h b/net/wireless/reg.h index e37829a49dc4..4e167a8e11be 100644 --- a/net/wireless/reg.h +++ b/net/wireless/reg.h | |||
@@ -30,7 +30,8 @@ int set_regdom(const struct ieee80211_regdomain *rd); | |||
30 | * non-radar 5 GHz channels. | 30 | * non-radar 5 GHz channels. |
31 | * | 31 | * |
32 | * Drivers do not need to call this, cfg80211 will do it for you after a scan | 32 | * Drivers do not need to call this, cfg80211 will do it for you after a scan |
33 | * on a newly found BSS. | 33 | * on a newly found BSS. If you cannot make use of this feature you can |
34 | * set wiphy->disable_beacon_hints to true. | ||
34 | */ | 35 | */ |
35 | int regulatory_hint_found_beacon(struct wiphy *wiphy, | 36 | int regulatory_hint_found_beacon(struct wiphy *wiphy, |
36 | struct ieee80211_channel *beacon_chan, | 37 | struct ieee80211_channel *beacon_chan, |
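
A driver opting out of beacon hints, as the updated comment suggests, would do something along these lines; the flag name follows the comment above, while the ops structure and allocation helper are hypothetical:

    /* Illustration only: disable cfg80211 beacon hints before registration. */
    static struct wiphy *example_alloc_wiphy(int priv_size)
    {
            struct wiphy *wiphy = wiphy_new(&example_cfg80211_ops, priv_size);

            if (!wiphy)
                    return NULL;

            /* this driver handles passive-scan/IBSS restrictions itself */
            wiphy->disable_beacon_hints = true;

            return wiphy;
    }
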
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 9271118e1fc4..7e595ce24eeb 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -118,7 +118,7 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2) | |||
118 | 118 | ||
119 | if (!ie1 && !ie2) | 119 | if (!ie1 && !ie2) |
120 | return 0; | 120 | return 0; |
121 | if (!ie1) | 121 | if (!ie1 || !ie2) |
122 | return -1; | 122 | return -1; |
123 | 123 | ||
124 | r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); | 124 | r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); |
@@ -171,6 +171,8 @@ static bool is_mesh(struct cfg80211_bss *a, | |||
171 | ie = find_ie(WLAN_EID_MESH_CONFIG, | 171 | ie = find_ie(WLAN_EID_MESH_CONFIG, |
172 | a->information_elements, | 172 | a->information_elements, |
173 | a->len_information_elements); | 173 | a->len_information_elements); |
174 | if (!ie) | ||
175 | return false; | ||
174 | if (ie[1] != IEEE80211_MESH_CONFIG_LEN) | 176 | if (ie[1] != IEEE80211_MESH_CONFIG_LEN) |
175 | return false; | 177 | return false; |
176 | 178 | ||
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 7109e2b5bc0a..911ba7ffab84 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl | |||
@@ -393,7 +393,7 @@ while (<IN>) { | |||
393 | $read_function = 0; | 393 | $read_function = 0; |
394 | } | 394 | } |
395 | # print out any recorded offsets | 395 | # print out any recorded offsets |
396 | update_funcs() if ($text_found); | 396 | update_funcs() if (defined($ref_func)); |
397 | 397 | ||
398 | # reset all markers and arrays | 398 | # reset all markers and arrays |
399 | $text_found = 0; | 399 | $text_found = 0; |
@@ -403,7 +403,6 @@ while (<IN>) { | |||
403 | # section found, now is this a start of a function? | 403 | # section found, now is this a start of a function? |
404 | } elsif ($read_function && /$function_regex/) { | 404 | } elsif ($read_function && /$function_regex/) { |
405 | $text_found = 1; | 405 | $text_found = 1; |
406 | $offset = hex $1; | ||
407 | $text = $2; | 406 | $text = $2; |
408 | 407 | ||
409 | # if this is either a local function or a weak function | 408 | # if this is either a local function or a weak function |
@@ -412,10 +411,15 @@ while (<IN>) { | |||
412 | if (!defined($locals{$text}) && !defined($weak{$text})) { | 411 | if (!defined($locals{$text}) && !defined($weak{$text})) { |
413 | $ref_func = $text; | 412 | $ref_func = $text; |
414 | $read_function = 0; | 413 | $read_function = 0; |
414 | $offset = hex $1; | ||
415 | } else { | 415 | } else { |
416 | # if we already have a function, and this is weak, skip it | 416 | # if we already have a function, and this is weak, skip it |
417 | if (!defined($ref_func) || !defined($weak{$text})) { | 417 | if (!defined($ref_func) && !defined($weak{$text}) && |
418 | # PPC64 can have symbols that start with .L and | ||
419 | # gcc considers these special. Don't use them! | ||
420 | $text !~ /^\.L/) { | ||
418 | $ref_func = $text; | 421 | $ref_func = $text; |
422 | $offset = hex $1; | ||
419 | } | 423 | } |
420 | } | 424 | } |
421 | } elsif ($read_headers && /$mcount_section/) { | 425 | } elsif ($read_headers && /$mcount_section/) { |
@@ -440,7 +444,7 @@ while (<IN>) { | |||
440 | } | 444 | } |
441 | 445 | ||
442 | # dump out anymore offsets that may have been found | 446 | # dump out anymore offsets that may have been found |
443 | update_funcs() if ($text_found); | 447 | update_funcs() if (defined($ref_func)); |
444 | 448 | ||
445 | # If we did not find any mcount callers, we are done (do nothing). | 449 | # If we did not find any mcount callers, we are done (do nothing). |
446 | if (!$opened) { | 450 | if (!$opened) { |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 15c2a08a66f1..1e8cfc4c2ed6 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -1285,6 +1285,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent | |||
1285 | rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX, | 1285 | rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX, |
1286 | context, len); | 1286 | context, len); |
1287 | if (rc == -ERANGE) { | 1287 | if (rc == -ERANGE) { |
1288 | kfree(context); | ||
1289 | |||
1288 | /* Need a larger buffer. Query for the right size. */ | 1290 | /* Need a larger buffer. Query for the right size. */ |
1289 | rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX, | 1291 | rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX, |
1290 | NULL, 0); | 1292 | NULL, 0); |
@@ -1292,7 +1294,6 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent | |||
1292 | dput(dentry); | 1294 | dput(dentry); |
1293 | goto out_unlock; | 1295 | goto out_unlock; |
1294 | } | 1296 | } |
1295 | kfree(context); | ||
1296 | len = rc; | 1297 | len = rc; |
1297 | context = kmalloc(len+1, GFP_NOFS); | 1298 | context = kmalloc(len+1, GFP_NOFS); |
1298 | if (!context) { | 1299 | if (!context) { |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 88480c0c58a0..c7df01b72cac 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -174,7 +174,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd, | |||
174 | mutex_lock(&bus->cmd_mutex); | 174 | mutex_lock(&bus->cmd_mutex); |
175 | err = bus->ops.command(bus, cmd); | 175 | err = bus->ops.command(bus, cmd); |
176 | if (!err && res) | 176 | if (!err && res) |
177 | *res = bus->ops.get_response(bus); | 177 | *res = bus->ops.get_response(bus, codec->addr); |
178 | mutex_unlock(&bus->cmd_mutex); | 178 | mutex_unlock(&bus->cmd_mutex); |
179 | snd_hda_power_down(codec); | 179 | snd_hda_power_down(codec); |
180 | if (res && *res == -1 && bus->rirb_error) { | 180 | if (res && *res == -1 && bus->rirb_error) { |
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index cad79efaabc9..1b75f28ed092 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h | |||
@@ -568,7 +568,7 @@ struct hda_bus_ops { | |||
568 | /* send a single command */ | 568 | /* send a single command */ |
569 | int (*command)(struct hda_bus *bus, unsigned int cmd); | 569 | int (*command)(struct hda_bus *bus, unsigned int cmd); |
570 | /* get a response from the last command */ | 570 | /* get a response from the last command */ |
571 | unsigned int (*get_response)(struct hda_bus *bus); | 571 | unsigned int (*get_response)(struct hda_bus *bus, unsigned int addr); |
572 | /* free the private data */ | 572 | /* free the private data */ |
573 | void (*private_free)(struct hda_bus *); | 573 | void (*private_free)(struct hda_bus *); |
574 | /* attach a PCM stream */ | 574 | /* attach a PCM stream */ |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 77c1b840ca8b..175f07a381ba 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -253,7 +253,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; | |||
253 | 253 | ||
254 | /* STATESTS int mask: S3,SD2,SD1,SD0 */ | 254 | /* STATESTS int mask: S3,SD2,SD1,SD0 */ |
255 | #define AZX_MAX_CODECS 4 | 255 | #define AZX_MAX_CODECS 4 |
256 | #define STATESTS_INT_MASK 0x0f | 256 | #define STATESTS_INT_MASK ((1 << AZX_MAX_CODECS) - 1) |
257 | 257 | ||
258 | /* SD_CTL bits */ | 258 | /* SD_CTL bits */ |
259 | #define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */ | 259 | #define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */ |
@@ -361,8 +361,8 @@ struct azx_rb { | |||
361 | dma_addr_t addr; /* physical address of CORB/RIRB buffer */ | 361 | dma_addr_t addr; /* physical address of CORB/RIRB buffer */ |
362 | /* for RIRB */ | 362 | /* for RIRB */ |
363 | unsigned short rp, wp; /* read/write pointers */ | 363 | unsigned short rp, wp; /* read/write pointers */ |
364 | int cmds; /* number of pending requests */ | 364 | int cmds[AZX_MAX_CODECS]; /* number of pending requests */ |
365 | u32 res; /* last read value */ | 365 | u32 res[AZX_MAX_CODECS]; /* last read value */ |
366 | }; | 366 | }; |
367 | 367 | ||
368 | struct azx { | 368 | struct azx { |
@@ -418,7 +418,7 @@ struct azx { | |||
418 | unsigned int probing :1; /* codec probing phase */ | 418 | unsigned int probing :1; /* codec probing phase */ |
419 | 419 | ||
420 | /* for debugging */ | 420 | /* for debugging */ |
421 | unsigned int last_cmd; /* last issued command (to sync) */ | 421 | unsigned int last_cmd[AZX_MAX_CODECS]; |
422 | 422 | ||
423 | /* for pending irqs */ | 423 | /* for pending irqs */ |
424 | struct work_struct irq_pending_work; | 424 | struct work_struct irq_pending_work; |
@@ -513,6 +513,7 @@ static int azx_alloc_cmd_io(struct azx *chip) | |||
513 | 513 | ||
514 | static void azx_init_cmd_io(struct azx *chip) | 514 | static void azx_init_cmd_io(struct azx *chip) |
515 | { | 515 | { |
516 | spin_lock_irq(&chip->reg_lock); | ||
516 | /* CORB set up */ | 517 | /* CORB set up */ |
517 | chip->corb.addr = chip->rb.addr; | 518 | chip->corb.addr = chip->rb.addr; |
518 | chip->corb.buf = (u32 *)chip->rb.area; | 519 | chip->corb.buf = (u32 *)chip->rb.area; |
@@ -531,7 +532,8 @@ static void azx_init_cmd_io(struct azx *chip) | |||
531 | /* RIRB set up */ | 532 | /* RIRB set up */ |
532 | chip->rirb.addr = chip->rb.addr + 2048; | 533 | chip->rirb.addr = chip->rb.addr + 2048; |
533 | chip->rirb.buf = (u32 *)(chip->rb.area + 2048); | 534 | chip->rirb.buf = (u32 *)(chip->rb.area + 2048); |
534 | chip->rirb.wp = chip->rirb.rp = chip->rirb.cmds = 0; | 535 | chip->rirb.wp = chip->rirb.rp = 0; |
536 | memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds)); | ||
535 | azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr); | 537 | azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr); |
536 | azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr)); | 538 | azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr)); |
537 | 539 | ||
@@ -543,30 +545,60 @@ static void azx_init_cmd_io(struct azx *chip) | |||
543 | azx_writew(chip, RINTCNT, 1); | 545 | azx_writew(chip, RINTCNT, 1); |
544 | /* enable rirb dma and response irq */ | 546 | /* enable rirb dma and response irq */ |
545 | azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN); | 547 | azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN); |
548 | spin_unlock_irq(&chip->reg_lock); | ||
546 | } | 549 | } |
547 | 550 | ||
548 | static void azx_free_cmd_io(struct azx *chip) | 551 | static void azx_free_cmd_io(struct azx *chip) |
549 | { | 552 | { |
553 | spin_lock_irq(&chip->reg_lock); | ||
550 | /* disable ringbuffer DMAs */ | 554 | /* disable ringbuffer DMAs */ |
551 | azx_writeb(chip, RIRBCTL, 0); | 555 | azx_writeb(chip, RIRBCTL, 0); |
552 | azx_writeb(chip, CORBCTL, 0); | 556 | azx_writeb(chip, CORBCTL, 0); |
557 | spin_unlock_irq(&chip->reg_lock); | ||
558 | } | ||
559 | |||
560 | static unsigned int azx_command_addr(u32 cmd) | ||
561 | { | ||
562 | unsigned int addr = cmd >> 28; | ||
563 | |||
564 | if (addr >= AZX_MAX_CODECS) { | ||
565 | snd_BUG(); | ||
566 | addr = 0; | ||
567 | } | ||
568 | |||
569 | return addr; | ||
570 | } | ||
571 | |||
572 | static unsigned int azx_response_addr(u32 res) | ||
573 | { | ||
574 | unsigned int addr = res & 0xf; | ||
575 | |||
576 | if (addr >= AZX_MAX_CODECS) { | ||
577 | snd_BUG(); | ||
578 | addr = 0; | ||
579 | } | ||
580 | |||
581 | return addr; | ||
553 | } | 582 | } |
554 | 583 | ||
555 | /* send a command */ | 584 | /* send a command */ |
556 | static int azx_corb_send_cmd(struct hda_bus *bus, u32 val) | 585 | static int azx_corb_send_cmd(struct hda_bus *bus, u32 val) |
557 | { | 586 | { |
558 | struct azx *chip = bus->private_data; | 587 | struct azx *chip = bus->private_data; |
588 | unsigned int addr = azx_command_addr(val); | ||
559 | unsigned int wp; | 589 | unsigned int wp; |
560 | 590 | ||
591 | spin_lock_irq(&chip->reg_lock); | ||
592 | |||
561 | /* add command to corb */ | 593 | /* add command to corb */ |
562 | wp = azx_readb(chip, CORBWP); | 594 | wp = azx_readb(chip, CORBWP); |
563 | wp++; | 595 | wp++; |
564 | wp %= ICH6_MAX_CORB_ENTRIES; | 596 | wp %= ICH6_MAX_CORB_ENTRIES; |
565 | 597 | ||
566 | spin_lock_irq(&chip->reg_lock); | 598 | chip->rirb.cmds[addr]++; |
567 | chip->rirb.cmds++; | ||
568 | chip->corb.buf[wp] = cpu_to_le32(val); | 599 | chip->corb.buf[wp] = cpu_to_le32(val); |
569 | azx_writel(chip, CORBWP, wp); | 600 | azx_writel(chip, CORBWP, wp); |
601 | |||
570 | spin_unlock_irq(&chip->reg_lock); | 602 | spin_unlock_irq(&chip->reg_lock); |
571 | 603 | ||
572 | return 0; | 604 | return 0; |
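
The new azx_command_addr()/azx_response_addr() helpers above only make sense given how an HD-audio verb packs the codec address into its top nibble. A rough illustration follows; the field widths reflect the commonly documented HDA verb layout, and the function is not part of the driver:

    /* Illustration only: build a 32-bit verb and recover the codec address
     * the same way azx_command_addr() does (cmd >> 28). */
    static unsigned int example_make_verb(unsigned int codec_addr,
                                          unsigned int nid,
                                          unsigned int verb,
                                          unsigned int parm)
    {
            return ((codec_addr & 0x0f) << 28) |  /* codec address, bits 31:28 */
                   ((nid & 0xff) << 20) |         /* widget node id            */
                   (verb << 8) | parm;            /* verb and payload          */
    }

    /*
     * example_make_verb(2, 0x01, 0xf00, 0x00) >> 28 == 2, which is the index
     * used to pick the per-codec rirb.cmds[]/rirb.res[] slot above; the same
     * nibble comes back in the low bits of the RIRB response extension.
     */
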
@@ -578,13 +610,14 @@ static int azx_corb_send_cmd(struct hda_bus *bus, u32 val) | |||
578 | static void azx_update_rirb(struct azx *chip) | 610 | static void azx_update_rirb(struct azx *chip) |
579 | { | 611 | { |
580 | unsigned int rp, wp; | 612 | unsigned int rp, wp; |
613 | unsigned int addr; | ||
581 | u32 res, res_ex; | 614 | u32 res, res_ex; |
582 | 615 | ||
583 | wp = azx_readb(chip, RIRBWP); | 616 | wp = azx_readb(chip, RIRBWP); |
584 | if (wp == chip->rirb.wp) | 617 | if (wp == chip->rirb.wp) |
585 | return; | 618 | return; |
586 | chip->rirb.wp = wp; | 619 | chip->rirb.wp = wp; |
587 | 620 | ||
588 | while (chip->rirb.rp != wp) { | 621 | while (chip->rirb.rp != wp) { |
589 | chip->rirb.rp++; | 622 | chip->rirb.rp++; |
590 | chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES; | 623 | chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES; |
@@ -592,18 +625,24 @@ static void azx_update_rirb(struct azx *chip) | |||
592 | rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */ | 625 | rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */ |
593 | res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]); | 626 | res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]); |
594 | res = le32_to_cpu(chip->rirb.buf[rp]); | 627 | res = le32_to_cpu(chip->rirb.buf[rp]); |
628 | addr = azx_response_addr(res_ex); | ||
595 | if (res_ex & ICH6_RIRB_EX_UNSOL_EV) | 629 | if (res_ex & ICH6_RIRB_EX_UNSOL_EV) |
596 | snd_hda_queue_unsol_event(chip->bus, res, res_ex); | 630 | snd_hda_queue_unsol_event(chip->bus, res, res_ex); |
597 | else if (chip->rirb.cmds) { | 631 | else if (chip->rirb.cmds[addr]) { |
598 | chip->rirb.res = res; | 632 | chip->rirb.res[addr] = res; |
599 | smp_wmb(); | 633 | smp_wmb(); |
600 | chip->rirb.cmds--; | 634 | chip->rirb.cmds[addr]--; |
601 | } | 635 | } else |
636 | snd_printk(KERN_ERR SFX "spurious response %#x:%#x, " | ||
637 | "last cmd=%#08x\n", | ||
638 | res, res_ex, | ||
639 | chip->last_cmd[addr]); | ||
602 | } | 640 | } |
603 | } | 641 | } |
604 | 642 | ||
605 | /* receive a response */ | 643 | /* receive a response */ |
606 | static unsigned int azx_rirb_get_response(struct hda_bus *bus) | 644 | static unsigned int azx_rirb_get_response(struct hda_bus *bus, |
645 | unsigned int addr) | ||
607 | { | 646 | { |
608 | struct azx *chip = bus->private_data; | 647 | struct azx *chip = bus->private_data; |
609 | unsigned long timeout; | 648 | unsigned long timeout; |
@@ -616,10 +655,10 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus) | |||
616 | azx_update_rirb(chip); | 655 | azx_update_rirb(chip); |
617 | spin_unlock_irq(&chip->reg_lock); | 656 | spin_unlock_irq(&chip->reg_lock); |
618 | } | 657 | } |
619 | if (!chip->rirb.cmds) { | 658 | if (!chip->rirb.cmds[addr]) { |
620 | smp_rmb(); | 659 | smp_rmb(); |
621 | bus->rirb_error = 0; | 660 | bus->rirb_error = 0; |
622 | return chip->rirb.res; /* the last value */ | 661 | return chip->rirb.res[addr]; /* the last value */ |
623 | } | 662 | } |
624 | if (time_after(jiffies, timeout)) | 663 | if (time_after(jiffies, timeout)) |
625 | break; | 664 | break; |
@@ -633,7 +672,8 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus) | |||
633 | 672 | ||
634 | if (chip->msi) { | 673 | if (chip->msi) { |
635 | snd_printk(KERN_WARNING SFX "No response from codec, " | 674 | snd_printk(KERN_WARNING SFX "No response from codec, " |
636 | "disabling MSI: last cmd=0x%08x\n", chip->last_cmd); | 675 | "disabling MSI: last cmd=0x%08x\n", |
676 | chip->last_cmd[addr]); | ||
637 | free_irq(chip->irq, chip); | 677 | free_irq(chip->irq, chip); |
638 | chip->irq = -1; | 678 | chip->irq = -1; |
639 | pci_disable_msi(chip->pci); | 679 | pci_disable_msi(chip->pci); |
@@ -648,7 +688,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus) | |||
648 | if (!chip->polling_mode) { | 688 | if (!chip->polling_mode) { |
649 | snd_printk(KERN_WARNING SFX "azx_get_response timeout, " | 689 | snd_printk(KERN_WARNING SFX "azx_get_response timeout, " |
650 | "switching to polling mode: last cmd=0x%08x\n", | 690 | "switching to polling mode: last cmd=0x%08x\n", |
651 | chip->last_cmd); | 691 | chip->last_cmd[addr]); |
652 | chip->polling_mode = 1; | 692 | chip->polling_mode = 1; |
653 | goto again; | 693 | goto again; |
654 | } | 694 | } |
@@ -672,7 +712,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus) | |||
672 | 712 | ||
673 | snd_printk(KERN_ERR "hda_intel: azx_get_response timeout, " | 713 | snd_printk(KERN_ERR "hda_intel: azx_get_response timeout, " |
674 | "switching to single_cmd mode: last cmd=0x%08x\n", | 714 | "switching to single_cmd mode: last cmd=0x%08x\n", |
675 | chip->last_cmd); | 715 | chip->last_cmd[addr]); |
676 | chip->single_cmd = 1; | 716 | chip->single_cmd = 1; |
677 | bus->response_reset = 0; | 717 | bus->response_reset = 0; |
678 | /* re-initialize CORB/RIRB */ | 718 | /* re-initialize CORB/RIRB */ |
@@ -692,7 +732,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus) | |||
692 | */ | 732 | */ |
693 | 733 | ||
694 | /* receive a response */ | 734 | /* receive a response */ |
695 | static int azx_single_wait_for_response(struct azx *chip) | 735 | static int azx_single_wait_for_response(struct azx *chip, unsigned int addr) |
696 | { | 736 | { |
697 | int timeout = 50; | 737 | int timeout = 50; |
698 | 738 | ||
@@ -700,7 +740,7 @@ static int azx_single_wait_for_response(struct azx *chip) | |||
700 | /* check IRV busy bit */ | 740 | /* check IRV busy bit */ |
701 | if (azx_readw(chip, IRS) & ICH6_IRS_VALID) { | 741 | if (azx_readw(chip, IRS) & ICH6_IRS_VALID) { |
702 | /* reuse rirb.res as the response return value */ | 742 | /* reuse rirb.res as the response return value */ |
703 | chip->rirb.res = azx_readl(chip, IR); | 743 | chip->rirb.res[addr] = azx_readl(chip, IR); |
704 | return 0; | 744 | return 0; |
705 | } | 745 | } |
706 | udelay(1); | 746 | udelay(1); |
@@ -708,7 +748,7 @@ static int azx_single_wait_for_response(struct azx *chip) | |||
708 | if (printk_ratelimit()) | 748 | if (printk_ratelimit()) |
709 | snd_printd(SFX "get_response timeout: IRS=0x%x\n", | 749 | snd_printd(SFX "get_response timeout: IRS=0x%x\n", |
710 | azx_readw(chip, IRS)); | 750 | azx_readw(chip, IRS)); |
711 | chip->rirb.res = -1; | 751 | chip->rirb.res[addr] = -1; |
712 | return -EIO; | 752 | return -EIO; |
713 | } | 753 | } |
714 | 754 | ||
@@ -716,6 +756,7 @@ static int azx_single_wait_for_response(struct azx *chip) | |||
716 | static int azx_single_send_cmd(struct hda_bus *bus, u32 val) | 756 | static int azx_single_send_cmd(struct hda_bus *bus, u32 val) |
717 | { | 757 | { |
718 | struct azx *chip = bus->private_data; | 758 | struct azx *chip = bus->private_data; |
759 | unsigned int addr = azx_command_addr(val); | ||
719 | int timeout = 50; | 760 | int timeout = 50; |
720 | 761 | ||
721 | bus->rirb_error = 0; | 762 | bus->rirb_error = 0; |
@@ -728,7 +769,7 @@ static int azx_single_send_cmd(struct hda_bus *bus, u32 val) | |||
728 | azx_writel(chip, IC, val); | 769 | azx_writel(chip, IC, val); |
729 | azx_writew(chip, IRS, azx_readw(chip, IRS) | | 770 | azx_writew(chip, IRS, azx_readw(chip, IRS) | |
730 | ICH6_IRS_BUSY); | 771 | ICH6_IRS_BUSY); |
731 | return azx_single_wait_for_response(chip); | 772 | return azx_single_wait_for_response(chip, addr); |
732 | } | 773 | } |
733 | udelay(1); | 774 | udelay(1); |
734 | } | 775 | } |
@@ -739,10 +780,11 @@ static int azx_single_send_cmd(struct hda_bus *bus, u32 val) | |||
739 | } | 780 | } |
740 | 781 | ||
741 | /* receive a response */ | 782 | /* receive a response */ |
742 | static unsigned int azx_single_get_response(struct hda_bus *bus) | 783 | static unsigned int azx_single_get_response(struct hda_bus *bus, |
784 | unsigned int addr) | ||
743 | { | 785 | { |
744 | struct azx *chip = bus->private_data; | 786 | struct azx *chip = bus->private_data; |
745 | return chip->rirb.res; | 787 | return chip->rirb.res[addr]; |
746 | } | 788 | } |
747 | 789 | ||
748 | /* | 790 | /* |
@@ -757,7 +799,7 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val) | |||
757 | { | 799 | { |
758 | struct azx *chip = bus->private_data; | 800 | struct azx *chip = bus->private_data; |
759 | 801 | ||
760 | chip->last_cmd = val; | 802 | chip->last_cmd[azx_command_addr(val)] = val; |
761 | if (chip->single_cmd) | 803 | if (chip->single_cmd) |
762 | return azx_single_send_cmd(bus, val); | 804 | return azx_single_send_cmd(bus, val); |
763 | else | 805 | else |
@@ -765,13 +807,14 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val) | |||
765 | } | 807 | } |
766 | 808 | ||
767 | /* get a response */ | 809 | /* get a response */ |
768 | static unsigned int azx_get_response(struct hda_bus *bus) | 810 | static unsigned int azx_get_response(struct hda_bus *bus, |
811 | unsigned int addr) | ||
769 | { | 812 | { |
770 | struct azx *chip = bus->private_data; | 813 | struct azx *chip = bus->private_data; |
771 | if (chip->single_cmd) | 814 | if (chip->single_cmd) |
772 | return azx_single_get_response(bus); | 815 | return azx_single_get_response(bus, addr); |
773 | else | 816 | else |
774 | return azx_rirb_get_response(bus); | 817 | return azx_rirb_get_response(bus, addr); |
775 | } | 818 | } |
776 | 819 | ||
777 | #ifdef CONFIG_SND_HDA_POWER_SAVE | 820 | #ifdef CONFIG_SND_HDA_POWER_SAVE |
@@ -1243,10 +1286,12 @@ static int probe_codec(struct azx *chip, int addr) | |||
1243 | (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; | 1286 | (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; |
1244 | unsigned int res; | 1287 | unsigned int res; |
1245 | 1288 | ||
1289 | mutex_lock(&chip->bus->cmd_mutex); | ||
1246 | chip->probing = 1; | 1290 | chip->probing = 1; |
1247 | azx_send_cmd(chip->bus, cmd); | 1291 | azx_send_cmd(chip->bus, cmd); |
1248 | res = azx_get_response(chip->bus); | 1292 | res = azx_get_response(chip->bus, addr); |
1249 | chip->probing = 0; | 1293 | chip->probing = 0; |
1294 | mutex_unlock(&chip->bus->cmd_mutex); | ||
1250 | if (res == -1) | 1295 | if (res == -1) |
1251 | return -EIO; | 1296 | return -EIO; |
1252 | snd_printdd(SFX "codec #%d probed OK\n", addr); | 1297 | snd_printdd(SFX "codec #%d probed OK\n", addr); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b95df5d5dcc2..fea976793ae5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -559,7 +559,7 @@ static int alc_pin_mode_get(struct snd_kcontrol *kcontrol, | |||
559 | 559 | ||
560 | /* Find enumerated value for current pinctl setting */ | 560 | /* Find enumerated value for current pinctl setting */ |
561 | i = alc_pin_mode_min(dir); | 561 | i = alc_pin_mode_min(dir); |
562 | while (alc_pin_mode_values[i] != pinctl && i <= alc_pin_mode_max(dir)) | 562 | while (i <= alc_pin_mode_max(dir) && alc_pin_mode_values[i] != pinctl) |
563 | i++; | 563 | i++; |
564 | *valp = i <= alc_pin_mode_max(dir) ? i: alc_pin_mode_min(dir); | 564 | *valp = i <= alc_pin_mode_max(dir) ? i: alc_pin_mode_min(dir); |
565 | return 0; | 565 | return 0; |
@@ -13563,6 +13563,8 @@ static int patch_alc269(struct hda_codec *codec) | |||
13563 | set_capture_mixer(spec); | 13563 | set_capture_mixer(spec); |
13564 | set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); | 13564 | set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); |
13565 | 13565 | ||
13566 | spec->vmaster_nid = 0x02; | ||
13567 | |||
13566 | codec->patch_ops = alc_patch_ops; | 13568 | codec->patch_ops = alc_patch_ops; |
13567 | if (board_config == ALC269_AUTO) | 13569 | if (board_config == ALC269_AUTO) |
13568 | spec->init_hook = alc269_auto_init; | 13570 | spec->init_hook = alc269_auto_init; |
@@ -15157,7 +15159,7 @@ static struct snd_pci_quirk alc861vd_cfg_tbl[] = { | |||
15157 | SND_PCI_QUIRK(0x10de, 0x03f0, "Realtek ALC660 demo", ALC660VD_3ST), | 15159 | SND_PCI_QUIRK(0x10de, 0x03f0, "Realtek ALC660 demo", ALC660VD_3ST), |
15158 | SND_PCI_QUIRK(0x1179, 0xff00, "Toshiba A135", ALC861VD_LENOVO), | 15160 | SND_PCI_QUIRK(0x1179, 0xff00, "Toshiba A135", ALC861VD_LENOVO), |
15159 | /*SND_PCI_QUIRK(0x1179, 0xff00, "DALLAS", ALC861VD_DALLAS),*/ /*lenovo*/ | 15161 | /*SND_PCI_QUIRK(0x1179, 0xff00, "DALLAS", ALC861VD_DALLAS),*/ /*lenovo*/ |
15160 | SND_PCI_QUIRK(0x1179, 0xff01, "DALLAS", ALC861VD_DALLAS), | 15162 | SND_PCI_QUIRK(0x1179, 0xff01, "Toshiba A135", ALC861VD_LENOVO), |
15161 | SND_PCI_QUIRK(0x1179, 0xff03, "Toshiba P205", ALC861VD_LENOVO), | 15163 | SND_PCI_QUIRK(0x1179, 0xff03, "Toshiba P205", ALC861VD_LENOVO), |
15162 | SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba L30-149", ALC861VD_DALLAS), | 15164 | SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba L30-149", ALC861VD_DALLAS), |
15163 | SND_PCI_QUIRK(0x1565, 0x820d, "Biostar NF61S SE", ALC861VD_6ST_DIG), | 15165 | SND_PCI_QUIRK(0x1565, 0x820d, "Biostar NF61S SE", ALC861VD_6ST_DIG), |
@@ -15577,9 +15579,12 @@ static int patch_alc861vd(struct hda_codec *codec) | |||
15577 | spec->stream_digital_playback = &alc861vd_pcm_digital_playback; | 15579 | spec->stream_digital_playback = &alc861vd_pcm_digital_playback; |
15578 | spec->stream_digital_capture = &alc861vd_pcm_digital_capture; | 15580 | spec->stream_digital_capture = &alc861vd_pcm_digital_capture; |
15579 | 15581 | ||
15580 | spec->adc_nids = alc861vd_adc_nids; | 15582 | if (!spec->adc_nids) { |
15581 | spec->num_adc_nids = ARRAY_SIZE(alc861vd_adc_nids); | 15583 | spec->adc_nids = alc861vd_adc_nids; |
15582 | spec->capsrc_nids = alc861vd_capsrc_nids; | 15584 | spec->num_adc_nids = ARRAY_SIZE(alc861vd_adc_nids); |
15585 | } | ||
15586 | if (!spec->capsrc_nids) | ||
15587 | spec->capsrc_nids = alc861vd_capsrc_nids; | ||
15583 | 15588 | ||
15584 | set_capture_mixer(spec); | 15589 | set_capture_mixer(spec); |
15585 | set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); | 15590 | set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); |
@@ -17496,9 +17501,12 @@ static int patch_alc662(struct hda_codec *codec) | |||
17496 | spec->stream_digital_playback = &alc662_pcm_digital_playback; | 17501 | spec->stream_digital_playback = &alc662_pcm_digital_playback; |
17497 | spec->stream_digital_capture = &alc662_pcm_digital_capture; | 17502 | spec->stream_digital_capture = &alc662_pcm_digital_capture; |
17498 | 17503 | ||
17499 | spec->adc_nids = alc662_adc_nids; | 17504 | if (!spec->adc_nids) { |
17500 | spec->num_adc_nids = ARRAY_SIZE(alc662_adc_nids); | 17505 | spec->adc_nids = alc662_adc_nids; |
17501 | spec->capsrc_nids = alc662_capsrc_nids; | 17506 | spec->num_adc_nids = ARRAY_SIZE(alc662_adc_nids); |
17507 | } | ||
17508 | if (!spec->capsrc_nids) | ||
17509 | spec->capsrc_nids = alc662_capsrc_nids; | ||
17502 | 17510 | ||
17503 | if (!spec->cap_mixer) | 17511 | if (!spec->cap_mixer) |
17504 | set_capture_mixer(spec); | 17512 | set_capture_mixer(spec); |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 5383d8cff88b..456ef6ac12e4 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -2266,7 +2266,7 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = { | |||
2266 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), | 2266 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), |
2267 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), | 2267 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), |
2268 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), | 2268 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), |
2269 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_3ST), | 2269 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS), |
2270 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS), | 2270 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS), |
2271 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS), | 2271 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS), |
2272 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS), | 2272 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS), |
@@ -5645,6 +5645,13 @@ static int patch_stac927x(struct hda_codec *codec) | |||
5645 | /* GPIO2 High = Enable EAPD */ | 5645 | /* GPIO2 High = Enable EAPD */ |
5646 | spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x04; | 5646 | spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x04; |
5647 | spec->gpio_data = 0x04; | 5647 | spec->gpio_data = 0x04; |
5648 | switch (codec->subsystem_id) { | ||
5649 | case 0x1028022f: | ||
5650 | /* correct EAPD to be GPIO0 */ | ||
5651 | spec->eapd_mask = spec->gpio_mask = 0x01; | ||
5652 | spec->gpio_dir = spec->gpio_data = 0x01; | ||
5653 | break; | ||
5654 | } | ||
5648 | spec->dmic_nids = stac927x_dmic_nids; | 5655 | spec->dmic_nids = stac927x_dmic_nids; |
5649 | spec->num_dmics = STAC927X_NUM_DMICS; | 5656 | spec->num_dmics = STAC927X_NUM_DMICS; |
5650 | 5657 | ||
diff --git a/sound/soc/fsl/efika-audio-fabric.c b/sound/soc/fsl/efika-audio-fabric.c index 85b0e7569504..3326e2a1e863 100644 --- a/sound/soc/fsl/efika-audio-fabric.c +++ b/sound/soc/fsl/efika-audio-fabric.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "mpc5200_psc_ac97.h" | 30 | #include "mpc5200_psc_ac97.h" |
31 | #include "../codecs/stac9766.h" | 31 | #include "../codecs/stac9766.h" |
32 | 32 | ||
33 | #define DRV_NAME "efika-audio-fabric" | ||
34 | |||
33 | static struct snd_soc_device device; | 35 | static struct snd_soc_device device; |
34 | static struct snd_soc_card card; | 36 | static struct snd_soc_card card; |
35 | 37 | ||
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index 8766f7a3893d..b928ef7d28eb 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "mpc5200_psc_ac97.h" | 30 | #include "mpc5200_psc_ac97.h" |
31 | #include "../codecs/wm9712.h" | 31 | #include "../codecs/wm9712.h" |
32 | 32 | ||
33 | #define DRV_NAME "pcm030-audio-fabric" | ||
34 | |||
33 | static struct snd_soc_device device; | 35 | static struct snd_soc_device device; |
34 | static struct snd_soc_card card; | 36 | static struct snd_soc_card card; |
35 | 37 | ||
diff --git a/tools/perf/Documentation/perf-examples.txt b/tools/perf/Documentation/perf-examples.txt new file mode 100644 index 000000000000..8eb6c489fb15 --- /dev/null +++ b/tools/perf/Documentation/perf-examples.txt | |||
@@ -0,0 +1,225 @@ | |||
1 | |||
2 | ------------------------------ | ||
3 | ****** perf by examples ****** | ||
4 | ------------------------------ | ||
5 | |||
6 | [ From an e-mail by Ingo Molnar, http://lkml.org/lkml/2009/8/4/346 ] | ||
7 | |||
8 | |||
9 | First, discovery/enumeration of available counters can be done via | ||
10 | 'perf list': | ||
11 | |||
12 | titan:~> perf list | ||
13 | [...] | ||
14 | kmem:kmalloc [Tracepoint event] | ||
15 | kmem:kmem_cache_alloc [Tracepoint event] | ||
16 | kmem:kmalloc_node [Tracepoint event] | ||
17 | kmem:kmem_cache_alloc_node [Tracepoint event] | ||
18 | kmem:kfree [Tracepoint event] | ||
19 | kmem:kmem_cache_free [Tracepoint event] | ||
20 | kmem:mm_page_free_direct [Tracepoint event] | ||
21 | kmem:mm_pagevec_free [Tracepoint event] | ||
22 | kmem:mm_page_alloc [Tracepoint event] | ||
23 | kmem:mm_page_alloc_zone_locked [Tracepoint event] | ||
24 | kmem:mm_page_pcpu_drain [Tracepoint event] | ||
25 | kmem:mm_page_alloc_extfrag [Tracepoint event] | ||
26 | |||
27 | Then any (or all) of the above event sources can be activated and | ||
28 | measured. For example the page alloc/free properties of a 'hackbench | ||
29 | run' are: | ||
30 | |||
31 | titan:~> perf stat -e kmem:mm_page_pcpu_drain -e kmem:mm_page_alloc | ||
32 | -e kmem:mm_pagevec_free -e kmem:mm_page_free_direct ./hackbench 10 | ||
33 | Time: 0.575 | ||
34 | |||
35 | Performance counter stats for './hackbench 10': | ||
36 | |||
37 | 13857 kmem:mm_page_pcpu_drain | ||
38 | 27576 kmem:mm_page_alloc | ||
39 | 6025 kmem:mm_pagevec_free | ||
40 | 20934 kmem:mm_page_free_direct | ||
41 | |||
42 | 0.613972165 seconds time elapsed | ||
43 | |||
44 | You can observe the statistical properties as well, by using the | ||
45 | 'repeat the workload N times' feature of perf stat: | ||
46 | |||
47 | titan:~> perf stat --repeat 5 -e kmem:mm_page_pcpu_drain -e | ||
48 | kmem:mm_page_alloc -e kmem:mm_pagevec_free -e | ||
49 | kmem:mm_page_free_direct ./hackbench 10 | ||
50 | Time: 0.627 | ||
51 | Time: 0.644 | ||
52 | Time: 0.564 | ||
53 | Time: 0.559 | ||
54 | Time: 0.626 | ||
55 | |||
56 | Performance counter stats for './hackbench 10' (5 runs): | ||
57 | |||
58 | 12920 kmem:mm_page_pcpu_drain ( +- 3.359% ) | ||
59 | 25035 kmem:mm_page_alloc ( +- 3.783% ) | ||
60 | 6104 kmem:mm_pagevec_free ( +- 0.934% ) | ||
61 | 18376 kmem:mm_page_free_direct ( +- 4.941% ) | ||
62 | |||
63 | 0.643954516 seconds time elapsed ( +- 2.363% ) | ||
64 | |||
65 | Furthermore, these tracepoints can be used to sample the workload as | ||
66 | well. For example the page allocations done by a 'git gc' can be | ||
67 | captured in the following way: | ||
68 | |||
69 | titan:~/git> perf record -f -e kmem:mm_page_alloc -c 1 ./git gc | ||
70 | Counting objects: 1148, done. | ||
71 | Delta compression using up to 2 threads. | ||
72 | Compressing objects: 100% (450/450), done. | ||
73 | Writing objects: 100% (1148/1148), done. | ||
74 | Total 1148 (delta 690), reused 1148 (delta 690) | ||
75 | [ perf record: Captured and wrote 0.267 MB perf.data (~11679 samples) ] | ||
76 | |||
77 | To check which functions generated page allocations: | ||
78 | |||
79 | titan:~/git> perf report | ||
80 | # Samples: 10646 | ||
81 | # | ||
82 | # Overhead Command Shared Object | ||
83 | # ........ ............... .......................... | ||
84 | # | ||
85 | 23.57% git-repack /lib64/libc-2.5.so | ||
86 | 21.81% git /lib64/libc-2.5.so | ||
87 | 14.59% git ./git | ||
88 | 11.79% git-repack ./git | ||
89 | 7.12% git /lib64/ld-2.5.so | ||
90 | 3.16% git-repack /lib64/libpthread-2.5.so | ||
91 | 2.09% git-repack /bin/bash | ||
92 | 1.97% rm /lib64/libc-2.5.so | ||
93 | 1.39% mv /lib64/ld-2.5.so | ||
94 | 1.37% mv /lib64/libc-2.5.so | ||
95 | 1.12% git-repack /lib64/ld-2.5.so | ||
96 | 0.95% rm /lib64/ld-2.5.so | ||
97 | 0.90% git-update-serv /lib64/libc-2.5.so | ||
98 | 0.73% git-update-serv /lib64/ld-2.5.so | ||
99 | 0.68% perf /lib64/libpthread-2.5.so | ||
100 | 0.64% git-repack /usr/lib64/libz.so.1.2.3 | ||
101 | |||
102 | Or to see it on a more fine-grained level: | ||
103 | |||
104 | titan:~/git> perf report --sort comm,dso,symbol | ||
105 | # Samples: 10646 | ||
106 | # | ||
107 | # Overhead Command Shared Object Symbol | ||
108 | # ........ ............... .......................... ...... | ||
109 | # | ||
110 | 9.35% git-repack ./git [.] insert_obj_hash | ||
111 | 9.12% git ./git [.] insert_obj_hash | ||
112 | 7.31% git /lib64/libc-2.5.so [.] memcpy | ||
113 | 6.34% git-repack /lib64/libc-2.5.so [.] _int_malloc | ||
114 | 6.24% git-repack /lib64/libc-2.5.so [.] memcpy | ||
115 | 5.82% git-repack /lib64/libc-2.5.so [.] __GI___fork | ||
116 | 5.47% git /lib64/libc-2.5.so [.] _int_malloc | ||
117 | 2.99% git /lib64/libc-2.5.so [.] memset | ||
118 | |||
119 | Furthermore, call-graph sampling of page allocations can be done | ||
120 | too - to see precisely what kind of page allocations there | ||
121 | are: | ||
122 | |||
123 | titan:~/git> perf record -f -g -e kmem:mm_page_alloc -c 1 ./git gc | ||
124 | Counting objects: 1148, done. | ||
125 | Delta compression using up to 2 threads. | ||
126 | Compressing objects: 100% (450/450), done. | ||
127 | Writing objects: 100% (1148/1148), done. | ||
128 | Total 1148 (delta 690), reused 1148 (delta 690) | ||
129 | [ perf record: Captured and wrote 0.963 MB perf.data (~42069 samples) ] | ||
130 | |||
131 | titan:~/git> perf report -g | ||
132 | # Samples: 10686 | ||
133 | # | ||
134 | # Overhead Command Shared Object | ||
135 | # ........ ............... .......................... | ||
136 | # | ||
137 | 23.25% git-repack /lib64/libc-2.5.so | ||
138 | | | ||
139 | |--50.00%-- _int_free | ||
140 | | | ||
141 | |--37.50%-- __GI___fork | ||
142 | | make_child | ||
143 | | | ||
144 | |--12.50%-- ptmalloc_unlock_all2 | ||
145 | | make_child | ||
146 | | | ||
147 | --6.25%-- __GI_strcpy | ||
148 | 21.61% git /lib64/libc-2.5.so | ||
149 | | | ||
150 | |--30.00%-- __GI_read | ||
151 | | | | ||
152 | | --83.33%-- git_config_from_file | ||
153 | | git_config | ||
154 | | | | ||
155 | [...] | ||
156 | |||
157 | Or you can observe the whole system's page allocations for 10 | ||
158 | seconds: | ||
159 | |||
160 | titan:~/git> perf stat -a -e kmem:mm_page_pcpu_drain -e | ||
161 | kmem:mm_page_alloc -e kmem:mm_pagevec_free -e | ||
162 | kmem:mm_page_free_direct sleep 10 | ||
163 | |||
164 | Performance counter stats for 'sleep 10': | ||
165 | |||
166 | 171585 kmem:mm_page_pcpu_drain | ||
167 | 322114 kmem:mm_page_alloc | ||
168 | 73623 kmem:mm_pagevec_free | ||
169 | 254115 kmem:mm_page_free_direct | ||
170 | |||
171 | 10.000591410 seconds time elapsed | ||
172 | |||
173 | Or observe how fluctuating the page allocations are, via statistical | ||
174 | analysis done over ten 1-second intervals: | ||
175 | |||
176 | titan:~/git> perf stat --repeat 10 -a -e kmem:mm_page_pcpu_drain -e | ||
177 | kmem:mm_page_alloc -e kmem:mm_pagevec_free -e | ||
178 | kmem:mm_page_free_direct sleep 1 | ||
179 | |||
180 | Performance counter stats for 'sleep 1' (10 runs): | ||
181 | |||
182 | 17254 kmem:mm_page_pcpu_drain ( +- 3.709% ) | ||
183 | 34394 kmem:mm_page_alloc ( +- 4.617% ) | ||
184 | 7509 kmem:mm_pagevec_free ( +- 4.820% ) | ||
185 | 25653 kmem:mm_page_free_direct ( +- 3.672% ) | ||
186 | |||
187 | 1.058135029 seconds time elapsed ( +- 3.089% ) | ||
188 | |||
189 | Or you can annotate the recorded 'git gc' run on a per symbol basis | ||
190 | and check which instructions/source-code generated page allocations: | ||
191 | |||
192 | titan:~/git> perf annotate __GI___fork | ||
193 | ------------------------------------------------ | ||
194 | Percent | Source code & Disassembly of libc-2.5.so | ||
195 | ------------------------------------------------ | ||
196 | : | ||
197 | : | ||
198 | : Disassembly of section .plt: | ||
199 | : Disassembly of section .text: | ||
200 | : | ||
201 | : 00000031a2e95560 <__fork>: | ||
202 | [...] | ||
203 | 0.00 : 31a2e95602: b8 38 00 00 00 mov $0x38,%eax | ||
204 | 0.00 : 31a2e95607: 0f 05 syscall | ||
205 | 83.42 : 31a2e95609: 48 3d 00 f0 ff ff cmp $0xfffffffffffff000,%rax | ||
206 | 0.00 : 31a2e9560f: 0f 87 4d 01 00 00 ja 31a2e95762 <__fork+0x202> | ||
207 | 0.00 : 31a2e95615: 85 c0 test %eax,%eax | ||
208 | |||
209 | ( this shows that 83.42% of __GI___fork's page allocations come from | ||
210 | the 0x38 system call it performs. ) | ||
211 | |||
212 | etc. etc. - a lot more is possible. I could list a dozen | ||
213 | other different use cases straight away - none of which is | ||
214 | possible via /proc/vmstat. | ||
215 | |||
216 | /proc/vmstat is not in the same league really, in terms of | ||
217 | expressive power of system analysis and performance | ||
218 | analysis. | ||
219 | |||
220 | All that the above results needed were those new tracepoints | ||
221 | in include/tracing/events/kmem.h. | ||
222 | |||
223 | Ingo | ||
224 | |||
225 | |||
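
The examples above all drive the tracepoints through the perf tool itself. For completeness, the same kmem:mm_page_alloc tracepoint can also be counted programmatically through the kernel's counter syscall. The sketch below is not part of the documentation file; it uses the later perf_event_open() spelling (kernels contemporary with this series still used the perf_counter naming) and assumes debugfs is mounted at /sys/kernel/debug:

    /* Illustration only: count kmem:mm_page_alloc events for this process. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <linux/perf_event.h>

    static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                    int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            unsigned long long id, count = 0;
            FILE *f;
            int fd;

            /* the tracepoint id from debugfs is what goes into attr.config */
            f = fopen("/sys/kernel/debug/tracing/events/kmem/mm_page_alloc/id", "r");
            if (!f || fscanf(f, "%llu", &id) != 1)
                    return 1;
            fclose(f);

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_TRACEPOINT;
            attr.size = sizeof(attr);
            attr.config = id;
            attr.disabled = 1;

            fd = sys_perf_event_open(&attr, 0, -1, -1, 0); /* this task, any CPU */
            if (fd < 0)
                    return 1;

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the workload to be measured here ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                    printf("kmem:mm_page_alloc events: %llu\n", count);
            close(fd);
            return 0;
    }
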
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 1dbc1eeb4c01..6be696b0a2bb 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt | |||
@@ -29,13 +29,67 @@ OPTIONS | |||
29 | Select the PMU event. Selection can be a symbolic event name | 29 | Select the PMU event. Selection can be a symbolic event name |
30 | (use 'perf list' to list all events) or a raw PMU | 30 | (use 'perf list' to list all events) or a raw PMU |
31 | event (eventsel+umask) in the form of rNNN where NNN is a | 31 | event (eventsel+umask) in the form of rNNN where NNN is a |
32 | hexadecimal event descriptor. | 32 | hexadecimal event descriptor. |
33 | 33 | ||
34 | -a:: | 34 | -a:: |
35 | system-wide collection | 35 | System-wide collection. |
36 | 36 | ||
37 | -l:: | 37 | -l:: |
38 | scale counter values | 38 | Scale counter values. |
39 | |||
40 | -p:: | ||
41 | --pid=:: | ||
42 | Record events on existing pid. | ||
43 | |||
44 | -r:: | ||
45 | --realtime=:: | ||
46 | Collect data with this RT SCHED_FIFO priority. | ||
47 | -A:: | ||
48 | --append:: | ||
49 | Append to the output file to do incremental profiling. | ||
50 | |||
51 | -f:: | ||
52 | --force:: | ||
53 | Overwrite existing data file. | ||
54 | |||
55 | -c:: | ||
56 | --count=:: | ||
57 | Event period to sample. | ||
58 | |||
59 | -o:: | ||
60 | --output=:: | ||
61 | Output file name. | ||
62 | |||
63 | -i:: | ||
64 | --inherit:: | ||
65 | Child tasks inherit counters. | ||
66 | -F:: | ||
67 | --freq=:: | ||
68 | Profile at this frequency. | ||
69 | |||
70 | -m:: | ||
71 | --mmap-pages=:: | ||
72 | Number of mmap data pages. | ||
73 | |||
74 | -g:: | ||
75 | --call-graph:: | ||
76 | Do call-graph (stack chain/backtrace) recording. | ||
77 | |||
78 | -v:: | ||
79 | --verbose:: | ||
80 | Be more verbose (show counter open errors, etc). | ||
81 | |||
82 | -s:: | ||
83 | --stat:: | ||
84 | Per thread counts. | ||
85 | |||
86 | -d:: | ||
87 | --data:: | ||
88 | Sample addresses. | ||
89 | |||
90 | -n:: | ||
91 | --no-samples:: | ||
92 | Don't sample. | ||
39 | 93 | ||
40 | SEE ALSO | 94 | SEE ALSO |
41 | -------- | 95 | -------- |
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 0d74346d21ab..484080dd5b6f 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt | |||
@@ -40,7 +40,7 @@ OPTIONS | |||
40 | -a:: | 40 | -a:: |
41 | system-wide collection | 41 | system-wide collection |
42 | 42 | ||
43 | -S:: | 43 | -c:: |
44 | scale counter values | 44 | scale counter values |
45 | 45 | ||
46 | EXAMPLES | 46 | EXAMPLES |
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 539d01289725..4a7d558dc309 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt | |||
@@ -3,36 +3,122 @@ perf-top(1) | |||
3 | 3 | ||
4 | NAME | 4 | NAME |
5 | ---- | 5 | ---- |
6 | perf-top - Run a command and profile it | 6 | perf-top - System profiling tool. |
7 | 7 | ||
8 | SYNOPSIS | 8 | SYNOPSIS |
9 | -------- | 9 | -------- |
10 | [verse] | 10 | [verse] |
11 | 'perf top' [-e <EVENT> | --event=EVENT] [-l] [-a] <command> | 11 | 'perf top' [-e <EVENT> | --event=EVENT] [<options>] |
12 | 12 | ||
13 | DESCRIPTION | 13 | DESCRIPTION |
14 | ----------- | 14 | ----------- |
15 | This command runs a command and gathers a performance counter profile | 15 | This command generates and displays a performance counter profile in realtime. |
16 | from it. | ||
17 | 16 | ||
18 | 17 | ||
19 | OPTIONS | 18 | OPTIONS |
20 | ------- | 19 | ------- |
21 | <command>...:: | 20 | -a:: |
22 | Any command you can specify in a shell. | 21 | --all-cpus:: |
22 | System-wide collection. (default) | ||
23 | |||
24 | -c <count>:: | ||
25 | --count=<count>:: | ||
26 | Event period to sample. | ||
27 | |||
28 | -C <cpu>:: | ||
29 | --CPU=<cpu>:: | ||
30 | CPU to profile. | ||
31 | |||
32 | -d <seconds>:: | ||
33 | --delay=<seconds>:: | ||
34 | Number of seconds to delay between refreshes. | ||
23 | 35 | ||
24 | -e:: | 36 | -e <event>:: |
25 | --event=:: | 37 | --event=<event>:: |
26 | Select the PMU event. Selection can be a symbolic event name | 38 | Select the PMU event. Selection can be a symbolic event name |
27 | (use 'perf list' to list all events) or a raw PMU | 39 | (use 'perf list' to list all events) or a raw PMU |
28 | event (eventsel+umask) in the form of rNNN where NNN is a | 40 | event (eventsel+umask) in the form of rNNN where NNN is a |
29 | hexadecimal event descriptor. | 41 | hexadecimal event descriptor. |
30 | 42 | ||
31 | -a:: | 43 | -E <entries>:: |
32 | system-wide collection | 44 | --entries=<entries>:: |
45 | Display this many functions. | ||
46 | |||
47 | -f <count>:: | ||
48 | --count-filter=<count>:: | ||
49 | Only display functions with more events than this. | ||
50 | |||
51 | -F <freq>:: | ||
52 | --freq=<freq>:: | ||
53 | Profile at this frequency. | ||
54 | |||
55 | -i:: | ||
56 | --inherit:: | ||
57 | Child tasks inherit counters; only makes sense with the -p option. | ||
58 | |||
59 | -k <path>:: | ||
60 | --vmlinux=<path>:: | ||
61 | Path to vmlinux. Required for annotation functionality. | ||
62 | |||
63 | -m <pages>:: | ||
64 | --mmap-pages=<pages>:: | ||
65 | Number of mmapped data pages. | ||
66 | |||
67 | -p <pid>:: | ||
68 | --pid=<pid>:: | ||
69 | Profile events on existing pid. | ||
70 | |||
71 | -r <priority>:: | ||
72 | --realtime=<priority>:: | ||
73 | Collect data with this RT SCHED_FIFO priority. | ||
74 | |||
75 | -s <symbol>:: | ||
76 | --sym-annotate=<symbol>:: | ||
77 | Annotate this symbol. Requires -k option. | ||
78 | |||
79 | -v:: | ||
80 | --verbose:: | ||
81 | Be more verbose (show counter open errors, etc). | ||
82 | |||
83 | -z:: | ||
84 | --zero:: | ||
85 | Zero history across display updates. | ||
86 | |||
87 | INTERACTIVE PROMPTING KEYS | ||
88 | -------------------------- | ||
89 | |||
90 | [d]:: | ||
91 | Display refresh delay. | ||
92 | |||
93 | [e]:: | ||
94 | Number of entries to display. | ||
95 | |||
96 | [E]:: | ||
97 | Event to display when multiple counters are active. | ||
98 | |||
99 | [f]:: | ||
100 | Profile display filter (>= hit count). | ||
101 | |||
102 | [F]:: | ||
103 | Annotation display filter (>= % of total). | ||
104 | |||
105 | [s]:: | ||
106 | Annotate symbol. | ||
107 | |||
108 | [S]:: | ||
109 | Stop annotation, return to full profile display. | ||
110 | |||
111 | [w]:: | ||
112 | Toggle between weighted sum and individual count[E]r profile. | ||
113 | |||
114 | [z]:: | ||
115 | Toggle event count zeroing across display updates. | ||
116 | |||
117 | [qQ]:: | ||
118 | Quit. | ||
119 | |||
120 | Pressing any unmapped key displays a menu and prompts for input. | ||
33 | 121 | ||
34 | -l:: | ||
35 | scale counter values | ||
36 | 122 | ||
37 | SEE ALSO | 123 | SEE ALSO |
38 | -------- | 124 | -------- |
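The expanded OPTIONS section above implies invocations along these lines (an illustrative sketch only; the frequency, PID, vmlinux path and symbol name are placeholders, not part of the patch):

    # system-wide profiling at 1000 Hz, refreshing the display every 5 seconds
    perf top -a -F 1000 -d 5

    # profile an existing task and annotate one symbol live (requires -k)
    perf top -p 1234 -k /boot/vmlinux -s schedule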
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index a5e9b876ca09..c045b4271e57 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -158,8 +158,10 @@ uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') | |||
158 | uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') | 158 | uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') |
159 | 159 | ||
160 | # If we're on a 64-bit kernel, use -m64 | 160 | # If we're on a 64-bit kernel, use -m64 |
161 | ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) | 161 | ifndef NO_64BIT |
162 | M64 := -m64 | 162 | ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) |
163 | M64 := -m64 | ||
164 | endif | ||
163 | endif | 165 | endif |
164 | 166 | ||
165 | # CFLAGS and LDFLAGS are for the users to override from the command line. | 167 | # CFLAGS and LDFLAGS are for the users to override from the command line. |
@@ -345,7 +347,6 @@ BUILTIN_OBJS += builtin-stat.o | |||
345 | BUILTIN_OBJS += builtin-top.o | 347 | BUILTIN_OBJS += builtin-top.o |
346 | 348 | ||
347 | PERFLIBS = $(LIB_FILE) | 349 | PERFLIBS = $(LIB_FILE) |
348 | EXTLIBS = -lbfd | ||
349 | 350 | ||
350 | # | 351 | # |
351 | # Platform specific tweaks | 352 | # Platform specific tweaks |
@@ -374,6 +375,39 @@ ifeq ($(uname_S),Darwin) | |||
374 | PTHREAD_LIBS = | 375 | PTHREAD_LIBS = |
375 | endif | 376 | endif |
376 | 377 | ||
378 | ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) | ||
379 | msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); | ||
380 | endif | ||
381 | |||
382 | ifdef NO_DEMANGLE | ||
383 | BASIC_CFLAGS += -DNO_DEMANGLE | ||
384 | else | ||
385 | has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd > /dev/null 2>&1 && echo y") | ||
386 | |||
387 | ifeq ($(has_bfd),y) | ||
388 | EXTLIBS += -lbfd | ||
389 | else | ||
390 | has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y") | ||
391 | ifeq ($(has_bfd_iberty),y) | ||
392 | EXTLIBS += -lbfd -liberty | ||
393 | else | ||
394 | has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y") | ||
395 | ifeq ($(has_bfd_iberty_z),y) | ||
396 | EXTLIBS += -lbfd -liberty -lz | ||
397 | else | ||
398 | has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -liberty > /dev/null 2>&1 && echo y") | ||
399 | ifeq ($(has_cplus_demangle),y) | ||
400 | EXTLIBS += -liberty | ||
401 | BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE | ||
402 | else | ||
403 | msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling) | ||
404 | BASIC_CFLAGS += -DNO_DEMANGLE | ||
405 | endif | ||
406 | endif | ||
407 | endif | ||
408 | endif | ||
409 | endif | ||
410 | |||
377 | ifndef CC_LD_DYNPATH | 411 | ifndef CC_LD_DYNPATH |
378 | ifdef NO_R_TO_GCC_LINKER | 412 | ifdef NO_R_TO_GCC_LINKER |
379 | # Some gcc does not accept and pass -R to the linker to specify | 413 | # Some gcc does not accept and pass -R to the linker to specify |
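The demangling probe chain added to the Makefile above can be reproduced by hand to see which link line the build will settle on; this is an illustrative equivalent of the same compile test, not part of the patch:

    # does libbfd provide bfd_demangle() with plain -lbfd?
    printf '#include <bfd.h>\nint main(void) { bfd_demangle(0, 0, 0); return 0; }\n' | \
        gcc -x c - -lbfd -o /dev/null 2>/dev/null && echo "-lbfd is sufficient"
    # on failure the Makefile retries with -liberty, then -liberty -lz,
    # and finally falls back to cplus_demangle() or -DNO_DEMANGLE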
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c index f990fa8a35c9..d88c6961274c 100644 --- a/tools/perf/builtin-list.c +++ b/tools/perf/builtin-list.c | |||
@@ -10,11 +10,12 @@ | |||
10 | 10 | ||
11 | #include "perf.h" | 11 | #include "perf.h" |
12 | 12 | ||
13 | #include "util/parse-options.h" | ||
14 | #include "util/parse-events.h" | 13 | #include "util/parse-events.h" |
14 | #include "util/cache.h" | ||
15 | 15 | ||
16 | int cmd_list(int argc __used, const char **argv __used, const char *prefix __used) | 16 | int cmd_list(int argc __used, const char **argv __used, const char *prefix __used) |
17 | { | 17 | { |
18 | setup_pager(); | ||
18 | print_events(); | 19 | print_events(); |
19 | return 0; | 20 | return 0; |
20 | } | 21 | } |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 6da09928130f..3d051b9cf25f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -34,7 +34,9 @@ static int output; | |||
34 | static const char *output_name = "perf.data"; | 34 | static const char *output_name = "perf.data"; |
35 | static int group = 0; | 35 | static int group = 0; |
36 | static unsigned int realtime_prio = 0; | 36 | static unsigned int realtime_prio = 0; |
37 | static int raw_samples = 0; | ||
37 | static int system_wide = 0; | 38 | static int system_wide = 0; |
39 | static int profile_cpu = -1; | ||
38 | static pid_t target_pid = -1; | 40 | static pid_t target_pid = -1; |
39 | static int inherit = 1; | 41 | static int inherit = 1; |
40 | static int force = 0; | 42 | static int force = 0; |
@@ -203,46 +205,48 @@ static void sig_atexit(void) | |||
203 | kill(getpid(), signr); | 205 | kill(getpid(), signr); |
204 | } | 206 | } |
205 | 207 | ||
206 | static void pid_synthesize_comm_event(pid_t pid, int full) | 208 | static pid_t pid_synthesize_comm_event(pid_t pid, int full) |
207 | { | 209 | { |
208 | struct comm_event comm_ev; | 210 | struct comm_event comm_ev; |
209 | char filename[PATH_MAX]; | 211 | char filename[PATH_MAX]; |
210 | char bf[BUFSIZ]; | 212 | char bf[BUFSIZ]; |
211 | int fd; | 213 | FILE *fp; |
212 | size_t size; | 214 | size_t size = 0; |
213 | char *field, *sep; | ||
214 | DIR *tasks; | 215 | DIR *tasks; |
215 | struct dirent dirent, *next; | 216 | struct dirent dirent, *next; |
217 | pid_t tgid = 0; | ||
216 | 218 | ||
217 | snprintf(filename, sizeof(filename), "/proc/%d/stat", pid); | 219 | snprintf(filename, sizeof(filename), "/proc/%d/status", pid); |
218 | 220 | ||
219 | fd = open(filename, O_RDONLY); | 221 | fp = fopen(filename, "r"); |
220 | if (fd < 0) { | 222 | if (fp == NULL) { |
221 | /* | 223 | /* |
222 | * We raced with a task exiting - just return: | 224 | * We raced with a task exiting - just return: |
223 | */ | 225 | */ |
224 | if (verbose) | 226 | if (verbose) |
225 | fprintf(stderr, "couldn't open %s\n", filename); | 227 | fprintf(stderr, "couldn't open %s\n", filename); |
226 | return; | 228 | return 0; |
227 | } | 229 | } |
228 | if (read(fd, bf, sizeof(bf)) < 0) { | ||
229 | fprintf(stderr, "couldn't read %s\n", filename); | ||
230 | exit(EXIT_FAILURE); | ||
231 | } | ||
232 | close(fd); | ||
233 | 230 | ||
234 | /* 9027 (cat) R 6747 9027 6747 34816 9027 ... */ | ||
235 | memset(&comm_ev, 0, sizeof(comm_ev)); | 231 | memset(&comm_ev, 0, sizeof(comm_ev)); |
236 | field = strchr(bf, '('); | 232 | while (!comm_ev.comm[0] || !comm_ev.pid) { |
237 | if (field == NULL) | 233 | if (fgets(bf, sizeof(bf), fp) == NULL) |
238 | goto out_failure; | 234 | goto out_failure; |
239 | sep = strchr(++field, ')'); | 235 | |
240 | if (sep == NULL) | 236 | if (memcmp(bf, "Name:", 5) == 0) { |
241 | goto out_failure; | 237 | char *name = bf + 5; |
242 | size = sep - field; | 238 | while (*name && isspace(*name)) |
243 | memcpy(comm_ev.comm, field, size++); | 239 | ++name; |
244 | 240 | size = strlen(name) - 1; | |
245 | comm_ev.pid = pid; | 241 | memcpy(comm_ev.comm, name, size++); |
242 | } else if (memcmp(bf, "Tgid:", 5) == 0) { | ||
243 | char *tgids = bf + 5; | ||
244 | while (*tgids && isspace(*tgids)) | ||
245 | ++tgids; | ||
246 | tgid = comm_ev.pid = atoi(tgids); | ||
247 | } | ||
248 | } | ||
249 | |||
246 | comm_ev.header.type = PERF_EVENT_COMM; | 250 | comm_ev.header.type = PERF_EVENT_COMM; |
247 | size = ALIGN(size, sizeof(u64)); | 251 | size = ALIGN(size, sizeof(u64)); |
248 | comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); | 252 | comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); |
@@ -251,7 +255,7 @@ static void pid_synthesize_comm_event(pid_t pid, int full) | |||
251 | comm_ev.tid = pid; | 255 | comm_ev.tid = pid; |
252 | 256 | ||
253 | write_output(&comm_ev, comm_ev.header.size); | 257 | write_output(&comm_ev, comm_ev.header.size); |
254 | return; | 258 | goto out_fclose; |
255 | } | 259 | } |
256 | 260 | ||
257 | snprintf(filename, sizeof(filename), "/proc/%d/task", pid); | 261 | snprintf(filename, sizeof(filename), "/proc/%d/task", pid); |
@@ -268,7 +272,10 @@ static void pid_synthesize_comm_event(pid_t pid, int full) | |||
268 | write_output(&comm_ev, comm_ev.header.size); | 272 | write_output(&comm_ev, comm_ev.header.size); |
269 | } | 273 | } |
270 | closedir(tasks); | 274 | closedir(tasks); |
271 | return; | 275 | |
276 | out_fclose: | ||
277 | fclose(fp); | ||
278 | return tgid; | ||
272 | 279 | ||
273 | out_failure: | 280 | out_failure: |
274 | fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", | 281 | fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", |
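The rewritten synthesis above parses /proc/<pid>/status rather than /proc/<pid>/stat, taking the comm from the Name: line and the pid from the Tgid: line. For reference, the fields it consumes look like this (values shown are illustrative):

    $ grep -E '^(Name|Tgid):' /proc/self/status
    Name:   bash
    Tgid:   4711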
@@ -276,7 +283,7 @@ out_failure: | |||
276 | exit(EXIT_FAILURE); | 283 | exit(EXIT_FAILURE); |
277 | } | 284 | } |
278 | 285 | ||
279 | static void pid_synthesize_mmap_samples(pid_t pid) | 286 | static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid) |
280 | { | 287 | { |
281 | char filename[PATH_MAX]; | 288 | char filename[PATH_MAX]; |
282 | FILE *fp; | 289 | FILE *fp; |
@@ -328,7 +335,7 @@ static void pid_synthesize_mmap_samples(pid_t pid) | |||
328 | mmap_ev.len -= mmap_ev.start; | 335 | mmap_ev.len -= mmap_ev.start; |
329 | mmap_ev.header.size = (sizeof(mmap_ev) - | 336 | mmap_ev.header.size = (sizeof(mmap_ev) - |
330 | (sizeof(mmap_ev.filename) - size)); | 337 | (sizeof(mmap_ev.filename) - size)); |
331 | mmap_ev.pid = pid; | 338 | mmap_ev.pid = tgid; |
332 | mmap_ev.tid = pid; | 339 | mmap_ev.tid = pid; |
333 | 340 | ||
334 | write_output(&mmap_ev, mmap_ev.header.size); | 341 | write_output(&mmap_ev, mmap_ev.header.size); |
@@ -347,14 +354,14 @@ static void synthesize_all(void) | |||
347 | 354 | ||
348 | while (!readdir_r(proc, &dirent, &next) && next) { | 355 | while (!readdir_r(proc, &dirent, &next) && next) { |
349 | char *end; | 356 | char *end; |
350 | pid_t pid; | 357 | pid_t pid, tgid; |
351 | 358 | ||
352 | pid = strtol(dirent.d_name, &end, 10); | 359 | pid = strtol(dirent.d_name, &end, 10); |
353 | if (*end) /* only interested in proper numerical dirents */ | 360 | if (*end) /* only interested in proper numerical dirents */ |
354 | continue; | 361 | continue; |
355 | 362 | ||
356 | pid_synthesize_comm_event(pid, 1); | 363 | tgid = pid_synthesize_comm_event(pid, 1); |
357 | pid_synthesize_mmap_samples(pid); | 364 | pid_synthesize_mmap_samples(pid, tgid); |
358 | } | 365 | } |
359 | 366 | ||
360 | closedir(proc); | 367 | closedir(proc); |
@@ -392,7 +399,7 @@ static void create_counter(int counter, int cpu, pid_t pid) | |||
392 | PERF_FORMAT_TOTAL_TIME_RUNNING | | 399 | PERF_FORMAT_TOTAL_TIME_RUNNING | |
393 | PERF_FORMAT_ID; | 400 | PERF_FORMAT_ID; |
394 | 401 | ||
395 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; | 402 | attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; |
396 | 403 | ||
397 | if (freq) { | 404 | if (freq) { |
398 | attr->sample_type |= PERF_SAMPLE_PERIOD; | 405 | attr->sample_type |= PERF_SAMPLE_PERIOD; |
@@ -412,6 +419,9 @@ static void create_counter(int counter, int cpu, pid_t pid) | |||
412 | if (call_graph) | 419 | if (call_graph) |
413 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; | 420 | attr->sample_type |= PERF_SAMPLE_CALLCHAIN; |
414 | 421 | ||
422 | if (raw_samples) | ||
423 | attr->sample_type |= PERF_SAMPLE_RAW; | ||
424 | |||
415 | attr->mmap = track; | 425 | attr->mmap = track; |
416 | attr->comm = track; | 426 | attr->comm = track; |
417 | attr->inherit = (cpu < 0) && inherit; | 427 | attr->inherit = (cpu < 0) && inherit; |
@@ -425,6 +435,8 @@ try_again: | |||
425 | 435 | ||
426 | if (err == EPERM) | 436 | if (err == EPERM) |
427 | die("Permission error - are you root?\n"); | 437 | die("Permission error - are you root?\n"); |
438 | else if (err == ENODEV && profile_cpu != -1) | ||
439 | die("No such device - did you specify an out-of-range profile CPU?\n"); | ||
428 | 440 | ||
429 | /* | 441 | /* |
430 | * If it's cycles then fall back to hrtimer | 442 | * If it's cycles then fall back to hrtimer |
@@ -524,10 +536,14 @@ static int __cmd_record(int argc, const char **argv) | |||
524 | signal(SIGCHLD, sig_handler); | 536 | signal(SIGCHLD, sig_handler); |
525 | signal(SIGINT, sig_handler); | 537 | signal(SIGINT, sig_handler); |
526 | 538 | ||
527 | if (!stat(output_name, &st) && !force && !append_file) { | 539 | if (!stat(output_name, &st) && st.st_size) { |
528 | fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", | 540 | if (!force && !append_file) { |
529 | output_name); | 541 | fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n", |
530 | exit(-1); | 542 | output_name); |
543 | exit(-1); | ||
544 | } | ||
545 | } else { | ||
546 | append_file = 0; | ||
531 | } | 547 | } |
532 | 548 | ||
533 | flags = O_CREAT|O_RDWR; | 549 | flags = O_CREAT|O_RDWR; |
@@ -554,16 +570,22 @@ static int __cmd_record(int argc, const char **argv) | |||
554 | if (pid == -1) | 570 | if (pid == -1) |
555 | pid = getpid(); | 571 | pid = getpid(); |
556 | 572 | ||
557 | open_counters(-1, pid); | 573 | open_counters(profile_cpu, pid); |
558 | } else for (i = 0; i < nr_cpus; i++) | 574 | } else { |
559 | open_counters(i, target_pid); | 575 | if (profile_cpu != -1) { |
576 | open_counters(profile_cpu, target_pid); | ||
577 | } else { | ||
578 | for (i = 0; i < nr_cpus; i++) | ||
579 | open_counters(i, target_pid); | ||
580 | } | ||
581 | } | ||
560 | 582 | ||
561 | if (file_new) | 583 | if (file_new) |
562 | perf_header__write(header, output); | 584 | perf_header__write(header, output); |
563 | 585 | ||
564 | if (!system_wide) { | 586 | if (!system_wide) { |
565 | pid_synthesize_comm_event(pid, 0); | 587 | pid_t tgid = pid_synthesize_comm_event(pid, 0); |
566 | pid_synthesize_mmap_samples(pid); | 588 | pid_synthesize_mmap_samples(pid, tgid); |
567 | } else | 589 | } else |
568 | synthesize_all(); | 590 | synthesize_all(); |
569 | 591 | ||
@@ -631,10 +653,14 @@ static const struct option options[] = { | |||
631 | "record events on existing pid"), | 653 | "record events on existing pid"), |
632 | OPT_INTEGER('r', "realtime", &realtime_prio, | 654 | OPT_INTEGER('r', "realtime", &realtime_prio, |
633 | "collect data with this RT SCHED_FIFO priority"), | 655 | "collect data with this RT SCHED_FIFO priority"), |
656 | OPT_BOOLEAN('R', "raw-samples", &raw_samples, | ||
657 | "collect raw sample records from all opened counters"), | ||
634 | OPT_BOOLEAN('a', "all-cpus", &system_wide, | 658 | OPT_BOOLEAN('a', "all-cpus", &system_wide, |
635 | "system-wide collection from all CPUs"), | 659 | "system-wide collection from all CPUs"), |
636 | OPT_BOOLEAN('A', "append", &append_file, | 660 | OPT_BOOLEAN('A', "append", &append_file, |
637 | "append to the output file to do incremental profiling"), | 661 | "append to the output file to do incremental profiling"), |
662 | OPT_INTEGER('C', "profile_cpu", &profile_cpu, | ||
663 | "CPU to profile on"), | ||
638 | OPT_BOOLEAN('f', "force", &force, | 664 | OPT_BOOLEAN('f', "force", &force, |
639 | "overwrite existing data file"), | 665 | "overwrite existing data file"), |
640 | OPT_LONG('c', "count", &default_interval, | 666 | OPT_LONG('c', "count", &default_interval, |
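Taken together, the new record options allow invocations such as the following (illustrative; the CPU number, PID and workload are placeholders):

    # gather raw sample records from an existing task, pinned to CPU 2
    perf record -R -C 2 -p 1234

    # append to an existing, non-empty perf.data; an empty or missing file
    # now silently falls back to creating a fresh one instead of erroring out
    perf record -A -- sleep 10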
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index b20a4b6e31b7..b53a60fc12de 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -31,7 +31,7 @@ | |||
31 | static char const *input_name = "perf.data"; | 31 | static char const *input_name = "perf.data"; |
32 | static char *vmlinux = NULL; | 32 | static char *vmlinux = NULL; |
33 | 33 | ||
34 | static char default_sort_order[] = "comm,dso"; | 34 | static char default_sort_order[] = "comm,dso,symbol"; |
35 | static char *sort_order = default_sort_order; | 35 | static char *sort_order = default_sort_order; |
36 | static char *dso_list_str, *comm_list_str, *sym_list_str, | 36 | static char *dso_list_str, *comm_list_str, *sym_list_str, |
37 | *col_width_list_str; | 37 | *col_width_list_str; |
@@ -68,7 +68,7 @@ static int callchain; | |||
68 | 68 | ||
69 | static | 69 | static |
70 | struct callchain_param callchain_param = { | 70 | struct callchain_param callchain_param = { |
71 | .mode = CHAIN_GRAPH_ABS, | 71 | .mode = CHAIN_GRAPH_REL, |
72 | .min_percent = 0.5 | 72 | .min_percent = 0.5 |
73 | }; | 73 | }; |
74 | 74 | ||
@@ -99,6 +99,7 @@ struct comm_event { | |||
99 | struct fork_event { | 99 | struct fork_event { |
100 | struct perf_event_header header; | 100 | struct perf_event_header header; |
101 | u32 pid, ppid; | 101 | u32 pid, ppid; |
102 | u32 tid, ptid; | ||
102 | }; | 103 | }; |
103 | 104 | ||
104 | struct lost_event { | 105 | struct lost_event { |
@@ -111,7 +112,9 @@ struct read_event { | |||
111 | struct perf_event_header header; | 112 | struct perf_event_header header; |
112 | u32 pid,tid; | 113 | u32 pid,tid; |
113 | u64 value; | 114 | u64 value; |
114 | u64 format[3]; | 115 | u64 time_enabled; |
116 | u64 time_running; | ||
117 | u64 id; | ||
115 | }; | 118 | }; |
116 | 119 | ||
117 | typedef union event_union { | 120 | typedef union event_union { |
@@ -252,7 +255,7 @@ static int strcommon(const char *pathname) | |||
252 | { | 255 | { |
253 | int n = 0; | 256 | int n = 0; |
254 | 257 | ||
255 | while (pathname[n] == cwd[n] && n < cwdlen) | 258 | while (n < cwdlen && pathname[n] == cwd[n]) |
256 | ++n; | 259 | ++n; |
257 | 260 | ||
258 | return n; | 261 | return n; |
@@ -697,7 +700,8 @@ sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) | |||
697 | size_t ret = 0; | 700 | size_t ret = 0; |
698 | 701 | ||
699 | if (verbose) | 702 | if (verbose) |
700 | ret += repsep_fprintf(fp, "%#018llx ", (u64)self->ip); | 703 | ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, |
704 | dso__symtab_origin(self->dso)); | ||
701 | 705 | ||
702 | ret += repsep_fprintf(fp, "[%c] ", self->level); | 706 | ret += repsep_fprintf(fp, "[%c] ", self->level); |
703 | if (self->sym) { | 707 | if (self->sym) { |
@@ -887,6 +891,21 @@ ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, | |||
887 | return ret; | 891 | return ret; |
888 | } | 892 | } |
889 | 893 | ||
894 | static struct symbol *rem_sq_bracket; | ||
895 | static struct callchain_list rem_hits; | ||
896 | |||
897 | static void init_rem_hits(void) | ||
898 | { | ||
899 | rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); | ||
900 | if (!rem_sq_bracket) { | ||
901 | fprintf(stderr, "Not enough memory to display remaining hits\n"); | ||
902 | return; | ||
903 | } | ||
904 | |||
905 | strcpy(rem_sq_bracket->name, "[...]"); | ||
906 | rem_hits.sym = rem_sq_bracket; | ||
907 | } | ||
908 | |||
890 | static size_t | 909 | static size_t |
891 | callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | 910 | callchain__fprintf_graph(FILE *fp, struct callchain_node *self, |
892 | u64 total_samples, int depth, int depth_mask) | 911 | u64 total_samples, int depth, int depth_mask) |
@@ -896,25 +915,34 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
896 | struct callchain_list *chain; | 915 | struct callchain_list *chain; |
897 | int new_depth_mask = depth_mask; | 916 | int new_depth_mask = depth_mask; |
898 | u64 new_total; | 917 | u64 new_total; |
918 | u64 remaining; | ||
899 | size_t ret = 0; | 919 | size_t ret = 0; |
900 | int i; | 920 | int i; |
901 | 921 | ||
902 | if (callchain_param.mode == CHAIN_GRAPH_REL) | 922 | if (callchain_param.mode == CHAIN_GRAPH_REL) |
903 | new_total = self->cumul_hit; | 923 | new_total = self->children_hit; |
904 | else | 924 | else |
905 | new_total = total_samples; | 925 | new_total = total_samples; |
906 | 926 | ||
927 | remaining = new_total; | ||
928 | |||
907 | node = rb_first(&self->rb_root); | 929 | node = rb_first(&self->rb_root); |
908 | while (node) { | 930 | while (node) { |
931 | u64 cumul; | ||
932 | |||
909 | child = rb_entry(node, struct callchain_node, rb_node); | 933 | child = rb_entry(node, struct callchain_node, rb_node); |
934 | cumul = cumul_hits(child); | ||
935 | remaining -= cumul; | ||
910 | 936 | ||
911 | /* | 937 | /* |
912 | * The depth mask manages the output of pipes that show | 938 | * The depth mask manages the output of pipes that show |
913 | * the depth. We don't want to keep the pipes of the current | 939 | * the depth. We don't want to keep the pipes of the current |
914 | * level for the last child of this depth | 940 | * level for the last child of this depth. |
941 | * Except if we have remaining filtered hits. They will | ||
942 | * supersede the last child | ||
915 | */ | 943 | */ |
916 | next = rb_next(node); | 944 | next = rb_next(node); |
917 | if (!next) | 945 | if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) |
918 | new_depth_mask &= ~(1 << (depth - 1)); | 946 | new_depth_mask &= ~(1 << (depth - 1)); |
919 | 947 | ||
920 | /* | 948 | /* |
@@ -929,7 +957,7 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
929 | ret += ipchain__fprintf_graph(fp, chain, depth, | 957 | ret += ipchain__fprintf_graph(fp, chain, depth, |
930 | new_depth_mask, i++, | 958 | new_depth_mask, i++, |
931 | new_total, | 959 | new_total, |
932 | child->cumul_hit); | 960 | cumul); |
933 | } | 961 | } |
934 | ret += callchain__fprintf_graph(fp, child, new_total, | 962 | ret += callchain__fprintf_graph(fp, child, new_total, |
935 | depth + 1, | 963 | depth + 1, |
@@ -937,6 +965,19 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
937 | node = next; | 965 | node = next; |
938 | } | 966 | } |
939 | 967 | ||
968 | if (callchain_param.mode == CHAIN_GRAPH_REL && | ||
969 | remaining && remaining != new_total) { | ||
970 | |||
971 | if (!rem_sq_bracket) | ||
972 | return ret; | ||
973 | |||
974 | new_depth_mask &= ~(1 << (depth - 1)); | ||
975 | |||
976 | ret += ipchain__fprintf_graph(fp, &rem_hits, depth, | ||
977 | new_depth_mask, 0, new_total, | ||
978 | remaining); | ||
979 | } | ||
980 | |||
940 | return ret; | 981 | return ret; |
941 | } | 982 | } |
942 | 983 | ||
@@ -1357,6 +1398,8 @@ static size_t output__fprintf(FILE *fp, u64 total_samples) | |||
1357 | unsigned int width; | 1398 | unsigned int width; |
1358 | char *col_width = col_width_list_str; | 1399 | char *col_width = col_width_list_str; |
1359 | 1400 | ||
1401 | init_rem_hits(); | ||
1402 | |||
1360 | fprintf(fp, "# Samples: %Ld\n", (u64)total_samples); | 1403 | fprintf(fp, "# Samples: %Ld\n", (u64)total_samples); |
1361 | fprintf(fp, "#\n"); | 1404 | fprintf(fp, "#\n"); |
1362 | 1405 | ||
@@ -1423,11 +1466,13 @@ print_entries: | |||
1423 | if (sort_order == default_sort_order && | 1466 | if (sort_order == default_sort_order && |
1424 | parent_pattern == default_parent_pattern) { | 1467 | parent_pattern == default_parent_pattern) { |
1425 | fprintf(fp, "#\n"); | 1468 | fprintf(fp, "#\n"); |
1426 | fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n"); | 1469 | fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n"); |
1427 | fprintf(fp, "#\n"); | 1470 | fprintf(fp, "#\n"); |
1428 | } | 1471 | } |
1429 | fprintf(fp, "\n"); | 1472 | fprintf(fp, "\n"); |
1430 | 1473 | ||
1474 | free(rem_sq_bracket); | ||
1475 | |||
1431 | return ret; | 1476 | return ret; |
1432 | } | 1477 | } |
1433 | 1478 | ||
@@ -1481,11 +1526,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |||
1481 | more_data += sizeof(u64); | 1526 | more_data += sizeof(u64); |
1482 | } | 1527 | } |
1483 | 1528 | ||
1484 | dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n", | 1529 | dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", |
1485 | (void *)(offset + head), | 1530 | (void *)(offset + head), |
1486 | (void *)(long)(event->header.size), | 1531 | (void *)(long)(event->header.size), |
1487 | event->header.misc, | 1532 | event->header.misc, |
1488 | event->ip.pid, | 1533 | event->ip.pid, event->ip.tid, |
1489 | (void *)(long)ip, | 1534 | (void *)(long)ip, |
1490 | (long long)period); | 1535 | (long long)period); |
1491 | 1536 | ||
@@ -1545,10 +1590,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) | |||
1545 | if (show & show_mask) { | 1590 | if (show & show_mask) { |
1546 | struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); | 1591 | struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); |
1547 | 1592 | ||
1548 | if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name)) | 1593 | if (dso_list && (!dso || !dso->name || |
1594 | !strlist__has_entry(dso_list, dso->name))) | ||
1549 | return 0; | 1595 | return 0; |
1550 | 1596 | ||
1551 | if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) | 1597 | if (sym_list && (!sym || !strlist__has_entry(sym_list, sym->name))) |
1552 | return 0; | 1598 | return 0; |
1553 | 1599 | ||
1554 | if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { | 1600 | if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { |
@@ -1567,10 +1613,11 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) | |||
1567 | struct thread *thread = threads__findnew(event->mmap.pid); | 1613 | struct thread *thread = threads__findnew(event->mmap.pid); |
1568 | struct map *map = map__new(&event->mmap); | 1614 | struct map *map = map__new(&event->mmap); |
1569 | 1615 | ||
1570 | dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", | 1616 | dprintf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n", |
1571 | (void *)(offset + head), | 1617 | (void *)(offset + head), |
1572 | (void *)(long)(event->header.size), | 1618 | (void *)(long)(event->header.size), |
1573 | event->mmap.pid, | 1619 | event->mmap.pid, |
1620 | event->mmap.tid, | ||
1574 | (void *)(long)event->mmap.start, | 1621 | (void *)(long)event->mmap.start, |
1575 | (void *)(long)event->mmap.len, | 1622 | (void *)(long)event->mmap.len, |
1576 | (void *)(long)event->mmap.pgoff, | 1623 | (void *)(long)event->mmap.pgoff, |
@@ -1608,15 +1655,27 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) | |||
1608 | } | 1655 | } |
1609 | 1656 | ||
1610 | static int | 1657 | static int |
1611 | process_fork_event(event_t *event, unsigned long offset, unsigned long head) | 1658 | process_task_event(event_t *event, unsigned long offset, unsigned long head) |
1612 | { | 1659 | { |
1613 | struct thread *thread = threads__findnew(event->fork.pid); | 1660 | struct thread *thread = threads__findnew(event->fork.pid); |
1614 | struct thread *parent = threads__findnew(event->fork.ppid); | 1661 | struct thread *parent = threads__findnew(event->fork.ppid); |
1615 | 1662 | ||
1616 | dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", | 1663 | dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n", |
1617 | (void *)(offset + head), | 1664 | (void *)(offset + head), |
1618 | (void *)(long)(event->header.size), | 1665 | (void *)(long)(event->header.size), |
1619 | event->fork.pid, event->fork.ppid); | 1666 | event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT", |
1667 | event->fork.pid, event->fork.tid, | ||
1668 | event->fork.ppid, event->fork.ptid); | ||
1669 | |||
1670 | /* | ||
1671 | * A thread clone will have the same PID for both | ||
1672 | * parent and child. | ||
1673 | */ | ||
1674 | if (thread == parent) | ||
1675 | return 0; | ||
1676 | |||
1677 | if (event->header.type == PERF_EVENT_EXIT) | ||
1678 | return 0; | ||
1620 | 1679 | ||
1621 | if (!thread || !parent || thread__fork(thread, parent)) { | 1680 | if (!thread || !parent || thread__fork(thread, parent)) { |
1622 | dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); | 1681 | dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); |
@@ -1677,14 +1736,37 @@ static void trace_event(event_t *event) | |||
1677 | dprintf(".\n"); | 1736 | dprintf(".\n"); |
1678 | } | 1737 | } |
1679 | 1738 | ||
1739 | static struct perf_header *header; | ||
1740 | |||
1741 | static struct perf_counter_attr *perf_header__find_attr(u64 id) | ||
1742 | { | ||
1743 | int i; | ||
1744 | |||
1745 | for (i = 0; i < header->attrs; i++) { | ||
1746 | struct perf_header_attr *attr = header->attr[i]; | ||
1747 | int j; | ||
1748 | |||
1749 | for (j = 0; j < attr->ids; j++) { | ||
1750 | if (attr->id[j] == id) | ||
1751 | return &attr->attr; | ||
1752 | } | ||
1753 | } | ||
1754 | |||
1755 | return NULL; | ||
1756 | } | ||
1757 | |||
1680 | static int | 1758 | static int |
1681 | process_read_event(event_t *event, unsigned long offset, unsigned long head) | 1759 | process_read_event(event_t *event, unsigned long offset, unsigned long head) |
1682 | { | 1760 | { |
1683 | dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n", | 1761 | struct perf_counter_attr *attr = perf_header__find_attr(event->read.id); |
1762 | |||
1763 | dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n", | ||
1684 | (void *)(offset + head), | 1764 | (void *)(offset + head), |
1685 | (void *)(long)(event->header.size), | 1765 | (void *)(long)(event->header.size), |
1686 | event->read.pid, | 1766 | event->read.pid, |
1687 | event->read.tid, | 1767 | event->read.tid, |
1768 | attr ? __event_name(attr->type, attr->config) | ||
1769 | : "FAIL", | ||
1688 | event->read.value); | 1770 | event->read.value); |
1689 | 1771 | ||
1690 | return 0; | 1772 | return 0; |
@@ -1706,7 +1788,8 @@ process_event(event_t *event, unsigned long offset, unsigned long head) | |||
1706 | return process_comm_event(event, offset, head); | 1788 | return process_comm_event(event, offset, head); |
1707 | 1789 | ||
1708 | case PERF_EVENT_FORK: | 1790 | case PERF_EVENT_FORK: |
1709 | return process_fork_event(event, offset, head); | 1791 | case PERF_EVENT_EXIT: |
1792 | return process_task_event(event, offset, head); | ||
1710 | 1793 | ||
1711 | case PERF_EVENT_LOST: | 1794 | case PERF_EVENT_LOST: |
1712 | return process_lost_event(event, offset, head); | 1795 | return process_lost_event(event, offset, head); |
@@ -1729,8 +1812,6 @@ process_event(event_t *event, unsigned long offset, unsigned long head) | |||
1729 | return 0; | 1812 | return 0; |
1730 | } | 1813 | } |
1731 | 1814 | ||
1732 | static struct perf_header *header; | ||
1733 | |||
1734 | static u64 perf_header__sample_type(void) | 1815 | static u64 perf_header__sample_type(void) |
1735 | { | 1816 | { |
1736 | u64 sample_type = 0; | 1817 | u64 sample_type = 0; |
@@ -1798,6 +1879,13 @@ static int __cmd_report(void) | |||
1798 | " -g?\n"); | 1879 | " -g?\n"); |
1799 | exit(-1); | 1880 | exit(-1); |
1800 | } | 1881 | } |
1882 | } else if (callchain_param.mode != CHAIN_NONE && !callchain) { | ||
1883 | callchain = 1; | ||
1884 | if (register_callchain_param(&callchain_param) < 0) { | ||
1885 | fprintf(stderr, "Can't register callchain" | ||
1886 | " params\n"); | ||
1887 | exit(-1); | ||
1888 | } | ||
1801 | } | 1889 | } |
1802 | 1890 | ||
1803 | if (load_kernel() < 0) { | 1891 | if (load_kernel() < 0) { |
@@ -1936,6 +2024,13 @@ parse_callchain_opt(const struct option *opt __used, const char *arg, | |||
1936 | else if (!strncmp(tok, "fractal", strlen(arg))) | 2024 | else if (!strncmp(tok, "fractal", strlen(arg))) |
1937 | callchain_param.mode = CHAIN_GRAPH_REL; | 2025 | callchain_param.mode = CHAIN_GRAPH_REL; |
1938 | 2026 | ||
2027 | else if (!strncmp(tok, "none", strlen(arg))) { | ||
2028 | callchain_param.mode = CHAIN_NONE; | ||
2029 | callchain = 0; | ||
2030 | |||
2031 | return 0; | ||
2032 | } | ||
2033 | |||
1939 | else | 2034 | else |
1940 | return -1; | 2035 | return -1; |
1941 | 2036 | ||
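With 'symbol' now part of the default sort key and the graph mode defaulting to relative (fractal) percentages, typical report invocations become (illustrative):

    # detailed per-symbol view, the new default ordering
    perf report

    # higher level overview, as the report footer now suggests
    perf report --sort comm,dso

    # parse callchains from the data file but do not print them
    perf report -g none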
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f9510eeeb6c7..b4b06c7903e1 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -496,7 +496,7 @@ static const struct option options[] = { | |||
496 | "stat events on existing pid"), | 496 | "stat events on existing pid"), |
497 | OPT_BOOLEAN('a', "all-cpus", &system_wide, | 497 | OPT_BOOLEAN('a', "all-cpus", &system_wide, |
498 | "system-wide collection from all CPUs"), | 498 | "system-wide collection from all CPUs"), |
499 | OPT_BOOLEAN('S', "scale", &scale, | 499 | OPT_BOOLEAN('c', "scale", &scale, |
500 | "scale/normalize counters"), | 500 | "scale/normalize counters"), |
501 | OPT_BOOLEAN('v', "verbose", &verbose, | 501 | OPT_BOOLEAN('v', "verbose", &verbose, |
502 | "be more verbose (show counter open errors, etc)"), | 502 | "be more verbose (show counter open errors, etc)"), |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index c0a423004e15..7de28ce9ca26 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include <fcntl.h> | 31 | #include <fcntl.h> |
32 | 32 | ||
33 | #include <stdio.h> | 33 | #include <stdio.h> |
34 | #include <termios.h> | ||
35 | #include <unistd.h> | ||
34 | 36 | ||
35 | #include <errno.h> | 37 | #include <errno.h> |
36 | #include <time.h> | 38 | #include <time.h> |
@@ -54,7 +56,7 @@ static int system_wide = 0; | |||
54 | 56 | ||
55 | static int default_interval = 100000; | 57 | static int default_interval = 100000; |
56 | 58 | ||
57 | static u64 count_filter = 5; | 59 | static int count_filter = 5; |
58 | static int print_entries = 15; | 60 | static int print_entries = 15; |
59 | 61 | ||
60 | static int target_pid = -1; | 62 | static int target_pid = -1; |
@@ -69,15 +71,28 @@ static int freq = 0; | |||
69 | static int verbose = 0; | 71 | static int verbose = 0; |
70 | static char *vmlinux = NULL; | 72 | static char *vmlinux = NULL; |
71 | 73 | ||
72 | static char *sym_filter; | ||
73 | static unsigned long filter_start; | ||
74 | static unsigned long filter_end; | ||
75 | |||
76 | static int delay_secs = 2; | 74 | static int delay_secs = 2; |
77 | static int zero; | 75 | static int zero; |
78 | static int dump_symtab; | 76 | static int dump_symtab; |
79 | 77 | ||
80 | /* | 78 | /* |
79 | * Source | ||
80 | */ | ||
81 | |||
82 | struct source_line { | ||
83 | u64 eip; | ||
84 | unsigned long count[MAX_COUNTERS]; | ||
85 | char *line; | ||
86 | struct source_line *next; | ||
87 | }; | ||
88 | |||
89 | static char *sym_filter = NULL; | ||
90 | struct sym_entry *sym_filter_entry = NULL; | ||
91 | static int sym_pcnt_filter = 5; | ||
92 | static int sym_counter = 0; | ||
93 | static int display_weighted = -1; | ||
94 | |||
95 | /* | ||
81 | * Symbols | 96 | * Symbols |
82 | */ | 97 | */ |
83 | 98 | ||
@@ -91,9 +106,237 @@ struct sym_entry { | |||
91 | unsigned long snap_count; | 106 | unsigned long snap_count; |
92 | double weight; | 107 | double weight; |
93 | int skip; | 108 | int skip; |
109 | struct source_line *source; | ||
110 | struct source_line *lines; | ||
111 | struct source_line **lines_tail; | ||
112 | pthread_mutex_t source_lock; | ||
94 | }; | 113 | }; |
95 | 114 | ||
96 | struct sym_entry *sym_filter_entry; | 115 | /* |
116 | * Source functions | ||
117 | */ | ||
118 | |||
119 | static void parse_source(struct sym_entry *syme) | ||
120 | { | ||
121 | struct symbol *sym; | ||
122 | struct module *module; | ||
123 | struct section *section = NULL; | ||
124 | FILE *file; | ||
125 | char command[PATH_MAX*2], *path = vmlinux; | ||
126 | u64 start, end, len; | ||
127 | |||
128 | if (!syme) | ||
129 | return; | ||
130 | |||
131 | if (syme->lines) { | ||
132 | pthread_mutex_lock(&syme->source_lock); | ||
133 | goto out_assign; | ||
134 | } | ||
135 | |||
136 | sym = (struct symbol *)(syme + 1); | ||
137 | module = sym->module; | ||
138 | |||
139 | if (module) | ||
140 | path = module->path; | ||
141 | if (!path) | ||
142 | return; | ||
143 | |||
144 | start = sym->obj_start; | ||
145 | if (!start) | ||
146 | start = sym->start; | ||
147 | |||
148 | if (module) { | ||
149 | section = module->sections->find_section(module->sections, ".text"); | ||
150 | if (section) | ||
151 | start -= section->vma; | ||
152 | } | ||
153 | |||
154 | end = start + sym->end - sym->start + 1; | ||
155 | len = sym->end - sym->start; | ||
156 | |||
157 | sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path); | ||
158 | |||
159 | file = popen(command, "r"); | ||
160 | if (!file) | ||
161 | return; | ||
162 | |||
163 | pthread_mutex_lock(&syme->source_lock); | ||
164 | syme->lines_tail = &syme->lines; | ||
165 | while (!feof(file)) { | ||
166 | struct source_line *src; | ||
167 | size_t dummy = 0; | ||
168 | char *c; | ||
169 | |||
170 | src = malloc(sizeof(struct source_line)); | ||
171 | assert(src != NULL); | ||
172 | memset(src, 0, sizeof(struct source_line)); | ||
173 | |||
174 | if (getline(&src->line, &dummy, file) < 0) | ||
175 | break; | ||
176 | if (!src->line) | ||
177 | break; | ||
178 | |||
179 | c = strchr(src->line, '\n'); | ||
180 | if (c) | ||
181 | *c = 0; | ||
182 | |||
183 | src->next = NULL; | ||
184 | *syme->lines_tail = src; | ||
185 | syme->lines_tail = &src->next; | ||
186 | |||
187 | if (strlen(src->line)>8 && src->line[8] == ':') { | ||
188 | src->eip = strtoull(src->line, NULL, 16); | ||
189 | if (section) | ||
190 | src->eip += section->vma; | ||
191 | } | ||
192 | if (strlen(src->line)>8 && src->line[16] == ':') { | ||
193 | src->eip = strtoull(src->line, NULL, 16); | ||
194 | if (section) | ||
195 | src->eip += section->vma; | ||
196 | } | ||
197 | } | ||
198 | pclose(file); | ||
199 | out_assign: | ||
200 | sym_filter_entry = syme; | ||
201 | pthread_mutex_unlock(&syme->source_lock); | ||
202 | } | ||
203 | |||
204 | static void __zero_source_counters(struct sym_entry *syme) | ||
205 | { | ||
206 | int i; | ||
207 | struct source_line *line; | ||
208 | |||
209 | line = syme->lines; | ||
210 | while (line) { | ||
211 | for (i = 0; i < nr_counters; i++) | ||
212 | line->count[i] = 0; | ||
213 | line = line->next; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) | ||
218 | { | ||
219 | struct source_line *line; | ||
220 | |||
221 | if (syme != sym_filter_entry) | ||
222 | return; | ||
223 | |||
224 | if (pthread_mutex_trylock(&syme->source_lock)) | ||
225 | return; | ||
226 | |||
227 | if (!syme->source) | ||
228 | goto out_unlock; | ||
229 | |||
230 | for (line = syme->lines; line; line = line->next) { | ||
231 | if (line->eip == ip) { | ||
232 | line->count[counter]++; | ||
233 | break; | ||
234 | } | ||
235 | if (line->eip > ip) | ||
236 | break; | ||
237 | } | ||
238 | out_unlock: | ||
239 | pthread_mutex_unlock(&syme->source_lock); | ||
240 | } | ||
241 | |||
242 | static void lookup_sym_source(struct sym_entry *syme) | ||
243 | { | ||
244 | struct symbol *symbol = (struct symbol *)(syme + 1); | ||
245 | struct source_line *line; | ||
246 | char pattern[PATH_MAX]; | ||
247 | char *idx; | ||
248 | |||
249 | sprintf(pattern, "<%s>:", symbol->name); | ||
250 | |||
251 | if (symbol->module) { | ||
252 | idx = strstr(pattern, "\t"); | ||
253 | if (idx) | ||
254 | *idx = 0; | ||
255 | } | ||
256 | |||
257 | pthread_mutex_lock(&syme->source_lock); | ||
258 | for (line = syme->lines; line; line = line->next) { | ||
259 | if (strstr(line->line, pattern)) { | ||
260 | syme->source = line; | ||
261 | break; | ||
262 | } | ||
263 | } | ||
264 | pthread_mutex_unlock(&syme->source_lock); | ||
265 | } | ||
266 | |||
267 | static void show_lines(struct source_line *queue, int count, int total) | ||
268 | { | ||
269 | int i; | ||
270 | struct source_line *line; | ||
271 | |||
272 | line = queue; | ||
273 | for (i = 0; i < count; i++) { | ||
274 | float pcnt = 100.0*(float)line->count[sym_counter]/(float)total; | ||
275 | |||
276 | printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line); | ||
277 | line = line->next; | ||
278 | } | ||
279 | } | ||
280 | |||
281 | #define TRACE_COUNT 3 | ||
282 | |||
283 | static void show_details(struct sym_entry *syme) | ||
284 | { | ||
285 | struct symbol *symbol; | ||
286 | struct source_line *line; | ||
287 | struct source_line *line_queue = NULL; | ||
288 | int displayed = 0; | ||
289 | int line_queue_count = 0, total = 0, more = 0; | ||
290 | |||
291 | if (!syme) | ||
292 | return; | ||
293 | |||
294 | if (!syme->source) | ||
295 | lookup_sym_source(syme); | ||
296 | |||
297 | if (!syme->source) | ||
298 | return; | ||
299 | |||
300 | symbol = (struct symbol *)(syme + 1); | ||
301 | printf("Showing %s for %s\n", event_name(sym_counter), symbol->name); | ||
302 | printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); | ||
303 | |||
304 | pthread_mutex_lock(&syme->source_lock); | ||
305 | line = syme->source; | ||
306 | while (line) { | ||
307 | total += line->count[sym_counter]; | ||
308 | line = line->next; | ||
309 | } | ||
310 | |||
311 | line = syme->source; | ||
312 | while (line) { | ||
313 | float pcnt = 0.0; | ||
314 | |||
315 | if (!line_queue_count) | ||
316 | line_queue = line; | ||
317 | line_queue_count++; | ||
318 | |||
319 | if (line->count[sym_counter]) | ||
320 | pcnt = 100.0 * line->count[sym_counter] / (float)total; | ||
321 | if (pcnt >= (float)sym_pcnt_filter) { | ||
322 | if (displayed <= print_entries) | ||
323 | show_lines(line_queue, line_queue_count, total); | ||
324 | else more++; | ||
325 | displayed += line_queue_count; | ||
326 | line_queue_count = 0; | ||
327 | line_queue = NULL; | ||
328 | } else if (line_queue_count > TRACE_COUNT) { | ||
329 | line_queue = line_queue->next; | ||
330 | line_queue_count--; | ||
331 | } | ||
332 | |||
333 | line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8; | ||
334 | line = line->next; | ||
335 | } | ||
336 | pthread_mutex_unlock(&syme->source_lock); | ||
337 | if (more) | ||
338 | printf("%d lines not displayed, maybe increase display entries [e]\n", more); | ||
339 | } | ||
97 | 340 | ||
98 | struct dso *kernel_dso; | 341 | struct dso *kernel_dso; |
99 | 342 | ||
@@ -112,6 +355,9 @@ static double sym_weight(const struct sym_entry *sym) | |||
112 | double weight = sym->snap_count; | 355 | double weight = sym->snap_count; |
113 | int counter; | 356 | int counter; |
114 | 357 | ||
358 | if (!display_weighted) | ||
359 | return weight; | ||
360 | |||
115 | for (counter = 1; counter < nr_counters-1; counter++) | 361 | for (counter = 1; counter < nr_counters-1; counter++) |
116 | weight *= sym->count[counter]; | 362 | weight *= sym->count[counter]; |
117 | 363 | ||
@@ -159,7 +405,7 @@ static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) | |||
159 | static void print_sym_table(void) | 405 | static void print_sym_table(void) |
160 | { | 406 | { |
161 | int printed = 0, j; | 407 | int printed = 0, j; |
162 | int counter; | 408 | int counter, snap = !display_weighted ? sym_counter : 0; |
163 | float samples_per_sec = samples/delay_secs; | 409 | float samples_per_sec = samples/delay_secs; |
164 | float ksamples_per_sec = (samples-userspace_samples)/delay_secs; | 410 | float ksamples_per_sec = (samples-userspace_samples)/delay_secs; |
165 | float sum_ksamples = 0.0; | 411 | float sum_ksamples = 0.0; |
@@ -175,7 +421,7 @@ static void print_sym_table(void) | |||
175 | pthread_mutex_unlock(&active_symbols_lock); | 421 | pthread_mutex_unlock(&active_symbols_lock); |
176 | 422 | ||
177 | list_for_each_entry_safe_from(syme, n, &active_symbols, node) { | 423 | list_for_each_entry_safe_from(syme, n, &active_symbols, node) { |
178 | syme->snap_count = syme->count[0]; | 424 | syme->snap_count = syme->count[snap]; |
179 | if (syme->snap_count != 0) { | 425 | if (syme->snap_count != 0) { |
180 | syme->weight = sym_weight(syme); | 426 | syme->weight = sym_weight(syme); |
181 | rb_insert_active_sym(&tmp, syme); | 427 | rb_insert_active_sym(&tmp, syme); |
@@ -195,7 +441,7 @@ static void print_sym_table(void) | |||
195 | samples_per_sec, | 441 | samples_per_sec, |
196 | 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); | 442 | 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); |
197 | 443 | ||
198 | if (nr_counters == 1) { | 444 | if (nr_counters == 1 || !display_weighted) { |
199 | printf("%Ld", (u64)attrs[0].sample_period); | 445 | printf("%Ld", (u64)attrs[0].sample_period); |
200 | if (freq) | 446 | if (freq) |
201 | printf("Hz "); | 447 | printf("Hz "); |
@@ -203,7 +449,9 @@ static void print_sym_table(void) | |||
203 | printf(" "); | 449 | printf(" "); |
204 | } | 450 | } |
205 | 451 | ||
206 | for (counter = 0; counter < nr_counters; counter++) { | 452 | if (!display_weighted) |
453 | printf("%s", event_name(sym_counter)); | ||
454 | else for (counter = 0; counter < nr_counters; counter++) { | ||
207 | if (counter) | 455 | if (counter) |
208 | printf("/"); | 456 | printf("/"); |
209 | 457 | ||
@@ -228,6 +476,11 @@ static void print_sym_table(void) | |||
228 | 476 | ||
229 | printf("------------------------------------------------------------------------------\n\n"); | 477 | printf("------------------------------------------------------------------------------\n\n"); |
230 | 478 | ||
479 | if (sym_filter_entry) { | ||
480 | show_details(sym_filter_entry); | ||
481 | return; | ||
482 | } | ||
483 | |||
231 | if (nr_counters == 1) | 484 | if (nr_counters == 1) |
232 | printf(" samples pcnt"); | 485 | printf(" samples pcnt"); |
233 | else | 486 | else |
@@ -242,13 +495,13 @@ static void print_sym_table(void) | |||
242 | struct symbol *sym = (struct symbol *)(syme + 1); | 495 | struct symbol *sym = (struct symbol *)(syme + 1); |
243 | double pcnt; | 496 | double pcnt; |
244 | 497 | ||
245 | if (++printed > print_entries || syme->snap_count < count_filter) | 498 | if (++printed > print_entries || (int)syme->snap_count < count_filter) |
246 | continue; | 499 | continue; |
247 | 500 | ||
248 | pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) / | 501 | pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) / |
249 | sum_ksamples)); | 502 | sum_ksamples)); |
250 | 503 | ||
251 | if (nr_counters == 1) | 504 | if (nr_counters == 1 || !display_weighted) |
252 | printf("%20.2f - ", syme->weight); | 505 | printf("%20.2f - ", syme->weight); |
253 | else | 506 | else |
254 | printf("%9.1f %10ld - ", syme->weight, syme->snap_count); | 507 | printf("%9.1f %10ld - ", syme->weight, syme->snap_count); |
@@ -261,19 +514,250 @@ static void print_sym_table(void) | |||
261 | } | 514 | } |
262 | } | 515 | } |
263 | 516 | ||
517 | static void prompt_integer(int *target, const char *msg) | ||
518 | { | ||
519 | char *buf = malloc(0), *p; | ||
520 | size_t dummy = 0; | ||
521 | int tmp; | ||
522 | |||
523 | fprintf(stdout, "\n%s: ", msg); | ||
524 | if (getline(&buf, &dummy, stdin) < 0) | ||
525 | return; | ||
526 | |||
527 | p = strchr(buf, '\n'); | ||
528 | if (p) | ||
529 | *p = 0; | ||
530 | |||
531 | p = buf; | ||
532 | while(*p) { | ||
533 | if (!isdigit(*p)) | ||
534 | goto out_free; | ||
535 | p++; | ||
536 | } | ||
537 | tmp = strtoul(buf, NULL, 10); | ||
538 | *target = tmp; | ||
539 | out_free: | ||
540 | free(buf); | ||
541 | } | ||
542 | |||
543 | static void prompt_percent(int *target, const char *msg) | ||
544 | { | ||
545 | int tmp = 0; | ||
546 | |||
547 | prompt_integer(&tmp, msg); | ||
548 | if (tmp >= 0 && tmp <= 100) | ||
549 | *target = tmp; | ||
550 | } | ||
551 | |||
552 | static void prompt_symbol(struct sym_entry **target, const char *msg) | ||
553 | { | ||
554 | char *buf = malloc(0), *p; | ||
555 | struct sym_entry *syme = *target, *n, *found = NULL; | ||
556 | size_t dummy = 0; | ||
557 | |||
558 | /* zero counters of active symbol */ | ||
559 | if (syme) { | ||
560 | pthread_mutex_lock(&syme->source_lock); | ||
561 | __zero_source_counters(syme); | ||
562 | *target = NULL; | ||
563 | pthread_mutex_unlock(&syme->source_lock); | ||
564 | } | ||
565 | |||
566 | fprintf(stdout, "\n%s: ", msg); | ||
567 | if (getline(&buf, &dummy, stdin) < 0) | ||
568 | goto out_free; | ||
569 | |||
570 | p = strchr(buf, '\n'); | ||
571 | if (p) | ||
572 | *p = 0; | ||
573 | |||
574 | pthread_mutex_lock(&active_symbols_lock); | ||
575 | syme = list_entry(active_symbols.next, struct sym_entry, node); | ||
576 | pthread_mutex_unlock(&active_symbols_lock); | ||
577 | |||
578 | list_for_each_entry_safe_from(syme, n, &active_symbols, node) { | ||
579 | struct symbol *sym = (struct symbol *)(syme + 1); | ||
580 | |||
581 | if (!strcmp(buf, sym->name)) { | ||
582 | found = syme; | ||
583 | break; | ||
584 | } | ||
585 | } | ||
586 | |||
587 | if (!found) { | ||
588 | fprintf(stderr, "Sorry, %s is not active.\n", sym_filter); | ||
589 | sleep(1); | ||
590 | return; | ||
591 | } else | ||
592 | parse_source(found); | ||
593 | |||
594 | out_free: | ||
595 | free(buf); | ||
596 | } | ||
597 | |||
598 | static void print_mapped_keys(void) | ||
599 | { | ||
600 | char *name = NULL; | ||
601 | |||
602 | if (sym_filter_entry) { | ||
603 | struct symbol *sym = (struct symbol *)(sym_filter_entry+1); | ||
604 | name = sym->name; | ||
605 | } | ||
606 | |||
607 | fprintf(stdout, "\nMapped keys:\n"); | ||
608 | fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", delay_secs); | ||
609 | fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries); | ||
610 | |||
611 | if (nr_counters > 1) | ||
612 | fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_counter)); | ||
613 | |||
614 | fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); | ||
615 | |||
616 | if (vmlinux) { | ||
617 | fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); | ||
618 | fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); | ||
619 | fprintf(stdout, "\t[S] stop annotation.\n"); | ||
620 | } | ||
621 | |||
622 | if (nr_counters > 1) | ||
623 | fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0); | ||
624 | |||
625 | fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0); | ||
626 | fprintf(stdout, "\t[qQ] quit.\n"); | ||
627 | } | ||
628 | |||
629 | static int key_mapped(int c) | ||
630 | { | ||
631 | switch (c) { | ||
632 | case 'd': | ||
633 | case 'e': | ||
634 | case 'f': | ||
635 | case 'z': | ||
636 | case 'q': | ||
637 | case 'Q': | ||
638 | return 1; | ||
639 | case 'E': | ||
640 | case 'w': | ||
641 | return nr_counters > 1 ? 1 : 0; | ||
642 | case 'F': | ||
643 | case 's': | ||
644 | case 'S': | ||
645 | return vmlinux ? 1 : 0; | ||
646 | } | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | static void handle_keypress(int c) | ||
652 | { | ||
653 | if (!key_mapped(c)) { | ||
654 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; | ||
655 | struct termios tc, save; | ||
656 | |||
657 | print_mapped_keys(); | ||
658 | fprintf(stdout, "\nEnter selection, or unmapped key to continue: "); | ||
659 | fflush(stdout); | ||
660 | |||
661 | tcgetattr(0, &save); | ||
662 | tc = save; | ||
663 | tc.c_lflag &= ~(ICANON | ECHO); | ||
664 | tc.c_cc[VMIN] = 0; | ||
665 | tc.c_cc[VTIME] = 0; | ||
666 | tcsetattr(0, TCSANOW, &tc); | ||
667 | |||
668 | poll(&stdin_poll, 1, -1); | ||
669 | c = getc(stdin); | ||
670 | |||
671 | tcsetattr(0, TCSAFLUSH, &save); | ||
672 | if (!key_mapped(c)) | ||
673 | return; | ||
674 | } | ||
675 | |||
676 | switch (c) { | ||
677 | case 'd': | ||
678 | prompt_integer(&delay_secs, "Enter display delay"); | ||
679 | break; | ||
680 | case 'e': | ||
681 | prompt_integer(&print_entries, "Enter display entries (lines)"); | ||
682 | break; | ||
683 | case 'E': | ||
684 | if (nr_counters > 1) { | ||
685 | int i; | ||
686 | |||
687 | fprintf(stderr, "\nAvailable events:"); | ||
688 | for (i = 0; i < nr_counters; i++) | ||
689 | fprintf(stderr, "\n\t%d %s", i, event_name(i)); | ||
690 | |||
691 | prompt_integer(&sym_counter, "Enter details event counter"); | ||
692 | |||
693 | if (sym_counter >= nr_counters) { | ||
694 | fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0)); | ||
695 | sym_counter = 0; | ||
696 | sleep(1); | ||
697 | } | ||
698 | } else sym_counter = 0; | ||
699 | break; | ||
700 | case 'f': | ||
701 | prompt_integer(&count_filter, "Enter display event count filter"); | ||
702 | break; | ||
703 | case 'F': | ||
704 | prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)"); | ||
705 | break; | ||
706 | case 'q': | ||
707 | case 'Q': | ||
708 | printf("exiting.\n"); | ||
709 | exit(0); | ||
710 | case 's': | ||
711 | prompt_symbol(&sym_filter_entry, "Enter details symbol"); | ||
712 | break; | ||
713 | case 'S': | ||
714 | if (!sym_filter_entry) | ||
715 | break; | ||
716 | else { | ||
717 | struct sym_entry *syme = sym_filter_entry; | ||
718 | |||
719 | pthread_mutex_lock(&syme->source_lock); | ||
720 | sym_filter_entry = NULL; | ||
721 | __zero_source_counters(syme); | ||
722 | pthread_mutex_unlock(&syme->source_lock); | ||
723 | } | ||
724 | break; | ||
725 | case 'w': | ||
726 | display_weighted = ~display_weighted; | ||
727 | break; | ||
728 | case 'z': | ||
729 | zero = ~zero; | ||
730 | break; | ||
731 | } | ||
732 | } | ||
733 | |||
264 | static void *display_thread(void *arg __used) | 734 | static void *display_thread(void *arg __used) |
265 | { | 735 | { |
266 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; | 736 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; |
267 | int delay_msecs = delay_secs * 1000; | 737 | struct termios tc, save; |
738 | int delay_msecs, c; | ||
739 | |||
740 | tcgetattr(0, &save); | ||
741 | tc = save; | ||
742 | tc.c_lflag &= ~(ICANON | ECHO); | ||
743 | tc.c_cc[VMIN] = 0; | ||
744 | tc.c_cc[VTIME] = 0; | ||
268 | 745 | ||
269 | printf("PerfTop refresh period: %d seconds\n", delay_secs); | 746 | repeat: |
747 | delay_msecs = delay_secs * 1000; | ||
748 | tcsetattr(0, TCSANOW, &tc); | ||
749 | /* trash return*/ | ||
750 | getc(stdin); | ||
270 | 751 | ||
271 | do { | 752 | do { |
272 | print_sym_table(); | 753 | print_sym_table(); |
273 | } while (!poll(&stdin_poll, 1, delay_msecs) == 1); | 754 | } while (!poll(&stdin_poll, 1, delay_msecs) == 1); |
274 | 755 | ||
275 | printf("key pressed - exiting.\n"); | 756 | c = getc(stdin); |
276 | exit(0); | 757 | tcsetattr(0, TCSAFLUSH, &save); |
758 | |||
759 | handle_keypress(c); | ||
760 | goto repeat; | ||
277 | 761 | ||
278 | return NULL; | 762 | return NULL; |
279 | } | 763 | } |
@@ -285,6 +769,7 @@ static const char *skip_symbols[] = { | |||
285 | "enter_idle", | 769 | "enter_idle", |
286 | "exit_idle", | 770 | "exit_idle", |
287 | "mwait_idle", | 771 | "mwait_idle", |
772 | "mwait_idle_with_hints", | ||
288 | "ppc64_runlatch_off", | 773 | "ppc64_runlatch_off", |
289 | "pseries_dedicated_idle_sleep", | 774 | "pseries_dedicated_idle_sleep", |
290 | NULL | 775 | NULL |
@@ -292,7 +777,6 @@ static const char *skip_symbols[] = { | |||
292 | 777 | ||
293 | static int symbol_filter(struct dso *self, struct symbol *sym) | 778 | static int symbol_filter(struct dso *self, struct symbol *sym) |
294 | { | 779 | { |
295 | static int filter_match; | ||
296 | struct sym_entry *syme; | 780 | struct sym_entry *syme; |
297 | const char *name = sym->name; | 781 | const char *name = sym->name; |
298 | int i; | 782 | int i; |
@@ -314,6 +798,10 @@ static int symbol_filter(struct dso *self, struct symbol *sym) | |||
314 | return 1; | 798 | return 1; |
315 | 799 | ||
316 | syme = dso__sym_priv(self, sym); | 800 | syme = dso__sym_priv(self, sym); |
801 | pthread_mutex_init(&syme->source_lock, NULL); | ||
802 | if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) | ||
803 | sym_filter_entry = syme; | ||
804 | |||
317 | for (i = 0; skip_symbols[i]; i++) { | 805 | for (i = 0; skip_symbols[i]; i++) { |
318 | if (!strcmp(skip_symbols[i], name)) { | 806 | if (!strcmp(skip_symbols[i], name)) { |
319 | syme->skip = 1; | 807 | syme->skip = 1; |
@@ -321,29 +809,6 @@ static int symbol_filter(struct dso *self, struct symbol *sym) | |||
321 | } | 809 | } |
322 | } | 810 | } |
323 | 811 | ||
324 | if (filter_match == 1) { | ||
325 | filter_end = sym->start; | ||
326 | filter_match = -1; | ||
327 | if (filter_end - filter_start > 10000) { | ||
328 | fprintf(stderr, | ||
329 | "hm, too large filter symbol <%s> - skipping.\n", | ||
330 | sym_filter); | ||
331 | fprintf(stderr, "symbol filter start: %016lx\n", | ||
332 | filter_start); | ||
333 | fprintf(stderr, " end: %016lx\n", | ||
334 | filter_end); | ||
335 | filter_end = filter_start = 0; | ||
336 | sym_filter = NULL; | ||
337 | sleep(1); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | if (filter_match == 0 && sym_filter && !strcmp(name, sym_filter)) { | ||
342 | filter_match = 1; | ||
343 | filter_start = sym->start; | ||
344 | } | ||
345 | |||
346 | |||
347 | return 0; | 812 | return 0; |
348 | } | 813 | } |
349 | 814 | ||
@@ -379,8 +844,6 @@ out_delete_dso: | |||
379 | return -1; | 844 | return -1; |
380 | } | 845 | } |
381 | 846 | ||
382 | #define TRACE_COUNT 3 | ||
383 | |||
384 | /* | 847 | /* |
385 | * Binary search in the histogram table and record the hit: | 848 | * Binary search in the histogram table and record the hit: |
386 | */ | 849 | */ |
@@ -393,6 +856,7 @@ static void record_ip(u64 ip, int counter) | |||
393 | 856 | ||
394 | if (!syme->skip) { | 857 | if (!syme->skip) { |
395 | syme->count[counter]++; | 858 | syme->count[counter]++; |
859 | record_precise_ip(syme, counter, ip); | ||
396 | pthread_mutex_lock(&active_symbols_lock); | 860 | pthread_mutex_lock(&active_symbols_lock); |
397 | if (list_empty(&syme->node) || !syme->node.next) | 861 | if (list_empty(&syme->node) || !syme->node.next) |
398 | __list_insert_active_sym(syme); | 862 | __list_insert_active_sym(syme); |
@@ -689,8 +1153,8 @@ static const struct option options[] = { | |||
689 | "put the counters into a counter group"), | 1153 | "put the counters into a counter group"), |
690 | OPT_BOOLEAN('i', "inherit", &inherit, | 1154 | OPT_BOOLEAN('i', "inherit", &inherit, |
691 | "child tasks inherit counters"), | 1155 | "child tasks inherit counters"), |
692 | OPT_STRING('s', "sym-filter", &sym_filter, "pattern", | 1156 | OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name", |
693 | "only display symbols matchig this pattern"), | 1157 | "symbol to annotate - requires -k option"), |
694 | OPT_BOOLEAN('z', "zero", &zero, | 1158 | OPT_BOOLEAN('z', "zero", &zero, |
695 | "zero history across updates"), | 1159 | "zero history across updates"), |
696 | OPT_INTEGER('F', "freq", &freq, | 1160 | OPT_INTEGER('F', "freq", &freq, |
@@ -733,6 +1197,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
733 | delay_secs = 1; | 1197 | delay_secs = 1; |
734 | 1198 | ||
735 | parse_symbols(); | 1199 | parse_symbols(); |
1200 | parse_source(sym_filter_entry); | ||
736 | 1201 | ||
737 | /* | 1202 | /* |
738 | * Fill in the ones not specifically initialized via -c: | 1203 | * Fill in the ones not specifically initialized via -c: |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 9d3c8141b8c1..011473411642 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <stdio.h> | 13 | #include <stdio.h> |
14 | #include <stdbool.h> | 14 | #include <stdbool.h> |
15 | #include <errno.h> | 15 | #include <errno.h> |
16 | #include <math.h> | ||
16 | 17 | ||
17 | #include "callchain.h" | 18 | #include "callchain.h" |
18 | 19 | ||
@@ -26,10 +27,14 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, | |||
26 | struct rb_node **p = &root->rb_node; | 27 | struct rb_node **p = &root->rb_node; |
27 | struct rb_node *parent = NULL; | 28 | struct rb_node *parent = NULL; |
28 | struct callchain_node *rnode; | 29 | struct callchain_node *rnode; |
30 | u64 chain_cumul = cumul_hits(chain); | ||
29 | 31 | ||
30 | while (*p) { | 32 | while (*p) { |
33 | u64 rnode_cumul; | ||
34 | |||
31 | parent = *p; | 35 | parent = *p; |
32 | rnode = rb_entry(parent, struct callchain_node, rb_node); | 36 | rnode = rb_entry(parent, struct callchain_node, rb_node); |
37 | rnode_cumul = cumul_hits(rnode); | ||
33 | 38 | ||
34 | switch (mode) { | 39 | switch (mode) { |
35 | case CHAIN_FLAT: | 40 | case CHAIN_FLAT: |
@@ -40,7 +45,7 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, | |||
40 | break; | 45 | break; |
41 | case CHAIN_GRAPH_ABS: /* Falldown */ | 46 | case CHAIN_GRAPH_ABS: /* Falldown */ |
42 | case CHAIN_GRAPH_REL: | 47 | case CHAIN_GRAPH_REL: |
43 | if (rnode->cumul_hit < chain->cumul_hit) | 48 | if (rnode_cumul < chain_cumul) |
44 | p = &(*p)->rb_left; | 49 | p = &(*p)->rb_left; |
45 | else | 50 | else |
46 | p = &(*p)->rb_right; | 51 | p = &(*p)->rb_right; |
@@ -87,7 +92,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node, | |||
87 | 92 | ||
88 | chain_for_each_child(child, node) { | 93 | chain_for_each_child(child, node) { |
89 | __sort_chain_graph_abs(child, min_hit); | 94 | __sort_chain_graph_abs(child, min_hit); |
90 | if (child->cumul_hit >= min_hit) | 95 | if (cumul_hits(child) >= min_hit) |
91 | rb_insert_callchain(&node->rb_root, child, | 96 | rb_insert_callchain(&node->rb_root, child, |
92 | CHAIN_GRAPH_ABS); | 97 | CHAIN_GRAPH_ABS); |
93 | } | 98 | } |
@@ -108,11 +113,11 @@ static void __sort_chain_graph_rel(struct callchain_node *node, | |||
108 | u64 min_hit; | 113 | u64 min_hit; |
109 | 114 | ||
110 | node->rb_root = RB_ROOT; | 115 | node->rb_root = RB_ROOT; |
111 | min_hit = node->cumul_hit * min_percent / 100.0; | 116 | min_hit = ceil(node->children_hit * min_percent); |
112 | 117 | ||
113 | chain_for_each_child(child, node) { | 118 | chain_for_each_child(child, node) { |
114 | __sort_chain_graph_rel(child, min_percent); | 119 | __sort_chain_graph_rel(child, min_percent); |
115 | if (child->cumul_hit >= min_hit) | 120 | if (cumul_hits(child) >= min_hit) |
116 | rb_insert_callchain(&node->rb_root, child, | 121 | rb_insert_callchain(&node->rb_root, child, |
117 | CHAIN_GRAPH_REL); | 122 | CHAIN_GRAPH_REL); |
118 | } | 123 | } |
@@ -122,7 +127,7 @@ static void | |||
122 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root, | 127 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root, |
123 | u64 min_hit __used, struct callchain_param *param) | 128 | u64 min_hit __used, struct callchain_param *param) |
124 | { | 129 | { |
125 | __sort_chain_graph_rel(chain_root, param->min_percent); | 130 | __sort_chain_graph_rel(chain_root, param->min_percent / 100.0); |
126 | rb_root->rb_node = chain_root->rb_root.rb_node; | 131 | rb_root->rb_node = chain_root->rb_root.rb_node; |
127 | } | 132 | } |
128 | 133 | ||
@@ -211,7 +216,8 @@ add_child(struct callchain_node *parent, struct ip_callchain *chain, | |||
211 | new = create_child(parent, false); | 216 | new = create_child(parent, false); |
212 | fill_node(new, chain, start, syms); | 217 | fill_node(new, chain, start, syms); |
213 | 218 | ||
214 | new->cumul_hit = new->hit = 1; | 219 | new->children_hit = 0; |
220 | new->hit = 1; | ||
215 | } | 221 | } |
216 | 222 | ||
217 | /* | 223 | /* |
@@ -241,7 +247,8 @@ split_add_child(struct callchain_node *parent, struct ip_callchain *chain, | |||
241 | 247 | ||
242 | /* split the hits */ | 248 | /* split the hits */ |
243 | new->hit = parent->hit; | 249 | new->hit = parent->hit; |
244 | new->cumul_hit = parent->cumul_hit; | 250 | new->children_hit = parent->children_hit; |
251 | parent->children_hit = cumul_hits(new); | ||
245 | new->val_nr = parent->val_nr - idx_local; | 252 | new->val_nr = parent->val_nr - idx_local; |
246 | parent->val_nr = idx_local; | 253 | parent->val_nr = idx_local; |
247 | 254 | ||
@@ -249,6 +256,7 @@ split_add_child(struct callchain_node *parent, struct ip_callchain *chain, | |||
249 | if (idx_total < chain->nr) { | 256 | if (idx_total < chain->nr) { |
250 | parent->hit = 0; | 257 | parent->hit = 0; |
251 | add_child(parent, chain, idx_total, syms); | 258 | add_child(parent, chain, idx_total, syms); |
259 | parent->children_hit++; | ||
252 | } else { | 260 | } else { |
253 | parent->hit = 1; | 261 | parent->hit = 1; |
254 | } | 262 | } |
@@ -269,13 +277,13 @@ __append_chain_children(struct callchain_node *root, struct ip_callchain *chain, | |||
269 | unsigned int ret = __append_chain(rnode, chain, start, syms); | 277 | unsigned int ret = __append_chain(rnode, chain, start, syms); |
270 | 278 | ||
271 | if (!ret) | 279 | if (!ret) |
272 | goto cumul; | 280 | goto inc_children_hit; |
273 | } | 281 | } |
274 | /* nothing in children, add to the current node */ | 282 | /* nothing in children, add to the current node */ |
275 | add_child(root, chain, start, syms); | 283 | add_child(root, chain, start, syms); |
276 | 284 | ||
277 | cumul: | 285 | inc_children_hit: |
278 | root->cumul_hit++; | 286 | root->children_hit++; |
279 | } | 287 | } |
280 | 288 | ||
281 | static int | 289 | static int |
@@ -317,8 +325,6 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain, | |||
317 | /* we match 100% of the path, increment the hit */ | 325 | /* we match 100% of the path, increment the hit */ |
318 | if (i - start == root->val_nr && i == chain->nr) { | 326 | if (i - start == root->val_nr && i == chain->nr) { |
319 | root->hit++; | 327 | root->hit++; |
320 | root->cumul_hit++; | ||
321 | |||
322 | return 0; | 328 | return 0; |
323 | } | 329 | } |
324 | 330 | ||
@@ -331,5 +337,7 @@ __append_chain(struct callchain_node *root, struct ip_callchain *chain, | |||
331 | void append_chain(struct callchain_node *root, struct ip_callchain *chain, | 337 | void append_chain(struct callchain_node *root, struct ip_callchain *chain, |
332 | struct symbol **syms) | 338 | struct symbol **syms) |
333 | { | 339 | { |
340 | if (!chain->nr) | ||
341 | return; | ||
334 | __append_chain_children(root, chain, syms, 0); | 342 | __append_chain_children(root, chain, syms, 0); |
335 | } | 343 | } |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 7812122bea1d..a926ae4f5a16 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include "symbol.h" | 7 | #include "symbol.h" |
8 | 8 | ||
9 | enum chain_mode { | 9 | enum chain_mode { |
10 | CHAIN_NONE, | ||
10 | CHAIN_FLAT, | 11 | CHAIN_FLAT, |
11 | CHAIN_GRAPH_ABS, | 12 | CHAIN_GRAPH_ABS, |
12 | CHAIN_GRAPH_REL | 13 | CHAIN_GRAPH_REL |
@@ -21,7 +22,7 @@ struct callchain_node { | |||
21 | struct rb_root rb_root; /* sorted tree of children */ | 22 | struct rb_root rb_root; /* sorted tree of children */ |
22 | unsigned int val_nr; | 23 | unsigned int val_nr; |
23 | u64 hit; | 24 | u64 hit; |
24 | u64 cumul_hit; /* hit + hits of children */ | 25 | u64 children_hit; |
25 | }; | 26 | }; |
26 | 27 | ||
27 | struct callchain_param; | 28 | struct callchain_param; |
@@ -48,6 +49,11 @@ static inline void callchain_init(struct callchain_node *node) | |||
48 | INIT_LIST_HEAD(&node->val); | 49 | INIT_LIST_HEAD(&node->val); |
49 | } | 50 | } |
50 | 51 | ||
52 | static inline u64 cumul_hits(struct callchain_node *node) | ||
53 | { | ||
54 | return node->hit + node->children_hit; | ||
55 | } | ||
56 | |||
51 | int register_callchain_param(struct callchain_param *param); | 57 | int register_callchain_param(struct callchain_param *param); |
52 | void append_chain(struct callchain_node *root, struct ip_callchain *chain, | 58 | void append_chain(struct callchain_node *root, struct ip_callchain *chain, |
53 | struct symbol **syms); | 59 | struct symbol **syms); |
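
With cumul_hit replaced by children_hit, a node's cumulative weight is now computed on demand as its own hits plus its children's, via the cumul_hits() inline above. A small self-contained illustration of that accounting, using a hypothetical node layout:

#include <stdio.h>

struct node {
	unsigned long long hit;			/* samples ending exactly here */
	unsigned long long children_hit;	/* samples that went deeper */
};

static unsigned long long cumul_hits(const struct node *n)
{
	return n->hit + n->children_hit;
}

int main(void)
{
	struct node leaf = { .hit = 3, .children_hit = 0 };
	struct node root = { .hit = 1, .children_hit = 3 };

	/* root's cumulative weight counts its own sample plus the leaf's */
	printf("leaf=%llu root=%llu\n", cumul_hits(&leaf), cumul_hits(&root));
	return 0;
}
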
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 450384b3bbe5..b92a457ca32e 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -185,6 +185,8 @@ static void do_read(int fd, void *buf, size_t size) | |||
185 | 185 | ||
186 | if (ret < 0) | 186 | if (ret < 0) |
187 | die("failed to read"); | 187 | die("failed to read"); |
188 | if (ret == 0) | ||
189 | die("failed to read: missing data"); | ||
188 | 190 | ||
189 | size -= ret; | 191 | size -= ret; |
190 | buf += ret; | 192 | buf += ret; |
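
do_read now treats a zero-byte read (EOF before the requested size) as fatal instead of looping forever on a truncated file. A standalone sketch of a read-exactly loop with the same behaviour; die() here is just a stand-in, not perf's helper:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void die(const char *msg)
{
	fprintf(stderr, "%s\n", msg);
	exit(1);
}

/* read exactly 'size' bytes or abort; a short file would otherwise spin */
static void read_exact(int fd, void *buf, size_t size)
{
	while (size) {
		ssize_t ret = read(fd, buf, size);

		if (ret < 0)
			die("failed to read");
		if (ret == 0)
			die("failed to read: missing data");

		size -= ret;
		buf = (char *)buf + ret;
	}
}
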
@@ -213,9 +215,10 @@ struct perf_header *perf_header__read(int fd) | |||
213 | 215 | ||
214 | for (i = 0; i < nr_attrs; i++) { | 216 | for (i = 0; i < nr_attrs; i++) { |
215 | struct perf_header_attr *attr; | 217 | struct perf_header_attr *attr; |
216 | off_t tmp = lseek(fd, 0, SEEK_CUR); | 218 | off_t tmp; |
217 | 219 | ||
218 | do_read(fd, &f_attr, sizeof(f_attr)); | 220 | do_read(fd, &f_attr, sizeof(f_attr)); |
221 | tmp = lseek(fd, 0, SEEK_CUR); | ||
219 | 222 | ||
220 | attr = perf_header_attr__new(&f_attr.attr); | 223 | attr = perf_header_attr__new(&f_attr.attr); |
221 | 224 | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 7bdad8df22a6..044178408783 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -121,13 +121,29 @@ static unsigned long hw_cache_stat[C(MAX)] = { | |||
121 | (strcmp(sys_dirent.d_name, ".")) && \ | 121 | (strcmp(sys_dirent.d_name, ".")) && \ |
122 | (strcmp(sys_dirent.d_name, ".."))) | 122 | (strcmp(sys_dirent.d_name, ".."))) |
123 | 123 | ||
124 | static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) | ||
125 | { | ||
126 | char evt_path[MAXPATHLEN]; | ||
127 | int fd; | ||
128 | |||
129 | snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path, | ||
130 | sys_dir->d_name, evt_dir->d_name); | ||
131 | fd = open(evt_path, O_RDONLY); | ||
132 | if (fd < 0) | ||
133 | return -EINVAL; | ||
134 | close(fd); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
124 | #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st) \ | 139 | #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st) \ |
125 | while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ | 140 | while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ |
126 | if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path, \ | 141 | if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path, \ |
127 | sys_dirent.d_name, evt_dirent.d_name) && \ | 142 | sys_dirent.d_name, evt_dirent.d_name) && \ |
128 | (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \ | 143 | (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \ |
129 | (strcmp(evt_dirent.d_name, ".")) && \ | 144 | (strcmp(evt_dirent.d_name, ".")) && \ |
130 | (strcmp(evt_dirent.d_name, ".."))) | 145 | (strcmp(evt_dirent.d_name, "..")) && \ |
146 | (!tp_event_has_id(&sys_dirent, &evt_dirent))) | ||
131 | 147 | ||
132 | #define MAX_EVENT_LENGTH 30 | 148 | #define MAX_EVENT_LENGTH 30 |
133 | 149 | ||
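
Tracepoint directories that lack an "id" file cannot be used as perf events, so the new tp_event_has_id() probe is folded into the for_each_event iteration filter. A minimal sketch of the existence check; the function name and paths here are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* returns 0 when <base>/<sys>/<event>/id can be opened, -1 otherwise */
static int event_has_id(const char *base, const char *sys, const char *evt)
{
	char path[4096];
	int fd;

	snprintf(path, sizeof(path), "%s/%s/%s/id", base, sys, evt);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	close(fd);
	return 0;
}
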
@@ -223,9 +239,15 @@ char *event_name(int counter) | |||
223 | { | 239 | { |
224 | u64 config = attrs[counter].config; | 240 | u64 config = attrs[counter].config; |
225 | int type = attrs[counter].type; | 241 | int type = attrs[counter].type; |
242 | |||
243 | return __event_name(type, config); | ||
244 | } | ||
245 | |||
246 | char *__event_name(int type, u64 config) | ||
247 | { | ||
226 | static char buf[32]; | 248 | static char buf[32]; |
227 | 249 | ||
228 | if (attrs[counter].type == PERF_TYPE_RAW) { | 250 | if (type == PERF_TYPE_RAW) { |
229 | sprintf(buf, "raw 0x%llx", config); | 251 | sprintf(buf, "raw 0x%llx", config); |
230 | return buf; | 252 | return buf; |
231 | } | 253 | } |
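
event_name() is split so the type/config formatting can be reused by callers that do not have a counter index, with the old interface kept as a thin wrapper. A sketch of the same wrapper-over-worker refactor using hypothetical names and a hypothetical per-counter table:

#include <stdio.h>

struct attr { int type; unsigned long long config; };

static struct attr attrs[8];	/* hypothetical per-counter table */

/* worker: formats from raw parameters, callable without a counter index */
static const char *format_event(int type, unsigned long long config)
{
	static char buf[64];

	snprintf(buf, sizeof(buf), "type %d config 0x%llx", type, config);
	return buf;
}

/* wrapper: preserves the old counter-indexed interface */
static const char *counter_name(int counter)
{
	return format_event(attrs[counter].type, attrs[counter].config);
}

int main(void)
{
	attrs[0].type = 1;
	attrs[0].config = 0x3c;
	printf("%s\n", counter_name(0));
	return 0;
}
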
@@ -357,6 +379,7 @@ static int parse_tracepoint_event(const char **strp, | |||
357 | struct perf_counter_attr *attr) | 379 | struct perf_counter_attr *attr) |
358 | { | 380 | { |
359 | const char *evt_name; | 381 | const char *evt_name; |
382 | char *flags; | ||
360 | char sys_name[MAX_EVENT_LENGTH]; | 383 | char sys_name[MAX_EVENT_LENGTH]; |
361 | char id_buf[4]; | 384 | char id_buf[4]; |
362 | int fd; | 385 | int fd; |
@@ -378,6 +401,15 @@ static int parse_tracepoint_event(const char **strp, | |||
378 | strncpy(sys_name, *strp, sys_length); | 401 | strncpy(sys_name, *strp, sys_length); |
379 | sys_name[sys_length] = '\0'; | 402 | sys_name[sys_length] = '\0'; |
380 | evt_name = evt_name + 1; | 403 | evt_name = evt_name + 1; |
404 | |||
405 | flags = strchr(evt_name, ':'); | ||
406 | if (flags) { | ||
407 | *flags = '\0'; | ||
408 | flags++; | ||
409 | if (!strncmp(flags, "record", strlen(flags))) | ||
410 | attr->sample_type |= PERF_SAMPLE_RAW; | ||
411 | } | ||
412 | |||
381 | evt_length = strlen(evt_name); | 413 | evt_length = strlen(evt_name); |
382 | if (evt_length >= MAX_EVENT_LENGTH) | 414 | if (evt_length >= MAX_EVENT_LENGTH) |
383 | return 0; | 415 | return 0; |
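
The tracepoint parser now accepts an optional ":record" suffix (e.g. "sched:sched_switch:record") and maps it to PERF_SAMPLE_RAW. A standalone sketch of splitting such a spec and testing the trailing flag; the flag variable is hypothetical:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char spec[] = "sched:sched_switch:record";
	unsigned int sample_raw = 0;

	char *evt = strchr(spec, ':');	/* "sched" | "sched_switch:record" */
	*evt++ = '\0';

	char *flags = strchr(evt, ':');	/* optional trailing modifier */
	if (flags) {
		*flags++ = '\0';
		if (!strncmp(flags, "record", strlen(flags)))
			sample_raw = 1;
	}

	printf("sys=%s event=%s raw=%u\n", spec, evt, sample_raw);
	return 0;
}
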
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 1ea5d09b6eb1..192a962e3a0f 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -10,6 +10,7 @@ extern int nr_counters; | |||
10 | extern struct perf_counter_attr attrs[MAX_COUNTERS]; | 10 | extern struct perf_counter_attr attrs[MAX_COUNTERS]; |
11 | 11 | ||
12 | extern char *event_name(int ctr); | 12 | extern char *event_name(int ctr); |
13 | extern char *__event_name(int type, u64 config); | ||
13 | 14 | ||
14 | extern int parse_events(const struct option *opt, const char *str, int unset); | 15 | extern int parse_events(const struct option *opt, const char *str, int unset); |
15 | 16 | ||
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c index c6e5dc0dc82f..2726fe40eb5d 100644 --- a/tools/perf/util/quote.c +++ b/tools/perf/util/quote.c | |||
@@ -318,7 +318,7 @@ char *quote_path_relative(const char *in, int len, | |||
318 | strbuf_addch(out, '"'); | 318 | strbuf_addch(out, '"'); |
319 | if (prefix) { | 319 | if (prefix) { |
320 | int off = 0; | 320 | int off = 0; |
321 | while (prefix[off] && off < len && prefix[off] == in[off]) | 321 | while (off < len && prefix[off] && prefix[off] == in[off]) |
322 | if (prefix[off] == '/') { | 322 | if (prefix[off] == '/') { |
323 | prefix += off + 1; | 323 | prefix += off + 1; |
324 | in += off + 1; | 324 | in += off + 1; |
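
The quote_path_relative fix reorders the loop condition so "off < len" is evaluated before either string is indexed, keeping the comparison within the caller-supplied length. The real function advances by directory components; this standalone helper only shows the reordered short-circuit condition:

#include <stdio.h>

/* count how many leading bytes of 'in' (at most 'len') match 'prefix' */
static int common_prefix(const char *prefix, const char *in, int len)
{
	int off = 0;

	/* bounds check first, then the string contents */
	while (off < len && prefix[off] && prefix[off] == in[off])
		off++;
	return off;
}

int main(void)
{
	printf("%d\n", common_prefix("/usr/src/", "/usr/src/linux", 14));
	return 0;
}
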
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 28106059bf12..5c0f42e6b33b 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -6,14 +6,18 @@ | |||
6 | #include <libelf.h> | 6 | #include <libelf.h> |
7 | #include <gelf.h> | 7 | #include <gelf.h> |
8 | #include <elf.h> | 8 | #include <elf.h> |
9 | #include <bfd.h> | ||
10 | 9 | ||
11 | const char *sym_hist_filter; | 10 | const char *sym_hist_filter; |
12 | 11 | ||
13 | #ifndef DMGL_PARAMS | 12 | enum dso_origin { |
14 | #define DMGL_PARAMS (1 << 0) /* Include function args */ | 13 | DSO__ORIG_KERNEL = 0, |
15 | #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ | 14 | DSO__ORIG_JAVA_JIT, |
16 | #endif | 15 | DSO__ORIG_FEDORA, |
16 | DSO__ORIG_UBUNTU, | ||
17 | DSO__ORIG_BUILDID, | ||
18 | DSO__ORIG_DSO, | ||
19 | DSO__ORIG_NOT_FOUND, | ||
20 | }; | ||
17 | 21 | ||
18 | static struct symbol *symbol__new(u64 start, u64 len, | 22 | static struct symbol *symbol__new(u64 start, u64 len, |
19 | const char *name, unsigned int priv_size, | 23 | const char *name, unsigned int priv_size, |
@@ -72,6 +76,7 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size) | |||
72 | self->sym_priv_size = sym_priv_size; | 76 | self->sym_priv_size = sym_priv_size; |
73 | self->find_symbol = dso__find_symbol; | 77 | self->find_symbol = dso__find_symbol; |
74 | self->slen_calculated = 0; | 78 | self->slen_calculated = 0; |
79 | self->origin = DSO__ORIG_NOT_FOUND; | ||
75 | } | 80 | } |
76 | 81 | ||
77 | return self; | 82 | return self; |
@@ -565,7 +570,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, | |||
565 | goto out_elf_end; | 570 | goto out_elf_end; |
566 | 571 | ||
567 | secstrs = elf_getdata(sec_strndx, NULL); | 572 | secstrs = elf_getdata(sec_strndx, NULL); |
568 | if (symstrs == NULL) | 573 | if (secstrs == NULL) |
569 | goto out_elf_end; | 574 | goto out_elf_end; |
570 | 575 | ||
571 | nr_syms = shdr.sh_size / shdr.sh_entsize; | 576 | nr_syms = shdr.sh_size / shdr.sh_entsize; |
@@ -652,11 +657,85 @@ out_close: | |||
652 | return err; | 657 | return err; |
653 | } | 658 | } |
654 | 659 | ||
660 | #define BUILD_ID_SIZE 128 | ||
661 | |||
662 | static char *dso__read_build_id(struct dso *self, int verbose) | ||
663 | { | ||
664 | int i; | ||
665 | GElf_Ehdr ehdr; | ||
666 | GElf_Shdr shdr; | ||
667 | Elf_Data *build_id_data; | ||
668 | Elf_Scn *sec; | ||
669 | char *build_id = NULL, *bid; | ||
670 | unsigned char *raw; | ||
671 | Elf *elf; | ||
672 | int fd = open(self->name, O_RDONLY); | ||
673 | |||
674 | if (fd < 0) | ||
675 | goto out; | ||
676 | |||
677 | elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); | ||
678 | if (elf == NULL) { | ||
679 | if (verbose) | ||
680 | fprintf(stderr, "%s: cannot read %s ELF file.\n", | ||
681 | __func__, self->name); | ||
682 | goto out_close; | ||
683 | } | ||
684 | |||
685 | if (gelf_getehdr(elf, &ehdr) == NULL) { | ||
686 | if (verbose) | ||
687 | fprintf(stderr, "%s: cannot get elf header.\n", __func__); | ||
688 | goto out_elf_end; | ||
689 | } | ||
690 | |||
691 | sec = elf_section_by_name(elf, &ehdr, &shdr, ".note.gnu.build-id", NULL); | ||
692 | if (sec == NULL) | ||
693 | goto out_elf_end; | ||
694 | |||
695 | build_id_data = elf_getdata(sec, NULL); | ||
696 | if (build_id_data == NULL) | ||
697 | goto out_elf_end; | ||
698 | build_id = malloc(BUILD_ID_SIZE); | ||
699 | if (build_id == NULL) | ||
700 | goto out_elf_end; | ||
701 | raw = build_id_data->d_buf + 16; | ||
702 | bid = build_id; | ||
703 | |||
704 | for (i = 0; i < 20; ++i) { | ||
705 | sprintf(bid, "%02x", *raw); | ||
706 | ++raw; | ||
707 | bid += 2; | ||
708 | } | ||
709 | if (verbose >= 2) | ||
710 | printf("%s(%s): %s\n", __func__, self->name, build_id); | ||
711 | out_elf_end: | ||
712 | elf_end(elf); | ||
713 | out_close: | ||
714 | close(fd); | ||
715 | out: | ||
716 | return build_id; | ||
717 | } | ||
718 | |||
719 | char dso__symtab_origin(const struct dso *self) | ||
720 | { | ||
721 | static const char origin[] = { | ||
722 | [DSO__ORIG_KERNEL] = 'k', | ||
723 | [DSO__ORIG_JAVA_JIT] = 'j', | ||
724 | [DSO__ORIG_FEDORA] = 'f', | ||
725 | [DSO__ORIG_UBUNTU] = 'u', | ||
726 | [DSO__ORIG_BUILDID] = 'b', | ||
727 | [DSO__ORIG_DSO] = 'd', | ||
728 | }; | ||
729 | |||
730 | if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) | ||
731 | return '!'; | ||
732 | return origin[self->origin]; | ||
733 | } | ||
734 | |||
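
dso__symtab_origin() maps the enum dso_origin value recorded at load time to a single letter ('k', 'j', 'f', 'u', 'b', 'd', or '!' when nothing was found) that report output can print next to each symbol. A minimal sketch of the designated-initializer lookup, with a hypothetical enum standing in for the one above:

#include <stdio.h>

enum origin { ORIG_KERNEL = 0, ORIG_JIT, ORIG_DEBUG, ORIG_DSO, ORIG_NOT_FOUND };

static char origin_char(enum origin o)
{
	static const char tag[] = {
		[ORIG_KERNEL] = 'k',
		[ORIG_JIT]    = 'j',
		[ORIG_DEBUG]  = 'f',
		[ORIG_DSO]    = 'd',
	};

	if (o >= ORIG_NOT_FOUND)
		return '!';
	return tag[o];
}

int main(void)
{
	printf("%c %c\n", origin_char(ORIG_KERNEL), origin_char(ORIG_NOT_FOUND));
	return 0;
}
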
655 | int dso__load(struct dso *self, symbol_filter_t filter, int verbose) | 735 | int dso__load(struct dso *self, symbol_filter_t filter, int verbose) |
656 | { | 736 | { |
657 | int size = strlen(self->name) + sizeof("/usr/lib/debug%s.debug"); | 737 | int size = PATH_MAX; |
658 | char *name = malloc(size); | 738 | char *name = malloc(size), *build_id = NULL; |
659 | int variant = 0; | ||
660 | int ret = -1; | 739 | int ret = -1; |
661 | int fd; | 740 | int fd; |
662 | 741 | ||
@@ -665,26 +744,43 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose) | |||
665 | 744 | ||
666 | self->adjust_symbols = 0; | 745 | self->adjust_symbols = 0; |
667 | 746 | ||
668 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) | 747 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) { |
669 | return dso__load_perf_map(self, filter, verbose); | 748 | ret = dso__load_perf_map(self, filter, verbose); |
749 | self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : | ||
750 | DSO__ORIG_NOT_FOUND; | ||
751 | return ret; | ||
752 | } | ||
753 | |||
754 | self->origin = DSO__ORIG_FEDORA - 1; | ||
670 | 755 | ||
671 | more: | 756 | more: |
672 | do { | 757 | do { |
673 | switch (variant) { | 758 | self->origin++; |
674 | case 0: /* Fedora */ | 759 | switch (self->origin) { |
760 | case DSO__ORIG_FEDORA: | ||
675 | snprintf(name, size, "/usr/lib/debug%s.debug", self->name); | 761 | snprintf(name, size, "/usr/lib/debug%s.debug", self->name); |
676 | break; | 762 | break; |
677 | case 1: /* Ubuntu */ | 763 | case DSO__ORIG_UBUNTU: |
678 | snprintf(name, size, "/usr/lib/debug%s", self->name); | 764 | snprintf(name, size, "/usr/lib/debug%s", self->name); |
679 | break; | 765 | break; |
680 | case 2: /* Sane people */ | 766 | case DSO__ORIG_BUILDID: |
767 | build_id = dso__read_build_id(self, verbose); | ||
768 | if (build_id != NULL) { | ||
769 | snprintf(name, size, | ||
770 | "/usr/lib/debug/.build-id/%.2s/%s.debug", | ||
771 | build_id, build_id + 2); | ||
772 | free(build_id); | ||
773 | break; | ||
774 | } | ||
775 | self->origin++; | ||
776 | /* Fall thru */ | ||
777 | case DSO__ORIG_DSO: | ||
681 | snprintf(name, size, "%s", self->name); | 778 | snprintf(name, size, "%s", self->name); |
682 | break; | 779 | break; |
683 | 780 | ||
684 | default: | 781 | default: |
685 | goto out; | 782 | goto out; |
686 | } | 783 | } |
687 | variant++; | ||
688 | 784 | ||
689 | fd = open(name, O_RDONLY); | 785 | fd = open(name, O_RDONLY); |
690 | } while (fd < 0); | 786 | } while (fd < 0); |
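
dso__load now walks the candidate debug-info locations in enum order (Fedora-style /usr/lib/debug%s.debug, Ubuntu-style /usr/lib/debug%s, a build-id keyed path, then the DSO itself) and records in self->origin which one finally opened. A hedged sketch of the same ordered-fallback loop; the paths, enum and function name are illustrative and the build-id step is omitted:

#include <fcntl.h>
#include <stdio.h>

enum step { TRY_FEDORA, TRY_UBUNTU, TRY_SELF, TRY_DONE };

/* try each candidate path in order; return the fd and report which step won */
static int open_debuginfo(const char *dso, enum step *which)
{
	char path[4096];
	int fd;
	enum step s;

	for (s = TRY_FEDORA; s < TRY_DONE; s++) {
		switch (s) {
		case TRY_FEDORA:
			snprintf(path, sizeof(path), "/usr/lib/debug%s.debug", dso);
			break;
		case TRY_UBUNTU:
			snprintf(path, sizeof(path), "/usr/lib/debug%s", dso);
			break;
		default:	/* TRY_SELF: fall back to the DSO itself */
			snprintf(path, sizeof(path), "%s", dso);
			break;
		}
		fd = open(path, O_RDONLY);
		if (fd >= 0) {
			*which = s;
			return fd;
		}
	}
	*which = TRY_DONE;
	return -1;
}
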
@@ -705,6 +801,8 @@ more: | |||
705 | } | 801 | } |
706 | out: | 802 | out: |
707 | free(name); | 803 | free(name); |
804 | if (ret < 0 && strstr(self->name, " (deleted)") != NULL) | ||
805 | return 0; | ||
708 | return ret; | 806 | return ret; |
709 | } | 807 | } |
710 | 808 | ||
@@ -820,6 +918,9 @@ int dso__load_kernel(struct dso *self, const char *vmlinux, | |||
820 | if (err <= 0) | 918 | if (err <= 0) |
821 | err = dso__load_kallsyms(self, filter, verbose); | 919 | err = dso__load_kallsyms(self, filter, verbose); |
822 | 920 | ||
921 | if (err > 0) | ||
922 | self->origin = DSO__ORIG_KERNEL; | ||
923 | |||
823 | return err; | 924 | return err; |
824 | } | 925 | } |
825 | 926 | ||
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 2f92b21c712d..b53bf0125c1b 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -7,6 +7,30 @@ | |||
7 | #include <linux/rbtree.h> | 7 | #include <linux/rbtree.h> |
8 | #include "module.h" | 8 | #include "module.h" |
9 | 9 | ||
10 | #ifdef HAVE_CPLUS_DEMANGLE | ||
11 | extern char *cplus_demangle(const char *, int); | ||
12 | |||
13 | static inline char *bfd_demangle(void __used *v, const char *c, int i) | ||
14 | { | ||
15 | return cplus_demangle(c, i); | ||
16 | } | ||
17 | #else | ||
18 | #ifdef NO_DEMANGLE | ||
19 | static inline char *bfd_demangle(void __used *v, const char __used *c, | ||
20 | int __used i) | ||
21 | { | ||
22 | return NULL; | ||
23 | } | ||
24 | #else | ||
25 | #include <bfd.h> | ||
26 | #endif | ||
27 | #endif | ||
28 | |||
29 | #ifndef DMGL_PARAMS | ||
30 | #define DMGL_PARAMS (1 << 0) /* Include function args */ | ||
31 | #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ | ||
32 | #endif | ||
33 | |||
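
The bfd_demangle() compatibility shim keeps symbol.c building in three configurations: with libiberty's cplus_demangle(), with demangling disabled, or against a libbfd that provides bfd_demangle() itself. A sketch of the same tri-state wrapper pattern; the macro names below are placeholders, not the real build flags:

/* demangle_compat.h - pick a demangler at compile time (illustrative) */
#ifdef USE_LIBERTY_DEMANGLER
extern char *cplus_demangle(const char *mangled, int options);

static inline char *demangle(const char *name, int options)
{
	return cplus_demangle(name, options);
}
#elif defined(NO_DEMANGLER)
static inline char *demangle(const char *name, int options)
{
	(void)name;
	(void)options;
	return 0;	/* callers fall back to the mangled name */
}
#else
#include <bfd.h>	/* provides bfd_demangle() */
#define demangle(name, options) bfd_demangle(0, name, options)
#endif
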
10 | struct symbol { | 34 | struct symbol { |
11 | struct rb_node rb_node; | 35 | struct rb_node rb_node; |
12 | u64 start; | 36 | u64 start; |
@@ -26,6 +50,7 @@ struct dso { | |||
26 | unsigned int sym_priv_size; | 50 | unsigned int sym_priv_size; |
27 | unsigned char adjust_symbols; | 51 | unsigned char adjust_symbols; |
28 | unsigned char slen_calculated; | 52 | unsigned char slen_calculated; |
53 | unsigned char origin; | ||
29 | char name[0]; | 54 | char name[0]; |
30 | }; | 55 | }; |
31 | 56 | ||
@@ -49,6 +74,7 @@ int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose); | |||
49 | int dso__load(struct dso *self, symbol_filter_t filter, int verbose); | 74 | int dso__load(struct dso *self, symbol_filter_t filter, int verbose); |
50 | 75 | ||
51 | size_t dso__fprintf(struct dso *self, FILE *fp); | 76 | size_t dso__fprintf(struct dso *self, FILE *fp); |
77 | char dso__symtab_origin(const struct dso *self); | ||
52 | 78 | ||
53 | void symbol__init(void); | 79 | void symbol__init(void); |
54 | #endif /* _PERF_SYMBOL_ */ | 80 | #endif /* _PERF_SYMBOL_ */ |
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 1eddae94bab3..1150c6d5c7b8 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -95,8 +95,6 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) | |||
95 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) | 95 | if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG) |
96 | pent->fields.remote_irr = 1; | 96 | pent->fields.remote_irr = 1; |
97 | } | 97 | } |
98 | if (!pent->fields.trig_mode) | ||
99 | ioapic->irr &= ~(1 << idx); | ||
100 | 98 | ||
101 | return injected; | 99 | return injected; |
102 | } | 100 | } |
@@ -136,7 +134,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) | |||
136 | mask_after = ioapic->redirtbl[index].fields.mask; | 134 | mask_after = ioapic->redirtbl[index].fields.mask; |
137 | if (mask_before != mask_after) | 135 | if (mask_before != mask_after) |
138 | kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); | 136 | kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); |
139 | if (ioapic->irr & (1 << index)) | 137 | if (ioapic->redirtbl[index].fields.trig_mode == IOAPIC_LEVEL_TRIG |
138 | && ioapic->irr & (1 << index)) | ||
140 | ioapic_service(ioapic, index); | 139 | ioapic_service(ioapic, index); |
141 | break; | 140 | break; |
142 | } | 141 | } |
@@ -184,9 +183,10 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) | |||
184 | if (!level) | 183 | if (!level) |
185 | ioapic->irr &= ~mask; | 184 | ioapic->irr &= ~mask; |
186 | else { | 185 | else { |
186 | int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); | ||
187 | ioapic->irr |= mask; | 187 | ioapic->irr |= mask; |
188 | if ((!entry.fields.trig_mode && old_irr != ioapic->irr) | 188 | if ((edge && old_irr != ioapic->irr) || |
189 | || !entry.fields.remote_irr) | 189 | (!edge && !entry.fields.remote_irr)) |
190 | ret = ioapic_service(ioapic, irq); | 190 | ret = ioapic_service(ioapic, irq); |
191 | } | 191 | } |
192 | } | 192 | } |
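
The ioapic change computes up front whether the pin is edge-triggered and only re-delivers a level-triggered interrupt while remote_irr is clear, whereas an edge-triggered one fires on an actual change of irr. A small standalone sketch of that decision with hypothetical field names:

#include <stdio.h>

/* should a newly asserted pin be serviced? mirrors the edge/level split */
static int should_service(int edge, int irr_changed, int remote_irr)
{
	return (edge && irr_changed) || (!edge && !remote_irr);
}

int main(void)
{
	/* edge-triggered: only a real 0->1 transition matters */
	printf("edge, changed:   %d\n", should_service(1, 1, 0));
	printf("edge, unchanged: %d\n", should_service(1, 0, 0));
	/* level-triggered: keep delivering until the guest acks (remote_irr) */
	printf("level, no ack:   %d\n", should_service(0, 1, 0));
	printf("level, pending:  %d\n", should_service(0, 1, 1));
	return 0;
}
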
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index a8bd466d00cc..ddc17f0e2f35 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c | |||
@@ -160,7 +160,8 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) | |||
160 | unsigned gsi = pin; | 160 | unsigned gsi = pin; |
161 | 161 | ||
162 | list_for_each_entry(e, &kvm->irq_routing, link) | 162 | list_for_each_entry(e, &kvm->irq_routing, link) |
163 | if (e->irqchip.irqchip == irqchip && | 163 | if (e->type == KVM_IRQ_ROUTING_IRQCHIP && |
164 | e->irqchip.irqchip == irqchip && | ||
164 | e->irqchip.pin == pin) { | 165 | e->irqchip.pin == pin) { |
165 | gsi = e->gsi; | 166 | gsi = e->gsi; |
166 | break; | 167 | break; |
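
The routing-table walk now checks e->type before comparing the irqchip fields, since those fields live in a union and are only meaningful for KVM_IRQ_ROUTING_IRQCHIP entries. A minimal sketch of tag-before-union access with a hypothetical struct:

#include <stdio.h>

struct route {
	int type;	/* discriminates the union below */
	union {
		struct { int chip, pin; } irqchip;
		struct { unsigned addr; } msi;
	} u;
};

#define TYPE_IRQCHIP 1

static int matches_pin(const struct route *e, int chip, int pin)
{
	/* check the tag first; otherwise msi bytes would be read as irqchip */
	return e->type == TYPE_IRQCHIP &&
	       e->u.irqchip.chip == chip && e->u.irqchip.pin == pin;
}

int main(void)
{
	struct route r = { .type = TYPE_IRQCHIP, .u.irqchip = { 0, 3 } };

	printf("%d\n", matches_pin(&r, 0, 3));
	return 0;
}
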
@@ -259,6 +260,7 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e, | |||
259 | int delta; | 260 | int delta; |
260 | 261 | ||
261 | e->gsi = ue->gsi; | 262 | e->gsi = ue->gsi; |
263 | e->type = ue->type; | ||
262 | switch (ue->type) { | 264 | switch (ue->type) { |
263 | case KVM_IRQ_ROUTING_IRQCHIP: | 265 | case KVM_IRQ_ROUTING_IRQCHIP: |
264 | delta = 0; | 266 | delta = 0; |