-rw-r--r--  Documentation/watchdog/watchdog-parameters.txt | 5
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  Makefile | 72
-rw-r--r--  arch/ia64/mm/tlb.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 4
-rw-r--r--  arch/x86/kernel/traps.c | 11
-rw-r--r--  drivers/acpi/apei/erst.c | 2
-rw-r--r--  drivers/ata/ahci.c | 10
-rw-r--r--  drivers/ata/ata_generic.c | 30
-rw-r--r--  drivers/ata/libahci.c | 5
-rw-r--r--  drivers/char/agp/generic.c | 6
-rw-r--r--  drivers/cpuidle/governors/menu.c | 4
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 4
-rw-r--r--  drivers/edac/amd64_edac.c | 24
-rw-r--r--  drivers/edac/i7core_edac.c | 53
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 19
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 76
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 81
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 10
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c | 11
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 15
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 18
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 27
-rw-r--r--  fs/xfs/xfs_dfrag.c | 5
-rw-r--r--  fs/xfs/xfs_ialloc.c | 142
-rw-r--r--  fs/xfs/xfs_iget.c | 10
-rw-r--r--  fs/xfs/xfs_inode.c | 5
-rw-r--r--  fs/xfs/xfs_inode.h | 6
-rw-r--r--  fs/xfs/xfs_itable.c | 285
-rw-r--r--  fs/xfs/xfs_itable.h | 17
-rw-r--r--  fs/xfs/xfs_log_recover.c | 2
-rw-r--r--  fs/xfs/xfs_mount.c | 2
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 4
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 2
-rw-r--r--  include/linux/agp_backend.h | 1
-rw-r--r--  include/linux/dynamic_debug.h | 4
-rw-r--r--  include/linux/fb.h | 4
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  init/main.c | 12
-rw-r--r--  kernel/module.c | 23
-rw-r--r--  kernel/sched.c | 4
-rw-r--r--  kernel/time/tick-sched.c | 16
-rw-r--r--  lib/dynamic_debug.c | 2
-rw-r--r--  scripts/package/Makefile | 2
-rwxr-xr-x  scripts/setlocalversion | 179
-rw-r--r--  tools/perf/util/thread.c | 11
81 files changed, 877 insertions, 758 deletions
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 41c95cc1dc1f..17ddd822b456 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -125,6 +125,11 @@ ibmasr:
125nowayout: Watchdog cannot be stopped once started 125nowayout: Watchdog cannot be stopped once started
126 (default=kernel config parameter) 126 (default=kernel config parameter)
127------------------------------------------------- 127-------------------------------------------------
128imx2_wdt:
129timeout: Watchdog timeout in seconds (default 60 s)
130nowayout: Watchdog cannot be stopped once started
131 (default=kernel config parameter)
132-------------------------------------------------
128indydog: 133indydog:
129nowayout: Watchdog cannot be stopped once started 134nowayout: Watchdog cannot be stopped once started
130 (default=kernel config parameter) 135 (default=kernel config parameter)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7642365ed6d2..a07a49deb47b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2111,11 +2111,18 @@ F: drivers/edac/i5000_edac.c
2111 2111
2112EDAC-I5400 2112EDAC-I5400
2113M: Mauro Carvalho Chehab <mchehab@redhat.com> 2113M: Mauro Carvalho Chehab <mchehab@redhat.com>
2114L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) 2114L: linux-edac@vger.kernel.org
2115W: bluesmoke.sourceforge.net 2115W: bluesmoke.sourceforge.net
2116S: Maintained 2116S: Maintained
2117F: drivers/edac/i5400_edac.c 2117F: drivers/edac/i5400_edac.c
2118 2118
2119EDAC-I7CORE
2120M: Mauro Carvalho Chehab <mchehab@redhat.com>
2121L: linux-edac@vger.kernel.org
2122W: bluesmoke.sourceforge.net
2123S: Maintained
2124F: drivers/edac/i7core_edac.c linux/edac_mce.h drivers/edac/edac_mce.c
2125
2119EDAC-I82975X 2126EDAC-I82975X
2120M: Ranganathan Desikan <ravi@jetztechnologies.com> 2127M: Ranganathan Desikan <ravi@jetztechnologies.com>
2121M: "Arvind R." <arvind@jetztechnologies.com> 2128M: "Arvind R." <arvind@jetztechnologies.com>
diff --git a/Makefile b/Makefile
index 662e820cfc4a..015eca5a1f18 100644
--- a/Makefile
+++ b/Makefile
@@ -883,80 +883,10 @@ PHONY += $(vmlinux-dirs)
883$(vmlinux-dirs): prepare scripts 883$(vmlinux-dirs): prepare scripts
884 $(Q)$(MAKE) $(build)=$@ 884 $(Q)$(MAKE) $(build)=$@
885 885
886# Build the kernel release string
887#
888# The KERNELRELEASE value built here is stored in the file
889# include/config/kernel.release, and is used when executing several
890# make targets, such as "make install" or "make modules_install."
891#
892# The eventual kernel release string consists of the following fields,
893# shown in a hierarchical format to show how smaller parts are concatenated
894# to form the larger and final value, with values coming from places like
895# the Makefile, kernel config options, make command line options and/or
896# SCM tag information.
897#
898# $(KERNELVERSION)
899# $(VERSION) eg, 2
900# $(PATCHLEVEL) eg, 6
901# $(SUBLEVEL) eg, 18
902# $(EXTRAVERSION) eg, -rc6
903# $(localver-full)
904# $(localver)
905# localversion* (files without backups, containing '~')
906# $(CONFIG_LOCALVERSION) (from kernel config setting)
907# $(LOCALVERSION) (from make command line, if provided)
908# $(localver-extra)
909# $(scm-identifier) (unique SCM tag, if one exists)
910# ./scripts/setlocalversion (only with CONFIG_LOCALVERSION_AUTO)
911# .scmversion (only with CONFIG_LOCALVERSION_AUTO)
912# + (only without CONFIG_LOCALVERSION_AUTO
913# and without LOCALVERSION= and
914# repository is at non-tagged commit)
915#
916# For kernels without CONFIG_LOCALVERSION_AUTO compiled from an SCM that has
917# been revised beyond a tagged commit, `+' is appended to the version string
918# when not overridden by using "make LOCALVERSION=". This indicates that the
919# kernel is not a vanilla release version and has been modified.
920
921pattern = ".*/localversion[^~]*"
922string = $(shell cat /dev/null \
923 `find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort -u`)
924
925localver = $(subst $(space),, $(string) \
926 $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
927
928# scripts/setlocalversion is called to create a unique identifier if the source
929# is managed by a known SCM and the repository has been revised since the last
930# tagged (release) commit. The format of the identifier is determined by the
931# SCM's implementation.
932#
933# .scmversion is used when generating rpm packages so we do not loose
934# the version information from the SCM when we do the build of the kernel
935# from the copied source
936ifeq ($(wildcard .scmversion),)
937 scm-identifier = $(shell $(CONFIG_SHELL) \
938 $(srctree)/scripts/setlocalversion $(srctree))
939else
940 scm-identifier = $(shell cat .scmversion 2> /dev/null)
941endif
942
943ifdef CONFIG_LOCALVERSION_AUTO
944 localver-extra = $(scm-identifier)
945else
946 ifneq ($(scm-identifier),)
947 ifeq ("$(origin LOCALVERSION)", "undefined")
948 localver-extra = +
949 endif
950 endif
951endif
952
953localver-full = $(localver)$(LOCALVERSION)$(localver-extra)
954
955# Store (new) KERNELRELASE string in include/config/kernel.release 886# Store (new) KERNELRELASE string in include/config/kernel.release
956kernelrelease = $(KERNELVERSION)$(localver-full)
957include/config/kernel.release: include/config/auto.conf FORCE 887include/config/kernel.release: include/config/auto.conf FORCE
958 $(Q)rm -f $@ 888 $(Q)rm -f $@
959 $(Q)echo $(kernelrelease) > $@ 889 $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) scripts/setlocalversion $(srctree))" > $@
960 890
961 891
962# Things we need to do before we recursively start building the kernel 892# Things we need to do before we recursively start building the kernel
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 5dfd916e9ea6..7b3cdc6c6d91 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -121,7 +121,7 @@ static inline void down_spin(struct spinaphore *ss)
121 ia64_invala(); 121 ia64_invala();
122 122
123 for (;;) { 123 for (;;) {
124 asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); 124 asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
125 if (time_before(t, serve)) 125 if (time_before(t, serve))
126 return; 126 return;
127 cpu_relax(); 127 cpu_relax();
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 611df11ba15e..c2897b7b4a3b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -102,8 +102,8 @@ static const u64 amd_perfmon_event_map[] =
102 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 102 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
103 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, 103 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
104 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, 104 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
105 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, 105 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
106 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 106 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
107}; 107};
108 108
109static u64 amd_pmu_event_map(int hw_event) 109static u64 amd_pmu_event_map(int hw_event)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 142d70c74b02..725ef4d17cd5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -526,6 +526,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
526dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 526dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
527{ 527{
528 struct task_struct *tsk = current; 528 struct task_struct *tsk = current;
529 int user_icebp = 0;
529 unsigned long dr6; 530 unsigned long dr6;
530 int si_code; 531 int si_code;
531 532
@@ -534,6 +535,14 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
534 /* Filter out all the reserved bits which are preset to 1 */ 535 /* Filter out all the reserved bits which are preset to 1 */
535 dr6 &= ~DR6_RESERVED; 536 dr6 &= ~DR6_RESERVED;
536 537
538 /*
539 * If dr6 has no reason to give us about the origin of this trap,
540 * then it's very likely the result of an icebp/int01 trap.
541 * User wants a sigtrap for that.
542 */
543 if (!dr6 && user_mode(regs))
544 user_icebp = 1;
545
537 /* Catch kmemcheck conditions first of all! */ 546 /* Catch kmemcheck conditions first of all! */
538 if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) 547 if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
539 return; 548 return;
@@ -575,7 +584,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
575 regs->flags &= ~X86_EFLAGS_TF; 584 regs->flags &= ~X86_EFLAGS_TF;
576 } 585 }
577 si_code = get_si_code(tsk->thread.debugreg6); 586 si_code = get_si_code(tsk->thread.debugreg6);
578 if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS)) 587 if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
579 send_sigtrap(tsk, regs, error_code, si_code); 588 send_sigtrap(tsk, regs, error_code, si_code);
580 preempt_conditional_cli(regs); 589 preempt_conditional_cli(regs);
581 590
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2ebc39115507..864dd46c346f 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -781,7 +781,7 @@ static int __init erst_init(void)
781 status = acpi_get_table(ACPI_SIG_ERST, 0, 781 status = acpi_get_table(ACPI_SIG_ERST, 0,
782 (struct acpi_table_header **)&erst_tab); 782 (struct acpi_table_header **)&erst_tab);
783 if (status == AE_NOT_FOUND) { 783 if (status == AE_NOT_FOUND) {
784 pr_err(ERST_PFX "Table is not found!\n"); 784 pr_info(ERST_PFX "Table is not found!\n");
785 goto err; 785 goto err;
786 } else if (ACPI_FAILURE(status)) { 786 } else if (ACPI_FAILURE(status)) {
787 const char *msg = acpi_format_exception(status); 787 const char *msg = acpi_format_exception(status);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8ca16f54e1ed..f2522534ae63 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1053,6 +1053,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1053 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) 1053 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
1054 return -ENODEV; 1054 return -ENODEV;
1055 1055
1056 /*
1057 * For some reason, MCP89 on MacBook 7,1 doesn't work with
1058 * ahci, use ata_generic instead.
1059 */
1060 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
1061 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
1062 pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1063 pdev->subsystem_device == 0xcb89)
1064 return -ENODEV;
1065
1056 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. 1066 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
1057 * At the moment, we can only use the AHCI mode. Let the users know 1067 * At the moment, we can only use the AHCI mode. Let the users know
1058 * that for SAS drives they're out of luck. 1068 * that for SAS drives they're out of luck.
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 573158a9668d..7107a6929deb 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -32,6 +32,11 @@
32 * A generic parallel ATA driver using libata 32 * A generic parallel ATA driver using libata
33 */ 33 */
34 34
35enum {
36 ATA_GEN_CLASS_MATCH = (1 << 0),
37 ATA_GEN_FORCE_DMA = (1 << 1),
38};
39
35/** 40/**
36 * generic_set_mode - mode setting 41 * generic_set_mode - mode setting
37 * @link: link to set up 42 * @link: link to set up
@@ -46,13 +51,17 @@
46static int generic_set_mode(struct ata_link *link, struct ata_device **unused) 51static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
47{ 52{
48 struct ata_port *ap = link->ap; 53 struct ata_port *ap = link->ap;
54 const struct pci_device_id *id = ap->host->private_data;
49 int dma_enabled = 0; 55 int dma_enabled = 0;
50 struct ata_device *dev; 56 struct ata_device *dev;
51 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 57 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52 58
53 /* Bits 5 and 6 indicate if DMA is active on master/slave */ 59 if (id->driver_data & ATA_GEN_FORCE_DMA) {
54 if (ap->ioaddr.bmdma_addr) 60 dma_enabled = 0xff;
61 } else if (ap->ioaddr.bmdma_addr) {
62 /* Bits 5 and 6 indicate if DMA is active on master/slave */
55 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 63 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
64 }
56 65
57 if (pdev->vendor == PCI_VENDOR_ID_CENATEK) 66 if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
58 dma_enabled = 0xFF; 67 dma_enabled = 0xFF;
@@ -126,7 +135,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
126 const struct ata_port_info *ppi[] = { &info, NULL }; 135 const struct ata_port_info *ppi[] = { &info, NULL };
127 136
128 /* Don't use the generic entry unless instructed to do so */ 137 /* Don't use the generic entry unless instructed to do so */
129 if (id->driver_data == 1 && all_generic_ide == 0) 138 if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
130 return -ENODEV; 139 return -ENODEV;
131 140
132 /* Devices that need care */ 141 /* Devices that need care */
@@ -155,7 +164,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
155 return rc; 164 return rc;
156 pcim_pin_device(dev); 165 pcim_pin_device(dev);
157 } 166 }
158 return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0); 167 return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
159} 168}
160 169
161static struct pci_device_id ata_generic[] = { 170static struct pci_device_id ata_generic[] = {
@@ -167,7 +176,15 @@ static struct pci_device_id ata_generic[] = {
167 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, 176 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
168 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, 177 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
169 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, 178 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
170 { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, 179 { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
180 .driver_data = ATA_GEN_FORCE_DMA },
181 /*
182 * For some reason, MCP89 on MacBook 7,1 doesn't work with
183 * ahci, use ata_generic instead.
184 */
185 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA,
186 PCI_VENDOR_ID_APPLE, 0xcb89,
187 .driver_data = ATA_GEN_FORCE_DMA },
171#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) 188#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
172 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, 189 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
173 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, 190 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
@@ -175,7 +192,8 @@ static struct pci_device_id ata_generic[] = {
175 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, 192 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
176#endif 193#endif
177 /* Must come last. If you add entries adjust this table appropriately */ 194 /* Must come last. If you add entries adjust this table appropriately */
178 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1}, 195 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
196 .driver_data = ATA_GEN_CLASS_MATCH },
179 { 0, }, 197 { 0, },
180}; 198};
181 199
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 261f86d102e8..81e772a94d59 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -324,6 +324,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
324 struct ahci_host_priv *hpriv = ap->host->private_data; 324 struct ahci_host_priv *hpriv = ap->host->private_data;
325 void __iomem *mmio = hpriv->mmio; 325 void __iomem *mmio = hpriv->mmio;
326 void __iomem *em_mmio = mmio + hpriv->em_loc; 326 void __iomem *em_mmio = mmio + hpriv->em_loc;
327 const unsigned char *msg_buf = buf;
327 u32 em_ctl, msg; 328 u32 em_ctl, msg;
328 unsigned long flags; 329 unsigned long flags;
329 int i; 330 int i;
@@ -343,8 +344,8 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
343 } 344 }
344 345
345 for (i = 0; i < size; i += 4) { 346 for (i = 0; i < size; i += 4) {
346 msg = buf[i] | buf[i + 1] << 8 | 347 msg = msg_buf[i] | msg_buf[i + 1] << 8 |
347 buf[i + 2] << 16 | buf[i + 3] << 24; 348 msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
348 writel(msg, em_mmio + i); 349 writel(msg, em_mmio + i);
349 } 350 }
350 351
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4b51982fd23a..d2abf5143983 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -97,20 +97,18 @@ EXPORT_SYMBOL(agp_flush_chipset);
97void agp_alloc_page_array(size_t size, struct agp_memory *mem) 97void agp_alloc_page_array(size_t size, struct agp_memory *mem)
98{ 98{
99 mem->pages = NULL; 99 mem->pages = NULL;
100 mem->vmalloc_flag = false;
101 100
102 if (size <= 2*PAGE_SIZE) 101 if (size <= 2*PAGE_SIZE)
103 mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); 102 mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
104 if (mem->pages == NULL) { 103 if (mem->pages == NULL) {
105 mem->pages = vmalloc(size); 104 mem->pages = vmalloc(size);
106 mem->vmalloc_flag = true;
107 } 105 }
108} 106}
109EXPORT_SYMBOL(agp_alloc_page_array); 107EXPORT_SYMBOL(agp_alloc_page_array);
110 108
111void agp_free_page_array(struct agp_memory *mem) 109void agp_free_page_array(struct agp_memory *mem)
112{ 110{
113 if (mem->vmalloc_flag) { 111 if (is_vmalloc_addr(mem->pages)) {
114 vfree(mem->pages); 112 vfree(mem->pages);
115 } else { 113 } else {
116 kfree(mem->pages); 114 kfree(mem->pages);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52ff8aa63f84..1b128702d300 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration)
143 * This allows us to calculate 143 * This allows us to calculate
144 * E(duration)|iowait 144 * E(duration)|iowait
145 */ 145 */
146 if (nr_iowait_cpu()) 146 if (nr_iowait_cpu(smp_processor_id()))
147 bucket = BUCKETS/2; 147 bucket = BUCKETS/2;
148 148
149 if (duration < 10) 149 if (duration < 10)
@@ -175,7 +175,7 @@ static inline int performance_multiplier(void)
175 mult += 2 * get_loadavg(); 175 mult += 2 * get_loadavg();
176 176
177 /* for IO wait tasks (per cpu!) we add 5x each */ 177 /* for IO wait tasks (per cpu!) we add 5x each */
178 mult += 10 * nr_iowait_cpu(); 178 mult += 10 * nr_iowait_cpu(smp_processor_id());
179 179
180 return mult; 180 return mult;
181} 181}
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 5a22ca6927e5..7c3747902a37 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4257,10 +4257,12 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
4257 struct ppc440spe_adma_chan *chan, 4257 struct ppc440spe_adma_chan *chan,
4258 int *initcode) 4258 int *initcode)
4259{ 4259{
4260 struct of_device *ofdev;
4260 struct device_node *np; 4261 struct device_node *np;
4261 int ret; 4262 int ret;
4262 4263
4263 np = container_of(adev->dev, struct of_device, dev)->node; 4264 ofdev = container_of(adev->dev, struct of_device, dev);
4265 np = ofdev->dev.of_node;
4264 if (adev->id != PPC440SPE_XOR_ID) { 4266 if (adev->id != PPC440SPE_XOR_ID) {
4265 adev->err_irq = irq_of_parse_and_map(np, 1); 4267 adev->err_irq = irq_of_parse_and_map(np, 1);
4266 if (adev->err_irq == NO_IRQ) { 4268 if (adev->err_irq == NO_IRQ) {
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index cf17dbb8014f..ac9f7985096d 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1958,20 +1958,20 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1958 u32 value = 0; 1958 u32 value = 0;
1959 int err_sym = 0; 1959 int err_sym = 0;
1960 1960
1961 amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); 1961 if (boot_cpu_data.x86 == 0x10) {
1962 1962
1963 /* F3x180[EccSymbolSize]=1, x8 symbols */ 1963 amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
1964 if (boot_cpu_data.x86 == 0x10 && 1964
1965 boot_cpu_data.x86_model > 7 && 1965 /* F3x180[EccSymbolSize]=1 => x8 symbols */
1966 value & BIT(25)) { 1966 if (boot_cpu_data.x86_model > 7 &&
1967 err_sym = decode_syndrome(syndrome, x8_vectors, 1967 value & BIT(25)) {
1968 ARRAY_SIZE(x8_vectors), 8); 1968 err_sym = decode_syndrome(syndrome, x8_vectors,
1969 return map_err_sym_to_channel(err_sym, 8); 1969 ARRAY_SIZE(x8_vectors), 8);
1970 } else { 1970 return map_err_sym_to_channel(err_sym, 8);
1971 err_sym = decode_syndrome(syndrome, x4_vectors, 1971 }
1972 ARRAY_SIZE(x4_vectors), 4);
1973 return map_err_sym_to_channel(err_sym, 4);
1974 } 1972 }
1973 err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4);
1974 return map_err_sym_to_channel(err_sym, 4);
1975} 1975}
1976 1976
1977/* 1977/*
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 6b8b7b41ec5f..cc9357da0e34 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1233,10 +1233,28 @@ static void __init i7core_xeon_pci_fixup(struct pci_id_table *table)
1233 for (i = 0; i < MAX_SOCKET_BUSES; i++) 1233 for (i = 0; i < MAX_SOCKET_BUSES; i++)
1234 pcibios_scan_specific_bus(255-i); 1234 pcibios_scan_specific_bus(255-i);
1235 } 1235 }
1236 pci_dev_put(pdev);
1236 table++; 1237 table++;
1237 } 1238 }
1238} 1239}
1239 1240
1241static unsigned i7core_pci_lastbus(void)
1242{
1243 int last_bus = 0, bus;
1244 struct pci_bus *b = NULL;
1245
1246 while ((b = pci_find_next_bus(b)) != NULL) {
1247 bus = b->number;
1248 debugf0("Found bus %d\n", bus);
1249 if (bus > last_bus)
1250 last_bus = bus;
1251 }
1252
1253 debugf0("Last bus %d\n", last_bus);
1254
1255 return last_bus;
1256}
1257
1240/* 1258/*
1241 * i7core_get_devices Find and perform 'get' operation on the MCH's 1259 * i7core_get_devices Find and perform 'get' operation on the MCH's
1242 * device/functions we want to reference for this driver 1260 * device/functions we want to reference for this driver
@@ -1244,7 +1262,8 @@ static void __init i7core_xeon_pci_fixup(struct pci_id_table *table)
1244 * Need to 'get' device 16 func 1 and func 2 1262 * Need to 'get' device 16 func 1 and func 2
1245 */ 1263 */
1246int i7core_get_onedevice(struct pci_dev **prev, int devno, 1264int i7core_get_onedevice(struct pci_dev **prev, int devno,
1247 struct pci_id_descr *dev_descr, unsigned n_devs) 1265 struct pci_id_descr *dev_descr, unsigned n_devs,
1266 unsigned last_bus)
1248{ 1267{
1249 struct i7core_dev *i7core_dev; 1268 struct i7core_dev *i7core_dev;
1250 1269
@@ -1291,10 +1310,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
1291 } 1310 }
1292 bus = pdev->bus->number; 1311 bus = pdev->bus->number;
1293 1312
1294 if (bus == 0x3f) 1313 socket = last_bus - bus;
1295 socket = 0;
1296 else
1297 socket = 255 - bus;
1298 1314
1299 i7core_dev = get_i7core_dev(socket); 1315 i7core_dev = get_i7core_dev(socket);
1300 if (!i7core_dev) { 1316 if (!i7core_dev) {
@@ -1358,17 +1374,21 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
1358 1374
1359static int i7core_get_devices(struct pci_id_table *table) 1375static int i7core_get_devices(struct pci_id_table *table)
1360{ 1376{
1361 int i, rc; 1377 int i, rc, last_bus;
1362 struct pci_dev *pdev = NULL; 1378 struct pci_dev *pdev = NULL;
1363 struct pci_id_descr *dev_descr; 1379 struct pci_id_descr *dev_descr;
1364 1380
1381 last_bus = i7core_pci_lastbus();
1382
1365 while (table && table->descr) { 1383 while (table && table->descr) {
1366 dev_descr = table->descr; 1384 dev_descr = table->descr;
1367 for (i = 0; i < table->n_devs; i++) { 1385 for (i = 0; i < table->n_devs; i++) {
1368 pdev = NULL; 1386 pdev = NULL;
1369 do { 1387 do {
1370 rc = i7core_get_onedevice(&pdev, i, &dev_descr[i], 1388 rc = i7core_get_onedevice(&pdev, i,
1371 table->n_devs); 1389 &dev_descr[i],
1390 table->n_devs,
1391 last_bus);
1372 if (rc < 0) { 1392 if (rc < 0) {
1373 if (i == 0) { 1393 if (i == 0) {
1374 i = table->n_devs; 1394 i = table->n_devs;
@@ -1927,21 +1947,26 @@ fail:
1927 * 0 for FOUND a device 1947 * 0 for FOUND a device
1928 * < 0 for error code 1948 * < 0 for error code
1929 */ 1949 */
1950
1951static int probed = 0;
1952
1930static int __devinit i7core_probe(struct pci_dev *pdev, 1953static int __devinit i7core_probe(struct pci_dev *pdev,
1931 const struct pci_device_id *id) 1954 const struct pci_device_id *id)
1932{ 1955{
1933 int dev_idx = id->driver_data;
1934 int rc; 1956 int rc;
1935 struct i7core_dev *i7core_dev; 1957 struct i7core_dev *i7core_dev;
1936 1958
1959 /* get the pci devices we want to reserve for our use */
1960 mutex_lock(&i7core_edac_lock);
1961
1937 /* 1962 /*
1938 * All memory controllers are allocated at the first pass. 1963 * All memory controllers are allocated at the first pass.
1939 */ 1964 */
1940 if (unlikely(dev_idx >= 1)) 1965 if (unlikely(probed >= 1)) {
1966 mutex_unlock(&i7core_edac_lock);
1941 return -EINVAL; 1967 return -EINVAL;
1942 1968 }
1943 /* get the pci devices we want to reserve for our use */ 1969 probed++;
1944 mutex_lock(&i7core_edac_lock);
1945 1970
1946 rc = i7core_get_devices(pci_dev_table); 1971 rc = i7core_get_devices(pci_dev_table);
1947 if (unlikely(rc < 0)) 1972 if (unlikely(rc < 0))
@@ -2013,6 +2038,8 @@ static void __devexit i7core_remove(struct pci_dev *pdev)
2013 i7core_dev->socket); 2038 i7core_dev->socket);
2014 } 2039 }
2015 } 2040 }
2041 probed--;
2042
2016 mutex_unlock(&i7core_edac_lock); 2043 mutex_unlock(&i7core_edac_lock);
2017} 2044}
2018 2045
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 08c4c926e65f..1f2cc6b09623 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -146,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
146 cvt = 1; 146 cvt = 1;
147 break; 147 break;
148 case 'R': 148 case 'R':
149 if (!cvt) 149 if (cvt)
150 rb = 1; 150 rb = 1;
151 break; 151 break;
152 case 'm': 152 case 'm':
@@ -1024,11 +1024,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
1024 } 1024 }
1025 1025
1026create_mode: 1026create_mode:
1027 mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres, 1027 if (cmdline_mode->cvt)
1028 cmdline_mode->yres, 1028 mode = drm_cvt_mode(fb_helper_conn->connector->dev,
1029 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, 1029 cmdline_mode->xres, cmdline_mode->yres,
1030 cmdline_mode->rb, cmdline_mode->interlace, 1030 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
1031 cmdline_mode->margins); 1031 cmdline_mode->rb, cmdline_mode->interlace,
1032 cmdline_mode->margins);
1033 else
1034 mode = drm_gtf_mode(fb_helper_conn->connector->dev,
1035 cmdline_mode->xres, cmdline_mode->yres,
1036 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
1037 cmdline_mode->interlace,
1038 cmdline_mode->margins);
1032 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1039 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1033 list_add(&mode->head, &fb_helper_conn->connector->modes); 1040 list_add(&mode->head, &fb_helper_conn->connector->modes);
1034 return mode; 1041 return mode;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 66c697bc9b22..56f66426207f 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -208,7 +208,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
208 uint8_t ctl2; 208 uint8_t ctl2;
209 209
210 if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { 210 if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
211 if (ctl2 & TFP410_CTL_2_HTPLG) 211 if (ctl2 & TFP410_CTL_2_RSEN)
212 ret = connector_status_connected; 212 ret = connector_status_connected;
213 else 213 else
214 ret = connector_status_disconnected; 214 ret = connector_status_disconnected;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 52510ad8b25d..aee83fa178f6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -620,7 +620,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
620 drm_i915_private_t *dev_priv = dev->dev_private; 620 drm_i915_private_t *dev_priv = dev->dev_private;
621 bool sr_enabled = false; 621 bool sr_enabled = false;
622 622
623 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev)) 623 if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
624 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 624 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
625 else if (IS_I915GM(dev)) 625 else if (IS_I915GM(dev))
626 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 626 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 59a2bf8592ec..f00c5ae9556c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -128,9 +128,11 @@ static int i915_dma_cleanup(struct drm_device * dev)
128 if (dev->irq_enabled) 128 if (dev->irq_enabled)
129 drm_irq_uninstall(dev); 129 drm_irq_uninstall(dev);
130 130
131 mutex_lock(&dev->struct_mutex);
131 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 132 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
132 if (HAS_BSD(dev)) 133 if (HAS_BSD(dev))
133 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 134 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
135 mutex_unlock(&dev->struct_mutex);
134 136
135 /* Clear the HWS virtual address at teardown */ 137 /* Clear the HWS virtual address at teardown */
136 if (I915_NEED_GFX_HWS(dev)) 138 if (I915_NEED_GFX_HWS(dev))
@@ -1229,7 +1231,7 @@ static void i915_warn_stolen(struct drm_device *dev)
1229static void i915_setup_compression(struct drm_device *dev, int size) 1231static void i915_setup_compression(struct drm_device *dev, int size)
1230{ 1232{
1231 struct drm_i915_private *dev_priv = dev->dev_private; 1233 struct drm_i915_private *dev_priv = dev->dev_private;
1232 struct drm_mm_node *compressed_fb, *compressed_llb; 1234 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
1233 unsigned long cfb_base; 1235 unsigned long cfb_base;
1234 unsigned long ll_base = 0; 1236 unsigned long ll_base = 0;
1235 1237
@@ -1410,6 +1412,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
1410 if (ret) 1412 if (ret)
1411 goto cleanup_vga_client; 1413 goto cleanup_vga_client;
1412 1414
1415 /* IIR "flip pending" bit means done if this bit is set */
1416 if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
1417 dev_priv->flip_pending_is_done = true;
1418
1413 intel_modeset_init(dev); 1419 intel_modeset_init(dev);
1414 1420
1415 ret = drm_irq_install(dev); 1421 ret = drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 276583159847..d147ab2f5bfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -596,6 +596,7 @@ typedef struct drm_i915_private {
596 struct drm_crtc *plane_to_crtc_mapping[2]; 596 struct drm_crtc *plane_to_crtc_mapping[2];
597 struct drm_crtc *pipe_to_crtc_mapping[2]; 597 struct drm_crtc *pipe_to_crtc_mapping[2];
598 wait_queue_head_t pending_flip_queue; 598 wait_queue_head_t pending_flip_queue;
599 bool flip_pending_is_done;
599 600
600 /* Reclocking support */ 601 /* Reclocking support */
601 bool render_reclock_avail; 602 bool render_reclock_avail;
@@ -1076,7 +1077,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1076 drm_i915_private_t *dev_priv = dev->dev_private; \ 1077 drm_i915_private_t *dev_priv = dev->dev_private; \
1077 if (I915_VERBOSE) \ 1078 if (I915_VERBOSE) \
1078 DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ 1079 DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
1079 intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \ 1080 intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
1080} while (0) 1081} while (0)
1081 1082
1082 1083
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9ded3dae6c87..074385882ccf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2239,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
2239 mapping = inode->i_mapping; 2239 mapping = inode->i_mapping;
2240 for (i = 0; i < page_count; i++) { 2240 for (i = 0; i < page_count; i++) {
2241 page = read_cache_page_gfp(mapping, i, 2241 page = read_cache_page_gfp(mapping, i,
2242 mapping_gfp_mask (mapping) | 2242 GFP_HIGHUSER |
2243 __GFP_COLD | 2243 __GFP_COLD |
2244 gfpmask); 2244 gfpmask);
2245 if (IS_ERR(page)) 2245 if (IS_ERR(page))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2479be001e40..dba53d4b9fb3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -940,22 +940,30 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
940 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) 940 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
941 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 941 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
942 942
943 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 943 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
944 intel_prepare_page_flip(dev, 0); 944 intel_prepare_page_flip(dev, 0);
945 if (dev_priv->flip_pending_is_done)
946 intel_finish_page_flip_plane(dev, 0);
947 }
945 948
946 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 949 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
947 intel_prepare_page_flip(dev, 1); 950 intel_prepare_page_flip(dev, 1);
951 if (dev_priv->flip_pending_is_done)
952 intel_finish_page_flip_plane(dev, 1);
953 }
948 954
949 if (pipea_stats & vblank_status) { 955 if (pipea_stats & vblank_status) {
950 vblank++; 956 vblank++;
951 drm_handle_vblank(dev, 0); 957 drm_handle_vblank(dev, 0);
952 intel_finish_page_flip(dev, 0); 958 if (!dev_priv->flip_pending_is_done)
959 intel_finish_page_flip(dev, 0);
953 } 960 }
954 961
955 if (pipeb_stats & vblank_status) { 962 if (pipeb_stats & vblank_status) {
956 vblank++; 963 vblank++;
957 drm_handle_vblank(dev, 1); 964 drm_handle_vblank(dev, 1);
958 intel_finish_page_flip(dev, 1); 965 if (!dev_priv->flip_pending_is_done)
966 intel_finish_page_flip(dev, 1);
959 } 967 }
960 968
961 if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || 969 if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@@ -1387,29 +1395,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1387 dev_priv->pipestat[1] = 0; 1395 dev_priv->pipestat[1] = 0;
1388 1396
1389 if (I915_HAS_HOTPLUG(dev)) { 1397 if (I915_HAS_HOTPLUG(dev)) {
1390 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1391
1392 /* Note HDMI and DP share bits */
1393 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1394 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1395 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1396 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1397 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1398 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1399 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1400 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1401 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1402 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1403 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
1404 hotplug_en |= CRT_HOTPLUG_INT_EN;
1405 /* Ignore TV since it's buggy */
1406
1407 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1408
1409 /* Enable in IER... */ 1398 /* Enable in IER... */
1410 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1399 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1411 /* and unmask in IMR */ 1400 /* and unmask in IMR */
1412 i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); 1401 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
1413 } 1402 }
1414 1403
1415 /* 1404 /*
@@ -1427,16 +1416,41 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1427 } 1416 }
1428 I915_WRITE(EMR, error_mask); 1417 I915_WRITE(EMR, error_mask);
1429 1418
1430 /* Disable pipe interrupt enables, clear pending pipe status */
1431 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
1432 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
1433 /* Clear pending interrupt status */
1434 I915_WRITE(IIR, I915_READ(IIR));
1435
1436 I915_WRITE(IER, enable_mask);
1437 I915_WRITE(IMR, dev_priv->irq_mask_reg); 1419 I915_WRITE(IMR, dev_priv->irq_mask_reg);
1420 I915_WRITE(IER, enable_mask);
1438 (void) I915_READ(IER); 1421 (void) I915_READ(IER);
1439 1422
1423 if (I915_HAS_HOTPLUG(dev)) {
1424 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1425
1426 /* Note HDMI and DP share bits */
1427 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1428 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1429 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1430 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1431 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1432 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1433 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1434 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1435 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1436 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1437 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1438 hotplug_en |= CRT_HOTPLUG_INT_EN;
1439
1440 /* Programming the CRT detection parameters tends
1441 to generate a spurious hotplug event about three
1442 seconds later. So just do it once.
1443 */
1444 if (IS_G4X(dev))
1445 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
1446 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1447 }
1448
1449 /* Ignore TV since it's buggy */
1450
1451 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1452 }
1453
1440 opregion_enable_asle(dev); 1454 opregion_enable_asle(dev);
1441 1455
1442 return 0; 1456 return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 64b0a3afd92b..150400f40534 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -178,6 +178,7 @@
178#define MI_OVERLAY_OFF (0x2<<21) 178#define MI_OVERLAY_OFF (0x2<<21)
179#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) 179#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
180#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) 180#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
181#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
181#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) 182#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
182#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 183#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
183#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 184#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
@@ -368,6 +369,9 @@
368#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 369#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
369#define BB_ADDR 0x02140 /* 8 bytes */ 370#define BB_ADDR 0x02140 /* 8 bytes */
370#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 371#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
372#define ECOSKPD 0x021d0
373#define ECO_GATING_CX_ONLY (1<<3)
374#define ECO_FLIP_DONE (1<<0)
371 375
372/* GEN6 interrupt control */ 376/* GEN6 interrupt control */
373#define GEN6_RENDER_HWSTAM 0x2098 377#define GEN6_RENDER_HWSTAM 0x2098
@@ -1130,7 +1134,6 @@
1130#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) 1134#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
1131#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 1135#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
1132#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1136#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
1133#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
1134 1137
1135#define PORT_HOTPLUG_STAT 0x61114 1138#define PORT_HOTPLUG_STAT 0x61114
1136#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 1139#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 22ff38455731..ee0732b222a1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -234,14 +234,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
234 else 234 else
235 tries = 1; 235 tries = 1;
236 hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); 236 hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
237 hotplug_en &= CRT_HOTPLUG_MASK;
238 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; 237 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
239 238
240 if (IS_G4X(dev))
241 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
242
243 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
244
245 for (i = 0; i < tries ; i++) { 239 for (i = 0; i < tries ; i++) {
246 unsigned long timeout; 240 unsigned long timeout;
247 /* turn on the FORCE_DETECT */ 241 /* turn on the FORCE_DETECT */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index cc8131ff319f..68dcf36e2793 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2970,11 +2970,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2970 if (srwm < 0) 2970 if (srwm < 0)
2971 srwm = 1; 2971 srwm = 1;
2972 srwm &= 0x3f; 2972 srwm &= 0x3f;
2973 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2973 if (IS_I965GM(dev))
2974 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2974 } else { 2975 } else {
2975 /* Turn off self refresh if both pipes are enabled */ 2976 /* Turn off self refresh if both pipes are enabled */
2976 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2977 if (IS_I965GM(dev))
2977 & ~FW_BLC_SELF_EN); 2978 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2979 & ~FW_BLC_SELF_EN);
2978 } 2980 }
2979 2981
2980 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2982 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -4483,6 +4485,7 @@ static void intel_idle_update(struct work_struct *work)
4483 struct drm_device *dev = dev_priv->dev; 4485 struct drm_device *dev = dev_priv->dev;
4484 struct drm_crtc *crtc; 4486 struct drm_crtc *crtc;
4485 struct intel_crtc *intel_crtc; 4487 struct intel_crtc *intel_crtc;
4488 int enabled = 0;
4486 4489
4487 if (!i915_powersave) 4490 if (!i915_powersave)
4488 return; 4491 return;
@@ -4491,21 +4494,22 @@ static void intel_idle_update(struct work_struct *work)
4491 4494
4492 i915_update_gfx_val(dev_priv); 4495 i915_update_gfx_val(dev_priv);
4493 4496
4494 if (IS_I945G(dev) || IS_I945GM(dev)) {
4495 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4496 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4497 }
4498
4499 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4497 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4500 /* Skip inactive CRTCs */ 4498 /* Skip inactive CRTCs */
4501 if (!crtc->fb) 4499 if (!crtc->fb)
4502 continue; 4500 continue;
4503 4501
4502 enabled++;
4504 intel_crtc = to_intel_crtc(crtc); 4503 intel_crtc = to_intel_crtc(crtc);
4505 if (!intel_crtc->busy) 4504 if (!intel_crtc->busy)
4506 intel_decrease_pllclock(crtc); 4505 intel_decrease_pllclock(crtc);
4507 } 4506 }
4508 4507
4508 if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
4509 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4510 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4511 }
4512
4509 mutex_unlock(&dev->struct_mutex); 4513 mutex_unlock(&dev->struct_mutex);
4510} 4514}
4511 4515
@@ -4601,10 +4605,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
4601 kfree(work); 4605 kfree(work);
4602} 4606}
4603 4607
4604void intel_finish_page_flip(struct drm_device *dev, int pipe) 4608static void do_intel_finish_page_flip(struct drm_device *dev,
4609 struct drm_crtc *crtc)
4605{ 4610{
4606 drm_i915_private_t *dev_priv = dev->dev_private; 4611 drm_i915_private_t *dev_priv = dev->dev_private;
4607 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4608 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4612 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4609 struct intel_unpin_work *work; 4613 struct intel_unpin_work *work;
4610 struct drm_i915_gem_object *obj_priv; 4614 struct drm_i915_gem_object *obj_priv;
@@ -4648,6 +4652,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4648 schedule_work(&work->work); 4652 schedule_work(&work->work);
4649} 4653}
4650 4654
4655void intel_finish_page_flip(struct drm_device *dev, int pipe)
4656{
4657 drm_i915_private_t *dev_priv = dev->dev_private;
4658 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4659
4660 do_intel_finish_page_flip(dev, crtc);
4661}
4662
4663void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
4664{
4665 drm_i915_private_t *dev_priv = dev->dev_private;
4666 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
4667
4668 do_intel_finish_page_flip(dev, crtc);
4669}
4670
4651void intel_prepare_page_flip(struct drm_device *dev, int plane) 4671void intel_prepare_page_flip(struct drm_device *dev, int plane)
4652{ 4672{
4653 drm_i915_private_t *dev_priv = dev->dev_private; 4673 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4678,6 +4698,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4678 unsigned long flags; 4698 unsigned long flags;
4679 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4699 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4680 int ret, pipesrc; 4700 int ret, pipesrc;
4701 u32 flip_mask;
4681 4702
4682 work = kzalloc(sizeof *work, GFP_KERNEL); 4703 work = kzalloc(sizeof *work, GFP_KERNEL);
4683 if (work == NULL) 4704 if (work == NULL)
@@ -4731,15 +4752,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4731 atomic_inc(&obj_priv->pending_flip); 4752 atomic_inc(&obj_priv->pending_flip);
4732 work->pending_flip_obj = obj; 4753 work->pending_flip_obj = obj;
4733 4754
4755 if (intel_crtc->plane)
4756 flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4757 else
4758 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
4759
4760 /* Wait for any previous flip to finish */
4761 if (IS_GEN3(dev))
4762 while (I915_READ(ISR) & flip_mask)
4763 ;
4764
4734 BEGIN_LP_RING(4); 4765 BEGIN_LP_RING(4);
4735 OUT_RING(MI_DISPLAY_FLIP |
4736 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4737 OUT_RING(fb->pitch);
4738 if (IS_I965G(dev)) { 4766 if (IS_I965G(dev)) {
4767 OUT_RING(MI_DISPLAY_FLIP |
4768 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4769 OUT_RING(fb->pitch);
4739 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4770 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4740 pipesrc = I915_READ(pipesrc_reg); 4771 pipesrc = I915_READ(pipesrc_reg);
4741 OUT_RING(pipesrc & 0x0fff0fff); 4772 OUT_RING(pipesrc & 0x0fff0fff);
4742 } else { 4773 } else {
4774 OUT_RING(MI_DISPLAY_FLIP_I915 |
4775 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4776 OUT_RING(fb->pitch);
4743 OUT_RING(obj_priv->gtt_offset); 4777 OUT_RING(obj_priv->gtt_offset);
4744 OUT_RING(MI_NOOP); 4778 OUT_RING(MI_NOOP);
4745 } 4779 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 49b54f05d3cf..1aac59e83bff 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -136,6 +136,12 @@ intel_dp_link_required(struct drm_device *dev,
136} 136}
137 137
138static int 138static int
139intel_dp_max_data_rate(int max_link_clock, int max_lanes)
140{
141 return (max_link_clock * max_lanes * 8) / 10;
142}
143
144static int
139intel_dp_mode_valid(struct drm_connector *connector, 145intel_dp_mode_valid(struct drm_connector *connector,
140 struct drm_display_mode *mode) 146 struct drm_display_mode *mode)
141{ 147{
@@ -144,8 +150,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); 150 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
145 int max_lanes = intel_dp_max_lane_count(intel_encoder); 151 int max_lanes = intel_dp_max_lane_count(intel_encoder);
146 152
147 if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) 153 /* only refuse the mode on non eDP since we have seen some wierd eDP panels
148 > max_link_clock * max_lanes) 154 which are outside spec tolerances but somehow work by magic */
155 if (!IS_eDP(intel_encoder) &&
156 (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
157 > intel_dp_max_data_rate(max_link_clock, max_lanes)))
149 return MODE_CLOCK_HIGH; 158 return MODE_CLOCK_HIGH;
150 159
151 if (mode->clock < 10000) 160 if (mode->clock < 10000)
@@ -506,7 +515,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
506 515
507 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 516 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
508 for (clock = 0; clock <= max_clock; clock++) { 517 for (clock = 0; clock <= max_clock; clock++) {
509 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 518 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
510 519
511 if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) 520 if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
512 <= link_avail) { 521 <= link_avail) {
@@ -521,6 +530,18 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
521 } 530 }
522 } 531 }
523 } 532 }
533
534 if (IS_eDP(intel_encoder)) {
535 /* okay we failed just pick the highest */
536 dp_priv->lane_count = max_lane_count;
537 dp_priv->link_bw = bws[max_clock];
538 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
539 DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
540 "count %d clock %d\n",
541 dp_priv->link_bw, dp_priv->lane_count,
542 adjusted_mode->clock);
543 return true;
544 }
524 return false; 545 return false;
525} 546}
526 547
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index df931f787665..72206f37c4fb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -224,6 +224,7 @@ extern void intel_fbdev_fini(struct drm_device *dev);
224 224
225extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 225extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
226extern void intel_finish_page_flip(struct drm_device *dev, int pipe); 226extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
227extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
227 228
228extern void intel_setup_overlay(struct drm_device *dev); 229extern void intel_setup_overlay(struct drm_device *dev);
229extern void intel_cleanup_overlay(struct drm_device *dev); 230extern void intel_cleanup_overlay(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6a1accd83aec..31df55f0a0a7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -983,8 +983,8 @@ void intel_lvds_init(struct drm_device *dev)
983 983
984 drm_connector_attach_property(&intel_connector->base, 984 drm_connector_attach_property(&intel_connector->base,
985 dev->mode_config.scaling_mode_property, 985 dev->mode_config.scaling_mode_property,
986 DRM_MODE_SCALE_FULLSCREEN); 986 DRM_MODE_SCALE_ASPECT);
987 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; 987 lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
988 /* 988 /*
989 * LVDS discovery: 989 * LVDS discovery:
990 * 1) check for EDID on DDC 990 * 1) check for EDID on DDC
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cea4f1a8709e..26362f8495a8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -94,7 +94,7 @@ render_ring_flush(struct drm_device *dev,
94#if WATCH_EXEC 94#if WATCH_EXEC
95 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 95 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
96#endif 96#endif
97 intel_ring_begin(dev, ring, 8); 97 intel_ring_begin(dev, ring, 2);
98 intel_ring_emit(dev, ring, cmd); 98 intel_ring_emit(dev, ring, cmd);
99 intel_ring_emit(dev, ring, MI_NOOP); 99 intel_ring_emit(dev, ring, MI_NOOP);
100 intel_ring_advance(dev, ring); 100 intel_ring_advance(dev, ring);
@@ -358,7 +358,7 @@ bsd_ring_flush(struct drm_device *dev,
358 u32 invalidate_domains, 358 u32 invalidate_domains,
359 u32 flush_domains) 359 u32 flush_domains)
360{ 360{
361 intel_ring_begin(dev, ring, 8); 361 intel_ring_begin(dev, ring, 2);
362 intel_ring_emit(dev, ring, MI_FLUSH); 362 intel_ring_emit(dev, ring, MI_FLUSH);
363 intel_ring_emit(dev, ring, MI_NOOP); 363 intel_ring_emit(dev, ring, MI_NOOP);
364 intel_ring_advance(dev, ring); 364 intel_ring_advance(dev, ring);
@@ -687,6 +687,7 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
687 *virt++ = MI_NOOP; 687 *virt++ = MI_NOOP;
688 688
689 ring->tail = 0; 689 ring->tail = 0;
690 ring->space = ring->head - 8;
690 691
691 return 0; 692 return 0;
692} 693}
@@ -721,8 +722,9 @@ int intel_wait_ring_buffer(struct drm_device *dev,
721} 722}
722 723
723void intel_ring_begin(struct drm_device *dev, 724void intel_ring_begin(struct drm_device *dev,
724 struct intel_ring_buffer *ring, int n) 725 struct intel_ring_buffer *ring, int num_dwords)
725{ 726{
727 int n = 4*num_dwords;
726 if (unlikely(ring->tail + n > ring->size)) 728 if (unlikely(ring->tail + n > ring->size))
727 intel_wrap_ring_buffer(dev, ring); 729 intel_wrap_ring_buffer(dev, ring);
728 if (unlikely(ring->space < n)) 730 if (unlikely(ring->space < n))
@@ -752,7 +754,7 @@ void intel_fill_struct(struct drm_device *dev,
752{ 754{
753 unsigned int *virt = ring->virtual_start + ring->tail; 755 unsigned int *virt = ring->virtual_start + ring->tail;
754 BUG_ON((len&~(4-1)) != 0); 756 BUG_ON((len&~(4-1)) != 0);
755 intel_ring_begin(dev, ring, len); 757 intel_ring_begin(dev, ring, len/4);
756 memcpy(virt, data, len); 758 memcpy(virt, data, len);
757 ring->tail += len; 759 ring->tail += len;
758 ring->tail &= ring->size - 1; 760 ring->tail &= ring->size - 1;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index f3f2827017ef..8c2d6478a221 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -498,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
498 if ((rdev->family == CHIP_RS600) || 498 if ((rdev->family == CHIP_RS600) ||
499 (rdev->family == CHIP_RS690) || 499 (rdev->family == CHIP_RS690) ||
500 (rdev->family == CHIP_RS740)) 500 (rdev->family == CHIP_RS740))
501 pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV | 501 pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
502 RADEON_PLL_PREFER_CLOSEST_LOWER); 502 RADEON_PLL_PREFER_CLOSEST_LOWER);
503 503
504 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 504 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4b6623df3b96..1caf625e472b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -607,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev)
607 WREG32(MC_VM_FB_LOCATION, tmp); 607 WREG32(MC_VM_FB_LOCATION, tmp);
608 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 608 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
609 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 609 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
610 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 610 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
611 if (rdev->flags & RADEON_IS_AGP) { 611 if (rdev->flags & RADEON_IS_AGP) {
612 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); 612 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
613 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); 613 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
@@ -1222,11 +1222,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1222 ps_thread_count = 128; 1222 ps_thread_count = 128;
1223 1223
1224 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); 1224 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
1225 sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1225 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
1226 sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1226 sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
1227 sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1227 sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
1228 sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1228 sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
1229 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8; 1229 sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
1230 1230
1231 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 1231 sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
1232 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); 1232 sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
@@ -1260,6 +1260,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1260 WREG32(VGT_GS_VERTEX_REUSE, 16); 1260 WREG32(VGT_GS_VERTEX_REUSE, 16);
1261 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 1261 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1262 1262
1263 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
1264 WREG32(VGT_OUT_DEALLOC_CNTL, 16);
1265
1263 WREG32(CB_PERF_CTR0_SEL_0, 0); 1266 WREG32(CB_PERF_CTR0_SEL_0, 0);
1264 WREG32(CB_PERF_CTR0_SEL_1, 0); 1267 WREG32(CB_PERF_CTR0_SEL_1, 0);
1265 WREG32(CB_PERF_CTR1_SEL_0, 0); 1268 WREG32(CB_PERF_CTR1_SEL_0, 0);
@@ -1269,6 +1272,26 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1269 WREG32(CB_PERF_CTR3_SEL_0, 0); 1272 WREG32(CB_PERF_CTR3_SEL_0, 0);
1270 WREG32(CB_PERF_CTR3_SEL_1, 0); 1273 WREG32(CB_PERF_CTR3_SEL_1, 0);
1271 1274
1275 /* clear render buffer base addresses */
1276 WREG32(CB_COLOR0_BASE, 0);
1277 WREG32(CB_COLOR1_BASE, 0);
1278 WREG32(CB_COLOR2_BASE, 0);
1279 WREG32(CB_COLOR3_BASE, 0);
1280 WREG32(CB_COLOR4_BASE, 0);
1281 WREG32(CB_COLOR5_BASE, 0);
1282 WREG32(CB_COLOR6_BASE, 0);
1283 WREG32(CB_COLOR7_BASE, 0);
1284 WREG32(CB_COLOR8_BASE, 0);
1285 WREG32(CB_COLOR9_BASE, 0);
1286 WREG32(CB_COLOR10_BASE, 0);
1287 WREG32(CB_COLOR11_BASE, 0);
1288
1289 /* set the shader const cache sizes to 0 */
1290 for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
1291 WREG32(i, 0);
1292 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
1293 WREG32(i, 0);
1294
1272 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); 1295 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1273 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 1296 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1274 1297
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 64516b950891..010963d4570f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1197,7 +1197,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1197 DRM_ERROR("bad SET_RESOURCE (tex)\n"); 1197 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1198 return -EINVAL; 1198 return -EINVAL;
1199 } 1199 }
1200 ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1200 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1201 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1201 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1202 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1202 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
1203 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1203 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
@@ -1209,7 +1209,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1209 DRM_ERROR("bad SET_RESOURCE (tex)\n"); 1209 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1210 return -EINVAL; 1210 return -EINVAL;
1211 } 1211 }
1212 ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1212 ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1213 mipmap = reloc->robj; 1213 mipmap = reloc->robj;
1214 r = evergreen_check_texture_resource(p, idx+1+(i*8), 1214 r = evergreen_check_texture_resource(p, idx+1+(i*8),
1215 texture, mipmap); 1215 texture, mipmap);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79683f6b4452..a1cd621780e2 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -713,6 +713,9 @@
713#define SQ_GSVS_RING_OFFSET_2 0x28930 713#define SQ_GSVS_RING_OFFSET_2 0x28930
714#define SQ_GSVS_RING_OFFSET_3 0x28934 714#define SQ_GSVS_RING_OFFSET_3 0x28934
715 715
716#define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140
717#define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80
718
716#define SQ_ALU_CONST_CACHE_PS_0 0x28940 719#define SQ_ALU_CONST_CACHE_PS_0 0x28940
717#define SQ_ALU_CONST_CACHE_PS_1 0x28944 720#define SQ_ALU_CONST_CACHE_PS_1 0x28944
718#define SQ_ALU_CONST_CACHE_PS_2 0x28948 721#define SQ_ALU_CONST_CACHE_PS_2 0x28948
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf89aa2eb28c..3970e62eaab8 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1628,6 +1628,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1628 case RADEON_TXFORMAT_RGB332: 1628 case RADEON_TXFORMAT_RGB332:
1629 case RADEON_TXFORMAT_Y8: 1629 case RADEON_TXFORMAT_Y8:
1630 track->textures[i].cpp = 1; 1630 track->textures[i].cpp = 1;
1631 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1631 break; 1632 break;
1632 case RADEON_TXFORMAT_AI88: 1633 case RADEON_TXFORMAT_AI88:
1633 case RADEON_TXFORMAT_ARGB1555: 1634 case RADEON_TXFORMAT_ARGB1555:
@@ -1639,12 +1640,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1639 case RADEON_TXFORMAT_LDUDV655: 1640 case RADEON_TXFORMAT_LDUDV655:
1640 case RADEON_TXFORMAT_DUDV88: 1641 case RADEON_TXFORMAT_DUDV88:
1641 track->textures[i].cpp = 2; 1642 track->textures[i].cpp = 2;
1643 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1642 break; 1644 break;
1643 case RADEON_TXFORMAT_ARGB8888: 1645 case RADEON_TXFORMAT_ARGB8888:
1644 case RADEON_TXFORMAT_RGBA8888: 1646 case RADEON_TXFORMAT_RGBA8888:
1645 case RADEON_TXFORMAT_SHADOW32: 1647 case RADEON_TXFORMAT_SHADOW32:
1646 case RADEON_TXFORMAT_LDUDUV8888: 1648 case RADEON_TXFORMAT_LDUDUV8888:
1647 track->textures[i].cpp = 4; 1649 track->textures[i].cpp = 4;
1650 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1648 break; 1651 break;
1649 case RADEON_TXFORMAT_DXT1: 1652 case RADEON_TXFORMAT_DXT1:
1650 track->textures[i].cpp = 1; 1653 track->textures[i].cpp = 1;
@@ -2604,12 +2607,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2604 int surf_index = reg * 16; 2607 int surf_index = reg * 16;
2605 int flags = 0; 2608 int flags = 0;
2606 2609
2607 /* r100/r200 divide by 16 */
2608 if (rdev->family < CHIP_R300)
2609 flags = pitch / 16;
2610 else
2611 flags = pitch / 8;
2612
2613 if (rdev->family <= CHIP_RS200) { 2610 if (rdev->family <= CHIP_RS200) {
2614 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 2611 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2615 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) 2612 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
@@ -2633,6 +2630,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2633 if (tiling_flags & RADEON_TILING_SWAP_32BIT) 2630 if (tiling_flags & RADEON_TILING_SWAP_32BIT)
2634 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; 2631 flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
2635 2632
 2633	/* when we aren't tiling the pitch seems to need to be further divided down. - tested on power5 + rn50 server */
2634 if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
2635 if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
2636 if (ASIC_IS_RN50(rdev))
2637 pitch /= 16;
2638 }
2639
2640 /* r100/r200 divide by 16 */
2641 if (rdev->family < CHIP_R300)
2642 flags |= pitch / 16;
2643 else
2644 flags |= pitch / 8;
2645
2646
2636 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); 2647 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2637 WREG32(RADEON_SURFACE0_INFO + surf_index, flags); 2648 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2638 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); 2649 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
@@ -3147,33 +3158,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
3147 DRM_ERROR("compress format %d\n", t->compress_format); 3158 DRM_ERROR("compress format %d\n", t->compress_format);
3148} 3159}
3149 3160
3150static int r100_cs_track_cube(struct radeon_device *rdev,
3151 struct r100_cs_track *track, unsigned idx)
3152{
3153 unsigned face, w, h;
3154 struct radeon_bo *cube_robj;
3155 unsigned long size;
3156
3157 for (face = 0; face < 5; face++) {
3158 cube_robj = track->textures[idx].cube_info[face].robj;
3159 w = track->textures[idx].cube_info[face].width;
3160 h = track->textures[idx].cube_info[face].height;
3161
3162 size = w * h;
3163 size *= track->textures[idx].cpp;
3164
3165 size += track->textures[idx].cube_info[face].offset;
3166
3167 if (size > radeon_bo_size(cube_robj)) {
3168 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
3169 size, radeon_bo_size(cube_robj));
3170 r100_cs_track_texture_print(&track->textures[idx]);
3171 return -1;
3172 }
3173 }
3174 return 0;
3175}
3176
3177static int r100_track_compress_size(int compress_format, int w, int h) 3161static int r100_track_compress_size(int compress_format, int w, int h)
3178{ 3162{
3179 int block_width, block_height, block_bytes; 3163 int block_width, block_height, block_bytes;
@@ -3204,6 +3188,37 @@ static int r100_track_compress_size(int compress_format, int w, int h)
3204 return sz; 3188 return sz;
3205} 3189}
3206 3190
3191static int r100_cs_track_cube(struct radeon_device *rdev,
3192 struct r100_cs_track *track, unsigned idx)
3193{
3194 unsigned face, w, h;
3195 struct radeon_bo *cube_robj;
3196 unsigned long size;
3197 unsigned compress_format = track->textures[idx].compress_format;
3198
3199 for (face = 0; face < 5; face++) {
3200 cube_robj = track->textures[idx].cube_info[face].robj;
3201 w = track->textures[idx].cube_info[face].width;
3202 h = track->textures[idx].cube_info[face].height;
3203
3204 if (compress_format) {
3205 size = r100_track_compress_size(compress_format, w, h);
3206 } else
3207 size = w * h;
3208 size *= track->textures[idx].cpp;
3209
3210 size += track->textures[idx].cube_info[face].offset;
3211
3212 if (size > radeon_bo_size(cube_robj)) {
3213 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
3214 size, radeon_bo_size(cube_robj));
3215 r100_cs_track_texture_print(&track->textures[idx]);
3216 return -1;
3217 }
3218 }
3219 return 0;
3220}
3221
3207static int r100_cs_track_texture_check(struct radeon_device *rdev, 3222static int r100_cs_track_texture_check(struct radeon_device *rdev,
3208 struct r100_cs_track *track) 3223 struct r100_cs_track *track)
3209{ 3224{
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 85617c311212..0266d72e0a4c 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
415 /* 2D, 3D, CUBE */ 415 /* 2D, 3D, CUBE */
416 switch (tmp) { 416 switch (tmp) {
417 case 0: 417 case 0:
418 case 3:
419 case 4:
418 case 5: 420 case 5:
419 case 6: 421 case 6:
420 case 7: 422 case 7:
@@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
450 case R200_TXFORMAT_RGB332: 452 case R200_TXFORMAT_RGB332:
451 case R200_TXFORMAT_Y8: 453 case R200_TXFORMAT_Y8:
452 track->textures[i].cpp = 1; 454 track->textures[i].cpp = 1;
455 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
453 break; 456 break;
454 case R200_TXFORMAT_AI88: 457 case R200_TXFORMAT_AI88:
455 case R200_TXFORMAT_ARGB1555: 458 case R200_TXFORMAT_ARGB1555:
@@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
461 case R200_TXFORMAT_DVDU88: 464 case R200_TXFORMAT_DVDU88:
462 case R200_TXFORMAT_AVYU4444: 465 case R200_TXFORMAT_AVYU4444:
463 track->textures[i].cpp = 2; 466 track->textures[i].cpp = 2;
467 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
464 break; 468 break;
465 case R200_TXFORMAT_ARGB8888: 469 case R200_TXFORMAT_ARGB8888:
466 case R200_TXFORMAT_RGBA8888: 470 case R200_TXFORMAT_RGBA8888:
@@ -468,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
468 case R200_TXFORMAT_BGR111110: 472 case R200_TXFORMAT_BGR111110:
469 case R200_TXFORMAT_LDVDU8888: 473 case R200_TXFORMAT_LDVDU8888:
470 track->textures[i].cpp = 4; 474 track->textures[i].cpp = 4;
475 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
471 break; 476 break;
472 case R200_TXFORMAT_DXT1: 477 case R200_TXFORMAT_DXT1:
473 track->textures[i].cpp = 1; 478 track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index b2f9efe2897c..7e81db5eb804 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -881,6 +881,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
881 case R300_TX_FORMAT_Y4X4: 881 case R300_TX_FORMAT_Y4X4:
882 case R300_TX_FORMAT_Z3Y3X2: 882 case R300_TX_FORMAT_Z3Y3X2:
883 track->textures[i].cpp = 1; 883 track->textures[i].cpp = 1;
884 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
884 break; 885 break;
885 case R300_TX_FORMAT_X16: 886 case R300_TX_FORMAT_X16:
886 case R300_TX_FORMAT_Y8X8: 887 case R300_TX_FORMAT_Y8X8:
@@ -892,6 +893,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
892 case R300_TX_FORMAT_B8G8_B8G8: 893 case R300_TX_FORMAT_B8G8_B8G8:
893 case R300_TX_FORMAT_G8R8_G8B8: 894 case R300_TX_FORMAT_G8R8_G8B8:
894 track->textures[i].cpp = 2; 895 track->textures[i].cpp = 2;
896 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
895 break; 897 break;
896 case R300_TX_FORMAT_Y16X16: 898 case R300_TX_FORMAT_Y16X16:
897 case R300_TX_FORMAT_Z11Y11X10: 899 case R300_TX_FORMAT_Z11Y11X10:
@@ -902,14 +904,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
902 case R300_TX_FORMAT_FL_I32: 904 case R300_TX_FORMAT_FL_I32:
903 case 0x1e: 905 case 0x1e:
904 track->textures[i].cpp = 4; 906 track->textures[i].cpp = 4;
907 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
905 break; 908 break;
906 case R300_TX_FORMAT_W16Z16Y16X16: 909 case R300_TX_FORMAT_W16Z16Y16X16:
907 case R300_TX_FORMAT_FL_R16G16B16A16: 910 case R300_TX_FORMAT_FL_R16G16B16A16:
908 case R300_TX_FORMAT_FL_I32A32: 911 case R300_TX_FORMAT_FL_I32A32:
909 track->textures[i].cpp = 8; 912 track->textures[i].cpp = 8;
913 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
910 break; 914 break;
911 case R300_TX_FORMAT_FL_R32G32B32A32: 915 case R300_TX_FORMAT_FL_R32G32B32A32:
912 track->textures[i].cpp = 16; 916 track->textures[i].cpp = 16;
917 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
913 break; 918 break;
914 case R300_TX_FORMAT_DXT1: 919 case R300_TX_FORMAT_DXT1:
915 track->textures[i].cpp = 1; 920 track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0e91871f45be..3d6645ce2151 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -130,9 +130,14 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
130 break; 130 break;
131 } 131 }
132 } 132 }
133 } else 133 } else {
134 rdev->pm.requested_power_state_index = 134 if (rdev->pm.current_power_state_index == 0)
135 rdev->pm.current_power_state_index - 1; 135 rdev->pm.requested_power_state_index =
136 rdev->pm.num_power_states - 1;
137 else
138 rdev->pm.requested_power_state_index =
139 rdev->pm.current_power_state_index - 1;
140 }
136 } 141 }
137 rdev->pm.requested_clock_mode_index = 0; 142 rdev->pm.requested_clock_mode_index = 0;
138 /* don't use the power state if crtcs are active and no display flag is set */ 143 /* don't use the power state if crtcs are active and no display flag is set */
@@ -1097,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev)
1097 WREG32(MC_VM_FB_LOCATION, tmp); 1102 WREG32(MC_VM_FB_LOCATION, tmp);
1098 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 1103 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1099 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 1104 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1100 WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); 1105 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1101 if (rdev->flags & RADEON_IS_AGP) { 1106 if (rdev->flags & RADEON_IS_AGP) {
1102 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); 1107 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1103 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); 1108 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
@@ -1219,8 +1224,10 @@ int r600_mc_init(struct radeon_device *rdev)
1219 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1224 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1220 r600_vram_gtt_location(rdev, &rdev->mc); 1225 r600_vram_gtt_location(rdev, &rdev->mc);
1221 1226
1222 if (rdev->flags & RADEON_IS_IGP) 1227 if (rdev->flags & RADEON_IS_IGP) {
1228 rs690_pm_info(rdev);
1223 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 1229 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1230 }
1224 radeon_update_bandwidth_info(rdev); 1231 radeon_update_bandwidth_info(rdev);
1225 return 0; 1232 return 0;
1226} 1233}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8e1d44ca26ec..ab61aaa887bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -177,6 +177,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
177void radeon_combios_get_power_modes(struct radeon_device *rdev); 177void radeon_combios_get_power_modes(struct radeon_device *rdev);
178void radeon_atombios_get_power_modes(struct radeon_device *rdev); 178void radeon_atombios_get_power_modes(struct radeon_device *rdev);
179void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); 179void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
180void rs690_pm_info(struct radeon_device *rdev);
180 181
181/* 182/*
182 * Fences. 183 * Fences.
@@ -619,7 +620,8 @@ enum radeon_dynpm_state {
619 DYNPM_STATE_DISABLED, 620 DYNPM_STATE_DISABLED,
620 DYNPM_STATE_MINIMUM, 621 DYNPM_STATE_MINIMUM,
621 DYNPM_STATE_PAUSED, 622 DYNPM_STATE_PAUSED,
622 DYNPM_STATE_ACTIVE 623 DYNPM_STATE_ACTIVE,
624 DYNPM_STATE_SUSPENDED,
623}; 625};
624enum radeon_dynpm_action { 626enum radeon_dynpm_action {
625 DYNPM_ACTION_NONE, 627 DYNPM_ACTION_NONE,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 87f7e2cc52d4..646f96f97c77 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -780,6 +780,13 @@ int radeon_asic_init(struct radeon_device *rdev)
780 case CHIP_R423: 780 case CHIP_R423:
781 case CHIP_RV410: 781 case CHIP_RV410:
782 rdev->asic = &r420_asic; 782 rdev->asic = &r420_asic;
783 /* handle macs */
784 if (rdev->bios == NULL) {
785 rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
786 rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
787 rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
788 rdev->asic->set_memory_clock = NULL;
789 }
783 break; 790 break;
784 case CHIP_RS400: 791 case CHIP_RS400:
785 case CHIP_RS480: 792 case CHIP_RS480:
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index fbba938f8048..2c9213739999 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -48,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
48 resource_size_t vram_base; 48 resource_size_t vram_base;
49 resource_size_t size = 256 * 1024; /* ??? */ 49 resource_size_t size = 256 * 1024; /* ??? */
50 50
51 if (!(rdev->flags & RADEON_IS_IGP))
52 if (!radeon_card_posted(rdev))
53 return false;
54
51 rdev->bios = NULL; 55 rdev->bios = NULL;
52 vram_base = drm_get_resource_start(rdev->ddev, 0); 56 vram_base = drm_get_resource_start(rdev->ddev, 0);
53 bios = ioremap(vram_base, size); 57 bios = ioremap(vram_base, size);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 1bee2f9e24a5..d1c1d8dd93ce 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1411,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1411 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; 1411 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
1412 } else 1412 } else
1413#endif /* CONFIG_PPC_PMAC */ 1413#endif /* CONFIG_PPC_PMAC */
1414#ifdef CONFIG_PPC64
1415 if (ASIC_IS_RN50(rdev))
1416 rdev->mode_info.connector_table = CT_RN50_POWER;
1417 else
1418#endif
1414 rdev->mode_info.connector_table = CT_GENERIC; 1419 rdev->mode_info.connector_table = CT_GENERIC;
1415 } 1420 }
1416 1421
@@ -1853,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1853 CONNECTOR_OBJECT_ID_SVIDEO, 1858 CONNECTOR_OBJECT_ID_SVIDEO,
1854 &hpd); 1859 &hpd);
1855 break; 1860 break;
1861 case CT_RN50_POWER:
1862 DRM_INFO("Connector Table: %d (rn50-power)\n",
1863 rdev->mode_info.connector_table);
1864 /* VGA - primary dac */
1865 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1866 hpd.hpd = RADEON_HPD_NONE;
1867 radeon_add_legacy_encoder(dev,
1868 radeon_get_encoder_id(dev,
1869 ATOM_DEVICE_CRT1_SUPPORT,
1870 1),
1871 ATOM_DEVICE_CRT1_SUPPORT);
1872 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
1873 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1874 CONNECTOR_OBJECT_ID_VGA,
1875 &hpd);
1876 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1877 hpd.hpd = RADEON_HPD_NONE;
1878 radeon_add_legacy_encoder(dev,
1879 radeon_get_encoder_id(dev,
1880 ATOM_DEVICE_CRT2_SUPPORT,
1881 2),
1882 ATOM_DEVICE_CRT2_SUPPORT);
1883 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1884 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1885 CONNECTOR_OBJECT_ID_VGA,
1886 &hpd);
1887 break;
1856 default: 1888 default:
1857 DRM_INFO("Connector table: %d (invalid)\n", 1889 DRM_INFO("Connector table: %d (invalid)\n",
1858 rdev->mode_info.connector_table); 1890 rdev->mode_info.connector_table);
@@ -1906,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1906 return false; 1938 return false;
1907 } 1939 }
1908 1940
1909 /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
1910 if (dev->pdev->device == 0x5159 &&
1911 dev->pdev->subsystem_vendor == 0x1002 &&
1912 dev->pdev->subsystem_device == 0x013a) {
1913 if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
1914 *legacy_connector = CONNECTOR_CRT_LEGACY;
1915
1916 }
1917
1918 /* X300 card with extra non-existent DVI port */ 1941 /* X300 card with extra non-existent DVI port */
1919 if (dev->pdev->device == 0x5B60 && 1942 if (dev->pdev->device == 0x5B60 &&
1920 dev->pdev->subsystem_vendor == 0x17af && 1943 dev->pdev->subsystem_vendor == 0x17af &&
@@ -3019,6 +3042,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
3019 combios_write_ram_size(dev); 3042 combios_write_ram_size(dev);
3020 } 3043 }
3021 3044
3045 /* quirk for rs4xx HP nx6125 laptop to make it resume
3046 * - it hangs on resume inside the dynclk 1 table.
3047 */
3048 if (rdev->family == CHIP_RS480 &&
3049 rdev->pdev->subsystem_vendor == 0x103c &&
3050 rdev->pdev->subsystem_device == 0x308b)
3051 return;
3052
3022 /* DYN CLK 1 */ 3053 /* DYN CLK 1 */
3023 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3054 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
3024 if (table) 3055 if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index b7023fff89eb..4eb67c0e0996 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -194,7 +194,7 @@ unpin:
194fail: 194fail:
195 drm_gem_object_unreference_unlocked(obj); 195 drm_gem_object_unreference_unlocked(obj);
196 196
197 return 0; 197 return ret;
198} 198}
199 199
200int radeon_crtc_cursor_move(struct drm_crtc *crtc, 200int radeon_crtc_cursor_move(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f10faed21567..5f317317aba2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -779,6 +779,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
779 779
780int radeon_resume_kms(struct drm_device *dev) 780int radeon_resume_kms(struct drm_device *dev)
781{ 781{
782 struct drm_connector *connector;
782 struct radeon_device *rdev = dev->dev_private; 783 struct radeon_device *rdev = dev->dev_private;
783 784
784 if (rdev->powered_down) 785 if (rdev->powered_down)
@@ -797,6 +798,12 @@ int radeon_resume_kms(struct drm_device *dev)
797 radeon_resume(rdev); 798 radeon_resume(rdev);
798 radeon_pm_resume(rdev); 799 radeon_pm_resume(rdev);
799 radeon_restore_bios_scratch_regs(rdev); 800 radeon_restore_bios_scratch_regs(rdev);
801
802 /* turn on display hw */
803 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
804 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
805 }
806
800 radeon_fbdev_set_suspend(rdev, 0); 807 radeon_fbdev_set_suspend(rdev, 0);
801 release_console_sem(); 808 release_console_sem();
802 809
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 1ebb100015b7..e0b30b264c28 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1072,6 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1072 if (is_dig) { 1072 if (is_dig) {
1073 switch (mode) { 1073 switch (mode) {
1074 case DRM_MODE_DPMS_ON: 1074 case DRM_MODE_DPMS_ON:
1075 if (!ASIC_IS_DCE4(rdev))
1076 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1075 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1077 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1076 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1078 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1077 1079
@@ -1079,8 +1081,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1079 if (ASIC_IS_DCE4(rdev)) 1081 if (ASIC_IS_DCE4(rdev))
1080 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1082 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
1081 } 1083 }
1082 if (!ASIC_IS_DCE4(rdev))
1083 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1084 break; 1084 break;
1085 case DRM_MODE_DPMS_STANDBY: 1085 case DRM_MODE_DPMS_STANDBY:
1086 case DRM_MODE_DPMS_SUSPEND: 1086 case DRM_MODE_DPMS_SUSPEND:
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5b07b8848e09..bad77f40a9da 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -928,16 +928,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
928 if (ASIC_IS_R300(rdev)) { 928 if (ASIC_IS_R300(rdev)) {
929 gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; 929 gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
930 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); 930 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
931 } 931 } else if (rdev->family != CHIP_R200)
932
933 if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev))
934 disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
935 else
936 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); 932 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
937 933 else if (rdev->family == CHIP_R200)
938 if (rdev->family == CHIP_R200)
939 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 934 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
940 935
936 if (rdev->family >= CHIP_R200)
937 disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
938
941 if (is_tv) { 939 if (is_tv) {
942 uint32_t dac_cntl; 940 uint32_t dac_cntl;
943 941
@@ -1002,15 +1000,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
1002 if (ASIC_IS_R300(rdev)) { 1000 if (ASIC_IS_R300(rdev)) {
1003 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); 1001 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1004 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); 1002 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
1005 } 1003 } else if (rdev->family != CHIP_R200)
1004 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1005 else if (rdev->family == CHIP_R200)
1006 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
1006 1007
1007 if (rdev->family >= CHIP_R200) 1008 if (rdev->family >= CHIP_R200)
1008 WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); 1009 WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
1009 else
1010 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1011
1012 if (rdev->family == CHIP_R200)
1013 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
1014 1010
1015 if (is_tv) 1011 if (is_tv)
1016 radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); 1012 radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 67358baf28b2..95696aa57ac8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -206,6 +206,7 @@ enum radeon_connector_table {
206 CT_MINI_INTERNAL, 206 CT_MINI_INTERNAL,
207 CT_IMAC_G5_ISIGHT, 207 CT_IMAC_G5_ISIGHT,
208 CT_EMAC, 208 CT_EMAC,
209 CT_RN50_POWER,
209}; 210};
210 211
211enum radeon_dvo_chip { 212enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 63f679a04b25..115d26b762cc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -397,13 +397,20 @@ static ssize_t radeon_set_pm_method(struct device *dev,
397 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 397 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
398 mutex_unlock(&rdev->pm.mutex); 398 mutex_unlock(&rdev->pm.mutex);
399 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 399 } else if (strncmp("profile", buf, strlen("profile")) == 0) {
400 bool flush_wq = false;
401
400 mutex_lock(&rdev->pm.mutex); 402 mutex_lock(&rdev->pm.mutex);
401 rdev->pm.pm_method = PM_METHOD_PROFILE; 403 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
404 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
405 flush_wq = true;
406 }
402 /* disable dynpm */ 407 /* disable dynpm */
403 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 408 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
404 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 409 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
405 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 410 rdev->pm.pm_method = PM_METHOD_PROFILE;
406 mutex_unlock(&rdev->pm.mutex); 411 mutex_unlock(&rdev->pm.mutex);
412 if (flush_wq)
413 flush_workqueue(rdev->wq);
407 } else { 414 } else {
408 DRM_ERROR("invalid power method!\n"); 415 DRM_ERROR("invalid power method!\n");
409 goto fail; 416 goto fail;
@@ -418,9 +425,18 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon
418 425
419void radeon_pm_suspend(struct radeon_device *rdev) 426void radeon_pm_suspend(struct radeon_device *rdev)
420{ 427{
428 bool flush_wq = false;
429
421 mutex_lock(&rdev->pm.mutex); 430 mutex_lock(&rdev->pm.mutex);
422 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 431 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
432 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
433 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
434 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
435 flush_wq = true;
436 }
423 mutex_unlock(&rdev->pm.mutex); 437 mutex_unlock(&rdev->pm.mutex);
438 if (flush_wq)
439 flush_workqueue(rdev->wq);
424} 440}
425 441
426void radeon_pm_resume(struct radeon_device *rdev) 442void radeon_pm_resume(struct radeon_device *rdev)
@@ -432,6 +448,12 @@ void radeon_pm_resume(struct radeon_device *rdev)
432 rdev->pm.current_sclk = rdev->clock.default_sclk; 448 rdev->pm.current_sclk = rdev->clock.default_sclk;
433 rdev->pm.current_mclk = rdev->clock.default_mclk; 449 rdev->pm.current_mclk = rdev->clock.default_mclk;
434 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 450 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
451 if (rdev->pm.pm_method == PM_METHOD_DYNPM
452 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
453 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
454 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
455 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
456 }
435 mutex_unlock(&rdev->pm.mutex); 457 mutex_unlock(&rdev->pm.mutex);
436 radeon_pm_compute_clocks(rdev); 458 radeon_pm_compute_clocks(rdev);
437} 459}
@@ -486,6 +508,8 @@ int radeon_pm_init(struct radeon_device *rdev)
486void radeon_pm_fini(struct radeon_device *rdev) 508void radeon_pm_fini(struct radeon_device *rdev)
487{ 509{
488 if (rdev->pm.num_power_states > 1) { 510 if (rdev->pm.num_power_states > 1) {
511 bool flush_wq = false;
512
489 mutex_lock(&rdev->pm.mutex); 513 mutex_lock(&rdev->pm.mutex);
490 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 514 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
491 rdev->pm.profile = PM_PROFILE_DEFAULT; 515 rdev->pm.profile = PM_PROFILE_DEFAULT;
@@ -493,13 +517,16 @@ void radeon_pm_fini(struct radeon_device *rdev)
493 radeon_pm_set_clocks(rdev); 517 radeon_pm_set_clocks(rdev);
494 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 518 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
495 /* cancel work */ 519 /* cancel work */
496 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 520 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
521 flush_wq = true;
497 /* reset default clocks */ 522 /* reset default clocks */
498 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 523 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
499 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 524 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
500 radeon_pm_set_clocks(rdev); 525 radeon_pm_set_clocks(rdev);
501 } 526 }
502 mutex_unlock(&rdev->pm.mutex); 527 mutex_unlock(&rdev->pm.mutex);
528 if (flush_wq)
529 flush_workqueue(rdev->wq);
503 530
504 device_remove_file(rdev->dev, &dev_attr_power_profile); 531 device_remove_file(rdev->dev, &dev_attr_power_profile);
505 device_remove_file(rdev->dev, &dev_attr_power_method); 532 device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -720,12 +747,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
720 radeon_pm_get_dynpm_state(rdev); 747 radeon_pm_get_dynpm_state(rdev);
721 radeon_pm_set_clocks(rdev); 748 radeon_pm_set_clocks(rdev);
722 } 749 }
750
751 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
752 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
723 } 753 }
724 mutex_unlock(&rdev->pm.mutex); 754 mutex_unlock(&rdev->pm.mutex);
725 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 755 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
726
727 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
728 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
729} 756}
730 757
731/* 758/*
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index b5c757f68d3c..f78fd592544d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -80,8 +80,8 @@ evergreen 0x9400
800x00028010 DB_RENDER_OVERRIDE2 800x00028010 DB_RENDER_OVERRIDE2
810x00028028 DB_STENCIL_CLEAR 810x00028028 DB_STENCIL_CLEAR
820x0002802C DB_DEPTH_CLEAR 820x0002802C DB_DEPTH_CLEAR
830x00028034 PA_SC_SCREEN_SCISSOR_BR
840x00028030 PA_SC_SCREEN_SCISSOR_TL 830x00028030 PA_SC_SCREEN_SCISSOR_TL
840x00028034 PA_SC_SCREEN_SCISSOR_BR
850x0002805C DB_DEPTH_SLICE 850x0002805C DB_DEPTH_SLICE
860x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 860x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
870x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 870x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
@@ -460,8 +460,8 @@ evergreen 0x9400
4600x00028844 SQ_PGM_RESOURCES_PS 4600x00028844 SQ_PGM_RESOURCES_PS
4610x00028848 SQ_PGM_RESOURCES_2_PS 4610x00028848 SQ_PGM_RESOURCES_2_PS
4620x0002884C SQ_PGM_EXPORTS_PS 4620x0002884C SQ_PGM_EXPORTS_PS
4630x0002885C SQ_PGM_RESOURCES_VS 4630x00028860 SQ_PGM_RESOURCES_VS
4640x00028860 SQ_PGM_RESOURCES_2_VS 4640x00028864 SQ_PGM_RESOURCES_2_VS
4650x00028878 SQ_PGM_RESOURCES_GS 4650x00028878 SQ_PGM_RESOURCES_GS
4660x0002887C SQ_PGM_RESOURCES_2_GS 4660x0002887C SQ_PGM_RESOURCES_2_GS
4670x00028890 SQ_PGM_RESOURCES_ES 4670x00028890 SQ_PGM_RESOURCES_ES
@@ -469,8 +469,8 @@ evergreen 0x9400
4690x000288A8 SQ_PGM_RESOURCES_FS 4690x000288A8 SQ_PGM_RESOURCES_FS
4700x000288BC SQ_PGM_RESOURCES_HS 4700x000288BC SQ_PGM_RESOURCES_HS
4710x000288C0 SQ_PGM_RESOURCES_2_HS 4710x000288C0 SQ_PGM_RESOURCES_2_HS
4720x000288D0 SQ_PGM_RESOURCES_LS 4720x000288D4 SQ_PGM_RESOURCES_LS
4730x000288D4 SQ_PGM_RESOURCES_2_LS 4730x000288D8 SQ_PGM_RESOURCES_2_LS
4740x000288E8 SQ_LDS_ALLOC 4740x000288E8 SQ_LDS_ALLOC
4750x000288EC SQ_LDS_ALLOC_PS 4750x000288EC SQ_LDS_ALLOC_PS
4760x000288F0 SQ_VTX_SEMANTIC_CLEAR 4760x000288F0 SQ_VTX_SEMANTIC_CLEAR
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bcc33195ebc2..f4f0a61bcdce 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -79,7 +79,13 @@ void rs690_pm_info(struct radeon_device *rdev)
79 tmp.full = dfixed_const(100); 79 tmp.full = dfixed_const(100);
80 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); 80 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
82 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); 82 if (info->info.usK8MemoryClock)
83 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
84 else if (rdev->clock.default_mclk) {
85 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
86 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
87 } else
88 rdev->pm.igp_system_mclk.full = dfixed_const(400);
83 rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock)); 89 rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
84 rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth); 90 rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
85 break; 91 break;
@@ -87,34 +93,31 @@ void rs690_pm_info(struct radeon_device *rdev)
87 tmp.full = dfixed_const(100); 93 tmp.full = dfixed_const(100);
88 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); 94 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
89 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 95 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
90 rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); 96 if (info->info_v2.ulBootUpUMAClock)
97 rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
98 else if (rdev->clock.default_mclk)
99 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
100 else
101 rdev->pm.igp_system_mclk.full = dfixed_const(66700);
91 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); 102 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
92 rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); 103 rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
93 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); 104 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
94 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); 105 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
95 break; 106 break;
96 default: 107 default:
97 tmp.full = dfixed_const(100);
98 /* We assume the slower possible clock ie worst case */ 108 /* We assume the slower possible clock ie worst case */
99 /* DDR 333Mhz */ 109 rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
100 rdev->pm.igp_sideport_mclk.full = dfixed_const(333); 110 rdev->pm.igp_system_mclk.full = dfixed_const(200);
101 /* FIXME: system clock ? */ 111 rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
102 rdev->pm.igp_system_mclk.full = dfixed_const(100);
103 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
104 rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
105 rdev->pm.igp_ht_link_width.full = dfixed_const(8); 112 rdev->pm.igp_ht_link_width.full = dfixed_const(8);
106 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 113 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
107 break; 114 break;
108 } 115 }
109 } else { 116 } else {
110 tmp.full = dfixed_const(100);
111 /* We assume the slower possible clock ie worst case */ 117 /* We assume the slower possible clock ie worst case */
112 /* DDR 333Mhz */ 118 rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
113 rdev->pm.igp_sideport_mclk.full = dfixed_const(333); 119 rdev->pm.igp_system_mclk.full = dfixed_const(200);
114 /* FIXME: system clock ? */ 120 rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
115 rdev->pm.igp_system_mclk.full = dfixed_const(100);
116 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
117 rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
118 rdev->pm.igp_ht_link_width.full = dfixed_const(8); 121 rdev->pm.igp_ht_link_width.full = dfixed_const(8);
119 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 122 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
120 } 123 }
@@ -228,10 +231,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
228 fixed20_12 a, b, c; 231 fixed20_12 a, b, c;
229 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; 232 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
230 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; 233 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
231 /* FIXME: detect IGP with sideport memory, i don't think there is any
232 * such product available
233 */
234 bool sideport = false;
235 234
236 if (!crtc->base.enabled) { 235 if (!crtc->base.enabled) {
237 /* FIXME: wouldn't it better to set priority mark to maximum */ 236 /* FIXME: wouldn't it better to set priority mark to maximum */
@@ -300,7 +299,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
300 299
 301	/* Maximum bandwidth is the minimum bandwidth of all components */	 300	/* Maximum bandwidth is the minimum bandwidth of all components */
302 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; 301 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
303 if (sideport) { 302 if (rdev->mc.igp_sideport_enabled) {
304 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && 303 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
305 rdev->pm.sideport_bandwidth.full) 304 rdev->pm.sideport_bandwidth.full)
306 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; 305 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index cec536c222c5..b7fd82064922 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -224,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
224 WREG32(MC_VM_FB_LOCATION, tmp); 224 WREG32(MC_VM_FB_LOCATION, tmp);
225 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 225 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
226 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 226 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
227 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 227 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
228 if (rdev->flags & RADEON_IS_AGP) { 228 if (rdev->flags & RADEON_IS_AGP) {
229 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); 229 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
230 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); 230 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ef910694bd63..2f047577b1e3 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -667,7 +667,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
667{ 667{
668 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 668 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
669 struct page *p = NULL; 669 struct page *p = NULL;
670 int gfp_flags = 0; 670 int gfp_flags = GFP_USER;
671 int r; 671 int r;
672 672
673 /* set zero flag for page allocation if required */ 673 /* set zero flag for page allocation if required */
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 846b75aeb2ab..e7839ee49e43 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -128,13 +128,12 @@ xfs_nfs_get_inode(
128 return ERR_PTR(-ESTALE); 128 return ERR_PTR(-ESTALE);
129 129
130 /* 130 /*
131 * The XFS_IGET_BULKSTAT means that an invalid inode number is just 131 * The XFS_IGET_UNTRUSTED means that an invalid inode number is just
132 * fine and not an indication of a corrupted filesystem. Because 132 * fine and not an indication of a corrupted filesystem as clients can
133 * clients can send any kind of invalid file handle, e.g. after 133 * send invalid file handles and we have to handle it gracefully..
134 * a restore on the server we have to deal with this case gracefully.
135 */ 134 */
136 error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT, 135 error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
137 XFS_ILOCK_SHARED, &ip, 0); 136 XFS_ILOCK_SHARED, &ip);
138 if (error) { 137 if (error) {
139 /* 138 /*
140 * EINVAL means the inode cluster doesn't exist anymore. 139 * EINVAL means the inode cluster doesn't exist anymore.
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 699b60cbab9c..e59a81062830 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -679,10 +679,9 @@ xfs_ioc_bulkstat(
679 error = xfs_bulkstat_single(mp, &inlast, 679 error = xfs_bulkstat_single(mp, &inlast,
680 bulkreq.ubuffer, &done); 680 bulkreq.ubuffer, &done);
681 else /* XFS_IOC_FSBULKSTAT */ 681 else /* XFS_IOC_FSBULKSTAT */
682 error = xfs_bulkstat(mp, &inlast, &count, 682 error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
683 (bulkstat_one_pf)xfs_bulkstat_one, NULL, 683 sizeof(xfs_bstat_t), bulkreq.ubuffer,
684 sizeof(xfs_bstat_t), bulkreq.ubuffer, 684 &done);
685 BULKSTAT_FG_QUICK, &done);
686 685
687 if (error) 686 if (error)
688 return -error; 687 return -error;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 9287135e9bfc..52ed49e6465c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -237,15 +237,12 @@ xfs_bulkstat_one_compat(
237 xfs_ino_t ino, /* inode number to get data for */ 237 xfs_ino_t ino, /* inode number to get data for */
238 void __user *buffer, /* buffer to place output in */ 238 void __user *buffer, /* buffer to place output in */
239 int ubsize, /* size of buffer */ 239 int ubsize, /* size of buffer */
240 void *private_data, /* my private data */
241 xfs_daddr_t bno, /* starting bno of inode cluster */
242 int *ubused, /* bytes used by me */ 240 int *ubused, /* bytes used by me */
243 void *dibuff, /* on-disk inode buffer */
244 int *stat) /* BULKSTAT_RV_... */ 241 int *stat) /* BULKSTAT_RV_... */
245{ 242{
246 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, 243 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
247 xfs_bulkstat_one_fmt_compat, bno, 244 xfs_bulkstat_one_fmt_compat,
248 ubused, dibuff, stat); 245 ubused, stat);
249} 246}
250 247
251/* copied from xfs_ioctl.c */ 248/* copied from xfs_ioctl.c */
@@ -298,13 +295,11 @@ xfs_compat_ioc_bulkstat(
298 int res; 295 int res;
299 296
300 error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, 297 error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
301 sizeof(compat_xfs_bstat_t), 298 sizeof(compat_xfs_bstat_t), 0, &res);
302 NULL, 0, NULL, NULL, &res);
303 } else if (cmd == XFS_IOC_FSBULKSTAT_32) { 299 } else if (cmd == XFS_IOC_FSBULKSTAT_32) {
304 error = xfs_bulkstat(mp, &inlast, &count, 300 error = xfs_bulkstat(mp, &inlast, &count,
305 xfs_bulkstat_one_compat, NULL, 301 xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
306 sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, 302 bulkreq.ubuffer, &done);
307 BULKSTAT_FG_QUICK, &done);
308 } else 303 } else
309 error = XFS_ERROR(EINVAL); 304 error = XFS_ERROR(EINVAL);
310 if (error) 305 if (error)
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 2d8b7bc792c9..8c117ff2e3ab 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1632,10 +1632,7 @@ xfs_qm_dqusage_adjust(
1632 xfs_ino_t ino, /* inode number to get data for */ 1632 xfs_ino_t ino, /* inode number to get data for */
1633 void __user *buffer, /* not used */ 1633 void __user *buffer, /* not used */
1634 int ubsize, /* not used */ 1634 int ubsize, /* not used */
1635 void *private_data, /* not used */
1636 xfs_daddr_t bno, /* starting block of inode cluster */
1637 int *ubused, /* not used */ 1635 int *ubused, /* not used */
1638 void *dip, /* on-disk inode pointer (not used) */
1639 int *res) /* result code value */ 1636 int *res) /* result code value */
1640{ 1637{
1641 xfs_inode_t *ip; 1638 xfs_inode_t *ip;
@@ -1660,7 +1657,7 @@ xfs_qm_dqusage_adjust(
1660 * the case in all other instances. It's OK that we do this because 1657 * the case in all other instances. It's OK that we do this because
1661 * quotacheck is done only at mount time. 1658 * quotacheck is done only at mount time.
1662 */ 1659 */
1663 if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) { 1660 if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) {
1664 *res = BULKSTAT_RV_NOTHING; 1661 *res = BULKSTAT_RV_NOTHING;
1665 return error; 1662 return error;
1666 } 1663 }
@@ -1796,12 +1793,13 @@ xfs_qm_quotacheck(
1796 * Iterate thru all the inodes in the file system, 1793 * Iterate thru all the inodes in the file system,
1797 * adjusting the corresponding dquot counters in core. 1794 * adjusting the corresponding dquot counters in core.
1798 */ 1795 */
1799 if ((error = xfs_bulkstat(mp, &lastino, &count, 1796 error = xfs_bulkstat(mp, &lastino, &count,
1800 xfs_qm_dqusage_adjust, NULL, 1797 xfs_qm_dqusage_adjust,
1801 structsz, NULL, BULKSTAT_FG_IGET, &done))) 1798 structsz, NULL, &done);
1799 if (error)
1802 break; 1800 break;
1803 1801
1804 } while (! done); 1802 } while (!done);
1805 1803
1806 /* 1804 /*
1807 * We've made all the changes that we need to make incore. 1805 * We've made all the changes that we need to make incore.
@@ -1889,14 +1887,14 @@ xfs_qm_init_quotainos(
1889 mp->m_sb.sb_uquotino != NULLFSINO) { 1887 mp->m_sb.sb_uquotino != NULLFSINO) {
1890 ASSERT(mp->m_sb.sb_uquotino > 0); 1888 ASSERT(mp->m_sb.sb_uquotino > 0);
1891 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 1889 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1892 0, 0, &uip, 0))) 1890 0, 0, &uip)))
1893 return XFS_ERROR(error); 1891 return XFS_ERROR(error);
1894 } 1892 }
1895 if (XFS_IS_OQUOTA_ON(mp) && 1893 if (XFS_IS_OQUOTA_ON(mp) &&
1896 mp->m_sb.sb_gquotino != NULLFSINO) { 1894 mp->m_sb.sb_gquotino != NULLFSINO) {
1897 ASSERT(mp->m_sb.sb_gquotino > 0); 1895 ASSERT(mp->m_sb.sb_gquotino > 0);
1898 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 1896 if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1899 0, 0, &gip, 0))) { 1897 0, 0, &gip))) {
1900 if (uip) 1898 if (uip)
1901 IRELE(uip); 1899 IRELE(uip);
1902 return XFS_ERROR(error); 1900 return XFS_ERROR(error);
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 92b002f1805f..b4487764e923 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -262,7 +262,7 @@ xfs_qm_scall_trunc_qfiles(
262 } 262 }
263 263
264 if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { 264 if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
265 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); 265 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip);
266 if (!error) { 266 if (!error) {
267 error = xfs_truncate_file(mp, qip); 267 error = xfs_truncate_file(mp, qip);
268 IRELE(qip); 268 IRELE(qip);
@@ -271,7 +271,7 @@ xfs_qm_scall_trunc_qfiles(
271 271
272 if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && 272 if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
273 mp->m_sb.sb_gquotino != NULLFSINO) { 273 mp->m_sb.sb_gquotino != NULLFSINO) {
274 error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0); 274 error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip);
275 if (!error2) { 275 if (!error2) {
276 error2 = xfs_truncate_file(mp, qip); 276 error2 = xfs_truncate_file(mp, qip);
277 IRELE(qip); 277 IRELE(qip);
@@ -417,12 +417,12 @@ xfs_qm_scall_getqstat(
417 } 417 }
418 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { 418 if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
419 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 419 if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
420 0, 0, &uip, 0) == 0) 420 0, 0, &uip) == 0)
421 tempuqip = B_TRUE; 421 tempuqip = B_TRUE;
422 } 422 }
423 if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { 423 if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
424 if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 424 if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
425 0, 0, &gip, 0) == 0) 425 0, 0, &gip) == 0)
426 tempgqip = B_TRUE; 426 tempgqip = B_TRUE;
427 } 427 }
428 if (uip) { 428 if (uip) {
@@ -1109,10 +1109,7 @@ xfs_qm_internalqcheck_adjust(
1109 xfs_ino_t ino, /* inode number to get data for */ 1109 xfs_ino_t ino, /* inode number to get data for */
1110 void __user *buffer, /* not used */ 1110 void __user *buffer, /* not used */
1111 int ubsize, /* not used */ 1111 int ubsize, /* not used */
1112 void *private_data, /* not used */
1113 xfs_daddr_t bno, /* starting block of inode cluster */
1114 int *ubused, /* not used */ 1112 int *ubused, /* not used */
1115 void *dip, /* not used */
1116 int *res) /* bulkstat result code */ 1113 int *res) /* bulkstat result code */
1117{ 1114{
1118 xfs_inode_t *ip; 1115 xfs_inode_t *ip;
@@ -1134,7 +1131,7 @@ xfs_qm_internalqcheck_adjust(
1134 ipreleased = B_FALSE; 1131 ipreleased = B_FALSE;
1135 again: 1132 again:
1136 lock_flags = XFS_ILOCK_SHARED; 1133 lock_flags = XFS_ILOCK_SHARED;
1137 if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) { 1134 if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
1138 *res = BULKSTAT_RV_NOTHING; 1135 *res = BULKSTAT_RV_NOTHING;
1139 return (error); 1136 return (error);
1140 } 1137 }
@@ -1205,15 +1202,15 @@ xfs_qm_internalqcheck(
1205 * Iterate thru all the inodes in the file system, 1202 * Iterate thru all the inodes in the file system,
1206 * adjusting the corresponding dquot counters 1203 * adjusting the corresponding dquot counters
1207 */ 1204 */
1208 if ((error = xfs_bulkstat(mp, &lastino, &count, 1205 error = xfs_bulkstat(mp, &lastino, &count,
1209 xfs_qm_internalqcheck_adjust, NULL, 1206 xfs_qm_internalqcheck_adjust,
1210 0, NULL, BULKSTAT_FG_IGET, &done))) { 1207 0, NULL, &done);
1208 if (error) {
1209 cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
1211 break; 1210 break;
1212 } 1211 }
1213 } while (! done); 1212 } while (!done);
1214 if (error) { 1213
1215 cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
1216 }
1217 cmn_err(CE_DEBUG, "Checking results against system dquots"); 1214 cmn_err(CE_DEBUG, "Checking results against system dquots");
1218 for (i = 0; i < qmtest_hashmask; i++) { 1215 for (i = 0; i < qmtest_hashmask; i++) {
1219 xfs_dqtest_t *d, *n; 1216 xfs_dqtest_t *d, *n;
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 5bba29a07812..7f159d2a429a 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -69,7 +69,9 @@ xfs_swapext(
69 goto out; 69 goto out;
70 } 70 }
71 71
72 if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { 72 if (!(file->f_mode & FMODE_WRITE) ||
73 !(file->f_mode & FMODE_READ) ||
74 (file->f_flags & O_APPEND)) {
73 error = XFS_ERROR(EBADF); 75 error = XFS_ERROR(EBADF);
74 goto out_put_file; 76 goto out_put_file;
75 } 77 }
@@ -81,6 +83,7 @@ xfs_swapext(
81 } 83 }
82 84
83 if (!(tmp_file->f_mode & FMODE_WRITE) || 85 if (!(tmp_file->f_mode & FMODE_WRITE) ||
86 !(tmp_file->f_mode & FMODE_READ) ||
84 (tmp_file->f_flags & O_APPEND)) { 87 (tmp_file->f_flags & O_APPEND)) {
85 error = XFS_ERROR(EBADF); 88 error = XFS_ERROR(EBADF);
86 goto out_put_tmp_file; 89 goto out_put_tmp_file;
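The xfs_dfrag.c hunk tightens xfs_swapext(): both file descriptors must now be open for reading as well as writing, and O_APPEND is still rejected. A minimal sketch of that check, factored into a helper for illustration only (xfs_swapext_check_file is a hypothetical name, not part of the patch):

	/* Hypothetical helper; mirrors the check the patch applies to both
	 * file and tmp_file in xfs_swapext(). f_mode/f_flags come from
	 * <linux/fs.h>. */
	static int xfs_swapext_check_file(struct file *file)
	{
		if (!(file->f_mode & FMODE_WRITE) ||
		    !(file->f_mode & FMODE_READ) ||
		    (file->f_flags & O_APPEND))
			return XFS_ERROR(EBADF);	/* as in the hunk above */
		return 0;
	}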
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 9d884c127bb9..c7142a064c48 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -1203,6 +1203,63 @@ error0:
1203 return error; 1203 return error;
1204} 1204}
1205 1205
1206STATIC int
1207xfs_imap_lookup(
1208 struct xfs_mount *mp,
1209 struct xfs_trans *tp,
1210 xfs_agnumber_t agno,
1211 xfs_agino_t agino,
1212 xfs_agblock_t agbno,
1213 xfs_agblock_t *chunk_agbno,
1214 xfs_agblock_t *offset_agbno,
1215 int flags)
1216{
1217 struct xfs_inobt_rec_incore rec;
1218 struct xfs_btree_cur *cur;
1219 struct xfs_buf *agbp;
1220 xfs_agino_t startino;
1221 int error;
1222 int i;
1223
1224 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1225 if (error) {
1226 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1227 "xfs_ialloc_read_agi() returned "
1228 "error %d, agno %d",
1229 error, agno);
1230 return error;
1231 }
1232
1233 /*
1234 * derive and lookup the exact inode record for the given agino. If the
1235 * record cannot be found, then it's an invalid inode number and we
1236 * should abort.
1237 */
1238 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
1239 startino = agino & ~(XFS_IALLOC_INODES(mp) - 1);
1240 error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i);
1241 if (!error) {
1242 if (i)
1243 error = xfs_inobt_get_rec(cur, &rec, &i);
1244 if (!error && i == 0)
1245 error = EINVAL;
1246 }
1247
1248 xfs_trans_brelse(tp, agbp);
1249 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1250 if (error)
1251 return error;
1252
1253 /* for untrusted inodes check it is allocated first */
1254 if ((flags & XFS_IGET_UNTRUSTED) &&
1255 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
1256 return EINVAL;
1257
1258 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
1259 *offset_agbno = agbno - *chunk_agbno;
1260 return 0;
1261}
1262
1206/* 1263/*
1207 * Return the location of the inode in imap, for mapping it into a buffer. 1264 * Return the location of the inode in imap, for mapping it into a buffer.
1208 */ 1265 */
@@ -1235,8 +1292,11 @@ xfs_imap(
1235 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || 1292 if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
1236 ino != XFS_AGINO_TO_INO(mp, agno, agino)) { 1293 ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
1237#ifdef DEBUG 1294#ifdef DEBUG
1238 /* no diagnostics for bulkstat, ino comes from userspace */ 1295 /*
1239 if (flags & XFS_IGET_BULKSTAT) 1296 * Don't output diagnostic information for untrusted inodes
1297 * as they can be invalid without implying corruption.
1298 */
1299 if (flags & XFS_IGET_UNTRUSTED)
1240 return XFS_ERROR(EINVAL); 1300 return XFS_ERROR(EINVAL);
1241 if (agno >= mp->m_sb.sb_agcount) { 1301 if (agno >= mp->m_sb.sb_agcount) {
1242 xfs_fs_cmn_err(CE_ALERT, mp, 1302 xfs_fs_cmn_err(CE_ALERT, mp,
@@ -1263,6 +1323,23 @@ xfs_imap(
1263 return XFS_ERROR(EINVAL); 1323 return XFS_ERROR(EINVAL);
1264 } 1324 }
1265 1325
1326 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
1327
1328 /*
1329 * For bulkstat and handle lookups, we have an untrusted inode number
1330 * that we have to verify is valid. We cannot do this just by reading
1331 * the inode buffer as it may have been unlinked and removed leaving
1332 * inodes in stale state on disk. Hence we have to do a btree lookup
1333 * in all cases where an untrusted inode number is passed.
1334 */
1335 if (flags & XFS_IGET_UNTRUSTED) {
1336 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
1337 &chunk_agbno, &offset_agbno, flags);
1338 if (error)
1339 return error;
1340 goto out_map;
1341 }
1342
1266 /* 1343 /*
1267 * If the inode cluster size is the same as the blocksize or 1344 * If the inode cluster size is the same as the blocksize or
1268 * smaller we get to the buffer by simple arithmetics. 1345 * smaller we get to the buffer by simple arithmetics.
@@ -1277,24 +1354,6 @@ xfs_imap(
1277 return 0; 1354 return 0;
1278 } 1355 }
1279 1356
1280 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
1281
1282 /*
1283 * If we get a block number passed from bulkstat we can use it to
1284 * find the buffer easily.
1285 */
1286 if (imap->im_blkno) {
1287 offset = XFS_INO_TO_OFFSET(mp, ino);
1288 ASSERT(offset < mp->m_sb.sb_inopblock);
1289
1290 cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno);
1291 offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock;
1292
1293 imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
1294 imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
1295 return 0;
1296 }
1297
1298 /* 1357 /*
1299 * If the inode chunks are aligned then use simple maths to 1358 * If the inode chunks are aligned then use simple maths to
1300 * find the location. Otherwise we have to do a btree 1359 * find the location. Otherwise we have to do a btree
@@ -1304,50 +1363,13 @@ xfs_imap(
1304 offset_agbno = agbno & mp->m_inoalign_mask; 1363 offset_agbno = agbno & mp->m_inoalign_mask;
1305 chunk_agbno = agbno - offset_agbno; 1364 chunk_agbno = agbno - offset_agbno;
1306 } else { 1365 } else {
1307 xfs_btree_cur_t *cur; /* inode btree cursor */ 1366 error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
1308 xfs_inobt_rec_incore_t chunk_rec; 1367 &chunk_agbno, &offset_agbno, flags);
1309 xfs_buf_t *agbp; /* agi buffer */
1310 int i; /* temp state */
1311
1312 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
1313 if (error) {
1314 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1315 "xfs_ialloc_read_agi() returned "
1316 "error %d, agno %d",
1317 error, agno);
1318 return error;
1319 }
1320
1321 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
1322 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
1323 if (error) {
1324 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1325 "xfs_inobt_lookup() failed");
1326 goto error0;
1327 }
1328
1329 error = xfs_inobt_get_rec(cur, &chunk_rec, &i);
1330 if (error) {
1331 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1332 "xfs_inobt_get_rec() failed");
1333 goto error0;
1334 }
1335 if (i == 0) {
1336#ifdef DEBUG
1337 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
1338 "xfs_inobt_get_rec() failed");
1339#endif /* DEBUG */
1340 error = XFS_ERROR(EINVAL);
1341 }
1342 error0:
1343 xfs_trans_brelse(tp, agbp);
1344 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1345 if (error) 1368 if (error)
1346 return error; 1369 return error;
1347 chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino);
1348 offset_agbno = agbno - chunk_agbno;
1349 } 1370 }
1350 1371
1372out_map:
1351 ASSERT(agbno >= chunk_agbno); 1373 ASSERT(agbno >= chunk_agbno);
1352 cluster_agbno = chunk_agbno + 1374 cluster_agbno = chunk_agbno +
1353 ((offset_agbno / blks_per_cluster) * blks_per_cluster); 1375 ((offset_agbno / blks_per_cluster) * blks_per_cluster);
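xfs_imap_lookup() validates an untrusted inode number by probing the inode btree for its allocation-chunk record before any buffer is mapped. A worked example with illustrative numbers (not taken from the patch):

	/*
	 * Assume XFS_IALLOC_INODES(mp) == 64 and agino == 71:
	 *
	 *	startino = agino & ~(XFS_IALLOC_INODES(mp) - 1)
	 *	         = 71 & ~63 = 64
	 *
	 * so the cursor does an XFS_LOOKUP_EQ for the chunk record starting
	 * at agino 64.  If no such record exists the number is invalid and
	 * EINVAL is returned; for XFS_IGET_UNTRUSTED callers the free mask
	 * is also checked, i.e. bit (71 - 64) = 7 of rec.ir_free must be
	 * clear, so a freed inode slot is rejected instead of being read
	 * from a possibly stale on-disk buffer.
	 */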
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 75df75f43d48..8f8b91be2c99 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -259,7 +259,6 @@ xfs_iget_cache_miss(
259 xfs_trans_t *tp, 259 xfs_trans_t *tp,
260 xfs_ino_t ino, 260 xfs_ino_t ino,
261 struct xfs_inode **ipp, 261 struct xfs_inode **ipp,
262 xfs_daddr_t bno,
263 int flags, 262 int flags,
264 int lock_flags) 263 int lock_flags)
265{ 264{
@@ -272,7 +271,7 @@ xfs_iget_cache_miss(
272 if (!ip) 271 if (!ip)
273 return ENOMEM; 272 return ENOMEM;
274 273
275 error = xfs_iread(mp, tp, ip, bno, flags); 274 error = xfs_iread(mp, tp, ip, flags);
276 if (error) 275 if (error)
277 goto out_destroy; 276 goto out_destroy;
278 277
@@ -358,8 +357,6 @@ out_destroy:
358 * within the file system for the inode being requested. 357 * within the file system for the inode being requested.
359 * lock_flags -- flags indicating how to lock the inode. See the comment 358 * lock_flags -- flags indicating how to lock the inode. See the comment
360 * for xfs_ilock() for a list of valid values. 359 * for xfs_ilock() for a list of valid values.
361 * bno -- the block number starting the buffer containing the inode,
362 * if known (as by bulkstat), else 0.
363 */ 360 */
364int 361int
365xfs_iget( 362xfs_iget(
@@ -368,8 +365,7 @@ xfs_iget(
368 xfs_ino_t ino, 365 xfs_ino_t ino,
369 uint flags, 366 uint flags,
370 uint lock_flags, 367 uint lock_flags,
371 xfs_inode_t **ipp, 368 xfs_inode_t **ipp)
372 xfs_daddr_t bno)
373{ 369{
374 xfs_inode_t *ip; 370 xfs_inode_t *ip;
375 int error; 371 int error;
@@ -397,7 +393,7 @@ again:
397 read_unlock(&pag->pag_ici_lock); 393 read_unlock(&pag->pag_ici_lock);
398 XFS_STATS_INC(xs_ig_missed); 394 XFS_STATS_INC(xs_ig_missed);
399 395
400 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno, 396 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
401 flags, lock_flags); 397 flags, lock_flags);
402 if (error) 398 if (error)
403 goto out_error_or_again; 399 goto out_error_or_again;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d53c39de7d05..b76a829d7e20 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -177,7 +177,7 @@ xfs_imap_to_bp(
177 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, 177 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
178 XFS_ERRTAG_ITOBP_INOTOBP, 178 XFS_ERRTAG_ITOBP_INOTOBP,
179 XFS_RANDOM_ITOBP_INOTOBP))) { 179 XFS_RANDOM_ITOBP_INOTOBP))) {
180 if (iget_flags & XFS_IGET_BULKSTAT) { 180 if (iget_flags & XFS_IGET_UNTRUSTED) {
181 xfs_trans_brelse(tp, bp); 181 xfs_trans_brelse(tp, bp);
182 return XFS_ERROR(EINVAL); 182 return XFS_ERROR(EINVAL);
183 } 183 }
@@ -787,7 +787,6 @@ xfs_iread(
787 xfs_mount_t *mp, 787 xfs_mount_t *mp,
788 xfs_trans_t *tp, 788 xfs_trans_t *tp,
789 xfs_inode_t *ip, 789 xfs_inode_t *ip,
790 xfs_daddr_t bno,
791 uint iget_flags) 790 uint iget_flags)
792{ 791{
793 xfs_buf_t *bp; 792 xfs_buf_t *bp;
@@ -797,11 +796,9 @@ xfs_iread(
797 /* 796 /*
798 * Fill in the location information in the in-core inode. 797 * Fill in the location information in the in-core inode.
799 */ 798 */
800 ip->i_imap.im_blkno = bno;
801 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); 799 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
802 if (error) 800 if (error)
803 return error; 801 return error;
804 ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);
805 802
806 /* 803 /*
807 * Get pointers to the on-disk inode and the buffer containing it. 804 * Get pointers to the on-disk inode and the buffer containing it.
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 9965e40a4615..78550df13cd6 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -442,7 +442,7 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
442 * xfs_iget.c prototypes. 442 * xfs_iget.c prototypes.
443 */ 443 */
444int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, 444int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
445 uint, uint, xfs_inode_t **, xfs_daddr_t); 445 uint, uint, xfs_inode_t **);
446void xfs_iput(xfs_inode_t *, uint); 446void xfs_iput(xfs_inode_t *, uint);
447void xfs_iput_new(xfs_inode_t *, uint); 447void xfs_iput_new(xfs_inode_t *, uint);
448void xfs_ilock(xfs_inode_t *, uint); 448void xfs_ilock(xfs_inode_t *, uint);
@@ -500,7 +500,7 @@ do { \
500 * Flags for xfs_iget() 500 * Flags for xfs_iget()
501 */ 501 */
502#define XFS_IGET_CREATE 0x1 502#define XFS_IGET_CREATE 0x1
503#define XFS_IGET_BULKSTAT 0x2 503#define XFS_IGET_UNTRUSTED 0x2
504 504
505int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, 505int xfs_inotobp(struct xfs_mount *, struct xfs_trans *,
506 xfs_ino_t, struct xfs_dinode **, 506 xfs_ino_t, struct xfs_dinode **,
@@ -509,7 +509,7 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
509 struct xfs_inode *, struct xfs_dinode **, 509 struct xfs_inode *, struct xfs_dinode **,
510 struct xfs_buf **, uint); 510 struct xfs_buf **, uint);
511int xfs_iread(struct xfs_mount *, struct xfs_trans *, 511int xfs_iread(struct xfs_mount *, struct xfs_trans *,
512 struct xfs_inode *, xfs_daddr_t, uint); 512 struct xfs_inode *, uint);
513void xfs_dinode_to_disk(struct xfs_dinode *, 513void xfs_dinode_to_disk(struct xfs_dinode *,
514 struct xfs_icdinode *); 514 struct xfs_icdinode *);
515void xfs_idestroy_fork(struct xfs_inode *, int); 515void xfs_idestroy_fork(struct xfs_inode *, int);
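With the prototype change above, callers no longer pass a block hint, and XFS_IGET_UNTRUSTED replaces XFS_IGET_BULKSTAT. A minimal sketch of a post-change lookup, assuming an xfs_mount *mp and xfs_ino_t ino are already in scope:

	struct xfs_inode	*ip;
	int			error;

	/* untrusted numbers (bulkstat, filehandles) are verified via the
	 * inode btree inside xfs_iget()/xfs_imap() rather than trusting a
	 * caller-supplied cluster block */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;
	/* ... use ip ... */
	xfs_iput(ip, XFS_ILOCK_SHARED);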
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index b1b801e4a28e..2b86f8610512 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -49,24 +49,40 @@ xfs_internal_inum(
49 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); 49 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
50} 50}
51 51
52STATIC int 52/*
53xfs_bulkstat_one_iget( 53 * Return stat information for one inode.
54 xfs_mount_t *mp, /* mount point for filesystem */ 54 * Return 0 if ok, else errno.
55 xfs_ino_t ino, /* inode number to get data for */ 55 */
56 xfs_daddr_t bno, /* starting bno of inode cluster */ 56int
57 xfs_bstat_t *buf, /* return buffer */ 57xfs_bulkstat_one_int(
58 int *stat) /* BULKSTAT_RV_... */ 58 struct xfs_mount *mp, /* mount point for filesystem */
59 xfs_ino_t ino, /* inode to get data for */
60 void __user *buffer, /* buffer to place output in */
61 int ubsize, /* size of buffer */
62 bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
63 int *ubused, /* bytes used by me */
64 int *stat) /* BULKSTAT_RV_... */
59{ 65{
60 xfs_icdinode_t *dic; /* dinode core info pointer */ 66 struct xfs_icdinode *dic; /* dinode core info pointer */
61 xfs_inode_t *ip; /* incore inode pointer */ 67 struct xfs_inode *ip; /* incore inode pointer */
62 struct inode *inode; 68 struct inode *inode;
63 int error; 69 struct xfs_bstat *buf; /* return buffer */
70 int error = 0; /* error value */
71
72 *stat = BULKSTAT_RV_NOTHING;
73
74 if (!buffer || xfs_internal_inum(mp, ino))
75 return XFS_ERROR(EINVAL);
76
77 buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
78 if (!buf)
79 return XFS_ERROR(ENOMEM);
64 80
65 error = xfs_iget(mp, NULL, ino, 81 error = xfs_iget(mp, NULL, ino,
66 XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno); 82 XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
67 if (error) { 83 if (error) {
68 *stat = BULKSTAT_RV_NOTHING; 84 *stat = BULKSTAT_RV_NOTHING;
69 return error; 85 goto out_free;
70 } 86 }
71 87
72 ASSERT(ip != NULL); 88 ASSERT(ip != NULL);
@@ -127,77 +143,16 @@ xfs_bulkstat_one_iget(
127 buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; 143 buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
128 break; 144 break;
129 } 145 }
130
131 xfs_iput(ip, XFS_ILOCK_SHARED); 146 xfs_iput(ip, XFS_ILOCK_SHARED);
132 return error;
133}
134 147
135STATIC void 148 error = formatter(buffer, ubsize, ubused, buf);
136xfs_bulkstat_one_dinode(
137 xfs_mount_t *mp, /* mount point for filesystem */
138 xfs_ino_t ino, /* inode number to get data for */
139 xfs_dinode_t *dic, /* dinode inode pointer */
140 xfs_bstat_t *buf) /* return buffer */
141{
142 /*
143 * The inode format changed when we moved the link count and
144 * made it 32 bits long. If this is an old format inode,
145 * convert it in memory to look like a new one. If it gets
146 * flushed to disk we will convert back before flushing or
147 * logging it. We zero out the new projid field and the old link
148 * count field. We'll handle clearing the pad field (the remains
149 * of the old uuid field) when we actually convert the inode to
150 * the new format. We don't change the version number so that we
151 * can distinguish this from a real new format inode.
152 */
153 if (dic->di_version == 1) {
154 buf->bs_nlink = be16_to_cpu(dic->di_onlink);
155 buf->bs_projid = 0;
156 } else {
157 buf->bs_nlink = be32_to_cpu(dic->di_nlink);
158 buf->bs_projid = be16_to_cpu(dic->di_projid);
159 }
160 149
161 buf->bs_ino = ino; 150 if (!error)
162 buf->bs_mode = be16_to_cpu(dic->di_mode); 151 *stat = BULKSTAT_RV_DIDONE;
163 buf->bs_uid = be32_to_cpu(dic->di_uid);
164 buf->bs_gid = be32_to_cpu(dic->di_gid);
165 buf->bs_size = be64_to_cpu(dic->di_size);
166 buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
167 buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
168 buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
169 buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
170 buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
171 buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
172 buf->bs_xflags = xfs_dic2xflags(dic);
173 buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
174 buf->bs_extents = be32_to_cpu(dic->di_nextents);
175 buf->bs_gen = be32_to_cpu(dic->di_gen);
176 memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
177 buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
178 buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
179 buf->bs_aextents = be16_to_cpu(dic->di_anextents);
180 buf->bs_forkoff = XFS_DFORK_BOFF(dic);
181 152
182 switch (dic->di_format) { 153 out_free:
183 case XFS_DINODE_FMT_DEV: 154 kmem_free(buf);
184 buf->bs_rdev = xfs_dinode_get_rdev(dic); 155 return error;
185 buf->bs_blksize = BLKDEV_IOSIZE;
186 buf->bs_blocks = 0;
187 break;
188 case XFS_DINODE_FMT_LOCAL:
189 case XFS_DINODE_FMT_UUID:
190 buf->bs_rdev = 0;
191 buf->bs_blksize = mp->m_sb.sb_blocksize;
192 buf->bs_blocks = 0;
193 break;
194 case XFS_DINODE_FMT_EXTENTS:
195 case XFS_DINODE_FMT_BTREE:
196 buf->bs_rdev = 0;
197 buf->bs_blksize = mp->m_sb.sb_blocksize;
198 buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
199 break;
200 }
201} 156}
202 157
203/* Return 0 on success or positive error */ 158/* Return 0 on success or positive error */
@@ -217,118 +172,17 @@ xfs_bulkstat_one_fmt(
217 return 0; 172 return 0;
218} 173}
219 174
220/*
221 * Return stat information for one inode.
222 * Return 0 if ok, else errno.
223 */
224int /* error status */
225xfs_bulkstat_one_int(
226 xfs_mount_t *mp, /* mount point for filesystem */
227 xfs_ino_t ino, /* inode number to get data for */
228 void __user *buffer, /* buffer to place output in */
229 int ubsize, /* size of buffer */
230 bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
231 xfs_daddr_t bno, /* starting bno of inode cluster */
232 int *ubused, /* bytes used by me */
233 void *dibuff, /* on-disk inode buffer */
234 int *stat) /* BULKSTAT_RV_... */
235{
236 xfs_bstat_t *buf; /* return buffer */
237 int error = 0; /* error value */
238 xfs_dinode_t *dip; /* dinode inode pointer */
239
240 dip = (xfs_dinode_t *)dibuff;
241 *stat = BULKSTAT_RV_NOTHING;
242
243 if (!buffer || xfs_internal_inum(mp, ino))
244 return XFS_ERROR(EINVAL);
245
246 buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
247
248 if (dip == NULL) {
249 /* We're not being passed a pointer to a dinode. This happens
250 * if BULKSTAT_FG_IGET is selected. Do the iget.
251 */
252 error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
253 if (error)
254 goto out_free;
255 } else {
256 xfs_bulkstat_one_dinode(mp, ino, dip, buf);
257 }
258
259 error = formatter(buffer, ubsize, ubused, buf);
260 if (error)
261 goto out_free;
262
263 *stat = BULKSTAT_RV_DIDONE;
264
265 out_free:
266 kmem_free(buf);
267 return error;
268}
269
270int 175int
271xfs_bulkstat_one( 176xfs_bulkstat_one(
272 xfs_mount_t *mp, /* mount point for filesystem */ 177 xfs_mount_t *mp, /* mount point for filesystem */
273 xfs_ino_t ino, /* inode number to get data for */ 178 xfs_ino_t ino, /* inode number to get data for */
274 void __user *buffer, /* buffer to place output in */ 179 void __user *buffer, /* buffer to place output in */
275 int ubsize, /* size of buffer */ 180 int ubsize, /* size of buffer */
276 void *private_data, /* my private data */
277 xfs_daddr_t bno, /* starting bno of inode cluster */
278 int *ubused, /* bytes used by me */ 181 int *ubused, /* bytes used by me */
279 void *dibuff, /* on-disk inode buffer */
280 int *stat) /* BULKSTAT_RV_... */ 182 int *stat) /* BULKSTAT_RV_... */
281{ 183{
282 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, 184 return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
283 xfs_bulkstat_one_fmt, bno, 185 xfs_bulkstat_one_fmt, ubused, stat);
284 ubused, dibuff, stat);
285}
286
287/*
288 * Test to see whether we can use the ondisk inode directly, based
289 * on the given bulkstat flags, filling in dipp accordingly.
290 * Returns zero if the inode is dodgey.
291 */
292STATIC int
293xfs_bulkstat_use_dinode(
294 xfs_mount_t *mp,
295 int flags,
296 xfs_buf_t *bp,
297 int clustidx,
298 xfs_dinode_t **dipp)
299{
300 xfs_dinode_t *dip;
301 unsigned int aformat;
302
303 *dipp = NULL;
304 if (!bp || (flags & BULKSTAT_FG_IGET))
305 return 1;
306 dip = (xfs_dinode_t *)
307 xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
308 /*
309 * Check the buffer containing the on-disk inode for di_mode == 0.
310 * This is to prevent xfs_bulkstat from picking up just reclaimed
311 * inodes that have their in-core state initialized but not flushed
312 * to disk yet. This is a temporary hack that would require a proper
313 * fix in the future.
314 */
315 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
316 !XFS_DINODE_GOOD_VERSION(dip->di_version) ||
317 !dip->di_mode)
318 return 0;
319 if (flags & BULKSTAT_FG_QUICK) {
320 *dipp = dip;
321 return 1;
322 }
323 /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
324 aformat = dip->di_aformat;
325 if ((XFS_DFORK_Q(dip) == 0) ||
326 (aformat == XFS_DINODE_FMT_LOCAL) ||
327 (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) {
328 *dipp = dip;
329 return 1;
330 }
331 return 1;
332} 186}
333 187
334#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) 188#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
@@ -342,10 +196,8 @@ xfs_bulkstat(
342 xfs_ino_t *lastinop, /* last inode returned */ 196 xfs_ino_t *lastinop, /* last inode returned */
343 int *ubcountp, /* size of buffer/count returned */ 197 int *ubcountp, /* size of buffer/count returned */
344 bulkstat_one_pf formatter, /* func that'd fill a single buf */ 198 bulkstat_one_pf formatter, /* func that'd fill a single buf */
345 void *private_data,/* private data for formatter */
346 size_t statstruct_size, /* sizeof struct filling */ 199 size_t statstruct_size, /* sizeof struct filling */
347 char __user *ubuffer, /* buffer with inode stats */ 200 char __user *ubuffer, /* buffer with inode stats */
348 int flags, /* defined in xfs_itable.h */
349 int *done) /* 1 if there are more stats to get */ 201 int *done) /* 1 if there are more stats to get */
350{ 202{
351 xfs_agblock_t agbno=0;/* allocation group block number */ 203 xfs_agblock_t agbno=0;/* allocation group block number */
@@ -380,14 +232,12 @@ xfs_bulkstat(
380 int ubelem; /* spaces used in user's buffer */ 232 int ubelem; /* spaces used in user's buffer */
381 int ubused; /* bytes used by formatter */ 233 int ubused; /* bytes used by formatter */
382 xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ 234 xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */
383 xfs_dinode_t *dip; /* ptr into bp for specific inode */
384 235
385 /* 236 /*
386 * Get the last inode value, see if there's nothing to do. 237 * Get the last inode value, see if there's nothing to do.
387 */ 238 */
388 ino = (xfs_ino_t)*lastinop; 239 ino = (xfs_ino_t)*lastinop;
389 lastino = ino; 240 lastino = ino;
390 dip = NULL;
391 agno = XFS_INO_TO_AGNO(mp, ino); 241 agno = XFS_INO_TO_AGNO(mp, ino);
392 agino = XFS_INO_TO_AGINO(mp, ino); 242 agino = XFS_INO_TO_AGINO(mp, ino);
393 if (agno >= mp->m_sb.sb_agcount || 243 if (agno >= mp->m_sb.sb_agcount ||
@@ -612,37 +462,6 @@ xfs_bulkstat(
612 irbp->ir_startino) + 462 irbp->ir_startino) +
613 ((chunkidx & nimask) >> 463 ((chunkidx & nimask) >>
614 mp->m_sb.sb_inopblog); 464 mp->m_sb.sb_inopblog);
615
616 if (flags & (BULKSTAT_FG_QUICK |
617 BULKSTAT_FG_INLINE)) {
618 int offset;
619
620 ino = XFS_AGINO_TO_INO(mp, agno,
621 agino);
622 bno = XFS_AGB_TO_DADDR(mp, agno,
623 agbno);
624
625 /*
626 * Get the inode cluster buffer
627 */
628 if (bp)
629 xfs_buf_relse(bp);
630
631 error = xfs_inotobp(mp, NULL, ino, &dip,
632 &bp, &offset,
633 XFS_IGET_BULKSTAT);
634
635 if (!error)
636 clustidx = offset / mp->m_sb.sb_inodesize;
637 if (XFS_TEST_ERROR(error != 0,
638 mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
639 XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
640 bp = NULL;
641 ubleft = 0;
642 rval = error;
643 break;
644 }
645 }
646 } 465 }
647 ino = XFS_AGINO_TO_INO(mp, agno, agino); 466 ino = XFS_AGINO_TO_INO(mp, agno, agino);
648 bno = XFS_AGB_TO_DADDR(mp, agno, agbno); 467 bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
@@ -658,35 +477,13 @@ xfs_bulkstat(
658 * when the chunk is used up. 477 * when the chunk is used up.
659 */ 478 */
660 irbp->ir_freecount++; 479 irbp->ir_freecount++;
661 if (!xfs_bulkstat_use_dinode(mp, flags, bp,
662 clustidx, &dip)) {
663 lastino = ino;
664 continue;
665 }
666 /*
667 * If we need to do an iget, cannot hold bp.
668 * Drop it, until starting the next cluster.
669 */
670 if ((flags & BULKSTAT_FG_INLINE) && !dip) {
671 if (bp)
672 xfs_buf_relse(bp);
673 bp = NULL;
674 }
675 480
676 /* 481 /*
677 * Get the inode and fill in a single buffer. 482 * Get the inode and fill in a single buffer.
678 * BULKSTAT_FG_QUICK uses dip to fill it in.
679 * BULKSTAT_FG_IGET uses igets.
680 * BULKSTAT_FG_INLINE uses dip if we have an
681 * inline attr fork, else igets.
682 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
683 * This is also used to count inodes/blks, etc
684 * in xfs_qm_quotacheck.
685 */ 483 */
686 ubused = statstruct_size; 484 ubused = statstruct_size;
687 error = formatter(mp, ino, ubufp, 485 error = formatter(mp, ino, ubufp, ubleft,
688 ubleft, private_data, 486 &ubused, &fmterror);
689 bno, &ubused, dip, &fmterror);
690 if (fmterror == BULKSTAT_RV_NOTHING) { 487 if (fmterror == BULKSTAT_RV_NOTHING) {
691 if (error && error != ENOENT && 488 if (error && error != ENOENT &&
692 error != EINVAL) { 489 error != EINVAL) {
@@ -778,8 +575,7 @@ xfs_bulkstat_single(
778 */ 575 */
779 576
780 ino = (xfs_ino_t)*lastinop; 577 ino = (xfs_ino_t)*lastinop;
781 error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 578 error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
782 NULL, 0, NULL, NULL, &res);
783 if (error) { 579 if (error) {
784 /* 580 /*
785 * Special case way failed, do it the "long" way 581 * Special case way failed, do it the "long" way
@@ -788,8 +584,7 @@ xfs_bulkstat_single(
788 (*lastinop)--; 584 (*lastinop)--;
789 count = 1; 585 count = 1;
790 if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, 586 if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
791 NULL, sizeof(xfs_bstat_t), buffer, 587 sizeof(xfs_bstat_t), buffer, done))
792 BULKSTAT_FG_IGET, done))
793 return error; 588 return error;
794 if (count == 0 || (xfs_ino_t)*lastinop != ino) 589 if (count == 0 || (xfs_ino_t)*lastinop != ino)
795 return error == EFSCORRUPTED ? 590 return error == EFSCORRUPTED ?
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 20792bf45946..97295d91d170 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -27,10 +27,7 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
27 xfs_ino_t ino, 27 xfs_ino_t ino,
28 void __user *buffer, 28 void __user *buffer,
29 int ubsize, 29 int ubsize,
30 void *private_data,
31 xfs_daddr_t bno,
32 int *ubused, 30 int *ubused,
33 void *dip,
34 int *stat); 31 int *stat);
35 32
36/* 33/*
@@ -41,13 +38,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
41#define BULKSTAT_RV_GIVEUP 2 38#define BULKSTAT_RV_GIVEUP 2
42 39
43/* 40/*
44 * Values for bulkstat flag argument.
45 */
46#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */
47#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */
48#define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */
49
50/*
51 * Return stat information in bulk (by-inode) for the filesystem. 41 * Return stat information in bulk (by-inode) for the filesystem.
52 */ 42 */
53int /* error status */ 43int /* error status */
@@ -56,10 +46,8 @@ xfs_bulkstat(
56 xfs_ino_t *lastino, /* last inode returned */ 46 xfs_ino_t *lastino, /* last inode returned */
57 int *count, /* size of buffer/count returned */ 47 int *count, /* size of buffer/count returned */
58 bulkstat_one_pf formatter, /* func that'd fill a single buf */ 48 bulkstat_one_pf formatter, /* func that'd fill a single buf */
59 void *private_data, /* private data for formatter */
60 size_t statstruct_size,/* sizeof struct that we're filling */ 49 size_t statstruct_size,/* sizeof struct that we're filling */
61 char __user *ubuffer,/* buffer with inode stats */ 50 char __user *ubuffer,/* buffer with inode stats */
62 int flags, /* flag to control access method */
63 int *done); /* 1 if there are more stats to get */ 51 int *done); /* 1 if there are more stats to get */
64 52
65int 53int
@@ -82,9 +70,7 @@ xfs_bulkstat_one_int(
82 void __user *buffer, 70 void __user *buffer,
83 int ubsize, 71 int ubsize,
84 bulkstat_one_fmt_pf formatter, 72 bulkstat_one_fmt_pf formatter,
85 xfs_daddr_t bno,
86 int *ubused, 73 int *ubused,
87 void *dibuff,
88 int *stat); 74 int *stat);
89 75
90int 76int
@@ -93,10 +79,7 @@ xfs_bulkstat_one(
93 xfs_ino_t ino, 79 xfs_ino_t ino,
94 void __user *buffer, 80 void __user *buffer,
95 int ubsize, 81 int ubsize,
96 void *private_data,
97 xfs_daddr_t bno,
98 int *ubused, 82 int *ubused,
99 void *dibuff,
100 int *stat); 83 int *stat);
101 84
102typedef int (*inumbers_fmt_pf)( 85typedef int (*inumbers_fmt_pf)(
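After this header change a bulkstat walk needs neither private_data nor the BULKSTAT_FG_* flags. A condensed sketch of a caller, modeled on the quotacheck loop earlier in this diff (nbufs and buffer are assumed to be supplied by the caller):

	xfs_ino_t	lastino = 0;
	int		count = nbufs;		/* nbufs, buffer: assumptions */
	int		done = 0, error;

	do {
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_bulkstat_one,	/* bulkstat_one_pf */
				     sizeof(xfs_bstat_t), buffer, &done);
		if (error)
			break;
	} while (!done);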
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index ed0684cc50ee..9ac5cfab27b9 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3198,7 +3198,7 @@ xlog_recover_process_one_iunlink(
3198 int error; 3198 int error;
3199 3199
3200 ino = XFS_AGINO_TO_INO(mp, agno, agino); 3200 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3201 error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0); 3201 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3202 if (error) 3202 if (error)
3203 goto fail; 3203 goto fail;
3204 3204
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d59f4e8bedcf..69f62d8b2816 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1300,7 +1300,7 @@ xfs_mountfs(
1300 * Get and sanity-check the root inode. 1300 * Get and sanity-check the root inode.
1301 * Save the pointer to it in the mount structure. 1301 * Save the pointer to it in the mount structure.
1302 */ 1302 */
1303 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0); 1303 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
1304 if (error) { 1304 if (error) {
1305 cmn_err(CE_WARN, "XFS: failed to read root inode"); 1305 cmn_err(CE_WARN, "XFS: failed to read root inode");
1306 goto out_log_dealloc; 1306 goto out_log_dealloc;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 16445518506d..a2d32ce335aa 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -2277,12 +2277,12 @@ xfs_rtmount_inodes(
2277 sbp = &mp->m_sb; 2277 sbp = &mp->m_sb;
2278 if (sbp->sb_rbmino == NULLFSINO) 2278 if (sbp->sb_rbmino == NULLFSINO)
2279 return 0; 2279 return 0;
2280 error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0); 2280 error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
2281 if (error) 2281 if (error)
2282 return error; 2282 return error;
2283 ASSERT(mp->m_rbmip != NULL); 2283 ASSERT(mp->m_rbmip != NULL);
2284 ASSERT(sbp->sb_rsumino != NULLFSINO); 2284 ASSERT(sbp->sb_rsumino != NULLFSINO);
2285 error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0); 2285 error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
2286 if (error) { 2286 if (error) {
2287 IRELE(mp->m_rbmip); 2287 IRELE(mp->m_rbmip);
2288 return error; 2288 return error;
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 785ff101da0a..2559dfec946b 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -62,7 +62,7 @@ xfs_trans_iget(
62{ 62{
63 int error; 63 int error;
64 64
65 error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0); 65 error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
66 if (!error && tp) 66 if (!error && tp)
67 xfs_trans_ijoin(tp, *ipp, lock_flags); 67 xfs_trans_ijoin(tp, *ipp, lock_flags);
68 return error; 68 return error;
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index a06bd62504fc..c1646838898f 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1269,7 +1269,7 @@ xfs_lookup(
1269 if (error) 1269 if (error)
1270 goto out; 1270 goto out;
1271 1271
1272 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); 1272 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
1273 if (error) 1273 if (error)
1274 goto out_free_name; 1274 goto out_free_name;
1275 1275
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 9101ed64f803..09ea4a1e9505 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -79,7 +79,6 @@ struct agp_memory {
79 u32 physical; 79 u32 physical;
80 bool is_bound; 80 bool is_bound;
81 bool is_flushed; 81 bool is_flushed;
82 bool vmalloc_flag;
83 /* list of agp_memory mapped to the aperture */ 82 /* list of agp_memory mapped to the aperture */
84 struct list_head mapped_list; 83 struct list_head mapped_list;
85 /* DMA-mapped addresses */ 84 /* DMA-mapped addresses */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index b3cd4de9432b..52c0da4bdd18 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -40,7 +40,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
40 const char *modname); 40 const char *modname);
41 41
42#if defined(CONFIG_DYNAMIC_DEBUG) 42#if defined(CONFIG_DYNAMIC_DEBUG)
43extern int ddebug_remove_module(char *mod_name); 43extern int ddebug_remove_module(const char *mod_name);
44 44
45#define __dynamic_dbg_enabled(dd) ({ \ 45#define __dynamic_dbg_enabled(dd) ({ \
46 int __ret = 0; \ 46 int __ret = 0; \
@@ -73,7 +73,7 @@ extern int ddebug_remove_module(char *mod_name);
73 73
74#else 74#else
75 75
76static inline int ddebug_remove_module(char *mod) 76static inline int ddebug_remove_module(const char *mod)
77{ 77{
78 return 0; 78 return 0;
79} 79}
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 907ace3a64c8..8e5a9dfb76bf 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -786,8 +786,6 @@ struct fb_tile_ops {
786#define FBINFO_MISC_USEREVENT 0x10000 /* event request 786#define FBINFO_MISC_USEREVENT 0x10000 /* event request
787 from userspace */ 787 from userspace */
788#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ 788#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
789#define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware
790 inited framebuffer */
791 789
792/* A driver may set this flag to indicate that it does want a set_par to be 790/* A driver may set this flag to indicate that it does want a set_par to be
793 * called every time when fbcon_switch is executed. The advantage is that with 791 * called every time when fbcon_switch is executed. The advantage is that with
@@ -801,6 +799,8 @@ struct fb_tile_ops {
801 */ 799 */
802#define FBINFO_MISC_ALWAYS_SETPAR 0x40000 800#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
803 801
802/* where the fb is a firmware driver, and can be replaced with a proper one */
803#define FBINFO_MISC_FIRMWARE 0x80000
804/* 804/*
805 * Host and GPU endianness differ. 805 * Host and GPU endianness differ.
806 */ 806 */
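The relocation of FBINFO_MISC_FIRMWARE is a bug fix rather than a shuffle: in the removed hunk it shared the value 0x40000 with FBINFO_MISC_ALWAYS_SETPAR, so the two flags could not be told apart. After the change the two bits are distinct:

	#define FBINFO_MISC_ALWAYS_SETPAR	0x40000
	#define FBINFO_MISC_FIRMWARE		0x80000	/* was 0x40000, colliding */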
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4eb467910a45..3bedcc149c84 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1261,6 +1261,7 @@
1261#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 1261#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
1262#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 1262#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8
1263#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 1263#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2
1264#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85
1264 1265
1265#define PCI_VENDOR_ID_IMS 0x10e0 1266#define PCI_VENDOR_ID_IMS 0x10e0
1266#define PCI_DEVICE_ID_IMS_TT128 0x9128 1267#define PCI_DEVICE_ID_IMS_TT128 0x9128
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f118809c953f..747fcaedddb7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,7 +139,7 @@ extern int nr_processes(void);
139extern unsigned long nr_running(void); 139extern unsigned long nr_running(void);
140extern unsigned long nr_uninterruptible(void); 140extern unsigned long nr_uninterruptible(void);
141extern unsigned long nr_iowait(void); 141extern unsigned long nr_iowait(void);
142extern unsigned long nr_iowait_cpu(void); 142extern unsigned long nr_iowait_cpu(int cpu);
143extern unsigned long this_cpu_load(void); 143extern unsigned long this_cpu_load(void);
144 144
145 145
diff --git a/init/main.c b/init/main.c
index ac2e4a5f59ee..a42fdf4aeba9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -424,18 +424,26 @@ static void __init setup_command_line(char *command_line)
424 * gcc-3.4 accidentally inlines this function, so use noinline. 424 * gcc-3.4 accidentally inlines this function, so use noinline.
425 */ 425 */
426 426
427static __initdata DECLARE_COMPLETION(kthreadd_done);
428
427static noinline void __init_refok rest_init(void) 429static noinline void __init_refok rest_init(void)
428 __releases(kernel_lock) 430 __releases(kernel_lock)
429{ 431{
430 int pid; 432 int pid;
431 433
432 rcu_scheduler_starting(); 434 rcu_scheduler_starting();
435 /*
436 * We need to spawn init first so that it obtains pid 1, however
437 * the init task will end up wanting to create kthreads, which, if
438 * we schedule it before we create kthreadd, will OOPS.
439 */
433 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); 440 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
434 numa_default_policy(); 441 numa_default_policy();
435 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); 442 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
436 rcu_read_lock(); 443 rcu_read_lock();
437 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); 444 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
438 rcu_read_unlock(); 445 rcu_read_unlock();
446 complete(&kthreadd_done);
439 unlock_kernel(); 447 unlock_kernel();
440 448
441 /* 449 /*
@@ -857,6 +865,10 @@ static noinline int init_post(void)
857 865
858static int __init kernel_init(void * unused) 866static int __init kernel_init(void * unused)
859{ 867{
868 /*
869 * Wait until kthreadd is all set-up.
870 */
871 wait_for_completion(&kthreadd_done);
860 lock_kernel(); 872 lock_kernel();
861 873
862 /* 874 /*
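The init/main.c hunks order early boot with a completion: rest_init() signals it only after kthreadd_task has been looked up, and kernel_init() blocks on it before doing anything that could ask kthreadd for threads. A condensed sketch of just that handshake, with the surrounding boot code elided:

	static __initdata DECLARE_COMPLETION(kthreadd_done);

	static noinline void __init_refok rest_init(void)
	{
		int pid;
		/* ... */
		kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
		/* ... */
		pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
		rcu_read_lock();
		kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
		rcu_read_unlock();
		complete(&kthreadd_done);	/* kthreadd is now usable */
		/* ... */
	}

	static int __init kernel_init(void *unused)
	{
		wait_for_completion(&kthreadd_done);	/* don't race ahead */
		/* ... */
	}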
diff --git a/kernel/module.c b/kernel/module.c
index 8c6b42840dd1..5d2d28197c82 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2062,6 +2062,12 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2062#endif 2062#endif
2063} 2063}
2064 2064
2065static void dynamic_debug_remove(struct _ddebug *debug)
2066{
2067 if (debug)
2068 ddebug_remove_module(debug->modname);
2069}
2070
2065static void *module_alloc_update_bounds(unsigned long size) 2071static void *module_alloc_update_bounds(unsigned long size)
2066{ 2072{
2067 void *ret = module_alloc(size); 2073 void *ret = module_alloc(size);
@@ -2124,6 +2130,8 @@ static noinline struct module *load_module(void __user *umod,
2124 void *ptr = NULL; /* Stops spurious gcc warning */ 2130 void *ptr = NULL; /* Stops spurious gcc warning */
2125 unsigned long symoffs, stroffs, *strmap; 2131 unsigned long symoffs, stroffs, *strmap;
2126 void __percpu *percpu; 2132 void __percpu *percpu;
2133 struct _ddebug *debug = NULL;
2134 unsigned int num_debug = 0;
2127 2135
2128 mm_segment_t old_fs; 2136 mm_segment_t old_fs;
2129 2137
@@ -2476,15 +2484,9 @@ static noinline struct module *load_module(void __user *umod,
2476 kfree(strmap); 2484 kfree(strmap);
2477 strmap = NULL; 2485 strmap = NULL;
2478 2486
2479 if (!mod->taints) { 2487 if (!mod->taints)
2480 struct _ddebug *debug;
2481 unsigned int num_debug;
2482
2483 debug = section_objs(hdr, sechdrs, secstrings, "__verbose", 2488 debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
2484 sizeof(*debug), &num_debug); 2489 sizeof(*debug), &num_debug);
2485 if (debug)
2486 dynamic_debug_setup(debug, num_debug);
2487 }
2488 2490
2489 err = module_finalize(hdr, sechdrs, mod); 2491 err = module_finalize(hdr, sechdrs, mod);
2490 if (err < 0) 2492 if (err < 0)
@@ -2526,10 +2528,13 @@ static noinline struct module *load_module(void __user *umod,
2526 goto unlock; 2528 goto unlock;
2527 } 2529 }
2528 2530
2531 if (debug)
2532 dynamic_debug_setup(debug, num_debug);
2533
2529 /* Find duplicate symbols */ 2534 /* Find duplicate symbols */
2530 err = verify_export_symbols(mod); 2535 err = verify_export_symbols(mod);
2531 if (err < 0) 2536 if (err < 0)
2532 goto unlock; 2537 goto ddebug;
2533 2538
2534 list_add_rcu(&mod->list, &modules); 2539 list_add_rcu(&mod->list, &modules);
2535 mutex_unlock(&module_mutex); 2540 mutex_unlock(&module_mutex);
@@ -2557,6 +2562,8 @@ static noinline struct module *load_module(void __user *umod,
2557 mutex_lock(&module_mutex); 2562 mutex_lock(&module_mutex);
2558 /* Unlink carefully: kallsyms could be walking list. */ 2563 /* Unlink carefully: kallsyms could be walking list. */
2559 list_del_rcu(&mod->list); 2564 list_del_rcu(&mod->list);
2565 ddebug:
2566 dynamic_debug_remove(debug);
2560 unlock: 2567 unlock:
2561 mutex_unlock(&module_mutex); 2568 mutex_unlock(&module_mutex);
2562 synchronize_sched(); 2569 synchronize_sched();
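In load_module() the dynamic-debug table is now only parsed for untainted modules and registered later, with a matching teardown on the new error label so a failed verify_export_symbols() no longer leaks ddebug entries. A condensed view of the resulting control flow (everything else elided):

	if (!mod->taints)
		debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
				     sizeof(*debug), &num_debug);
	/* ... later, with module_mutex held ... */
	if (debug)
		dynamic_debug_setup(debug, num_debug);

	err = verify_export_symbols(mod);
	if (err < 0)
		goto ddebug;		/* tear down what was just registered */
	/* ... */
 ddebug:
	dynamic_debug_remove(debug);
 unlock:
	mutex_unlock(&module_mutex);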
diff --git a/kernel/sched.c b/kernel/sched.c
index cb816e36cc8b..f52a8801b7a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2873,9 +2873,9 @@ unsigned long nr_iowait(void)
2873 return sum; 2873 return sum;
2874} 2874}
2875 2875
2876unsigned long nr_iowait_cpu(void) 2876unsigned long nr_iowait_cpu(int cpu)
2877{ 2877{
2878 struct rq *this = this_rq(); 2878 struct rq *this = cpu_rq(cpu);
2879 return atomic_read(&this->nr_iowait); 2879 return atomic_read(&this->nr_iowait);
2880} 2880}
2881 2881
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 783fbadf2202..813993b5fb61 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
154 * Updates the per cpu time idle statistics counters 154 * Updates the per cpu time idle statistics counters
155 */ 155 */
156static void 156static void
157update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time) 157update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
158{ 158{
159 ktime_t delta; 159 ktime_t delta;
160 160
161 if (ts->idle_active) { 161 if (ts->idle_active) {
162 delta = ktime_sub(now, ts->idle_entrytime); 162 delta = ktime_sub(now, ts->idle_entrytime);
163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); 163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
164 if (nr_iowait_cpu() > 0) 164 if (nr_iowait_cpu(cpu) > 0)
165 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); 165 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
166 ts->idle_entrytime = now; 166 ts->idle_entrytime = now;
167 } 167 }
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
175{ 175{
176 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 176 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
177 177
178 update_ts_time_stats(ts, now, NULL); 178 update_ts_time_stats(cpu, ts, now, NULL);
179 ts->idle_active = 0; 179 ts->idle_active = 0;
180 180
181 sched_clock_idle_wakeup_event(0); 181 sched_clock_idle_wakeup_event(0);
182} 182}
183 183
184static ktime_t tick_nohz_start_idle(struct tick_sched *ts) 184static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
185{ 185{
186 ktime_t now; 186 ktime_t now;
187 187
188 now = ktime_get(); 188 now = ktime_get();
189 189
190 update_ts_time_stats(ts, now, NULL); 190 update_ts_time_stats(cpu, ts, now, NULL);
191 191
192 ts->idle_entrytime = now; 192 ts->idle_entrytime = now;
193 ts->idle_active = 1; 193 ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
216 if (!tick_nohz_enabled) 216 if (!tick_nohz_enabled)
217 return -1; 217 return -1;
218 218
219 update_ts_time_stats(ts, ktime_get(), last_update_time); 219 update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
220 220
221 return ktime_to_us(ts->idle_sleeptime); 221 return ktime_to_us(ts->idle_sleeptime);
222} 222}
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
242 if (!tick_nohz_enabled) 242 if (!tick_nohz_enabled)
243 return -1; 243 return -1;
244 244
245 update_ts_time_stats(ts, ktime_get(), last_update_time); 245 update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
246 246
247 return ktime_to_us(ts->iowait_sleeptime); 247 return ktime_to_us(ts->iowait_sleeptime);
248} 248}
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
284 */ 284 */
285 ts->inidle = 1; 285 ts->inidle = 1;
286 286
287 now = tick_nohz_start_idle(ts); 287 now = tick_nohz_start_idle(cpu, ts);
288 288
289 /* 289 /*
290 * If this cpu is offline and it is the one which updates 290 * If this cpu is offline and it is the one which updates
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 3df8eb17a607..02afc2533728 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -692,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt)
692 * Called in response to a module being unloaded. Removes 692 * Called in response to a module being unloaded. Removes
693 * any ddebug_table's which point at the module. 693 * any ddebug_table's which point at the module.
694 */ 694 */
695int ddebug_remove_module(char *mod_name) 695int ddebug_remove_module(const char *mod_name)
696{ 696{
697 struct ddebug_table *dt, *nextdt; 697 struct ddebug_table *dt, *nextdt;
698 int ret = -ENOENT; 698 int ret = -ENOENT;
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 62fcc3a7f4d3..3a681ef25306 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -44,7 +44,7 @@ rpm-pkg rpm: $(objtree)/kernel.spec FORCE
44 fi 44 fi
45 $(MAKE) clean 45 $(MAKE) clean
46 $(PREV) ln -sf $(srctree) $(KERNELPATH) 46 $(PREV) ln -sf $(srctree) $(KERNELPATH)
47 $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion > $(objtree)/.scmversion 47 $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --scm-only > $(objtree)/.scmversion
48 $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/. 48 $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/.
49 $(PREV) rm $(KERNELPATH) 49 $(PREV) rm $(KERNELPATH)
50 rm -f $(objtree)/.scmversion 50 rm -f $(objtree)/.scmversion
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 46989b88d734..d6a866ed1835 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -10,73 +10,158 @@
10# 10#
11 11
12usage() { 12usage() {
13 echo "Usage: $0 [srctree]" >&2 13 echo "Usage: $0 [--scm-only] [srctree]" >&2
14 exit 1 14 exit 1
15} 15}
16 16
17cd "${1:-.}" || usage 17scm_only=false
18srctree=.
19if test "$1" = "--scm-only"; then
20 scm_only=true
21 shift
22fi
23if test $# -gt 0; then
24 srctree=$1
25 shift
26fi
27if test $# -gt 0 -o ! -d "$srctree"; then
28 usage
29fi
18 30
19# Check for git and a git repo. 31scm_version()
20if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then 32{
33 local short=false
21 34
22 # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore it, 35 cd "$srctree"
23 # because this version is defined in the top level Makefile. 36 if test -e .scmversion; then
24 if [ -z "`git describe --exact-match 2>/dev/null`" ]; then 37 cat "$_"
38 return
39 fi
40 if test "$1" = "--short"; then
41 short=true
42 fi
25 43
26 # If we are past a tagged commit (like "v2.6.30-rc5-302-g72357d5"), 44 # Check for git and a git repo.
27 # we pretty print it. 45 if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
28 if atag="`git describe 2>/dev/null`"; then 46
29 echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' 47 # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
48 # it, because this version is defined in the top level Makefile.
49 if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
50
51 # If only the short version is requested, don't bother
52 # running further git commands
53 if $short; then
54 echo "+"
55 return
56 fi
57 # If we are past a tagged commit (like
58 # "v2.6.30-rc5-302-g72357d5"), we pretty print it.
59 if atag="`git describe 2>/dev/null`"; then
60 echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
61
62 # If we don't have a tag at all we print -g{commitish}.
63 else
64 printf '%s%s' -g $head
65 fi
66 fi
30 67
31 # If we don't have a tag at all we print -g{commitish}. 68 # Is this git on svn?
32 else 69 if git config --get svn-remote.svn.url >/dev/null; then
33 printf '%s%s' -g $head 70 printf -- '-svn%s' "`git svn find-rev $head`"
34 fi 71 fi
35 fi
36 72
37 # Is this git on svn? 73 # Update index only on r/w media
38 if git config --get svn-remote.svn.url >/dev/null; then 74 [ -w . ] && git update-index --refresh --unmerged > /dev/null
39 printf -- '-svn%s' "`git svn find-rev $head`"
40 fi
41 75
42 # Update index only on r/w media 76 # Check for uncommitted changes
43 [ -w . ] && git update-index --refresh --unmerged > /dev/null 77 if git diff-index --name-only HEAD | grep -v "^scripts/package" \
78 | read dummy; then
79 printf '%s' -dirty
80 fi
44 81
45 # Check for uncommitted changes 82 # All done with git
46 if git diff-index --name-only HEAD | grep -v "^scripts/package" \ 83 return
47 | read dummy; then
48 printf '%s' -dirty
49 fi 84 fi
50 85
51 # All done with git 86 # Check for mercurial and a mercurial repo.
52 exit 87 if hgid=`hg id 2>/dev/null`; then
53fi 88 tag=`printf '%s' "$hgid" | cut -d' ' -f2`
89
90 # Do we have an untagged version?
91 if [ -z "$tag" -o "$tag" = tip ]; then
92 id=`printf '%s' "$hgid" | sed 's/[+ ].*//'`
93 printf '%s%s' -hg "$id"
94 fi
54 95
55# Check for mercurial and a mercurial repo. 96 # Are there uncommitted changes?
56if hgid=`hg id 2>/dev/null`; then 97 # These are represented by + after the changeset id.
57 tag=`printf '%s' "$hgid" | cut -d' ' -f2` 98 case "$hgid" in
99 *+|*+\ *) printf '%s' -dirty ;;
100 esac
58 101
59 # Do we have an untagged version? 102 # All done with mercurial
60 if [ -z "$tag" -o "$tag" = tip ]; then 103 return
61 id=`printf '%s' "$hgid" | sed 's/[+ ].*//'`
62 printf '%s%s' -hg "$id"
63 fi 104 fi
64 105
65 # Are there uncommitted changes? 106 # Check for svn and a svn repo.
66 # These are represented by + after the changeset id. 107 if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then
67 case "$hgid" in 108 rev=`echo $rev | awk '{print $NF}'`
68 *+|*+\ *) printf '%s' -dirty ;; 109 printf -- '-svn%s' "$rev"
69 esac
70 110
71 # All done with mercurial 111 # All done with svn
112 return
113 fi
114}
115
116collect_files()
117{
118 local file res
119
120 for file; do
121 case "$file" in
122 *\~*)
123 continue
124 ;;
125 esac
126 if test -e "$file"; then
127 res="$res$(cat "$file")"
128 fi
129 done
130 echo "$res"
131}
132
133if $scm_only; then
134 scm_version
72 exit 135 exit
73fi 136fi
74 137
75# Check for svn and a svn repo. 138if test -e include/config/auto.conf; then
76if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then 139 source "$_"
77 rev=`echo $rev | awk '{print $NF}'` 140else
78 printf -- '-svn%s' "$rev" 141 echo "Error: kernelrelease not valid - run 'make prepare' to update it"
142 exit 1
143fi
79 144
80 # All done with svn 145# localversion* files in the build and source directory
81 exit 146res="$(collect_files localversion*)"
147if test ! "$srctree" -ef .; then
148 res="$res$(collect_files "$srctree"/localversion*)"
149fi
150
151# CONFIG_LOCALVERSION and LOCALVERSION (if set)
152res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}"
153
154# scm version string if not at a tagged commit
155if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then
156 # full scm version string
157 res="$res$(scm_version)"
158else
 159 # append a plus sign if the repository is not in a clean tagged

160 # state and LOCALVERSION= is not specified
161 if test "${LOCALVERSION+set}" != "set"; then
162 scm=$(scm_version --short)
163 res="$res${scm:++}"
164 fi
82fi 165fi
166
167echo "$res"
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 1f7ecd47f499..9a448b47400c 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -7,6 +7,15 @@
7#include "util.h" 7#include "util.h"
8#include "debug.h" 8#include "debug.h"
9 9
10/* Skip "." and ".." directories */
11static int filter(const struct dirent *dir)
12{
13 if (dir->d_name[0] == '.')
14 return 0;
15 else
16 return 1;
17}
18
10int find_all_tid(int pid, pid_t ** all_tid) 19int find_all_tid(int pid, pid_t ** all_tid)
11{ 20{
12 char name[256]; 21 char name[256];
@@ -16,7 +25,7 @@ int find_all_tid(int pid, pid_t ** all_tid)
16 int i; 25 int i;
17 26
18 sprintf(name, "/proc/%d/task", pid); 27 sprintf(name, "/proc/%d/task", pid);
19 items = scandir(name, &namelist, NULL, NULL); 28 items = scandir(name, &namelist, filter, NULL);
20 if (items <= 0) 29 if (items <= 0)
21 return -ENOENT; 30 return -ENOENT;
22 *all_tid = malloc(sizeof(pid_t) * items); 31 *all_tid = malloc(sizeof(pid_t) * items);
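The filter matters because scandir(3) with a NULL filter also returns "." and "..", which find_all_tid() would then count (and try to parse) as thread ids. A small self-contained userspace illustration of the same pattern, not part of the patch:

	#include <dirent.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Same idea as the patch's filter(): skip "." and ".." (and any dotfile). */
	static int skip_dot(const struct dirent *d)
	{
		return d->d_name[0] != '.';
	}

	int main(void)
	{
		struct dirent **namelist;
		int n = scandir("/proc/self/task", &namelist, skip_dot, NULL);

		if (n < 0)
			return 1;
		printf("%d thread(s)\n", n);	/* no longer inflated by "." and ".." */
		while (n--)
			free(namelist[n]);
		free(namelist);
		return 0;
	}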