author     Tejun Heo <tj@kernel.org>   2010-04-04 22:37:28 -0400
committer  Tejun Heo <tj@kernel.org>   2010-04-04 22:37:28 -0400
commit     336f5899d287f06d8329e208fc14ce50f7ec9698 (patch)
tree       9b762d450d5eb248a6ff8317badb7e223d93ed58
parent     a4ab2773205e8b94c18625455f85e3b6bb9d7ad6 (diff)
parent     db217dece3003df0841bacf9556b5c06aa097dae (diff)
Merge branch 'master' into export-slabh
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt54
-rw-r--r--MAINTAINERS10
-rw-r--r--Makefile2
-rw-r--r--arch/arm/include/asm/cacheflush.h38
-rw-r--r--arch/arm/include/asm/clkdev.h1
-rw-r--r--arch/arm/include/asm/irq.h1
-rw-r--r--arch/arm/include/asm/outercache.h75
-rw-r--r--arch/arm/include/asm/system.h16
-rw-r--r--arch/arm/kernel/kprobes.c10
-rw-r--r--arch/arm/lib/memmove.S4
-rw-r--r--arch/arm/mm/Kconfig13
-rw-r--r--arch/arm/mm/cache-l2x0.c10
-rw-r--r--arch/arm/vfp/vfpmodule.c2
-rw-r--r--arch/microblaze/Kconfig3
-rw-r--r--arch/microblaze/Makefile4
-rw-r--r--arch/microblaze/boot/Makefile6
-rw-r--r--arch/microblaze/include/asm/processor.h1
-rw-r--r--arch/microblaze/include/asm/segment.h49
-rw-r--r--arch/microblaze/include/asm/thread_info.h5
-rw-r--r--arch/microblaze/include/asm/tlbflush.h3
-rw-r--r--arch/microblaze/include/asm/uaccess.h447
-rw-r--r--arch/microblaze/kernel/dma.c2
-rw-r--r--arch/microblaze/kernel/head.S12
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S112
-rw-r--r--arch/microblaze/kernel/misc.S15
-rw-r--r--arch/microblaze/kernel/process.c10
-rw-r--r--arch/microblaze/kernel/setup.c24
-rw-r--r--arch/microblaze/kernel/traps.c6
-rw-r--r--arch/microblaze/lib/Makefile3
-rw-r--r--arch/microblaze/lib/fastcopy.S6
-rw-r--r--arch/microblaze/lib/memcpy.c2
-rw-r--r--arch/microblaze/lib/memset.c15
-rw-r--r--arch/microblaze/lib/uaccess.c48
-rw-r--r--arch/microblaze/lib/uaccess_old.S45
-rw-r--r--arch/microblaze/mm/fault.c24
-rw-r--r--arch/microblaze/mm/init.c9
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/powerpc/kernel/misc.S2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c2
-rw-r--r--arch/sh/configs/ecovec24_defconfig236
-rw-r--r--arch/sh/include/asm/elf.h6
-rw-r--r--arch/sh/include/cpu-sh4/cpu/mmu_context.h2
-rw-r--r--arch/sh/kernel/cpufreq.c4
-rw-r--r--arch/sh/kernel/return_address.c3
-rw-r--r--arch/sh/kernel/smp.c1
-rw-r--r--arch/sh/mm/tlb-pteaex.c28
-rw-r--r--arch/sh/mm/tlb-sh3.c19
-rw-r--r--arch/sh/mm/tlb-sh4.c28
-rw-r--r--arch/sh/mm/tlb-urb.c22
-rw-r--r--arch/sh/mm/tlbflush_32.c28
-rw-r--r--arch/sparc/configs/sparc64_defconfig28
-rw-r--r--arch/sparc/kernel/helpers.S75
-rw-r--r--arch/sparc/kernel/ptrace_32.c4
-rw-r--r--arch/sparc/kernel/ptrace_64.c4
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c54
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c80
-rw-r--r--arch/x86/kernel/dumpstack.h5
-rw-r--r--arch/x86/kernel/head32.c4
-rw-r--r--arch/x86/kernel/head64.c3
-rw-r--r--arch/x86/kernel/kgdb.c2
-rw-r--r--arch/x86/kernel/setup.c10
-rw-r--r--arch/x86/kernel/smpboot.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/mm/init.c32
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_edid.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fops.c16
-rw-r--r--drivers/gpu/drm/nouveau/Makefile2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c609
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c13
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atom.c91
-rw-r--r--drivers/gpu/drm/radeon/atom.h8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c98
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c11
-rw-r--r--drivers/gpu/drm/radeon/r100.c25
-rw-r--r--drivers/gpu/drm/radeon/r200.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c5
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r520.c9
-rw-r--r--drivers/gpu/drm/radeon/r600.c30
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c52
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c35
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c70
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c191
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h10
-rw-r--r--drivers/gpu/drm/radeon/r600d.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon.h66
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c772
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h545
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c461
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c237
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c68
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c121
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c153
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r60075
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c33
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h53
-rw-r--r--drivers/gpu/drm/radeon/rs690.c122
-rw-r--r--drivers/gpu/drm/radeon/rs690d.h3
-rw-r--r--drivers/gpu/drm/radeon/rv515.c45
-rw-r--r--drivers/gpu/drm/radeon/rv770.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c18
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/hid/hid-gyration.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/misc/kgdbts.c6
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/pci/quirks.c36
-rw-r--r--drivers/platform/x86/Kconfig10
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-laptop.c4
-rw-r--r--drivers/platform/x86/eeepc-wmi.c157
-rw-r--r--drivers/serial/sunsu.c4
-rw-r--r--drivers/usb/gadget/at91_udc.c9
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/video/sunxvr500.c24
-rw-r--r--fs/fat/namei_vfat.c6
-rw-r--r--fs/logfs/dev_bdev.c9
-rw-r--r--fs/logfs/dir.c4
-rw-r--r--fs/logfs/journal.c7
-rw-r--r--fs/logfs/logfs.h1
-rw-r--r--fs/logfs/readwrite.c13
-rw-r--r--fs/logfs/segment.c54
-rw-r--r--fs/logfs/super.c15
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/task_mmu.c87
-rw-r--r--fs/reiserfs/super.c10
-rw-r--r--include/drm/drmP.h34
-rw-r--r--include/drm/drm_mem_util.h65
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/ttm/ttm_bo_driver.h1
-rw-r--r--include/linux/amba/bus.h3
-rw-r--r--include/linux/amba/pl061.h2
-rw-r--r--include/linux/freezer.h7
-rw-r--r--include/linux/perf_event.h21
-rw-r--r--kernel/cgroup_freezer.c9
-rw-r--r--kernel/cred.c6
-rw-r--r--kernel/early_res.c6
-rw-r--r--kernel/kgdb.c205
-rw-r--r--kernel/perf_event.c22
-rw-r--r--kernel/power/process.c5
-rw-r--r--kernel/sched.c2
-rw-r--r--kernel/sched_debug.c4
-rw-r--r--kernel/trace/ring_buffer.c8
-rw-r--r--kernel/trace/trace_clock.c4
-rw-r--r--kernel/trace/trace_event_perf.c11
-rw-r--r--tools/perf/Makefile10
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c17
183 files changed, 4561 insertions, 2618 deletions
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
index 6e37be1eeb2d..4f8930263dd9 100644
--- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
@@ -21,6 +21,15 @@ Required properties:
21- fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the 21- fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the
22 threads. 22 threads.
23 23
24Optional properties:
25- fsl,firmware-phandle:
26 Usage: required only if there is no fsl,qe-firmware child node
27 Value type: <phandle>
28 Definition: Points to a firmware node (see "QE Firmware Node" below)
29 that contains the firmware that should be uploaded for this QE.
30 The compatible property for the firmware node should say,
31 "fsl,qe-firmware".
32
24Recommended properties 33Recommended properties
25- brg-frequency : the internal clock source frequency for baud-rate 34- brg-frequency : the internal clock source frequency for baud-rate
26 generators in Hz. 35 generators in Hz.
@@ -59,3 +68,48 @@ Example:
59 reg = <0 c000>; 68 reg = <0 c000>;
60 }; 69 };
61 }; 70 };
71
72* QE Firmware Node
73
74This node defines a firmware binary that is embedded in the device tree, for
75the purpose of passing the firmware from bootloader to the kernel, or from
76the hypervisor to the guest.
77
78The firmware node itself contains the firmware binary contents, a compatible
79property, and any firmware-specific properties. The node should be placed
80inside a QE node that needs it. Doing so eliminates the need for a
81fsl,firmware-phandle property. Other QE nodes that need the same firmware
82should define an fsl,firmware-phandle property that points to the firmware node
83in the first QE node.
84
85The fsl,firmware property can be specified in the DTS (possibly using incbin)
86or can be inserted by the boot loader at boot time.
87
88Required properties:
89 - compatible
90 Usage: required
91 Value type: <string>
92 Definition: A standard property. Specify a string that indicates what
93 kind of firmware it is. For QE, this should be "fsl,qe-firmware".
94
95 - fsl,firmware
96 Usage: required
97 Value type: <prop-encoded-array>, encoded as an array of bytes
98 Definition: A standard property. This property contains the firmware
99 binary "blob".
100
101Example:
102 qe1@e0080000 {
103 compatible = "fsl,qe";
104 qe_firmware:qe-firmware {
105 compatible = "fsl,qe-firmware";
106 fsl,firmware = [0x70 0xcd 0x00 0x00 0x01 0x46 0x45 ...];
107 };
108 ...
109 };
110
111 qe2@e0090000 {
112 compatible = "fsl,qe";
113 fsl,firmware-phandle = <&qe_firmware>;
114 ...
115 };
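
As an illustration of how a consumer might use the binding above, the sketch below follows fsl,firmware-phandle to the firmware node and reads the blob. It is not the in-tree QE firmware loader; the function name and error handling are assumptions made for the example, and only of_parse_phandle(), of_get_property() and of_node_put() are actual OF helpers.

#include <linux/of.h>

/*
 * Illustrative only (not the in-tree QE loader): resolve the firmware node
 * referenced by fsl,firmware-phandle and return the raw "fsl,firmware"
 * blob.  A QE node carrying an embedded fsl,qe-firmware child would be
 * handled before falling back to this path.
 */
static const void *example_qe_get_firmware(struct device_node *qe, int *size)
{
        struct device_node *fw;
        const void *blob;

        fw = of_parse_phandle(qe, "fsl,firmware-phandle", 0);
        if (!fw)
                return NULL;

        blob = of_get_property(fw, "fsl,firmware", size);
        of_node_put(fw);
        return blob;
}
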
diff --git a/MAINTAINERS b/MAINTAINERS
index 088bd41ac71e..3d29fa389888 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3270,6 +3270,16 @@ S: Maintained
3270F: include/linux/kexec.h 3270F: include/linux/kexec.h
3271F: kernel/kexec.c 3271F: kernel/kexec.c
3272 3272
3273KEYS/KEYRINGS:
3274M: David Howells <dhowells@redhat.com>
3275L: keyrings@linux-nfs.org
3276S: Maintained
3277F: Documentation/keys.txt
3278F: include/linux/key.h
3279F: include/linux/key-type.h
3280F: include/keys/
3281F: security/keys/
3282
3273KGDB 3283KGDB
3274M: Jason Wessel <jason.wessel@windriver.com> 3284M: Jason Wessel <jason.wessel@windriver.com>
3275L: kgdb-bugreport@lists.sourceforge.net 3285L: kgdb-bugreport@lists.sourceforge.net
diff --git a/Makefile b/Makefile
index a5ba759e0fd5..67c1001cfbf5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 34 3SUBLEVEL = 34
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc3
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 72da7e045c6b..0d08d4170b64 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
15#include <asm/glue.h> 15#include <asm/glue.h>
16#include <asm/shmparam.h> 16#include <asm/shmparam.h>
17#include <asm/cachetype.h> 17#include <asm/cachetype.h>
18#include <asm/outercache.h>
18 19
19#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) 20#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
20 21
@@ -219,12 +220,6 @@ struct cpu_cache_fns {
219 void (*dma_flush_range)(const void *, const void *); 220 void (*dma_flush_range)(const void *, const void *);
220}; 221};
221 222
222struct outer_cache_fns {
223 void (*inv_range)(unsigned long, unsigned long);
224 void (*clean_range)(unsigned long, unsigned long);
225 void (*flush_range)(unsigned long, unsigned long);
226};
227
228/* 223/*
229 * Select the calling method 224 * Select the calling method
230 */ 225 */
@@ -281,37 +276,6 @@ extern void dmac_flush_range(const void *, const void *);
281 276
282#endif 277#endif
283 278
284#ifdef CONFIG_OUTER_CACHE
285
286extern struct outer_cache_fns outer_cache;
287
288static inline void outer_inv_range(unsigned long start, unsigned long end)
289{
290 if (outer_cache.inv_range)
291 outer_cache.inv_range(start, end);
292}
293static inline void outer_clean_range(unsigned long start, unsigned long end)
294{
295 if (outer_cache.clean_range)
296 outer_cache.clean_range(start, end);
297}
298static inline void outer_flush_range(unsigned long start, unsigned long end)
299{
300 if (outer_cache.flush_range)
301 outer_cache.flush_range(start, end);
302}
303
304#else
305
306static inline void outer_inv_range(unsigned long start, unsigned long end)
307{ }
308static inline void outer_clean_range(unsigned long start, unsigned long end)
309{ }
310static inline void outer_flush_range(unsigned long start, unsigned long end)
311{ }
312
313#endif
314
315/* 279/*
316 * Copy user data from/to a page which is mapped into a different 280 * Copy user data from/to a page which is mapped into a different
317 * processes address space. Really, we want to allow our "user 281 * processes address space. Really, we want to allow our "user
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 7a0690da5e63..b56c1389b6fa 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -13,6 +13,7 @@
13#define __ASM_CLKDEV_H 13#define __ASM_CLKDEV_H
14 14
15struct clk; 15struct clk;
16struct device;
16 17
17struct clk_lookup { 18struct clk_lookup {
18 struct list_head node; 19 struct list_head node;
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 328f14a8b790..237282f7c762 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -17,6 +17,7 @@
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19struct irqaction; 19struct irqaction;
20struct pt_regs;
20extern void migrate_irqs(void); 21extern void migrate_irqs(void);
21 22
22extern void asm_do_IRQ(unsigned int, struct pt_regs *); 23extern void asm_do_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 000000000000..25f76bae57ab
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,75 @@
1/*
2 * arch/arm/include/asm/outercache.h
3 *
4 * Copyright (C) 2010 ARM Ltd.
5 * Written by Catalin Marinas <catalin.marinas@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __ASM_OUTERCACHE_H
22#define __ASM_OUTERCACHE_H
23
24struct outer_cache_fns {
25 void (*inv_range)(unsigned long, unsigned long);
26 void (*clean_range)(unsigned long, unsigned long);
27 void (*flush_range)(unsigned long, unsigned long);
28#ifdef CONFIG_OUTER_CACHE_SYNC
29 void (*sync)(void);
30#endif
31};
32
33#ifdef CONFIG_OUTER_CACHE
34
35extern struct outer_cache_fns outer_cache;
36
37static inline void outer_inv_range(unsigned long start, unsigned long end)
38{
39 if (outer_cache.inv_range)
40 outer_cache.inv_range(start, end);
41}
42static inline void outer_clean_range(unsigned long start, unsigned long end)
43{
44 if (outer_cache.clean_range)
45 outer_cache.clean_range(start, end);
46}
47static inline void outer_flush_range(unsigned long start, unsigned long end)
48{
49 if (outer_cache.flush_range)
50 outer_cache.flush_range(start, end);
51}
52
53#else
54
55static inline void outer_inv_range(unsigned long start, unsigned long end)
56{ }
57static inline void outer_clean_range(unsigned long start, unsigned long end)
58{ }
59static inline void outer_flush_range(unsigned long start, unsigned long end)
60{ }
61
62#endif
63
64#ifdef CONFIG_OUTER_CACHE_SYNC
65static inline void outer_sync(void)
66{
67 if (outer_cache.sync)
68 outer_cache.sync();
69}
70#else
71static inline void outer_sync(void)
72{ }
73#endif
74
75#endif /* __ASM_OUTERCACHE_H */
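
The new header only moves the existing outer_inv_range/outer_clean_range/outer_flush_range helpers out of cacheflush.h and adds an optional sync hook, so callers are unchanged. As a rough illustration of the usual calling pattern (simplified, not taken from this patch, and assuming a lowmem buffer so __pa() is valid), flushing a buffer before handing it to a device looks like this; real drivers go through the DMA mapping API instead of open-coding it.

#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/memory.h>

/*
 * Rough illustration, not from this patch: maintain both cache levels
 * before a device reads a lowmem buffer.  The inner (L1) op takes virtual
 * addresses, the outer (L2) op takes physical addresses.
 */
static void example_flush_for_device(void *buf, size_t len)
{
        unsigned long phys = __pa(buf);

        dmac_flush_range(buf, buf + len);       /* inner cache, virtual  */
        outer_flush_range(phys, phys + len);    /* outer cache, physical */
}
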
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index ca88e6a84707..4ace45ec3ef8 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
60#include <linux/linkage.h> 60#include <linux/linkage.h>
61#include <linux/irqflags.h> 61#include <linux/irqflags.h>
62 62
63#include <asm/outercache.h>
64
63#define __exception __attribute__((section(".exception.text"))) 65#define __exception __attribute__((section(".exception.text")))
64 66
65struct thread_info; 67struct thread_info;
@@ -137,10 +139,12 @@ extern unsigned int user_debug;
137#define dmb() __asm__ __volatile__ ("" : : : "memory") 139#define dmb() __asm__ __volatile__ ("" : : : "memory")
138#endif 140#endif
139 141
140#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP) 142#ifdef CONFIG_ARCH_HAS_BARRIERS
141#define mb() dmb() 143#include <mach/barriers.h>
144#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
145#define mb() do { dsb(); outer_sync(); } while (0)
142#define rmb() dmb() 146#define rmb() dmb()
143#define wmb() dmb() 147#define wmb() mb()
144#else 148#else
145#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 149#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
146#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) 150#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@ extern unsigned int user_debug;
152#define smp_rmb() barrier() 156#define smp_rmb() barrier()
153#define smp_wmb() barrier() 157#define smp_wmb() barrier()
154#else 158#else
155#define smp_mb() mb() 159#define smp_mb() dmb()
156#define smp_rmb() rmb() 160#define smp_rmb() dmb()
157#define smp_wmb() wmb() 161#define smp_wmb() dmb()
158#endif 162#endif
159 163
160#define read_barrier_depends() do { } while(0) 164#define read_barrier_depends() do { } while(0)
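
The practical effect of the mb()/wmb() change is that mandatory barriers now also drain the outer cache's write buffer via outer_sync() (on ARMv7 or SMP builds without ARCH_HAS_BARRIERS), while the smp_*() variants stay as plain dmb(). The fragment below is a hypothetical driver sketch, not from this patch, showing the kind of ordering this is meant to guarantee: descriptor writes to DMA memory must be visible to the device before the doorbell write that starts the transfer.

#include <linux/types.h>
#include <linux/io.h>

/* Hypothetical device registers -- for illustration only. */
struct example_ring {
        u32 desc_addr;
        u32 doorbell;
};

static void example_kick_device(struct example_ring __iomem *regs,
                                u32 desc_phys)
{
        /* ... descriptor contents written to DMA memory above ... */
        wmb();                          /* with this patch: dsb() + outer_sync() */
        writel(desc_phys, &regs->desc_addr);
        writel(1, &regs->doorbell);
}
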
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 1fb932b4fece..2ba7deb3072e 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -394,6 +394,14 @@ void __kprobes jprobe_return(void)
394 /* 394 /*
395 * Setup an empty pt_regs. Fill SP and PC fields as 395 * Setup an empty pt_regs. Fill SP and PC fields as
396 * they're needed by longjmp_break_handler. 396 * they're needed by longjmp_break_handler.
397 *
398 * We allocate some slack between the original SP and start of
399 * our fabricated regs. To be precise we want to have worst case
400 * covered which is STMFD with all 16 regs so we allocate 2 *
401 * sizeof(struct_pt_regs)).
402 *
403 * This is to prevent any simulated instruction from writing
404 * over the regs when they are accessing the stack.
397 */ 405 */
398 "sub sp, %0, %1 \n\t" 406 "sub sp, %0, %1 \n\t"
399 "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" 407 "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@@ -411,7 +419,7 @@ void __kprobes jprobe_return(void)
411 "ldmia sp, {r0 - pc} \n\t" 419 "ldmia sp, {r0 - pc} \n\t"
412 : 420 :
413 : "r" (kcb->jprobe_saved_regs.ARM_sp), 421 : "r" (kcb->jprobe_saved_regs.ARM_sp),
414 "I" (sizeof(struct pt_regs)), 422 "I" (sizeof(struct pt_regs) * 2),
415 "J" (offsetof(struct pt_regs, ARM_sp)), 423 "J" (offsetof(struct pt_regs, ARM_sp)),
416 "J" (offsetof(struct pt_regs, ARM_pc)), 424 "J" (offsetof(struct pt_regs, ARM_pc)),
417 "J" (offsetof(struct pt_regs, ARM_cpsr)) 425 "J" (offsetof(struct pt_regs, ARM_cpsr))
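
The numbers behind the 2 * sizeof(struct pt_regs) slack, worked out under the assumption that 32-bit ARM pt_regs is 18 words (72 bytes): the fabricated regs occupy one sizeof(struct pt_regs) at the new SP, leaving another 72 bytes of slack below the saved SP, which covers the 64 bytes a simulated STMFD of all 16 core registers can write. A stand-alone check of that arithmetic:

#include <assert.h>

/* Assumption for illustration: 32-bit ARM pt_regs layout (18 words). */
struct pt_regs { unsigned long uregs[18]; };

int main(void)
{
        /* slack = SP drop minus the space used by the fabricated regs */
        unsigned long slack = 2 * sizeof(struct pt_regs)
                            - sizeof(struct pt_regs);

        /* worst case: STMFD stores all 16 core registers below saved SP */
        assert(slack >= 16 * sizeof(unsigned long));
        return 0;
}
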
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 5025c863713d..938fc14f962d 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -74,7 +74,7 @@ ENTRY(memmove)
74 rsb ip, ip, #32 74 rsb ip, ip, #32
75 addne pc, pc, ip @ C is always clear here 75 addne pc, pc, ip @ C is always clear here
76 b 7f 76 b 7f
776: nop 776: W(nop)
78 W(ldr) r3, [r1, #-4]! 78 W(ldr) r3, [r1, #-4]!
79 W(ldr) r4, [r1, #-4]! 79 W(ldr) r4, [r1, #-4]!
80 W(ldr) r5, [r1, #-4]! 80 W(ldr) r5, [r1, #-4]!
@@ -85,7 +85,7 @@ ENTRY(memmove)
85 85
86 add pc, pc, ip 86 add pc, pc, ip
87 nop 87 nop
88 nop 88 W(nop)
89 W(str) r3, [r0, #-4]! 89 W(str) r3, [r0, #-4]!
90 W(str) r4, [r0, #-4]! 90 W(str) r4, [r0, #-4]!
91 W(str) r5, [r0, #-4]! 91 W(str) r5, [r0, #-4]!
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f646..5bd7c89a6045 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
736config OUTER_CACHE 736config OUTER_CACHE
737 bool 737 bool
738 738
739config OUTER_CACHE_SYNC
740 bool
741 help
742 The outer cache has a outer_cache_fns.sync function pointer
743 that can be used to drain the write buffer of the outer cache.
744
739config CACHE_FEROCEON_L2 745config CACHE_FEROCEON_L2
740 bool "Enable the Feroceon L2 cache controller" 746 bool "Enable the Feroceon L2 cache controller"
741 depends on ARCH_KIRKWOOD || ARCH_MV78XX0 747 depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -757,6 +763,7 @@ config CACHE_L2X0
757 REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4 763 REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
758 default y 764 default y
759 select OUTER_CACHE 765 select OUTER_CACHE
766 select OUTER_CACHE_SYNC
760 help 767 help
761 This option enables the L2x0 PrimeCell. 768 This option enables the L2x0 PrimeCell.
762 769
@@ -781,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
781 int 788 int
782 default 6 if ARM_L1_CACHE_SHIFT_6 789 default 6 if ARM_L1_CACHE_SHIFT_6
783 default 5 790 default 5
791
792config ARCH_HAS_BARRIERS
793 bool
794 help
795 This option allows the use of custom mandatory barriers
796 included via the mach/barriers.h file.
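
ARCH_HAS_BARRIERS lets a platform supply its own mandatory barriers through mach/barriers.h, which <asm/system.h> now includes in preference to the generic definitions. A hypothetical (not in-tree) version of such a header, assuming dsb() and outer_sync() are already visible at the include site as they are in <asm/system.h> after this patch, might look like:

#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H

/*
 * Hypothetical mach/barriers.h sketch.  The only contract implied by the
 * Kconfig help text and the #include in <asm/system.h> is that this header
 * defines mb(), rmb() and wmb().
 */
#define mb()    do { dsb(); outer_sync(); } while (0)
#define rmb()   dsb()
#define wmb()   mb()

#endif /* __MACH_BARRIERS_H */
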
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e2..21ad68ba22ba 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
93} 93}
94#endif 94#endif
95 95
96static void l2x0_cache_sync(void)
97{
98 unsigned long flags;
99
100 spin_lock_irqsave(&l2x0_lock, flags);
101 cache_sync();
102 spin_unlock_irqrestore(&l2x0_lock, flags);
103}
104
96static inline void l2x0_inv_all(void) 105static inline void l2x0_inv_all(void)
97{ 106{
98 unsigned long flags; 107 unsigned long flags;
@@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
225 outer_cache.inv_range = l2x0_inv_range; 234 outer_cache.inv_range = l2x0_inv_range;
226 outer_cache.clean_range = l2x0_clean_range; 235 outer_cache.clean_range = l2x0_clean_range;
227 outer_cache.flush_range = l2x0_flush_range; 236 outer_cache.flush_range = l2x0_flush_range;
237 outer_cache.sync = l2x0_cache_sync;
228 238
229 printk(KERN_INFO "L2X0 cache controller enabled\n"); 239 printk(KERN_INFO "L2X0 cache controller enabled\n");
230} 240}
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 7f3f59fcaa21..a420cb949328 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -545,7 +545,7 @@ static int __init vfp_init(void)
545 */ 545 */
546 elf_hwcap |= HWCAP_VFP; 546 elf_hwcap |= HWCAP_VFP;
547#ifdef CONFIG_VFPv3 547#ifdef CONFIG_VFPv3
548 if (VFP_arch >= 3) { 548 if (VFP_arch >= 2) {
549 elf_hwcap |= HWCAP_VFPv3; 549 elf_hwcap |= HWCAP_VFPv3;
550 550
551 /* 551 /*
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 203ec61c6d4c..76818f926539 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
75config HAVE_LATENCYTOP_SUPPORT 75config HAVE_LATENCYTOP_SUPPORT
76 def_bool y 76 def_bool y
77 77
78config PCI
79 def_bool n
80
81config DTC 78config DTC
82 def_bool y 79 def_bool y
83 80
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 836832dd9b26..72f6e8583746 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -84,7 +84,7 @@ define archhelp
84 echo '* linux.bin - Create raw binary' 84 echo '* linux.bin - Create raw binary'
85 echo ' linux.bin.gz - Create compressed raw binary' 85 echo ' linux.bin.gz - Create compressed raw binary'
86 echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in' 86 echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
87 echo ' - stripped elf with fdt blob 87 echo ' - stripped elf with fdt blob'
88 echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob' 88 echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
89 echo ' *_defconfig - Select default config from arch/microblaze/configs' 89 echo ' *_defconfig - Select default config from arch/microblaze/configs'
90 echo '' 90 echo ''
@@ -94,3 +94,5 @@ define archhelp
94 echo ' name of a dts file from the arch/microblaze/boot/dts/ directory' 94 echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
95 echo ' (minus the .dts extension).' 95 echo ' (minus the .dts extension).'
96endef 96endef
97
98MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 902cf9846c3c..57f50c2371c6 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
23endif 23endif
24 24
25$(obj)/linux.bin: vmlinux FORCE 25$(obj)/linux.bin: vmlinux FORCE
26 [ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
27 touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
28 $(call if_changed,objcopy) 26 $(call if_changed,objcopy)
29 $(call if_changed,uimage) 27 $(call if_changed,uimage)
30 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 28 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
62$(obj)/%.dtb: $(dtstree)/%.dts FORCE 60$(obj)/%.dtb: $(dtstree)/%.dts FORCE
63 $(call if_changed,dtc) 61 $(call if_changed,dtc)
64 62
65clean-kernel += linux.bin linux.bin.gz simpleImage.* 63clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
66
67clean-files += *.dtb simpleImage.*.unstrip
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9453f0..8eeb09211ece 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/setup.h> 15#include <asm/setup.h>
16#include <asm/registers.h> 16#include <asm/registers.h>
17#include <asm/segment.h>
18#include <asm/entry.h> 17#include <asm/entry.h>
19#include <asm/current.h> 18#include <asm/current.h>
20 19
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c3fb11..000000000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_SEGMENT_H
12#define _ASM_MICROBLAZE_SEGMENT_H
13
14# ifndef __ASSEMBLY__
15
16typedef struct {
17 unsigned long seg;
18} mm_segment_t;
19
20/*
21 * On Microblaze the fs value is actually the top of the corresponding
22 * address space.
23 *
24 * The fs value determines whether argument validity checking should be
25 * performed or not. If get_fs() == USER_DS, checking is performed, with
26 * get_fs() == KERNEL_DS, checking is bypassed.
27 *
28 * For historical reasons, these macros are grossly misnamed.
29 *
30 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
31 */
32# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
33
34# ifndef CONFIG_MMU
35# define KERNEL_DS MAKE_MM_SEG(0)
36# define USER_DS KERNEL_DS
37# else
38# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
39# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
40# endif
41
42# define get_ds() (KERNEL_DS)
43# define get_fs() (current_thread_info()->addr_limit)
44# define set_fs(val) (current_thread_info()->addr_limit = (val))
45
46# define segment_eq(a, b) ((a).seg == (b).seg)
47
48# endif /* __ASSEMBLY__ */
49#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885d381a..b2ca80f64640 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
19#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
20# include <linux/types.h> 20# include <linux/types.h>
21# include <asm/processor.h> 21# include <asm/processor.h>
22# include <asm/segment.h>
23 22
24/* 23/*
25 * low level task data that entry.S needs immediate access to 24 * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
60 __u32 fsr; 59 __u32 fsr;
61}; 60};
62 61
62typedef struct {
63 unsigned long seg;
64} mm_segment_t;
65
63struct thread_info { 66struct thread_info {
64 struct task_struct *task; /* main task structure */ 67 struct task_struct *task; /* main task structure */
65 struct exec_domain *exec_domain; /* execution domain */ 68 struct exec_domain *exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index bcb8b41d55af..2e1353c2d18d 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
24extern void _tlbia(void); 24extern void _tlbia(void);
25 25
26#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); } 26#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
27#define __tlbie(x) { _tlbie(x); }
27 28
28static inline void local_flush_tlb_all(void) 29static inline void local_flush_tlb_all(void)
29 { __tlbia(); } 30 { __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
31 { __tlbia(); } 32 { __tlbia(); }
32static inline void local_flush_tlb_page(struct vm_area_struct *vma, 33static inline void local_flush_tlb_page(struct vm_area_struct *vma,
33 unsigned long vmaddr) 34 unsigned long vmaddr)
34 { _tlbie(vmaddr); } 35 { __tlbie(vmaddr); }
35static inline void local_flush_tlb_range(struct vm_area_struct *vma, 36static inline void local_flush_tlb_range(struct vm_area_struct *vma,
36 unsigned long start, unsigned long end) 37 unsigned long start, unsigned long end)
37 { __tlbia(); } 38 { __tlbia(); }
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 371bd6e56d9a..446bec29b142 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -22,101 +22,73 @@
22#include <asm/mmu.h> 22#include <asm/mmu.h>
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/segment.h>
26#include <linux/string.h> 25#include <linux/string.h>
27 26
28#define VERIFY_READ 0 27#define VERIFY_READ 0
29#define VERIFY_WRITE 1 28#define VERIFY_WRITE 1
30 29
31#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0) 30/*
32 31 * On Microblaze the fs value is actually the top of the corresponding
33#ifndef CONFIG_MMU 32 * address space.
34 33 *
35extern int ___range_ok(unsigned long addr, unsigned long size); 34 * The fs value determines whether argument validity checking should be
36 35 * performed or not. If get_fs() == USER_DS, checking is performed, with
37#define __range_ok(addr, size) \ 36 * get_fs() == KERNEL_DS, checking is bypassed.
38 ___range_ok((unsigned long)(addr), (unsigned long)(size)) 37 *
39 38 * For historical reasons, these macros are grossly misnamed.
40#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) 39 *
41#define __access_ok(add, size) (__range_ok((addr), (size)) == 0) 40 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
42 41 */
43/* Undefined function to trigger linker error */ 42# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
44extern int bad_user_access_length(void);
45
46/* FIXME this is function for optimalization -> memcpy */
47#define __get_user(var, ptr) \
48({ \
49 int __gu_err = 0; \
50 switch (sizeof(*(ptr))) { \
51 case 1: \
52 case 2: \
53 case 4: \
54 (var) = *(ptr); \
55 break; \
56 case 8: \
57 memcpy((void *) &(var), (ptr), 8); \
58 break; \
59 default: \
60 (var) = 0; \
61 __gu_err = __get_user_bad(); \
62 break; \
63 } \
64 __gu_err; \
65})
66 43
67#define __get_user_bad() (bad_user_access_length(), (-EFAULT)) 44# ifndef CONFIG_MMU
45# define KERNEL_DS MAKE_MM_SEG(0)
46# define USER_DS KERNEL_DS
47# else
48# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
49# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
50# endif
68 51
69/* FIXME is not there defined __pu_val */ 52# define get_ds() (KERNEL_DS)
70#define __put_user(var, ptr) \ 53# define get_fs() (current_thread_info()->addr_limit)
71({ \ 54# define set_fs(val) (current_thread_info()->addr_limit = (val))
72 int __pu_err = 0; \
73 switch (sizeof(*(ptr))) { \
74 case 1: \
75 case 2: \
76 case 4: \
77 *(ptr) = (var); \
78 break; \
79 case 8: { \
80 typeof(*(ptr)) __pu_val = (var); \
81 memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
82 } \
83 break; \
84 default: \
85 __pu_err = __put_user_bad(); \
86 break; \
87 } \
88 __pu_err; \
89})
90 55
91#define __put_user_bad() (bad_user_access_length(), (-EFAULT)) 56# define segment_eq(a, b) ((a).seg == (b).seg)
92 57
93#define put_user(x, ptr) __put_user((x), (ptr)) 58/*
94#define get_user(x, ptr) __get_user((x), (ptr)) 59 * The exception table consists of pairs of addresses: the first is the
60 * address of an instruction that is allowed to fault, and the second is
61 * the address at which the program should continue. No registers are
62 * modified, so it is entirely up to the continuation code to figure out
63 * what to do.
64 *
65 * All the routines below use bits of fixup code that are out of line
66 * with the main instruction path. This means when everything is well,
67 * we don't even have to jump over them. Further, they do not intrude
68 * on our cache or tlb entries.
69 */
70struct exception_table_entry {
71 unsigned long insn, fixup;
72};
95 73
96#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0) 74/* Returns 0 if exception not found and fixup otherwise. */
97#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0) 75extern unsigned long search_exception_table(unsigned long);
98 76
99#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n))) 77#ifndef CONFIG_MMU
100#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
101#define __copy_to_user_inatomic(to, from, n) \
102 (__copy_to_user((to), (from), (n)))
103#define __copy_from_user_inatomic(to, from, n) \
104 (__copy_from_user((to), (from), (n)))
105 78
106static inline unsigned long clear_user(void *addr, unsigned long size) 79/* Check against bounds of physical memory */
80static inline int ___range_ok(unsigned long addr, unsigned long size)
107{ 81{
108 if (access_ok(VERIFY_WRITE, addr, size)) 82 return ((addr < memory_start) ||
109 size = __clear_user(addr, size); 83 ((addr + size) > memory_end));
110 return size;
111} 84}
112 85
113/* Returns 0 if exception not found and fixup otherwise. */ 86#define __range_ok(addr, size) \
114extern unsigned long search_exception_table(unsigned long); 87 ___range_ok((unsigned long)(addr), (unsigned long)(size))
115 88
116extern long strncpy_from_user(char *dst, const char *src, long count); 89#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
117extern long strnlen_user(const char *src, long count);
118 90
119#else /* CONFIG_MMU */ 91#else
120 92
121/* 93/*
122 * Address is valid if: 94 * Address is valid if:
@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count);
129/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", 101/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
130 type?"WRITE":"READ",addr,size,get_fs().seg)) */ 102 type?"WRITE":"READ",addr,size,get_fs().seg)) */
131 103
132/* 104#endif
133 * All the __XXX versions macros/functions below do not perform
134 * access checking. It is assumed that the necessary checks have been
135 * already performed before the finction (macro) is called.
136 */
137 105
138#define get_user(x, ptr) \ 106#ifdef CONFIG_MMU
139({ \ 107# define __FIXUP_SECTION ".section .fixup,\"ax\"\n"
140 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ 108# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
141 ? __get_user((x), (ptr)) : -EFAULT; \ 109#else
142}) 110# define __FIXUP_SECTION ".section .discard,\"ax\"\n"
111# define __EX_TABLE_SECTION ".section .discard,\"a\"\n"
112#endif
143 113
144#define put_user(x, ptr) \ 114extern unsigned long __copy_tofrom_user(void __user *to,
145({ \ 115 const void __user *from, unsigned long size);
146 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \ 116
147 ? __put_user((x), (ptr)) : -EFAULT; \ 117/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
118static inline unsigned long __must_check __clear_user(void __user *to,
119 unsigned long n)
120{
121 /* normal memset with two words to __ex_table */
122 __asm__ __volatile__ ( \
123 "1: sb r0, %2, r0;" \
124 " addik %0, %0, -1;" \
125 " bneid %0, 1b;" \
126 " addik %2, %2, 1;" \
127 "2: " \
128 __EX_TABLE_SECTION \
129 ".word 1b,2b;" \
130 ".previous;" \
131 : "=r"(n) \
132 : "0"(n), "r"(to)
133 );
134 return n;
135}
136
137static inline unsigned long __must_check clear_user(void __user *to,
138 unsigned long n)
139{
140 might_sleep();
141 if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
142 return n;
143
144 return __clear_user(to, n);
145}
146
147/* put_user and get_user macros */
148extern long __user_bad(void);
149
150#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
151({ \
152 __asm__ __volatile__ ( \
153 "1:" insn " %1, %2, r0;" \
154 " addk %0, r0, r0;" \
155 "2: " \
156 __FIXUP_SECTION \
157 "3: brid 2b;" \
158 " addik %0, r0, %3;" \
159 ".previous;" \
160 __EX_TABLE_SECTION \
161 ".word 1b,3b;" \
162 ".previous;" \
163 : "=&r"(__gu_err), "=r"(__gu_val) \
164 : "r"(__gu_ptr), "i"(-EFAULT) \
165 ); \
148}) 166})
149 167
168/**
169 * get_user: - Get a simple variable from user space.
170 * @x: Variable to store result.
171 * @ptr: Source address, in user space.
172 *
173 * Context: User context only. This function may sleep.
174 *
175 * This macro copies a single simple variable from user space to kernel
176 * space. It supports simple types like char and int, but not larger
177 * data types like structures or arrays.
178 *
179 * @ptr must have pointer-to-simple-variable type, and the result of
180 * dereferencing @ptr must be assignable to @x without a cast.
181 *
182 * Returns zero on success, or -EFAULT on error.
183 * On error, the variable @x is set to zero.
184 */
185
150#define __get_user(x, ptr) \ 186#define __get_user(x, ptr) \
151({ \ 187({ \
152 unsigned long __gu_val; \ 188 unsigned long __gu_val; \
@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count);
163 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ 199 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
164 break; \ 200 break; \
165 default: \ 201 default: \
166 __gu_val = 0; __gu_err = -EINVAL; \ 202 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
167 } \ 203 } \
168 x = (__typeof__(*(ptr))) __gu_val; \ 204 x = (__typeof__(*(ptr))) __gu_val; \
169 __gu_err; \ 205 __gu_err; \
170}) 206})
171 207
172#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 208
209#define get_user(x, ptr) \
173({ \ 210({ \
174 __asm__ __volatile__ ( \ 211 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
175 "1:" insn " %1, %2, r0; \ 212 ? __get_user((x), (ptr)) : -EFAULT; \
176 addk %0, r0, r0; \ 213})
177 2: \ 214
178 .section .fixup,\"ax\"; \ 215#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
179 3: brid 2b; \ 216({ \
180 addik %0, r0, %3; \ 217 __asm__ __volatile__ ( \
181 .previous; \ 218 "1:" insn " %1, %2, r0;" \
182 .section __ex_table,\"a\"; \ 219 " addk %0, r0, r0;" \
183 .word 1b,3b; \ 220 "2: " \
184 .previous;" \ 221 __FIXUP_SECTION \
185 : "=r"(__gu_err), "=r"(__gu_val) \ 222 "3: brid 2b;" \
186 : "r"(__gu_ptr), "i"(-EFAULT) \ 223 " addik %0, r0, %3;" \
187 ); \ 224 ".previous;" \
225 __EX_TABLE_SECTION \
226 ".word 1b,3b;" \
227 ".previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
230 ); \
188}) 231})
189 232
233#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
234({ \
235 __asm__ __volatile__ (" lwi %0, %1, 0;" \
236 "1: swi %0, %2, 0;" \
237 " lwi %0, %1, 4;" \
238 "2: swi %0, %2, 4;" \
239 " addk %0, r0, r0;" \
240 "3: " \
241 __FIXUP_SECTION \
242 "4: brid 3b;" \
243 " addik %0, r0, %3;" \
244 ".previous;" \
245 __EX_TABLE_SECTION \
246 ".word 1b,4b,2b,4b;" \
247 ".previous;" \
248 : "=&r"(__gu_err) \
249 : "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
250 ); \
251})
252
253/**
254 * put_user: - Write a simple value into user space.
255 * @x: Value to copy to user space.
256 * @ptr: Destination address, in user space.
257 *
258 * Context: User context only. This function may sleep.
259 *
260 * This macro copies a single simple value from kernel space to user
261 * space. It supports simple types like char and int, but not larger
262 * data types like structures or arrays.
263 *
264 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
265 * to the result of dereferencing @ptr.
266 *
267 * Returns zero on success, or -EFAULT on error.
268 */
269
190#define __put_user(x, ptr) \ 270#define __put_user(x, ptr) \
191({ \ 271({ \
192 __typeof__(*(ptr)) volatile __gu_val = (x); \ 272 __typeof__(*(ptr)) volatile __gu_val = (x); \
@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count);
195 case 1: \ 275 case 1: \
196 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ 276 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \
197 break; \ 277 break; \
198 case 2: \ 278 case 2: \
199 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ 279 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \
200 break; \ 280 break; \
201 case 4: \ 281 case 4: \
@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count);
205 __put_user_asm_8((ptr), __gu_val, __gu_err); \ 285 __put_user_asm_8((ptr), __gu_val, __gu_err); \
206 break; \ 286 break; \
207 default: \ 287 default: \
208 __gu_err = -EINVAL; \ 288 /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \
209 } \ 289 } \
210 __gu_err; \ 290 __gu_err; \
211}) 291})
212 292
213#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ 293#ifndef CONFIG_MMU
214({ \
215__asm__ __volatile__ (" lwi %0, %1, 0; \
216 1: swi %0, %2, 0; \
217 lwi %0, %1, 4; \
218 2: swi %0, %2, 4; \
219 addk %0,r0,r0; \
220 3: \
221 .section .fixup,\"ax\"; \
222 4: brid 3b; \
223 addik %0, r0, %3; \
224 .previous; \
225 .section __ex_table,\"a\"; \
226 .word 1b,4b,2b,4b; \
227 .previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(&__gu_val), \
230 "r"(__gu_ptr), "i"(-EFAULT) \
231 ); \
232})
233 294
234#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 295#define put_user(x, ptr) __put_user((x), (ptr))
235({ \
236 __asm__ __volatile__ ( \
237 "1:" insn " %1, %2, r0; \
238 addk %0, r0, r0; \
239 2: \
240 .section .fixup,\"ax\"; \
241 3: brid 2b; \
242 addik %0, r0, %3; \
243 .previous; \
244 .section __ex_table,\"a\"; \
245 .word 1b,3b; \
246 .previous;" \
247 : "=r"(__gu_err) \
248 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
249 ); \
250})
251 296
252/* 297#else /* CONFIG_MMU */
253 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
254 */
255static inline int clear_user(char *to, int size)
256{
257 if (size && access_ok(VERIFY_WRITE, to, size)) {
258 __asm__ __volatile__ (" \
259 1: \
260 sb r0, %2, r0; \
261 addik %0, %0, -1; \
262 bneid %0, 1b; \
263 addik %2, %2, 1; \
264 2: \
265 .section __ex_table,\"a\"; \
266 .word 1b,2b; \
267 .section .text;" \
268 : "=r"(size) \
269 : "0"(size), "r"(to)
270 );
271 }
272 return size;
273}
274 298
275#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n)) 299#define put_user(x, ptr) \
300({ \
301 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
302 ? __put_user((x), (ptr)) : -EFAULT; \
303})
304#endif /* CONFIG_MMU */
305
306/* copy_to_from_user */
307#define __copy_from_user(to, from, n) \
308 __copy_tofrom_user((__force void __user *)(to), \
309 (void __user *)(from), (n))
276#define __copy_from_user_inatomic(to, from, n) \ 310#define __copy_from_user_inatomic(to, from, n) \
277 copy_from_user((to), (from), (n)) 311 copy_from_user((to), (from), (n))
278 312
279#define copy_to_user(to, from, n) \ 313static inline long copy_from_user(void *to,
280 (access_ok(VERIFY_WRITE, (to), (n)) ? \ 314 const void __user *from, unsigned long n)
281 __copy_tofrom_user((void __user *)(to), \ 315{
282 (__force const void __user *)(from), (n)) \ 316 might_sleep();
283 : -EFAULT) 317 if (access_ok(VERIFY_READ, from, n))
318 return __copy_from_user(to, from, n);
319 return n;
320}
284 321
285#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n)) 322#define __copy_to_user(to, from, n) \
323 __copy_tofrom_user((void __user *)(to), \
324 (__force const void __user *)(from), (n))
286#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) 325#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
287 326
288#define copy_from_user(to, from, n) \ 327static inline long copy_to_user(void __user *to,
289 (access_ok(VERIFY_READ, (from), (n)) ? \ 328 const void *from, unsigned long n)
290 __copy_tofrom_user((__force void __user *)(to), \ 329{
291 (void __user *)(from), (n)) \ 330 might_sleep();
292 : -EFAULT) 331 if (access_ok(VERIFY_WRITE, to, n))
332 return __copy_to_user(to, from, n);
333 return n;
334}
293 335
336/*
337 * Copy a null terminated string from userspace.
338 */
294extern int __strncpy_user(char *to, const char __user *from, int len); 339extern int __strncpy_user(char *to, const char __user *from, int len);
295extern int __strnlen_user(const char __user *sstr, int len);
296 340
297#define strncpy_from_user(to, from, len) \ 341#define __strncpy_from_user __strncpy_user
298 (access_ok(VERIFY_READ, from, 1) ? \
299 __strncpy_user(to, from, len) : -EFAULT)
300#define strnlen_user(str, len) \
301 (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
302 342
303#endif /* CONFIG_MMU */ 343static inline long
304 344strncpy_from_user(char *dst, const char __user *src, long count)
305extern unsigned long __copy_tofrom_user(void __user *to, 345{
306 const void __user *from, unsigned long size); 346 if (!access_ok(VERIFY_READ, src, 1))
347 return -EFAULT;
348 return __strncpy_from_user(dst, src, count);
349}
307 350
308/* 351/*
309 * The exception table consists of pairs of addresses: the first is the 352 * Return the size of a string (including the ending 0)
310 * address of an instruction that is allowed to fault, and the second is
311 * the address at which the program should continue. No registers are
312 * modified, so it is entirely up to the continuation code to figure out
313 * what to do.
314 * 353 *
315 * All the routines below use bits of fixup code that are out of line 354 * Return 0 on exception, a value greater than N if too long
316 * with the main instruction path. This means when everything is well,
317 * we don't even have to jump over them. Further, they do not intrude
318 * on our cache or tlb entries.
319 */ 355 */
320struct exception_table_entry { 356extern int __strnlen_user(const char __user *sstr, int len);
321 unsigned long insn, fixup; 357
322}; 358static inline long strnlen_user(const char __user *src, long n)
359{
360 if (!access_ok(VERIFY_READ, src, 1))
361 return 0;
362 return __strnlen_user(src, n);
363}
323 364
324#endif /* __ASSEMBLY__ */ 365#endif /* __ASSEMBLY__ */
325#endif /* __KERNEL__ */ 366#endif /* __KERNEL__ */
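
For reference, the rewritten helpers keep the usual kernel calling convention spelled out in the kernel-doc comments above: get_user()/put_user() return 0 or -EFAULT, and copy_from_user()/copy_to_user() return the number of bytes that could not be copied. A hypothetical caller, not part of this patch, would use them like this:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical request structure and handler, for illustration only. */
struct example_req {
        int cmd;
        char name[32];
};

static long example_handler(void __user *arg)
{
        struct example_req req;
        int version = 2;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;         /* some bytes could not be copied */

        /* write a single int back to the first field of the request */
        if (put_user(version, (int __user *)arg))
                return -EFAULT;

        return 0;
}
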
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 9d69ca4b9635..ce72dd4967cf 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -38,7 +38,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
38 38
39static unsigned long get_dma_direct_offset(struct device *dev) 39static unsigned long get_dma_direct_offset(struct device *dev)
40{ 40{
41 if (dev) 41 if (likely(dev))
42 return (unsigned long)dev->archdata.dma_data; 42 return (unsigned long)dev->archdata.dma_data;
43 43
44 return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ 44 return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index cb7815cfe5ab..da6a5f5dc766 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@ swapper_pg_dir:
51 51
52 .text 52 .text
53ENTRY(_start) 53ENTRY(_start)
54#if CONFIG_KERNEL_BASE_ADDR == 0
55 brai TOPHYS(real_start)
56 .org 0x100
57real_start:
58#endif
59
54 mfs r1, rmsr 60 mfs r1, rmsr
55 andi r1, r1, ~2 61 andi r1, r1, ~2
56 mts rmsr, r1 62 mts rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
99 tophys(r4,r4) /* convert to phys address */ 105 tophys(r4,r4) /* convert to phys address */
100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 106 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
101_copy_command_line: 107_copy_command_line:
102 lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ 108 lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
103 sb r2, r4, r6 /* addr[r4+r6]= r7*/ 109 sb r2, r4, r6 /* addr[r4+r6]= r2*/
104 addik r6, r6, 1 /* increment counting */ 110 addik r6, r6, 1 /* increment counting */
105 bgtid r3, _copy_command_line /* loop for all entries */ 111 bgtid r3, _copy_command_line /* loop for all entries */
106 addik r3, r3, -1 /* descrement loop */ 112 addik r3, r3, -1 /* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
128 * virtual to physical. 134 * virtual to physical.
129 */ 135 */
130 nop 136 nop
131 addik r3, r0, 63 /* Invalidate all TLB entries */ 137 addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
132_invalidate: 138_invalidate:
133 mts rtlbx, r3 139 mts rtlbx, r3
134 mts rtlbhi, r0 /* flush: ensure V is clear */ 140 mts rtlbhi, r0 /* flush: ensure V is clear */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa841..995a2123635b 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
313 mfs r5, rmsr; 313 mfs r5, rmsr;
314 nop 314 nop
315 swi r5, r1, 0; 315 swi r5, r1, 0;
316 mfs r3, resr 316 mfs r4, resr
317 nop 317 nop
318 mfs r4, rear; 318 mfs r3, rear;
319 nop 319 nop
320 320
321#ifndef CONFIG_MMU 321#ifndef CONFIG_MMU
322 andi r5, r3, 0x1000; /* Check ESR[DS] */ 322 andi r5, r4, 0x1000; /* Check ESR[DS] */
323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
325 nop 325 nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
327 swi r17, r1, PT_R17 327 swi r17, r1, PT_R17
328#endif 328#endif
329 329
330 andi r5, r3, 0x1F; /* Extract ESR[EXC] */ 330 andi r5, r4, 0x1F; /* Extract ESR[EXC] */
331 331
332#ifdef CONFIG_MMU 332#ifdef CONFIG_MMU
333 /* Calculate exception vector offset = r5 << 2 */ 333 /* Calculate exception vector offset = r5 << 2 */
334 addk r6, r5, r5; /* << 1 */ 334 addk r6, r5, r5; /* << 1 */
335 addk r6, r6, r6; /* << 2 */ 335 addk r6, r6, r6; /* << 2 */
336 336
337#ifdef DEBUG
337/* counting which exception happen */ 338/* counting which exception happen */
338 lwi r5, r0, 0x200 + TOPHYS(r0_ram) 339 lwi r5, r0, 0x200 + TOPHYS(r0_ram)
339 addi r5, r5, 1 340 addi r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
341 lwi r5, r6, 0x200 + TOPHYS(r0_ram) 342 lwi r5, r6, 0x200 + TOPHYS(r0_ram)
342 addi r5, r5, 1 343 addi r5, r5, 1
343 swi r5, r6, 0x200 + TOPHYS(r0_ram) 344 swi r5, r6, 0x200 + TOPHYS(r0_ram)
345#endif
344/* end */ 346/* end */
345 /* Load the HW Exception vector */ 347 /* Load the HW Exception vector */
346 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) 348 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
376 swi r18, r1, PT_R18 378 swi r18, r1, PT_R18
377 379
378 or r5, r1, r0 380 or r5, r1, r0
379 andi r6, r3, 0x1F; /* Load ESR[EC] */ 381 andi r6, r4, 0x1F; /* Load ESR[EC] */
380 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ 382 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
381 swi r7, r1, PT_MODE 383 swi r7, r1, PT_MODE
382 mfs r7, rfsr 384 mfs r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
426 */ 428 */
427handle_unaligned_ex: 429handle_unaligned_ex:
428 /* Working registers already saved: R3, R4, R5, R6 430 /* Working registers already saved: R3, R4, R5, R6
429 * R3 = ESR 431 * R4 = ESR
430 * R4 = EAR 432 * R3 = EAR
431 */ 433 */
432#ifdef CONFIG_MMU 434#ifdef CONFIG_MMU
433 andi r6, r3, 0x1000 /* Check ESR[DS] */ 435 andi r6, r4, 0x1000 /* Check ESR[DS] */
434 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ 436 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
435 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 437 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
436 nop 438 nop
@@ -439,7 +441,7 @@ _no_delayslot:
439 RESTORE_STATE; 441 RESTORE_STATE;
440 bri unaligned_data_trap 442 bri unaligned_data_trap
441#endif 443#endif
442 andi r6, r3, 0x3E0; /* Mask and extract the register operand */ 444 andi r6, r4, 0x3E0; /* Mask and extract the register operand */
443 srl r6, r6; /* r6 >> 5 */ 445 srl r6, r6; /* r6 >> 5 */
444 srl r6, r6; 446 srl r6, r6;
445 srl r6, r6; 447 srl r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
448 /* Store the register operand in a temporary location */ 450 /* Store the register operand in a temporary location */
449 sbi r6, r0, TOPHYS(ex_reg_op); 451 sbi r6, r0, TOPHYS(ex_reg_op);
450 452
451 andi r6, r3, 0x400; /* Extract ESR[S] */ 453 andi r6, r4, 0x400; /* Extract ESR[S] */
452 bnei r6, ex_sw; 454 bnei r6, ex_sw;
453ex_lw: 455ex_lw:
454 andi r6, r3, 0x800; /* Extract ESR[W] */ 456 andi r6, r4, 0x800; /* Extract ESR[W] */
455 beqi r6, ex_lhw; 457 beqi r6, ex_lhw;
456 lbui r5, r4, 0; /* Exception address in r4 */ 458 lbui r5, r3, 0; /* Exception address in r3 */
457 /* Load a word, byte-by-byte from destination address 459 /* Load a word, byte-by-byte from destination address
458 and save it in tmp space */ 460 and save it in tmp space */
459 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
460 lbui r5, r4, 1; 462 lbui r5, r3, 1;
461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
462 lbui r5, r4, 2; 464 lbui r5, r3, 2;
463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); 465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
464 lbui r5, r4, 3; 466 lbui r5, r3, 3;
465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); 467 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
466 /* Get the destination register value into r3 */ 468 /* Get the destination register value into r4 */
467 lwi r3, r0, TOPHYS(ex_tmp_data_loc_0); 469 lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
468 bri ex_lw_tail; 470 bri ex_lw_tail;
469ex_lhw: 471ex_lhw:
470 lbui r5, r4, 0; /* Exception address in r4 */ 472 lbui r5, r3, 0; /* Exception address in r3 */
471 /* Load a half-word, byte-by-byte from destination 473 /* Load a half-word, byte-by-byte from destination
472 address and save it in tmp space */ 474 address and save it in tmp space */
473 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
474 lbui r5, r4, 1; 476 lbui r5, r3, 1;
475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 477 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
476 /* Get the destination register value into r3 */ 478 /* Get the destination register value into r4 */
477 lhui r3, r0, TOPHYS(ex_tmp_data_loc_0); 479 lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
478ex_lw_tail: 480ex_lw_tail:
479 /* Get the destination register number into r5 */ 481 /* Get the destination register number into r5 */
480 lbui r5, r0, TOPHYS(ex_reg_op); 482 lbui r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
502 andi r6, r6, 0x800; /* Extract ESR[W] */ 504 andi r6, r6, 0x800; /* Extract ESR[W] */
503 beqi r6, ex_shw; 505 beqi r6, ex_shw;
504 /* Get the word - delay slot */ 506 /* Get the word - delay slot */
505 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 507 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
506 /* Store the word, byte-by-byte into destination address */ 508 /* Store the word, byte-by-byte into destination address */
507 lbui r3, r0, TOPHYS(ex_tmp_data_loc_0); 509 lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
508 sbi r3, r4, 0; 510 sbi r4, r3, 0;
509 lbui r3, r0, TOPHYS(ex_tmp_data_loc_1); 511 lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
510 sbi r3, r4, 1; 512 sbi r4, r3, 1;
511 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 513 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
512 sbi r3, r4, 2; 514 sbi r4, r3, 2;
513 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 515 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
514 sbi r3, r4, 3; 516 sbi r4, r3, 3;
515 bri ex_handler_done; 517 bri ex_handler_done;
516 518
517ex_shw: 519ex_shw:
518 /* Store the lower half-word, byte-by-byte into destination address */ 520 /* Store the lower half-word, byte-by-byte into destination address */
519 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 521 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
520 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 522 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
521 sbi r3, r4, 0; 523 sbi r4, r3, 0;
522 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 524 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
523 sbi r3, r4, 1; 525 sbi r4, r3, 1;
524ex_sw_end: /* Exception handling of store word, ends. */ 526ex_sw_end: /* Exception handling of store word, ends. */
525 527
526ex_handler_done: 528ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
560 */ 562 */
561 mfs r11, rpid 563 mfs r11, rpid
562 nop 564 nop
563 bri 4
564 mfs r3, rear /* Get faulting address */
565 nop
566 /* If we are faulting a kernel address, we have to use the 565 /* If we are faulting a kernel address, we have to use the
567 * kernel page tables. 566 * kernel page tables.
568 */ 567 */
569 ori r4, r0, CONFIG_KERNEL_START 568 ori r5, r0, CONFIG_KERNEL_START
570 cmpu r4, r3, r4 569 cmpu r5, r3, r5
571 bgti r4, ex3 570 bgti r5, ex3
572 /* First, check if it was a zone fault (which means a user 571 /* First, check if it was a zone fault (which means a user
573 * tried to access a kernel or read-protected page - always 572 * tried to access a kernel or read-protected page - always
574 * a SEGV). All other faults here must be stores, so no 573 * a SEGV). All other faults here must be stores, so no
575 * need to check ESR_S as well. */ 574 * need to check ESR_S as well. */
576 mfs r4, resr
577 nop
578 andi r4, r4, 0x800 /* ESR_Z - zone protection */ 575 andi r4, r4, 0x800 /* ESR_Z - zone protection */
579 bnei r4, ex2 576 bnei r4, ex2
580 577
@@ -589,8 +586,6 @@ ex_handler_done:
589 * tried to access a kernel or read-protected page - always 586 * tried to access a kernel or read-protected page - always
590 * a SEGV). All other faults here must be stores, so no 587 * a SEGV). All other faults here must be stores, so no
591 * need to check ESR_S as well. */ 588 * need to check ESR_S as well. */
592 mfs r4, resr
593 nop
594 andi r4, r4, 0x800 /* ESR_Z */ 589 andi r4, r4, 0x800 /* ESR_Z */
595 bnei r4, ex2 590 bnei r4, ex2
596 /* get current task address */ 591 /* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
665 * R3 = ESR 660 * R3 = ESR
666 */ 661 */
667 662
668 mfs r3, rear /* Get faulting address */
669 nop
670 RESTORE_STATE; 663 RESTORE_STATE;
671 bri page_fault_instr_trap 664 bri page_fault_instr_trap
672 665
@@ -677,18 +670,15 @@ ex_handler_done:
677 */ 670 */
678 handle_data_tlb_miss_exception: 671 handle_data_tlb_miss_exception:
679 /* Working registers already saved: R3, R4, R5, R6 672 /* Working registers already saved: R3, R4, R5, R6
680 * R3 = ESR 673 * R3 = EAR, R4 = ESR
681 */ 674 */
682 mfs r11, rpid 675 mfs r11, rpid
683 nop 676 nop
684 bri 4
685 mfs r3, rear /* Get faulting address */
686 nop
687 677
688 /* If we are faulting a kernel address, we have to use the 678 /* If we are faulting a kernel address, we have to use the
689 * kernel page tables. */ 679 * kernel page tables. */
690 ori r4, r0, CONFIG_KERNEL_START 680 ori r6, r0, CONFIG_KERNEL_START
691 cmpu r4, r3, r4 681 cmpu r4, r3, r6
692 bgti r4, ex5 682 bgti r4, ex5
693 ori r4, r0, swapper_pg_dir 683 ori r4, r0, swapper_pg_dir
694 mts rpid, r0 /* TLB will have 0 TID */ 684 mts rpid, r0 /* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
731 * Many of these bits are software only. Bits we don't set 721 * Many of these bits are software only. Bits we don't set
732 * here we (properly should) assume have the appropriate value. 722 * here we (properly should) assume have the appropriate value.
733 */ 723 */
724 brid finish_tlb_load
734 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 725 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
735
736 bri finish_tlb_load
737 ex7: 726 ex7:
738 /* The bailout. Restore registers to pre-exception conditions 727 /* The bailout. Restore registers to pre-exception conditions
739 * and call the heavyweights to help us out. 728 * and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
754 */ 743 */
755 mfs r11, rpid 744 mfs r11, rpid
756 nop 745 nop
757 bri 4
758 mfs r3, rear /* Get faulting address */
759 nop
760 746
761 /* If we are faulting a kernel address, we have to use the 747 /* If we are faulting a kernel address, we have to use the
762 * kernel page tables. 748 * kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
792 lwi r4, r5, 0 /* Get Linux PTE */ 778 lwi r4, r5, 0 /* Get Linux PTE */
793 779
794 andi r6, r4, _PAGE_PRESENT 780 andi r6, r4, _PAGE_PRESENT
795 beqi r6, ex7 781 beqi r6, ex10
796 782
797 ori r4, r4, _PAGE_ACCESSED 783 ori r4, r4, _PAGE_ACCESSED
798 swi r4, r5, 0 784 swi r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
805 * Many of these bits are software only. Bits we don't set 791 * Many of these bits are software only. Bits we don't set
806 * here we (properly should) assume have the appropriate value. 792 * here we (properly should) assume have the appropriate value.
807 */ 793 */
794 brid finish_tlb_load
808 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 795 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
809
810 bri finish_tlb_load
811 ex10: 796 ex10:
812 /* The bailout. Restore registers to pre-exception conditions 797 /* The bailout. Restore registers to pre-exception conditions
813 * and call the heavyweights to help us out. 798 * and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
837 andi r5, r5, (MICROBLAZE_TLB_SIZE-1) 822 andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
838 ori r6, r0, 1 823 ori r6, r0, 1
839 cmp r31, r5, r6 824 cmp r31, r5, r6
840 blti r31, sem 825 blti r31, ex12
841 addik r5, r6, 1 826 addik r5, r6, 1
842 sem: 827 ex12:
843 /* MS: save back current TLB index */ 828 /* MS: save back current TLB index */
844 swi r5, r0, TOPHYS(tlb_index) 829 swi r5, r0, TOPHYS(tlb_index)
845 830
@@ -859,7 +844,6 @@ ex_handler_done:
859 nop 844 nop
860 845
861 /* Done...restore registers and get out of here. */ 846 /* Done...restore registers and get out of here. */
862 ex12:
863 mts rpid, r11 847 mts rpid, r11
864 nop 848 nop
865 bri 4 849 bri 4
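Editorial note: the hw_exception_handler.S hunks above swap the register convention (r3 now carries EAR, the faulting address, and r4 carries ESR, the exception status), drop the now-redundant mfs reads of rear/resr, and fold the final andni into the delay slot of a brid to finish_tlb_load. As a readability aid, here is a small C sketch (not kernel code) of the ESR bit fields the unaligned-access path tests; the masks are copied from the hunks above.

#include <stdint.h>

/* Editorial sketch only: decode the MicroBlaze ESR fields used by the
 * unaligned-access handler above. */
struct mb_esr_fields {
	unsigned int ec;        /* 0x01f: exception cause               */
	unsigned int rx;        /* 0x3e0: destination register number   */
	unsigned int is_store;  /* 0x400: ESR[S], store access          */
	unsigned int is_word;   /* 0x800: ESR[W], word-sized access     */
	unsigned int in_delay;  /* 0x1000: ESR[DS], fault in delay slot */
};

static struct mb_esr_fields decode_esr(uint32_t esr)
{
	struct mb_esr_fields f;

	f.ec       = esr & 0x1f;
	f.rx       = (esr & 0x3e0) >> 5;
	f.is_store = !!(esr & 0x400);
	f.is_word  = !!(esr & 0x800);
	f.in_delay = !!(esr & 0x1000);
	return f;
}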
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8e..7cf86498326c 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
26 * We avoid flushing the pinned 0, 1 and possibly 2 entries. 26 * We avoid flushing the pinned 0, 1 and possibly 2 entries.
27 */ 27 */
28.globl _tlbia; 28.globl _tlbia;
29.type _tlbia, @function
29.align 4; 30.align 4;
30_tlbia: 31_tlbia:
31 addik r12, r0, 63 /* flush all entries (63 - 3) */ 32 addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
32 /* isync */ 33 /* isync */
33_tlbia_1: 34_tlbia_1:
34 mts rtlbx, r12 35 mts rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
41 /* sync */ 42 /* sync */
42 rtsd r15, 8 43 rtsd r15, 8
43 nop 44 nop
45 .size _tlbia, . - _tlbia
44 46
45/* 47/*
46 * Flush MMU TLB for a particular address (in r5) 48 * Flush MMU TLB for a particular address (in r5)
47 */ 49 */
48.globl _tlbie; 50.globl _tlbie;
51.type _tlbie, @function
49.align 4; 52.align 4;
50_tlbie: 53_tlbie:
51 mts rtlbsx, r5 /* look up the address in TLB */ 54 mts rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
59 rtsd r15, 8 62 rtsd r15, 8
60 nop 63 nop
61 64
65 .size _tlbie, . - _tlbie
66
62/* 67/*
63 * Allocate TLB entry for early console 68 * Allocate TLB entry for early console
64 */ 69 */
65.globl early_console_reg_tlb_alloc; 70.globl early_console_reg_tlb_alloc;
71.type early_console_reg_tlb_alloc, @function
66.align 4; 72.align 4;
67early_console_reg_tlb_alloc: 73early_console_reg_tlb_alloc:
68 /* 74 /*
69 * Load a TLB entry for the UART, so that microblaze_progress() can use 75 * Load a TLB entry for the UART, so that microblaze_progress() can use
70 * the UARTs nice and early. We use a 4k real==virtual mapping. 76 * the UARTs nice and early. We use a 4k real==virtual mapping.
71 */ 77 */
72 ori r4, r0, 63 78 ori r4, r0, MICROBLAZE_TLB_SIZE - 1
73 mts rtlbx, r4 /* TLB slot 2 */ 79 mts rtlbx, r4 /* TLB slot 2 */
74 80
75 or r4,r5,r0 81 or r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
86 rtsd r15, 8 92 rtsd r15, 8
87 nop 93 nop
88 94
95 .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
96
89/* 97/*
90 * Copy a whole page (4096 bytes). 98 * Copy a whole page (4096 bytes).
91 */ 99 */
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
104#define DCACHE_LINE_BYTES (4 * 4) 112#define DCACHE_LINE_BYTES (4 * 4)
105 113
106.globl copy_page; 114.globl copy_page;
115.type copy_page, @function
107.align 4; 116.align 4;
108copy_page: 117copy_page:
109 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 118 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
118 addik r11, r11, -1 127 addik r11, r11, -1
119 rtsd r15, 8 128 rtsd r15, 8
120 nop 129 nop
130
131 .size copy_page, . - copy_page
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf06c9e..09bed44dfcd3 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <asm/system.h> 16#include <asm/system.h>
17#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
18#include <asm/uaccess.h> /* for USER_DS macros */
18#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
19 20
20void show_regs(struct pt_regs *regs) 21void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
74 75
75void default_idle(void) 76void default_idle(void)
76{ 77{
77 if (!hlt_counter) { 78 if (likely(hlt_counter)) {
79 while (!need_resched())
80 cpu_relax();
81 } else {
78 clear_thread_flag(TIF_POLLING_NRFLAG); 82 clear_thread_flag(TIF_POLLING_NRFLAG);
79 smp_mb__after_clear_bit(); 83 smp_mb__after_clear_bit();
80 local_irq_disable(); 84 local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
82 cpu_sleep(); 86 cpu_sleep();
83 local_irq_enable(); 87 local_irq_enable();
84 set_thread_flag(TIF_POLLING_NRFLAG); 88 set_thread_flag(TIF_POLLING_NRFLAG);
85 } else 89 }
86 while (!need_resched())
87 cpu_relax();
88} 90}
89 91
90void cpu_idle(void) 92void cpu_idle(void)
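Editorial note: the process.c hunk restructures default_idle() so the polling case reads naturally instead of trailing the sleep path as a bare else. A minimal C sketch of the resulting control flow, with stand-in stubs for the kernel primitives (placeholders only, not the real helpers):

/* Editorial sketch of the reworked default_idle() flow. */
static int hlt_counter;

static int need_resched_stub(void) { return 1; }  /* pretend work arrived */
static void cpu_relax_stub(void)   { }
static void cpu_sleep_stub(void)   { }

static void default_idle_sketch(void)
{
	if (hlt_counter) {
		/* hlt_counter non-zero: busy-poll until rescheduling is needed */
		while (!need_resched_stub())
			cpu_relax_stub();
	} else {
		/* otherwise disable interrupts, sleep, re-enable (elided here) */
		cpu_sleep_stub();
	}
}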
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index f974ec7aa357..17c98dbcec88 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
92} 92}
93#endif /* CONFIG_MTD_UCLINUX_EBSS */ 93#endif /* CONFIG_MTD_UCLINUX_EBSS */
94 94
95#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
96#define eprintk early_printk
97#else
98#define eprintk printk
99#endif
100
95void __init machine_early_init(const char *cmdline, unsigned int ram, 101void __init machine_early_init(const char *cmdline, unsigned int ram,
96 unsigned int fdt, unsigned int msr) 102 unsigned int fdt, unsigned int msr)
97{ 103{
@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
139 setup_early_printk(NULL); 145 setup_early_printk(NULL);
140#endif 146#endif
141 147
142 early_printk("Ramdisk addr 0x%08x, ", ram); 148 eprintk("Ramdisk addr 0x%08x, ", ram);
143 if (fdt) 149 if (fdt)
144 early_printk("FDT at 0x%08x\n", fdt); 150 eprintk("FDT at 0x%08x\n", fdt);
145 else 151 else
146 early_printk("Compiled-in FDT at 0x%08x\n", 152 eprintk("Compiled-in FDT at 0x%08x\n",
147 (unsigned int)_fdt_start); 153 (unsigned int)_fdt_start);
148 154
149#ifdef CONFIG_MTD_UCLINUX 155#ifdef CONFIG_MTD_UCLINUX
150 early_printk("Found romfs @ 0x%08x (0x%08x)\n", 156 eprintk("Found romfs @ 0x%08x (0x%08x)\n",
151 romfs_base, romfs_size); 157 romfs_base, romfs_size);
152 early_printk("#### klimit %p ####\n", old_klimit); 158 eprintk("#### klimit %p ####\n", old_klimit);
153 BUG_ON(romfs_size < 0); /* What else can we do? */ 159 BUG_ON(romfs_size < 0); /* What else can we do? */
154 160
155 early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", 161 eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
156 romfs_size, romfs_base, (unsigned)&_ebss); 162 romfs_size, romfs_base, (unsigned)&_ebss);
157 163
158 early_printk("New klimit: 0x%08x\n", (unsigned)klimit); 164 eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
159#endif 165#endif
160 166
161#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 167#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
162 if (msr) 168 if (msr)
163 early_printk("!!!Your kernel has setup MSR instruction but " 169 eprintk("!!!Your kernel has setup MSR instruction but "
164 "CPU don't have it %d\n", msr); 170 "CPU don't have it %d\n", msr);
165#else 171#else
166 if (!msr) 172 if (!msr)
167 early_printk("!!!Your kernel not setup MSR instruction but " 173 eprintk("!!!Your kernel not setup MSR instruction but "
168 "CPU have it %d\n", msr); 174 "CPU have it %d\n", msr);
169#endif 175#endif
170 176
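Editorial note: the setup.c change funnels every boot message through a local eprintk macro, presumably so the messages still reach the log on configurations without the uartlite early console. The pattern in isolation (plain C, with printf standing in for printk and a made-up HAVE_EARLY_CONSOLE symbol standing in for the CONFIG test):

#include <stdio.h>

#ifdef HAVE_EARLY_CONSOLE
#define eprintk early_printk   /* hypothetical early-console path  */
#else
#define eprintk printf         /* fallback; printk in the kernel   */
#endif

int main(void)
{
	eprintk("Ramdisk addr 0x%08x, ", 0x08000000u);
	return 0;
}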
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31b..5e4570ef515c 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
22 __enable_hw_exceptions(); 22 __enable_hw_exceptions();
23} 23}
24 24
25static int kstack_depth_to_print = 24; 25static unsigned long kstack_depth_to_print = 24;
26 26
27static int __init kstack_setup(char *s) 27static int __init kstack_setup(char *s)
28{ 28{
29 kstack_depth_to_print = strict_strtoul(s, 0, NULL); 29 return !strict_strtoul(s, 0, &kstack_depth_to_print);
30
31 return 1;
32} 30}
33__setup("kstack=", kstack_setup); 31__setup("kstack=", kstack_setup);
34 32
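Editorial note: the traps.c hunk makes kstack_depth_to_print an unsigned long (matching strict_strtoul's output parameter) and lets the parse result drive the return value, so a malformed kstack= argument should leave the default of 24 untouched. Roughly equivalent logic in plain C, with strtoul plus end-pointer checking standing in for strict_strtoul:

#include <stdlib.h>

static unsigned long kstack_depth_to_print = 24;

/* Editorial sketch: accept the option only if the whole string parses. */
static int kstack_setup_sketch(const char *s)
{
	char *end;
	unsigned long v = strtoul(s, &end, 0);

	if (end == s || *end != '\0')
		return 0;               /* parse failure: option not handled */

	kstack_depth_to_print = v;
	return 1;
}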
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index b579db068c06..4dfe47d3cd91 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,5 +10,4 @@ else
10lib-y += memcpy.o memmove.o 10lib-y += memcpy.o memmove.o
11endif 11endif
12 12
13lib-$(CONFIG_NO_MMU) += uaccess.o 13lib-y += uaccess_old.o
14lib-$(CONFIG_MMU) += uaccess_old.o
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 02e3ab4eddf3..fdc48bb065d8 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -30,8 +30,9 @@
30 */ 30 */
31 31
32#include <linux/linkage.h> 32#include <linux/linkage.h>
33 33 .text
34 .globl memcpy 34 .globl memcpy
35 .type memcpy, @function
35 .ent memcpy 36 .ent memcpy
36 37
37memcpy: 38memcpy:
@@ -345,9 +346,11 @@ a_done:
345 rtsd r15, 8 346 rtsd r15, 8
346 nop 347 nop
347 348
349.size memcpy, . - memcpy
348.end memcpy 350.end memcpy
349/*----------------------------------------------------------------------------*/ 351/*----------------------------------------------------------------------------*/
350 .globl memmove 352 .globl memmove
353 .type memmove, @function
351 .ent memmove 354 .ent memmove
352 355
353memmove: 356memmove:
@@ -659,4 +662,5 @@ d_done:
659 rtsd r15, 8 662 rtsd r15, 8
660 nop 663 nop
661 664
665.size memmove, . - memmove
662.end memmove 666.end memmove
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index cc2108b6b260..014bac92bdff 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
53 const uint32_t *i_src; 53 const uint32_t *i_src;
54 uint32_t *i_dst; 54 uint32_t *i_dst;
55 55
56 if (c >= 4) { 56 if (likely(c >= 4)) {
57 unsigned value, buf_hold; 57 unsigned value, buf_hold;
58 58
59 /* Align the destination to a word boundary. */ 59 /* Align the destination to a word boundary. */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 4df851d41a29..ecfb663e1fc1 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -33,22 +33,23 @@
33#ifdef __HAVE_ARCH_MEMSET 33#ifdef __HAVE_ARCH_MEMSET
34void *memset(void *v_src, int c, __kernel_size_t n) 34void *memset(void *v_src, int c, __kernel_size_t n)
35{ 35{
36
37 char *src = v_src; 36 char *src = v_src;
38#ifdef CONFIG_OPT_LIB_FUNCTION 37#ifdef CONFIG_OPT_LIB_FUNCTION
39 uint32_t *i_src; 38 uint32_t *i_src;
40 uint32_t w32; 39 uint32_t w32 = 0;
41#endif 40#endif
42 /* Truncate c to 8 bits */ 41 /* Truncate c to 8 bits */
43 c = (c & 0xFF); 42 c = (c & 0xFF);
44 43
45#ifdef CONFIG_OPT_LIB_FUNCTION 44#ifdef CONFIG_OPT_LIB_FUNCTION
46 /* Make a repeating word out of it */ 45 if (unlikely(c)) {
47 w32 = c; 46 /* Make a repeating word out of it */
48 w32 |= w32 << 8; 47 w32 = c;
49 w32 |= w32 << 16; 48 w32 |= w32 << 8;
49 w32 |= w32 << 16;
50 }
50 51
51 if (n >= 4) { 52 if (likely(n >= 4)) {
52 /* Align the destination to a word boundary */ 53 /* Align the destination to a word boundary */
53 /* This is done in an endian independent manner */ 54 /* This is done in an endian independent manner */
54 switch ((unsigned) src & 3) { 55 switch ((unsigned) src & 3) {
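Editorial note: the memset() tweak pre-zeroes w32 and only expands c into a repeated-byte word when c is non-zero, which skips three shift/or operations in the very common memset(p, 0, n) case; the likely/unlikely hints follow the same theme. The word construction on its own:

#include <stdint.h>

/* Editorial sketch: build a 32-bit word whose four bytes all equal c,
 * doing no work at all when c == 0, as the patched memset() now does. */
static uint32_t repeat_byte(uint8_t c)
{
	uint32_t w32 = 0;

	if (c) {
		w32 = c;
		w32 |= w32 << 8;    /* 0x000000cc -> 0x0000cccc */
		w32 |= w32 << 16;   /* 0x0000cccc -> 0xcccccccc */
	}
	return w32;
}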
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
deleted file mode 100644
index a853fe089c44..000000000000
--- a/arch/microblaze/lib/uaccess.c
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#include <linux/string.h>
10#include <asm/uaccess.h>
11
12#include <asm/bug.h>
13
14long strnlen_user(const char __user *src, long count)
15{
16 return strlen(src) + 1;
17}
18
19#define __do_strncpy_from_user(dst, src, count, res) \
20 do { \
21 char *tmp; \
22 strncpy(dst, src, count); \
23 for (tmp = dst; *tmp && count > 0; tmp++, count--) \
24 ; \
25 res = (tmp - dst); \
26 } while (0)
27
28long __strncpy_from_user(char *dst, const char __user *src, long count)
29{
30 long res;
31 __do_strncpy_from_user(dst, src, count, res);
32 return res;
33}
34
35long strncpy_from_user(char *dst, const char __user *src, long count)
36{
37 long res = -EFAULT;
38 if (access_ok(VERIFY_READ, src, 1))
39 __do_strncpy_from_user(dst, src, count, res);
40 return res;
41}
42
43unsigned long __copy_tofrom_user(void __user *to,
44 const void __user *from, unsigned long size)
45{
46 memcpy(to, from, size);
47 return 0;
48}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 67f991c14b8a..5810cec54a7a 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -22,6 +22,7 @@
22 22
23 .text 23 .text
24.globl __strncpy_user; 24.globl __strncpy_user;
25.type __strncpy_user, @function
25.align 4; 26.align 4;
26__strncpy_user: 27__strncpy_user:
27 28
@@ -50,7 +51,7 @@ __strncpy_user:
503: 513:
51 rtsd r15,8 52 rtsd r15,8
52 nop 53 nop
53 54 .size __strncpy_user, . - __strncpy_user
54 55
55 .section .fixup, "ax" 56 .section .fixup, "ax"
56 .align 2 57 .align 2
@@ -72,6 +73,7 @@ __strncpy_user:
72 73
73 .text 74 .text
74.globl __strnlen_user; 75.globl __strnlen_user;
76.type __strnlen_user, @function
75.align 4; 77.align 4;
76__strnlen_user: 78__strnlen_user:
77 addik r3,r6,0 79 addik r3,r6,0
@@ -90,7 +92,7 @@ __strnlen_user:
903: 923:
91 rtsd r15,8 93 rtsd r15,8
92 nop 94 nop
93 95 .size __strnlen_user, . - __strnlen_user
94 96
95 .section .fixup,"ax" 97 .section .fixup,"ax"
964: 984:
@@ -108,6 +110,7 @@ __strnlen_user:
108 */ 110 */
109 .text 111 .text
110.globl __copy_tofrom_user; 112.globl __copy_tofrom_user;
113.type __copy_tofrom_user, @function
111.align 4; 114.align 4;
112__copy_tofrom_user: 115__copy_tofrom_user:
113 /* 116 /*
@@ -116,20 +119,34 @@ __copy_tofrom_user:
116 * r7, r3 - count 119 * r7, r3 - count
117 * r4 - tempval 120 * r4 - tempval
118 */ 121 */
119 addik r3,r7,0 122 beqid r7, 3f /* zero size is not likely */
120 beqi r3,3f 123 andi r3, r7, 0x3 /* filter add count */
1211: 124 bneid r3, 4f /* if is odd value then byte copying */
122 lbu r4,r6,r0 125 or r3, r5, r6 /* find if is any to/from unaligned */
123 addik r6,r6,1 126 andi r3, r3, 0x3 /* mask unaligned */
1242: 127 bneid r3, 1f /* it is unaligned -> then jump */
125 sb r4,r5,r0 128 or r3, r0, r0
126 addik r3,r3,-1 129
127 bneid r3,1b 130/* at least one 4 byte copy */
128 addik r5,r5,1 /* delay slot */ 1315: lw r4, r6, r3
1326: sw r4, r5, r3
133 addik r7, r7, -4
134 bneid r7, 5b
135 addik r3, r3, 4
136 addik r3, r7, 0
137 rtsd r15, 8
138 nop
1394: or r3, r0, r0
1401: lbu r4,r6,r3
1412: sb r4,r5,r3
142 addik r7,r7,-1
143 bneid r7,1b
144 addik r3,r3,1 /* delay slot */
1293: 1453:
146 addik r3,r7,0
130 rtsd r15,8 147 rtsd r15,8
131 nop 148 nop
132 149 .size __copy_tofrom_user, . - __copy_tofrom_user
133 150
134 .section __ex_table,"a" 151 .section __ex_table,"a"
135 .word 1b,3b,2b,3b 152 .word 1b,3b,2b,3b,5b,3b,6b,3b
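Editorial note: the __copy_tofrom_user rewrite adds a word-at-a-time fast path for counts that are non-zero multiples of four with both pointers word aligned, and keeps the old byte loop as the fallback; the extended __ex_table line covers the two new faulting labels (5b and 6b). A C rendering of the flow, with the fault handling omitted:

#include <stdint.h>

/* Editorial sketch: returns the number of bytes left uncopied
 * (0 on success), matching the assembly's return convention. */
static unsigned long copy_tofrom_sketch(void *to, const void *from,
					unsigned long n)
{
	if (n && !(n & 3) &&
	    !(((uintptr_t)to | (uintptr_t)from) & 3)) {
		uint32_t *d = to;
		const uint32_t *s = from;

		for (; n; n -= 4)       /* aligned word-copy fast path */
			*d++ = *s++;
	} else {
		unsigned char *d = to;
		const unsigned char *s = from;

		for (; n; n--)          /* byte-copy fallback */
			*d++ = *s++;
	}
	return n;
}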
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff2..7af87f4b2c2c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
106 regs->esr = error_code; 106 regs->esr = error_code;
107 107
108 /* On a kernel SLB miss we can only check for a valid exception entry */ 108 /* On a kernel SLB miss we can only check for a valid exception entry */
109 if (kernel_mode(regs) && (address >= TASK_SIZE)) { 109 if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
110 printk(KERN_WARNING "kernel task_size exceed"); 110 printk(KERN_WARNING "kernel task_size exceed");
111 _exception(SIGSEGV, regs, code, address); 111 _exception(SIGSEGV, regs, code, address);
112 } 112 }
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
122 } 122 }
123#endif /* CONFIG_KGDB */ 123#endif /* CONFIG_KGDB */
124 124
125 if (in_atomic() || !mm) { 125 if (unlikely(in_atomic() || !mm)) {
126 if (kernel_mode(regs)) 126 if (kernel_mode(regs))
127 goto bad_area_nosemaphore; 127 goto bad_area_nosemaphore;
128 128
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
150 * source. If this is invalid we can skip the address space check, 150 * source. If this is invalid we can skip the address space check,
151 * thus avoiding the deadlock. 151 * thus avoiding the deadlock.
152 */ 152 */
153 if (!down_read_trylock(&mm->mmap_sem)) { 153 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
154 if (kernel_mode(regs) && !search_exception_tables(regs->pc)) 154 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
155 goto bad_area_nosemaphore; 155 goto bad_area_nosemaphore;
156 156
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
158 } 158 }
159 159
160 vma = find_vma(mm, address); 160 vma = find_vma(mm, address);
161 if (!vma) 161 if (unlikely(!vma))
162 goto bad_area; 162 goto bad_area;
163 163
164 if (vma->vm_start <= address) 164 if (vma->vm_start <= address)
165 goto good_area; 165 goto good_area;
166 166
167 if (!(vma->vm_flags & VM_GROWSDOWN)) 167 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
168 goto bad_area; 168 goto bad_area;
169 169
170 if (!is_write) 170 if (unlikely(!is_write))
171 goto bad_area; 171 goto bad_area;
172 172
173 /* 173 /*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
179 * before setting the user r1. Thus we allow the stack to 179 * before setting the user r1. Thus we allow the stack to
180 * expand to 1MB without further checks. 180 * expand to 1MB without further checks.
181 */ 181 */
182 if (address + 0x100000 < vma->vm_end) { 182 if (unlikely(address + 0x100000 < vma->vm_end)) {
183 183
184 /* get user regs even if this fault is in kernel mode */ 184 /* get user regs even if this fault is in kernel mode */
185 struct pt_regs *uregs = current->thread.regs; 185 struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
209 code = SEGV_ACCERR; 209 code = SEGV_ACCERR;
210 210
211 /* a write */ 211 /* a write */
212 if (is_write) { 212 if (unlikely(is_write)) {
213 if (!(vma->vm_flags & VM_WRITE)) 213 if (unlikely(!(vma->vm_flags & VM_WRITE)))
214 goto bad_area; 214 goto bad_area;
215 /* a read */ 215 /* a read */
216 } else { 216 } else {
217 /* protection fault */ 217 /* protection fault */
218 if (error_code & 0x08000000) 218 if (unlikely(error_code & 0x08000000))
219 goto bad_area; 219 goto bad_area;
220 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 220 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
221 goto bad_area; 221 goto bad_area;
222 } 222 }
223 223
@@ -235,7 +235,7 @@ survive:
235 goto do_sigbus; 235 goto do_sigbus;
236 BUG(); 236 BUG();
237 } 237 }
238 if (fault & VM_FAULT_MAJOR) 238 if (unlikely(fault & VM_FAULT_MAJOR))
239 current->maj_flt++; 239 current->maj_flt++;
240 else 240 else
241 current->min_flt++; 241 current->min_flt++;
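Editorial note: the mm/fault.c hunk only adds branch-prediction hints on the error paths so the compiler lays out the common path as straight-line code. For reference, likely()/unlikely() are thin wrappers around GCC's __builtin_expect, roughly:

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)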
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 77c9e3033e71..f42c2dde8b1c 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -166,7 +166,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
166 for (addr = begin; addr < end; addr += PAGE_SIZE) { 166 for (addr = begin; addr < end; addr += PAGE_SIZE) {
167 ClearPageReserved(virt_to_page(addr)); 167 ClearPageReserved(virt_to_page(addr));
168 init_page_count(virt_to_page(addr)); 168 init_page_count(virt_to_page(addr));
169 memset((void *)addr, 0xcc, PAGE_SIZE);
170 free_page(addr); 169 free_page(addr);
171 totalram_pages++; 170 totalram_pages++;
172 } 171 }
@@ -209,14 +208,6 @@ void __init mem_init(void)
209} 208}
210 209
211#ifndef CONFIG_MMU 210#ifndef CONFIG_MMU
212/* Check against bounds of physical memory */
213int ___range_ok(unsigned long addr, unsigned long size)
214{
215 return ((addr < memory_start) ||
216 ((addr + size) > memory_end));
217}
218EXPORT_SYMBOL(___range_ok);
219
220int page_is_ram(unsigned long pfn) 211int page_is_ram(unsigned long pfn)
221{ 212{
222 return __range_ok(pfn, 0); 213 return __range_ok(pfn, 0);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 63a6fd07c48f..d31312cde6ea 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
154 err = 0; 154 err = 0;
155 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 155 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
156 __pgprot(flags))); 156 __pgprot(flags)));
157 if (mem_init_done) 157 if (unlikely(mem_init_done))
158 flush_HPTE(0, va, pmd_val(*pd)); 158 flush_HPTE(0, va, pmd_val(*pd));
159 /* flush_HPTE(0, va, pg); */ 159 /* flush_HPTE(0, va, pg); */
160 } 160 }
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index b485a87c94e1..22e507c8a556 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -128,7 +128,6 @@ _GLOBAL(__restore_cpu_power7)
128 /* place holder */ 128 /* place holder */
129 blr 129 blr
130 130
131#ifdef CONFIG_EVENT_TRACING
132/* 131/*
133 * Get a minimal set of registers for our caller's nth caller. 132 * Get a minimal set of registers for our caller's nth caller.
134 * r3 = regs pointer, r5 = n. 133 * r3 = regs pointer, r5 = n.
@@ -154,4 +153,3 @@ _GLOBAL(perf_arch_fetch_caller_regs)
154 PPC_STL r4,_NIP-STACK_FRAME_OVERHEAD(r3) 153 PPC_STL r4,_NIP-STACK_FRAME_OVERHEAD(r3)
155 PPC_STL r7,_LINK-STACK_FRAME_OVERHEAD(r3) 154 PPC_STL r7,_LINK-STACK_FRAME_OVERHEAD(r3)
156 blr 155 blr
157#endif /* CONFIG_EVENT_TRACING */
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 929d017535a3..d4f8be307cd5 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -481,6 +481,8 @@ mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
481 if (rc) 481 if (rc)
482 goto err_bcom_rx_irq; 482 goto err_bcom_rx_irq;
483 483
484 lpbfifo.dma_irqs_enabled = 1;
485
484 /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */ 486 /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
485 lpbfifo.bcom_tx_task = 487 lpbfifo.bcom_tx_task =
486 bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, 488 bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index 18e3356406f3..6041c66dd10e 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc2
4# Mon Jan 4 11:20:36 2010 4# Mon Mar 29 02:21:58 2010
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_SUPERH32=y 7CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y 13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 15CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_IRQ_PROBE=y
17CONFIG_IRQ_PER_CPU=y 16CONFIG_IRQ_PER_CPU=y
17CONFIG_SPARSE_IRQ=y
18CONFIG_GENERIC_GPIO=y 18CONFIG_GENERIC_GPIO=y
19CONFIG_GENERIC_TIME=y 19CONFIG_GENERIC_TIME=y
20CONFIG_GENERIC_CLOCKEVENTS=y 20CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
32CONFIG_ARCH_HAS_DEFAULT_IDLE=y 32CONFIG_ARCH_HAS_DEFAULT_IDLE=y
33CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y 33CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
34CONFIG_DMA_NONCOHERENT=y 34CONFIG_DMA_NONCOHERENT=y
35CONFIG_NEED_DMA_MAP_STATE=y
35CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 36CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
36CONFIG_CONSTRUCTORS=y 37CONFIG_CONSTRUCTORS=y
37 38
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION=""
47CONFIG_HAVE_KERNEL_GZIP=y 48CONFIG_HAVE_KERNEL_GZIP=y
48CONFIG_HAVE_KERNEL_BZIP2=y 49CONFIG_HAVE_KERNEL_BZIP2=y
49CONFIG_HAVE_KERNEL_LZMA=y 50CONFIG_HAVE_KERNEL_LZMA=y
51CONFIG_HAVE_KERNEL_LZO=y
50CONFIG_KERNEL_GZIP=y 52CONFIG_KERNEL_GZIP=y
51# CONFIG_KERNEL_BZIP2 is not set 53# CONFIG_KERNEL_BZIP2 is not set
52# CONFIG_KERNEL_LZMA is not set 54# CONFIG_KERNEL_LZMA is not set
55# CONFIG_KERNEL_LZO is not set
53CONFIG_SWAP=y 56CONFIG_SWAP=y
54CONFIG_SYSVIPC=y 57CONFIG_SYSVIPC=y
55CONFIG_SYSVIPC_SYSCTL=y 58CONFIG_SYSVIPC_SYSCTL=y
@@ -71,14 +74,8 @@ CONFIG_RCU_FANOUT=32
71# CONFIG_TREE_RCU_TRACE is not set 74# CONFIG_TREE_RCU_TRACE is not set
72# CONFIG_IKCONFIG is not set 75# CONFIG_IKCONFIG is not set
73CONFIG_LOG_BUF_SHIFT=14 76CONFIG_LOG_BUF_SHIFT=14
74CONFIG_GROUP_SCHED=y
75CONFIG_FAIR_GROUP_SCHED=y
76# CONFIG_RT_GROUP_SCHED is not set
77CONFIG_USER_SCHED=y
78# CONFIG_CGROUP_SCHED is not set
79# CONFIG_CGROUPS is not set 77# CONFIG_CGROUPS is not set
80CONFIG_SYSFS_DEPRECATED=y 78# CONFIG_SYSFS_DEPRECATED_V2 is not set
81CONFIG_SYSFS_DEPRECATED_V2=y
82# CONFIG_RELAY is not set 79# CONFIG_RELAY is not set
83# CONFIG_NAMESPACES is not set 80# CONFIG_NAMESPACES is not set
84# CONFIG_BLK_DEV_INITRD is not set 81# CONFIG_BLK_DEV_INITRD is not set
@@ -107,7 +104,7 @@ CONFIG_PERF_USE_VMALLOC=y
107# 104#
108# Kernel Performance Events And Counters 105# Kernel Performance Events And Counters
109# 106#
110# CONFIG_PERF_EVENTS is not set 107CONFIG_PERF_EVENTS=y
111# CONFIG_PERF_COUNTERS is not set 108# CONFIG_PERF_COUNTERS is not set
112CONFIG_VM_EVENT_COUNTERS=y 109CONFIG_VM_EVENT_COUNTERS=y
113CONFIG_COMPAT_BRK=y 110CONFIG_COMPAT_BRK=y
@@ -116,13 +113,13 @@ CONFIG_SLAB=y
116# CONFIG_SLOB is not set 113# CONFIG_SLOB is not set
117# CONFIG_PROFILING is not set 114# CONFIG_PROFILING is not set
118CONFIG_HAVE_OPROFILE=y 115CONFIG_HAVE_OPROFILE=y
119CONFIG_HAVE_IOREMAP_PROT=y
120CONFIG_HAVE_KPROBES=y 116CONFIG_HAVE_KPROBES=y
121CONFIG_HAVE_KRETPROBES=y 117CONFIG_HAVE_KRETPROBES=y
122CONFIG_HAVE_ARCH_TRACEHOOK=y 118CONFIG_HAVE_ARCH_TRACEHOOK=y
123CONFIG_HAVE_DMA_ATTRS=y 119CONFIG_HAVE_DMA_ATTRS=y
124CONFIG_HAVE_CLK=y 120CONFIG_HAVE_CLK=y
125CONFIG_HAVE_DMA_API_DEBUG=y 121CONFIG_HAVE_DMA_API_DEBUG=y
122CONFIG_HAVE_HW_BREAKPOINT=y
126 123
127# 124#
128# GCOV-based kernel profiling 125# GCOV-based kernel profiling
@@ -234,12 +231,12 @@ CONFIG_CPU_SUBTYPE_SH7724=y
234CONFIG_QUICKLIST=y 231CONFIG_QUICKLIST=y
235CONFIG_MMU=y 232CONFIG_MMU=y
236CONFIG_PAGE_OFFSET=0x80000000 233CONFIG_PAGE_OFFSET=0x80000000
237CONFIG_FORCE_MAX_ZONEORDER=11 234CONFIG_FORCE_MAX_ZONEORDER=12
238CONFIG_MEMORY_START=0x08000000 235CONFIG_MEMORY_START=0x08000000
239CONFIG_MEMORY_SIZE=0x10000000 236CONFIG_MEMORY_SIZE=0x10000000
240CONFIG_29BIT=y 237CONFIG_29BIT=y
241# CONFIG_PMB_ENABLE is not set 238# CONFIG_PMB is not set
242# CONFIG_X2TLB is not set 239CONFIG_X2TLB=y
243CONFIG_VSYSCALL=y 240CONFIG_VSYSCALL=y
244CONFIG_ARCH_FLATMEM_ENABLE=y 241CONFIG_ARCH_FLATMEM_ENABLE=y
245CONFIG_ARCH_SPARSEMEM_ENABLE=y 242CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -247,6 +244,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
247CONFIG_MAX_ACTIVE_REGIONS=1 244CONFIG_MAX_ACTIVE_REGIONS=1
248CONFIG_ARCH_POPULATES_NODE_MAP=y 245CONFIG_ARCH_POPULATES_NODE_MAP=y
249CONFIG_ARCH_SELECT_MEMORY_MODEL=y 246CONFIG_ARCH_SELECT_MEMORY_MODEL=y
247CONFIG_IOREMAP_FIXED=y
248CONFIG_UNCACHED_MAPPING=y
250CONFIG_PAGE_SIZE_4KB=y 249CONFIG_PAGE_SIZE_4KB=y
251# CONFIG_PAGE_SIZE_8KB is not set 250# CONFIG_PAGE_SIZE_8KB is not set
252# CONFIG_PAGE_SIZE_16KB is not set 251# CONFIG_PAGE_SIZE_16KB is not set
@@ -262,7 +261,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
262CONFIG_SPLIT_PTLOCK_CPUS=4 261CONFIG_SPLIT_PTLOCK_CPUS=4
263# CONFIG_PHYS_ADDR_T_64BIT is not set 262# CONFIG_PHYS_ADDR_T_64BIT is not set
264CONFIG_ZONE_DMA_FLAG=0 263CONFIG_ZONE_DMA_FLAG=0
265CONFIG_NR_QUICK=2 264CONFIG_NR_QUICK=1
266# CONFIG_KSM is not set 265# CONFIG_KSM is not set
267CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 266CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
268 267
@@ -337,7 +336,6 @@ CONFIG_SECCOMP=y
337# CONFIG_PREEMPT_VOLUNTARY is not set 336# CONFIG_PREEMPT_VOLUNTARY is not set
338CONFIG_PREEMPT=y 337CONFIG_PREEMPT=y
339CONFIG_GUSA=y 338CONFIG_GUSA=y
340# CONFIG_SPARSE_IRQ is not set
341 339
342# 340#
343# Boot options 341# Boot options
@@ -347,7 +345,7 @@ CONFIG_BOOT_LINK_OFFSET=0x00800000
347CONFIG_ENTRY_OFFSET=0x00001000 345CONFIG_ENTRY_OFFSET=0x00001000
348CONFIG_CMDLINE_OVERWRITE=y 346CONFIG_CMDLINE_OVERWRITE=y
349# CONFIG_CMDLINE_EXTEND is not set 347# CONFIG_CMDLINE_EXTEND is not set
350CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 root=/dev/nfs ip=dhcp mem=120M memchunk.vpu=4m" 348CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 root=/dev/nfs ip=dhcp mem=248M memchunk.vpu=8m memchunk.veu0=4m"
351 349
352# 350#
353# Bus options 351# Bus options
@@ -373,6 +371,7 @@ CONFIG_SUSPEND=y
373CONFIG_SUSPEND_FREEZER=y 371CONFIG_SUSPEND_FREEZER=y
374# CONFIG_HIBERNATION is not set 372# CONFIG_HIBERNATION is not set
375CONFIG_PM_RUNTIME=y 373CONFIG_PM_RUNTIME=y
374CONFIG_PM_OPS=y
376# CONFIG_CPU_IDLE is not set 375# CONFIG_CPU_IDLE is not set
377CONFIG_NET=y 376CONFIG_NET=y
378 377
@@ -380,7 +379,6 @@ CONFIG_NET=y
380# Networking options 379# Networking options
381# 380#
382CONFIG_PACKET=y 381CONFIG_PACKET=y
383# CONFIG_PACKET_MMAP is not set
384CONFIG_UNIX=y 382CONFIG_UNIX=y
385# CONFIG_NET_KEY is not set 383# CONFIG_NET_KEY is not set
386CONFIG_INET=y 384CONFIG_INET=y
@@ -445,7 +443,45 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
445# CONFIG_NET_PKTGEN is not set 443# CONFIG_NET_PKTGEN is not set
446# CONFIG_HAMRADIO is not set 444# CONFIG_HAMRADIO is not set
447# CONFIG_CAN is not set 445# CONFIG_CAN is not set
448# CONFIG_IRDA is not set 446CONFIG_IRDA=y
447
448#
449# IrDA protocols
450#
451# CONFIG_IRLAN is not set
452# CONFIG_IRCOMM is not set
453# CONFIG_IRDA_ULTRA is not set
454
455#
456# IrDA options
457#
458# CONFIG_IRDA_CACHE_LAST_LSAP is not set
459# CONFIG_IRDA_FAST_RR is not set
460# CONFIG_IRDA_DEBUG is not set
461
462#
463# Infrared-port device drivers
464#
465
466#
467# SIR device drivers
468#
469# CONFIG_IRTTY_SIR is not set
470
471#
472# Dongle support
473#
474CONFIG_SH_SIR=y
475# CONFIG_KINGSUN_DONGLE is not set
476# CONFIG_KSDAZZLE_DONGLE is not set
477# CONFIG_KS959_DONGLE is not set
478
479#
480# FIR device drivers
481#
482# CONFIG_USB_IRDA is not set
483# CONFIG_SIGMATEL_FIR is not set
484# CONFIG_MCS_FIR is not set
449# CONFIG_BT is not set 485# CONFIG_BT is not set
450# CONFIG_AF_RXRPC is not set 486# CONFIG_AF_RXRPC is not set
451CONFIG_WIRELESS=y 487CONFIG_WIRELESS=y
@@ -556,6 +592,7 @@ CONFIG_MTD_NAND_IDS=y
556# CONFIG_MTD_NAND_NANDSIM is not set 592# CONFIG_MTD_NAND_NANDSIM is not set
557# CONFIG_MTD_NAND_PLATFORM is not set 593# CONFIG_MTD_NAND_PLATFORM is not set
558# CONFIG_MTD_ALAUDA is not set 594# CONFIG_MTD_ALAUDA is not set
595# CONFIG_MTD_NAND_SH_FLCTL is not set
559# CONFIG_MTD_ONENAND is not set 596# CONFIG_MTD_ONENAND is not set
560 597
561# 598#
@@ -597,6 +634,7 @@ CONFIG_MISC_DEVICES=y
597# CONFIG_ICS932S401 is not set 634# CONFIG_ICS932S401 is not set
598# CONFIG_ENCLOSURE_SERVICES is not set 635# CONFIG_ENCLOSURE_SERVICES is not set
599# CONFIG_ISL29003 is not set 636# CONFIG_ISL29003 is not set
637# CONFIG_SENSORS_TSL2550 is not set
600# CONFIG_DS1682 is not set 638# CONFIG_DS1682 is not set
601# CONFIG_TI_DAC7512 is not set 639# CONFIG_TI_DAC7512 is not set
602# CONFIG_C2PORT is not set 640# CONFIG_C2PORT is not set
@@ -616,6 +654,7 @@ CONFIG_HAVE_IDE=y
616# 654#
617# SCSI device support 655# SCSI device support
618# 656#
657CONFIG_SCSI_MOD=y
619# CONFIG_RAID_ATTRS is not set 658# CONFIG_RAID_ATTRS is not set
620CONFIG_SCSI=y 659CONFIG_SCSI=y
621CONFIG_SCSI_DMA=y 660CONFIG_SCSI_DMA=y
@@ -768,7 +807,29 @@ CONFIG_KEYBOARD_SH_KEYSC=y
768# CONFIG_INPUT_MOUSE is not set 807# CONFIG_INPUT_MOUSE is not set
769# CONFIG_INPUT_JOYSTICK is not set 808# CONFIG_INPUT_JOYSTICK is not set
770# CONFIG_INPUT_TABLET is not set 809# CONFIG_INPUT_TABLET is not set
771# CONFIG_INPUT_TOUCHSCREEN is not set 810CONFIG_INPUT_TOUCHSCREEN=y
811# CONFIG_TOUCHSCREEN_ADS7846 is not set
812# CONFIG_TOUCHSCREEN_AD7877 is not set
813# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
814# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
815# CONFIG_TOUCHSCREEN_AD7879 is not set
816# CONFIG_TOUCHSCREEN_DYNAPRO is not set
817# CONFIG_TOUCHSCREEN_EETI is not set
818# CONFIG_TOUCHSCREEN_FUJITSU is not set
819# CONFIG_TOUCHSCREEN_GUNZE is not set
820# CONFIG_TOUCHSCREEN_ELO is not set
821# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
822# CONFIG_TOUCHSCREEN_MCS5000 is not set
823# CONFIG_TOUCHSCREEN_MTOUCH is not set
824# CONFIG_TOUCHSCREEN_INEXIO is not set
825# CONFIG_TOUCHSCREEN_MK712 is not set
826# CONFIG_TOUCHSCREEN_PENMOUNT is not set
827# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
828# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
829# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
830# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
831CONFIG_TOUCHSCREEN_TSC2007=y
832# CONFIG_TOUCHSCREEN_W90X900 is not set
772# CONFIG_INPUT_MISC is not set 833# CONFIG_INPUT_MISC is not set
773 834
774# 835#
@@ -802,10 +863,10 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
802CONFIG_SERIAL_SH_SCI_CONSOLE=y 863CONFIG_SERIAL_SH_SCI_CONSOLE=y
803CONFIG_SERIAL_CORE=y 864CONFIG_SERIAL_CORE=y
804CONFIG_SERIAL_CORE_CONSOLE=y 865CONFIG_SERIAL_CORE_CONSOLE=y
866# CONFIG_SERIAL_TIMBERDALE is not set
805CONFIG_UNIX98_PTYS=y 867CONFIG_UNIX98_PTYS=y
806# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 868# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
807CONFIG_LEGACY_PTYS=y 869# CONFIG_LEGACY_PTYS is not set
808CONFIG_LEGACY_PTY_COUNT=256
809# CONFIG_IPMI_HANDLER is not set 870# CONFIG_IPMI_HANDLER is not set
810CONFIG_HW_RANDOM=y 871CONFIG_HW_RANDOM=y
811# CONFIG_HW_RANDOM_TIMERIOMEM is not set 872# CONFIG_HW_RANDOM_TIMERIOMEM is not set
@@ -830,6 +891,7 @@ CONFIG_I2C_HELPER_AUTO=y
830# CONFIG_I2C_OCORES is not set 891# CONFIG_I2C_OCORES is not set
831CONFIG_I2C_SH_MOBILE=y 892CONFIG_I2C_SH_MOBILE=y
832# CONFIG_I2C_SIMTEC is not set 893# CONFIG_I2C_SIMTEC is not set
894# CONFIG_I2C_XILINX is not set
833 895
834# 896#
835# External I2C/SMBus adapter drivers 897# External I2C/SMBus adapter drivers
@@ -843,15 +905,9 @@ CONFIG_I2C_SH_MOBILE=y
843# 905#
844# CONFIG_I2C_PCA_PLATFORM is not set 906# CONFIG_I2C_PCA_PLATFORM is not set
845# CONFIG_I2C_STUB is not set 907# CONFIG_I2C_STUB is not set
846
847#
848# Miscellaneous I2C Chip support
849#
850# CONFIG_SENSORS_TSL2550 is not set
851# CONFIG_I2C_DEBUG_CORE is not set 908# CONFIG_I2C_DEBUG_CORE is not set
852# CONFIG_I2C_DEBUG_ALGO is not set 909# CONFIG_I2C_DEBUG_ALGO is not set
853# CONFIG_I2C_DEBUG_BUS is not set 910# CONFIG_I2C_DEBUG_BUS is not set
854# CONFIG_I2C_DEBUG_CHIP is not set
855CONFIG_SPI=y 911CONFIG_SPI=y
856CONFIG_SPI_MASTER=y 912CONFIG_SPI_MASTER=y
857 913
@@ -882,13 +938,16 @@ CONFIG_GPIOLIB=y
882# 938#
883# Memory mapped GPIO expanders: 939# Memory mapped GPIO expanders:
884# 940#
941# CONFIG_GPIO_IT8761E is not set
885 942
886# 943#
887# I2C GPIO expanders: 944# I2C GPIO expanders:
888# 945#
946# CONFIG_GPIO_MAX7300 is not set
889# CONFIG_GPIO_MAX732X is not set 947# CONFIG_GPIO_MAX732X is not set
890# CONFIG_GPIO_PCA953X is not set 948# CONFIG_GPIO_PCA953X is not set
891# CONFIG_GPIO_PCF857X is not set 949# CONFIG_GPIO_PCF857X is not set
950# CONFIG_GPIO_ADP5588 is not set
892 951
893# 952#
894# PCI GPIO expanders: 953# PCI GPIO expanders:
@@ -919,23 +978,26 @@ CONFIG_SSB_POSSIBLE=y
919# 978#
920# Multifunction device drivers 979# Multifunction device drivers
921# 980#
922# CONFIG_MFD_CORE is not set 981CONFIG_MFD_CORE=y
982# CONFIG_MFD_88PM860X is not set
923# CONFIG_MFD_SM501 is not set 983# CONFIG_MFD_SM501 is not set
924# CONFIG_MFD_SH_MOBILE_SDHI is not set 984CONFIG_MFD_SH_MOBILE_SDHI=y
925# CONFIG_HTC_PASIC3 is not set 985# CONFIG_HTC_PASIC3 is not set
986# CONFIG_HTC_I2CPLD is not set
926# CONFIG_TPS65010 is not set 987# CONFIG_TPS65010 is not set
927# CONFIG_TWL4030_CORE is not set 988# CONFIG_TWL4030_CORE is not set
928# CONFIG_MFD_TMIO is not set 989# CONFIG_MFD_TMIO is not set
929# CONFIG_PMIC_DA903X is not set 990# CONFIG_PMIC_DA903X is not set
930# CONFIG_PMIC_ADP5520 is not set 991# CONFIG_PMIC_ADP5520 is not set
992# CONFIG_MFD_MAX8925 is not set
931# CONFIG_MFD_WM8400 is not set 993# CONFIG_MFD_WM8400 is not set
932# CONFIG_MFD_WM831X is not set 994# CONFIG_MFD_WM831X is not set
933# CONFIG_MFD_WM8350_I2C is not set 995# CONFIG_MFD_WM8350_I2C is not set
996# CONFIG_MFD_WM8994 is not set
934# CONFIG_MFD_PCF50633 is not set 997# CONFIG_MFD_PCF50633 is not set
935# CONFIG_MFD_MC13783 is not set 998# CONFIG_MFD_MC13783 is not set
936# CONFIG_AB3100_CORE is not set 999# CONFIG_AB3100_CORE is not set
937# CONFIG_EZX_PCAP is not set 1000# CONFIG_EZX_PCAP is not set
938# CONFIG_MFD_88PM8607 is not set
939# CONFIG_AB4500_CORE is not set 1001# CONFIG_AB4500_CORE is not set
940# CONFIG_REGULATOR is not set 1002# CONFIG_REGULATOR is not set
941CONFIG_MEDIA_SUPPORT=y 1003CONFIG_MEDIA_SUPPORT=y
@@ -985,10 +1047,10 @@ CONFIG_SOC_CAMERA=y
985# CONFIG_SOC_CAMERA_MT9M001 is not set 1047# CONFIG_SOC_CAMERA_MT9M001 is not set
986# CONFIG_SOC_CAMERA_MT9M111 is not set 1048# CONFIG_SOC_CAMERA_MT9M111 is not set
987# CONFIG_SOC_CAMERA_MT9T031 is not set 1049# CONFIG_SOC_CAMERA_MT9T031 is not set
988# CONFIG_SOC_CAMERA_MT9T112 is not set 1050CONFIG_SOC_CAMERA_MT9T112=y
989# CONFIG_SOC_CAMERA_MT9V022 is not set 1051# CONFIG_SOC_CAMERA_MT9V022 is not set
990# CONFIG_SOC_CAMERA_RJ54N1 is not set 1052# CONFIG_SOC_CAMERA_RJ54N1 is not set
991# CONFIG_SOC_CAMERA_TW9910 is not set 1053CONFIG_SOC_CAMERA_TW9910=y
992# CONFIG_SOC_CAMERA_PLATFORM is not set 1054# CONFIG_SOC_CAMERA_PLATFORM is not set
993# CONFIG_SOC_CAMERA_OV772X is not set 1055# CONFIG_SOC_CAMERA_OV772X is not set
994# CONFIG_SOC_CAMERA_OV9640 is not set 1056# CONFIG_SOC_CAMERA_OV9640 is not set
@@ -1001,6 +1063,7 @@ CONFIG_RADIO_ADAPTERS=y
1001# CONFIG_RADIO_SI470X is not set 1063# CONFIG_RADIO_SI470X is not set
1002# CONFIG_USB_MR800 is not set 1064# CONFIG_USB_MR800 is not set
1003# CONFIG_RADIO_TEA5764 is not set 1065# CONFIG_RADIO_TEA5764 is not set
1066# CONFIG_RADIO_SAA7706H is not set
1004# CONFIG_RADIO_TEF6862 is not set 1067# CONFIG_RADIO_TEF6862 is not set
1005# CONFIG_DAB is not set 1068# CONFIG_DAB is not set
1006 1069
@@ -1034,6 +1097,7 @@ CONFIG_FB_DEFERRED_IO=y
1034# 1097#
1035# CONFIG_FB_S1D13XXX is not set 1098# CONFIG_FB_S1D13XXX is not set
1036CONFIG_FB_SH_MOBILE_LCDC=y 1099CONFIG_FB_SH_MOBILE_LCDC=y
1100# CONFIG_FB_TMIO is not set
1037# CONFIG_FB_VIRTUAL is not set 1101# CONFIG_FB_VIRTUAL is not set
1038# CONFIG_FB_METRONOME is not set 1102# CONFIG_FB_METRONOME is not set
1039# CONFIG_FB_MB862XX is not set 1103# CONFIG_FB_MB862XX is not set
@@ -1062,7 +1126,46 @@ CONFIG_LOGO=y
1062# CONFIG_LOGO_SUPERH_MONO is not set 1126# CONFIG_LOGO_SUPERH_MONO is not set
1063# CONFIG_LOGO_SUPERH_VGA16 is not set 1127# CONFIG_LOGO_SUPERH_VGA16 is not set
1064CONFIG_LOGO_SUPERH_CLUT224=y 1128CONFIG_LOGO_SUPERH_CLUT224=y
1065# CONFIG_SOUND is not set 1129CONFIG_SOUND=y
1130CONFIG_SOUND_OSS_CORE=y
1131CONFIG_SOUND_OSS_CORE_PRECLAIM=y
1132CONFIG_SND=y
1133CONFIG_SND_TIMER=y
1134CONFIG_SND_PCM=y
1135CONFIG_SND_JACK=y
1136CONFIG_SND_SEQUENCER=y
1137CONFIG_SND_SEQ_DUMMY=y
1138CONFIG_SND_OSSEMUL=y
1139CONFIG_SND_MIXER_OSS=y
1140CONFIG_SND_PCM_OSS=y
1141CONFIG_SND_PCM_OSS_PLUGINS=y
1142# CONFIG_SND_SEQUENCER_OSS is not set
1143# CONFIG_SND_DYNAMIC_MINORS is not set
1144CONFIG_SND_SUPPORT_OLD_API=y
1145CONFIG_SND_VERBOSE_PROCFS=y
1146# CONFIG_SND_VERBOSE_PRINTK is not set
1147# CONFIG_SND_DEBUG is not set
1148# CONFIG_SND_RAWMIDI_SEQ is not set
1149# CONFIG_SND_OPL3_LIB_SEQ is not set
1150# CONFIG_SND_OPL4_LIB_SEQ is not set
1151# CONFIG_SND_SBAWE_SEQ is not set
1152# CONFIG_SND_EMU10K1_SEQ is not set
1153# CONFIG_SND_DRIVERS is not set
1154# CONFIG_SND_SPI is not set
1155CONFIG_SND_SUPERH=y
1156# CONFIG_SND_USB is not set
1157CONFIG_SND_SOC=y
1158
1159#
1160# SoC Audio support for SuperH
1161#
1162CONFIG_SND_SOC_SH4_FSI=y
1163# CONFIG_SND_FSI_AK4642 is not set
1164CONFIG_SND_FSI_DA7210=y
1165CONFIG_SND_SOC_I2C_AND_SPI=y
1166# CONFIG_SND_SOC_ALL_CODECS is not set
1167CONFIG_SND_SOC_DA7210=y
1168# CONFIG_SOUND_PRIME is not set
1066CONFIG_HID_SUPPORT=y 1169CONFIG_HID_SUPPORT=y
1067CONFIG_HID=y 1170CONFIG_HID=y
1068# CONFIG_HIDRAW is not set 1171# CONFIG_HIDRAW is not set
@@ -1077,6 +1180,7 @@ CONFIG_USB_HID=y
1077# 1180#
1078# Special HID drivers 1181# Special HID drivers
1079# 1182#
1183# CONFIG_HID_3M_PCT is not set
1080# CONFIG_HID_A4TECH is not set 1184# CONFIG_HID_A4TECH is not set
1081# CONFIG_HID_APPLE is not set 1185# CONFIG_HID_APPLE is not set
1082# CONFIG_HID_BELKIN is not set 1186# CONFIG_HID_BELKIN is not set
@@ -1091,12 +1195,16 @@ CONFIG_USB_HID=y
1091# CONFIG_HID_KENSINGTON is not set 1195# CONFIG_HID_KENSINGTON is not set
1092# CONFIG_HID_LOGITECH is not set 1196# CONFIG_HID_LOGITECH is not set
1093# CONFIG_HID_MICROSOFT is not set 1197# CONFIG_HID_MICROSOFT is not set
1198# CONFIG_HID_MOSART is not set
1094# CONFIG_HID_MONTEREY is not set 1199# CONFIG_HID_MONTEREY is not set
1095# CONFIG_HID_NTRIG is not set 1200# CONFIG_HID_NTRIG is not set
1201# CONFIG_HID_ORTEK is not set
1096# CONFIG_HID_PANTHERLORD is not set 1202# CONFIG_HID_PANTHERLORD is not set
1097# CONFIG_HID_PETALYNX is not set 1203# CONFIG_HID_PETALYNX is not set
1204# CONFIG_HID_QUANTA is not set
1098# CONFIG_HID_SAMSUNG is not set 1205# CONFIG_HID_SAMSUNG is not set
1099# CONFIG_HID_SONY is not set 1206# CONFIG_HID_SONY is not set
1207# CONFIG_HID_STANTUM is not set
1100# CONFIG_HID_SUNPLUS is not set 1208# CONFIG_HID_SUNPLUS is not set
1101# CONFIG_HID_GREENASIA is not set 1209# CONFIG_HID_GREENASIA is not set
1102# CONFIG_HID_SMARTJOYPLUS is not set 1210# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1136,6 +1244,7 @@ CONFIG_USB_MON=y
1136# CONFIG_USB_SL811_HCD is not set 1244# CONFIG_USB_SL811_HCD is not set
1137CONFIG_USB_R8A66597_HCD=y 1245CONFIG_USB_R8A66597_HCD=y
1138# CONFIG_USB_HWA_HCD is not set 1246# CONFIG_USB_HWA_HCD is not set
1247# CONFIG_USB_GADGET_MUSB_HDRC is not set
1139 1248
1140# 1249#
1141# USB Device Class drivers 1250# USB Device Class drivers
@@ -1188,7 +1297,6 @@ CONFIG_USB_STORAGE=y
1188# CONFIG_USB_RIO500 is not set 1297# CONFIG_USB_RIO500 is not set
1189# CONFIG_USB_LEGOTOWER is not set 1298# CONFIG_USB_LEGOTOWER is not set
1190# CONFIG_USB_LCD is not set 1299# CONFIG_USB_LCD is not set
1191# CONFIG_USB_BERRY_CHARGE is not set
1192# CONFIG_USB_LED is not set 1300# CONFIG_USB_LED is not set
1193# CONFIG_USB_CYPRESS_CY7C63 is not set 1301# CONFIG_USB_CYPRESS_CY7C63 is not set
1194# CONFIG_USB_CYTHERM is not set 1302# CONFIG_USB_CYTHERM is not set
@@ -1200,8 +1308,45 @@ CONFIG_USB_STORAGE=y
1200# CONFIG_USB_IOWARRIOR is not set 1308# CONFIG_USB_IOWARRIOR is not set
1201# CONFIG_USB_TEST is not set 1309# CONFIG_USB_TEST is not set
1202# CONFIG_USB_ISIGHTFW is not set 1310# CONFIG_USB_ISIGHTFW is not set
1203# CONFIG_USB_VST is not set 1311CONFIG_USB_GADGET=y
1204# CONFIG_USB_GADGET is not set 1312# CONFIG_USB_GADGET_DEBUG_FILES is not set
1313# CONFIG_USB_GADGET_DEBUG_FS is not set
1314CONFIG_USB_GADGET_VBUS_DRAW=2
1315CONFIG_USB_GADGET_SELECTED=y
1316# CONFIG_USB_GADGET_AT91 is not set
1317# CONFIG_USB_GADGET_ATMEL_USBA is not set
1318# CONFIG_USB_GADGET_FSL_USB2 is not set
1319# CONFIG_USB_GADGET_LH7A40X is not set
1320# CONFIG_USB_GADGET_OMAP is not set
1321# CONFIG_USB_GADGET_PXA25X is not set
1322CONFIG_USB_GADGET_R8A66597=y
1323CONFIG_USB_R8A66597=y
1324# CONFIG_USB_GADGET_PXA27X is not set
1325# CONFIG_USB_GADGET_S3C_HSOTG is not set
1326# CONFIG_USB_GADGET_IMX is not set
1327# CONFIG_USB_GADGET_S3C2410 is not set
1328# CONFIG_USB_GADGET_M66592 is not set
1329# CONFIG_USB_GADGET_AMD5536UDC is not set
1330# CONFIG_USB_GADGET_FSL_QE is not set
1331# CONFIG_USB_GADGET_CI13XXX is not set
1332# CONFIG_USB_GADGET_NET2280 is not set
1333# CONFIG_USB_GADGET_GOKU is not set
1334# CONFIG_USB_GADGET_LANGWELL is not set
1335# CONFIG_USB_GADGET_DUMMY_HCD is not set
1336CONFIG_USB_GADGET_DUALSPEED=y
1337# CONFIG_USB_ZERO is not set
1338# CONFIG_USB_AUDIO is not set
1339# CONFIG_USB_ETH is not set
1340# CONFIG_USB_GADGETFS is not set
1341CONFIG_USB_FILE_STORAGE=m
1342# CONFIG_USB_FILE_STORAGE_TEST is not set
1343# CONFIG_USB_MASS_STORAGE is not set
1344# CONFIG_USB_G_SERIAL is not set
1345# CONFIG_USB_MIDI_GADGET is not set
1346# CONFIG_USB_G_PRINTER is not set
1347# CONFIG_USB_CDC_COMPOSITE is not set
1348# CONFIG_USB_G_NOKIA is not set
1349# CONFIG_USB_G_MULTI is not set
1205 1350
1206# 1351#
1207# OTG and related infrastructure 1352# OTG and related infrastructure
@@ -1224,10 +1369,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
1224# MMC/SD/SDIO Host Controller Drivers 1369# MMC/SD/SDIO Host Controller Drivers
1225# 1370#
1226# CONFIG_MMC_SDHCI is not set 1371# CONFIG_MMC_SDHCI is not set
1227# CONFIG_MMC_AT91 is not set
1228# CONFIG_MMC_ATMELMCI is not set
1229CONFIG_MMC_SPI=y 1372CONFIG_MMC_SPI=y
1230# CONFIG_MMC_TMIO is not set 1373CONFIG_MMC_TMIO=y
1231# CONFIG_MEMSTICK is not set 1374# CONFIG_MEMSTICK is not set
1232# CONFIG_NEW_LEDS is not set 1375# CONFIG_NEW_LEDS is not set
1233# CONFIG_ACCESSIBILITY is not set 1376# CONFIG_ACCESSIBILITY is not set
@@ -1253,10 +1396,10 @@ CONFIG_RTC_INTF_DEV=y
1253# CONFIG_RTC_DRV_DS1374 is not set 1396# CONFIG_RTC_DRV_DS1374 is not set
1254# CONFIG_RTC_DRV_DS1672 is not set 1397# CONFIG_RTC_DRV_DS1672 is not set
1255# CONFIG_RTC_DRV_MAX6900 is not set 1398# CONFIG_RTC_DRV_MAX6900 is not set
1256# CONFIG_RTC_DRV_RS5C372 is not set 1399CONFIG_RTC_DRV_RS5C372=y
1257# CONFIG_RTC_DRV_ISL1208 is not set 1400# CONFIG_RTC_DRV_ISL1208 is not set
1258# CONFIG_RTC_DRV_X1205 is not set 1401# CONFIG_RTC_DRV_X1205 is not set
1259CONFIG_RTC_DRV_PCF8563=y 1402# CONFIG_RTC_DRV_PCF8563 is not set
1260# CONFIG_RTC_DRV_PCF8583 is not set 1403# CONFIG_RTC_DRV_PCF8583 is not set
1261# CONFIG_RTC_DRV_M41T80 is not set 1404# CONFIG_RTC_DRV_M41T80 is not set
1262# CONFIG_RTC_DRV_BQ32K is not set 1405# CONFIG_RTC_DRV_BQ32K is not set
@@ -1303,8 +1446,6 @@ CONFIG_RTC_DRV_PCF8563=y
1303CONFIG_UIO=y 1446CONFIG_UIO=y
1304# CONFIG_UIO_PDRV is not set 1447# CONFIG_UIO_PDRV is not set
1305CONFIG_UIO_PDRV_GENIRQ=y 1448CONFIG_UIO_PDRV_GENIRQ=y
1306# CONFIG_UIO_SMX is not set
1307# CONFIG_UIO_SERCOS3 is not set
1308 1449
1309# 1450#
1310# TI VLYNQ 1451# TI VLYNQ
@@ -1390,6 +1531,7 @@ CONFIG_MISC_FILESYSTEMS=y
1390# CONFIG_EFS_FS is not set 1531# CONFIG_EFS_FS is not set
1391# CONFIG_JFFS2_FS is not set 1532# CONFIG_JFFS2_FS is not set
1392# CONFIG_UBIFS_FS is not set 1533# CONFIG_UBIFS_FS is not set
1534# CONFIG_LOGFS is not set
1393# CONFIG_CRAMFS is not set 1535# CONFIG_CRAMFS is not set
1394# CONFIG_SQUASHFS is not set 1536# CONFIG_SQUASHFS is not set
1395# CONFIG_VXFS_FS is not set 1537# CONFIG_VXFS_FS is not set
@@ -1418,6 +1560,7 @@ CONFIG_SUNRPC=y
1418# CONFIG_RPCSEC_GSS_KRB5 is not set 1560# CONFIG_RPCSEC_GSS_KRB5 is not set
1419# CONFIG_RPCSEC_GSS_SPKM3 is not set 1561# CONFIG_RPCSEC_GSS_SPKM3 is not set
1420# CONFIG_SMB_FS is not set 1562# CONFIG_SMB_FS is not set
1563# CONFIG_CEPH_FS is not set
1421# CONFIG_CIFS is not set 1564# CONFIG_CIFS is not set
1422# CONFIG_NCP_FS is not set 1565# CONFIG_NCP_FS is not set
1423# CONFIG_CODA_FS is not set 1566# CONFIG_CODA_FS is not set
@@ -1487,6 +1630,7 @@ CONFIG_DEBUG_FS=y
1487CONFIG_DEBUG_BUGVERBOSE=y 1630CONFIG_DEBUG_BUGVERBOSE=y
1488# CONFIG_DEBUG_MEMORY_INIT is not set 1631# CONFIG_DEBUG_MEMORY_INIT is not set
1489# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1632# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1633# CONFIG_LKDTM is not set
1490# CONFIG_LATENCYTOP is not set 1634# CONFIG_LATENCYTOP is not set
1491CONFIG_SYSCTL_SYSCALL_CHECK=y 1635CONFIG_SYSCTL_SYSCALL_CHECK=y
1492CONFIG_HAVE_FUNCTION_TRACER=y 1636CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1618,7 +1762,7 @@ CONFIG_CRYPTO_HW=y
1618# 1762#
1619CONFIG_BITREVERSE=y 1763CONFIG_BITREVERSE=y
1620CONFIG_GENERIC_FIND_LAST_BIT=y 1764CONFIG_GENERIC_FIND_LAST_BIT=y
1621# CONFIG_CRC_CCITT is not set 1765CONFIG_CRC_CCITT=y
1622# CONFIG_CRC16 is not set 1766# CONFIG_CRC16 is not set
1623CONFIG_CRC_T10DIF=y 1767CONFIG_CRC_T10DIF=y
1624CONFIG_CRC_ITU_T=y 1768CONFIG_CRC_ITU_T=y
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ac04255022b6..ce830faeebbf 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
211 211
212#define VSYSCALL_AUX_ENT \ 212#define VSYSCALL_AUX_ENT \
213 if (vdso_enabled) \ 213 if (vdso_enabled) \
214 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); 214 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
215 else \
216 NEW_AUX_ENT(AT_IGNORE, 0);
215#else 217#else
216#define VSYSCALL_AUX_ENT 218#define VSYSCALL_AUX_ENT
217#endif /* CONFIG_VSYSCALL */ 219#endif /* CONFIG_VSYSCALL */
@@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
219#ifdef CONFIG_SH_FPU 221#ifdef CONFIG_SH_FPU
220#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT) 222#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
221#else 223#else
222#define FPU_AUX_ENT 224#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
223#endif 225#endif
224 226
225extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape; 227extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
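The elf.h hunk pads the SH auxiliary vector with AT_IGNORE entries whenever the vDSO or FPU entry would otherwise be omitted, so the number of aux entries no longer depends on CONFIG_VSYSCALL or CONFIG_SH_FPU. As a rough userspace illustration (not part of the patch; it assumes glibc 2.16+ for getauxval()), this is how the two entry types are consumed; a zero result just means the slot carried no value:

/* Minimal sketch: read the aux vector entries whose presence the hunk
 * above makes unconditional (as AT_IGNORE padding when unused). */
#include <stdio.h>
#include <elf.h>        /* AT_SYSINFO_EHDR, AT_FPUCW */
#include <sys/auxv.h>   /* getauxval(), glibc >= 2.16 */

int main(void)
{
	unsigned long vdso  = getauxval(AT_SYSINFO_EHDR);
	unsigned long fpucw = getauxval(AT_FPUCW);

	/* 0 means the kernel passed AT_IGNORE (or nothing) for that type. */
	printf("AT_SYSINFO_EHDR = %#lx\n", vdso);
	printf("AT_FPUCW        = %#lx\n", fpucw);
	return 0;
}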
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 310ec92f2759..5963124c1d4a 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -30,6 +30,8 @@
30#define MMUCR_URB 0x00FC0000 30#define MMUCR_URB 0x00FC0000
31#define MMUCR_URB_SHIFT 18 31#define MMUCR_URB_SHIFT 18
32#define MMUCR_URB_NENTRIES 64 32#define MMUCR_URB_NENTRIES 64
33#define MMUCR_URC 0x0000FC00
34#define MMUCR_URC_SHIFT 10
33 35
34#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40) 36#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
35#define MMUCR_SE (1 << 4) 37#define MMUCR_SE (1 << 4)
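The new MMUCR_URC mask and shift land next to the existing URB definitions; the TLB patches below use URC to steer which UTLB slot the next entry load goes into. A standalone sketch of the field arithmetic, with the constants copied from this hunk (the sample register value is made up):

/* Illustrative only: extract the URB/URC fields of an MMUCR value and
 * point URC at a chosen UTLB slot, as tlb-urb.c does before wiring. */
#include <stdio.h>
#include <stdint.h>

#define MMUCR_URB		0x00FC0000
#define MMUCR_URB_SHIFT		18
#define MMUCR_URB_NENTRIES	64
#define MMUCR_URC		0x0000FC00
#define MMUCR_URC_SHIFT		10

int main(void)
{
	uint32_t mmucr = 0x00AC0400;	/* arbitrary sample value */
	unsigned urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;
	unsigned urc = (mmucr & MMUCR_URC) >> MMUCR_URC_SHIFT;

	printf("URB = %u, URC = %u\n", urb, urc);

	/* Retarget the replace counter at slot 42 (wraps at 64 entries). */
	unsigned slot = 42 % MMUCR_URB_NENTRIES;
	mmucr = (mmucr & ~MMUCR_URC) | (slot << MMUCR_URC_SHIFT);
	printf("new MMUCR = %#010x\n", mmucr);
	return 0;
}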
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index dce4f3ff0932..0fffacea6ed9 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -48,7 +48,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
48 return -ENODEV; 48 return -ENODEV;
49 49
50 cpus_allowed = current->cpus_allowed; 50 cpus_allowed = current->cpus_allowed;
51 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 51 set_cpus_allowed_ptr(current, cpumask_of(cpu));
52 52
53 BUG_ON(smp_processor_id() != cpu); 53 BUG_ON(smp_processor_id() != cpu);
54 54
@@ -66,7 +66,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
66 freqs.flags = 0; 66 freqs.flags = 0;
67 67
68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 68 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
69 set_cpus_allowed(current, cpus_allowed); 69 set_cpus_allowed_ptr(current, &cpus_allowed);
70 clk_set_rate(cpuclk, freq); 70 clk_set_rate(cpuclk, freq);
71 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 71 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
72 72
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
index df3ab5811074..cbf1dd5372b2 100644
--- a/arch/sh/kernel/return_address.c
+++ b/arch/sh/kernel/return_address.c
@@ -9,6 +9,7 @@
9 * for more details. 9 * for more details.
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h>
12#include <asm/dwarf.h> 13#include <asm/dwarf.h>
13 14
14#ifdef CONFIG_DWARF_UNWINDER 15#ifdef CONFIG_DWARF_UNWINDER
@@ -52,3 +53,5 @@ void *return_address(unsigned int depth)
52} 53}
53 54
54#endif 55#endif
56
57EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index e124cf7008df..002cc612deef 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
69 unsigned int cpu; 69 unsigned int cpu;
70 struct mm_struct *mm = &init_mm; 70 struct mm_struct *mm = &init_mm;
71 71
72 enable_mmu();
72 atomic_inc(&mm->mm_count); 73 atomic_inc(&mm->mm_count);
73 atomic_inc(&mm->mm_users); 74 atomic_inc(&mm->mm_users);
74 current->active_mm = mm; 75 current->active_mm = mm;
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index bdd0982b56ee..b71db6af8060 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -77,3 +77,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
77 __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); 77 __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
78 back_to_cached(); 78 back_to_cached();
79} 79}
80
81void local_flush_tlb_all(void)
82{
83 unsigned long flags, status;
84 int i;
85
86 /*
87 * Flush all the TLB.
88 */
89 local_irq_save(flags);
90 jump_to_uncached();
91
92 status = __raw_readl(MMUCR);
93 status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
94
95 if (status == 0)
96 status = MMUCR_URB_NENTRIES;
97
98 for (i = 0; i < status; i++)
99 __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
100
101 for (i = 0; i < 4; i++)
102 __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
103
104 back_to_cached();
105 ctrl_barrier();
106 local_irq_restore(flags);
107}
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 4f5f7cbdd508..7a940dbfc2e9 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -77,3 +77,22 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
77 for (i = 0; i < ways; i++) 77 for (i = 0; i < ways; i++)
78 __raw_writel(data, addr + (i << 8)); 78 __raw_writel(data, addr + (i << 8));
79} 79}
80
81void local_flush_tlb_all(void)
82{
83 unsigned long flags, status;
84
85 /*
86 * Flush all the TLB.
87 *
 88 * Write the MMU control register's flush bit:
 89 * the TF bit on SH-3, the TI bit on SH-4.
 90 * It's the same position, bit #2.
91 */
92 local_irq_save(flags);
93 status = __raw_readl(MMUCR);
94 status |= 0x04;
95 __raw_writel(status, MMUCR);
96 ctrl_barrier();
97 local_irq_restore(flags);
98}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index ccac77f504a8..cfdf7930d294 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -80,3 +80,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
80 __raw_writel(data, addr); 80 __raw_writel(data, addr);
81 back_to_cached(); 81 back_to_cached();
82} 82}
83
84void local_flush_tlb_all(void)
85{
86 unsigned long flags, status;
87 int i;
88
89 /*
90 * Flush all the TLB.
91 */
92 local_irq_save(flags);
93 jump_to_uncached();
94
95 status = __raw_readl(MMUCR);
96 status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
97
98 if (status == 0)
99 status = MMUCR_URB_NENTRIES;
100
101 for (i = 0; i < status; i++)
102 __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
103
104 for (i = 0; i < 4; i++)
105 __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
106
107 back_to_cached();
108 ctrl_barrier();
109 local_irq_restore(flags);
110}
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c
index bb5b9098956d..c92ce20db39b 100644
--- a/arch/sh/mm/tlb-urb.c
+++ b/arch/sh/mm/tlb-urb.c
@@ -24,13 +24,9 @@ void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
24 24
25 local_irq_save(flags); 25 local_irq_save(flags);
26 26
27 /* Load the entry into the TLB */
28 __update_tlb(vma, addr, pte);
29
30 /* ... and wire it up. */
31 status = __raw_readl(MMUCR); 27 status = __raw_readl(MMUCR);
32 urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT; 28 urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
33 status &= ~MMUCR_URB; 29 status &= ~MMUCR_URC;
34 30
35 /* 31 /*
36 * Make sure we're not trying to wire the last TLB entry slot. 32 * Make sure we're not trying to wire the last TLB entry slot.
@@ -39,7 +35,23 @@ void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
39 35
40 urb = urb % MMUCR_URB_NENTRIES; 36 urb = urb % MMUCR_URB_NENTRIES;
41 37
38 /*
39 * Insert this entry into the highest non-wired TLB slot (via
40 * the URC field).
41 */
42 status |= (urb << MMUCR_URC_SHIFT);
43 __raw_writel(status, MMUCR);
44 ctrl_barrier();
45
46 /* Load the entry into the TLB */
47 __update_tlb(vma, addr, pte);
48
49 /* ... and wire it up. */
50 status = __raw_readl(MMUCR);
51
52 status &= ~MMUCR_URB;
42 status |= (urb << MMUCR_URB_SHIFT); 53 status |= (urb << MMUCR_URB_SHIFT);
54
43 __raw_writel(status, MMUCR); 55 __raw_writel(status, MMUCR);
44 ctrl_barrier(); 56 ctrl_barrier();
45 57
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 77dc5efa7127..3fbe03ce8fe3 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,31 +119,3 @@ void local_flush_tlb_mm(struct mm_struct *mm)
119 local_irq_restore(flags); 119 local_irq_restore(flags);
120 } 120 }
121} 121}
122
123void local_flush_tlb_all(void)
124{
125 unsigned long flags, status;
126 int i;
127
128 /*
129 * Flush all the TLB.
130 */
131 local_irq_save(flags);
132 jump_to_uncached();
133
134 status = __raw_readl(MMUCR);
135 status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
136
137 if (status == 0)
138 status = MMUCR_URB_NENTRIES;
139
140 for (i = 0; i < status; i++)
141 __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
142
143 for (i = 0; i < 4; i++)
144 __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
145
146 back_to_cached();
147 ctrl_barrier();
148 local_irq_restore(flags);
149}
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 56e3163673e3..259e3fd50993 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33 3# Linux kernel version: 2.6.34-rc3
4# Wed Mar 3 02:54:29 2010 4# Sat Apr 3 15:49:56 2010
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7CONFIG_SPARC=y 7CONFIG_SPARC=y
@@ -23,6 +23,7 @@ CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
23CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y 23CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
24CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 24CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
25CONFIG_MMU=y 25CONFIG_MMU=y
26CONFIG_NEED_DMA_MAP_STATE=y
26CONFIG_ARCH_NO_VIRT_TO_BUS=y 27CONFIG_ARCH_NO_VIRT_TO_BUS=y
27CONFIG_OF=y 28CONFIG_OF=y
28CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y 29CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -439,6 +440,7 @@ CONFIG_MISC_DEVICES=y
439# CONFIG_ENCLOSURE_SERVICES is not set 440# CONFIG_ENCLOSURE_SERVICES is not set
440# CONFIG_HP_ILO is not set 441# CONFIG_HP_ILO is not set
441# CONFIG_ISL29003 is not set 442# CONFIG_ISL29003 is not set
443# CONFIG_SENSORS_TSL2550 is not set
442# CONFIG_DS1682 is not set 444# CONFIG_DS1682 is not set
443# CONFIG_C2PORT is not set 445# CONFIG_C2PORT is not set
444 446
@@ -511,6 +513,7 @@ CONFIG_BLK_DEV_IDEDMA=y
511# 513#
512# SCSI device support 514# SCSI device support
513# 515#
516CONFIG_SCSI_MOD=y
514CONFIG_RAID_ATTRS=m 517CONFIG_RAID_ATTRS=m
515CONFIG_SCSI=y 518CONFIG_SCSI=y
516CONFIG_SCSI_DMA=y 519CONFIG_SCSI_DMA=y
@@ -888,6 +891,7 @@ CONFIG_SERIAL_SUNHV=y
888CONFIG_SERIAL_CORE=y 891CONFIG_SERIAL_CORE=y
889CONFIG_SERIAL_CORE_CONSOLE=y 892CONFIG_SERIAL_CORE_CONSOLE=y
890# CONFIG_SERIAL_JSM is not set 893# CONFIG_SERIAL_JSM is not set
894# CONFIG_SERIAL_TIMBERDALE is not set
891# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 895# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
892CONFIG_UNIX98_PTYS=y 896CONFIG_UNIX98_PTYS=y
893# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 897# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -935,6 +939,7 @@ CONFIG_I2C_ALGOBIT=y
935# 939#
936# CONFIG_I2C_OCORES is not set 940# CONFIG_I2C_OCORES is not set
937# CONFIG_I2C_SIMTEC is not set 941# CONFIG_I2C_SIMTEC is not set
942# CONFIG_I2C_XILINX is not set
938 943
939# 944#
940# External I2C/SMBus adapter drivers 945# External I2C/SMBus adapter drivers
@@ -948,15 +953,9 @@ CONFIG_I2C_ALGOBIT=y
948# 953#
949# CONFIG_I2C_PCA_PLATFORM is not set 954# CONFIG_I2C_PCA_PLATFORM is not set
950# CONFIG_I2C_STUB is not set 955# CONFIG_I2C_STUB is not set
951
952#
953# Miscellaneous I2C Chip support
954#
955# CONFIG_SENSORS_TSL2550 is not set
956# CONFIG_I2C_DEBUG_CORE is not set 956# CONFIG_I2C_DEBUG_CORE is not set
957# CONFIG_I2C_DEBUG_ALGO is not set 957# CONFIG_I2C_DEBUG_ALGO is not set
958# CONFIG_I2C_DEBUG_BUS is not set 958# CONFIG_I2C_DEBUG_BUS is not set
959# CONFIG_I2C_DEBUG_CHIP is not set
960# CONFIG_SPI is not set 959# CONFIG_SPI is not set
961 960
962# 961#
@@ -982,10 +981,11 @@ CONFIG_HWMON=y
982# CONFIG_SENSORS_ADM1029 is not set 981# CONFIG_SENSORS_ADM1029 is not set
983# CONFIG_SENSORS_ADM1031 is not set 982# CONFIG_SENSORS_ADM1031 is not set
984# CONFIG_SENSORS_ADM9240 is not set 983# CONFIG_SENSORS_ADM9240 is not set
984# CONFIG_SENSORS_ADT7411 is not set
985# CONFIG_SENSORS_ADT7462 is not set 985# CONFIG_SENSORS_ADT7462 is not set
986# CONFIG_SENSORS_ADT7470 is not set 986# CONFIG_SENSORS_ADT7470 is not set
987# CONFIG_SENSORS_ADT7473 is not set
988# CONFIG_SENSORS_ADT7475 is not set 987# CONFIG_SENSORS_ADT7475 is not set
988# CONFIG_SENSORS_ASC7621 is not set
989# CONFIG_SENSORS_ATXP1 is not set 989# CONFIG_SENSORS_ATXP1 is not set
990# CONFIG_SENSORS_DS1621 is not set 990# CONFIG_SENSORS_DS1621 is not set
991# CONFIG_SENSORS_I5K_AMB is not set 991# CONFIG_SENSORS_I5K_AMB is not set
@@ -1052,18 +1052,21 @@ CONFIG_SSB_POSSIBLE=y
1052# Multifunction device drivers 1052# Multifunction device drivers
1053# 1053#
1054# CONFIG_MFD_CORE is not set 1054# CONFIG_MFD_CORE is not set
1055# CONFIG_MFD_88PM860X is not set
1055# CONFIG_MFD_SM501 is not set 1056# CONFIG_MFD_SM501 is not set
1056# CONFIG_HTC_PASIC3 is not set 1057# CONFIG_HTC_PASIC3 is not set
1057# CONFIG_TWL4030_CORE is not set 1058# CONFIG_TWL4030_CORE is not set
1058# CONFIG_MFD_TMIO is not set 1059# CONFIG_MFD_TMIO is not set
1059# CONFIG_PMIC_DA903X is not set 1060# CONFIG_PMIC_DA903X is not set
1060# CONFIG_PMIC_ADP5520 is not set 1061# CONFIG_PMIC_ADP5520 is not set
1062# CONFIG_MFD_MAX8925 is not set
1061# CONFIG_MFD_WM8400 is not set 1063# CONFIG_MFD_WM8400 is not set
1062# CONFIG_MFD_WM831X is not set 1064# CONFIG_MFD_WM831X is not set
1063# CONFIG_MFD_WM8350_I2C is not set 1065# CONFIG_MFD_WM8350_I2C is not set
1066# CONFIG_MFD_WM8994 is not set
1064# CONFIG_MFD_PCF50633 is not set 1067# CONFIG_MFD_PCF50633 is not set
1065# CONFIG_AB3100_CORE is not set 1068# CONFIG_AB3100_CORE is not set
1066# CONFIG_MFD_88PM8607 is not set 1069# CONFIG_LPC_SCH is not set
1067# CONFIG_REGULATOR is not set 1070# CONFIG_REGULATOR is not set
1068# CONFIG_MEDIA_SUPPORT is not set 1071# CONFIG_MEDIA_SUPPORT is not set
1069 1072
@@ -1113,6 +1116,7 @@ CONFIG_FB_FFB=y
1113# CONFIG_FB_LEO is not set 1116# CONFIG_FB_LEO is not set
1114CONFIG_FB_XVR500=y 1117CONFIG_FB_XVR500=y
1115CONFIG_FB_XVR2500=y 1118CONFIG_FB_XVR2500=y
1119CONFIG_FB_XVR1000=y
1116# CONFIG_FB_S1D13XXX is not set 1120# CONFIG_FB_S1D13XXX is not set
1117# CONFIG_FB_NVIDIA is not set 1121# CONFIG_FB_NVIDIA is not set
1118# CONFIG_FB_RIVA is not set 1122# CONFIG_FB_RIVA is not set
@@ -1430,7 +1434,6 @@ CONFIG_USB_STORAGE=m
1430# CONFIG_USB_RIO500 is not set 1434# CONFIG_USB_RIO500 is not set
1431# CONFIG_USB_LEGOTOWER is not set 1435# CONFIG_USB_LEGOTOWER is not set
1432# CONFIG_USB_LCD is not set 1436# CONFIG_USB_LCD is not set
1433# CONFIG_USB_BERRY_CHARGE is not set
1434# CONFIG_USB_LED is not set 1437# CONFIG_USB_LED is not set
1435# CONFIG_USB_CYPRESS_CY7C63 is not set 1438# CONFIG_USB_CYPRESS_CY7C63 is not set
1436# CONFIG_USB_CYTHERM is not set 1439# CONFIG_USB_CYTHERM is not set
@@ -1443,7 +1446,6 @@ CONFIG_USB_STORAGE=m
1443# CONFIG_USB_IOWARRIOR is not set 1446# CONFIG_USB_IOWARRIOR is not set
1444# CONFIG_USB_TEST is not set 1447# CONFIG_USB_TEST is not set
1445# CONFIG_USB_ISIGHTFW is not set 1448# CONFIG_USB_ISIGHTFW is not set
1446# CONFIG_USB_VST is not set
1447# CONFIG_USB_GADGET is not set 1449# CONFIG_USB_GADGET is not set
1448 1450
1449# 1451#
@@ -1610,6 +1612,7 @@ CONFIG_MISC_FILESYSTEMS=y
1610# CONFIG_BEFS_FS is not set 1612# CONFIG_BEFS_FS is not set
1611# CONFIG_BFS_FS is not set 1613# CONFIG_BFS_FS is not set
1612# CONFIG_EFS_FS is not set 1614# CONFIG_EFS_FS is not set
1615# CONFIG_LOGFS is not set
1613# CONFIG_CRAMFS is not set 1616# CONFIG_CRAMFS is not set
1614# CONFIG_SQUASHFS is not set 1617# CONFIG_SQUASHFS is not set
1615# CONFIG_VXFS_FS is not set 1618# CONFIG_VXFS_FS is not set
@@ -1624,6 +1627,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
1624# CONFIG_NFS_FS is not set 1627# CONFIG_NFS_FS is not set
1625# CONFIG_NFSD is not set 1628# CONFIG_NFSD is not set
1626# CONFIG_SMB_FS is not set 1629# CONFIG_SMB_FS is not set
1630# CONFIG_CEPH_FS is not set
1627# CONFIG_CIFS is not set 1631# CONFIG_CIFS is not set
1628# CONFIG_NCP_FS is not set 1632# CONFIG_NCP_FS is not set
1629# CONFIG_CODA_FS is not set 1633# CONFIG_CODA_FS is not set
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 314dd0c9fc5b..92090cc9e829 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -46,6 +46,81 @@ stack_trace_flush:
46 nop 46 nop
47 .size stack_trace_flush,.-stack_trace_flush 47 .size stack_trace_flush,.-stack_trace_flush
48 48
49#ifdef CONFIG_PERF_EVENTS
50 .globl perf_arch_fetch_caller_regs
51 .type perf_arch_fetch_caller_regs,#function
52perf_arch_fetch_caller_regs:
53 /* We always read the %pstate into %o5 since we will use
54 * that to construct a fake %tstate to store into the regs.
55 */
56 rdpr %pstate, %o5
57 brz,pn %o2, 50f
58 mov %o2, %g7
59
60 /* Turn off interrupts while we walk around the register
61 * window by hand.
62 */
63 wrpr %o5, PSTATE_IE, %pstate
64
65 /* The %canrestore tells us how many register windows are
66 * still live in the chip above us, past that we have to
67 * walk the frame as saved on the stack. We stash away
68 * the %cwp in %g1 so we can return back to the original
69 * register window.
70 */
71 rdpr %cwp, %g1
72 rdpr %canrestore, %g2
73 sub %g1, 1, %g3
74
75 /* We have the skip count in %g7, if it hits zero then
76 * %fp/%i7 are the registers we need. Otherwise if our
77 * %canrestore count maintained in %g2 hits zero we have
78 * to start traversing the stack.
79 */
80 10: brz,pn %g2, 4f
81 sub %g2, 1, %g2
82 wrpr %g3, %cwp
83 subcc %g7, 1, %g7
84 bne,pt %xcc, 10b
85 sub %g3, 1, %g3
86
87 /* We found the values we need in the cpu's register
88 * windows.
89 */
90 mov %fp, %g3
91 ba,pt %xcc, 3f
92 mov %i7, %g2
93
94 50: mov %fp, %g3
95 ba,pt %xcc, 2f
96 mov %i7, %g2
97
98 /* We hit the end of the valid register windows in the
99 * cpu, start traversing the stack frame.
100 */
101 4: mov %fp, %g3
102
103 20: ldx [%g3 + STACK_BIAS + RW_V9_I7], %g2
104 subcc %g7, 1, %g7
105 bne,pn %xcc, 20b
106 ldx [%g3 + STACK_BIAS + RW_V9_I6], %g3
107
108 /* Restore the current register window position and
109 * re-enable interrupts.
110 */
111 3: wrpr %g1, %cwp
112 wrpr %o5, %pstate
113
114 2: stx %g3, [%o0 + PT_V9_FP]
115 sllx %o5, 8, %o5
116 stx %o5, [%o0 + PT_V9_TSTATE]
117 stx %g2, [%o0 + PT_V9_TPC]
118 add %g2, 4, %g2
119 retl
120 stx %g2, [%o0 + PT_V9_TNPC]
121 .size perf_arch_fetch_caller_regs,.-perf_arch_fetch_caller_regs
122#endif /* CONFIG_PERF_EVENTS */
123
49#ifdef CONFIG_SMP 124#ifdef CONFIG_SMP
50 .globl hard_smp_processor_id 125 .globl hard_smp_processor_id
51 .type hard_smp_processor_id,#function 126 .type hard_smp_processor_id,#function
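perf_arch_fetch_caller_regs above walks the live sparc register windows first and then the frame records saved on the stack, skipping the requested number of frames before recording the frame pointer and return address it finds there. The same idea, stripped of the window handling and written as a portable C sketch over a fake frame chain (the struct and field names are illustrative, loosely standing in for the saved %i6/%i7 pair):

/* Not the sparc implementation: a portable sketch of "skip N frames, then
 * report that frame's saved frame pointer and return address". */
#include <stdio.h>
#include <stddef.h>

struct frame {
	struct frame *next_frame;	/* saved frame pointer (%i6 analogue) */
	unsigned long return_address;	/* saved return address (%i7 analogue) */
};

static int fetch_caller(const struct frame *fp, unsigned int skip,
			unsigned long *out_fp, unsigned long *out_pc)
{
	while (skip > 0 && fp) {
		fp = fp->next_frame;
		skip--;
	}
	if (!fp)
		return -1;
	*out_fp = (unsigned long)fp->next_frame;
	*out_pc = fp->return_address;
	return 0;
}

int main(void)
{
	struct frame f2 = { NULL, 0x3000 };
	struct frame f1 = { &f2,  0x2000 };
	struct frame f0 = { &f1,  0x1000 };
	unsigned long rfp, rpc;

	if (fetch_caller(&f0, 1, &rfp, &rpc) == 0)
		printf("fp=%#lx pc=%#lx\n", rfp, rpc);
	return 0;
}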
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 7e3dfd9bb97e..e608f397e11f 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
65 *k++ = regs->u_regs[pos++]; 65 *k++ = regs->u_regs[pos++];
66 66
67 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; 67 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
68 reg_window -= 16;
68 for (; count > 0 && pos < 32; count--) { 69 for (; count > 0 && pos < 32; count--) {
69 if (get_user(*k++, &reg_window[pos++])) 70 if (get_user(*k++, &reg_window[pos++]))
70 return -EFAULT; 71 return -EFAULT;
@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
76 } 77 }
77 78
78 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; 79 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
80 reg_window -= 16;
79 for (; count > 0 && pos < 32; count--) { 81 for (; count > 0 && pos < 32; count--) {
80 if (get_user(reg, &reg_window[pos++]) || 82 if (get_user(reg, &reg_window[pos++]) ||
81 put_user(reg, u++)) 83 put_user(reg, u++))
@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
141 regs->u_regs[pos++] = *k++; 143 regs->u_regs[pos++] = *k++;
142 144
143 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; 145 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
146 reg_window -= 16;
144 for (; count > 0 && pos < 32; count--) { 147 for (; count > 0 && pos < 32; count--) {
145 if (put_user(*k++, &reg_window[pos++])) 148 if (put_user(*k++, &reg_window[pos++]))
146 return -EFAULT; 149 return -EFAULT;
@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
153 } 156 }
154 157
155 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; 158 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
159 reg_window -= 16;
156 for (; count > 0 && pos < 32; count--) { 160 for (; count > 0 && pos < 32; count--) {
157 if (get_user(reg, u++) || 161 if (get_user(reg, u++) ||
158 put_user(reg, &reg_window[pos++])) 162 put_user(reg, &reg_window[pos++]))
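Each reg_window adjustment in this file subtracts 16 slots because the regset code keeps indexing with pos in the range 16..31, while the window save area itself has only 16 entries; after the adjustment, reg_window[pos] lands on slot pos - 16. A tiny sketch of the equivalent, explicitly offset indexing (the kernel expresses the same thing as the pointer adjustment shown above):

/* Purely illustrative: pos keeps running 16..31, the saved window has
 * 16 slots, so entry pos maps to saved_window[pos - 16]. */
#include <stdio.h>

int main(void)
{
	unsigned long saved_window[16];
	int pos;

	for (pos = 0; pos < 16; pos++)
		saved_window[pos] = 0x1000 + pos;	/* pretend %l0..%i7 */

	/* Kernel form: reg_window -= 16; then reg_window[pos]. */
	for (pos = 16; pos < 32; pos++)
		printf("pos %2d -> %#lx\n", pos, saved_window[pos - 16]);
	return 0;
}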
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 2f6524d1a817..aa90da08bf61 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
492 *k++ = regs->u_regs[pos++]; 492 *k++ = regs->u_regs[pos++];
493 493
494 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 494 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
495 reg_window -= 16;
495 if (target == current) { 496 if (target == current) {
496 for (; count > 0 && pos < 32; count--) { 497 for (; count > 0 && pos < 32; count--) {
497 if (get_user(*k++, &reg_window[pos++])) 498 if (get_user(*k++, &reg_window[pos++]))
@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
516 } 517 }
517 518
518 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 519 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
520 reg_window -= 16;
519 if (target == current) { 521 if (target == current) {
520 for (; count > 0 && pos < 32; count--) { 522 for (; count > 0 && pos < 32; count--) {
521 if (get_user(reg, &reg_window[pos++]) || 523 if (get_user(reg, &reg_window[pos++]) ||
@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
599 regs->u_regs[pos++] = *k++; 601 regs->u_regs[pos++] = *k++;
600 602
601 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 603 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
604 reg_window -= 16;
602 if (target == current) { 605 if (target == current) {
603 for (; count > 0 && pos < 32; count--) { 606 for (; count > 0 && pos < 32; count--) {
604 if (put_user(*k++, &reg_window[pos++])) 607 if (put_user(*k++, &reg_window[pos++]))
@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
625 } 628 }
626 629
627 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 630 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
631 reg_window -= 16;
628 if (target == current) { 632 if (target == current) {
629 for (; count > 0 && pos < 32; count--) { 633 for (; count > 0 && pos < 32; count--) {
630 if (get_user(reg, u++) || 634 if (get_user(reg, u++) ||
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index aaebc4815044..b2831dc3c121 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2117,7 +2117,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
2117 "node=%d entry=%lu/%lu\n", start, block, nr, 2117 "node=%d entry=%lu/%lu\n", start, block, nr,
2118 node, 2118 node,
2119 addr >> VMEMMAP_CHUNK_SHIFT, 2119 addr >> VMEMMAP_CHUNK_SHIFT,
2120 VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT); 2120 VMEMMAP_SIZE);
2121 } 2121 }
2122 } 2122 }
2123 return 0; 2123 return 0;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0316ffe851bd..db5bdc8addf8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,6 +29,7 @@
29#include <asm/apic.h> 29#include <asm/apic.h>
30#include <asm/stacktrace.h> 30#include <asm/stacktrace.h>
31#include <asm/nmi.h> 31#include <asm/nmi.h>
32#include <asm/compat.h>
32 33
33static u64 perf_event_mask __read_mostly; 34static u64 perf_event_mask __read_mostly;
34 35
@@ -159,7 +160,7 @@ struct x86_pmu {
159 struct perf_event *event); 160 struct perf_event *event);
160 struct event_constraint *event_constraints; 161 struct event_constraint *event_constraints;
161 162
162 void (*cpu_prepare)(int cpu); 163 int (*cpu_prepare)(int cpu);
163 void (*cpu_starting)(int cpu); 164 void (*cpu_starting)(int cpu);
164 void (*cpu_dying)(int cpu); 165 void (*cpu_dying)(int cpu);
165 void (*cpu_dead)(int cpu); 166 void (*cpu_dead)(int cpu);
@@ -1334,11 +1335,12 @@ static int __cpuinit
1334x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 1335x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1335{ 1336{
1336 unsigned int cpu = (long)hcpu; 1337 unsigned int cpu = (long)hcpu;
1338 int ret = NOTIFY_OK;
1337 1339
1338 switch (action & ~CPU_TASKS_FROZEN) { 1340 switch (action & ~CPU_TASKS_FROZEN) {
1339 case CPU_UP_PREPARE: 1341 case CPU_UP_PREPARE:
1340 if (x86_pmu.cpu_prepare) 1342 if (x86_pmu.cpu_prepare)
1341 x86_pmu.cpu_prepare(cpu); 1343 ret = x86_pmu.cpu_prepare(cpu);
1342 break; 1344 break;
1343 1345
1344 case CPU_STARTING: 1346 case CPU_STARTING:
@@ -1351,6 +1353,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1351 x86_pmu.cpu_dying(cpu); 1353 x86_pmu.cpu_dying(cpu);
1352 break; 1354 break;
1353 1355
1356 case CPU_UP_CANCELED:
1354 case CPU_DEAD: 1357 case CPU_DEAD:
1355 if (x86_pmu.cpu_dead) 1358 if (x86_pmu.cpu_dead)
1356 x86_pmu.cpu_dead(cpu); 1359 x86_pmu.cpu_dead(cpu);
@@ -1360,7 +1363,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1360 break; 1363 break;
1361 } 1364 }
1362 1365
1363 return NOTIFY_OK; 1366 return ret;
1364} 1367}
1365 1368
1366static void __init pmu_check_apic(void) 1369static void __init pmu_check_apic(void)
@@ -1629,14 +1632,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
1629 return len; 1632 return len;
1630} 1633}
1631 1634
1632static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) 1635#ifdef CONFIG_COMPAT
1636static inline int
1637perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1633{ 1638{
1634 unsigned long bytes; 1639 /* 32-bit process in 64-bit kernel. */
1640 struct stack_frame_ia32 frame;
1641 const void __user *fp;
1635 1642
1636 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame)); 1643 if (!test_thread_flag(TIF_IA32))
1644 return 0;
1645
1646 fp = compat_ptr(regs->bp);
1647 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1648 unsigned long bytes;
1649 frame.next_frame = 0;
1650 frame.return_address = 0;
1651
1652 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1653 if (bytes != sizeof(frame))
1654 break;
1655
1656 if (fp < compat_ptr(regs->sp))
1657 break;
1637 1658
1638 return bytes == sizeof(*frame); 1659 callchain_store(entry, frame.return_address);
1660 fp = compat_ptr(frame.next_frame);
1661 }
1662 return 1;
1663}
1664#else
1665static inline int
1666perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1667{
1668 return 0;
1639} 1669}
1670#endif
1640 1671
1641static void 1672static void
1642perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) 1673perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1652,11 +1683,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1652 callchain_store(entry, PERF_CONTEXT_USER); 1683 callchain_store(entry, PERF_CONTEXT_USER);
1653 callchain_store(entry, regs->ip); 1684 callchain_store(entry, regs->ip);
1654 1685
1686 if (perf_callchain_user32(regs, entry))
1687 return;
1688
1655 while (entry->nr < PERF_MAX_STACK_DEPTH) { 1689 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1690 unsigned long bytes;
1656 frame.next_frame = NULL; 1691 frame.next_frame = NULL;
1657 frame.return_address = 0; 1692 frame.return_address = 0;
1658 1693
1659 if (!copy_stack_frame(fp, &frame)) 1694 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1695 if (bytes != sizeof(frame))
1660 break; 1696 break;
1661 1697
1662 if ((unsigned long)fp < regs->sp) 1698 if ((unsigned long)fp < regs->sp)
@@ -1703,7 +1739,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1703 return entry; 1739 return entry;
1704} 1740}
1705 1741
1706#ifdef CONFIG_EVENT_TRACING
1707void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) 1742void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
1708{ 1743{
1709 regs->ip = ip; 1744 regs->ip = ip;
@@ -1715,4 +1750,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
1715 regs->cs = __KERNEL_CS; 1750 regs->cs = __KERNEL_CS;
1716 local_save_flags(regs->flags); 1751 local_save_flags(regs->flags);
1717} 1752}
1718#endif
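The new perf_callchain_user32() path walks 32-bit user stack frames through the struct stack_frame_ia32 layout added to dumpstack.h below. A hedged userspace sketch of that walk over a synthetic frame chain; copy_from_user_nmi(), compat_ptr() and the fp-versus-sp sanity check are replaced here by plain memory accesses on a fake 32-bit stack:

/* Illustrative walk: follow u32 frame links, record return addresses,
 * stop at a null link or the depth limit. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct stack_frame_ia32 {
	uint32_t next_frame;
	uint32_t return_address;
};

#define MAX_STACK_DEPTH 8

int main(void)
{
	unsigned char stack[256];	/* fake 32-bit "stack", offset-addressed */
	uint32_t entries[MAX_STACK_DEPTH];
	unsigned int nr = 0, i;

	struct stack_frame_ia32 f0 = {  64, 0x08048111 };
	struct stack_frame_ia32 f1 = { 128, 0x08048222 };
	struct stack_frame_ia32 f2 = {   0, 0x08048333 };
	memcpy(stack +   0, &f0, sizeof(f0));
	memcpy(stack +  64, &f1, sizeof(f1));
	memcpy(stack + 128, &f2, sizeof(f2));

	uint32_t fp = 0;	/* regs->bp analogue: offset of the first frame */
	while (nr < MAX_STACK_DEPTH) {
		struct stack_frame_ia32 frame;

		memcpy(&frame, stack + fp, sizeof(frame));	/* "copy_from_user" */
		entries[nr++] = frame.return_address;
		if (frame.next_frame == 0)
			break;
		fp = frame.next_frame;
	}

	for (i = 0; i < nr; i++)
		printf("callchain[%u] = %#x\n", i, (unsigned)entries[i]);
	return 0;
}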
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index b87e0b6970cb..db6f7d4056e1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
137 return (hwc->config & 0xe0) == 0xe0; 137 return (hwc->config & 0xe0) == 0xe0;
138} 138}
139 139
140static inline int amd_has_nb(struct cpu_hw_events *cpuc)
141{
142 struct amd_nb *nb = cpuc->amd_nb;
143
144 return nb && nb->nb_id != -1;
145}
146
140static void amd_put_event_constraints(struct cpu_hw_events *cpuc, 147static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
141 struct perf_event *event) 148 struct perf_event *event)
142{ 149{
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
147 /* 154 /*
148 * only care about NB events 155 * only care about NB events
149 */ 156 */
150 if (!(nb && amd_is_nb_event(hwc))) 157 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
151 return; 158 return;
152 159
153 /* 160 /*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
214 /* 221 /*
215 * if not NB event or no NB, then no constraints 222 * if not NB event or no NB, then no constraints
216 */ 223 */
217 if (!(nb && amd_is_nb_event(hwc))) 224 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
218 return &unconstrained; 225 return &unconstrained;
219 226
220 /* 227 /*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
293 return nb; 300 return nb;
294} 301}
295 302
296static void amd_pmu_cpu_online(int cpu) 303static int amd_pmu_cpu_prepare(int cpu)
304{
305 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
306
307 WARN_ON_ONCE(cpuc->amd_nb);
308
309 if (boot_cpu_data.x86_max_cores < 2)
310 return NOTIFY_OK;
311
312 cpuc->amd_nb = amd_alloc_nb(cpu, -1);
313 if (!cpuc->amd_nb)
314 return NOTIFY_BAD;
315
316 return NOTIFY_OK;
317}
318
319static void amd_pmu_cpu_starting(int cpu)
297{ 320{
298 struct cpu_hw_events *cpu1, *cpu2; 321 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
299 struct amd_nb *nb = NULL; 322 struct amd_nb *nb;
300 int i, nb_id; 323 int i, nb_id;
301 324
302 if (boot_cpu_data.x86_max_cores < 2) 325 if (boot_cpu_data.x86_max_cores < 2)
303 return; 326 return;
304 327
305 /*
306 * function may be called too early in the
307 * boot process, in which case nb_id is bogus
308 */
309 nb_id = amd_get_nb_id(cpu); 328 nb_id = amd_get_nb_id(cpu);
310 if (nb_id == BAD_APICID) 329 WARN_ON_ONCE(nb_id == BAD_APICID);
311 return;
312
313 cpu1 = &per_cpu(cpu_hw_events, cpu);
314 cpu1->amd_nb = NULL;
315 330
316 raw_spin_lock(&amd_nb_lock); 331 raw_spin_lock(&amd_nb_lock);
317 332
318 for_each_online_cpu(i) { 333 for_each_online_cpu(i) {
319 cpu2 = &per_cpu(cpu_hw_events, i); 334 nb = per_cpu(cpu_hw_events, i).amd_nb;
320 nb = cpu2->amd_nb; 335 if (WARN_ON_ONCE(!nb))
321 if (!nb)
322 continue; 336 continue;
323 if (nb->nb_id == nb_id)
324 goto found;
325 }
326 337
327 nb = amd_alloc_nb(cpu, nb_id); 338 if (nb->nb_id == nb_id) {
328 if (!nb) { 339 kfree(cpuc->amd_nb);
329 pr_err("perf_events: failed NB allocation for CPU%d\n", cpu); 340 cpuc->amd_nb = nb;
330 raw_spin_unlock(&amd_nb_lock); 341 break;
331 return; 342 }
332 } 343 }
333 found: 344
334 nb->refcnt++; 345 cpuc->amd_nb->nb_id = nb_id;
335 cpu1->amd_nb = nb; 346 cpuc->amd_nb->refcnt++;
336 347
337 raw_spin_unlock(&amd_nb_lock); 348 raw_spin_unlock(&amd_nb_lock);
338} 349}
339 350
340static void amd_pmu_cpu_offline(int cpu) 351static void amd_pmu_cpu_dead(int cpu)
341{ 352{
342 struct cpu_hw_events *cpuhw; 353 struct cpu_hw_events *cpuhw;
343 354
@@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
349 raw_spin_lock(&amd_nb_lock); 360 raw_spin_lock(&amd_nb_lock);
350 361
351 if (cpuhw->amd_nb) { 362 if (cpuhw->amd_nb) {
352 if (--cpuhw->amd_nb->refcnt == 0) 363 struct amd_nb *nb = cpuhw->amd_nb;
353 kfree(cpuhw->amd_nb); 364
365 if (nb->nb_id == -1 || --nb->refcnt == 0)
366 kfree(nb);
354 367
355 cpuhw->amd_nb = NULL; 368 cpuhw->amd_nb = NULL;
356 } 369 }
@@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
379 .get_event_constraints = amd_get_event_constraints, 392 .get_event_constraints = amd_get_event_constraints,
380 .put_event_constraints = amd_put_event_constraints, 393 .put_event_constraints = amd_put_event_constraints,
381 394
382 .cpu_prepare = amd_pmu_cpu_online, 395 .cpu_prepare = amd_pmu_cpu_prepare,
383 .cpu_dead = amd_pmu_cpu_offline, 396 .cpu_starting = amd_pmu_cpu_starting,
397 .cpu_dead = amd_pmu_cpu_dead,
384}; 398};
385 399
386static __init int amd_pmu_init(void) 400static __init int amd_pmu_init(void)
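The AMD PMU callbacks are now split: cpu_prepare() just allocates a per-CPU northbridge structure (and can veto the hotplug by returning NOTIFY_BAD, which is why the notifier in perf_event.c now propagates the return value), cpu_starting() either adopts an already-published structure with the same nb_id or keeps its own, taking a reference either way, and cpu_dead() drops the reference and frees at zero. A standalone sketch of that share-or-adopt refcount pattern; the function names, NR_CPUS and the absence of locking are illustrative simplifications:

/* Sketch of the cpuc->amd_nb lifecycle: pre-allocate per CPU, share per
 * node once the node id is known, free on the last CPU of the node. */
#include <stdio.h>
#include <stdlib.h>

struct amd_nb {
	int nb_id;	/* -1 until the node id is known (cpu_starting) */
	int refcnt;
};

#define NR_CPUS 4
static struct amd_nb *cpu_nb[NR_CPUS];

static int cpu_prepare(int cpu)
{
	struct amd_nb *nb = calloc(1, sizeof(*nb));
	if (!nb)
		return -1;		/* kernel: NOTIFY_BAD */
	nb->nb_id = -1;
	cpu_nb[cpu] = nb;
	return 0;			/* kernel: NOTIFY_OK */
}

static void cpu_starting(int cpu, int nb_id)
{
	for (int i = 0; i < NR_CPUS; i++) {
		struct amd_nb *nb = cpu_nb[i];
		if (i == cpu || !nb)
			continue;
		if (nb->nb_id == nb_id) {	/* adopt the shared one */
			free(cpu_nb[cpu]);
			cpu_nb[cpu] = nb;
			break;
		}
	}
	cpu_nb[cpu]->nb_id = nb_id;
	cpu_nb[cpu]->refcnt++;
}

static void cpu_dead(int cpu)
{
	struct amd_nb *nb = cpu_nb[cpu];
	if (nb && (nb->nb_id == -1 || --nb->refcnt == 0))
		free(nb);
	cpu_nb[cpu] = NULL;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_prepare(cpu);
	cpu_starting(0, 0); cpu_starting(1, 0);	/* CPUs 0,1 on node 0 */
	cpu_starting(2, 1); cpu_starting(3, 1);	/* CPUs 2,3 on node 1 */
	printf("node0 refcnt=%d node1 refcnt=%d\n",
	       cpu_nb[0]->refcnt, cpu_nb[2]->refcnt);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_dead(cpu);
	return 0;
}

The nb_id == -1 test in cpu_dead() mirrors the new check above: if cpu_starting() never ran, the structure was never shared, so it is freed without touching the refcount.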
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 29e5f7c845b2..e39e77168a37 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -30,6 +30,11 @@ struct stack_frame {
30 unsigned long return_address; 30 unsigned long return_address;
31}; 31};
32 32
33struct stack_frame_ia32 {
34 u32 next_frame;
35 u32 return_address;
36};
37
33static inline unsigned long rewind_frame_pointer(int n) 38static inline unsigned long rewind_frame_pointer(int n)
34{ 39{
35 struct stack_frame *frame; 40 struct stack_frame *frame;
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index adedeef1dedc..b2e246037392 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/start_kernel.h> 9#include <linux/start_kernel.h>
10#include <linux/mm.h>
10 11
11#include <asm/setup.h> 12#include <asm/setup.h>
12#include <asm/sections.h> 13#include <asm/sections.h>
@@ -44,9 +45,10 @@ void __init i386_start_kernel(void)
44#ifdef CONFIG_BLK_DEV_INITRD 45#ifdef CONFIG_BLK_DEV_INITRD
45 /* Reserve INITRD */ 46 /* Reserve INITRD */
46 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { 47 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
48 /* Assume only end is not page aligned */
47 u64 ramdisk_image = boot_params.hdr.ramdisk_image; 49 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
48 u64 ramdisk_size = boot_params.hdr.ramdisk_size; 50 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
49 u64 ramdisk_end = ramdisk_image + ramdisk_size; 51 u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
50 reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); 52 reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
51 } 53 }
52#endif 54#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b5a9896ca1e7..7147143fd614 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data)
103#ifdef CONFIG_BLK_DEV_INITRD 103#ifdef CONFIG_BLK_DEV_INITRD
104 /* Reserve INITRD */ 104 /* Reserve INITRD */
105 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { 105 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
106 /* Assume only end is not page aligned */
106 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; 107 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
107 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; 108 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
108 unsigned long ramdisk_end = ramdisk_image + ramdisk_size; 109 unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
109 reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); 110 reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
110 } 111 }
111#endif 112#endif
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index bfba6019d762..b2258ca91003 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
618 * portion of kgdb because this operation requires mutexs to 618 * portion of kgdb because this operation requires mutexs to
619 * complete. 619 * complete.
620 */ 620 */
621 hw_breakpoint_init(&attr);
621 attr.bp_addr = (unsigned long)kgdb_arch_init; 622 attr.bp_addr = (unsigned long)kgdb_arch_init;
622 attr.type = PERF_TYPE_BREAKPOINT;
623 attr.bp_len = HW_BREAKPOINT_LEN_1; 623 attr.bp_len = HW_BREAKPOINT_LEN_1;
624 attr.bp_type = HW_BREAKPOINT_W; 624 attr.bp_type = HW_BREAKPOINT_W;
625 attr.disabled = 1; 625 attr.disabled = 1;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c08d1e3261a8..9570541caf7c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -313,16 +313,17 @@ static void __init reserve_brk(void)
313#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) 313#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
314static void __init relocate_initrd(void) 314static void __init relocate_initrd(void)
315{ 315{
316 316 /* Assume only end is not page aligned */
317 u64 ramdisk_image = boot_params.hdr.ramdisk_image; 317 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
318 u64 ramdisk_size = boot_params.hdr.ramdisk_size; 318 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
319 u64 area_size = PAGE_ALIGN(ramdisk_size);
319 u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; 320 u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
320 u64 ramdisk_here; 321 u64 ramdisk_here;
321 unsigned long slop, clen, mapaddr; 322 unsigned long slop, clen, mapaddr;
322 char *p, *q; 323 char *p, *q;
323 324
324 /* We need to move the initrd down into lowmem */ 325 /* We need to move the initrd down into lowmem */
325 ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size, 326 ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
326 PAGE_SIZE); 327 PAGE_SIZE);
327 328
328 if (ramdisk_here == -1ULL) 329 if (ramdisk_here == -1ULL)
@@ -331,7 +332,7 @@ static void __init relocate_initrd(void)
331 332
332 /* Note: this includes all the lowmem currently occupied by 333 /* Note: this includes all the lowmem currently occupied by
333 the initrd, we rely on that fact to keep the data intact. */ 334 the initrd, we rely on that fact to keep the data intact. */
334 reserve_early(ramdisk_here, ramdisk_here + ramdisk_size, 335 reserve_early(ramdisk_here, ramdisk_here + area_size,
335 "NEW RAMDISK"); 336 "NEW RAMDISK");
336 initrd_start = ramdisk_here + PAGE_OFFSET; 337 initrd_start = ramdisk_here + PAGE_OFFSET;
337 initrd_end = initrd_start + ramdisk_size; 338 initrd_end = initrd_start + ramdisk_size;
@@ -375,9 +376,10 @@ static void __init relocate_initrd(void)
375 376
376static void __init reserve_initrd(void) 377static void __init reserve_initrd(void)
377{ 378{
379 /* Assume only end is not page aligned */
378 u64 ramdisk_image = boot_params.hdr.ramdisk_image; 380 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
379 u64 ramdisk_size = boot_params.hdr.ramdisk_size; 381 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
380 u64 ramdisk_end = ramdisk_image + ramdisk_size; 382 u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
381 u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; 383 u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
382 384
383 if (!boot_params.hdr.type_of_loader || 385 if (!boot_params.hdr.type_of_loader ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index be40f82b09af..763d815e27a0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -243,8 +243,6 @@ static void __cpuinit smp_callin(void)
243 end_local_APIC_setup(); 243 end_local_APIC_setup();
244 map_cpu_to_logical_apicid(); 244 map_cpu_to_logical_apicid();
245 245
246 notify_cpu_starting(cpuid);
247
248 /* 246 /*
249 * Need to setup vector mappings before we enable interrupts. 247 * Need to setup vector mappings before we enable interrupts.
250 */ 248 */
@@ -265,6 +263,8 @@ static void __cpuinit smp_callin(void)
265 */ 263 */
266 smp_store_cpu_info(cpuid); 264 smp_store_cpu_info(cpuid);
267 265
266 notify_cpu_starting(cpuid);
267
268 /* 268 /*
269 * Allow the master to continue. 269 * Allow the master to continue.
270 */ 270 */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 44879df55696..2cc249718c46 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -291,8 +291,8 @@ SECTIONS
291 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { 291 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
292 __smp_locks = .; 292 __smp_locks = .;
293 *(.smp_locks) 293 *(.smp_locks)
294 __smp_locks_end = .;
295 . = ALIGN(PAGE_SIZE); 294 . = ALIGN(PAGE_SIZE);
295 __smp_locks_end = .;
296 } 296 }
297 297
298#ifdef CONFIG_X86_64 298#ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a4a7d7dc8aa4..b278535b14aa 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -332,11 +332,23 @@ int devmem_is_allowed(unsigned long pagenr)
332 332
333void free_init_pages(char *what, unsigned long begin, unsigned long end) 333void free_init_pages(char *what, unsigned long begin, unsigned long end)
334{ 334{
335 unsigned long addr = begin; 335 unsigned long addr;
336 unsigned long begin_aligned, end_aligned;
336 337
337 if (addr >= end) 338 /* Make sure boundaries are page aligned */
339 begin_aligned = PAGE_ALIGN(begin);
340 end_aligned = end & PAGE_MASK;
341
342 if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
343 begin = begin_aligned;
344 end = end_aligned;
345 }
346
347 if (begin >= end)
338 return; 348 return;
339 349
350 addr = begin;
351
340 /* 352 /*
341 * If debugging page accesses then do not free this memory but 353 * If debugging page accesses then do not free this memory but
342 * mark them not present - any buggy init-section access will 354 * mark them not present - any buggy init-section access will
@@ -344,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
344 */ 356 */
345#ifdef CONFIG_DEBUG_PAGEALLOC 357#ifdef CONFIG_DEBUG_PAGEALLOC
346 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", 358 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
347 begin, PAGE_ALIGN(end)); 359 begin, end);
348 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); 360 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
349#else 361#else
350 /* 362 /*
@@ -359,8 +371,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
359 for (; addr < end; addr += PAGE_SIZE) { 371 for (; addr < end; addr += PAGE_SIZE) {
360 ClearPageReserved(virt_to_page(addr)); 372 ClearPageReserved(virt_to_page(addr));
361 init_page_count(virt_to_page(addr)); 373 init_page_count(virt_to_page(addr));
362 memset((void *)(addr & ~(PAGE_SIZE-1)), 374 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
363 POISON_FREE_INITMEM, PAGE_SIZE);
364 free_page(addr); 375 free_page(addr);
365 totalram_pages++; 376 totalram_pages++;
366 } 377 }
@@ -377,6 +388,15 @@ void free_initmem(void)
377#ifdef CONFIG_BLK_DEV_INITRD 388#ifdef CONFIG_BLK_DEV_INITRD
378void free_initrd_mem(unsigned long start, unsigned long end) 389void free_initrd_mem(unsigned long start, unsigned long end)
379{ 390{
380 free_init_pages("initrd memory", start, end); 391 /*
 392 * end may not be page aligned, and we cannot align it here:
 393 * the decompressor could be confused by an aligned initrd_end.
 394 * The trailing partial page has already been reserved in
395 * - i386_start_kernel()
396 * - x86_64_start_kernel()
397 * - relocate_initrd()
 398 * so PAGE_ALIGN() can safely be used here and the partial page gets freed.
399 */
400 free_init_pages("initrd memory", start, PAGE_ALIGN(end));
381} 401}
382#endif 402#endif
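The x86 hunks above consistently round the RAMDISK reservation up to a page boundary and make free_init_pages() warn about, and then fix up, unaligned boundaries. A small sketch of the arithmetic, assuming the usual 4 KiB page size (an assumption; the kernel takes PAGE_SIZE and PAGE_ALIGN() from the architecture headers):

/* Page-rounding arithmetic as used in the RAMDISK/initrd hunks above. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long ramdisk_image = 0x37c00000;	/* sample, page aligned */
	unsigned long ramdisk_size  = 0x002a1234;	/* end is not aligned */
	unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	/* The reservation now covers the partial tail page, so
	 * free_initrd_mem() may PAGE_ALIGN(end) and free it later. */
	printf("reserve %#lx - %#lx\n", ramdisk_image, ramdisk_end);

	/* The boundary check free_init_pages() now performs. */
	unsigned long begin = ramdisk_image;
	unsigned long end = ramdisk_image + ramdisk_size;
	if (PAGE_ALIGN(begin) != begin || (end & PAGE_MASK) != end)
		printf("would WARN: %#lx-%#lx not page aligned\n", begin, end);
	return 0;
}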
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index a42c466f7092..6da962c9b21c 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
1423 list_del_init(&tty->tty_files); 1423 list_del_init(&tty->tty_files);
1424 file_list_unlock(); 1424 file_list_unlock();
1425 1425
1426 put_pid(tty->pgrp);
1427 put_pid(tty->session);
1426 free_tty_struct(tty); 1428 free_tty_struct(tty);
1427} 1429}
1428 1430
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f2aaf39be398..51103aa469f8 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
104 if (connector->status == connector_status_disconnected) { 104 if (connector->status == connector_status_disconnected) {
105 DRM_DEBUG_KMS("%s is disconnected\n", 105 DRM_DEBUG_KMS("%s is disconnected\n",
106 drm_get_connector_name(connector)); 106 drm_get_connector_name(connector));
107 drm_mode_connector_update_edid_property(connector, NULL);
107 goto prune; 108 goto prune;
108 } 109 }
109 110
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d196d7ec9900..2cc6e87d849d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -708,15 +708,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
708 mode->vsync_end = mode->vsync_start + vsync_pulse_width; 708 mode->vsync_end = mode->vsync_start + vsync_pulse_width;
709 mode->vtotal = mode->vdisplay + vblank; 709 mode->vtotal = mode->vdisplay + vblank;
710 710
711 /* perform the basic check for the detailed timing */
712 if (mode->hsync_end > mode->htotal ||
713 mode->vsync_end > mode->vtotal) {
714 drm_mode_destroy(dev, mode);
715 DRM_DEBUG_KMS("Incorrect detailed timing. "
716 "Sync is beyond the blank.\n");
717 return NULL;
718 }
719
720 /* Some EDIDs have bogus h/vtotal values */ 711 /* Some EDIDs have bogus h/vtotal values */
721 if (mode->hsync_end > mode->htotal) 712 if (mode->hsync_end > mode->htotal)
722 mode->htotal = mode->hsync_end + 1; 713 mode->htotal = mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 85cdf052e458..288ea2f32772 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -284,6 +284,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
284 .help_msg = "force-fb(V)", 284 .help_msg = "force-fb(V)",
285 .action_msg = "Restore framebuffer console", 285 .action_msg = "Restore framebuffer console",
286}; 286};
287#else
288static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
287#endif 289#endif
288 290
289static void drm_fb_helper_on(struct fb_info *info) 291static void drm_fb_helper_on(struct fb_info *info)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 0d55552e1302..9d532d7fdf59 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -141,14 +141,16 @@ int drm_open(struct inode *inode, struct file *filp)
141 spin_unlock(&dev->count_lock); 141 spin_unlock(&dev->count_lock);
142 } 142 }
143out: 143out:
144 mutex_lock(&dev->struct_mutex); 144 if (!retcode) {
145 if (minor->type == DRM_MINOR_LEGACY) { 145 mutex_lock(&dev->struct_mutex);
146 BUG_ON((dev->dev_mapping != NULL) && 146 if (minor->type == DRM_MINOR_LEGACY) {
147 (dev->dev_mapping != inode->i_mapping)); 147 if (dev->dev_mapping == NULL)
148 if (dev->dev_mapping == NULL) 148 dev->dev_mapping = inode->i_mapping;
149 dev->dev_mapping = inode->i_mapping; 149 else if (dev->dev_mapping != inode->i_mapping)
150 retcode = -ENODEV;
151 }
152 mutex_unlock(&dev->struct_mutex);
150 } 153 }
151 mutex_unlock(&dev->struct_mutex);
152 154
153 return retcode; 155 return retcode;
154} 156}
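drm_open() now only touches dev_mapping on a successful open, and the old BUG_ON() becomes an -ENODEV return when a later opener shows up with a different inode mapping. The check reduces to a set-if-unset, otherwise-must-match pattern; a minimal sketch (the struct and names are stand-ins, not the DRM types):

/* First opener records the mapping; later openers must present the same
 * one or the open fails instead of crashing the kernel. */
#include <stdio.h>
#include <stddef.h>

#define ENODEV 19

struct fake_dev {
	void *dev_mapping;	/* address_space analogue */
};

static int claim_mapping(struct fake_dev *dev, void *mapping)
{
	if (dev->dev_mapping == NULL)
		dev->dev_mapping = mapping;
	else if (dev->dev_mapping != mapping)
		return -ENODEV;
	return 0;
}

int main(void)
{
	struct fake_dev dev = { NULL };
	int dummy_a, dummy_b;

	int first  = claim_mapping(&dev, &dummy_a);
	int second = claim_mapping(&dev, &dummy_b);	/* different mapping */
	printf("first open: %d, second open: %d\n", first, second);
	return 0;
}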
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 32db806f3b5a..7f0d807a0d0d 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
12 nouveau_dp.o nouveau_grctx.o \ 12 nouveau_dp.o nouveau_grctx.o \
13 nv04_timer.o \ 13 nv04_timer.o \
14 nv04_mc.o nv40_mc.o nv50_mc.o \ 14 nv04_mc.o nv40_mc.o nv50_mc.o \
15 nv04_fb.o nv10_fb.o nv40_fb.o \ 15 nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ 16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \ 17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \ 18 nv40_graph.o nv50_graph.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 75bceee76044..b5a9336a2e88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5211,6 +5211,21 @@ divine_connector_type(struct nvbios *bios, int index)
5211} 5211}
5212 5212
5213static void 5213static void
5214apply_dcb_connector_quirks(struct nvbios *bios, int idx)
5215{
5216 struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
5217 struct drm_device *dev = bios->dev;
5218
5219 /* Gigabyte NX85T */
5220 if ((dev->pdev->device == 0x0421) &&
5221 (dev->pdev->subsystem_vendor == 0x1458) &&
5222 (dev->pdev->subsystem_device == 0x344c)) {
5223 if (cte->type == DCB_CONNECTOR_HDMI_1)
5224 cte->type = DCB_CONNECTOR_DVI_I;
5225 }
5226}
5227
5228static void
5214parse_dcb_connector_table(struct nvbios *bios) 5229parse_dcb_connector_table(struct nvbios *bios)
5215{ 5230{
5216 struct drm_device *dev = bios->dev; 5231 struct drm_device *dev = bios->dev;
@@ -5238,13 +5253,14 @@ parse_dcb_connector_table(struct nvbios *bios)
5238 entry = conntab + conntab[1]; 5253 entry = conntab + conntab[1];
5239 cte = &ct->entry[0]; 5254 cte = &ct->entry[0];
5240 for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { 5255 for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
5256 cte->index = i;
5241 if (conntab[3] == 2) 5257 if (conntab[3] == 2)
5242 cte->entry = ROM16(entry[0]); 5258 cte->entry = ROM16(entry[0]);
5243 else 5259 else
5244 cte->entry = ROM32(entry[0]); 5260 cte->entry = ROM32(entry[0]);
5245 5261
5246 cte->type = (cte->entry & 0x000000ff) >> 0; 5262 cte->type = (cte->entry & 0x000000ff) >> 0;
5247 cte->index = (cte->entry & 0x00000f00) >> 8; 5263 cte->index2 = (cte->entry & 0x00000f00) >> 8;
5248 switch (cte->entry & 0x00033000) { 5264 switch (cte->entry & 0x00033000) {
5249 case 0x00001000: 5265 case 0x00001000:
5250 cte->gpio_tag = 0x07; 5266 cte->gpio_tag = 0x07;
@@ -5266,6 +5282,8 @@ parse_dcb_connector_table(struct nvbios *bios)
5266 if (cte->type == 0xff) 5282 if (cte->type == 0xff)
5267 continue; 5283 continue;
5268 5284
5285 apply_dcb_connector_quirks(bios, i);
5286
5269 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", 5287 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
5270 i, cte->entry, cte->type, cte->index, cte->gpio_tag); 5288 i, cte->entry, cte->type, cte->index, cte->gpio_tag);
5271 5289
@@ -5287,10 +5305,16 @@ parse_dcb_connector_table(struct nvbios *bios)
5287 break; 5305 break;
5288 default: 5306 default:
5289 cte->type = divine_connector_type(bios, cte->index); 5307 cte->type = divine_connector_type(bios, cte->index);
5290 NV_WARN(dev, "unknown type, using 0x%02x", cte->type); 5308 NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
5291 break; 5309 break;
5292 } 5310 }
5293 5311
5312 if (nouveau_override_conntype) {
5313 int type = divine_connector_type(bios, cte->index);
5314 if (type != cte->type)
5315 NV_WARN(dev, " -> type 0x%02x\n", cte->type);
5316 }
5317
5294 } 5318 }
5295} 5319}
5296 5320
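parse_dcb_connector_table() now records the table position itself in cte->index, moves the value that used to live there (bits 11:8 of the entry) into a new index2 field, and calls apply_dcb_connector_quirks() per entry. A decode sketch using only the masks visible in this hunk; GPIO cases other than 0x00001000 are not shown in the excerpt, so they are stubbed out here:

/* Decode one DCB connector entry the way the updated loop above does.
 * Only the fields and cases visible in the hunk are reproduced. */
#include <stdio.h>
#include <stdint.h>

struct dcb_connector_entry {
	uint8_t  index;		/* position in the table (new meaning) */
	uint32_t entry;
	uint8_t  type;
	uint8_t  index2;	/* old bits 11:8 value, renamed */
	uint8_t  gpio_tag;
};

static void decode(struct dcb_connector_entry *cte, int i, uint32_t raw)
{
	cte->index  = (uint8_t)i;
	cte->entry  = raw;
	cte->type   = (raw & 0x000000ff) >> 0;
	cte->index2 = (raw & 0x00000f00) >> 8;

	switch (raw & 0x00033000) {
	case 0x00001000:
		cte->gpio_tag = 0x07;
		break;
	default:
		cte->gpio_tag = 0xff;	/* other cases not shown in the hunk */
		break;
	}
}

int main(void)
{
	struct dcb_connector_entry cte;

	decode(&cte, 0, 0x00001130);	/* made-up raw entry */
	printf("%d: 0x%08x: type 0x%02x index2 %d tag 0x%02x\n",
	       cte.index, (unsigned)cte.entry, (unsigned)cte.type,
	       cte.index2, (unsigned)cte.gpio_tag);
	return 0;
}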
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 9f688aa9a655..4f88e6924d27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -72,9 +72,10 @@ enum dcb_connector_type {
72}; 72};
73 73
74struct dcb_connector_table_entry { 74struct dcb_connector_table_entry {
75 uint8_t index;
75 uint32_t entry; 76 uint32_t entry;
76 enum dcb_connector_type type; 77 enum dcb_connector_type type;
77 uint8_t index; 78 uint8_t index2;
78 uint8_t gpio_tag; 79 uint8_t gpio_tag;
79}; 80};
80 81
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 69c575dff0a2..9042dd7fb058 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -440,8 +440,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
440 440
441 switch (bo->mem.mem_type) { 441 switch (bo->mem.mem_type) {
442 case TTM_PL_VRAM: 442 case TTM_PL_VRAM:
443 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT | 443 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
444 TTM_PL_FLAG_SYSTEM);
445 break; 444 break;
446 default: 445 default:
447 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); 446 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 24327f468c4b..14afe1e47e57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector)
302 302
303detect_analog: 303detect_analog:
304 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 304 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
305 if (!nv_encoder) 305 if (!nv_encoder && !nouveau_tv_disable)
306 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 306 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
307 if (nv_encoder) { 307 if (nv_encoder) {
308 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 308 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c8482a108a78..65c441a1999f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
190 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); 190 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
191 191
192 chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; 192 chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
193
194 DRM_MEMORYBARRIER();
195 /* Flush writes. */
196 nouveau_bo_rd32(pb, 0);
197
193 nvchan_wr32(chan, 0x8c, chan->dma.ib_put); 198 nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
194 chan->dma.ib_free--; 199 chan->dma.ib_free--;
195} 200}
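nv50_dma_push() now issues a memory barrier and reads the push buffer back before writing the new put pointer, so the hardware cannot observe the pointer update before the command words themselves are visible. A userspace analogy using C11 release/acquire ordering (an analogy only; it does not model DRM_MEMORYBARRIER() or the MMIO write posting that the read-back addresses):

/* Publish data, then the "put" index, with release/acquire ordering so a
 * consumer never sees the index before the data.  Build with -pthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned int pushbuf[64];
static atomic_uint put_index;

static void *producer(void *arg)
{
	(void)arg;
	pushbuf[0] = 0xdeadbeef;	/* the "command words" */
	atomic_store_explicit(&put_index, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (atomic_load_explicit(&put_index, memory_order_acquire) == 0)
		;			/* spin until the index is published */
	printf("saw command %#x\n", pushbuf[0]);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}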
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 30cc09e8a709..1de974acbc65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0; 83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85 85
86MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
87int nouveau_override_conntype = 0;
88module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
89
90MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
91int nouveau_tv_disable = 0;
92module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
93
86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 94MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 95 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 96 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
154 if (pm_state.event == PM_EVENT_PRETHAW) 162 if (pm_state.event == PM_EVENT_PRETHAW)
155 return 0; 163 return 0;
156 164
165 NV_INFO(dev, "Disabling fbcon acceleration...\n");
157 fbdev_flags = dev_priv->fbdev_info->flags; 166 fbdev_flags = dev_priv->fbdev_info->flags;
158 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; 167 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
159 168
169 NV_INFO(dev, "Unpinning framebuffer(s)...\n");
160 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 170 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
161 struct nouveau_framebuffer *nouveau_fb; 171 struct nouveau_framebuffer *nouveau_fb;
162 172
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 4b9aaf2a8d0f..d8b559011777 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -681,6 +681,7 @@ extern int nouveau_uscript_tmds;
681extern int nouveau_vram_pushbuf; 681extern int nouveau_vram_pushbuf;
682extern int nouveau_vram_notify; 682extern int nouveau_vram_notify;
683extern int nouveau_fbpercrtc; 683extern int nouveau_fbpercrtc;
684extern int nouveau_tv_disable;
684extern char *nouveau_tv_norm; 685extern char *nouveau_tv_norm;
685extern int nouveau_reg_debug; 686extern int nouveau_reg_debug;
686extern char *nouveau_vbios; 687extern char *nouveau_vbios;
@@ -688,6 +689,7 @@ extern int nouveau_ctxfw;
688extern int nouveau_ignorelid; 689extern int nouveau_ignorelid;
689extern int nouveau_nofbaccel; 690extern int nouveau_nofbaccel;
690extern int nouveau_noaccel; 691extern int nouveau_noaccel;
692extern int nouveau_override_conntype;
691 693
692extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); 694extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
693extern int nouveau_pci_resume(struct pci_dev *pdev); 695extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -926,6 +928,10 @@ extern void nv40_fb_takedown(struct drm_device *);
926extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, 928extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
927 uint32_t, uint32_t); 929 uint32_t, uint32_t);
928 930
931/* nv50_fb.c */
932extern int nv50_fb_init(struct drm_device *);
933extern void nv50_fb_takedown(struct drm_device *);
934
929/* nv04_fifo.c */ 935/* nv04_fifo.c */
930extern int nv04_fifo_init(struct drm_device *); 936extern int nv04_fifo_init(struct drm_device *);
931extern void nv04_fifo_disable(struct drm_device *); 937extern void nv04_fifo_disable(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 95220ddebb45..2bd59a92fee5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -311,6 +311,31 @@ nouveau_print_bitfield_names_(uint32_t value,
311#define nouveau_print_bitfield_names(val, namelist) \ 311#define nouveau_print_bitfield_names(val, namelist) \
312 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) 312 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
313 313
314struct nouveau_enum_names {
315 uint32_t value;
316 const char *name;
317};
318
319static void
320nouveau_print_enum_names_(uint32_t value,
321 const struct nouveau_enum_names *namelist,
322 const int namelist_len)
323{
324 /*
325 * Caller must have already printed the KERN_* log level for us.
326 * Also the caller is responsible for adding the newline.
327 */
328 int i;
329 for (i = 0; i < namelist_len; ++i) {
330 if (value == namelist[i].value) {
331 printk("%s", namelist[i].name);
332 return;
333 }
334 }
335 printk("unknown value 0x%08x", value);
336}
337#define nouveau_print_enum_names(val, namelist) \
338 nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
314 339
315static int 340static int
316nouveau_graph_chid_from_grctx(struct drm_device *dev) 341nouveau_graph_chid_from_grctx(struct drm_device *dev)
@@ -427,14 +452,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
427 struct drm_nouveau_private *dev_priv = dev->dev_private; 452 struct drm_nouveau_private *dev_priv = dev->dev_private;
428 uint32_t nsource = trap->nsource, nstatus = trap->nstatus; 453 uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
429 454
430 NV_INFO(dev, "%s - nSource:", id); 455 if (dev_priv->card_type < NV_50) {
431 nouveau_print_bitfield_names(nsource, nsource_names); 456 NV_INFO(dev, "%s - nSource:", id);
432 printk(", nStatus:"); 457 nouveau_print_bitfield_names(nsource, nsource_names);
433 if (dev_priv->card_type < NV_10) 458 printk(", nStatus:");
434 nouveau_print_bitfield_names(nstatus, nstatus_names); 459 if (dev_priv->card_type < NV_10)
435 else 460 nouveau_print_bitfield_names(nstatus, nstatus_names);
436 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); 461 else
437 printk("\n"); 462 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
463 printk("\n");
464 }
438 465
439 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " 466 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
440 "Data 0x%08x:0x%08x\n", 467 "Data 0x%08x:0x%08x\n",
@@ -578,27 +605,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
578} 605}
579 606
580static void 607static void
608nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
609{
610 struct drm_nouveau_private *dev_priv = dev->dev_private;
611 uint32_t trap[6];
612 int i, ch;
613 uint32_t idx = nv_rd32(dev, 0x100c90);
614 if (idx & 0x80000000) {
615 idx &= 0xffffff;
616 if (display) {
617 for (i = 0; i < 6; i++) {
618 nv_wr32(dev, 0x100c90, idx | i << 24);
619 trap[i] = nv_rd32(dev, 0x100c94);
620 }
621 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
622 struct nouveau_channel *chan = dev_priv->fifos[ch];
623
624 if (!chan || !chan->ramin)
625 continue;
626
627 if (trap[1] == chan->ramin->instance >> 12)
628 break;
629 }
630 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
631 name, (trap[5]&0x100?"read":"write"),
632 trap[5]&0xff, trap[4]&0xffff,
633 trap[3]&0xffff, trap[0], trap[2], ch);
634 }
635 nv_wr32(dev, 0x100c90, idx | 0x80000000);
636 } else if (display) {
637 NV_INFO(dev, "%s - no VM fault?\n", name);
638 }
639}
640
641static struct nouveau_enum_names nv50_mp_exec_error_names[] =
642{
643 { 3, "STACK_UNDERFLOW" },
644 { 4, "QUADON_ACTIVE" },
645 { 8, "TIMEOUT" },
646 { 0x10, "INVALID_OPCODE" },
647 { 0x40, "BREAKPOINT" },
648};
649
650static void
651nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
652{
653 struct drm_nouveau_private *dev_priv = dev->dev_private;
654 uint32_t units = nv_rd32(dev, 0x1540);
655 uint32_t addr, mp10, status, pc, oplow, ophigh;
656 int i;
657 int mps = 0;
658 for (i = 0; i < 4; i++) {
659 if (!(units & 1 << (i+24)))
660 continue;
661 if (dev_priv->chipset < 0xa0)
662 addr = 0x408200 + (tpid << 12) + (i << 7);
663 else
664 addr = 0x408100 + (tpid << 11) + (i << 7);
665 mp10 = nv_rd32(dev, addr + 0x10);
666 status = nv_rd32(dev, addr + 0x14);
667 if (!status)
668 continue;
669 if (display) {
670 nv_rd32(dev, addr + 0x20);
671 pc = nv_rd32(dev, addr + 0x24);
672 oplow = nv_rd32(dev, addr + 0x70);
673 ophigh= nv_rd32(dev, addr + 0x74);
674 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
675 "TP %d MP %d: ", tpid, i);
676 nouveau_print_enum_names(status,
677 nv50_mp_exec_error_names);
678 printk(" at %06x warp %d, opcode %08x %08x\n",
679 pc&0xffffff, pc >> 24,
680 oplow, ophigh);
681 }
682 nv_wr32(dev, addr + 0x10, mp10);
683 nv_wr32(dev, addr + 0x14, 0);
684 mps++;
685 }
686 if (!mps && display)
687 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
688 "No MPs claiming errors?\n", tpid);
689}
690
691static void
692nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
693 uint32_t ustatus_new, int display, const char *name)
694{
695 struct drm_nouveau_private *dev_priv = dev->dev_private;
696 int tps = 0;
697 uint32_t units = nv_rd32(dev, 0x1540);
698 int i, r;
699 uint32_t ustatus_addr, ustatus;
700 for (i = 0; i < 16; i++) {
701 if (!(units & (1 << i)))
702 continue;
703 if (dev_priv->chipset < 0xa0)
704 ustatus_addr = ustatus_old + (i << 12);
705 else
706 ustatus_addr = ustatus_new + (i << 11);
707 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
708 if (!ustatus)
709 continue;
710 tps++;
711 switch (type) {
712 case 6: /* texture error... unknown for now */
713 nv50_pfb_vm_trap(dev, display, name);
714 if (display) {
715 NV_ERROR(dev, "magic set %d:\n", i);
716 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
717 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
718 nv_rd32(dev, r));
719 }
720 break;
721 case 7: /* MP error */
722 if (ustatus & 0x00010000) {
723 nv50_pgraph_mp_trap(dev, i, display);
724 ustatus &= ~0x00010000;
725 }
726 break;
727 case 8: /* TPDMA error */
728 {
729 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
730 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
731 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
732 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
733 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
734 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
735 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
736 nv50_pfb_vm_trap(dev, display, name);
737 /* 2d engine destination */
738 if (ustatus & 0x00000010) {
739 if (display) {
740 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
741 i, e14, e10);
742 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
743 i, e0c, e18, e1c, e20, e24);
744 }
745 ustatus &= ~0x00000010;
746 }
747 /* Render target */
748 if (ustatus & 0x00000040) {
749 if (display) {
750 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
751 i, e14, e10);
752 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
753 i, e0c, e18, e1c, e20, e24);
754 }
755 ustatus &= ~0x00000040;
756 }
757 /* CUDA memory: l[], g[] or stack. */
758 if (ustatus & 0x00000080) {
759 if (display) {
760 if (e18 & 0x80000000) {
761 /* g[] read fault? */
762 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
763 i, e14, e10 | ((e18 >> 24) & 0x1f));
764 e18 &= ~0x1f000000;
765 } else if (e18 & 0xc) {
766 /* g[] write fault? */
767 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
768 i, e14, e10 | ((e18 >> 7) & 0x1f));
769 e18 &= ~0x00000f80;
770 } else {
771 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
772 i, e14, e10);
773 }
774 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
775 i, e0c, e18, e1c, e20, e24);
776 }
777 ustatus &= ~0x00000080;
778 }
779 }
780 break;
781 }
782 if (ustatus) {
783 if (display)
784 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
785 }
786 nv_wr32(dev, ustatus_addr, 0xc0000000);
787 }
788
789 if (!tps && display)
790 NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
791}
792
793static void
794nv50_pgraph_trap_handler(struct drm_device *dev)
795{
796 struct nouveau_pgraph_trap trap;
797 uint32_t status = nv_rd32(dev, 0x400108);
798 uint32_t ustatus;
799 int display = nouveau_ratelimit();
800
801
802 if (!status && display) {
803 nouveau_graph_trap_info(dev, &trap);
804 nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
805 NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
806 }
807
808 /* DISPATCH: Relays commands to other units and handles NOTIFY,
809 * COND, QUERY. If you get a trap from it, the command is still stuck
810 * in DISPATCH and you need to do something about it. */
811 if (status & 0x001) {
812 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
813 if (!ustatus && display) {
814 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
815 }
816
817 /* Known to be triggered by screwed up NOTIFY and COND... */
818 if (ustatus & 0x00000001) {
819 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
820 nv_wr32(dev, 0x400500, 0);
821 if (nv_rd32(dev, 0x400808) & 0x80000000) {
822 if (display) {
823 if (nouveau_graph_trapped_channel(dev, &trap.channel))
824 trap.channel = -1;
825 trap.class = nv_rd32(dev, 0x400814);
826 trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
827 trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
828 trap.data = nv_rd32(dev, 0x40080c);
829 trap.data2 = nv_rd32(dev, 0x400810);
830 nouveau_graph_dump_trap_info(dev,
831 "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
832 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
833 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
834 }
835 nv_wr32(dev, 0x400808, 0);
836 } else if (display) {
837 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
838 }
839 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
840 nv_wr32(dev, 0x400848, 0);
841 ustatus &= ~0x00000001;
842 }
843 if (ustatus & 0x00000002) {
844 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
845 nv_wr32(dev, 0x400500, 0);
846 if (nv_rd32(dev, 0x40084c) & 0x80000000) {
847 if (display) {
848 if (nouveau_graph_trapped_channel(dev, &trap.channel))
849 trap.channel = -1;
850 trap.class = nv_rd32(dev, 0x400814);
851 trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
852 trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
853 trap.data = nv_rd32(dev, 0x40085c);
854 trap.data2 = 0;
855 nouveau_graph_dump_trap_info(dev,
856 "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
857 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
858 }
859 nv_wr32(dev, 0x40084c, 0);
860 } else if (display) {
861 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
862 }
863 ustatus &= ~0x00000002;
864 }
865 if (ustatus && display)
866 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
867 nv_wr32(dev, 0x400804, 0xc0000000);
868 nv_wr32(dev, 0x400108, 0x001);
869 status &= ~0x001;
870 }
871
872 /* TRAPs other than dispatch use the "normal" trap regs. */
873 if (status && display) {
874 nouveau_graph_trap_info(dev, &trap);
875 nouveau_graph_dump_trap_info(dev,
876 "PGRAPH_TRAP", &trap);
877 }
878
879 /* M2MF: Memory to memory copy engine. */
880 if (status & 0x002) {
881 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
882 if (!ustatus && display) {
883 NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
884 }
885 if (ustatus & 0x00000001) {
886 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
887 ustatus &= ~0x00000001;
888 }
889 if (ustatus & 0x00000002) {
890 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
891 ustatus &= ~0x00000002;
892 }
893 if (ustatus & 0x00000004) {
894 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
895 ustatus &= ~0x00000004;
896 }
897 NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
898 nv_rd32(dev, 0x406804),
899 nv_rd32(dev, 0x406808),
900 nv_rd32(dev, 0x40680c),
901 nv_rd32(dev, 0x406810));
902 if (ustatus && display)
903 NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
904 /* No sane way found yet -- just reset the bugger. */
905 nv_wr32(dev, 0x400040, 2);
906 nv_wr32(dev, 0x400040, 0);
907 nv_wr32(dev, 0x406800, 0xc0000000);
908 nv_wr32(dev, 0x400108, 0x002);
909 status &= ~0x002;
910 }
911
912 /* VFETCH: Fetches data from vertex buffers. */
913 if (status & 0x004) {
914 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
915 if (!ustatus && display) {
916 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
917 }
918 if (ustatus & 0x00000001) {
919 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
920 NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
921 nv_rd32(dev, 0x400c00),
922 nv_rd32(dev, 0x400c08),
923 nv_rd32(dev, 0x400c0c),
924 nv_rd32(dev, 0x400c10));
925 ustatus &= ~0x00000001;
926 }
927 if (ustatus && display)
928 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
929 nv_wr32(dev, 0x400c04, 0xc0000000);
930 nv_wr32(dev, 0x400108, 0x004);
931 status &= ~0x004;
932 }
933
934 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
935 if (status & 0x008) {
936 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
937 if (!ustatus && display) {
938 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
939 }
940 if (ustatus & 0x00000001) {
941 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
942 NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
943 nv_rd32(dev, 0x401804),
944 nv_rd32(dev, 0x401808),
945 nv_rd32(dev, 0x40180c),
946 nv_rd32(dev, 0x401810));
947 ustatus &= ~0x00000001;
948 }
949 if (ustatus && display)
950 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
951 /* No sane way found yet -- just reset the bugger. */
952 nv_wr32(dev, 0x400040, 0x80);
953 nv_wr32(dev, 0x400040, 0);
954 nv_wr32(dev, 0x401800, 0xc0000000);
955 nv_wr32(dev, 0x400108, 0x008);
956 status &= ~0x008;
957 }
958
959 /* CCACHE: Handles code and c[] caches and fills them. */
960 if (status & 0x010) {
961 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
962 if (!ustatus && display) {
963 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
964 }
965 if (ustatus & 0x00000001) {
966 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
967 NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
968 nv_rd32(dev, 0x405800),
969 nv_rd32(dev, 0x405804),
970 nv_rd32(dev, 0x405808),
971 nv_rd32(dev, 0x40580c),
972 nv_rd32(dev, 0x405810),
973 nv_rd32(dev, 0x405814),
974 nv_rd32(dev, 0x40581c));
975 ustatus &= ~0x00000001;
976 }
977 if (ustatus && display)
978 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
979 nv_wr32(dev, 0x405018, 0xc0000000);
980 nv_wr32(dev, 0x400108, 0x010);
981 status &= ~0x010;
982 }
983
984 /* Unknown, not seen yet... 0x402000 is the only trap status reg
985 * remaining, so try to handle it anyway. Perhaps related to that
986 * unknown DMA slot on tesla? */
987 if (status & 0x20) {
988 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
989 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
990 if (display)
991 NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
992 nv_wr32(dev, 0x402000, 0xc0000000);
 993 /* no status modification on purpose */
994 }
995
996 /* TEXTURE: CUDA texturing units */
997 if (status & 0x040) {
998 nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
999 "PGRAPH_TRAP_TEXTURE");
1000 nv_wr32(dev, 0x400108, 0x040);
1001 status &= ~0x040;
1002 }
1003
1004 /* MP: CUDA execution engines. */
1005 if (status & 0x080) {
1006 nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
1007 "PGRAPH_TRAP_MP");
1008 nv_wr32(dev, 0x400108, 0x080);
1009 status &= ~0x080;
1010 }
1011
1012 /* TPDMA: Handles TP-initiated uncached memory accesses:
1013 * l[], g[], stack, 2d surfaces, render targets. */
1014 if (status & 0x100) {
1015 nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
1016 "PGRAPH_TRAP_TPDMA");
1017 nv_wr32(dev, 0x400108, 0x100);
1018 status &= ~0x100;
1019 }
1020
1021 if (status) {
1022 if (display)
1023 NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
1024 status);
1025 nv_wr32(dev, 0x400108, status);
1026 }
1027}
1028
1029/* There must be a *lot* of these. Will take some time to gather them up. */
1030static struct nouveau_enum_names nv50_data_error_names[] =
1031{
1032 { 4, "INVALID_VALUE" },
1033 { 5, "INVALID_ENUM" },
1034 { 8, "INVALID_OBJECT" },
1035 { 0xc, "INVALID_BITFIELD" },
1036 { 0x28, "MP_NO_REG_SPACE" },
1037 { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
1038};
1039
1040static void
581nv50_pgraph_irq_handler(struct drm_device *dev) 1041nv50_pgraph_irq_handler(struct drm_device *dev)
582{ 1042{
1043 struct nouveau_pgraph_trap trap;
1044 int unhandled = 0;
583 uint32_t status; 1045 uint32_t status;
584 1046
585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { 1047 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 1048 /* NOTIFY: You've set a NOTIFY on a command and it's done. */
587
588 if (status & 0x00000001) { 1049 if (status & 0x00000001) {
589 nouveau_pgraph_intr_notify(dev, nsource); 1050 nouveau_graph_trap_info(dev, &trap);
1051 if (nouveau_ratelimit())
1052 nouveau_graph_dump_trap_info(dev,
1053 "PGRAPH_NOTIFY", &trap);
590 status &= ~0x00000001; 1054 status &= ~0x00000001;
591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 1055 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
592 } 1056 }
593 1057
594 if (status & 0x00000010) { 1058 /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
595 nouveau_pgraph_intr_error(dev, nsource | 1059 * when you write 0x200 to 0x50c0 method 0x31c. */
596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 1060 if (status & 0x00000002) {
1061 nouveau_graph_trap_info(dev, &trap);
1062 if (nouveau_ratelimit())
1063 nouveau_graph_dump_trap_info(dev,
1064 "PGRAPH_COMPUTE_QUERY", &trap);
1065 status &= ~0x00000002;
1066 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
1067 }
597 1068
1069 /* Unknown, never seen: 0x4 */
1070
1071 /* ILLEGAL_MTHD: You used a wrong method for this class. */
1072 if (status & 0x00000010) {
1073 nouveau_graph_trap_info(dev, &trap);
1074 if (nouveau_pgraph_intr_swmthd(dev, &trap))
1075 unhandled = 1;
1076 if (unhandled && nouveau_ratelimit())
1077 nouveau_graph_dump_trap_info(dev,
1078 "PGRAPH_ILLEGAL_MTHD", &trap);
598 status &= ~0x00000010; 1079 status &= ~0x00000010;
599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 1080 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
600 } 1081 }
601 1082
1083 /* ILLEGAL_CLASS: You used a wrong class. */
1084 if (status & 0x00000020) {
1085 nouveau_graph_trap_info(dev, &trap);
1086 if (nouveau_ratelimit())
1087 nouveau_graph_dump_trap_info(dev,
1088 "PGRAPH_ILLEGAL_CLASS", &trap);
1089 status &= ~0x00000020;
1090 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
1091 }
1092
1093 /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
1094 if (status & 0x00000040) {
1095 nouveau_graph_trap_info(dev, &trap);
1096 if (nouveau_ratelimit())
1097 nouveau_graph_dump_trap_info(dev,
1098 "PGRAPH_DOUBLE_NOTIFY", &trap);
1099 status &= ~0x00000040;
1100 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
1101 }
1102
1103 /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
602 if (status & 0x00001000) { 1104 if (status & 0x00001000) {
603 nv_wr32(dev, 0x400500, 0x00000000); 1105 nv_wr32(dev, 0x400500, 0x00000000);
604 nv_wr32(dev, NV03_PGRAPH_INTR, 1106 nv_wr32(dev, NV03_PGRAPH_INTR,
@@ -613,49 +1115,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1115 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
614 } 1116 }
615 1117
616 if (status & 0x00100000) { 1118 /* BUFFER_NOTIFY: Your m2mf transfer finished */
617 nouveau_pgraph_intr_error(dev, nsource | 1119 if (status & 0x00010000) {
618 NV03_PGRAPH_NSOURCE_DATA_ERROR); 1120 nouveau_graph_trap_info(dev, &trap);
1121 if (nouveau_ratelimit())
1122 nouveau_graph_dump_trap_info(dev,
1123 "PGRAPH_BUFFER_NOTIFY", &trap);
1124 status &= ~0x00010000;
1125 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
1126 }
619 1127
1128 /* DATA_ERROR: Invalid value for this method, or invalid
1129 * state in current PGRAPH context for this operation */
1130 if (status & 0x00100000) {
1131 nouveau_graph_trap_info(dev, &trap);
1132 if (nouveau_ratelimit()) {
1133 nouveau_graph_dump_trap_info(dev,
1134 "PGRAPH_DATA_ERROR", &trap);
1135 NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
1136 nouveau_print_enum_names(nv_rd32(dev, 0x400110),
1137 nv50_data_error_names);
1138 printk("\n");
1139 }
620 status &= ~0x00100000; 1140 status &= ~0x00100000;
621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 1141 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
622 } 1142 }
623 1143
1144 /* TRAP: Something bad happened in the middle of command
1145 * execution. Has a billion types, subtypes, and even
1146 * subsubtypes. */
624 if (status & 0x00200000) { 1147 if (status & 0x00200000) {
625 int r; 1148 nv50_pgraph_trap_handler(dev);
626
627 nouveau_pgraph_intr_error(dev, nsource |
628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
629
630 NV_ERROR(dev, "magic set 1:\n");
631 for (r = 0x408900; r <= 0x408910; r += 4)
632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
633 nv_rd32(dev, r));
634 nv_wr32(dev, 0x408900,
635 nv_rd32(dev, 0x408904) | 0xc0000000);
636 for (r = 0x408e08; r <= 0x408e24; r += 4)
637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
638 nv_rd32(dev, r));
639 nv_wr32(dev, 0x408e08,
640 nv_rd32(dev, 0x408e08) | 0xc0000000);
641
642 NV_ERROR(dev, "magic set 2:\n");
643 for (r = 0x409900; r <= 0x409910; r += 4)
644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
645 nv_rd32(dev, r));
646 nv_wr32(dev, 0x409900,
647 nv_rd32(dev, 0x409904) | 0xc0000000);
648 for (r = 0x409e08; r <= 0x409e24; r += 4)
649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000; 1149 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 1150 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 } 1151 }
658 1152
1153 /* Unknown, never seen: 0x00400000 */
1154
1155 /* SINGLE_STEP: Happens on every method if you turned on
1156 * single stepping in 40008c */
1157 if (status & 0x01000000) {
1158 nouveau_graph_trap_info(dev, &trap);
1159 if (nouveau_ratelimit())
1160 nouveau_graph_dump_trap_info(dev,
1161 "PGRAPH_SINGLE_STEP", &trap);
1162 status &= ~0x01000000;
1163 nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
1164 }
1165
1166 /* 0x02000000 happens when you pause a ctxprog...
1167 * but the only way this can happen that I know is by
1168 * poking the relevant MMIO register, and we don't
1169 * do that. */
1170
659 if (status) { 1171 if (status) {
660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", 1172 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
661 status); 1173 status);
@@ -672,7 +1184,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
672 } 1184 }
673 1185
674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 1186 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 1187 if (nv_rd32(dev, 0x400824) & (1 << 31))
1188 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
676} 1189}
677 1190
678static void 1191static void
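
Editor's note: the nouveau_print_enum_names_() helper added above is a plain value-to-name table lookup with an "unknown value" fallback. A self-contained sketch of the same idea (table contents and names here are illustrative, not the driver's tables):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct enum_name { uint32_t value; const char *name; };

static const struct enum_name mp_errors[] = {
	{ 0x03, "STACK_UNDERFLOW" },
	{ 0x10, "INVALID_OPCODE" },
};

static const char *enum_name(uint32_t v, const struct enum_name *t, size_t n)
{
	size_t i;
	for (i = 0; i < n; i++)
		if (t[i].value == v)
			return t[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", enum_name(0x10, mp_errors,
				 sizeof(mp_errors) / sizeof(mp_errors[0])));
	return 0;
}
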
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e67f2ba950a4..10656a6be8e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -36,7 +36,6 @@
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nv50_display.h" 37#include "nv50_display.h"
38 38
39static int nouveau_stub_init(struct drm_device *dev) { return 0; }
40static void nouveau_stub_takedown(struct drm_device *dev) {} 39static void nouveau_stub_takedown(struct drm_device *dev) {}
41 40
42static int nouveau_init_engine_ptrs(struct drm_device *dev) 41static int nouveau_init_engine_ptrs(struct drm_device *dev)
@@ -278,8 +277,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
278 engine->timer.init = nv04_timer_init; 277 engine->timer.init = nv04_timer_init;
279 engine->timer.read = nv04_timer_read; 278 engine->timer.read = nv04_timer_read;
280 engine->timer.takedown = nv04_timer_takedown; 279 engine->timer.takedown = nv04_timer_takedown;
281 engine->fb.init = nouveau_stub_init; 280 engine->fb.init = nv50_fb_init;
282 engine->fb.takedown = nouveau_stub_takedown; 281 engine->fb.takedown = nv50_fb_takedown;
283 engine->graph.grclass = nv50_graph_grclass; 282 engine->graph.grclass = nv50_graph_grclass;
284 engine->graph.init = nv50_graph_init; 283 engine->graph.init = nv50_graph_init;
285 engine->graph.takedown = nv50_graph_takedown; 284 engine->graph.takedown = nv50_graph_takedown;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index a1d1ebb073d9..eba687f1099e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
230 struct drm_framebuffer *fb = crtc->fb; 230 struct drm_framebuffer *fb = crtc->fb;
231 231
232 /* Calculate our timings */ 232 /* Calculate our timings */
233 int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; 233 int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
234 int horizStart = (mode->crtc_hsync_start >> 3) - 1; 234 int horizStart = (mode->crtc_hsync_start >> 3) + 1;
235 int horizEnd = (mode->crtc_hsync_end >> 3) - 1; 235 int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
236 int horizTotal = (mode->crtc_htotal >> 3) - 5; 236 int horizTotal = (mode->crtc_htotal >> 3) - 5;
237 int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; 237 int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
238 int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; 238 int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 3da90c2c4e63..813b25cec726 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
118 return; 118 return;
119 } 119 }
120 120
121 width = ALIGN(image->width, 32); 121 width = ALIGN(image->width, 8);
122 dsize = (width * image->height) >> 5; 122 dsize = ALIGN(width * image->height, 32) >> 5;
123 123
124 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 124 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
125 info->fix.visual == FB_VISUAL_DIRECTCOLOR) { 125 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
136 ((image->dx + image->width) & 0xffff)); 136 ((image->dx + image->width) & 0xffff));
137 OUT_RING(chan, bg); 137 OUT_RING(chan, bg);
138 OUT_RING(chan, fg); 138 OUT_RING(chan, fg);
139 OUT_RING(chan, (image->height << 16) | image->width);
140 OUT_RING(chan, (image->height << 16) | width); 139 OUT_RING(chan, (image->height << 16) | width);
140 OUT_RING(chan, (image->height << 16) | image->width);
141 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); 141 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
142 142
143 while (dsize) { 143 while (dsize) {
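
Editor's note: the corrected size math above pads each 1bpp row to a whole byte (ALIGN to 8 pixels) and then rounds the total bit count up to 32-bit words. A worked example with made-up glyph dimensions, using ALIGN as defined in the kernel:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned img_w = 12, img_h = 22;                 /* e.g. a console glyph */
	unsigned width = ALIGN(img_w, 8);                /* rows padded to bytes: 16 */
	unsigned dsize = ALIGN(width * img_h, 32) >> 5;  /* 352 bits -> 11 dwords */

	printf("width=%u dsize=%u\n", width, dsize);
	return 0;
}
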
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 61a89f2dc553..fac6c88a2b1f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -522,8 +522,8 @@ int nv50_display_create(struct drm_device *dev)
522 } 522 }
523 523
524 for (i = 0 ; i < dcb->connector.entries; i++) { 524 for (i = 0 ; i < dcb->connector.entries; i++) {
525 if (i != 0 && dcb->connector.entry[i].index == 525 if (i != 0 && dcb->connector.entry[i].index2 ==
526 dcb->connector.entry[i - 1].index) 526 dcb->connector.entry[i - 1].index2)
527 continue; 527 continue;
528 nouveau_connector_create(dev, &dcb->connector.entry[i]); 528 nouveau_connector_create(dev, &dcb->connector.entry[i]);
529 } 529 }
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
new file mode 100644
index 000000000000..a95e6941ba88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -0,0 +1,32 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv50_fb_init(struct drm_device *dev)
8{
9 /* This is needed to get meaningful information from 100c90
10 * on traps. No idea what these values mean exactly. */
11 struct drm_nouveau_private *dev_priv = dev->dev_private;
12
13 switch (dev_priv->chipset) {
14 case 0x50:
15 nv_wr32(dev, 0x100c90, 0x0707ff);
16 break;
17 case 0xa5:
18 case 0xa8:
19 nv_wr32(dev, 0x100c90, 0x0d0fff);
20 break;
21 default:
22 nv_wr32(dev, 0x100c90, 0x1d07ff);
23 break;
24 }
25
26 return 0;
27}
28
29void
30nv50_fb_takedown(struct drm_device *dev)
31{
32}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 993c7126fbde..25a3cd8794f9 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -233,7 +233,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
233 BEGIN_RING(chan, NvSub2D, 0x0808, 3); 233 BEGIN_RING(chan, NvSub2D, 0x0808, 3);
234 OUT_RING(chan, 0); 234 OUT_RING(chan, 0);
235 OUT_RING(chan, 0); 235 OUT_RING(chan, 0);
236 OUT_RING(chan, 0); 236 OUT_RING(chan, 1);
237 BEGIN_RING(chan, NvSub2D, 0x081c, 1); 237 BEGIN_RING(chan, NvSub2D, 0x081c, 1);
238 OUT_RING(chan, 1); 238 OUT_RING(chan, 1);
239 BEGIN_RING(chan, NvSub2D, 0x0840, 4); 239 BEGIN_RING(chan, NvSub2D, 0x0840, 4);
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 857a09671a39..c62b33a02f88 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev)
56static void 56static void
57nv50_graph_init_regs__nv(struct drm_device *dev) 57nv50_graph_init_regs__nv(struct drm_device *dev)
58{ 58{
59 struct drm_nouveau_private *dev_priv = dev->dev_private;
60 uint32_t units = nv_rd32(dev, 0x1540);
61 int i;
62
59 NV_DEBUG(dev, "\n"); 63 NV_DEBUG(dev, "\n");
60 64
61 nv_wr32(dev, 0x400804, 0xc0000000); 65 nv_wr32(dev, 0x400804, 0xc0000000);
@@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
65 nv_wr32(dev, 0x405018, 0xc0000000); 69 nv_wr32(dev, 0x405018, 0xc0000000);
66 nv_wr32(dev, 0x402000, 0xc0000000); 70 nv_wr32(dev, 0x402000, 0xc0000000);
67 71
72 for (i = 0; i < 16; i++) {
73 if (units & 1 << i) {
74 if (dev_priv->chipset < 0xa0) {
75 nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
76 nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
77 nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
78 } else {
79 nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
80 nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
81 nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
82 }
83 }
84 }
85
68 nv_wr32(dev, 0x400108, 0xffffffff); 86 nv_wr32(dev, 0x400108, 0xffffffff);
69 87
70 nv_wr32(dev, 0x400824, 0x00004000); 88 nv_wr32(dev, 0x400824, 0x00004000);
@@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan)
229 nouveau_grctx_vals_load(dev, ctx); 247 nouveau_grctx_vals_load(dev, ctx);
230 } 248 }
231 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); 249 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
232 if ((dev_priv->chipset & 0xf0) == 0xa0)
233 nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
234 else
235 nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
236 dev_priv->engine.instmem.finish_access(dev); 250 dev_priv->engine.instmem.finish_access(dev);
237 251
238 return 0; 252 return 0;
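
Editor's note: the init loop above enables trap reporting per TP, and the register stride depends on the chipset generation: 4 KiB per unit before NVA0, 2 KiB from NVA0 on. A small sketch of that address calculation (the helper itself is illustrative; the base constants mirror the hunk):

#include <stdint.h>

static uint32_t tp_trap_reg(int chipset, int unit, uint32_t base_old,
			    uint32_t base_new)
{
	if (chipset < 0xa0)
		return base_old + ((uint32_t)unit << 12);  /* e.g. 0x408900 + i*0x1000 */
	return base_new + ((uint32_t)unit << 11);          /* e.g. 0x408600 + i*0x800 */
}
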
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d105fcd42ca0..546b31949a30 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -64,6 +64,9 @@
64#define CP_FLAG_ALWAYS ((2 * 32) + 13) 64#define CP_FLAG_ALWAYS ((2 * 32) + 13)
65#define CP_FLAG_ALWAYS_FALSE 0 65#define CP_FLAG_ALWAYS_FALSE 0
66#define CP_FLAG_ALWAYS_TRUE 1 66#define CP_FLAG_ALWAYS_TRUE 1
67#define CP_FLAG_INTR ((2 * 32) + 15)
68#define CP_FLAG_INTR_NOT_PENDING 0
69#define CP_FLAG_INTR_PENDING 1
67 70
68#define CP_CTX 0x00100000 71#define CP_CTX 0x00100000
69#define CP_CTX_COUNT 0x000f0000 72#define CP_CTX_COUNT 0x000f0000
@@ -214,6 +217,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
214 cp_name(ctx, cp_setup_save); 217 cp_name(ctx, cp_setup_save);
215 cp_set (ctx, UNK1D, SET); 218 cp_set (ctx, UNK1D, SET);
216 cp_wait(ctx, STATUS, BUSY); 219 cp_wait(ctx, STATUS, BUSY);
220 cp_wait(ctx, INTR, PENDING);
221 cp_bra (ctx, STATUS, BUSY, cp_setup_save);
217 cp_set (ctx, UNK01, SET); 222 cp_set (ctx, UNK01, SET);
218 cp_set (ctx, SWAP_DIRECTION, SAVE); 223 cp_set (ctx, SWAP_DIRECTION, SAVE);
219 224
@@ -269,7 +274,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
269 int offset, base; 274 int offset, base;
270 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 275 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
271 276
272 /* 0800 */ 277 /* 0800: DISPATCH */
273 cp_ctx(ctx, 0x400808, 7); 278 cp_ctx(ctx, 0x400808, 7);
274 gr_def(ctx, 0x400814, 0x00000030); 279 gr_def(ctx, 0x400814, 0x00000030);
275 cp_ctx(ctx, 0x400834, 0x32); 280 cp_ctx(ctx, 0x400834, 0x32);
@@ -300,7 +305,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
300 gr_def(ctx, 0x400b20, 0x0001629d); 305 gr_def(ctx, 0x400b20, 0x0001629d);
301 } 306 }
302 307
303 /* 0C00 */ 308 /* 0C00: VFETCH */
304 cp_ctx(ctx, 0x400c08, 0x2); 309 cp_ctx(ctx, 0x400c08, 0x2);
305 gr_def(ctx, 0x400c08, 0x0000fe0c); 310 gr_def(ctx, 0x400c08, 0x0000fe0c);
306 311
@@ -326,7 +331,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
326 cp_ctx(ctx, 0x401540, 0x5); 331 cp_ctx(ctx, 0x401540, 0x5);
327 gr_def(ctx, 0x401550, 0x00001018); 332 gr_def(ctx, 0x401550, 0x00001018);
328 333
329 /* 1800 */ 334 /* 1800: STREAMOUT */
330 cp_ctx(ctx, 0x401814, 0x1); 335 cp_ctx(ctx, 0x401814, 0x1);
331 gr_def(ctx, 0x401814, 0x000000ff); 336 gr_def(ctx, 0x401814, 0x000000ff);
332 if (dev_priv->chipset == 0x50) { 337 if (dev_priv->chipset == 0x50) {
@@ -641,7 +646,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
641 if (dev_priv->chipset == 0x50) 646 if (dev_priv->chipset == 0x50)
642 cp_ctx(ctx, 0x4063e0, 0x1); 647 cp_ctx(ctx, 0x4063e0, 0x1);
643 648
644 /* 6800 */ 649 /* 6800: M2MF */
645 if (dev_priv->chipset < 0x90) { 650 if (dev_priv->chipset < 0x90) {
646 cp_ctx(ctx, 0x406814, 0x2b); 651 cp_ctx(ctx, 0x406814, 0x2b);
647 gr_def(ctx, 0x406818, 0x00000f80); 652 gr_def(ctx, 0x406818, 0x00000f80);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index ed38262d9985..3c91312dea9a 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -50,7 +50,7 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
50radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ 50radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
51 radeon_irq.o r300_cmdbuf.o r600_cp.o 51 radeon_irq.o r300_cmdbuf.o r600_cp.o
52# add KMS driver 52# add KMS driver
53radeon-y += radeon_device.o radeon_kms.o \ 53radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
54 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ 54 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
55 atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \ 55 atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
56 radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \ 56 radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 8538b88eda35..07b7ebf1f466 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -53,15 +53,17 @@
53 53
54typedef struct { 54typedef struct {
55 struct atom_context *ctx; 55 struct atom_context *ctx;
56
57 uint32_t *ps, *ws; 56 uint32_t *ps, *ws;
58 int ps_shift; 57 int ps_shift;
59 uint16_t start; 58 uint16_t start;
59 unsigned last_jump;
60 unsigned long last_jump_jiffies;
61 bool abort;
60} atom_exec_context; 62} atom_exec_context;
61 63
62int atom_debug = 0; 64int atom_debug = 0;
63static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); 65static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
64void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); 66int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
65 67
66static uint32_t atom_arg_mask[8] = 68static uint32_t atom_arg_mask[8] =
67 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 69 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -605,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
605static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) 607static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
606{ 608{
607 int idx = U8((*ptr)++); 609 int idx = U8((*ptr)++);
610 int r = 0;
611
608 if (idx < ATOM_TABLE_NAMES_CNT) 612 if (idx < ATOM_TABLE_NAMES_CNT)
609 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); 613 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
610 else 614 else
611 SDEBUG(" table: %d\n", idx); 615 SDEBUG(" table: %d\n", idx);
612 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) 616 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
613 atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); 617 r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
618 if (r) {
619 ctx->abort = true;
620 }
614} 621}
615 622
616static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) 623static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -674,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
674static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) 681static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
675{ 682{
676 int execute = 0, target = U16(*ptr); 683 int execute = 0, target = U16(*ptr);
684 unsigned long cjiffies;
685
677 (*ptr) += 2; 686 (*ptr) += 2;
678 switch (arg) { 687 switch (arg) {
679 case ATOM_COND_ABOVE: 688 case ATOM_COND_ABOVE:
@@ -701,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
701 if (arg != ATOM_COND_ALWAYS) 710 if (arg != ATOM_COND_ALWAYS)
702 SDEBUG(" taken: %s\n", execute ? "yes" : "no"); 711 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
703 SDEBUG(" target: 0x%04X\n", target); 712 SDEBUG(" target: 0x%04X\n", target);
704 if (execute) 713 if (execute) {
714 if (ctx->last_jump == (ctx->start + target)) {
715 cjiffies = jiffies;
716 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
717 cjiffies -= ctx->last_jump_jiffies;
718 if ((jiffies_to_msecs(cjiffies) > 1000)) {
719 DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
720 ctx->abort = true;
721 }
722 } else {
 723 /* jiffies wrapped around; just wait a little longer */
724 ctx->last_jump_jiffies = jiffies;
725 }
726 } else {
727 ctx->last_jump = ctx->start + target;
728 ctx->last_jump_jiffies = jiffies;
729 }
705 *ptr = ctx->start + target; 730 *ptr = ctx->start + target;
731 }
706} 732}
707 733
708static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) 734static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
@@ -1105,7 +1131,7 @@ static struct {
1105 atom_op_shr, ATOM_ARG_MC}, { 1131 atom_op_shr, ATOM_ARG_MC}, {
1106atom_op_debug, 0},}; 1132atom_op_debug, 0},};
1107 1133
1108static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) 1134static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1109{ 1135{
1110 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1136 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1111 int len, ws, ps, ptr; 1137 int len, ws, ps, ptr;
@@ -1113,7 +1139,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1113 atom_exec_context ectx; 1139 atom_exec_context ectx;
1114 1140
1115 if (!base) 1141 if (!base)
1116 return; 1142 return -EINVAL;
1117 1143
1118 len = CU16(base + ATOM_CT_SIZE_PTR); 1144 len = CU16(base + ATOM_CT_SIZE_PTR);
1119 ws = CU8(base + ATOM_CT_WS_PTR); 1145 ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1126,6 +1152,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1126 ectx.ps_shift = ps / 4; 1152 ectx.ps_shift = ps / 4;
1127 ectx.start = base; 1153 ectx.start = base;
1128 ectx.ps = params; 1154 ectx.ps = params;
1155 ectx.abort = false;
1156 ectx.last_jump = 0;
1129 if (ws) 1157 if (ws)
1130 ectx.ws = kzalloc(4 * ws, GFP_KERNEL); 1158 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
1131 else 1159 else
@@ -1138,6 +1166,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1138 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); 1166 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1139 else 1167 else
1140 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); 1168 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1169 if (ectx.abort) {
1170 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1171 base, len, ws, ps, ptr - 1);
1172 return -EINVAL;
1173 }
1141 1174
1142 if (op < ATOM_OP_CNT && op > 0) 1175 if (op < ATOM_OP_CNT && op > 0)
1143 opcode_table[op].func(&ectx, &ptr, 1176 opcode_table[op].func(&ectx, &ptr,
@@ -1153,10 +1186,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1153 1186
1154 if (ws) 1187 if (ws)
1155 kfree(ectx.ws); 1188 kfree(ectx.ws);
1189 return 0;
1156} 1190}
1157 1191
1158void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1192int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1159{ 1193{
1194 int r;
1195
1160 mutex_lock(&ctx->mutex); 1196 mutex_lock(&ctx->mutex);
1161 /* reset reg block */ 1197 /* reset reg block */
1162 ctx->reg_block = 0; 1198 ctx->reg_block = 0;
@@ -1164,8 +1200,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1164 ctx->fb_base = 0; 1200 ctx->fb_base = 0;
1165 /* reset io mode */ 1201 /* reset io mode */
1166 ctx->io_mode = ATOM_IO_MM; 1202 ctx->io_mode = ATOM_IO_MM;
1167 atom_execute_table_locked(ctx, index, params); 1203 r = atom_execute_table_locked(ctx, index, params);
1168 mutex_unlock(&ctx->mutex); 1204 mutex_unlock(&ctx->mutex);
1205 return r;
1169} 1206}
1170 1207
1171static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1208static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@@ -1249,9 +1286,7 @@ int atom_asic_init(struct atom_context *ctx)
1249 1286
1250 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) 1287 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1251 return 1; 1288 return 1;
1252 atom_execute_table(ctx, ATOM_CMD_INIT, ps); 1289 return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1253
1254 return 0;
1255} 1290}
1256 1291
1257void atom_destroy(struct atom_context *ctx) 1292void atom_destroy(struct atom_context *ctx)
@@ -1261,12 +1296,16 @@ void atom_destroy(struct atom_context *ctx)
1261 kfree(ctx); 1296 kfree(ctx);
1262} 1297}
1263 1298
1264void atom_parse_data_header(struct atom_context *ctx, int index, 1299bool atom_parse_data_header(struct atom_context *ctx, int index,
1265 uint16_t * size, uint8_t * frev, uint8_t * crev, 1300 uint16_t * size, uint8_t * frev, uint8_t * crev,
1266 uint16_t * data_start) 1301 uint16_t * data_start)
1267{ 1302{
1268 int offset = index * 2 + 4; 1303 int offset = index * 2 + 4;
1269 int idx = CU16(ctx->data_table + offset); 1304 int idx = CU16(ctx->data_table + offset);
1305 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1306
1307 if (!mdt[index])
1308 return false;
1270 1309
1271 if (size) 1310 if (size)
1272 *size = CU16(idx); 1311 *size = CU16(idx);
@@ -1275,38 +1314,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
1275 if (crev) 1314 if (crev)
1276 *crev = CU8(idx + 3); 1315 *crev = CU8(idx + 3);
1277 *data_start = idx; 1316 *data_start = idx;
1278 return; 1317 return true;
1279} 1318}
1280 1319
1281void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, 1320bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1282 uint8_t * crev) 1321 uint8_t * crev)
1283{ 1322{
1284 int offset = index * 2 + 4; 1323 int offset = index * 2 + 4;
1285 int idx = CU16(ctx->cmd_table + offset); 1324 int idx = CU16(ctx->cmd_table + offset);
1325 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1326
1327 if (!mct[index])
1328 return false;
1286 1329
1287 if (frev) 1330 if (frev)
1288 *frev = CU8(idx + 2); 1331 *frev = CU8(idx + 2);
1289 if (crev) 1332 if (crev)
1290 *crev = CU8(idx + 3); 1333 *crev = CU8(idx + 3);
1291 return; 1334 return true;
1292} 1335}
1293 1336
1294int atom_allocate_fb_scratch(struct atom_context *ctx) 1337int atom_allocate_fb_scratch(struct atom_context *ctx)
1295{ 1338{
1296 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); 1339 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1297 uint16_t data_offset; 1340 uint16_t data_offset;
1298 int usage_bytes; 1341 int usage_bytes = 0;
1299 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; 1342 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1300 1343
1301 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); 1344 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1345 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1302 1346
1303 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 1347 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1348 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
1349 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1304 1350
1305 DRM_DEBUG("atom firmware requested %08x %dkb\n", 1351 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1306 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, 1352 }
1307 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1308
1309 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1310 if (usage_bytes == 0) 1353 if (usage_bytes == 0)
1311 usage_bytes = 20 * 1024; 1354 usage_bytes = 20 * 1024;
1312 /* allocate some scratch memory */ 1355 /* allocate some scratch memory */
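
Editor's note: atom_op_jump() above now aborts a script that keeps jumping back to the same target for more than a second, using jiffies as the timebase. A user-space analogue of that same-target watchdog (names and the surrounding interpreter loop are invented for illustration only):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct jump_guard {
	uint32_t last_target;
	struct timespec last_seen;
	bool armed;
};

static bool jump_guard_trips(struct jump_guard *g, uint32_t target)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (!g->armed || g->last_target != target) {
		g->last_target = target;   /* new loop head: restart the timer */
		g->last_seen = now;
		g->armed = true;
		return false;
	}
	/* Same jump target for more than a second: assume a stuck script. */
	return (now.tv_sec - g->last_seen.tv_sec) > 1;
}
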
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index bc73781423a1..cd1b64ab5ca7 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -140,11 +140,13 @@ struct atom_context {
140extern int atom_debug; 140extern int atom_debug;
141 141
142struct atom_context *atom_parse(struct card_info *, void *); 142struct atom_context *atom_parse(struct card_info *, void *);
143void atom_execute_table(struct atom_context *, int, uint32_t *); 143int atom_execute_table(struct atom_context *, int, uint32_t *);
144int atom_asic_init(struct atom_context *); 144int atom_asic_init(struct atom_context *);
145void atom_destroy(struct atom_context *); 145void atom_destroy(struct atom_context *);
146void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); 146bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
147void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); 147 uint8_t *frev, uint8_t *crev, uint16_t *data_start);
148bool atom_parse_cmd_header(struct atom_context *ctx, int index,
149 uint8_t *frev, uint8_t *crev);
148int atom_allocate_fb_scratch(struct atom_context *ctx); 150int atom_allocate_fb_scratch(struct atom_context *ctx);
149#include "atom-types.h" 151#include "atom-types.h"
150#include "atombios.h" 152#include "atombios.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dd9fdf560611..fd4ef6d18849 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -353,12 +353,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
353 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 353 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
354} 354}
355 355
356static void atombios_disable_ss(struct drm_crtc *crtc)
357{
358 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
359 struct drm_device *dev = crtc->dev;
360 struct radeon_device *rdev = dev->dev_private;
361 u32 ss_cntl;
362
363 if (ASIC_IS_DCE4(rdev)) {
364 switch (radeon_crtc->pll_id) {
365 case ATOM_PPLL1:
366 ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
367 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
368 WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
369 break;
370 case ATOM_PPLL2:
371 ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
372 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
373 WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
374 break;
375 case ATOM_DCPLL:
376 case ATOM_PPLL_INVALID:
377 return;
378 }
379 } else if (ASIC_IS_AVIVO(rdev)) {
380 switch (radeon_crtc->pll_id) {
381 case ATOM_PPLL1:
382 ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
383 ss_cntl &= ~1;
384 WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
385 break;
386 case ATOM_PPLL2:
387 ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
388 ss_cntl &= ~1;
389 WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
390 break;
391 case ATOM_DCPLL:
392 case ATOM_PPLL_INVALID:
393 return;
394 }
395 }
396}
397
398
356union atom_enable_ss { 399union atom_enable_ss {
357 ENABLE_LVDS_SS_PARAMETERS legacy; 400 ENABLE_LVDS_SS_PARAMETERS legacy;
358 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; 401 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
359}; 402};
360 403
361static void atombios_set_ss(struct drm_crtc *crtc, int enable) 404static void atombios_enable_ss(struct drm_crtc *crtc)
362{ 405{
363 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 406 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
364 struct drm_device *dev = crtc->dev; 407 struct drm_device *dev = crtc->dev;
@@ -387,9 +430,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
387 step = dig->ss->step; 430 step = dig->ss->step;
388 delay = dig->ss->delay; 431 delay = dig->ss->delay;
389 range = dig->ss->range; 432 range = dig->ss->range;
390 } else if (enable) 433 } else
391 return; 434 return;
392 } else if (enable) 435 } else
393 return; 436 return;
394 break; 437 break;
395 } 438 }
@@ -406,13 +449,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
406 args.v1.ucSpreadSpectrumDelay = delay; 449 args.v1.ucSpreadSpectrumDelay = delay;
407 args.v1.ucSpreadSpectrumRange = range; 450 args.v1.ucSpreadSpectrumRange = range;
408 args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 451 args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
409 args.v1.ucEnable = enable; 452 args.v1.ucEnable = ATOM_ENABLE;
410 } else { 453 } else {
411 args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); 454 args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
412 args.legacy.ucSpreadSpectrumType = type; 455 args.legacy.ucSpreadSpectrumType = type;
413 args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; 456 args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
414 args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; 457 args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
415 args.legacy.ucEnable = enable; 458 args.legacy.ucEnable = ATOM_ENABLE;
416 } 459 }
417 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 460 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
418} 461}
@@ -478,11 +521,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
478 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 521 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
479 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 522 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
480 adjusted_clock = mode->clock * 2; 523 adjusted_clock = mode->clock * 2;
481 /* LVDS PLL quirks */
482 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
483 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
484 pll->algo = dig->pll_algo;
485 }
486 } else { 524 } else {
487 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 525 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
488 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 526 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -503,8 +541,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
503 int index; 541 int index;
504 542
505 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 543 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
506 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 544 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
507 &crev); 545 &crev))
546 return adjusted_clock;
508 547
509 memset(&args, 0, sizeof(args)); 548 memset(&args, 0, sizeof(args));
510 549
@@ -542,11 +581,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
542 } 581 }
543 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 582 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
544 /* may want to enable SS on DP/eDP eventually */ 583 /* may want to enable SS on DP/eDP eventually */
545 args.v3.sInput.ucDispPllConfig |= 584 /*args.v3.sInput.ucDispPllConfig |=
546 DISPPLL_CONFIG_SS_ENABLE; 585 DISPPLL_CONFIG_SS_ENABLE;*/
547 if (mode->clock > 165000) 586 if (encoder_mode == ATOM_ENCODER_MODE_DP)
548 args.v3.sInput.ucDispPllConfig |= 587 args.v3.sInput.ucDispPllConfig |=
549 DISPPLL_CONFIG_DUAL_LINK; 588 DISPPLL_CONFIG_COHERENT_MODE;
589 else {
590 if (mode->clock > 165000)
591 args.v3.sInput.ucDispPllConfig |=
592 DISPPLL_CONFIG_DUAL_LINK;
593 }
550 } 594 }
551 atom_execute_table(rdev->mode_info.atom_context, 595 atom_execute_table(rdev->mode_info.atom_context,
552 index, (uint32_t *)&args); 596 index, (uint32_t *)&args);
@@ -592,8 +636,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
592 memset(&args, 0, sizeof(args)); 636 memset(&args, 0, sizeof(args));
593 637
594 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 638 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
595 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 639 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
596 &crev); 640 &crev))
641 return;
597 642
598 switch (frev) { 643 switch (frev) {
599 case 1: 644 case 1:
@@ -667,8 +712,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
667 &ref_div, &post_div); 712 &ref_div, &post_div);
668 713
669 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 714 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
670 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 715 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
671 &crev); 716 &crev))
717 return;
672 718
673 switch (frev) { 719 switch (frev) {
674 case 1: 720 case 1:
@@ -1083,15 +1129,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1083 1129
1084 /* TODO color tiling */ 1130 /* TODO color tiling */
1085 1131
1086 /* pick pll */ 1132 atombios_disable_ss(crtc);
1087 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1088
1089 atombios_set_ss(crtc, 0);
1090 /* always set DCPLL */ 1133 /* always set DCPLL */
1091 if (ASIC_IS_DCE4(rdev)) 1134 if (ASIC_IS_DCE4(rdev))
1092 atombios_crtc_set_dcpll(crtc); 1135 atombios_crtc_set_dcpll(crtc);
1093 atombios_crtc_set_pll(crtc, adjusted_mode); 1136 atombios_crtc_set_pll(crtc, adjusted_mode);
1094 atombios_set_ss(crtc, 1); 1137 atombios_enable_ss(crtc);
1095 1138
1096 if (ASIC_IS_DCE4(rdev)) 1139 if (ASIC_IS_DCE4(rdev))
1097 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1140 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1120,6 +1163,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1120 1163
1121static void atombios_crtc_prepare(struct drm_crtc *crtc) 1164static void atombios_crtc_prepare(struct drm_crtc *crtc)
1122{ 1165{
1166 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1167
1168 /* pick pll */
1169 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1170
1123 atombios_lock_crtc(crtc, ATOM_ENABLE); 1171 atombios_lock_crtc(crtc, ATOM_ENABLE);
1124 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1172 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1125} 1173}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8a133bda00a2..28b31c64f48d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder,
745 >> DP_TRAIN_PRE_EMPHASIS_SHIFT); 745 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
746 746
747 /* disable the training pattern on the sink */ 747 /* disable the training pattern on the sink */
748 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
749
750 /* disable the training pattern on the source */
748 if (ASIC_IS_DCE4(rdev)) 751 if (ASIC_IS_DCE4(rdev))
749 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); 752 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
750 else 753 else
751 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, 754 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
752 dig_connector->dp_clock, enc_id, 0); 755 dig_connector->dp_clock, enc_id, 0);
753
754 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
755 dig_connector->dp_clock, enc_id, 0);
756} 756}
757 757
758int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 758int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 438226a2290a..e8f447e20507 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -26,6 +26,7 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include "drmP.h" 27#include "drmP.h"
28#include "radeon.h" 28#include "radeon.h"
29#include "radeon_asic.h"
29#include "radeon_drm.h" 30#include "radeon_drm.h"
30#include "rv770d.h" 31#include "rv770d.h"
31#include "atom.h" 32#include "atom.h"
@@ -437,7 +438,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
437 438
438int evergreen_mc_init(struct radeon_device *rdev) 439int evergreen_mc_init(struct radeon_device *rdev)
439{ 440{
440 fixed20_12 a;
441 u32 tmp; 441 u32 tmp;
442 int chansize, numchan; 442 int chansize, numchan;
443 443
@@ -482,12 +482,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
482 rdev->mc.real_vram_size = rdev->mc.aper_size; 482 rdev->mc.real_vram_size = rdev->mc.aper_size;
483 } 483 }
484 r600_vram_gtt_location(rdev, &rdev->mc); 484 r600_vram_gtt_location(rdev, &rdev->mc);
485 /* FIXME: we should enforce default clock in case GPU is not in 485 radeon_update_bandwidth_info(rdev);
486 * default setup 486
487 */
488 a.full = rfixed_const(100);
489 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
490 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
491 return 0; 487 return 0;
492} 488}
493 489
@@ -747,6 +743,7 @@ int evergreen_init(struct radeon_device *rdev)
747 743
748void evergreen_fini(struct radeon_device *rdev) 744void evergreen_fini(struct radeon_device *rdev)
749{ 745{
746 radeon_pm_fini(rdev);
750 evergreen_suspend(rdev); 747 evergreen_suspend(rdev);
751#if 0 748#if 0
752 r600_blit_fini(rdev); 749 r600_blit_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index a6e6f179b1d2..c9580497ede4 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -32,6 +32,7 @@
32#include "radeon_drm.h" 32#include "radeon_drm.h"
33#include "radeon_reg.h" 33#include "radeon_reg.h"
34#include "radeon.h" 34#include "radeon.h"
35#include "radeon_asic.h"
35#include "r100d.h" 36#include "r100d.h"
36#include "rs100d.h" 37#include "rs100d.h"
37#include "rv200d.h" 38#include "rv200d.h"
@@ -236,9 +237,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
236 237
237void r100_pci_gart_fini(struct radeon_device *rdev) 238void r100_pci_gart_fini(struct radeon_device *rdev)
238{ 239{
240 radeon_gart_fini(rdev);
239 r100_pci_gart_disable(rdev); 241 r100_pci_gart_disable(rdev);
240 radeon_gart_table_ram_free(rdev); 242 radeon_gart_table_ram_free(rdev);
241 radeon_gart_fini(rdev);
242} 243}
243 244
244int r100_irq_set(struct radeon_device *rdev) 245int r100_irq_set(struct radeon_device *rdev)
@@ -313,10 +314,12 @@ int r100_irq_process(struct radeon_device *rdev)
313 /* Vertical blank interrupts */ 314 /* Vertical blank interrupts */
314 if (status & RADEON_CRTC_VBLANK_STAT) { 315 if (status & RADEON_CRTC_VBLANK_STAT) {
315 drm_handle_vblank(rdev->ddev, 0); 316 drm_handle_vblank(rdev->ddev, 0);
317 rdev->pm.vblank_sync = true;
316 wake_up(&rdev->irq.vblank_queue); 318 wake_up(&rdev->irq.vblank_queue);
317 } 319 }
318 if (status & RADEON_CRTC2_VBLANK_STAT) { 320 if (status & RADEON_CRTC2_VBLANK_STAT) {
319 drm_handle_vblank(rdev->ddev, 1); 321 drm_handle_vblank(rdev->ddev, 1);
322 rdev->pm.vblank_sync = true;
320 wake_up(&rdev->irq.vblank_queue); 323 wake_up(&rdev->irq.vblank_queue);
321 } 324 }
322 if (status & RADEON_FP_DETECT_STAT) { 325 if (status & RADEON_FP_DETECT_STAT) {
@@ -742,6 +745,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
742 udelay(10); 745 udelay(10);
743 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 746 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
744 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); 747 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
748 /* protect against crazy HW on resume */
749 rdev->cp.wptr &= rdev->cp.ptr_mask;
745 /* Set cp mode to bus mastering & enable cp*/ 750 /* Set cp mode to bus mastering & enable cp*/
746 WREG32(RADEON_CP_CSQ_MODE, 751 WREG32(RADEON_CP_CSQ_MODE,
747 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 752 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
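The added masking above guards against a bogus write-pointer value read back from the ring-buffer register on resume; with a power-of-two ring, ptr_mask is the ring size minus one and the AND simply wraps the pointer back into range. A minimal illustration with an arbitrary ring size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t ring_size = 1024;           /* example ring length in dwords */
	const uint32_t ptr_mask  = ring_size - 1;  /* valid only for power-of-two sizes */
	uint32_t wptr = 0x12345;                   /* pretend bogus value read back from the register */

	wptr &= ptr_mask;                          /* same operation as rdev->cp.wptr &= rdev->cp.ptr_mask */
	printf("wrapped wptr = %u\n", wptr);       /* 0x12345 & 0x3ff = 0x345 = 837 */
	return 0;
}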
@@ -1805,6 +1810,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
1805{ 1810{
1806 struct drm_device *dev = rdev->ddev; 1811 struct drm_device *dev = rdev->ddev;
1807 bool force_dac2 = false; 1812 bool force_dac2 = false;
1813 u32 tmp;
1808 1814
1809 /* set these so they don't interfere with anything */ 1815 /* set these so they don't interfere with anything */
1810 WREG32(RADEON_OV0_SCALE_CNTL, 0); 1816 WREG32(RADEON_OV0_SCALE_CNTL, 0);
@@ -1876,6 +1882,12 @@ void r100_set_common_regs(struct radeon_device *rdev)
1876 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1882 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1877 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 1883 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
1878 } 1884 }
1885
1886 /* switch PM block to ACPI mode */
1887 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
1888 tmp &= ~RADEON_PM_MODE_SEL;
1889 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
1890
1879} 1891}
1880 1892
1881/* 1893/*
@@ -2023,6 +2035,7 @@ void r100_mc_init(struct radeon_device *rdev)
2023 radeon_vram_location(rdev, &rdev->mc, base); 2035 radeon_vram_location(rdev, &rdev->mc, base);
2024 if (!(rdev->flags & RADEON_IS_AGP)) 2036 if (!(rdev->flags & RADEON_IS_AGP))
2025 radeon_gtt_location(rdev, &rdev->mc); 2037 radeon_gtt_location(rdev, &rdev->mc);
2038 radeon_update_bandwidth_info(rdev);
2026} 2039}
2027 2040
2028 2041
@@ -2386,6 +2399,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2386 uint32_t pixel_bytes1 = 0; 2399 uint32_t pixel_bytes1 = 0;
2387 uint32_t pixel_bytes2 = 0; 2400 uint32_t pixel_bytes2 = 0;
2388 2401
2402 radeon_update_display_priority(rdev);
2403
2389 if (rdev->mode_info.crtcs[0]->base.enabled) { 2404 if (rdev->mode_info.crtcs[0]->base.enabled) {
2390 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 2405 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2391 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; 2406 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
@@ -2414,11 +2429,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2414 /* 2429 /*
2415 * determine is there is enough bw for current mode 2430 * determine is there is enough bw for current mode
2416 */ 2431 */
2417 mclk_ff.full = rfixed_const(rdev->clock.default_mclk); 2432 sclk_ff = rdev->pm.sclk;
2418 temp_ff.full = rfixed_const(100); 2433 mclk_ff = rdev->pm.mclk;
2419 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
2420 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
2421 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
2422 2434
2423 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 2435 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2424 temp_ff.full = rfixed_const(temp); 2436 temp_ff.full = rfixed_const(temp);
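The removed lines converted the default clocks into the driver's fixed20_12 representation by dividing by 100; the replacement reads the values already stored in rdev->pm, apparently filled by the radeon_update_bandwidth_info() calls added elsewhere in this patch. A standalone sketch of that arithmetic, assuming the 20.12 bit split implied by the type name and treating default_sclk as a value in 10 kHz units (both are assumptions; the real rfixed_* helpers are not reproduced here):

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t full; } fixed20_12;   /* 20 integer bits, 12 fractional bits */

static fixed20_12 fx_const(uint32_t a)          /* sketch of rfixed_const() */
{
	fixed20_12 r = { a << 12 };
	return r;
}

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)   /* sketch of rfixed_div() */
{
	fixed20_12 r = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
	return r;
}

int main(void)
{
	uint32_t default_sclk = 60000;              /* example: 60000 * 10 kHz = 600 MHz */
	fixed20_12 hundred = fx_const(100);
	fixed20_12 sclk = fx_div(fx_const(default_sclk), hundred);

	printf("sclk = %u.%03u MHz (raw 0x%x)\n",
	       sclk.full >> 12, (sclk.full & 0xfff) * 1000 / 4096, sclk.full);
	return 0;
}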
@@ -3441,6 +3453,7 @@ int r100_suspend(struct radeon_device *rdev)
3441 3453
3442void r100_fini(struct radeon_device *rdev) 3454void r100_fini(struct radeon_device *rdev)
3443{ 3455{
3456 radeon_pm_fini(rdev);
3444 r100_cp_fini(rdev); 3457 r100_cp_fini(rdev);
3445 r100_wb_fini(rdev); 3458 r100_wb_fini(rdev);
3446 r100_ib_fini(rdev); 3459 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 1146c9909c2c..85617c311212 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -30,6 +30,7 @@
30#include "radeon_drm.h" 30#include "radeon_drm.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon.h" 32#include "radeon.h"
33#include "radeon_asic.h"
33 34
34#include "r100d.h" 35#include "r100d.h"
35#include "r200_reg_safe.h" 36#include "r200_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 5eeb81061a21..561048a7c0a4 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -31,6 +31,7 @@
31#include "drm.h" 31#include "drm.h"
32#include "radeon_reg.h" 32#include "radeon_reg.h"
33#include "radeon.h" 33#include "radeon.h"
34#include "radeon_asic.h"
34#include "radeon_drm.h" 35#include "radeon_drm.h"
35#include "r100_track.h" 36#include "r100_track.h"
36#include "r300d.h" 37#include "r300d.h"
@@ -165,9 +166,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
165 166
166void rv370_pcie_gart_fini(struct radeon_device *rdev) 167void rv370_pcie_gart_fini(struct radeon_device *rdev)
167{ 168{
169 radeon_gart_fini(rdev);
168 rv370_pcie_gart_disable(rdev); 170 rv370_pcie_gart_disable(rdev);
169 radeon_gart_table_vram_free(rdev); 171 radeon_gart_table_vram_free(rdev);
170 radeon_gart_fini(rdev);
171} 172}
172 173
173void r300_fence_ring_emit(struct radeon_device *rdev, 174void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -482,6 +483,7 @@ void r300_mc_init(struct radeon_device *rdev)
482 radeon_vram_location(rdev, &rdev->mc, base); 483 radeon_vram_location(rdev, &rdev->mc, base);
483 if (!(rdev->flags & RADEON_IS_AGP)) 484 if (!(rdev->flags & RADEON_IS_AGP))
484 radeon_gtt_location(rdev, &rdev->mc); 485 radeon_gtt_location(rdev, &rdev->mc);
486 radeon_update_bandwidth_info(rdev);
485} 487}
486 488
487void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) 489void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -1335,6 +1337,7 @@ int r300_suspend(struct radeon_device *rdev)
1335 1337
1336void r300_fini(struct radeon_device *rdev) 1338void r300_fini(struct radeon_device *rdev)
1337{ 1339{
1340 radeon_pm_fini(rdev);
1338 r100_cp_fini(rdev); 1341 r100_cp_fini(rdev);
1339 r100_wb_fini(rdev); 1342 r100_wb_fini(rdev);
1340 r100_ib_fini(rdev); 1343 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 00bc77f2d201..3dc968c9f5a4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -30,6 +30,7 @@
30#include "drmP.h" 30#include "drmP.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon.h" 32#include "radeon.h"
33#include "radeon_asic.h"
33#include "atom.h" 34#include "atom.h"
34#include "r100d.h" 35#include "r100d.h"
35#include "r420d.h" 36#include "r420d.h"
@@ -267,6 +268,7 @@ int r420_suspend(struct radeon_device *rdev)
267 268
268void r420_fini(struct radeon_device *rdev) 269void r420_fini(struct radeon_device *rdev)
269{ 270{
271 radeon_pm_fini(rdev);
270 r100_cp_fini(rdev); 272 r100_cp_fini(rdev);
271 r100_wb_fini(rdev); 273 r100_wb_fini(rdev);
272 r100_ib_fini(rdev); 274 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 2b8a5dd13516..3c44b8d39318 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -27,6 +27,7 @@
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "radeon_asic.h"
30#include "atom.h" 31#include "atom.h"
31#include "r520d.h" 32#include "r520d.h"
32 33
@@ -121,19 +122,13 @@ static void r520_vram_get_type(struct radeon_device *rdev)
121 122
122void r520_mc_init(struct radeon_device *rdev) 123void r520_mc_init(struct radeon_device *rdev)
123{ 124{
124 fixed20_12 a;
125 125
126 r520_vram_get_type(rdev); 126 r520_vram_get_type(rdev);
127 r100_vram_init_sizes(rdev); 127 r100_vram_init_sizes(rdev);
128 radeon_vram_location(rdev, &rdev->mc, 0); 128 radeon_vram_location(rdev, &rdev->mc, 0);
129 if (!(rdev->flags & RADEON_IS_AGP)) 129 if (!(rdev->flags & RADEON_IS_AGP))
130 radeon_gtt_location(rdev, &rdev->mc); 130 radeon_gtt_location(rdev, &rdev->mc);
131 /* FIXME: we should enforce default clock in case GPU is not in 131 radeon_update_bandwidth_info(rdev);
132 * default setup
133 */
134 a.full = rfixed_const(100);
135 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
136 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
137} 132}
138 133
139void r520_mc_program(struct radeon_device *rdev) 134void r520_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8ea3658eee99..8f3454e2056a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "radeon_drm.h" 33#include "radeon_drm.h"
34#include "radeon.h" 34#include "radeon.h"
35#include "radeon_asic.h"
35#include "radeon_mode.h" 36#include "radeon_mode.h"
36#include "r600d.h" 37#include "r600d.h"
37#include "atom.h" 38#include "atom.h"
@@ -492,9 +493,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
492 493
493void r600_pcie_gart_fini(struct radeon_device *rdev) 494void r600_pcie_gart_fini(struct radeon_device *rdev)
494{ 495{
496 radeon_gart_fini(rdev);
495 r600_pcie_gart_disable(rdev); 497 r600_pcie_gart_disable(rdev);
496 radeon_gart_table_vram_free(rdev); 498 radeon_gart_table_vram_free(rdev);
497 radeon_gart_fini(rdev);
498} 499}
499 500
500void r600_agp_enable(struct radeon_device *rdev) 501void r600_agp_enable(struct radeon_device *rdev)
@@ -676,7 +677,6 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
676 677
677int r600_mc_init(struct radeon_device *rdev) 678int r600_mc_init(struct radeon_device *rdev)
678{ 679{
679 fixed20_12 a;
680 u32 tmp; 680 u32 tmp;
681 int chansize, numchan; 681 int chansize, numchan;
682 682
@@ -720,14 +720,10 @@ int r600_mc_init(struct radeon_device *rdev)
720 rdev->mc.real_vram_size = rdev->mc.aper_size; 720 rdev->mc.real_vram_size = rdev->mc.aper_size;
721 } 721 }
722 r600_vram_gtt_location(rdev, &rdev->mc); 722 r600_vram_gtt_location(rdev, &rdev->mc);
723 /* FIXME: we should enforce default clock in case GPU is not in 723
724 * default setup
725 */
726 a.full = rfixed_const(100);
727 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
728 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
729 if (rdev->flags & RADEON_IS_IGP) 724 if (rdev->flags & RADEON_IS_IGP)
730 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 725 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
726 radeon_update_bandwidth_info(rdev);
731 return 0; 727 return 0;
732} 728}
733 729
@@ -1133,6 +1129,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1133 /* Setup pipes */ 1129 /* Setup pipes */
1134 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1130 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1135 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1131 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1132 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1136 1133
1137 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1134 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1138 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1135 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -2120,6 +2117,7 @@ int r600_init(struct radeon_device *rdev)
2120 2117
2121void r600_fini(struct radeon_device *rdev) 2118void r600_fini(struct radeon_device *rdev)
2122{ 2119{
2120 radeon_pm_fini(rdev);
2123 r600_audio_fini(rdev); 2121 r600_audio_fini(rdev);
2124 r600_blit_fini(rdev); 2122 r600_blit_fini(rdev);
2125 r600_cp_fini(rdev); 2123 r600_cp_fini(rdev);
@@ -2399,19 +2397,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2399 WREG32(DC_HPD4_INT_CONTROL, tmp); 2397 WREG32(DC_HPD4_INT_CONTROL, tmp);
2400 if (ASIC_IS_DCE32(rdev)) { 2398 if (ASIC_IS_DCE32(rdev)) {
2401 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2399 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2402 WREG32(DC_HPD5_INT_CONTROL, 0); 2400 WREG32(DC_HPD5_INT_CONTROL, tmp);
2403 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2401 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2404 WREG32(DC_HPD6_INT_CONTROL, 0); 2402 WREG32(DC_HPD6_INT_CONTROL, tmp);
2405 } 2403 }
2406 } else { 2404 } else {
2407 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 2405 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2408 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 2406 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2409 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 2407 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2410 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0); 2408 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2411 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 2409 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2412 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0); 2410 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2413 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 2411 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2414 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0); 2412 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2415 } 2413 }
2416} 2414}
2417 2415
@@ -2766,6 +2764,7 @@ restart_ih:
2766 case 0: /* D1 vblank */ 2764 case 0: /* D1 vblank */
2767 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2765 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2768 drm_handle_vblank(rdev->ddev, 0); 2766 drm_handle_vblank(rdev->ddev, 0);
2767 rdev->pm.vblank_sync = true;
2769 wake_up(&rdev->irq.vblank_queue); 2768 wake_up(&rdev->irq.vblank_queue);
2770 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2769 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2771 DRM_DEBUG("IH: D1 vblank\n"); 2770 DRM_DEBUG("IH: D1 vblank\n");
@@ -2787,6 +2786,7 @@ restart_ih:
2787 case 0: /* D2 vblank */ 2786 case 0: /* D2 vblank */
2788 if (disp_int & LB_D2_VBLANK_INTERRUPT) { 2787 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2789 drm_handle_vblank(rdev->ddev, 1); 2788 drm_handle_vblank(rdev->ddev, 1);
2789 rdev->pm.vblank_sync = true;
2790 wake_up(&rdev->irq.vblank_queue); 2790 wake_up(&rdev->irq.vblank_queue);
2791 disp_int &= ~LB_D2_VBLANK_INTERRUPT; 2791 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2792 DRM_DEBUG("IH: D2 vblank\n"); 2792 DRM_DEBUG("IH: D2 vblank\n");
@@ -2835,14 +2835,14 @@ restart_ih:
2835 break; 2835 break;
2836 case 10: 2836 case 10:
2837 if (disp_int_cont2 & DC_HPD5_INTERRUPT) { 2837 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
2838 disp_int_cont &= ~DC_HPD5_INTERRUPT; 2838 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
2839 queue_hotplug = true; 2839 queue_hotplug = true;
2840 DRM_DEBUG("IH: HPD5\n"); 2840 DRM_DEBUG("IH: HPD5\n");
2841 } 2841 }
2842 break; 2842 break;
2843 case 12: 2843 case 12:
2844 if (disp_int_cont2 & DC_HPD6_INTERRUPT) { 2844 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
2845 disp_int_cont &= ~DC_HPD6_INTERRUPT; 2845 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
2846 queue_hotplug = true; 2846 queue_hotplug = true;
2847 DRM_DEBUG("IH: HPD6\n"); 2847 DRM_DEBUG("IH: HPD6\n");
2848 } 2848 }
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index db928016d034..dac7042b797e 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -182,41 +182,6 @@ int r600_audio_init(struct radeon_device *rdev)
182} 182}
183 183
184/* 184/*
185 * determine how the encoders and the audio interface are wired together
186 */
187int r600_audio_tmds_index(struct drm_encoder *encoder)
188{
189 struct drm_device *dev = encoder->dev;
190 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
191 struct drm_encoder *other;
192
193 switch (radeon_encoder->encoder_id) {
194 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
195 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
196 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
197 return 0;
198
199 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
200 /* special case check if a TMDS1 is present */
201 list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
202 if (to_radeon_encoder(other)->encoder_id ==
203 ENCODER_OBJECT_ID_INTERNAL_TMDS1)
204 return 1;
205 }
206 return 0;
207
208 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
209 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
210 return 1;
211
212 default:
213 DRM_ERROR("Unsupported encoder type 0x%02X\n",
214 radeon_encoder->encoder_id);
215 return -1;
216 }
217}
218
219/*
220 * atach the audio codec to the clock source of the encoder 185 * atach the audio codec to the clock source of the encoder
221 */ 186 */
222void r600_audio_set_clock(struct drm_encoder *encoder, int clock) 187void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -224,6 +189,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
224 struct drm_device *dev = encoder->dev; 189 struct drm_device *dev = encoder->dev;
225 struct radeon_device *rdev = dev->dev_private; 190 struct radeon_device *rdev = dev->dev_private;
226 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
227 int base_rate = 48000; 193 int base_rate = 48000;
228 194
229 switch (radeon_encoder->encoder_id) { 195 switch (radeon_encoder->encoder_id) {
@@ -231,32 +197,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
231 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 197 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
232 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301); 198 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
233 break; 199 break;
234
235 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 200 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
236 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 201 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
237 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 202 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
238 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 203 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
239 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); 204 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
240 break; 205 break;
241
242 default: 206 default:
243 DRM_ERROR("Unsupported encoder type 0x%02X\n", 207 DRM_ERROR("Unsupported encoder type 0x%02X\n",
244 radeon_encoder->encoder_id); 208 radeon_encoder->encoder_id);
245 return; 209 return;
246 } 210 }
247 211
248 switch (r600_audio_tmds_index(encoder)) { 212 switch (dig->dig_encoder) {
249 case 0: 213 case 0:
250 WREG32(R600_AUDIO_PLL1_MUL, base_rate*50); 214 WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
251 WREG32(R600_AUDIO_PLL1_DIV, clock*100); 215 WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
252 WREG32(R600_AUDIO_CLK_SRCSEL, 0); 216 WREG32(R600_AUDIO_CLK_SRCSEL, 0);
253 break; 217 break;
254 218
255 case 1: 219 case 1:
256 WREG32(R600_AUDIO_PLL2_MUL, base_rate*50); 220 WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
257 WREG32(R600_AUDIO_PLL2_DIV, clock*100); 221 WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
258 WREG32(R600_AUDIO_CLK_SRCSEL, 1); 222 WREG32(R600_AUDIO_CLK_SRCSEL, 1);
259 break; 223 break;
224 default:
225 dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
226 radeon_encoder->encoder_id);
227 return;
260 } 228 }
261} 229}
262 230
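The DIG-based clock-source selection above programs the audio PLL with MUL = base_rate * 50 and DIV = clock * 100; the scale factors suggest clock is the pixel clock in kHz, which is assumed below. A worked example using the function's 48 kHz base rate:

#include <stdio.h>

int main(void)
{
	int base_rate = 48000;   /* Hz, as in r600_audio_set_clock() */
	int clock = 74250;       /* example pixel clock in kHz (720p/1080i timing) */

	printf("R600_AUDIO_PLLx_MUL = %d\n", base_rate * 50);  /* 2400000 */
	printf("R600_AUDIO_PLLx_DIV = %d\n", clock * 100);     /* 7425000 */
	return 0;
}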
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index a112c59f9d82..0271b53fa2dd 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -1,7 +1,42 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 */
1 26
2#include <linux/types.h> 27#include <linux/types.h>
3#include <linux/kernel.h> 28#include <linux/kernel.h>
4 29
30/*
31 * R6xx+ cards need to use the 3D engine to blit data which requires
32 * quite a bit of hw state setup. Rather than pull the whole 3D driver
33 * (which normally generates the 3D state) into the DRM, we opt to use
34 * statically generated state tables. The register state and shaders
35 * were hand generated to support blitting functionality. See the 3D
36 * driver or documentation for descriptions of the registers and
37 * shader instructions.
38 */
39
5const u32 r6xx_default_state[] = 40const u32 r6xx_default_state[] =
6{ 41{
7 0xc0002400, 42 0xc0002400,
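The r6xx_default_state table above begins with raw command-processor packet words. Its first word, 0xc0002400, decodes as a type-3 packet under the PM4 header layout used by the radeon PACKET3() macro (type in bits 31:30, count in bits 29:16, opcode in bits 15:8); the opcode's mnemonic and the count field's exact off-by-one convention are not asserted here. A small standalone decoder:

#include <stdio.h>
#include <stdint.h>

/* Decode a PM4 type-3 header using the PACKET3() bit layout:
 * type [31:30], count [29:16], opcode [15:8]. */
int main(void)
{
	uint32_t hdr = 0xc0002400;  /* first word of r6xx_default_state above */

	printf("type   = %u\n", hdr >> 30);             /* 3 */
	printf("count  = %u\n", (hdr >> 16) & 0x3fff);  /* 0 */
	printf("opcode = 0x%02x\n", (hdr >> 8) & 0xff); /* 0x24 */
	return 0;
}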
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 40416c068d9f..68e6f4349309 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1548,10 +1548,13 @@ static void r700_gfx_init(struct drm_device *dev,
1548 1548
1549 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1549 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1550 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1550 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1551 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1551 1552
1552 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1553 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1553 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); 1554 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
1554 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); 1555 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
1556 RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
1557 RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
1555 1558
1556 num_qd_pipes = 1559 num_qd_pipes =
1557 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); 1560 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cd2c63bce501..c39c1bc13016 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -45,6 +45,7 @@ struct r600_cs_track {
45 u32 nbanks; 45 u32 nbanks;
46 u32 npipes; 46 u32 npipes;
47 /* value we track */ 47 /* value we track */
48 u32 sq_config;
48 u32 nsamples; 49 u32 nsamples;
49 u32 cb_color_base_last[8]; 50 u32 cb_color_base_last[8];
50 struct radeon_bo *cb_color_bo[8]; 51 struct radeon_bo *cb_color_bo[8];
@@ -141,6 +142,8 @@ static void r600_cs_track_init(struct r600_cs_track *track)
141{ 142{
142 int i; 143 int i;
143 144
145 /* assume DX9 mode */
146 track->sq_config = DX9_CONSTS;
144 for (i = 0; i < 8; i++) { 147 for (i = 0; i < 8; i++) {
145 track->cb_color_base_last[i] = 0; 148 track->cb_color_base_last[i] = 0;
146 track->cb_color_size[i] = 0; 149 track->cb_color_size[i] = 0;
@@ -715,6 +718,9 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
715 tmp =radeon_get_ib_value(p, idx); 718 tmp =radeon_get_ib_value(p, idx);
716 ib[idx] = 0; 719 ib[idx] = 0;
717 break; 720 break;
721 case SQ_CONFIG:
722 track->sq_config = radeon_get_ib_value(p, idx);
723 break;
718 case R_028800_DB_DEPTH_CONTROL: 724 case R_028800_DB_DEPTH_CONTROL:
719 track->db_depth_control = radeon_get_ib_value(p, idx); 725 track->db_depth_control = radeon_get_ib_value(p, idx);
720 break; 726 break;
@@ -869,6 +875,54 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
869 case SQ_PGM_START_VS: 875 case SQ_PGM_START_VS:
870 case SQ_PGM_START_GS: 876 case SQ_PGM_START_GS:
871 case SQ_PGM_START_PS: 877 case SQ_PGM_START_PS:
878 case SQ_ALU_CONST_CACHE_GS_0:
879 case SQ_ALU_CONST_CACHE_GS_1:
880 case SQ_ALU_CONST_CACHE_GS_2:
881 case SQ_ALU_CONST_CACHE_GS_3:
882 case SQ_ALU_CONST_CACHE_GS_4:
883 case SQ_ALU_CONST_CACHE_GS_5:
884 case SQ_ALU_CONST_CACHE_GS_6:
885 case SQ_ALU_CONST_CACHE_GS_7:
886 case SQ_ALU_CONST_CACHE_GS_8:
887 case SQ_ALU_CONST_CACHE_GS_9:
888 case SQ_ALU_CONST_CACHE_GS_10:
889 case SQ_ALU_CONST_CACHE_GS_11:
890 case SQ_ALU_CONST_CACHE_GS_12:
891 case SQ_ALU_CONST_CACHE_GS_13:
892 case SQ_ALU_CONST_CACHE_GS_14:
893 case SQ_ALU_CONST_CACHE_GS_15:
894 case SQ_ALU_CONST_CACHE_PS_0:
895 case SQ_ALU_CONST_CACHE_PS_1:
896 case SQ_ALU_CONST_CACHE_PS_2:
897 case SQ_ALU_CONST_CACHE_PS_3:
898 case SQ_ALU_CONST_CACHE_PS_4:
899 case SQ_ALU_CONST_CACHE_PS_5:
900 case SQ_ALU_CONST_CACHE_PS_6:
901 case SQ_ALU_CONST_CACHE_PS_7:
902 case SQ_ALU_CONST_CACHE_PS_8:
903 case SQ_ALU_CONST_CACHE_PS_9:
904 case SQ_ALU_CONST_CACHE_PS_10:
905 case SQ_ALU_CONST_CACHE_PS_11:
906 case SQ_ALU_CONST_CACHE_PS_12:
907 case SQ_ALU_CONST_CACHE_PS_13:
908 case SQ_ALU_CONST_CACHE_PS_14:
909 case SQ_ALU_CONST_CACHE_PS_15:
910 case SQ_ALU_CONST_CACHE_VS_0:
911 case SQ_ALU_CONST_CACHE_VS_1:
912 case SQ_ALU_CONST_CACHE_VS_2:
913 case SQ_ALU_CONST_CACHE_VS_3:
914 case SQ_ALU_CONST_CACHE_VS_4:
915 case SQ_ALU_CONST_CACHE_VS_5:
916 case SQ_ALU_CONST_CACHE_VS_6:
917 case SQ_ALU_CONST_CACHE_VS_7:
918 case SQ_ALU_CONST_CACHE_VS_8:
919 case SQ_ALU_CONST_CACHE_VS_9:
920 case SQ_ALU_CONST_CACHE_VS_10:
921 case SQ_ALU_CONST_CACHE_VS_11:
922 case SQ_ALU_CONST_CACHE_VS_12:
923 case SQ_ALU_CONST_CACHE_VS_13:
924 case SQ_ALU_CONST_CACHE_VS_14:
925 case SQ_ALU_CONST_CACHE_VS_15:
872 r = r600_cs_packet_next_reloc(p, &reloc); 926 r = r600_cs_packet_next_reloc(p, &reloc);
873 if (r) { 927 if (r) {
874 dev_warn(p->dev, "bad SET_CONTEXT_REG " 928 dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1226,13 +1280,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1226 } 1280 }
1227 break; 1281 break;
1228 case PACKET3_SET_ALU_CONST: 1282 case PACKET3_SET_ALU_CONST:
1229 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; 1283 if (track->sq_config & DX9_CONSTS) {
1230 end_reg = 4 * pkt->count + start_reg - 4; 1284 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
1231 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 1285 end_reg = 4 * pkt->count + start_reg - 4;
1232 (start_reg >= PACKET3_SET_ALU_CONST_END) || 1286 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
1233 (end_reg >= PACKET3_SET_ALU_CONST_END)) { 1287 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
1234 DRM_ERROR("bad SET_ALU_CONST\n"); 1288 (end_reg >= PACKET3_SET_ALU_CONST_END)) {
1235 return -EINVAL; 1289 DRM_ERROR("bad SET_ALU_CONST\n");
1290 return -EINVAL;
1291 }
1236 } 1292 }
1237 break; 1293 break;
1238 case PACKET3_SET_BOOL_CONST: 1294 case PACKET3_SET_BOOL_CONST:
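With the change above, the SET_ALU_CONST offset check only applies while SQ_CONFIG selects DX9-style constants. A standalone restatement of the range arithmetic; the OFFSET/END bounds below are placeholders, not the real PACKET3_SET_ALU_CONST_OFFSET/_END values:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bounds; the real PACKET3_SET_ALU_CONST_OFFSET/_END differ. */
#define ALU_CONST_OFFSET 0x00030000u
#define ALU_CONST_END    0x00032000u

/* Mirrors the range check done for PACKET3_SET_ALU_CONST above. */
static int alu_const_range_ok(uint32_t idx_value, uint32_t count)
{
	uint32_t start_reg = (idx_value << 2) + ALU_CONST_OFFSET;
	uint32_t end_reg = 4 * count + start_reg - 4;

	if (start_reg < ALU_CONST_OFFSET ||
	    start_reg >= ALU_CONST_END ||
	    end_reg >= ALU_CONST_END)
		return 0;
	return 1;
}

int main(void)
{
	/* in range, then deliberately out of range */
	printf("%d %d\n", alu_const_range_ok(0, 4), alu_const_range_ok(0x4000, 4));
	return 0;
}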
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index fcc949df0e5d..029fa1406d1d 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -42,13 +42,13 @@ enum r600_hdmi_color_format {
42 */ 42 */
43enum r600_hdmi_iec_status_bits { 43enum r600_hdmi_iec_status_bits {
44 AUDIO_STATUS_DIG_ENABLE = 0x01, 44 AUDIO_STATUS_DIG_ENABLE = 0x01,
45 AUDIO_STATUS_V = 0x02, 45 AUDIO_STATUS_V = 0x02,
46 AUDIO_STATUS_VCFG = 0x04, 46 AUDIO_STATUS_VCFG = 0x04,
47 AUDIO_STATUS_EMPHASIS = 0x08, 47 AUDIO_STATUS_EMPHASIS = 0x08,
48 AUDIO_STATUS_COPYRIGHT = 0x10, 48 AUDIO_STATUS_COPYRIGHT = 0x10,
49 AUDIO_STATUS_NONAUDIO = 0x20, 49 AUDIO_STATUS_NONAUDIO = 0x20,
50 AUDIO_STATUS_PROFESSIONAL = 0x40, 50 AUDIO_STATUS_PROFESSIONAL = 0x40,
51 AUDIO_STATUS_LEVEL = 0x80 51 AUDIO_STATUS_LEVEL = 0x80
52}; 52};
53 53
54struct { 54struct {
@@ -85,7 +85,7 @@ struct {
85static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) 85static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
86{ 86{
87 if (*CTS == 0) 87 if (*CTS == 0)
88 *CTS = clock*N/(128*freq)*1000; 88 *CTS = clock * N / (128 * freq) * 1000;
89 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", 89 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
90 N, *CTS, freq); 90 N, *CTS, freq);
91} 91}
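The fallback above computes CTS as clock * N / (128 * freq) and then scales by 1000, which implies clock is the pixel clock in kHz (assumed below). A worked example using N = 6144, the usual HDMI ACR value for 48 kHz audio; the integer truncation matches what the expression above does:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clock = 74250;  /* pixel clock in kHz */
	int N = 6144;            /* typical ACR N for 48 kHz audio */
	int freq = 48000;        /* audio sample rate in Hz */

	int CTS = clock * N / (128 * freq) * 1000;  /* same expression as r600_hdmi_calc_CTS() */
	printf("CTS = %d\n", CTS);                  /* 74 * 1000 = 74000 */
	return 0;
}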
@@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType,
131 uint8_t length, 131 uint8_t length,
132 uint8_t *frame) 132 uint8_t *frame)
133{ 133{
134 int i; 134 int i;
135 frame[0] = packetType + versionNumber + length; 135 frame[0] = packetType + versionNumber + length;
136 for (i = 1; i <= length; i++) 136 for (i = 1; i <= length; i++)
137 frame[0] += frame[i]; 137 frame[0] += frame[i];
138 frame[0] = 0x100 - frame[0]; 138 frame[0] = 0x100 - frame[0];
139} 139}
140 140
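The checksum above accumulates the header bytes and the payload in an 8-bit variable and stores its two's complement, so the whole infoframe sums to zero modulo 256. A small standalone check with arbitrary payload bytes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t frame[6] = { 0, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e };  /* frame[0] holds the checksum */
	uint8_t type = 0x82, version = 0x02, length = 5;          /* arbitrary header bytes */
	int i;

	frame[0] = type + version + length;
	for (i = 1; i <= length; i++)
		frame[0] += frame[i];
	frame[0] = 0x100 - frame[0];          /* same steps as r600_hdmi_infoframe_checksum() */

	/* verify: header + payload + checksum == 0 (mod 256) */
	uint8_t total = type + version + length + frame[0];
	for (i = 1; i <= length; i++)
		total += frame[i];
	printf("checksum = 0x%02x, total mod 256 = %u\n", frame[0], total);
	return 0;
}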
141/* 141/*
@@ -417,90 +417,141 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
417 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); 417 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
418} 418}
419 419
420/* 420static int r600_hdmi_find_free_block(struct drm_device *dev)
421 * enable/disable the HDMI engine 421{
422 */ 422 struct radeon_device *rdev = dev->dev_private;
423void r600_hdmi_enable(struct drm_encoder *encoder, int enable) 423 struct drm_encoder *encoder;
424 struct radeon_encoder *radeon_encoder;
425 bool free_blocks[3] = { true, true, true };
426
427 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
428 radeon_encoder = to_radeon_encoder(encoder);
429 switch (radeon_encoder->hdmi_offset) {
430 case R600_HDMI_BLOCK1:
431 free_blocks[0] = false;
432 break;
433 case R600_HDMI_BLOCK2:
434 free_blocks[1] = false;
435 break;
436 case R600_HDMI_BLOCK3:
437 free_blocks[2] = false;
438 break;
439 }
440 }
441
442 if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
443 return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
444 } else if (rdev->family >= CHIP_R600) {
445 if (free_blocks[0])
446 return R600_HDMI_BLOCK1;
447 else if (free_blocks[1])
448 return R600_HDMI_BLOCK2;
449 }
450 return 0;
451}
452
453static void r600_hdmi_assign_block(struct drm_encoder *encoder)
424{ 454{
425 struct drm_device *dev = encoder->dev; 455 struct drm_device *dev = encoder->dev;
426 struct radeon_device *rdev = dev->dev_private; 456 struct radeon_device *rdev = dev->dev_private;
427 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 457 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
428 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 458 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
429 459
430 if (!offset) 460 if (!dig) {
461 dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
431 return; 462 return;
463 }
432 464
433 DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset); 465 if (ASIC_IS_DCE4(rdev)) {
434 466 /* TODO */
435 /* some version of atombios ignore the enable HDMI flag 467 } else if (ASIC_IS_DCE3(rdev)) {
436 * so enabling/disabling HDMI was moved here for TMDS1+2 */ 468 radeon_encoder->hdmi_offset = dig->dig_encoder ?
437 switch (radeon_encoder->encoder_id) { 469 R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
438 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 470 if (ASIC_IS_DCE32(rdev))
439 WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4); 471 radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
440 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0); 472 R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
441 break; 473 } else if (rdev->family >= CHIP_R600) {
442 474 radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
443 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
444 WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
445 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
446 break;
447
448 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
449 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
450 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
451 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
452 /* This part is doubtful in my opinion */
453 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
454 break;
455
456 default:
457 DRM_ERROR("unknown HDMI output type\n");
458 break;
459 } 475 }
460} 476}
461 477
462/* 478/*
463 * determin at which register offset the HDMI encoder is 479 * enable the HDMI engine
464 */ 480 */
465void r600_hdmi_init(struct drm_encoder *encoder) 481void r600_hdmi_enable(struct drm_encoder *encoder)
466{ 482{
483 struct drm_device *dev = encoder->dev;
484 struct radeon_device *rdev = dev->dev_private;
467 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 485 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
468 486
469 switch (radeon_encoder->encoder_id) { 487 if (!radeon_encoder->hdmi_offset) {
470 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 488 r600_hdmi_assign_block(encoder);
471 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 489 if (!radeon_encoder->hdmi_offset) {
472 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 490 dev_warn(rdev->dev, "Could not find HDMI block for "
473 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; 491 "0x%x encoder\n", radeon_encoder->encoder_id);
474 break; 492 return;
475 493 }
476 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 494 }
477 switch (r600_audio_tmds_index(encoder)) { 495
478 case 0: 496 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
479 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; 497 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
498 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
499 int offset = radeon_encoder->hdmi_offset;
500 switch (radeon_encoder->encoder_id) {
501 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
502 WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
503 WREG32(offset + R600_HDMI_ENABLE, 0x101);
480 break; 504 break;
481 case 1: 505 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
482 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; 506 WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
507 WREG32(offset + R600_HDMI_ENABLE, 0x105);
483 break; 508 break;
484 default: 509 default:
485 radeon_encoder->hdmi_offset = 0; 510 dev_err(rdev->dev, "Unknown HDMI output type\n");
486 break; 511 break;
487 } 512 }
488 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 513 }
489 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
490 break;
491 514
492 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 515 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
493 radeon_encoder->hdmi_offset = R600_HDMI_DIG; 516 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
494 break; 517}
495 518
496 default: 519/*
497 radeon_encoder->hdmi_offset = 0; 520 * disable the HDMI engine
498 break; 521 */
522void r600_hdmi_disable(struct drm_encoder *encoder)
523{
524 struct drm_device *dev = encoder->dev;
525 struct radeon_device *rdev = dev->dev_private;
526 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
527
528 if (!radeon_encoder->hdmi_offset) {
529 dev_err(rdev->dev, "Disabling not enabled HDMI\n");
530 return;
499 } 531 }
500 532
501 DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n", 533 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
502 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); 534 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
535
536 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
537 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
538 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
539 int offset = radeon_encoder->hdmi_offset;
540 switch (radeon_encoder->encoder_id) {
541 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
542 WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
543 WREG32(offset + R600_HDMI_ENABLE, 0);
544 break;
545 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
546 WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
547 WREG32(offset + R600_HDMI_ENABLE, 0);
548 break;
549 default:
550 dev_err(rdev->dev, "Unknown HDMI output type\n");
551 break;
552 }
553 }
503 554
504 /* TODO: make this configureable */ 555 radeon_encoder->hdmi_offset = 0;
505 radeon_encoder->hdmi_audio_workaround = 0; 556 radeon_encoder->hdmi_config_offset = 0;
506} 557}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d0e28ffdeda9..7b1d22370f6e 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -152,9 +152,9 @@
152#define R600_AUDIO_STATUS_BITS 0x73d8 152#define R600_AUDIO_STATUS_BITS 0x73d8
153 153
154/* HDMI base register addresses */ 154/* HDMI base register addresses */
155#define R600_HDMI_TMDS1 0x7400 155#define R600_HDMI_BLOCK1 0x7400
156#define R600_HDMI_TMDS2 0x7700 156#define R600_HDMI_BLOCK2 0x7700
157#define R600_HDMI_DIG 0x7800 157#define R600_HDMI_BLOCK3 0x7800
158 158
159/* HDMI registers */ 159/* HDMI registers */
160#define R600_HDMI_ENABLE 0x00 160#define R600_HDMI_ENABLE 0x00
@@ -185,4 +185,8 @@
185#define R600_HDMI_AUDIO_DEBUG_2 0xe8 185#define R600_HDMI_AUDIO_DEBUG_2 0xe8
186#define R600_HDMI_AUDIO_DEBUG_3 0xec 186#define R600_HDMI_AUDIO_DEBUG_3 0xec
187 187
188/* HDMI additional config base register addresses */
189#define R600_HDMI_CONFIG1 0x7600
190#define R600_HDMI_CONFIG2 0x7a00
191
188#endif 192#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 5b2e4d442823..59c1f8793e60 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -77,6 +77,55 @@
77#define CB_COLOR0_FRAG 0x280e0 77#define CB_COLOR0_FRAG 0x280e0
78#define CB_COLOR0_MASK 0x28100 78#define CB_COLOR0_MASK 0x28100
79 79
80#define SQ_ALU_CONST_CACHE_PS_0 0x28940
81#define SQ_ALU_CONST_CACHE_PS_1 0x28944
82#define SQ_ALU_CONST_CACHE_PS_2 0x28948
83#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
84#define SQ_ALU_CONST_CACHE_PS_4 0x28950
85#define SQ_ALU_CONST_CACHE_PS_5 0x28954
86#define SQ_ALU_CONST_CACHE_PS_6 0x28958
87#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
88#define SQ_ALU_CONST_CACHE_PS_8 0x28960
89#define SQ_ALU_CONST_CACHE_PS_9 0x28964
90#define SQ_ALU_CONST_CACHE_PS_10 0x28968
91#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
92#define SQ_ALU_CONST_CACHE_PS_12 0x28970
93#define SQ_ALU_CONST_CACHE_PS_13 0x28974
94#define SQ_ALU_CONST_CACHE_PS_14 0x28978
95#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
96#define SQ_ALU_CONST_CACHE_VS_0 0x28980
97#define SQ_ALU_CONST_CACHE_VS_1 0x28984
98#define SQ_ALU_CONST_CACHE_VS_2 0x28988
99#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
100#define SQ_ALU_CONST_CACHE_VS_4 0x28990
101#define SQ_ALU_CONST_CACHE_VS_5 0x28994
102#define SQ_ALU_CONST_CACHE_VS_6 0x28998
103#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
104#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
105#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
106#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
107#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
108#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
109#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
110#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
111#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
112#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
113#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
114#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
115#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
116#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
117#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
118#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
119#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
120#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
121#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
122#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
123#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
124#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
125#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
126#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
127#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
128
80#define CONFIG_MEMSIZE 0x5428 129#define CONFIG_MEMSIZE 0x5428
81#define CONFIG_CNTL 0x5424 130#define CONFIG_CNTL 0x5424
82#define CP_STAT 0x8680 131#define CP_STAT 0x8680
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 829e26e8a4bb..034218c3dbbb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -91,6 +91,8 @@ extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_dynpm; 92extern int radeon_dynpm;
93extern int radeon_audio; 93extern int radeon_audio;
94extern int radeon_disp_priority;
95extern int radeon_hw_i2c;
94 96
95/* 97/*
96 * Copy from radeon_drv.h so we don't have to include both and have conflicting 98 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -168,6 +170,7 @@ struct radeon_clock {
168 * Power management 170 * Power management
169 */ 171 */
170int radeon_pm_init(struct radeon_device *rdev); 172int radeon_pm_init(struct radeon_device *rdev);
173void radeon_pm_fini(struct radeon_device *rdev);
171void radeon_pm_compute_clocks(struct radeon_device *rdev); 174void radeon_pm_compute_clocks(struct radeon_device *rdev);
172void radeon_combios_get_power_modes(struct radeon_device *rdev); 175void radeon_combios_get_power_modes(struct radeon_device *rdev);
173void radeon_atombios_get_power_modes(struct radeon_device *rdev); 176void radeon_atombios_get_power_modes(struct radeon_device *rdev);
@@ -687,6 +690,7 @@ struct radeon_pm {
687 bool downclocked; 690 bool downclocked;
688 int active_crtcs; 691 int active_crtcs;
689 int req_vblank; 692 int req_vblank;
693 bool vblank_sync;
690 fixed20_12 max_bandwidth; 694 fixed20_12 max_bandwidth;
691 fixed20_12 igp_sideport_mclk; 695 fixed20_12 igp_sideport_mclk;
692 fixed20_12 igp_system_mclk; 696 fixed20_12 igp_system_mclk;
@@ -697,6 +701,7 @@ struct radeon_pm {
697 fixed20_12 ht_bandwidth; 701 fixed20_12 ht_bandwidth;
698 fixed20_12 core_bandwidth; 702 fixed20_12 core_bandwidth;
699 fixed20_12 sclk; 703 fixed20_12 sclk;
704 fixed20_12 mclk;
700 fixed20_12 needed_bandwidth; 705 fixed20_12 needed_bandwidth;
701 /* XXX: use a define for num power modes */ 706 /* XXX: use a define for num power modes */
702 struct radeon_power_state power_state[8]; 707 struct radeon_power_state power_state[8];
@@ -707,6 +712,7 @@ struct radeon_pm {
707 struct radeon_power_state *requested_power_state; 712 struct radeon_power_state *requested_power_state;
708 struct radeon_pm_clock_info *requested_clock_mode; 713 struct radeon_pm_clock_info *requested_clock_mode;
709 struct radeon_power_state *default_power_state; 714 struct radeon_power_state *default_power_state;
715 struct radeon_i2c_chan *i2c_bus;
710}; 716};
711 717
712 718
@@ -729,8 +735,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
729 struct drm_info_list *files, 735 struct drm_info_list *files,
730 unsigned nfiles); 736 unsigned nfiles);
731int radeon_debugfs_fence_init(struct radeon_device *rdev); 737int radeon_debugfs_fence_init(struct radeon_device *rdev);
732int r100_debugfs_rbbm_init(struct radeon_device *rdev);
733int r100_debugfs_cp_init(struct radeon_device *rdev);
734 738
735 739
736/* 740/*
@@ -782,7 +786,7 @@ struct radeon_asic {
782 int (*set_surface_reg)(struct radeon_device *rdev, int reg, 786 int (*set_surface_reg)(struct radeon_device *rdev, int reg,
783 uint32_t tiling_flags, uint32_t pitch, 787 uint32_t tiling_flags, uint32_t pitch,
784 uint32_t offset, uint32_t obj_size); 788 uint32_t offset, uint32_t obj_size);
785 int (*clear_surface_reg)(struct radeon_device *rdev, int reg); 789 void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
786 void (*bandwidth_update)(struct radeon_device *rdev); 790 void (*bandwidth_update)(struct radeon_device *rdev);
787 void (*hpd_init)(struct radeon_device *rdev); 791 void (*hpd_init)(struct radeon_device *rdev);
788 void (*hpd_fini)(struct radeon_device *rdev); 792 void (*hpd_fini)(struct radeon_device *rdev);
@@ -862,6 +866,12 @@ union radeon_asic_config {
862 struct rv770_asic rv770; 866 struct rv770_asic rv770;
863}; 867};
864 868
869/*
870 * asic initialization from radeon_asic.c
871 */
872void radeon_agp_disable(struct radeon_device *rdev);
873int radeon_asic_init(struct radeon_device *rdev);
874
865 875
866/* 876/*
867 * IOCTL. 877 * IOCTL.
@@ -1172,6 +1182,8 @@ extern void radeon_gart_restore(struct radeon_device *rdev);
1172extern int radeon_modeset_init(struct radeon_device *rdev); 1182extern int radeon_modeset_init(struct radeon_device *rdev);
1173extern void radeon_modeset_fini(struct radeon_device *rdev); 1183extern void radeon_modeset_fini(struct radeon_device *rdev);
1174extern bool radeon_card_posted(struct radeon_device *rdev); 1184extern bool radeon_card_posted(struct radeon_device *rdev);
1185extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
1186extern void radeon_update_display_priority(struct radeon_device *rdev);
1175extern bool radeon_boot_test_post_card(struct radeon_device *rdev); 1187extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
1176extern int radeon_clocks_init(struct radeon_device *rdev); 1188extern int radeon_clocks_init(struct radeon_device *rdev);
1177extern void radeon_clocks_fini(struct radeon_device *rdev); 1189extern void radeon_clocks_fini(struct radeon_device *rdev);
@@ -1188,51 +1200,6 @@ extern int radeon_resume_kms(struct drm_device *dev);
1188extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1200extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1189 1201
1190/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1202/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1191struct r100_mc_save {
1192 u32 GENMO_WT;
1193 u32 CRTC_EXT_CNTL;
1194 u32 CRTC_GEN_CNTL;
1195 u32 CRTC2_GEN_CNTL;
1196 u32 CUR_OFFSET;
1197 u32 CUR2_OFFSET;
1198};
1199extern void r100_cp_disable(struct radeon_device *rdev);
1200extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
1201extern void r100_cp_fini(struct radeon_device *rdev);
1202extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
1203extern int r100_pci_gart_init(struct radeon_device *rdev);
1204extern void r100_pci_gart_fini(struct radeon_device *rdev);
1205extern int r100_pci_gart_enable(struct radeon_device *rdev);
1206extern void r100_pci_gart_disable(struct radeon_device *rdev);
1207extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
1208extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
1209extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
1210extern void r100_ib_fini(struct radeon_device *rdev);
1211extern int r100_ib_init(struct radeon_device *rdev);
1212extern void r100_irq_disable(struct radeon_device *rdev);
1213extern int r100_irq_set(struct radeon_device *rdev);
1214extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
1215extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
1216extern void r100_vram_init_sizes(struct radeon_device *rdev);
1217extern void r100_wb_disable(struct radeon_device *rdev);
1218extern void r100_wb_fini(struct radeon_device *rdev);
1219extern int r100_wb_init(struct radeon_device *rdev);
1220extern void r100_hdp_reset(struct radeon_device *rdev);
1221extern int r100_rb2d_reset(struct radeon_device *rdev);
1222extern int r100_cp_reset(struct radeon_device *rdev);
1223extern void r100_vga_render_disable(struct radeon_device *rdev);
1224extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1225 struct radeon_cs_packet *pkt,
1226 struct radeon_bo *robj);
1227extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1228 struct radeon_cs_packet *pkt,
1229 const unsigned *auth, unsigned n,
1230 radeon_packet0_check_t check);
1231extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
1232 struct radeon_cs_packet *pkt,
1233 unsigned idx);
1234extern void r100_enable_bm(struct radeon_device *rdev);
1235extern void r100_set_common_regs(struct radeon_device *rdev);
1236 1203
1237/* rv200,rv250,rv280 */ 1204/* rv200,rv250,rv280 */
1238extern void r200_set_safe_registers(struct radeon_device *rdev); 1205extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1322,7 +1289,8 @@ extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1322extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1289extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
1323extern void r600_audio_fini(struct radeon_device *rdev); 1290extern void r600_audio_fini(struct radeon_device *rdev);
1324extern void r600_hdmi_init(struct drm_encoder *encoder); 1291extern void r600_hdmi_init(struct drm_encoder *encoder);
1325extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable); 1292extern void r600_hdmi_enable(struct drm_encoder *encoder);
1293extern void r600_hdmi_disable(struct drm_encoder *encoder);
1326extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1294extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1327extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 1295extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
1328extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, 1296extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 000000000000..a4b4bc9fa322
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,772 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include <linux/console.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h>
35#include "radeon_reg.h"
36#include "radeon.h"
37#include "radeon_asic.h"
38#include "atom.h"
39
40/*
41 * Registers accessors functions.
42 */
43static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
44{
45 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
46 BUG_ON(1);
47 return 0;
48}
49
50static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
51{
52 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
53 reg, v);
54 BUG_ON(1);
55}
56
57static void radeon_register_accessor_init(struct radeon_device *rdev)
58{
59 rdev->mc_rreg = &radeon_invalid_rreg;
60 rdev->mc_wreg = &radeon_invalid_wreg;
61 rdev->pll_rreg = &radeon_invalid_rreg;
62 rdev->pll_wreg = &radeon_invalid_wreg;
63 rdev->pciep_rreg = &radeon_invalid_rreg;
64 rdev->pciep_wreg = &radeon_invalid_wreg;
65
66	/* Don't change order as we are overriding accessors. */
67 if (rdev->family < CHIP_RV515) {
68 rdev->pcie_reg_mask = 0xff;
69 } else {
70 rdev->pcie_reg_mask = 0x7ff;
71 }
72 /* FIXME: not sure here */
73 if (rdev->family <= CHIP_R580) {
74 rdev->pll_rreg = &r100_pll_rreg;
75 rdev->pll_wreg = &r100_pll_wreg;
76 }
77 if (rdev->family >= CHIP_R420) {
78 rdev->mc_rreg = &r420_mc_rreg;
79 rdev->mc_wreg = &r420_mc_wreg;
80 }
81 if (rdev->family >= CHIP_RV515) {
82 rdev->mc_rreg = &rv515_mc_rreg;
83 rdev->mc_wreg = &rv515_mc_wreg;
84 }
85 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
86 rdev->mc_rreg = &rs400_mc_rreg;
87 rdev->mc_wreg = &rs400_mc_wreg;
88 }
89 if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
90 rdev->mc_rreg = &rs690_mc_rreg;
91 rdev->mc_wreg = &rs690_mc_wreg;
92 }
93 if (rdev->family == CHIP_RS600) {
94 rdev->mc_rreg = &rs600_mc_rreg;
95 rdev->mc_wreg = &rs600_mc_wreg;
96 }
97 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
98 rdev->pciep_rreg = &r600_pciep_rreg;
99 rdev->pciep_wreg = &r600_pciep_wreg;
100 }
101}
102
103
104/* helper to disable agp */
105void radeon_agp_disable(struct radeon_device *rdev)
106{
107 rdev->flags &= ~RADEON_IS_AGP;
108 if (rdev->family >= CHIP_R600) {
109 DRM_INFO("Forcing AGP to PCIE mode\n");
110 rdev->flags |= RADEON_IS_PCIE;
111 } else if (rdev->family >= CHIP_RV515 ||
112 rdev->family == CHIP_RV380 ||
113 rdev->family == CHIP_RV410 ||
114 rdev->family == CHIP_R423) {
115 DRM_INFO("Forcing AGP to PCIE mode\n");
116 rdev->flags |= RADEON_IS_PCIE;
117 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
118 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
119 } else {
120 DRM_INFO("Forcing AGP to PCI mode\n");
121 rdev->flags |= RADEON_IS_PCI;
122 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
123 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
124 }
125 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
126}
127
128/*
129 * ASIC
130 */
131static struct radeon_asic r100_asic = {
132 .init = &r100_init,
133 .fini = &r100_fini,
134 .suspend = &r100_suspend,
135 .resume = &r100_resume,
136 .vga_set_state = &r100_vga_set_state,
137 .gpu_reset = &r100_gpu_reset,
138 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
139 .gart_set_page = &r100_pci_gart_set_page,
140 .cp_commit = &r100_cp_commit,
141 .ring_start = &r100_ring_start,
142 .ring_test = &r100_ring_test,
143 .ring_ib_execute = &r100_ring_ib_execute,
144 .irq_set = &r100_irq_set,
145 .irq_process = &r100_irq_process,
146 .get_vblank_counter = &r100_get_vblank_counter,
147 .fence_ring_emit = &r100_fence_ring_emit,
148 .cs_parse = &r100_cs_parse,
149 .copy_blit = &r100_copy_blit,
150 .copy_dma = NULL,
151 .copy = &r100_copy_blit,
152 .get_engine_clock = &radeon_legacy_get_engine_clock,
153 .set_engine_clock = &radeon_legacy_set_engine_clock,
154 .get_memory_clock = &radeon_legacy_get_memory_clock,
155 .set_memory_clock = NULL,
156 .get_pcie_lanes = NULL,
157 .set_pcie_lanes = NULL,
158 .set_clock_gating = &radeon_legacy_set_clock_gating,
159 .set_surface_reg = r100_set_surface_reg,
160 .clear_surface_reg = r100_clear_surface_reg,
161 .bandwidth_update = &r100_bandwidth_update,
162 .hpd_init = &r100_hpd_init,
163 .hpd_fini = &r100_hpd_fini,
164 .hpd_sense = &r100_hpd_sense,
165 .hpd_set_polarity = &r100_hpd_set_polarity,
166 .ioctl_wait_idle = NULL,
167};
168
169static struct radeon_asic r200_asic = {
170 .init = &r100_init,
171 .fini = &r100_fini,
172 .suspend = &r100_suspend,
173 .resume = &r100_resume,
174 .vga_set_state = &r100_vga_set_state,
175 .gpu_reset = &r100_gpu_reset,
176 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
177 .gart_set_page = &r100_pci_gart_set_page,
178 .cp_commit = &r100_cp_commit,
179 .ring_start = &r100_ring_start,
180 .ring_test = &r100_ring_test,
181 .ring_ib_execute = &r100_ring_ib_execute,
182 .irq_set = &r100_irq_set,
183 .irq_process = &r100_irq_process,
184 .get_vblank_counter = &r100_get_vblank_counter,
185 .fence_ring_emit = &r100_fence_ring_emit,
186 .cs_parse = &r100_cs_parse,
187 .copy_blit = &r100_copy_blit,
188 .copy_dma = &r200_copy_dma,
189 .copy = &r100_copy_blit,
190 .get_engine_clock = &radeon_legacy_get_engine_clock,
191 .set_engine_clock = &radeon_legacy_set_engine_clock,
192 .get_memory_clock = &radeon_legacy_get_memory_clock,
193 .set_memory_clock = NULL,
194 .set_pcie_lanes = NULL,
195 .set_clock_gating = &radeon_legacy_set_clock_gating,
196 .set_surface_reg = r100_set_surface_reg,
197 .clear_surface_reg = r100_clear_surface_reg,
198 .bandwidth_update = &r100_bandwidth_update,
199 .hpd_init = &r100_hpd_init,
200 .hpd_fini = &r100_hpd_fini,
201 .hpd_sense = &r100_hpd_sense,
202 .hpd_set_polarity = &r100_hpd_set_polarity,
203 .ioctl_wait_idle = NULL,
204};
205
206static struct radeon_asic r300_asic = {
207 .init = &r300_init,
208 .fini = &r300_fini,
209 .suspend = &r300_suspend,
210 .resume = &r300_resume,
211 .vga_set_state = &r100_vga_set_state,
212 .gpu_reset = &r300_gpu_reset,
213 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
214 .gart_set_page = &r100_pci_gart_set_page,
215 .cp_commit = &r100_cp_commit,
216 .ring_start = &r300_ring_start,
217 .ring_test = &r100_ring_test,
218 .ring_ib_execute = &r100_ring_ib_execute,
219 .irq_set = &r100_irq_set,
220 .irq_process = &r100_irq_process,
221 .get_vblank_counter = &r100_get_vblank_counter,
222 .fence_ring_emit = &r300_fence_ring_emit,
223 .cs_parse = &r300_cs_parse,
224 .copy_blit = &r100_copy_blit,
225 .copy_dma = &r200_copy_dma,
226 .copy = &r100_copy_blit,
227 .get_engine_clock = &radeon_legacy_get_engine_clock,
228 .set_engine_clock = &radeon_legacy_set_engine_clock,
229 .get_memory_clock = &radeon_legacy_get_memory_clock,
230 .set_memory_clock = NULL,
231 .get_pcie_lanes = &rv370_get_pcie_lanes,
232 .set_pcie_lanes = &rv370_set_pcie_lanes,
233 .set_clock_gating = &radeon_legacy_set_clock_gating,
234 .set_surface_reg = r100_set_surface_reg,
235 .clear_surface_reg = r100_clear_surface_reg,
236 .bandwidth_update = &r100_bandwidth_update,
237 .hpd_init = &r100_hpd_init,
238 .hpd_fini = &r100_hpd_fini,
239 .hpd_sense = &r100_hpd_sense,
240 .hpd_set_polarity = &r100_hpd_set_polarity,
241 .ioctl_wait_idle = NULL,
242};
243
244static struct radeon_asic r300_asic_pcie = {
245 .init = &r300_init,
246 .fini = &r300_fini,
247 .suspend = &r300_suspend,
248 .resume = &r300_resume,
249 .vga_set_state = &r100_vga_set_state,
250 .gpu_reset = &r300_gpu_reset,
251 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
252 .gart_set_page = &rv370_pcie_gart_set_page,
253 .cp_commit = &r100_cp_commit,
254 .ring_start = &r300_ring_start,
255 .ring_test = &r100_ring_test,
256 .ring_ib_execute = &r100_ring_ib_execute,
257 .irq_set = &r100_irq_set,
258 .irq_process = &r100_irq_process,
259 .get_vblank_counter = &r100_get_vblank_counter,
260 .fence_ring_emit = &r300_fence_ring_emit,
261 .cs_parse = &r300_cs_parse,
262 .copy_blit = &r100_copy_blit,
263 .copy_dma = &r200_copy_dma,
264 .copy = &r100_copy_blit,
265 .get_engine_clock = &radeon_legacy_get_engine_clock,
266 .set_engine_clock = &radeon_legacy_set_engine_clock,
267 .get_memory_clock = &radeon_legacy_get_memory_clock,
268 .set_memory_clock = NULL,
269 .set_pcie_lanes = &rv370_set_pcie_lanes,
270 .set_clock_gating = &radeon_legacy_set_clock_gating,
271 .set_surface_reg = r100_set_surface_reg,
272 .clear_surface_reg = r100_clear_surface_reg,
273 .bandwidth_update = &r100_bandwidth_update,
274 .hpd_init = &r100_hpd_init,
275 .hpd_fini = &r100_hpd_fini,
276 .hpd_sense = &r100_hpd_sense,
277 .hpd_set_polarity = &r100_hpd_set_polarity,
278 .ioctl_wait_idle = NULL,
279};
280
281static struct radeon_asic r420_asic = {
282 .init = &r420_init,
283 .fini = &r420_fini,
284 .suspend = &r420_suspend,
285 .resume = &r420_resume,
286 .vga_set_state = &r100_vga_set_state,
287 .gpu_reset = &r300_gpu_reset,
288 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
289 .gart_set_page = &rv370_pcie_gart_set_page,
290 .cp_commit = &r100_cp_commit,
291 .ring_start = &r300_ring_start,
292 .ring_test = &r100_ring_test,
293 .ring_ib_execute = &r100_ring_ib_execute,
294 .irq_set = &r100_irq_set,
295 .irq_process = &r100_irq_process,
296 .get_vblank_counter = &r100_get_vblank_counter,
297 .fence_ring_emit = &r300_fence_ring_emit,
298 .cs_parse = &r300_cs_parse,
299 .copy_blit = &r100_copy_blit,
300 .copy_dma = &r200_copy_dma,
301 .copy = &r100_copy_blit,
302 .get_engine_clock = &radeon_atom_get_engine_clock,
303 .set_engine_clock = &radeon_atom_set_engine_clock,
304 .get_memory_clock = &radeon_atom_get_memory_clock,
305 .set_memory_clock = &radeon_atom_set_memory_clock,
306 .get_pcie_lanes = &rv370_get_pcie_lanes,
307 .set_pcie_lanes = &rv370_set_pcie_lanes,
308 .set_clock_gating = &radeon_atom_set_clock_gating,
309 .set_surface_reg = r100_set_surface_reg,
310 .clear_surface_reg = r100_clear_surface_reg,
311 .bandwidth_update = &r100_bandwidth_update,
312 .hpd_init = &r100_hpd_init,
313 .hpd_fini = &r100_hpd_fini,
314 .hpd_sense = &r100_hpd_sense,
315 .hpd_set_polarity = &r100_hpd_set_polarity,
316 .ioctl_wait_idle = NULL,
317};
318
319static struct radeon_asic rs400_asic = {
320 .init = &rs400_init,
321 .fini = &rs400_fini,
322 .suspend = &rs400_suspend,
323 .resume = &rs400_resume,
324 .vga_set_state = &r100_vga_set_state,
325 .gpu_reset = &r300_gpu_reset,
326 .gart_tlb_flush = &rs400_gart_tlb_flush,
327 .gart_set_page = &rs400_gart_set_page,
328 .cp_commit = &r100_cp_commit,
329 .ring_start = &r300_ring_start,
330 .ring_test = &r100_ring_test,
331 .ring_ib_execute = &r100_ring_ib_execute,
332 .irq_set = &r100_irq_set,
333 .irq_process = &r100_irq_process,
334 .get_vblank_counter = &r100_get_vblank_counter,
335 .fence_ring_emit = &r300_fence_ring_emit,
336 .cs_parse = &r300_cs_parse,
337 .copy_blit = &r100_copy_blit,
338 .copy_dma = &r200_copy_dma,
339 .copy = &r100_copy_blit,
340 .get_engine_clock = &radeon_legacy_get_engine_clock,
341 .set_engine_clock = &radeon_legacy_set_engine_clock,
342 .get_memory_clock = &radeon_legacy_get_memory_clock,
343 .set_memory_clock = NULL,
344 .get_pcie_lanes = NULL,
345 .set_pcie_lanes = NULL,
346 .set_clock_gating = &radeon_legacy_set_clock_gating,
347 .set_surface_reg = r100_set_surface_reg,
348 .clear_surface_reg = r100_clear_surface_reg,
349 .bandwidth_update = &r100_bandwidth_update,
350 .hpd_init = &r100_hpd_init,
351 .hpd_fini = &r100_hpd_fini,
352 .hpd_sense = &r100_hpd_sense,
353 .hpd_set_polarity = &r100_hpd_set_polarity,
354 .ioctl_wait_idle = NULL,
355};
356
357static struct radeon_asic rs600_asic = {
358 .init = &rs600_init,
359 .fini = &rs600_fini,
360 .suspend = &rs600_suspend,
361 .resume = &rs600_resume,
362 .vga_set_state = &r100_vga_set_state,
363 .gpu_reset = &r300_gpu_reset,
364 .gart_tlb_flush = &rs600_gart_tlb_flush,
365 .gart_set_page = &rs600_gart_set_page,
366 .cp_commit = &r100_cp_commit,
367 .ring_start = &r300_ring_start,
368 .ring_test = &r100_ring_test,
369 .ring_ib_execute = &r100_ring_ib_execute,
370 .irq_set = &rs600_irq_set,
371 .irq_process = &rs600_irq_process,
372 .get_vblank_counter = &rs600_get_vblank_counter,
373 .fence_ring_emit = &r300_fence_ring_emit,
374 .cs_parse = &r300_cs_parse,
375 .copy_blit = &r100_copy_blit,
376 .copy_dma = &r200_copy_dma,
377 .copy = &r100_copy_blit,
378 .get_engine_clock = &radeon_atom_get_engine_clock,
379 .set_engine_clock = &radeon_atom_set_engine_clock,
380 .get_memory_clock = &radeon_atom_get_memory_clock,
381 .set_memory_clock = &radeon_atom_set_memory_clock,
382 .get_pcie_lanes = NULL,
383 .set_pcie_lanes = NULL,
384 .set_clock_gating = &radeon_atom_set_clock_gating,
385 .set_surface_reg = r100_set_surface_reg,
386 .clear_surface_reg = r100_clear_surface_reg,
387 .bandwidth_update = &rs600_bandwidth_update,
388 .hpd_init = &rs600_hpd_init,
389 .hpd_fini = &rs600_hpd_fini,
390 .hpd_sense = &rs600_hpd_sense,
391 .hpd_set_polarity = &rs600_hpd_set_polarity,
392 .ioctl_wait_idle = NULL,
393};
394
395static struct radeon_asic rs690_asic = {
396 .init = &rs690_init,
397 .fini = &rs690_fini,
398 .suspend = &rs690_suspend,
399 .resume = &rs690_resume,
400 .vga_set_state = &r100_vga_set_state,
401 .gpu_reset = &r300_gpu_reset,
402 .gart_tlb_flush = &rs400_gart_tlb_flush,
403 .gart_set_page = &rs400_gart_set_page,
404 .cp_commit = &r100_cp_commit,
405 .ring_start = &r300_ring_start,
406 .ring_test = &r100_ring_test,
407 .ring_ib_execute = &r100_ring_ib_execute,
408 .irq_set = &rs600_irq_set,
409 .irq_process = &rs600_irq_process,
410 .get_vblank_counter = &rs600_get_vblank_counter,
411 .fence_ring_emit = &r300_fence_ring_emit,
412 .cs_parse = &r300_cs_parse,
413 .copy_blit = &r100_copy_blit,
414 .copy_dma = &r200_copy_dma,
415 .copy = &r200_copy_dma,
416 .get_engine_clock = &radeon_atom_get_engine_clock,
417 .set_engine_clock = &radeon_atom_set_engine_clock,
418 .get_memory_clock = &radeon_atom_get_memory_clock,
419 .set_memory_clock = &radeon_atom_set_memory_clock,
420 .get_pcie_lanes = NULL,
421 .set_pcie_lanes = NULL,
422 .set_clock_gating = &radeon_atom_set_clock_gating,
423 .set_surface_reg = r100_set_surface_reg,
424 .clear_surface_reg = r100_clear_surface_reg,
425 .bandwidth_update = &rs690_bandwidth_update,
426 .hpd_init = &rs600_hpd_init,
427 .hpd_fini = &rs600_hpd_fini,
428 .hpd_sense = &rs600_hpd_sense,
429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
431};
432
433static struct radeon_asic rv515_asic = {
434 .init = &rv515_init,
435 .fini = &rv515_fini,
436 .suspend = &rv515_suspend,
437 .resume = &rv515_resume,
438 .vga_set_state = &r100_vga_set_state,
439 .gpu_reset = &rv515_gpu_reset,
440 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
441 .gart_set_page = &rv370_pcie_gart_set_page,
442 .cp_commit = &r100_cp_commit,
443 .ring_start = &rv515_ring_start,
444 .ring_test = &r100_ring_test,
445 .ring_ib_execute = &r100_ring_ib_execute,
446 .irq_set = &rs600_irq_set,
447 .irq_process = &rs600_irq_process,
448 .get_vblank_counter = &rs600_get_vblank_counter,
449 .fence_ring_emit = &r300_fence_ring_emit,
450 .cs_parse = &r300_cs_parse,
451 .copy_blit = &r100_copy_blit,
452 .copy_dma = &r200_copy_dma,
453 .copy = &r100_copy_blit,
454 .get_engine_clock = &radeon_atom_get_engine_clock,
455 .set_engine_clock = &radeon_atom_set_engine_clock,
456 .get_memory_clock = &radeon_atom_get_memory_clock,
457 .set_memory_clock = &radeon_atom_set_memory_clock,
458 .get_pcie_lanes = &rv370_get_pcie_lanes,
459 .set_pcie_lanes = &rv370_set_pcie_lanes,
460 .set_clock_gating = &radeon_atom_set_clock_gating,
461 .set_surface_reg = r100_set_surface_reg,
462 .clear_surface_reg = r100_clear_surface_reg,
463 .bandwidth_update = &rv515_bandwidth_update,
464 .hpd_init = &rs600_hpd_init,
465 .hpd_fini = &rs600_hpd_fini,
466 .hpd_sense = &rs600_hpd_sense,
467 .hpd_set_polarity = &rs600_hpd_set_polarity,
468 .ioctl_wait_idle = NULL,
469};
470
471static struct radeon_asic r520_asic = {
472 .init = &r520_init,
473 .fini = &rv515_fini,
474 .suspend = &rv515_suspend,
475 .resume = &r520_resume,
476 .vga_set_state = &r100_vga_set_state,
477 .gpu_reset = &rv515_gpu_reset,
478 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
479 .gart_set_page = &rv370_pcie_gart_set_page,
480 .cp_commit = &r100_cp_commit,
481 .ring_start = &rv515_ring_start,
482 .ring_test = &r100_ring_test,
483 .ring_ib_execute = &r100_ring_ib_execute,
484 .irq_set = &rs600_irq_set,
485 .irq_process = &rs600_irq_process,
486 .get_vblank_counter = &rs600_get_vblank_counter,
487 .fence_ring_emit = &r300_fence_ring_emit,
488 .cs_parse = &r300_cs_parse,
489 .copy_blit = &r100_copy_blit,
490 .copy_dma = &r200_copy_dma,
491 .copy = &r100_copy_blit,
492 .get_engine_clock = &radeon_atom_get_engine_clock,
493 .set_engine_clock = &radeon_atom_set_engine_clock,
494 .get_memory_clock = &radeon_atom_get_memory_clock,
495 .set_memory_clock = &radeon_atom_set_memory_clock,
496 .get_pcie_lanes = &rv370_get_pcie_lanes,
497 .set_pcie_lanes = &rv370_set_pcie_lanes,
498 .set_clock_gating = &radeon_atom_set_clock_gating,
499 .set_surface_reg = r100_set_surface_reg,
500 .clear_surface_reg = r100_clear_surface_reg,
501 .bandwidth_update = &rv515_bandwidth_update,
502 .hpd_init = &rs600_hpd_init,
503 .hpd_fini = &rs600_hpd_fini,
504 .hpd_sense = &rs600_hpd_sense,
505 .hpd_set_polarity = &rs600_hpd_set_polarity,
506 .ioctl_wait_idle = NULL,
507};
508
509static struct radeon_asic r600_asic = {
510 .init = &r600_init,
511 .fini = &r600_fini,
512 .suspend = &r600_suspend,
513 .resume = &r600_resume,
514 .cp_commit = &r600_cp_commit,
515 .vga_set_state = &r600_vga_set_state,
516 .gpu_reset = &r600_gpu_reset,
517 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
518 .gart_set_page = &rs600_gart_set_page,
519 .ring_test = &r600_ring_test,
520 .ring_ib_execute = &r600_ring_ib_execute,
521 .irq_set = &r600_irq_set,
522 .irq_process = &r600_irq_process,
523 .get_vblank_counter = &rs600_get_vblank_counter,
524 .fence_ring_emit = &r600_fence_ring_emit,
525 .cs_parse = &r600_cs_parse,
526 .copy_blit = &r600_copy_blit,
527 .copy_dma = &r600_copy_blit,
528 .copy = &r600_copy_blit,
529 .get_engine_clock = &radeon_atom_get_engine_clock,
530 .set_engine_clock = &radeon_atom_set_engine_clock,
531 .get_memory_clock = &radeon_atom_get_memory_clock,
532 .set_memory_clock = &radeon_atom_set_memory_clock,
533 .get_pcie_lanes = &rv370_get_pcie_lanes,
534 .set_pcie_lanes = NULL,
535 .set_clock_gating = NULL,
536 .set_surface_reg = r600_set_surface_reg,
537 .clear_surface_reg = r600_clear_surface_reg,
538 .bandwidth_update = &rv515_bandwidth_update,
539 .hpd_init = &r600_hpd_init,
540 .hpd_fini = &r600_hpd_fini,
541 .hpd_sense = &r600_hpd_sense,
542 .hpd_set_polarity = &r600_hpd_set_polarity,
543 .ioctl_wait_idle = r600_ioctl_wait_idle,
544};
545
546static struct radeon_asic rs780_asic = {
547 .init = &r600_init,
548 .fini = &r600_fini,
549 .suspend = &r600_suspend,
550 .resume = &r600_resume,
551 .cp_commit = &r600_cp_commit,
552 .vga_set_state = &r600_vga_set_state,
553 .gpu_reset = &r600_gpu_reset,
554 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
555 .gart_set_page = &rs600_gart_set_page,
556 .ring_test = &r600_ring_test,
557 .ring_ib_execute = &r600_ring_ib_execute,
558 .irq_set = &r600_irq_set,
559 .irq_process = &r600_irq_process,
560 .get_vblank_counter = &rs600_get_vblank_counter,
561 .fence_ring_emit = &r600_fence_ring_emit,
562 .cs_parse = &r600_cs_parse,
563 .copy_blit = &r600_copy_blit,
564 .copy_dma = &r600_copy_blit,
565 .copy = &r600_copy_blit,
566 .get_engine_clock = &radeon_atom_get_engine_clock,
567 .set_engine_clock = &radeon_atom_set_engine_clock,
568 .get_memory_clock = NULL,
569 .set_memory_clock = NULL,
570 .get_pcie_lanes = NULL,
571 .set_pcie_lanes = NULL,
572 .set_clock_gating = NULL,
573 .set_surface_reg = r600_set_surface_reg,
574 .clear_surface_reg = r600_clear_surface_reg,
575 .bandwidth_update = &rs690_bandwidth_update,
576 .hpd_init = &r600_hpd_init,
577 .hpd_fini = &r600_hpd_fini,
578 .hpd_sense = &r600_hpd_sense,
579 .hpd_set_polarity = &r600_hpd_set_polarity,
580 .ioctl_wait_idle = r600_ioctl_wait_idle,
581};
582
583static struct radeon_asic rv770_asic = {
584 .init = &rv770_init,
585 .fini = &rv770_fini,
586 .suspend = &rv770_suspend,
587 .resume = &rv770_resume,
588 .cp_commit = &r600_cp_commit,
589 .gpu_reset = &rv770_gpu_reset,
590 .vga_set_state = &r600_vga_set_state,
591 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
592 .gart_set_page = &rs600_gart_set_page,
593 .ring_test = &r600_ring_test,
594 .ring_ib_execute = &r600_ring_ib_execute,
595 .irq_set = &r600_irq_set,
596 .irq_process = &r600_irq_process,
597 .get_vblank_counter = &rs600_get_vblank_counter,
598 .fence_ring_emit = &r600_fence_ring_emit,
599 .cs_parse = &r600_cs_parse,
600 .copy_blit = &r600_copy_blit,
601 .copy_dma = &r600_copy_blit,
602 .copy = &r600_copy_blit,
603 .get_engine_clock = &radeon_atom_get_engine_clock,
604 .set_engine_clock = &radeon_atom_set_engine_clock,
605 .get_memory_clock = &radeon_atom_get_memory_clock,
606 .set_memory_clock = &radeon_atom_set_memory_clock,
607 .get_pcie_lanes = &rv370_get_pcie_lanes,
608 .set_pcie_lanes = NULL,
609 .set_clock_gating = &radeon_atom_set_clock_gating,
610 .set_surface_reg = r600_set_surface_reg,
611 .clear_surface_reg = r600_clear_surface_reg,
612 .bandwidth_update = &rv515_bandwidth_update,
613 .hpd_init = &r600_hpd_init,
614 .hpd_fini = &r600_hpd_fini,
615 .hpd_sense = &r600_hpd_sense,
616 .hpd_set_polarity = &r600_hpd_set_polarity,
617 .ioctl_wait_idle = r600_ioctl_wait_idle,
618};
619
620static struct radeon_asic evergreen_asic = {
621 .init = &evergreen_init,
622 .fini = &evergreen_fini,
623 .suspend = &evergreen_suspend,
624 .resume = &evergreen_resume,
625 .cp_commit = NULL,
626 .gpu_reset = &evergreen_gpu_reset,
627 .vga_set_state = &r600_vga_set_state,
628 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
629 .gart_set_page = &rs600_gart_set_page,
630 .ring_test = NULL,
631 .ring_ib_execute = NULL,
632 .irq_set = NULL,
633 .irq_process = NULL,
634 .get_vblank_counter = NULL,
635 .fence_ring_emit = NULL,
636 .cs_parse = NULL,
637 .copy_blit = NULL,
638 .copy_dma = NULL,
639 .copy = NULL,
640 .get_engine_clock = &radeon_atom_get_engine_clock,
641 .set_engine_clock = &radeon_atom_set_engine_clock,
642 .get_memory_clock = &radeon_atom_get_memory_clock,
643 .set_memory_clock = &radeon_atom_set_memory_clock,
644 .set_pcie_lanes = NULL,
645 .set_clock_gating = NULL,
646 .set_surface_reg = r600_set_surface_reg,
647 .clear_surface_reg = r600_clear_surface_reg,
648 .bandwidth_update = &evergreen_bandwidth_update,
649 .hpd_init = &evergreen_hpd_init,
650 .hpd_fini = &evergreen_hpd_fini,
651 .hpd_sense = &evergreen_hpd_sense,
652 .hpd_set_polarity = &evergreen_hpd_set_polarity,
653};
654
655int radeon_asic_init(struct radeon_device *rdev)
656{
657 radeon_register_accessor_init(rdev);
658 switch (rdev->family) {
659 case CHIP_R100:
660 case CHIP_RV100:
661 case CHIP_RS100:
662 case CHIP_RV200:
663 case CHIP_RS200:
664 rdev->asic = &r100_asic;
665 break;
666 case CHIP_R200:
667 case CHIP_RV250:
668 case CHIP_RS300:
669 case CHIP_RV280:
670 rdev->asic = &r200_asic;
671 break;
672 case CHIP_R300:
673 case CHIP_R350:
674 case CHIP_RV350:
675 case CHIP_RV380:
676 if (rdev->flags & RADEON_IS_PCIE)
677 rdev->asic = &r300_asic_pcie;
678 else
679 rdev->asic = &r300_asic;
680 break;
681 case CHIP_R420:
682 case CHIP_R423:
683 case CHIP_RV410:
684 rdev->asic = &r420_asic;
685 break;
686 case CHIP_RS400:
687 case CHIP_RS480:
688 rdev->asic = &rs400_asic;
689 break;
690 case CHIP_RS600:
691 rdev->asic = &rs600_asic;
692 break;
693 case CHIP_RS690:
694 case CHIP_RS740:
695 rdev->asic = &rs690_asic;
696 break;
697 case CHIP_RV515:
698 rdev->asic = &rv515_asic;
699 break;
700 case CHIP_R520:
701 case CHIP_RV530:
702 case CHIP_RV560:
703 case CHIP_RV570:
704 case CHIP_R580:
705 rdev->asic = &r520_asic;
706 break;
707 case CHIP_R600:
708 case CHIP_RV610:
709 case CHIP_RV630:
710 case CHIP_RV620:
711 case CHIP_RV635:
712 case CHIP_RV670:
713 rdev->asic = &r600_asic;
714 break;
715 case CHIP_RS780:
716 case CHIP_RS880:
717 rdev->asic = &rs780_asic;
718 break;
719 case CHIP_RV770:
720 case CHIP_RV730:
721 case CHIP_RV710:
722 case CHIP_RV740:
723 rdev->asic = &rv770_asic;
724 break;
725 case CHIP_CEDAR:
726 case CHIP_REDWOOD:
727 case CHIP_JUNIPER:
728 case CHIP_CYPRESS:
729 case CHIP_HEMLOCK:
730 rdev->asic = &evergreen_asic;
731 break;
732 default:
733 /* FIXME: not supported yet */
734 return -EINVAL;
735 }
736
737 if (rdev->flags & RADEON_IS_IGP) {
738 rdev->asic->get_memory_clock = NULL;
739 rdev->asic->set_memory_clock = NULL;
740 }
741
742 /* set the number of crtcs */
743 if (rdev->flags & RADEON_SINGLE_CRTC)
744 rdev->num_crtc = 1;
745 else {
746 if (ASIC_IS_DCE4(rdev))
747 rdev->num_crtc = 6;
748 else
749 rdev->num_crtc = 2;
750 }
751
752 return 0;
753}
754
755/*
756 * Wrapper around modesetting bits. Move to radeon_clocks.c?
757 */
758int radeon_clocks_init(struct radeon_device *rdev)
759{
760 int r;
761
762 r = radeon_static_clocks_init(rdev->ddev);
763 if (r) {
764 return r;
765 }
766 DRM_INFO("Clocks initialized !\n");
767 return 0;
768}
769
770void radeon_clocks_fini(struct radeon_device *rdev)
771{
772}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d3a157b2bcb7..a0b8280663d1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -45,10 +45,18 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
 /*
  * r100,rv100,rs100,rv200,rs200
  */
-extern int r100_init(struct radeon_device *rdev);
-extern void r100_fini(struct radeon_device *rdev);
-extern int r100_suspend(struct radeon_device *rdev);
-extern int r100_resume(struct radeon_device *rdev);
+struct r100_mc_save {
+	u32 GENMO_WT;
+	u32 CRTC_EXT_CNTL;
+	u32 CRTC_GEN_CNTL;
+	u32 CRTC2_GEN_CNTL;
+	u32 CUR_OFFSET;
+	u32 CUR2_OFFSET;
+};
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
@@ -73,7 +81,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
-int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r100_ring_test(struct radeon_device *rdev);
@@ -82,44 +90,42 @@ void r100_hpd_fini(struct radeon_device *rdev);
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void r100_hpd_set_polarity(struct radeon_device *rdev,
 			   enum radeon_hpd_id hpd);
-
-static struct radeon_asic r100_asic = {
-	.init = &r100_init,
-	.fini = &r100_fini,
-	.suspend = &r100_suspend,
-	.resume = &r100_resume,
-	.vga_set_state = &r100_vga_set_state,
-	.gpu_reset = &r100_gpu_reset,
-	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
-	.gart_set_page = &r100_pci_gart_set_page,
-	.cp_commit = &r100_cp_commit,
-	.ring_start = &r100_ring_start,
-	.ring_test = &r100_ring_test,
-	.ring_ib_execute = &r100_ring_ib_execute,
-	.irq_set = &r100_irq_set,
-	.irq_process = &r100_irq_process,
-	.get_vblank_counter = &r100_get_vblank_counter,
-	.fence_ring_emit = &r100_fence_ring_emit,
-	.cs_parse = &r100_cs_parse,
-	.copy_blit = &r100_copy_blit,
-	.copy_dma = NULL,
-	.copy = &r100_copy_blit,
-	.get_engine_clock = &radeon_legacy_get_engine_clock,
-	.set_engine_clock = &radeon_legacy_set_engine_clock,
-	.get_memory_clock = &radeon_legacy_get_memory_clock,
-	.set_memory_clock = NULL,
-	.get_pcie_lanes = NULL,
-	.set_pcie_lanes = NULL,
-	.set_clock_gating = &radeon_legacy_set_clock_gating,
-	.set_surface_reg = r100_set_surface_reg,
-	.clear_surface_reg = r100_clear_surface_reg,
-	.bandwidth_update = &r100_bandwidth_update,
-	.hpd_init = &r100_hpd_init,
-	.hpd_fini = &r100_hpd_fini,
-	.hpd_sense = &r100_hpd_sense,
-	.hpd_set_polarity = &r100_hpd_set_polarity,
-	.ioctl_wait_idle = NULL,
-};
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_ib_fini(struct radeon_device *rdev);
+int r100_ib_init(struct radeon_device *rdev);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+void r100_wb_disable(struct radeon_device *rdev);
+void r100_wb_fini(struct radeon_device *rdev);
+int r100_wb_init(struct radeon_device *rdev);
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_rb2d_reset(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt,
+			 unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
 
 /*
  * r200,rv250,rs300,rv280
@@ -129,43 +135,6 @@ extern int r200_copy_dma(struct radeon_device *rdev,
 			  uint64_t dst_offset,
 			  unsigned num_pages,
 			  struct radeon_fence *fence);
132static struct radeon_asic r200_asic = {
133 .init = &r100_init,
134 .fini = &r100_fini,
135 .suspend = &r100_suspend,
136 .resume = &r100_resume,
137 .vga_set_state = &r100_vga_set_state,
138 .gpu_reset = &r100_gpu_reset,
139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
140 .gart_set_page = &r100_pci_gart_set_page,
141 .cp_commit = &r100_cp_commit,
142 .ring_start = &r100_ring_start,
143 .ring_test = &r100_ring_test,
144 .ring_ib_execute = &r100_ring_ib_execute,
145 .irq_set = &r100_irq_set,
146 .irq_process = &r100_irq_process,
147 .get_vblank_counter = &r100_get_vblank_counter,
148 .fence_ring_emit = &r100_fence_ring_emit,
149 .cs_parse = &r100_cs_parse,
150 .copy_blit = &r100_copy_blit,
151 .copy_dma = &r200_copy_dma,
152 .copy = &r100_copy_blit,
153 .get_engine_clock = &radeon_legacy_get_engine_clock,
154 .set_engine_clock = &radeon_legacy_set_engine_clock,
155 .get_memory_clock = &radeon_legacy_get_memory_clock,
156 .set_memory_clock = NULL,
157 .set_pcie_lanes = NULL,
158 .set_clock_gating = &radeon_legacy_set_clock_gating,
159 .set_surface_reg = r100_set_surface_reg,
160 .clear_surface_reg = r100_clear_surface_reg,
161 .bandwidth_update = &r100_bandwidth_update,
162 .hpd_init = &r100_hpd_init,
163 .hpd_fini = &r100_hpd_fini,
164 .hpd_sense = &r100_hpd_sense,
165 .hpd_set_polarity = &r100_hpd_set_polarity,
166 .ioctl_wait_idle = NULL,
167};
-
 
 /*
  * r300,r350,rv350,rv380
@@ -186,82 +155,6 @@ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 
189static struct radeon_asic r300_asic = {
190 .init = &r300_init,
191 .fini = &r300_fini,
192 .suspend = &r300_suspend,
193 .resume = &r300_resume,
194 .vga_set_state = &r100_vga_set_state,
195 .gpu_reset = &r300_gpu_reset,
196 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
197 .gart_set_page = &r100_pci_gart_set_page,
198 .cp_commit = &r100_cp_commit,
199 .ring_start = &r300_ring_start,
200 .ring_test = &r100_ring_test,
201 .ring_ib_execute = &r100_ring_ib_execute,
202 .irq_set = &r100_irq_set,
203 .irq_process = &r100_irq_process,
204 .get_vblank_counter = &r100_get_vblank_counter,
205 .fence_ring_emit = &r300_fence_ring_emit,
206 .cs_parse = &r300_cs_parse,
207 .copy_blit = &r100_copy_blit,
208 .copy_dma = &r200_copy_dma,
209 .copy = &r100_copy_blit,
210 .get_engine_clock = &radeon_legacy_get_engine_clock,
211 .set_engine_clock = &radeon_legacy_set_engine_clock,
212 .get_memory_clock = &radeon_legacy_get_memory_clock,
213 .set_memory_clock = NULL,
214 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_legacy_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg,
218 .clear_surface_reg = r100_clear_surface_reg,
219 .bandwidth_update = &r100_bandwidth_update,
220 .hpd_init = &r100_hpd_init,
221 .hpd_fini = &r100_hpd_fini,
222 .hpd_sense = &r100_hpd_sense,
223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
225};
226
227
228static struct radeon_asic r300_asic_pcie = {
229 .init = &r300_init,
230 .fini = &r300_fini,
231 .suspend = &r300_suspend,
232 .resume = &r300_resume,
233 .vga_set_state = &r100_vga_set_state,
234 .gpu_reset = &r300_gpu_reset,
235 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
236 .gart_set_page = &rv370_pcie_gart_set_page,
237 .cp_commit = &r100_cp_commit,
238 .ring_start = &r300_ring_start,
239 .ring_test = &r100_ring_test,
240 .ring_ib_execute = &r100_ring_ib_execute,
241 .irq_set = &r100_irq_set,
242 .irq_process = &r100_irq_process,
243 .get_vblank_counter = &r100_get_vblank_counter,
244 .fence_ring_emit = &r300_fence_ring_emit,
245 .cs_parse = &r300_cs_parse,
246 .copy_blit = &r100_copy_blit,
247 .copy_dma = &r200_copy_dma,
248 .copy = &r100_copy_blit,
249 .get_engine_clock = &radeon_legacy_get_engine_clock,
250 .set_engine_clock = &radeon_legacy_set_engine_clock,
251 .get_memory_clock = &radeon_legacy_get_memory_clock,
252 .set_memory_clock = NULL,
253 .set_pcie_lanes = &rv370_set_pcie_lanes,
254 .set_clock_gating = &radeon_legacy_set_clock_gating,
255 .set_surface_reg = r100_set_surface_reg,
256 .clear_surface_reg = r100_clear_surface_reg,
257 .bandwidth_update = &r100_bandwidth_update,
258 .hpd_init = &r100_hpd_init,
259 .hpd_fini = &r100_hpd_fini,
260 .hpd_sense = &r100_hpd_sense,
261 .hpd_set_polarity = &r100_hpd_set_polarity,
262 .ioctl_wait_idle = NULL,
263};
-
 /*
  * r420,r423,rv410
  */
@@ -269,44 +162,6 @@ extern int r420_init(struct radeon_device *rdev);
 extern void r420_fini(struct radeon_device *rdev);
 extern int r420_suspend(struct radeon_device *rdev);
 extern int r420_resume(struct radeon_device *rdev);
272static struct radeon_asic r420_asic = {
273 .init = &r420_init,
274 .fini = &r420_fini,
275 .suspend = &r420_suspend,
276 .resume = &r420_resume,
277 .vga_set_state = &r100_vga_set_state,
278 .gpu_reset = &r300_gpu_reset,
279 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
280 .gart_set_page = &rv370_pcie_gart_set_page,
281 .cp_commit = &r100_cp_commit,
282 .ring_start = &r300_ring_start,
283 .ring_test = &r100_ring_test,
284 .ring_ib_execute = &r100_ring_ib_execute,
285 .irq_set = &r100_irq_set,
286 .irq_process = &r100_irq_process,
287 .get_vblank_counter = &r100_get_vblank_counter,
288 .fence_ring_emit = &r300_fence_ring_emit,
289 .cs_parse = &r300_cs_parse,
290 .copy_blit = &r100_copy_blit,
291 .copy_dma = &r200_copy_dma,
292 .copy = &r100_copy_blit,
293 .get_engine_clock = &radeon_atom_get_engine_clock,
294 .set_engine_clock = &radeon_atom_set_engine_clock,
295 .get_memory_clock = &radeon_atom_get_memory_clock,
296 .set_memory_clock = &radeon_atom_set_memory_clock,
297 .get_pcie_lanes = &rv370_get_pcie_lanes,
298 .set_pcie_lanes = &rv370_set_pcie_lanes,
299 .set_clock_gating = &radeon_atom_set_clock_gating,
300 .set_surface_reg = r100_set_surface_reg,
301 .clear_surface_reg = r100_clear_surface_reg,
302 .bandwidth_update = &r100_bandwidth_update,
303 .hpd_init = &r100_hpd_init,
304 .hpd_fini = &r100_hpd_fini,
305 .hpd_sense = &r100_hpd_sense,
306 .hpd_set_polarity = &r100_hpd_set_polarity,
307 .ioctl_wait_idle = NULL,
308};
-
 
 /*
  * rs400,rs480
@@ -319,44 +174,6 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
322static struct radeon_asic rs400_asic = {
323 .init = &rs400_init,
324 .fini = &rs400_fini,
325 .suspend = &rs400_suspend,
326 .resume = &rs400_resume,
327 .vga_set_state = &r100_vga_set_state,
328 .gpu_reset = &r300_gpu_reset,
329 .gart_tlb_flush = &rs400_gart_tlb_flush,
330 .gart_set_page = &rs400_gart_set_page,
331 .cp_commit = &r100_cp_commit,
332 .ring_start = &r300_ring_start,
333 .ring_test = &r100_ring_test,
334 .ring_ib_execute = &r100_ring_ib_execute,
335 .irq_set = &r100_irq_set,
336 .irq_process = &r100_irq_process,
337 .get_vblank_counter = &r100_get_vblank_counter,
338 .fence_ring_emit = &r300_fence_ring_emit,
339 .cs_parse = &r300_cs_parse,
340 .copy_blit = &r100_copy_blit,
341 .copy_dma = &r200_copy_dma,
342 .copy = &r100_copy_blit,
343 .get_engine_clock = &radeon_legacy_get_engine_clock,
344 .set_engine_clock = &radeon_legacy_set_engine_clock,
345 .get_memory_clock = &radeon_legacy_get_memory_clock,
346 .set_memory_clock = NULL,
347 .get_pcie_lanes = NULL,
348 .set_pcie_lanes = NULL,
349 .set_clock_gating = &radeon_legacy_set_clock_gating,
350 .set_surface_reg = r100_set_surface_reg,
351 .clear_surface_reg = r100_clear_surface_reg,
352 .bandwidth_update = &r100_bandwidth_update,
353 .hpd_init = &r100_hpd_init,
354 .hpd_fini = &r100_hpd_fini,
355 .hpd_sense = &r100_hpd_sense,
356 .hpd_set_polarity = &r100_hpd_set_polarity,
357 .ioctl_wait_idle = NULL,
358};
-
 
 /*
  * rs600.
@@ -379,45 +196,6 @@ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void rs600_hpd_set_polarity(struct radeon_device *rdev,
 			    enum radeon_hpd_id hpd);
 
382static struct radeon_asic rs600_asic = {
383 .init = &rs600_init,
384 .fini = &rs600_fini,
385 .suspend = &rs600_suspend,
386 .resume = &rs600_resume,
387 .vga_set_state = &r100_vga_set_state,
388 .gpu_reset = &r300_gpu_reset,
389 .gart_tlb_flush = &rs600_gart_tlb_flush,
390 .gart_set_page = &rs600_gart_set_page,
391 .cp_commit = &r100_cp_commit,
392 .ring_start = &r300_ring_start,
393 .ring_test = &r100_ring_test,
394 .ring_ib_execute = &r100_ring_ib_execute,
395 .irq_set = &rs600_irq_set,
396 .irq_process = &rs600_irq_process,
397 .get_vblank_counter = &rs600_get_vblank_counter,
398 .fence_ring_emit = &r300_fence_ring_emit,
399 .cs_parse = &r300_cs_parse,
400 .copy_blit = &r100_copy_blit,
401 .copy_dma = &r200_copy_dma,
402 .copy = &r100_copy_blit,
403 .get_engine_clock = &radeon_atom_get_engine_clock,
404 .set_engine_clock = &radeon_atom_set_engine_clock,
405 .get_memory_clock = &radeon_atom_get_memory_clock,
406 .set_memory_clock = &radeon_atom_set_memory_clock,
407 .get_pcie_lanes = NULL,
408 .set_pcie_lanes = NULL,
409 .set_clock_gating = &radeon_atom_set_clock_gating,
410 .set_surface_reg = r100_set_surface_reg,
411 .clear_surface_reg = r100_clear_surface_reg,
412 .bandwidth_update = &rs600_bandwidth_update,
413 .hpd_init = &rs600_hpd_init,
414 .hpd_fini = &rs600_hpd_fini,
415 .hpd_sense = &rs600_hpd_sense,
416 .hpd_set_polarity = &rs600_hpd_set_polarity,
417 .ioctl_wait_idle = NULL,
418};
-
-
 /*
  * rs690,rs740
  */
@@ -428,44 +206,6 @@ int rs690_suspend(struct radeon_device *rdev);
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs690_bandwidth_update(struct radeon_device *rdev);
431static struct radeon_asic rs690_asic = {
432 .init = &rs690_init,
433 .fini = &rs690_fini,
434 .suspend = &rs690_suspend,
435 .resume = &rs690_resume,
436 .vga_set_state = &r100_vga_set_state,
437 .gpu_reset = &r300_gpu_reset,
438 .gart_tlb_flush = &rs400_gart_tlb_flush,
439 .gart_set_page = &rs400_gart_set_page,
440 .cp_commit = &r100_cp_commit,
441 .ring_start = &r300_ring_start,
442 .ring_test = &r100_ring_test,
443 .ring_ib_execute = &r100_ring_ib_execute,
444 .irq_set = &rs600_irq_set,
445 .irq_process = &rs600_irq_process,
446 .get_vblank_counter = &rs600_get_vblank_counter,
447 .fence_ring_emit = &r300_fence_ring_emit,
448 .cs_parse = &r300_cs_parse,
449 .copy_blit = &r100_copy_blit,
450 .copy_dma = &r200_copy_dma,
451 .copy = &r200_copy_dma,
452 .get_engine_clock = &radeon_atom_get_engine_clock,
453 .set_engine_clock = &radeon_atom_set_engine_clock,
454 .get_memory_clock = &radeon_atom_get_memory_clock,
455 .set_memory_clock = &radeon_atom_set_memory_clock,
456 .get_pcie_lanes = NULL,
457 .set_pcie_lanes = NULL,
458 .set_clock_gating = &radeon_atom_set_clock_gating,
459 .set_surface_reg = r100_set_surface_reg,
460 .clear_surface_reg = r100_clear_surface_reg,
461 .bandwidth_update = &rs690_bandwidth_update,
462 .hpd_init = &rs600_hpd_init,
463 .hpd_fini = &rs600_hpd_fini,
464 .hpd_sense = &rs600_hpd_sense,
465 .hpd_set_polarity = &rs600_hpd_set_polarity,
466 .ioctl_wait_idle = NULL,
467};
-
 
 /*
  * rv515
@@ -481,87 +221,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_bandwidth_update(struct radeon_device *rdev);
 int rv515_resume(struct radeon_device *rdev);
 int rv515_suspend(struct radeon_device *rdev);
484static struct radeon_asic rv515_asic = {
485 .init = &rv515_init,
486 .fini = &rv515_fini,
487 .suspend = &rv515_suspend,
488 .resume = &rv515_resume,
489 .vga_set_state = &r100_vga_set_state,
490 .gpu_reset = &rv515_gpu_reset,
491 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
492 .gart_set_page = &rv370_pcie_gart_set_page,
493 .cp_commit = &r100_cp_commit,
494 .ring_start = &rv515_ring_start,
495 .ring_test = &r100_ring_test,
496 .ring_ib_execute = &r100_ring_ib_execute,
497 .irq_set = &rs600_irq_set,
498 .irq_process = &rs600_irq_process,
499 .get_vblank_counter = &rs600_get_vblank_counter,
500 .fence_ring_emit = &r300_fence_ring_emit,
501 .cs_parse = &r300_cs_parse,
502 .copy_blit = &r100_copy_blit,
503 .copy_dma = &r200_copy_dma,
504 .copy = &r100_copy_blit,
505 .get_engine_clock = &radeon_atom_get_engine_clock,
506 .set_engine_clock = &radeon_atom_set_engine_clock,
507 .get_memory_clock = &radeon_atom_get_memory_clock,
508 .set_memory_clock = &radeon_atom_set_memory_clock,
509 .get_pcie_lanes = &rv370_get_pcie_lanes,
510 .set_pcie_lanes = &rv370_set_pcie_lanes,
511 .set_clock_gating = &radeon_atom_set_clock_gating,
512 .set_surface_reg = r100_set_surface_reg,
513 .clear_surface_reg = r100_clear_surface_reg,
514 .bandwidth_update = &rv515_bandwidth_update,
515 .hpd_init = &rs600_hpd_init,
516 .hpd_fini = &rs600_hpd_fini,
517 .hpd_sense = &rs600_hpd_sense,
518 .hpd_set_polarity = &rs600_hpd_set_polarity,
519 .ioctl_wait_idle = NULL,
520};
-
 
 /*
  * r520,rv530,rv560,rv570,r580
  */
 int r520_init(struct radeon_device *rdev);
 int r520_resume(struct radeon_device *rdev);
528static struct radeon_asic r520_asic = {
529 .init = &r520_init,
530 .fini = &rv515_fini,
531 .suspend = &rv515_suspend,
532 .resume = &r520_resume,
533 .vga_set_state = &r100_vga_set_state,
534 .gpu_reset = &rv515_gpu_reset,
535 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
536 .gart_set_page = &rv370_pcie_gart_set_page,
537 .cp_commit = &r100_cp_commit,
538 .ring_start = &rv515_ring_start,
539 .ring_test = &r100_ring_test,
540 .ring_ib_execute = &r100_ring_ib_execute,
541 .irq_set = &rs600_irq_set,
542 .irq_process = &rs600_irq_process,
543 .get_vblank_counter = &rs600_get_vblank_counter,
544 .fence_ring_emit = &r300_fence_ring_emit,
545 .cs_parse = &r300_cs_parse,
546 .copy_blit = &r100_copy_blit,
547 .copy_dma = &r200_copy_dma,
548 .copy = &r100_copy_blit,
549 .get_engine_clock = &radeon_atom_get_engine_clock,
550 .set_engine_clock = &radeon_atom_set_engine_clock,
551 .get_memory_clock = &radeon_atom_get_memory_clock,
552 .set_memory_clock = &radeon_atom_set_memory_clock,
553 .get_pcie_lanes = &rv370_get_pcie_lanes,
554 .set_pcie_lanes = &rv370_set_pcie_lanes,
555 .set_clock_gating = &radeon_atom_set_clock_gating,
556 .set_surface_reg = r100_set_surface_reg,
557 .clear_surface_reg = r100_clear_surface_reg,
558 .bandwidth_update = &rv515_bandwidth_update,
559 .hpd_init = &rs600_hpd_init,
560 .hpd_fini = &rs600_hpd_fini,
561 .hpd_sense = &rs600_hpd_sense,
562 .hpd_set_polarity = &rs600_hpd_set_polarity,
563 .ioctl_wait_idle = NULL,
564};
 
 /*
  * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@@ -591,7 +256,7 @@ int r600_gpu_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
 			 uint32_t offset, uint32_t obj_size);
-int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
@@ -604,43 +269,6 @@ void r600_hpd_set_polarity(struct radeon_device *rdev,
 			   enum radeon_hpd_id hpd);
 extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
 
607static struct radeon_asic r600_asic = {
608 .init = &r600_init,
609 .fini = &r600_fini,
610 .suspend = &r600_suspend,
611 .resume = &r600_resume,
612 .cp_commit = &r600_cp_commit,
613 .vga_set_state = &r600_vga_set_state,
614 .gpu_reset = &r600_gpu_reset,
615 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
616 .gart_set_page = &rs600_gart_set_page,
617 .ring_test = &r600_ring_test,
618 .ring_ib_execute = &r600_ring_ib_execute,
619 .irq_set = &r600_irq_set,
620 .irq_process = &r600_irq_process,
621 .get_vblank_counter = &rs600_get_vblank_counter,
622 .fence_ring_emit = &r600_fence_ring_emit,
623 .cs_parse = &r600_cs_parse,
624 .copy_blit = &r600_copy_blit,
625 .copy_dma = &r600_copy_blit,
626 .copy = &r600_copy_blit,
627 .get_engine_clock = &radeon_atom_get_engine_clock,
628 .set_engine_clock = &radeon_atom_set_engine_clock,
629 .get_memory_clock = &radeon_atom_get_memory_clock,
630 .set_memory_clock = &radeon_atom_set_memory_clock,
631 .get_pcie_lanes = &rv370_get_pcie_lanes,
632 .set_pcie_lanes = NULL,
633 .set_clock_gating = NULL,
634 .set_surface_reg = r600_set_surface_reg,
635 .clear_surface_reg = r600_clear_surface_reg,
636 .bandwidth_update = &rv515_bandwidth_update,
637 .hpd_init = &r600_hpd_init,
638 .hpd_fini = &r600_hpd_fini,
639 .hpd_sense = &r600_hpd_sense,
640 .hpd_set_polarity = &r600_hpd_set_polarity,
641 .ioctl_wait_idle = r600_ioctl_wait_idle,
642};
-
 /*
  * rv770,rv730,rv710,rv740
  */
@@ -650,43 +278,6 @@ int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
 int rv770_gpu_reset(struct radeon_device *rdev);
 
653static struct radeon_asic rv770_asic = {
654 .init = &rv770_init,
655 .fini = &rv770_fini,
656 .suspend = &rv770_suspend,
657 .resume = &rv770_resume,
658 .cp_commit = &r600_cp_commit,
659 .gpu_reset = &rv770_gpu_reset,
660 .vga_set_state = &r600_vga_set_state,
661 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
662 .gart_set_page = &rs600_gart_set_page,
663 .ring_test = &r600_ring_test,
664 .ring_ib_execute = &r600_ring_ib_execute,
665 .irq_set = &r600_irq_set,
666 .irq_process = &r600_irq_process,
667 .get_vblank_counter = &rs600_get_vblank_counter,
668 .fence_ring_emit = &r600_fence_ring_emit,
669 .cs_parse = &r600_cs_parse,
670 .copy_blit = &r600_copy_blit,
671 .copy_dma = &r600_copy_blit,
672 .copy = &r600_copy_blit,
673 .get_engine_clock = &radeon_atom_get_engine_clock,
674 .set_engine_clock = &radeon_atom_set_engine_clock,
675 .get_memory_clock = &radeon_atom_get_memory_clock,
676 .set_memory_clock = &radeon_atom_set_memory_clock,
677 .get_pcie_lanes = &rv370_get_pcie_lanes,
678 .set_pcie_lanes = NULL,
679 .set_clock_gating = &radeon_atom_set_clock_gating,
680 .set_surface_reg = r600_set_surface_reg,
681 .clear_surface_reg = r600_clear_surface_reg,
682 .bandwidth_update = &rv515_bandwidth_update,
683 .hpd_init = &r600_hpd_init,
684 .hpd_fini = &r600_hpd_fini,
685 .hpd_sense = &r600_hpd_sense,
686 .hpd_set_polarity = &r600_hpd_set_polarity,
687 .ioctl_wait_idle = r600_ioctl_wait_idle,
688};
-
 /*
  * evergreen
  */
@@ -701,40 +292,4 @@ void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd);
704
705static struct radeon_asic evergreen_asic = {
706 .init = &evergreen_init,
707 .fini = &evergreen_fini,
708 .suspend = &evergreen_suspend,
709 .resume = &evergreen_resume,
710 .cp_commit = NULL,
711 .gpu_reset = &evergreen_gpu_reset,
712 .vga_set_state = &r600_vga_set_state,
713 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
714 .gart_set_page = &rs600_gart_set_page,
715 .ring_test = NULL,
716 .ring_ib_execute = NULL,
717 .irq_set = NULL,
718 .irq_process = NULL,
719 .get_vblank_counter = NULL,
720 .fence_ring_emit = NULL,
721 .cs_parse = NULL,
722 .copy_blit = NULL,
723 .copy_dma = NULL,
724 .copy = NULL,
725 .get_engine_clock = &radeon_atom_get_engine_clock,
726 .set_engine_clock = &radeon_atom_set_engine_clock,
727 .get_memory_clock = &radeon_atom_get_memory_clock,
728 .set_memory_clock = &radeon_atom_set_memory_clock,
729 .set_pcie_lanes = NULL,
730 .set_clock_gating = NULL,
731 .set_surface_reg = r600_set_surface_reg,
732 .clear_surface_reg = r600_clear_surface_reg,
733 .bandwidth_update = &evergreen_bandwidth_update,
734 .hpd_init = &evergreen_hpd_init,
735 .hpd_fini = &evergreen_hpd_fini,
736 .hpd_sense = &evergreen_hpd_sense,
737 .hpd_set_polarity = &evergreen_hpd_set_polarity,
738};
-
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 93783b15c81d..1fff95505cf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -75,46 +75,45 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
 
-	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
-
-	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
-
-
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
-		gpio = &i2c_info->asGPIO_Info[i];
-
-		if (gpio->sucI2cId.ucAccess == id) {
-			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
-			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-				i2c.hw_capable = true;
-			else
-				i2c.hw_capable = false;
-
-			if (gpio->sucI2cId.ucAccess == 0xa0)
-				i2c.mm_i2c = true;
-			else
-				i2c.mm_i2c = false;
-
-			i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
-			i2c.valid = true;
-			break;
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+		for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+			gpio = &i2c_info->asGPIO_Info[i];
+
+			if (gpio->sucI2cId.ucAccess == id) {
+				i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+				i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+				i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+				i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+				i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+				i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+				i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+				i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+				i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+				i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+				i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+				i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+				i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+				i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+				i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+				i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+				if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+					i2c.hw_capable = true;
+				else
+					i2c.hw_capable = false;
+
+				if (gpio->sucI2cId.ucAccess == 0xa0)
+					i2c.mm_i2c = true;
+				else
+					i2c.mm_i2c = false;
+
+				i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+				i2c.valid = true;
+				break;
+			}
 		}
 	}
 
@@ -135,20 +134,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
135 memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); 134 memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
136 gpio.valid = false; 135 gpio.valid = false;
137 136
138 atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset); 137 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
138 gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
139 139
140 gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); 140 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
141 sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
141 142
142 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); 143 for (i = 0; i < num_indices; i++) {
143 144 pin = &gpio_info->asGPIO_Pin[i];
144 for (i = 0; i < num_indices; i++) { 145 if (id == pin->ucGPIO_ID) {
145 pin = &gpio_info->asGPIO_Pin[i]; 146 gpio.id = pin->ucGPIO_ID;
146 if (id == pin->ucGPIO_ID) { 147 gpio.reg = pin->usGpioPin_AIndex * 4;
147 gpio.id = pin->ucGPIO_ID; 148 gpio.mask = (1 << pin->ucGpioPinBitShift);
148 gpio.reg = pin->usGpioPin_AIndex * 4; 149 gpio.valid = true;
149 gpio.mask = (1 << pin->ucGpioPinBitShift); 150 break;
150 gpio.valid = true; 151 }
151 break;
152 } 152 }
153 } 153 }
154 154
@@ -264,6 +264,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
264 if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || 264 if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
265 (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) 265 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
266 return false; 266 return false;
267 if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
268 *line_mux = 0x90;
267 } 269 }
268 270
269 /* ASUS HD 3600 XT board lists the DVI port as HDMI */ 271 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -395,9 +397,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
395 struct radeon_gpio_rec gpio; 397 struct radeon_gpio_rec gpio;
396 struct radeon_hpd hpd; 398 struct radeon_hpd hpd;
397 399
398 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); 400 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
399
400 if (data_offset == 0)
401 return false; 401 return false;
402 402
403 if (crev < 2) 403 if (crev < 2)
@@ -449,37 +449,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
449 GetIndexIntoMasterTable(DATA, 449 GetIndexIntoMasterTable(DATA,
450 IntegratedSystemInfo); 450 IntegratedSystemInfo);
451 451
452 atom_parse_data_header(ctx, index, &size, &frev, 452 if (atom_parse_data_header(ctx, index, &size, &frev,
453 &crev, &igp_offset); 453 &crev, &igp_offset)) {
454 454
455 if (crev >= 2) { 455 if (crev >= 2) {
456 igp_obj = 456 igp_obj =
457 (ATOM_INTEGRATED_SYSTEM_INFO_V2 457 (ATOM_INTEGRATED_SYSTEM_INFO_V2
458 *) (ctx->bios + igp_offset); 458 *) (ctx->bios + igp_offset);
459 459
460 if (igp_obj) { 460 if (igp_obj) {
461 uint32_t slot_config, ct; 461 uint32_t slot_config, ct;
462 462
463 if (con_obj_num == 1) 463 if (con_obj_num == 1)
464 slot_config = 464 slot_config =
465 igp_obj-> 465 igp_obj->
466 ulDDISlot1Config; 466 ulDDISlot1Config;
467 else 467 else
468 slot_config = 468 slot_config =
469 igp_obj-> 469 igp_obj->
470 ulDDISlot2Config; 470 ulDDISlot2Config;
471 471
472 ct = (slot_config >> 16) & 0xff; 472 ct = (slot_config >> 16) & 0xff;
473 connector_type = 473 connector_type =
474 object_connector_convert 474 object_connector_convert
475 [ct]; 475 [ct];
476 connector_object_id = ct; 476 connector_object_id = ct;
477 igp_lane_info = 477 igp_lane_info =
478 slot_config & 0xffff; 478 slot_config & 0xffff;
479 } else
480 continue;
479 } else 481 } else
480 continue; 482 continue;
481 } else 483 } else {
482 continue; 484 igp_lane_info = 0;
485 connector_type =
486 object_connector_convert[con_obj_id];
487 connector_object_id = con_obj_id;
488 }
483 } else { 489 } else {
484 igp_lane_info = 0; 490 igp_lane_info = 0;
485 connector_type = 491 connector_type =
@@ -627,20 +633,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
627 uint8_t frev, crev; 633 uint8_t frev, crev;
628 ATOM_XTMDS_INFO *xtmds; 634 ATOM_XTMDS_INFO *xtmds;
629 635
630 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); 636 if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
631 xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); 637 xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
632 638
633 if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { 639 if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
634 if (connector_type == DRM_MODE_CONNECTOR_DVII) 640 if (connector_type == DRM_MODE_CONNECTOR_DVII)
635 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; 641 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
636 else 642 else
637 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; 643 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
638 } else { 644 } else {
639 if (connector_type == DRM_MODE_CONNECTOR_DVII) 645 if (connector_type == DRM_MODE_CONNECTOR_DVII)
640 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; 646 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
641 else 647 else
642 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; 648 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
643 } 649 }
650 } else
651 return supported_devices_connector_object_id_convert
652 [connector_type];
644 } else { 653 } else {
645 return supported_devices_connector_object_id_convert 654 return supported_devices_connector_object_id_convert
646 [connector_type]; 655 [connector_type];
@@ -672,7 +681,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
672 int i, j, max_device; 681 int i, j, max_device;
673 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; 682 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
674 683
675 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); 684 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
685 return false;
676 686
677 supported_devices = 687 supported_devices =
678 (union atom_supported_devices *)(ctx->bios + data_offset); 688 (union atom_supported_devices *)(ctx->bios + data_offset);
@@ -865,14 +875,11 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
865 struct radeon_pll *mpll = &rdev->clock.mpll; 875 struct radeon_pll *mpll = &rdev->clock.mpll;
866 uint16_t data_offset; 876 uint16_t data_offset;
867 877
868 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 878 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
869 &crev, &data_offset); 879 &frev, &crev, &data_offset)) {
870 880 firmware_info =
871 firmware_info = 881 (union firmware_info *)(mode_info->atom_context->bios +
872 (union firmware_info *)(mode_info->atom_context->bios + 882 data_offset);
873 data_offset);
874
875 if (firmware_info) {
876 /* pixel clocks */ 883 /* pixel clocks */
877 p1pll->reference_freq = 884 p1pll->reference_freq =
878 le16_to_cpu(firmware_info->info.usReferenceClock); 885 le16_to_cpu(firmware_info->info.usReferenceClock);
@@ -887,6 +894,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
887 p1pll->pll_out_max = 894 p1pll->pll_out_max =
888 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); 895 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
889 896
897 if (crev >= 4) {
898 p1pll->lcd_pll_out_min =
899 le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
900 if (p1pll->lcd_pll_out_min == 0)
901 p1pll->lcd_pll_out_min = p1pll->pll_out_min;
902 p1pll->lcd_pll_out_max =
903 le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
904 if (p1pll->lcd_pll_out_max == 0)
905 p1pll->lcd_pll_out_max = p1pll->pll_out_max;
906 } else {
907 p1pll->lcd_pll_out_min = p1pll->pll_out_min;
908 p1pll->lcd_pll_out_max = p1pll->pll_out_max;
909 }
910
890 if (p1pll->pll_out_min == 0) { 911 if (p1pll->pll_out_min == 0) {
891 if (ASIC_IS_AVIVO(rdev)) 912 if (ASIC_IS_AVIVO(rdev))
892 p1pll->pll_out_min = 64800; 913 p1pll->pll_out_min = 64800;
@@ -992,13 +1013,10 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
992 u8 frev, crev; 1013 u8 frev, crev;
993 u16 data_offset; 1014 u16 data_offset;
994 1015
995 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1016 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
996 &crev, &data_offset); 1017 &frev, &crev, &data_offset)) {
997 1018 igp_info = (union igp_info *)(mode_info->atom_context->bios +
998 igp_info = (union igp_info *)(mode_info->atom_context->bios +
999 data_offset); 1019 data_offset);
1000
1001 if (igp_info) {
1002 switch (crev) { 1020 switch (crev) {
1003 case 1: 1021 case 1:
1004 if (igp_info->info.ucMemoryType & 0xf0) 1022 if (igp_info->info.ucMemoryType & 0xf0)
@@ -1029,14 +1047,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
1029 uint16_t maxfreq; 1047 uint16_t maxfreq;
1030 int i; 1048 int i;
1031 1049
1032 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1050 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1033 &crev, &data_offset); 1051 &frev, &crev, &data_offset)) {
1034 1052 tmds_info =
1035 tmds_info = 1053 (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
1036 (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios + 1054 data_offset);
1037 data_offset);
1038 1055
1039 if (tmds_info) {
1040 maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); 1056 maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
1041 for (i = 0; i < 4; i++) { 1057 for (i = 0; i < 4; i++) {
1042 tmds->tmds_pll[i].freq = 1058 tmds->tmds_pll[i].freq =
@@ -1085,13 +1101,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1085 if (id > ATOM_MAX_SS_ENTRY) 1101 if (id > ATOM_MAX_SS_ENTRY)
1086 return NULL; 1102 return NULL;
1087 1103
1088 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1104 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1089 &crev, &data_offset); 1105 &frev, &crev, &data_offset)) {
1106 ss_info =
1107 (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
1090 1108
1091 ss_info =
1092 (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
1093
1094 if (ss_info) {
1095 ss = 1109 ss =
1096 kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); 1110 kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
1097 1111
@@ -1114,30 +1128,6 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1114 return ss; 1128 return ss;
1115} 1129}
1116 1130
1117static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
1118 struct radeon_encoder_atom_dig *lvds)
1119{
1120
1121 /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
1122 if ((dev->pdev->device == 0x95c4) &&
1123 (dev->pdev->subsystem_vendor == 0x1179) &&
1124 (dev->pdev->subsystem_device == 0xff50)) {
1125 if ((lvds->native_mode.hdisplay == 1280) &&
1126 (lvds->native_mode.vdisplay == 800))
1127 lvds->pll_algo = PLL_ALGO_LEGACY;
1128 }
1129
1130 /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
1131 if ((dev->pdev->device == 0x95c4) &&
1132 (dev->pdev->subsystem_vendor == 0x1028) &&
1133 (dev->pdev->subsystem_device == 0x029f)) {
1134 if ((lvds->native_mode.hdisplay == 1280) &&
1135 (lvds->native_mode.vdisplay == 800))
1136 lvds->pll_algo = PLL_ALGO_LEGACY;
1137 }
1138
1139}
1140
1141union lvds_info { 1131union lvds_info {
1142 struct _ATOM_LVDS_INFO info; 1132 struct _ATOM_LVDS_INFO info;
1143 struct _ATOM_LVDS_INFO_V12 info_12; 1133 struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1156,13 +1146,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1156 uint8_t frev, crev; 1146 uint8_t frev, crev;
1157 struct radeon_encoder_atom_dig *lvds = NULL; 1147 struct radeon_encoder_atom_dig *lvds = NULL;
1158 1148
1159 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1149 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1160 &crev, &data_offset); 1150 &frev, &crev, &data_offset)) {
1161 1151 lvds_info =
1162 lvds_info = 1152 (union lvds_info *)(mode_info->atom_context->bios + data_offset);
1163 (union lvds_info *)(mode_info->atom_context->bios + data_offset);
1164
1165 if (lvds_info) {
1166 lvds = 1153 lvds =
1167 kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); 1154 kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
1168 1155
@@ -1220,9 +1207,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1220 lvds->pll_algo = PLL_ALGO_LEGACY; 1207 lvds->pll_algo = PLL_ALGO_LEGACY;
1221 } 1208 }
1222 1209
1223 /* LVDS quirks */
1224 radeon_atom_apply_lvds_quirks(dev, lvds);
1225
1226 encoder->native_mode = lvds->native_mode; 1210 encoder->native_mode = lvds->native_mode;
1227 } 1211 }
1228 return lvds; 1212 return lvds;
@@ -1241,11 +1225,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
1241 uint8_t bg, dac; 1225 uint8_t bg, dac;
1242 struct radeon_encoder_primary_dac *p_dac = NULL; 1226 struct radeon_encoder_primary_dac *p_dac = NULL;
1243 1227
1244 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 1228 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1245 1229 &frev, &crev, &data_offset)) {
1246 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); 1230 dac_info = (struct _COMPASSIONATE_DATA *)
1231 (mode_info->atom_context->bios + data_offset);
1247 1232
1248 if (dac_info) {
1249 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); 1233 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
1250 1234
1251 if (!p_dac) 1235 if (!p_dac)
@@ -1270,7 +1254,9 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
1270 u8 frev, crev; 1254 u8 frev, crev;
1271 u16 data_offset, misc; 1255 u16 data_offset, misc;
1272 1256
1273 atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); 1257 if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
1258 &frev, &crev, &data_offset))
1259 return false;
1274 1260
1275 switch (crev) { 1261 switch (crev) {
1276 case 1: 1262 case 1:
@@ -1362,47 +1348,50 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
1362 struct _ATOM_ANALOG_TV_INFO *tv_info; 1348 struct _ATOM_ANALOG_TV_INFO *tv_info;
1363 enum radeon_tv_std tv_std = TV_STD_NTSC; 1349 enum radeon_tv_std tv_std = TV_STD_NTSC;
1364 1350
1365 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 1351 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1352 &frev, &crev, &data_offset)) {
1366 1353
1367 tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); 1354 tv_info = (struct _ATOM_ANALOG_TV_INFO *)
1355 (mode_info->atom_context->bios + data_offset);
1368 1356
1369 switch (tv_info->ucTV_BootUpDefaultStandard) { 1357 switch (tv_info->ucTV_BootUpDefaultStandard) {
1370 case ATOM_TV_NTSC: 1358 case ATOM_TV_NTSC:
1371 tv_std = TV_STD_NTSC; 1359 tv_std = TV_STD_NTSC;
1372 DRM_INFO("Default TV standard: NTSC\n"); 1360 DRM_INFO("Default TV standard: NTSC\n");
1373 break; 1361 break;
1374 case ATOM_TV_NTSCJ: 1362 case ATOM_TV_NTSCJ:
1375 tv_std = TV_STD_NTSC_J; 1363 tv_std = TV_STD_NTSC_J;
1376 DRM_INFO("Default TV standard: NTSC-J\n"); 1364 DRM_INFO("Default TV standard: NTSC-J\n");
1377 break; 1365 break;
1378 case ATOM_TV_PAL: 1366 case ATOM_TV_PAL:
1379 tv_std = TV_STD_PAL; 1367 tv_std = TV_STD_PAL;
1380 DRM_INFO("Default TV standard: PAL\n"); 1368 DRM_INFO("Default TV standard: PAL\n");
1381 break; 1369 break;
1382 case ATOM_TV_PALM: 1370 case ATOM_TV_PALM:
1383 tv_std = TV_STD_PAL_M; 1371 tv_std = TV_STD_PAL_M;
1384 DRM_INFO("Default TV standard: PAL-M\n"); 1372 DRM_INFO("Default TV standard: PAL-M\n");
1385 break; 1373 break;
1386 case ATOM_TV_PALN: 1374 case ATOM_TV_PALN:
1387 tv_std = TV_STD_PAL_N; 1375 tv_std = TV_STD_PAL_N;
1388 DRM_INFO("Default TV standard: PAL-N\n"); 1376 DRM_INFO("Default TV standard: PAL-N\n");
1389 break; 1377 break;
1390 case ATOM_TV_PALCN: 1378 case ATOM_TV_PALCN:
1391 tv_std = TV_STD_PAL_CN; 1379 tv_std = TV_STD_PAL_CN;
1392 DRM_INFO("Default TV standard: PAL-CN\n"); 1380 DRM_INFO("Default TV standard: PAL-CN\n");
1393 break; 1381 break;
1394 case ATOM_TV_PAL60: 1382 case ATOM_TV_PAL60:
1395 tv_std = TV_STD_PAL_60; 1383 tv_std = TV_STD_PAL_60;
1396 DRM_INFO("Default TV standard: PAL-60\n"); 1384 DRM_INFO("Default TV standard: PAL-60\n");
1397 break; 1385 break;
1398 case ATOM_TV_SECAM: 1386 case ATOM_TV_SECAM:
1399 tv_std = TV_STD_SECAM; 1387 tv_std = TV_STD_SECAM;
1400 DRM_INFO("Default TV standard: SECAM\n"); 1388 DRM_INFO("Default TV standard: SECAM\n");
1401 break; 1389 break;
1402 default: 1390 default:
1403 tv_std = TV_STD_NTSC; 1391 tv_std = TV_STD_NTSC;
1404 DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); 1392 DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
1405 break; 1393 break;
1394 }
1406 } 1395 }
1407 return tv_std; 1396 return tv_std;
1408} 1397}
@@ -1420,11 +1409,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1420 uint8_t bg, dac; 1409 uint8_t bg, dac;
1421 struct radeon_encoder_tv_dac *tv_dac = NULL; 1410 struct radeon_encoder_tv_dac *tv_dac = NULL;
1422 1411
1423 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 1412 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1413 &frev, &crev, &data_offset)) {
1424 1414
1425 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); 1415 dac_info = (struct _COMPASSIONATE_DATA *)
1416 (mode_info->atom_context->bios + data_offset);
1426 1417
1427 if (dac_info) {
1428 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); 1418 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
1429 1419
1430 if (!tv_dac) 1420 if (!tv_dac)
@@ -1447,6 +1437,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1447 return tv_dac; 1437 return tv_dac;
1448} 1438}
1449 1439
1440static const char *thermal_controller_names[] = {
1441 "NONE",
1442 "LM63",
1443 "ADM1032",
1444 "ADM1030",
1445 "MUA6649",
1446 "LM64",
1447 "F75375",
1448 "ASC7512",
1449};
1450
1451static const char *pp_lib_thermal_controller_names[] = {
1452 "NONE",
1453 "LM63",
1454 "ADM1032",
1455 "ADM1030",
1456 "MUA6649",
1457 "LM64",
1458 "F75375",
1459 "RV6xx",
1460 "RV770",
1461 "ADT7473",
1462};
1463
1450union power_info { 1464union power_info {
1451 struct _ATOM_POWERPLAY_INFO info; 1465 struct _ATOM_POWERPLAY_INFO info;
1452 struct _ATOM_POWERPLAY_INFO_V2 info_2; 1466 struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -1466,15 +1480,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1466 struct _ATOM_PPLIB_STATE *power_state; 1480 struct _ATOM_PPLIB_STATE *power_state;
1467 int num_modes = 0, i, j; 1481 int num_modes = 0, i, j;
1468 int state_index = 0, mode_index = 0; 1482 int state_index = 0, mode_index = 0;
1469 1483 struct radeon_i2c_bus_rec i2c_bus;
1470 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
1471
1472 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1473 1484
1474 rdev->pm.default_power_state = NULL; 1485 rdev->pm.default_power_state = NULL;
1475 1486
1476 if (power_info) { 1487 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1488 &frev, &crev, &data_offset)) {
1489 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1477 if (frev < 4) { 1490 if (frev < 4) {
1491 /* add the i2c bus for thermal/fan chip */
1492 if (power_info->info.ucOverdriveThermalController > 0) {
1493 DRM_INFO("Possible %s thermal controller at 0x%02x\n",
1494 thermal_controller_names[power_info->info.ucOverdriveThermalController],
1495 power_info->info.ucOverdriveControllerAddress >> 1);
1496 i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
1497 rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
1498 }
1478 num_modes = power_info->info.ucNumOfPowerModeEntries; 1499 num_modes = power_info->info.ucNumOfPowerModeEntries;
1479 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 1500 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1480 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 1501 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
@@ -1684,6 +1705,24 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1684 } 1705 }
1685 } 1706 }
1686 } else if (frev == 4) { 1707 } else if (frev == 4) {
1708 /* add the i2c bus for thermal/fan chip */
1709 /* no support for internal controller yet */
1710 if (power_info->info_4.sThermalController.ucType > 0) {
1711 if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
1712 (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
1713 DRM_INFO("Internal thermal controller %s fan control\n",
1714 (power_info->info_4.sThermalController.ucFanParameters &
1715 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1716 } else {
1717 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
1718 pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
1719 power_info->info_4.sThermalController.ucI2cAddress >> 1,
1720 (power_info->info_4.sThermalController.ucFanParameters &
1721 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1722 i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
1723 rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
1724 }
1725 }
1687 for (i = 0; i < power_info->info_4.ucNumStates; i++) { 1726 for (i = 0; i < power_info->info_4.ucNumStates; i++) {
1688 mode_index = 0; 1727 mode_index = 0;
1689 power_state = (struct _ATOM_PPLIB_STATE *) 1728 power_state = (struct _ATOM_PPLIB_STATE *)
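
Note on the radeon_atombios.c changes above: nearly every hunk applies the same pattern — the return value of atom_parse_data_header() is checked before anything is read from ctx->bios + data_offset, where the old code parsed unconditionally and used whatever offset came back. A minimal stand-alone sketch of that guard follows; parse_data_header(), lookup_table() and the local bios buffer are stand-ins for illustration, not the driver's own symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for atom_parse_data_header(), which the new code treats as
 * returning false when the requested BIOS data table is absent. */
static bool parse_data_header(uint16_t *data_offset)
{
	return false;	/* pretend the table was not found */
}

static bool lookup_table(const uint8_t *bios)
{
	uint16_t data_offset;

	/* the old code fell through here and used bios + a stale offset */
	if (!parse_data_header(&data_offset))
		return false;

	printf("table at %p\n", (const void *)(bios + data_offset));
	return true;
}

int main(void)
{
	uint8_t bios[256] = { 0 };

	return lookup_table(bios) ? 0 : 1;
}
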
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e9ea38ece375..2becdeda68a3 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -531,10 +531,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
531 case CHIP_RS300: 531 case CHIP_RS300:
532 switch (ddc_line) { 532 switch (ddc_line) {
533 case RADEON_GPIO_DVI_DDC: 533 case RADEON_GPIO_DVI_DDC:
534 /* in theory this should be hw capable, 534 i2c.hw_capable = true;
535 * but it doesn't seem to work
536 */
537 i2c.hw_capable = false;
538 break; 535 break;
539 default: 536 default:
540 i2c.hw_capable = false; 537 i2c.hw_capable = false;
@@ -633,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
633 p1pll->reference_div = RBIOS16(pll_info + 0x10); 630 p1pll->reference_div = RBIOS16(pll_info + 0x10);
634 p1pll->pll_out_min = RBIOS32(pll_info + 0x12); 631 p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
635 p1pll->pll_out_max = RBIOS32(pll_info + 0x16); 632 p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
633 p1pll->lcd_pll_out_min = p1pll->pll_out_min;
634 p1pll->lcd_pll_out_max = p1pll->pll_out_max;
636 635
637 if (rev > 9) { 636 if (rev > 9) {
638 p1pll->pll_in_min = RBIOS32(pll_info + 0x36); 637 p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee0083f982d8..60d59816b94f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -940,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
940 if (radeon_connector->edid) 940 if (radeon_connector->edid)
941 kfree(radeon_connector->edid); 941 kfree(radeon_connector->edid);
942 if (radeon_dig_connector->dp_i2c_bus) 942 if (radeon_dig_connector->dp_i2c_bus)
943 radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus); 943 radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
944 kfree(radeon_connector->con_priv); 944 kfree(radeon_connector->con_priv);
945 drm_sysfs_connector_remove(connector); 945 drm_sysfs_connector_remove(connector);
946 drm_connector_cleanup(connector); 946 drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70ba02ed7723..f9b0fe002c0a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -193,9 +193,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
193 radeon_bo_list_fence(&parser->validated, parser->ib->fence); 193 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
194 } 194 }
195 radeon_bo_list_unreserve(&parser->validated); 195 radeon_bo_list_unreserve(&parser->validated);
196 for (i = 0; i < parser->nrelocs; i++) { 196 if (parser->relocs != NULL) {
197 if (parser->relocs[i].gobj) 197 for (i = 0; i < parser->nrelocs; i++) {
198 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); 198 if (parser->relocs[i].gobj)
199 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
200 }
199 } 201 }
200 kfree(parser->track); 202 kfree(parser->track);
201 kfree(parser->relocs); 203 kfree(parser->relocs);
@@ -243,7 +245,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
243 } 245 }
244 r = radeon_cs_parser_relocs(&parser); 246 r = radeon_cs_parser_relocs(&parser);
245 if (r) { 247 if (r) {
246 DRM_ERROR("Failed to parse relocation !\n"); 248 if (r != -ERESTARTSYS)
249 DRM_ERROR("Failed to parse relocation %d!\n", r);
247 radeon_cs_parser_fini(&parser, r); 250 radeon_cs_parser_fini(&parser, r);
248 mutex_unlock(&rdev->cs_mutex); 251 mutex_unlock(&rdev->cs_mutex);
249 return r; 252 return r;
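
The radeon_cs.c change above does two small things: the reloc cleanup loop only runs when parser->relocs was actually allocated, and -ERESTARTSYS no longer produces an error message, since it only means the ioctl was interrupted by a signal and will be restarted. A tiny sketch of the logging rule; ERESTARTSYS is defined locally because the constant is kernel-internal, and do_relocs() merely stands in for radeon_cs_parser_relocs().

#include <stdio.h>

#define ERESTARTSYS 512		/* kernel-internal errno, not exported to userspace */

/* Stand-in for radeon_cs_parser_relocs(); pretend a signal interrupted us. */
static int do_relocs(void)
{
	return -ERESTARTSYS;
}

int main(void)
{
	int r = do_relocs();

	if (r) {
		/* an interrupted-and-restarted ioctl is not worth an error line */
		if (r != -ERESTARTSYS)
			fprintf(stderr, "Failed to parse relocation %d!\n", r);
		return 1;
	}
	return 0;
}
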
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0cc337edf3a3..bddf17f97da8 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -34,7 +34,6 @@
34#include <linux/vga_switcheroo.h> 34#include <linux/vga_switcheroo.h>
35#include "radeon_reg.h" 35#include "radeon_reg.h"
36#include "radeon.h" 36#include "radeon.h"
37#include "radeon_asic.h"
38#include "atom.h" 37#include "atom.h"
39 38
40/* 39/*
@@ -243,6 +242,36 @@ bool radeon_card_posted(struct radeon_device *rdev)
243 242
244} 243}
245 244
245void radeon_update_bandwidth_info(struct radeon_device *rdev)
246{
247 fixed20_12 a;
248 u32 sclk, mclk;
249
250 if (rdev->flags & RADEON_IS_IGP) {
251 sclk = radeon_get_engine_clock(rdev);
252 mclk = rdev->clock.default_mclk;
253
254 a.full = rfixed_const(100);
255 rdev->pm.sclk.full = rfixed_const(sclk);
256 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
257 rdev->pm.mclk.full = rfixed_const(mclk);
258 rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
259
260 a.full = rfixed_const(16);
261 /* core_bandwidth = sclk(Mhz) * 16 */
262 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
263 } else {
264 sclk = radeon_get_engine_clock(rdev);
265 mclk = radeon_get_memory_clock(rdev);
266
267 a.full = rfixed_const(100);
268 rdev->pm.sclk.full = rfixed_const(sclk);
269 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
270 rdev->pm.mclk.full = rfixed_const(mclk);
271 rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
272 }
273}
274
246bool radeon_boot_test_post_card(struct radeon_device *rdev) 275bool radeon_boot_test_post_card(struct radeon_device *rdev)
247{ 276{
248 if (radeon_card_posted(rdev)) 277 if (radeon_card_posted(rdev))
@@ -289,181 +318,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
289} 318}
290 319
291 320
292/*
293 * Registers accessors functions.
294 */
295uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
296{
297 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
298 BUG_ON(1);
299 return 0;
300}
301
302void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
303{
304 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
305 reg, v);
306 BUG_ON(1);
307}
308
309void radeon_register_accessor_init(struct radeon_device *rdev)
310{
311 rdev->mc_rreg = &radeon_invalid_rreg;
312 rdev->mc_wreg = &radeon_invalid_wreg;
313 rdev->pll_rreg = &radeon_invalid_rreg;
314 rdev->pll_wreg = &radeon_invalid_wreg;
315 rdev->pciep_rreg = &radeon_invalid_rreg;
316 rdev->pciep_wreg = &radeon_invalid_wreg;
317
318 /* Don't change order as we are overridding accessor. */
319 if (rdev->family < CHIP_RV515) {
320 rdev->pcie_reg_mask = 0xff;
321 } else {
322 rdev->pcie_reg_mask = 0x7ff;
323 }
324 /* FIXME: not sure here */
325 if (rdev->family <= CHIP_R580) {
326 rdev->pll_rreg = &r100_pll_rreg;
327 rdev->pll_wreg = &r100_pll_wreg;
328 }
329 if (rdev->family >= CHIP_R420) {
330 rdev->mc_rreg = &r420_mc_rreg;
331 rdev->mc_wreg = &r420_mc_wreg;
332 }
333 if (rdev->family >= CHIP_RV515) {
334 rdev->mc_rreg = &rv515_mc_rreg;
335 rdev->mc_wreg = &rv515_mc_wreg;
336 }
337 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
338 rdev->mc_rreg = &rs400_mc_rreg;
339 rdev->mc_wreg = &rs400_mc_wreg;
340 }
341 if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
342 rdev->mc_rreg = &rs690_mc_rreg;
343 rdev->mc_wreg = &rs690_mc_wreg;
344 }
345 if (rdev->family == CHIP_RS600) {
346 rdev->mc_rreg = &rs600_mc_rreg;
347 rdev->mc_wreg = &rs600_mc_wreg;
348 }
349 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
350 rdev->pciep_rreg = &r600_pciep_rreg;
351 rdev->pciep_wreg = &r600_pciep_wreg;
352 }
353}
354
355
356/*
357 * ASIC
358 */
359int radeon_asic_init(struct radeon_device *rdev)
360{
361 radeon_register_accessor_init(rdev);
362 switch (rdev->family) {
363 case CHIP_R100:
364 case CHIP_RV100:
365 case CHIP_RS100:
366 case CHIP_RV200:
367 case CHIP_RS200:
368 rdev->asic = &r100_asic;
369 break;
370 case CHIP_R200:
371 case CHIP_RV250:
372 case CHIP_RS300:
373 case CHIP_RV280:
374 rdev->asic = &r200_asic;
375 break;
376 case CHIP_R300:
377 case CHIP_R350:
378 case CHIP_RV350:
379 case CHIP_RV380:
380 if (rdev->flags & RADEON_IS_PCIE)
381 rdev->asic = &r300_asic_pcie;
382 else
383 rdev->asic = &r300_asic;
384 break;
385 case CHIP_R420:
386 case CHIP_R423:
387 case CHIP_RV410:
388 rdev->asic = &r420_asic;
389 break;
390 case CHIP_RS400:
391 case CHIP_RS480:
392 rdev->asic = &rs400_asic;
393 break;
394 case CHIP_RS600:
395 rdev->asic = &rs600_asic;
396 break;
397 case CHIP_RS690:
398 case CHIP_RS740:
399 rdev->asic = &rs690_asic;
400 break;
401 case CHIP_RV515:
402 rdev->asic = &rv515_asic;
403 break;
404 case CHIP_R520:
405 case CHIP_RV530:
406 case CHIP_RV560:
407 case CHIP_RV570:
408 case CHIP_R580:
409 rdev->asic = &r520_asic;
410 break;
411 case CHIP_R600:
412 case CHIP_RV610:
413 case CHIP_RV630:
414 case CHIP_RV620:
415 case CHIP_RV635:
416 case CHIP_RV670:
417 case CHIP_RS780:
418 case CHIP_RS880:
419 rdev->asic = &r600_asic;
420 break;
421 case CHIP_RV770:
422 case CHIP_RV730:
423 case CHIP_RV710:
424 case CHIP_RV740:
425 rdev->asic = &rv770_asic;
426 break;
427 case CHIP_CEDAR:
428 case CHIP_REDWOOD:
429 case CHIP_JUNIPER:
430 case CHIP_CYPRESS:
431 case CHIP_HEMLOCK:
432 rdev->asic = &evergreen_asic;
433 break;
434 default:
435 /* FIXME: not supported yet */
436 return -EINVAL;
437 }
438
439 if (rdev->flags & RADEON_IS_IGP) {
440 rdev->asic->get_memory_clock = NULL;
441 rdev->asic->set_memory_clock = NULL;
442 }
443
444 return 0;
445}
446
447
448/*
449 * Wrapper around modesetting bits.
450 */
451int radeon_clocks_init(struct radeon_device *rdev)
452{
453 int r;
454
455 r = radeon_static_clocks_init(rdev->ddev);
456 if (r) {
457 return r;
458 }
459 DRM_INFO("Clocks initialized !\n");
460 return 0;
461}
462
463void radeon_clocks_fini(struct radeon_device *rdev)
464{
465}
466
467/* ATOM accessor methods */ 321/* ATOM accessor methods */
468static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) 322static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
469{ 323{
@@ -568,29 +422,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
568 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 422 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
569} 423}
570 424
571void radeon_agp_disable(struct radeon_device *rdev)
572{
573 rdev->flags &= ~RADEON_IS_AGP;
574 if (rdev->family >= CHIP_R600) {
575 DRM_INFO("Forcing AGP to PCIE mode\n");
576 rdev->flags |= RADEON_IS_PCIE;
577 } else if (rdev->family >= CHIP_RV515 ||
578 rdev->family == CHIP_RV380 ||
579 rdev->family == CHIP_RV410 ||
580 rdev->family == CHIP_R423) {
581 DRM_INFO("Forcing AGP to PCIE mode\n");
582 rdev->flags |= RADEON_IS_PCIE;
583 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
584 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
585 } else {
586 DRM_INFO("Forcing AGP to PCI mode\n");
587 rdev->flags |= RADEON_IS_PCI;
588 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
589 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
590 }
591 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
592}
593
594void radeon_check_arguments(struct radeon_device *rdev) 425void radeon_check_arguments(struct radeon_device *rdev)
595{ 426{
596 /* vramlimit must be a power of two */ 427 /* vramlimit must be a power of two */
@@ -732,6 +563,14 @@ int radeon_device_init(struct radeon_device *rdev,
732 return r; 563 return r;
733 radeon_check_arguments(rdev); 564 radeon_check_arguments(rdev);
734 565
566 /* all of the newer IGP chips have an internal gart
567 * However some rs4xx report as AGP, so remove that here.
568 */
569 if ((rdev->family >= CHIP_RS400) &&
570 (rdev->flags & RADEON_IS_IGP)) {
571 rdev->flags &= ~RADEON_IS_AGP;
572 }
573
735 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 574 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
736 radeon_agp_disable(rdev); 575 radeon_agp_disable(rdev);
737 } 576 }
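
The radeon_update_bandwidth_info() helper added above stores the engine and memory clocks as 20.12 fixed-point values: the clocks appear to be reported in 10 kHz units (that is what the divide by a constant 100 and the "sclk(Mhz)" comment imply), and rfixed_const()/rfixed_div() keep twelve fractional bits across the division. A stand-alone sketch of that arithmetic under those assumptions; fx_const() and fx_div() only mirror what the driver's helpers are presumed to do.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;

/* value held shifted left by 12 bits */
static fixed20_12 fx_const(uint32_t v)
{
	fixed20_12 f = { .full = v << 12 };
	return f;
}

/* widen to 64 bits so the quotient keeps its 12 fractional bits */
static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
	fixed20_12 f = { .full = (uint32_t)(((uint64_t)a.full << 12) / b.full) };
	return f;
}

int main(void)
{
	uint32_t sclk = 68000;			/* a 680 MHz engine clock in 10 kHz units */
	fixed20_12 hundred = fx_const(100);
	fixed20_12 mhz = fx_div(fx_const(sclk), hundred);

	printf("sclk = %u.%03u MHz\n", mhz.full >> 12,
	       (mhz.full & 0xfff) * 1000 / 4096);
	return 0;
}
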
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ba8d806dcf39..b8d672828246 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -368,10 +368,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
368 368
369 if (rdev->bios) { 369 if (rdev->bios) {
370 if (rdev->is_atom_bios) { 370 if (rdev->is_atom_bios) {
371 if (rdev->family >= CHIP_R600) 371 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
372 if (ret == false)
372 ret = radeon_get_atom_connector_info_from_object_table(dev); 373 ret = radeon_get_atom_connector_info_from_object_table(dev);
373 else
374 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
375 } else { 374 } else {
376 ret = radeon_get_legacy_connector_info_from_bios(dev); 375 ret = radeon_get_legacy_connector_info_from_bios(dev);
377 if (ret == false) 376 if (ret == false)
@@ -469,10 +468,19 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
469 uint32_t best_error = 0xffffffff; 468 uint32_t best_error = 0xffffffff;
470 uint32_t best_vco_diff = 1; 469 uint32_t best_vco_diff = 1;
471 uint32_t post_div; 470 uint32_t post_div;
471 u32 pll_out_min, pll_out_max;
472 472
473 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 473 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
474 freq = freq * 1000; 474 freq = freq * 1000;
475 475
476 if (pll->flags & RADEON_PLL_IS_LCD) {
477 pll_out_min = pll->lcd_pll_out_min;
478 pll_out_max = pll->lcd_pll_out_max;
479 } else {
480 pll_out_min = pll->pll_out_min;
481 pll_out_max = pll->pll_out_max;
482 }
483
476 if (pll->flags & RADEON_PLL_USE_REF_DIV) 484 if (pll->flags & RADEON_PLL_USE_REF_DIV)
477 min_ref_div = max_ref_div = pll->reference_div; 485 min_ref_div = max_ref_div = pll->reference_div;
478 else { 486 else {
@@ -536,10 +544,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
536 tmp = (uint64_t)pll->reference_freq * feedback_div; 544 tmp = (uint64_t)pll->reference_freq * feedback_div;
537 vco = radeon_div(tmp, ref_div); 545 vco = radeon_div(tmp, ref_div);
538 546
539 if (vco < pll->pll_out_min) { 547 if (vco < pll_out_min) {
540 min_feed_div = feedback_div + 1; 548 min_feed_div = feedback_div + 1;
541 continue; 549 continue;
542 } else if (vco > pll->pll_out_max) { 550 } else if (vco > pll_out_max) {
543 max_feed_div = feedback_div; 551 max_feed_div = feedback_div;
544 continue; 552 continue;
545 } 553 }
@@ -675,6 +683,15 @@ calc_fb_ref_div(struct radeon_pll *pll,
675{ 683{
676 fixed20_12 ffreq, max_error, error, pll_out, a; 684 fixed20_12 ffreq, max_error, error, pll_out, a;
677 u32 vco; 685 u32 vco;
686 u32 pll_out_min, pll_out_max;
687
688 if (pll->flags & RADEON_PLL_IS_LCD) {
689 pll_out_min = pll->lcd_pll_out_min;
690 pll_out_max = pll->lcd_pll_out_max;
691 } else {
692 pll_out_min = pll->pll_out_min;
693 pll_out_max = pll->pll_out_max;
694 }
678 695
679 ffreq.full = rfixed_const(freq); 696 ffreq.full = rfixed_const(freq);
680 /* max_error = ffreq * 0.0025; */ 697 /* max_error = ffreq * 0.0025; */
@@ -686,7 +703,7 @@ calc_fb_ref_div(struct radeon_pll *pll,
686 vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); 703 vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
687 vco = vco / ((*ref_div) * 10); 704 vco = vco / ((*ref_div) * 10);
688 705
689 if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max)) 706 if ((vco < pll_out_min) || (vco > pll_out_max))
690 continue; 707 continue;
691 708
692 /* pll_out = vco / post_div; */ 709 /* pll_out = vco / post_div; */
@@ -714,6 +731,15 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
714{ 731{
715 u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; 732 u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
716 u32 best_freq = 0, vco_frequency; 733 u32 best_freq = 0, vco_frequency;
734 u32 pll_out_min, pll_out_max;
735
736 if (pll->flags & RADEON_PLL_IS_LCD) {
737 pll_out_min = pll->lcd_pll_out_min;
738 pll_out_max = pll->lcd_pll_out_max;
739 } else {
740 pll_out_min = pll->pll_out_min;
741 pll_out_max = pll->pll_out_max;
742 }
717 743
718 /* freq = freq / 10; */ 744 /* freq = freq / 10; */
719 do_div(freq, 10); 745 do_div(freq, 10);
@@ -724,7 +750,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
724 goto done; 750 goto done;
725 751
726 vco_frequency = freq * post_div; 752 vco_frequency = freq * post_div;
727 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) 753 if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
728 goto done; 754 goto done;
729 755
730 if (pll->flags & RADEON_PLL_USE_REF_DIV) { 756 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
@@ -749,7 +775,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
749 continue; 775 continue;
750 776
751 vco_frequency = freq * post_div; 777 vco_frequency = freq * post_div;
752 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) 778 if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
753 continue; 779 continue;
754 if (pll->flags & RADEON_PLL_USE_REF_DIV) { 780 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
755 ref_div = pll->reference_div; 781 ref_div = pll->reference_div;
@@ -945,6 +971,23 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
945 return 0; 971 return 0;
946} 972}
947 973
974void radeon_update_display_priority(struct radeon_device *rdev)
975{
976 /* adjustment options for the display watermarks */
977 if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
978 /* set display priority to high for r3xx, rv515 chips
979 * this avoids flickering due to underflow to the
980 * display controllers during heavy acceleration.
981 */
982 if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
983 rdev->disp_priority = 2;
984 else
985 rdev->disp_priority = 0;
986 } else
987 rdev->disp_priority = radeon_disp_priority;
988
989}
990
948int radeon_modeset_init(struct radeon_device *rdev) 991int radeon_modeset_init(struct radeon_device *rdev)
949{ 992{
950 int i; 993 int i;
@@ -976,15 +1019,6 @@ int radeon_modeset_init(struct radeon_device *rdev)
976 radeon_combios_check_hardcoded_edid(rdev); 1019 radeon_combios_check_hardcoded_edid(rdev);
977 } 1020 }
978 1021
979 if (rdev->flags & RADEON_SINGLE_CRTC)
980 rdev->num_crtc = 1;
981 else {
982 if (ASIC_IS_DCE4(rdev))
983 rdev->num_crtc = 6;
984 else
985 rdev->num_crtc = 2;
986 }
987
988 /* allocate crtcs */ 1022 /* allocate crtcs */
989 for (i = 0; i < rdev->num_crtc; i++) { 1023 for (i = 0; i < rdev->num_crtc; i++) {
990 radeon_crtc_init(rdev->ddev, i); 1024 radeon_crtc_init(rdev->ddev, i);
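
The three radeon_display.c hunks above repeat one selection: when RADEON_PLL_IS_LCD is set, the VCO limits come from the new lcd_pll_out_min/lcd_pll_out_max fields, otherwise from the usual pll_out_min/pll_out_max. Purely as an illustration of that choice (not the driver's actual layout), it can be read as a small helper; the struct, the flag bit and the sample numbers below are placeholders.

#include <stdint.h>
#include <stdio.h>

#define PLL_IS_LCD (1u << 0)	/* placeholder flag bit, not the driver's value */

struct pll_range {
	uint32_t flags;
	uint32_t pll_out_min, pll_out_max;
	uint32_t lcd_pll_out_min, lcd_pll_out_max;
};

static void pll_out_range(const struct pll_range *pll,
			  uint32_t *out_min, uint32_t *out_max)
{
	if (pll->flags & PLL_IS_LCD) {
		*out_min = pll->lcd_pll_out_min;
		*out_max = pll->lcd_pll_out_max;
	} else {
		*out_min = pll->pll_out_min;
		*out_max = pll->pll_out_max;
	}
}

int main(void)
{
	struct pll_range pll = {
		.flags = PLL_IS_LCD,
		.pll_out_min = 64800, .pll_out_max = 2400000,		/* sample numbers */
		.lcd_pll_out_min = 32000, .lcd_pll_out_max = 1200000,	/* sample numbers */
	};
	uint32_t min, max;

	pll_out_range(&pll, &min, &max);
	printf("vco range %u..%u\n", min, max);
	return 0;
}
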
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6eec0ece6a6c..055a51732dcb 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -42,9 +42,10 @@
42 * KMS wrapper. 42 * KMS wrapper.
43 * - 2.0.0 - initial interface 43 * - 2.0.0 - initial interface
44 * - 2.1.0 - add square tiling interface 44 * - 2.1.0 - add square tiling interface
45 * - 2.2.0 - add r6xx/r7xx const buffer support
45 */ 46 */
46#define KMS_DRIVER_MAJOR 2 47#define KMS_DRIVER_MAJOR 2
47#define KMS_DRIVER_MINOR 1 48#define KMS_DRIVER_MINOR 2
48#define KMS_DRIVER_PATCHLEVEL 0 49#define KMS_DRIVER_PATCHLEVEL 0
49int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 50int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
50int radeon_driver_unload_kms(struct drm_device *dev); 51int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,6 +92,8 @@ int radeon_tv = 1;
91int radeon_new_pll = -1; 92int radeon_new_pll = -1;
92int radeon_dynpm = -1; 93int radeon_dynpm = -1;
93int radeon_audio = 1; 94int radeon_audio = 1;
95int radeon_disp_priority = 0;
96int radeon_hw_i2c = 0;
94 97
95MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 98MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
96module_param_named(no_wb, radeon_no_wb, int, 0444); 99module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -134,6 +137,12 @@ module_param_named(dynpm, radeon_dynpm, int, 0444);
134MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 137MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
135module_param_named(audio, radeon_audio, int, 0444); 138module_param_named(audio, radeon_audio, int, 0444);
136 139
140MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
141module_param_named(disp_priority, radeon_disp_priority, int, 0444);
142
143MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
144module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
145
137static int radeon_suspend(struct drm_device *dev, pm_message_t state) 146static int radeon_suspend(struct drm_device *dev, pm_message_t state)
138{ 147{
139 drm_radeon_private_t *dev_priv = dev->dev_private; 148 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index ec55f2b23c22..448eba89d1e6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -107,9 +107,10 @@
107 * 1.30- Add support for occlusion queries 107 * 1.30- Add support for occlusion queries
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup 109 * 1.32- fixes for rv740 setup
110 * 1.33- Add r6xx/r7xx const buffer support
110 */ 111 */
111#define DRIVER_MAJOR 1 112#define DRIVER_MAJOR 1
112#define DRIVER_MINOR 32 113#define DRIVER_MINOR 33
113#define DRIVER_PATCHLEVEL 0 114#define DRIVER_PATCHLEVEL 0
114 115
115enum radeon_cp_microcode_version { 116enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bc926ea0a530..52d6f96f274b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -302,7 +302,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
302 } 302 }
303 303
304 if (ASIC_IS_DCE3(rdev) && 304 if (ASIC_IS_DCE3(rdev) &&
305 (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { 305 (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
306 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 306 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
307 radeon_dp_set_link_config(connector, mode); 307 radeon_dp_set_link_config(connector, mode);
308 } 308 }
@@ -519,7 +519,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
519 break; 519 break;
520 } 520 }
521 521
522 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 522 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
523 return;
523 524
524 switch (frev) { 525 switch (frev) {
525 case 1: 526 case 1:
@@ -593,7 +594,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
593 } 594 }
594 595
595 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 596 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
596 r600_hdmi_enable(encoder, hdmi_detected);
597} 597}
598 598
599int 599int
@@ -708,7 +708,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
708 struct radeon_connector_atom_dig *dig_connector = 708 struct radeon_connector_atom_dig *dig_connector =
709 radeon_get_atom_connector_priv_from_encoder(encoder); 709 radeon_get_atom_connector_priv_from_encoder(encoder);
710 union dig_encoder_control args; 710 union dig_encoder_control args;
711 int index = 0, num = 0; 711 int index = 0;
712 uint8_t frev, crev; 712 uint8_t frev, crev;
713 713
714 if (!dig || !dig_connector) 714 if (!dig || !dig_connector)
@@ -724,9 +724,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
724 else 724 else
725 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 725 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
726 } 726 }
727 num = dig->dig_encoder + 1;
728 727
729 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 728 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
729 return;
730 730
731 args.v1.ucAction = action; 731 args.v1.ucAction = action;
732 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 732 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -785,7 +785,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
785 struct drm_connector *connector; 785 struct drm_connector *connector;
786 struct radeon_connector *radeon_connector; 786 struct radeon_connector *radeon_connector;
787 union dig_transmitter_control args; 787 union dig_transmitter_control args;
788 int index = 0, num = 0; 788 int index = 0;
789 uint8_t frev, crev; 789 uint8_t frev, crev;
790 bool is_dp = false; 790 bool is_dp = false;
791 int pll_id = 0; 791 int pll_id = 0;
@@ -814,7 +814,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
814 } 814 }
815 } 815 }
816 816
817 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 817 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
818 return;
818 819
819 args.v1.ucAction = action; 820 args.v1.ucAction = action;
820 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 821 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
@@ -860,15 +861,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
860 switch (radeon_encoder->encoder_id) { 861 switch (radeon_encoder->encoder_id) {
861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 862 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
862 args.v3.acConfig.ucTransmitterSel = 0; 863 args.v3.acConfig.ucTransmitterSel = 0;
863 num = 0;
864 break; 864 break;
865 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 865 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
866 args.v3.acConfig.ucTransmitterSel = 1; 866 args.v3.acConfig.ucTransmitterSel = 1;
867 num = 1;
868 break; 867 break;
869 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 868 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
870 args.v3.acConfig.ucTransmitterSel = 2; 869 args.v3.acConfig.ucTransmitterSel = 2;
871 num = 2;
872 break; 870 break;
873 } 871 }
874 872
@@ -879,23 +877,19 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
879 args.v3.acConfig.fCoherentMode = 1; 877 args.v3.acConfig.fCoherentMode = 1;
880 } 878 }
881 } else if (ASIC_IS_DCE32(rdev)) { 879 } else if (ASIC_IS_DCE32(rdev)) {
882 if (dig->dig_encoder == 1) 880 args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
883 args.v2.acConfig.ucEncoderSel = 1;
884 if (dig_connector->linkb) 881 if (dig_connector->linkb)
885 args.v2.acConfig.ucLinkSel = 1; 882 args.v2.acConfig.ucLinkSel = 1;
886 883
887 switch (radeon_encoder->encoder_id) { 884 switch (radeon_encoder->encoder_id) {
888 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 885 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
889 args.v2.acConfig.ucTransmitterSel = 0; 886 args.v2.acConfig.ucTransmitterSel = 0;
890 num = 0;
891 break; 887 break;
892 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 888 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
893 args.v2.acConfig.ucTransmitterSel = 1; 889 args.v2.acConfig.ucTransmitterSel = 1;
894 num = 1;
895 break; 890 break;
896 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 891 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
897 args.v2.acConfig.ucTransmitterSel = 2; 892 args.v2.acConfig.ucTransmitterSel = 2;
898 num = 2;
899 break; 893 break;
900 } 894 }
901 895
@@ -913,31 +907,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
913 else 907 else
914 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 908 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
915 909
916 switch (radeon_encoder->encoder_id) { 910 if ((rdev->flags & RADEON_IS_IGP) &&
917 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 911 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
918 if (rdev->flags & RADEON_IS_IGP) { 912 if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
919 if (radeon_encoder->pixel_clock > 165000) { 913 if (dig_connector->igp_lane_info & 0x1)
920 if (dig_connector->igp_lane_info & 0x3) 914 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
921 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; 915 else if (dig_connector->igp_lane_info & 0x2)
922 else if (dig_connector->igp_lane_info & 0xc) 916 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
923 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; 917 else if (dig_connector->igp_lane_info & 0x4)
924 } else { 918 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
925 if (dig_connector->igp_lane_info & 0x1) 919 else if (dig_connector->igp_lane_info & 0x8)
926 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; 920 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
927 else if (dig_connector->igp_lane_info & 0x2) 921 } else {
928 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; 922 if (dig_connector->igp_lane_info & 0x3)
929 else if (dig_connector->igp_lane_info & 0x4) 923 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
930 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; 924 else if (dig_connector->igp_lane_info & 0xc)
931 else if (dig_connector->igp_lane_info & 0x8) 925 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
932 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
933 }
934 } 926 }
935 break;
936 } 927 }
937 928
938 if (radeon_encoder->pixel_clock > 165000)
939 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
940
941 if (dig_connector->linkb) 929 if (dig_connector->linkb)
942 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; 930 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
943 else 931 else
@@ -948,6 +936,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
948 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 936 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
949 if (dig->coherent_mode) 937 if (dig->coherent_mode)
950 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; 938 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
939 if (radeon_encoder->pixel_clock > 165000)
940 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
951 } 941 }
952 } 942 }
953 943
@@ -1054,16 +1044,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1054 if (is_dig) { 1044 if (is_dig) {
1055 switch (mode) { 1045 switch (mode) {
1056 case DRM_MODE_DPMS_ON: 1046 case DRM_MODE_DPMS_ON:
1057 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1047 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1058 {
1059 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1048 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1049
1060 dp_link_train(encoder, connector); 1050 dp_link_train(encoder, connector);
1051 if (ASIC_IS_DCE4(rdev))
1052 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
1061 } 1053 }
1054 if (!ASIC_IS_DCE4(rdev))
1055 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1062 break; 1056 break;
1063 case DRM_MODE_DPMS_STANDBY: 1057 case DRM_MODE_DPMS_STANDBY:
1064 case DRM_MODE_DPMS_SUSPEND: 1058 case DRM_MODE_DPMS_SUSPEND:
1065 case DRM_MODE_DPMS_OFF: 1059 case DRM_MODE_DPMS_OFF:
1066 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1060 if (!ASIC_IS_DCE4(rdev))
1061 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1062 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1063 if (ASIC_IS_DCE4(rdev))
1064 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
1065 }
1067 break; 1066 break;
1068 } 1067 }
1069 } else { 1068 } else {
@@ -1104,7 +1103,8 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1104 1103
1105 memset(&args, 0, sizeof(args)); 1104 memset(&args, 0, sizeof(args));
1106 1105
1107 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 1106 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1107 return;
1108 1108
1109 switch (frev) { 1109 switch (frev) {
1110 case 1: 1110 case 1:
@@ -1216,6 +1216,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1216 } 1216 }
1217 1217
1218 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1218 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1219
1220 /* update scratch regs with new routing */
1221 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1219} 1222}
1220 1223
1221static void 1224static void
@@ -1326,19 +1329,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1326 struct drm_device *dev = encoder->dev; 1329 struct drm_device *dev = encoder->dev;
1327 struct radeon_device *rdev = dev->dev_private; 1330 struct radeon_device *rdev = dev->dev_private;
1328 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1331 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1329 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1330 1332
1331 if (radeon_encoder->active_device &
1332 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1333 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1334 if (dig)
1335 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
1336 }
1337 radeon_encoder->pixel_clock = adjusted_mode->clock; 1333 radeon_encoder->pixel_clock = adjusted_mode->clock;
1338 1334
1339 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1340 atombios_set_encoder_crtc_source(encoder);
1341
1342 if (ASIC_IS_AVIVO(rdev)) { 1335 if (ASIC_IS_AVIVO(rdev)) {
1343 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) 1336 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
1344 atombios_yuv_setup(encoder, true); 1337 atombios_yuv_setup(encoder, true);
@@ -1396,9 +1389,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1396 } 1389 }
1397 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1390 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1398 1391
1399 /* XXX */ 1392 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
1400 if (!ASIC_IS_DCE4(rdev)) 1393 r600_hdmi_enable(encoder);
1401 r600_hdmi_setmode(encoder, adjusted_mode); 1394 r600_hdmi_setmode(encoder, adjusted_mode);
1395 }
1402} 1396}
1403 1397
1404static bool 1398static bool
@@ -1418,7 +1412,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
1418 1412
1419 memset(&args, 0, sizeof(args)); 1413 memset(&args, 0, sizeof(args));
1420 1414
1421 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 1415 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1416 return false;
1422 1417
1423 args.sDacload.ucMisc = 0; 1418 args.sDacload.ucMisc = 0;
1424 1419
@@ -1492,8 +1487,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
1492 1487
1493static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 1488static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1494{ 1489{
1490 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1491
1492 if (radeon_encoder->active_device &
1493 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1494 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1495 if (dig)
1496 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
1497 }
1498
1495 radeon_atom_output_lock(encoder, true); 1499 radeon_atom_output_lock(encoder, true);
1496 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1500 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1501
1502 /* this is needed for the pll/ss setup to work correctly in some cases */
1503 atombios_set_encoder_crtc_source(encoder);
1497} 1504}
1498 1505
1499static void radeon_atom_encoder_commit(struct drm_encoder *encoder) 1506static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1509,6 +1516,8 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1509 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1516 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1510 1517
1511 if (radeon_encoder_is_digital(encoder)) { 1518 if (radeon_encoder_is_digital(encoder)) {
1519 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
1520 r600_hdmi_disable(encoder);
1512 dig = radeon_encoder->enc_priv; 1521 dig = radeon_encoder->enc_priv;
1513 dig->dig_encoder = -1; 1522 dig->dig_encoder = -1;
1514 } 1523 }
@@ -1659,6 +1668,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1659 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 1668 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1660 break; 1669 break;
1661 } 1670 }
1662
1663 r600_hdmi_init(encoder);
1664} 1671}
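The hunks above reorder the encoder bring-up: DIG-encoder selection and the CRTC-source programming move from radeon_atom_encoder_mode_set() into radeon_atom_encoder_prepare(), DP paths on DCE4 get an explicit DP_VIDEO_ON/OFF on the DIG encoder around link training while the transmitter enable/disable is limited to pre-DCE4, and HDMI is now keyed off the encoder mode (r600_hdmi_enable()/r600_hdmi_disable() replace the unconditional r600_hdmi_init() at encoder creation). A minimal userspace sketch of why the move into prepare() matters, assuming the usual DRM CRTC-helper ordering of prepare, then mode_set, then commit; the stub bodies are hypothetical:

#include <stdio.h>

/* Stand-ins for the encoder hooks touched above. The DRM CRTC helpers call
 * them in the order prepare -> mode_set -> commit, so work moved into
 * prepare() is guaranteed to run before the PLL/SS programming in mode_set. */
static void encoder_prepare(void)
{
        printf("prepare: pick DIG encoder, lock output, DPMS off, set CRTC source\n");
}

static void encoder_mode_set(void)
{
        printf("mode_set: program pixel clock, transmitter, HDMI packets\n");
}

static void encoder_commit(void)
{
        printf("commit: DPMS on (for DP: link train, then enable video)\n");
}

int main(void)
{
        encoder_prepare();
        encoder_mode_set();
        encoder_commit();
        return 0;
}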
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 4ae50c19589f..5def6f5dff38 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,6 +59,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
59 return false; 59 return false;
60} 60}
61 61
62/* bit banging i2c */
62 63
63static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) 64static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
64{ 65{
@@ -181,13 +182,30 @@ static void set_data(void *i2c_priv, int data)
181 WREG32(rec->en_data_reg, val); 182 WREG32(rec->en_data_reg, val);
182} 183}
183 184
185static int pre_xfer(struct i2c_adapter *i2c_adap)
186{
187 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
188
189 radeon_i2c_do_lock(i2c, 1);
190
191 return 0;
192}
193
194static void post_xfer(struct i2c_adapter *i2c_adap)
195{
196 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
197
198 radeon_i2c_do_lock(i2c, 0);
199}
200
201/* hw i2c */
202
184static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) 203static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
185{ 204{
186 struct radeon_pll *spll = &rdev->clock.spll;
187 u32 sclk = radeon_get_engine_clock(rdev); 205 u32 sclk = radeon_get_engine_clock(rdev);
188 u32 prescale = 0; 206 u32 prescale = 0;
189 u32 n, m; 207 u32 nm;
190 u8 loop; 208 u8 n, m, loop;
191 int i2c_clock; 209 int i2c_clock;
192 210
193 switch (rdev->family) { 211 switch (rdev->family) {
@@ -203,13 +221,15 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
203 case CHIP_R300: 221 case CHIP_R300:
204 case CHIP_R350: 222 case CHIP_R350:
205 case CHIP_RV350: 223 case CHIP_RV350:
206 n = (spll->reference_freq) / (4 * 6); 224 i2c_clock = 60;
225 nm = (sclk * 10) / (i2c_clock * 4);
207 for (loop = 1; loop < 255; loop++) { 226 for (loop = 1; loop < 255; loop++) {
208 if ((loop * (loop - 1)) > n) 227 if ((nm / loop) < loop)
209 break; 228 break;
210 } 229 }
211 m = loop - 1; 230 n = loop - 1;
212 prescale = m | (loop << 8); 231 m = loop - 2;
232 prescale = m | (n << 8);
213 break; 233 break;
214 case CHIP_RV380: 234 case CHIP_RV380:
215 case CHIP_RS400: 235 case CHIP_RS400:
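The R100-R350 branch above now derives the prescale from the current engine clock instead of the SPLL reference: nm approximates sclk*10 / (4 * i2c_clock) and the loop splits it into two nearly equal factors packed as m | (n << 8). A standalone sketch of that arithmetic, using an illustrative sclk value (units follow whatever radeon_get_engine_clock() returns):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the new R300-class prescale computation from the hunk above. */
static uint32_t r300_i2c_prescale(uint32_t sclk, int i2c_clock)
{
        uint32_t nm = (sclk * 10) / (i2c_clock * 4);
        uint8_t loop, n, m;

        for (loop = 1; loop < 255; loop++) {
                if ((nm / loop) < loop)   /* stop once loop exceeds ~sqrt(nm) */
                        break;
        }
        n = loop - 1;
        m = loop - 2;
        return m | (n << 8);
}

int main(void)
{
        /* 40000 is only a sample engine-clock value for illustration. */
        uint32_t prescale = r300_i2c_prescale(40000, 60);

        printf("prescale = 0x%04x (n=%u, m=%u)\n",
               prescale, (prescale >> 8) & 0xff, prescale & 0xff);
        return 0;
}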
@@ -217,7 +237,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
217 case CHIP_R420: 237 case CHIP_R420:
218 case CHIP_R423: 238 case CHIP_R423:
219 case CHIP_RV410: 239 case CHIP_RV410:
220 sclk = radeon_get_engine_clock(rdev);
221 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; 240 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
222 break; 241 break;
223 case CHIP_RS600: 242 case CHIP_RS600:
@@ -232,7 +251,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
232 case CHIP_RV570: 251 case CHIP_RV570:
233 case CHIP_R580: 252 case CHIP_R580:
234 i2c_clock = 50; 253 i2c_clock = 50;
235 sclk = radeon_get_engine_clock(rdev);
236 if (rdev->family == CHIP_R520) 254 if (rdev->family == CHIP_R520)
237 prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); 255 prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
238 else 256 else
@@ -291,6 +309,7 @@ static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
291 prescale = radeon_get_i2c_prescale(rdev); 309 prescale = radeon_get_i2c_prescale(rdev);
292 310
293 reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | 311 reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
312 RADEON_I2C_DRIVE_EN |
294 RADEON_I2C_START | 313 RADEON_I2C_START |
295 RADEON_I2C_STOP | 314 RADEON_I2C_STOP |
296 RADEON_I2C_GO); 315 RADEON_I2C_GO);
@@ -757,26 +776,13 @@ done:
757 return ret; 776 return ret;
758} 777}
759 778
760static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap, 779static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
761 struct i2c_msg *msgs, int num) 780 struct i2c_msg *msgs, int num)
762{ 781{
763 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); 782 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
764 int ret;
765
766 radeon_i2c_do_lock(i2c, 1);
767 ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
768 radeon_i2c_do_lock(i2c, 0);
769
770 return ret;
771}
772
773static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
774 struct i2c_msg *msgs, int num)
775{
776 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
777 struct radeon_device *rdev = i2c->dev->dev_private; 783 struct radeon_device *rdev = i2c->dev->dev_private;
778 struct radeon_i2c_bus_rec *rec = &i2c->rec; 784 struct radeon_i2c_bus_rec *rec = &i2c->rec;
779 int ret; 785 int ret = 0;
780 786
781 switch (rdev->family) { 787 switch (rdev->family) {
782 case CHIP_R100: 788 case CHIP_R100:
@@ -797,16 +803,12 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
797 case CHIP_RV410: 803 case CHIP_RV410:
798 case CHIP_RS400: 804 case CHIP_RS400:
799 case CHIP_RS480: 805 case CHIP_RS480:
800 if (rec->hw_capable) 806 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
801 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
802 else
803 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
804 break; 807 break;
805 case CHIP_RS600: 808 case CHIP_RS600:
806 case CHIP_RS690: 809 case CHIP_RS690:
807 case CHIP_RS740: 810 case CHIP_RS740:
808 /* XXX fill in hw i2c implementation */ 811 /* XXX fill in hw i2c implementation */
809 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
810 break; 812 break;
811 case CHIP_RV515: 813 case CHIP_RV515:
812 case CHIP_R520: 814 case CHIP_R520:
@@ -814,20 +816,16 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
814 case CHIP_RV560: 816 case CHIP_RV560:
815 case CHIP_RV570: 817 case CHIP_RV570:
816 case CHIP_R580: 818 case CHIP_R580:
817 if (rec->hw_capable) { 819 if (rec->mm_i2c)
818 if (rec->mm_i2c) 820 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
819 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); 821 else
820 else 822 ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
821 ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
822 } else
823 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
824 break; 823 break;
825 case CHIP_R600: 824 case CHIP_R600:
826 case CHIP_RV610: 825 case CHIP_RV610:
827 case CHIP_RV630: 826 case CHIP_RV630:
828 case CHIP_RV670: 827 case CHIP_RV670:
829 /* XXX fill in hw i2c implementation */ 828 /* XXX fill in hw i2c implementation */
830 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
831 break; 829 break;
832 case CHIP_RV620: 830 case CHIP_RV620:
833 case CHIP_RV635: 831 case CHIP_RV635:
@@ -838,7 +836,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
838 case CHIP_RV710: 836 case CHIP_RV710:
839 case CHIP_RV740: 837 case CHIP_RV740:
840 /* XXX fill in hw i2c implementation */ 838 /* XXX fill in hw i2c implementation */
841 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
842 break; 839 break;
843 case CHIP_CEDAR: 840 case CHIP_CEDAR:
844 case CHIP_REDWOOD: 841 case CHIP_REDWOOD:
@@ -846,7 +843,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
846 case CHIP_CYPRESS: 843 case CHIP_CYPRESS:
847 case CHIP_HEMLOCK: 844 case CHIP_HEMLOCK:
848 /* XXX fill in hw i2c implementation */ 845 /* XXX fill in hw i2c implementation */
849 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
850 break; 846 break;
851 default: 847 default:
852 DRM_ERROR("i2c: unhandled radeon chip\n"); 848 DRM_ERROR("i2c: unhandled radeon chip\n");
@@ -857,20 +853,21 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
857 return ret; 853 return ret;
858} 854}
859 855
860static u32 radeon_i2c_func(struct i2c_adapter *adap) 856static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
861{ 857{
862 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 858 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
863} 859}
864 860
865static const struct i2c_algorithm radeon_i2c_algo = { 861static const struct i2c_algorithm radeon_i2c_algo = {
866 .master_xfer = radeon_i2c_xfer, 862 .master_xfer = radeon_hw_i2c_xfer,
867 .functionality = radeon_i2c_func, 863 .functionality = radeon_hw_i2c_func,
868}; 864};
869 865
870struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 866struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
871 struct radeon_i2c_bus_rec *rec, 867 struct radeon_i2c_bus_rec *rec,
872 const char *name) 868 const char *name)
873{ 869{
870 struct radeon_device *rdev = dev->dev_private;
874 struct radeon_i2c_chan *i2c; 871 struct radeon_i2c_chan *i2c;
875 int ret; 872 int ret;
876 873
@@ -878,37 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
878 if (i2c == NULL) 875 if (i2c == NULL)
879 return NULL; 876 return NULL;
880 877
881 /* set the internal bit adapter */
882 i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
883 i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
884 sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
885 i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
886 i2c->algo.radeon.bit_data.setsda = set_data;
887 i2c->algo.radeon.bit_data.setscl = set_clock;
888 i2c->algo.radeon.bit_data.getsda = get_data;
889 i2c->algo.radeon.bit_data.getscl = get_clock;
890 i2c->algo.radeon.bit_data.udelay = 20;
891 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
892 * make this, 2 jiffies is a lot more reliable */
893 i2c->algo.radeon.bit_data.timeout = 2;
894 i2c->algo.radeon.bit_data.data = i2c;
895 ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
896 if (ret) {
897 DRM_ERROR("Failed to register internal bit i2c %s\n", name);
898 goto out_free;
899 }
900 /* set the radeon i2c adapter */
901 i2c->dev = dev;
902 i2c->rec = *rec; 878 i2c->rec = *rec;
903 i2c->adapter.owner = THIS_MODULE; 879 i2c->adapter.owner = THIS_MODULE;
880 i2c->dev = dev;
904 i2c_set_adapdata(&i2c->adapter, i2c); 881 i2c_set_adapdata(&i2c->adapter, i2c);
905 sprintf(i2c->adapter.name, "Radeon i2c %s", name); 882 if (rec->mm_i2c ||
906 i2c->adapter.algo_data = &i2c->algo.radeon; 883 (rec->hw_capable &&
907 i2c->adapter.algo = &radeon_i2c_algo; 884 radeon_hw_i2c &&
908 ret = i2c_add_adapter(&i2c->adapter); 885 ((rdev->family <= CHIP_RS480) ||
909 if (ret) { 886 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
910 DRM_ERROR("Failed to register i2c %s\n", name); 887 /* set the radeon hw i2c adapter */
911 goto out_free; 888 sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
889 i2c->adapter.algo = &radeon_i2c_algo;
890 ret = i2c_add_adapter(&i2c->adapter);
891 if (ret) {
892 DRM_ERROR("Failed to register hw i2c %s\n", name);
893 goto out_free;
894 }
895 } else {
896 /* set the radeon bit adapter */
897 sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
898 i2c->adapter.algo_data = &i2c->algo.bit;
899 i2c->algo.bit.pre_xfer = pre_xfer;
900 i2c->algo.bit.post_xfer = post_xfer;
901 i2c->algo.bit.setsda = set_data;
902 i2c->algo.bit.setscl = set_clock;
903 i2c->algo.bit.getsda = get_data;
904 i2c->algo.bit.getscl = get_clock;
905 i2c->algo.bit.udelay = 20;
906 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
907 * make this, 2 jiffies is a lot more reliable */
908 i2c->algo.bit.timeout = 2;
909 i2c->algo.bit.data = i2c;
910 ret = i2c_bit_add_bus(&i2c->adapter);
911 if (ret) {
912 DRM_ERROR("Failed to register bit i2c %s\n", name);
913 goto out_free;
914 }
912 } 915 }
913 916
914 return i2c; 917 return i2c;
@@ -953,16 +956,6 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
953{ 956{
954 if (!i2c) 957 if (!i2c)
955 return; 958 return;
956 i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
957 i2c_del_adapter(&i2c->adapter);
958 kfree(i2c);
959}
960
961void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
962{
963 if (!i2c)
964 return;
965
966 i2c_del_adapter(&i2c->adapter); 959 i2c_del_adapter(&i2c->adapter);
967 kfree(i2c); 960 kfree(i2c);
968} 961}
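With the changes above the driver registers exactly one adapter per bus: either a hardware-I2C adapter (radeon_hw_i2c_xfer, used only when radeon_hw_i2c is set and the ASIC family has an implementation) or a plain i2c-bit adapter whose pre_xfer/post_xfer hooks take and release the bus around each transfer, which is what makes the old radeon_sw_i2c_xfer() wrapper and the private bit_adapter unnecessary. A generic, self-contained analogue of that hook pattern; the struct and function names here are illustrative, not the i2c core's:

#include <stdio.h>

/* Analogue of the bit-banging pre_xfer/post_xfer hooks: the core calls them
 * before and after each transfer, so bus locking/routing needs no wrapper. */
struct bus_ops {
        int  (*pre_xfer)(void *priv);
        void (*post_xfer)(void *priv);
        int  (*do_xfer)(void *priv, const char *msg);
};

static int do_transfer(const struct bus_ops *ops, void *priv, const char *msg)
{
        int ret = 0;

        if (ops->pre_xfer) {
                ret = ops->pre_xfer(priv);      /* e.g. route/lock the GPIO lines */
                if (ret)
                        return ret;
        }
        ret = ops->do_xfer(priv, msg);          /* the actual bit-banged transfer */
        if (ops->post_xfer)
                ops->post_xfer(priv);           /* release the bus again */
        return ret;
}

static int lock_bus(void *priv)    { printf("lock i2c bus\n"); return 0; }
static void unlock_bus(void *priv) { printf("unlock i2c bus\n"); }
static int xfer(void *priv, const char *msg) { printf("xfer: %s\n", msg); return 0; }

int main(void)
{
        struct bus_ops ops = { lock_bus, unlock_bus, xfer };

        return do_transfer(&ops, NULL, "read EDID block");
}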
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index ea4c645ece11..a212041e8b0b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -67,9 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
67 67
68 /* Disable *all* interrupts */ 68 /* Disable *all* interrupts */
69 rdev->irq.sw_int = false; 69 rdev->irq.sw_int = false;
70 for (i = 0; i < 2; i++) { 70 for (i = 0; i < rdev->num_crtc; i++)
71 rdev->irq.crtc_vblank_int[i] = false; 71 rdev->irq.crtc_vblank_int[i] = false;
72 } 72 for (i = 0; i < 6; i++)
73 rdev->irq.hpd[i] = false;
73 radeon_irq_set(rdev); 74 radeon_irq_set(rdev);
74 /* Clear bits */ 75 /* Clear bits */
75 radeon_irq_process(rdev); 76 radeon_irq_process(rdev);
@@ -95,28 +96,29 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
95 } 96 }
96 /* Disable *all* interrupts */ 97 /* Disable *all* interrupts */
97 rdev->irq.sw_int = false; 98 rdev->irq.sw_int = false;
98 for (i = 0; i < 2; i++) { 99 for (i = 0; i < rdev->num_crtc; i++)
99 rdev->irq.crtc_vblank_int[i] = false; 100 rdev->irq.crtc_vblank_int[i] = false;
101 for (i = 0; i < 6; i++)
100 rdev->irq.hpd[i] = false; 102 rdev->irq.hpd[i] = false;
101 }
102 radeon_irq_set(rdev); 103 radeon_irq_set(rdev);
103} 104}
104 105
105int radeon_irq_kms_init(struct radeon_device *rdev) 106int radeon_irq_kms_init(struct radeon_device *rdev)
106{ 107{
107 int r = 0; 108 int r = 0;
108 int num_crtc = 2;
109 109
110 if (rdev->flags & RADEON_SINGLE_CRTC)
111 num_crtc = 1;
112 spin_lock_init(&rdev->irq.sw_lock); 110 spin_lock_init(&rdev->irq.sw_lock);
113 r = drm_vblank_init(rdev->ddev, num_crtc); 111 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
114 if (r) { 112 if (r) {
115 return r; 113 return r;
116 } 114 }
117 /* enable msi */ 115 /* enable msi */
118 rdev->msi_enabled = 0; 116 rdev->msi_enabled = 0;
119 if (rdev->family >= CHIP_RV380) { 117 /* MSIs don't seem to work reliably on all IGP
118 * chips. Disable MSI on them for now.
119 */
120 if ((rdev->family >= CHIP_RV380) &&
121 (!(rdev->flags & RADEON_IS_IGP))) {
120 int ret = pci_enable_msi(rdev->pdev); 122 int ret = pci_enable_msi(rdev->pdev);
121 if (!ret) { 123 if (!ret) {
122 rdev->msi_enabled = 1; 124 rdev->msi_enabled = 1;
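The IRQ hunks above switch the per-CRTC loops to rdev->num_crtc, clear all six hotplug lines explicitly, and keep MSI disabled on IGP parts. A compilable sketch of the new MSI predicate; the enum values below are hypothetical, only their relative ordering matters:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the family values and the IGP flag. */
enum chip_family { CHIP_R100 = 0, CHIP_RV380 = 20, CHIP_RS690 = 30, CHIP_RV770 = 40 };
#define RADEON_IS_IGP (1u << 0)

/* Mirrors the new condition: MSI only on RV380 and newer, and never on IGPs. */
static bool want_msi(enum chip_family family, unsigned int flags)
{
        return (family >= CHIP_RV380) && !(flags & RADEON_IS_IGP);
}

int main(void)
{
        printf("RV770 discrete: %d\n", want_msi(CHIP_RV770, 0));
        printf("RS690 IGP:      %d\n", want_msi(CHIP_RS690, RADEON_IS_IGP));
        return 0;
}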
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index df23d6a01d02..88865e38fe30 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -603,6 +603,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
603 ? RADEON_CRTC2_INTERLACE_EN 603 ? RADEON_CRTC2_INTERLACE_EN
604 : 0)); 604 : 0));
605 605
606 /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
607 if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
608 crtc2_gen_cntl |= RADEON_CRTC2_EN;
609
606 disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); 610 disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
607 disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; 611 disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
608 612
@@ -630,6 +634,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
630 ? RADEON_CRTC_INTERLACE_EN 634 ? RADEON_CRTC_INTERLACE_EN
631 : 0)); 635 : 0));
632 636
637 /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
638 if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
639 crtc_gen_cntl |= RADEON_CRTC_EN;
640
633 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); 641 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
634 crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | 642 crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
635 RADEON_CRTC_VSYNC_DIS | 643 RADEON_CRTC_VSYNC_DIS |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 417684daef4c..f2ed27c8055b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -57,6 +57,10 @@
57#define NTSC_TV_PLL_N_14 693 57#define NTSC_TV_PLL_N_14 693
58#define NTSC_TV_PLL_P_14 7 58#define NTSC_TV_PLL_P_14 7
59 59
60#define PAL_TV_PLL_M_14 19
61#define PAL_TV_PLL_N_14 353
62#define PAL_TV_PLL_P_14 5
63
60#define VERT_LEAD_IN_LINES 2 64#define VERT_LEAD_IN_LINES 2
61#define FRAC_BITS 0xe 65#define FRAC_BITS 0xe
62#define FRAC_MASK 0x3fff 66#define FRAC_MASK 0x3fff
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
205 630627, /* defRestart */ 209 630627, /* defRestart */
206 347, /* crtcPLL_N */ 210 347, /* crtcPLL_N */
207 14, /* crtcPLL_M */ 211 14, /* crtcPLL_M */
208 8, /* crtcPLL_postDiv */ 212 8, /* crtcPLL_postDiv */
209 1022, /* pixToTV */ 213 1022, /* pixToTV */
210 }, 214 },
215 { /* PAL timing for 14 Mhz ref clk */
216 800, /* horResolution */
217 600, /* verResolution */
218 TV_STD_PAL, /* standard */
219 1131, /* horTotal */
220 742, /* verTotal */
221 813, /* horStart */
222 840, /* horSyncStart */
223 633, /* verSyncStart */
224 708369, /* defRestart */
225 211, /* crtcPLL_N */
226 9, /* crtcPLL_M */
227 8, /* crtcPLL_postDiv */
228 759, /* pixToTV */
229 },
211}; 230};
212 231
213#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) 232#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
242 if (pll->reference_freq == 2700) 261 if (pll->reference_freq == 2700)
243 const_ptr = &available_tv_modes[1]; 262 const_ptr = &available_tv_modes[1];
244 else 263 else
245 const_ptr = &available_tv_modes[1]; /* FIX ME */ 264 const_ptr = &available_tv_modes[3];
246 } 265 }
247 return const_ptr; 266 return const_ptr;
248} 267}
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
685 n = PAL_TV_PLL_N_27; 704 n = PAL_TV_PLL_N_27;
686 p = PAL_TV_PLL_P_27; 705 p = PAL_TV_PLL_P_27;
687 } else { 706 } else {
688 m = PAL_TV_PLL_M_27; 707 m = PAL_TV_PLL_M_14;
689 n = PAL_TV_PLL_N_27; 708 n = PAL_TV_PLL_N_14;
690 p = PAL_TV_PLL_P_27; 709 p = PAL_TV_PLL_P_14;
691 } 710 }
692 } 711 }
693 712
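The table gains a PAL entry for the 14.318 MHz reference clock, radeon_legacy_tv_get_std_mode() returns it (entry 3) instead of falling back to the 27 MHz PAL entry, and radeon_legacy_tv_mode_set() picks the matching PAL_TV_PLL_{M,N,P}_14 constants. A one-function sketch of the index choice, assuming reference_freq is in the driver's 10 kHz units (2700 = 27 MHz); the 1432 below is just a sample "other" value:

#include <stdio.h>

/* Mirrors the PAL branch of the table lookup after the fix:
 * entry 1 is the 27 MHz-reference PAL mode, entry 3 the new 14.318 MHz one. */
static int pal_mode_index(int reference_freq)
{
        return (reference_freq == 2700) ? 1 : 3;
}

int main(void)
{
        printf("27 MHz ref   -> entry %d\n", pal_mode_index(2700));
        printf("14.3 MHz ref -> entry %d\n", pal_mode_index(1432));
        return 0;
}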
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 1702b820aa4d..0b8e32776b10 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -129,6 +129,7 @@ struct radeon_tmds_pll {
129#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 129#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
131#define RADEON_PLL_USE_POST_DIV (1 << 12) 131#define RADEON_PLL_USE_POST_DIV (1 << 12)
132#define RADEON_PLL_IS_LCD (1 << 13)
132 133
133/* pll algo */ 134/* pll algo */
134enum radeon_pll_algo { 135enum radeon_pll_algo {
@@ -149,6 +150,8 @@ struct radeon_pll {
149 uint32_t pll_in_max; 150 uint32_t pll_in_max;
150 uint32_t pll_out_min; 151 uint32_t pll_out_min;
151 uint32_t pll_out_max; 152 uint32_t pll_out_max;
153 uint32_t lcd_pll_out_min;
154 uint32_t lcd_pll_out_max;
152 uint32_t best_vco; 155 uint32_t best_vco;
153 156
154 /* divider limits */ 157 /* divider limits */
@@ -170,17 +173,12 @@ struct radeon_pll {
170 enum radeon_pll_algo algo; 173 enum radeon_pll_algo algo;
171}; 174};
172 175
173struct i2c_algo_radeon_data {
174 struct i2c_adapter bit_adapter;
175 struct i2c_algo_bit_data bit_data;
176};
177
178struct radeon_i2c_chan { 176struct radeon_i2c_chan {
179 struct i2c_adapter adapter; 177 struct i2c_adapter adapter;
180 struct drm_device *dev; 178 struct drm_device *dev;
181 union { 179 union {
180 struct i2c_algo_bit_data bit;
182 struct i2c_algo_dp_aux_data dp; 181 struct i2c_algo_dp_aux_data dp;
183 struct i2c_algo_radeon_data radeon;
184 } algo; 182 } algo;
185 struct radeon_i2c_bus_rec rec; 183 struct radeon_i2c_bus_rec rec;
186}; 184};
@@ -342,6 +340,7 @@ struct radeon_encoder {
342 struct drm_display_mode native_mode; 340 struct drm_display_mode native_mode;
343 void *enc_priv; 341 void *enc_priv;
344 int hdmi_offset; 342 int hdmi_offset;
343 int hdmi_config_offset;
345 int hdmi_audio_workaround; 344 int hdmi_audio_workaround;
346 int hdmi_buffer_status; 345 int hdmi_buffer_status;
347}; 346};
@@ -431,7 +430,6 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
431 struct radeon_i2c_bus_rec *rec, 430 struct radeon_i2c_bus_rec *rec,
432 const char *name); 431 const char *name);
433extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); 432extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
434extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
435extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, 433extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
436 u8 slave_addr, 434 u8 slave_addr,
437 u8 addr, 435 u8 addr,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index ffce2c9e7c76..122774742bd5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -186,8 +186,10 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
186 return 0; 186 return 0;
187 } 187 }
188 radeon_ttm_placement_from_domain(bo, domain); 188 radeon_ttm_placement_from_domain(bo, domain);
189 /* force to pin into visible video ram */ 189 if (domain == RADEON_GEM_DOMAIN_VRAM) {
190 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; 190 /* force to pin into visible video ram */
191 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
192 }
191 for (i = 0; i < bo->placement.num_placement; i++) 193 for (i = 0; i < bo->placement.num_placement; i++)
192 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 194 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
193 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 195 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
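The pin path above now clamps the placement range only when the buffer is pinned into VRAM, so GTT pins are no longer restricted to the visible aperture. A tiny sketch of the clamp itself, assuming a 256 MiB visible aperture and 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, the common case */

/* lpfn is the last allowed page frame: the end of the CPU-visible VRAM
 * aperture, so a VRAM pin stays reachable through the PCI BAR. */
static uint32_t vram_pin_lpfn(uint64_t visible_vram_size)
{
        return (uint32_t)(visible_vram_size >> PAGE_SHIFT);
}

int main(void)
{
        uint64_t visible = 256ull << 20;   /* assumed 256 MiB visible aperture */

        printf("lpfn = %u pages\n", vram_pin_lpfn(visible));
        return 0;
}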
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d4d1c39a0e99..a4b57493aa78 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -28,6 +28,7 @@
28#define RADEON_RECLOCK_DELAY_MS 200 28#define RADEON_RECLOCK_DELAY_MS 200
29#define RADEON_WAIT_VBLANK_TIMEOUT 200 29#define RADEON_WAIT_VBLANK_TIMEOUT 200
30 30
31static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
31static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); 32static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
32static void radeon_pm_set_clocks(struct radeon_device *rdev); 33static void radeon_pm_set_clocks(struct radeon_device *rdev);
33static void radeon_pm_idle_work_handler(struct work_struct *work); 34static void radeon_pm_idle_work_handler(struct work_struct *work);
@@ -179,6 +180,16 @@ static void radeon_get_power_state(struct radeon_device *rdev,
179 rdev->pm.requested_power_state->non_clock_info.pcie_lanes); 180 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
180} 181}
181 182
183static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
184{
185 if (rdev->pm.active_crtcs) {
186 rdev->pm.vblank_sync = false;
187 wait_event_timeout(
188 rdev->irq.vblank_queue, rdev->pm.vblank_sync,
189 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
190 }
191}
192
182static void radeon_set_power_state(struct radeon_device *rdev) 193static void radeon_set_power_state(struct radeon_device *rdev)
183{ 194{
184 /* if *_clock_mode are the same, *_power_state are as well */ 195 /* if *_clock_mode are the same, *_power_state are as well */
@@ -189,11 +200,28 @@ static void radeon_set_power_state(struct radeon_device *rdev)
189 rdev->pm.requested_clock_mode->sclk, 200 rdev->pm.requested_clock_mode->sclk,
190 rdev->pm.requested_clock_mode->mclk, 201 rdev->pm.requested_clock_mode->mclk,
191 rdev->pm.requested_power_state->non_clock_info.pcie_lanes); 202 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
203
192 /* set pcie lanes */ 204 /* set pcie lanes */
205 /* TODO */
206
193 /* set voltage */ 207 /* set voltage */
208 /* TODO */
209
194 /* set engine clock */ 210 /* set engine clock */
211 radeon_sync_with_vblank(rdev);
212 radeon_pm_debug_check_in_vbl(rdev, false);
195 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); 213 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
214 radeon_pm_debug_check_in_vbl(rdev, true);
215
216#if 0
196 /* set memory clock */ 217 /* set memory clock */
218 if (rdev->asic->set_memory_clock) {
219 radeon_sync_with_vblank(rdev);
220 radeon_pm_debug_check_in_vbl(rdev, false);
221 radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
222 radeon_pm_debug_check_in_vbl(rdev, true);
223 }
224#endif
197 225
198 rdev->pm.current_power_state = rdev->pm.requested_power_state; 226 rdev->pm.current_power_state = rdev->pm.requested_power_state;
199 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; 227 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
@@ -229,6 +257,12 @@ int radeon_pm_init(struct radeon_device *rdev)
229 return 0; 257 return 0;
230} 258}
231 259
260void radeon_pm_fini(struct radeon_device *rdev)
261{
262 if (rdev->pm.i2c_bus)
263 radeon_i2c_destroy(rdev->pm.i2c_bus);
264}
265
232void radeon_pm_compute_clocks(struct radeon_device *rdev) 266void radeon_pm_compute_clocks(struct radeon_device *rdev)
233{ 267{
234 struct drm_device *ddev = rdev->ddev; 268 struct drm_device *ddev = rdev->ddev;
@@ -245,7 +279,8 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
245 list_for_each_entry(connector, 279 list_for_each_entry(connector,
246 &ddev->mode_config.connector_list, head) { 280 &ddev->mode_config.connector_list, head) {
247 if (connector->encoder && 281 if (connector->encoder &&
248 connector->dpms != DRM_MODE_DPMS_OFF) { 282 connector->encoder->crtc &&
283 connector->dpms != DRM_MODE_DPMS_OFF) {
249 radeon_crtc = to_radeon_crtc(connector->encoder->crtc); 284 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
250 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 285 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
251 ++count; 286 ++count;
@@ -333,10 +368,7 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
333 break; 368 break;
334 } 369 }
335 370
336 /* check if we are in vblank */
337 radeon_pm_debug_check_in_vbl(rdev, false);
338 radeon_set_power_state(rdev); 371 radeon_set_power_state(rdev);
339 radeon_pm_debug_check_in_vbl(rdev, true);
340 rdev->pm.planned_action = PM_ACTION_NONE; 372 rdev->pm.planned_action = PM_ACTION_NONE;
341} 373}
342 374
@@ -353,10 +385,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
353 rdev->pm.req_vblank |= (1 << 1); 385 rdev->pm.req_vblank |= (1 << 1);
354 drm_vblank_get(rdev->ddev, 1); 386 drm_vblank_get(rdev->ddev, 1);
355 } 387 }
356 if (rdev->pm.active_crtcs) 388 radeon_pm_set_clocks_locked(rdev);
357 wait_event_interruptible_timeout(
358 rdev->irq.vblank_queue, 0,
359 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
360 if (rdev->pm.req_vblank & (1 << 0)) { 389 if (rdev->pm.req_vblank & (1 << 0)) {
361 rdev->pm.req_vblank &= ~(1 << 0); 390 rdev->pm.req_vblank &= ~(1 << 0);
362 drm_vblank_put(rdev->ddev, 0); 391 drm_vblank_put(rdev->ddev, 0);
@@ -366,7 +395,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
366 drm_vblank_put(rdev->ddev, 1); 395 drm_vblank_put(rdev->ddev, 1);
367 } 396 }
368 397
369 radeon_pm_set_clocks_locked(rdev);
370 mutex_unlock(&rdev->cp.mutex); 398 mutex_unlock(&rdev->cp.mutex);
371} 399}
372 400
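Worth noting: the removed wait used a constant-false condition (wait_event_interruptible_timeout(..., 0, ...)), so it effectively just slept for the full timeout. The new radeon_sync_with_vblank() instead waits on pm.vblank_sync, which the interrupt handlers (see the rs600 hunk further down) now set on every vblank, so the reclock runs right after a vblank. A userspace analogue of that handshake with a flag and a condition variable, timeout handling omitted for brevity; the extra thread stands in for the vblank interrupt:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool vblank_sync;

static void *fake_vblank_irq(void *arg)
{
        usleep(5000);                    /* pretend a vblank arrives a bit later */
        pthread_mutex_lock(&lock);
        vblank_sync = true;              /* rs600_irq_process() sets pm.vblank_sync */
        pthread_cond_signal(&cond);      /* wake_up(&rdev->irq.vblank_queue) */
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void sync_with_vblank(void)
{
        pthread_mutex_lock(&lock);
        while (!vblank_sync)             /* kernel version uses wait_event_timeout() */
                pthread_cond_wait(&cond, &lock);
        vblank_sync = false;             /* re-arm for the next reclock */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t irq;

        pthread_create(&irq, NULL, fake_vblank_irq, NULL);
        sync_with_vblank();
        printf("reclock now: we are right after a vblank\n");
        pthread_join(irq, NULL);
        return 0;
}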
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5c0dc082d330..eabbc9cf30a7 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -346,6 +346,7 @@
346# define RADEON_TVPLL_PWRMGT_OFF (1 << 30) 346# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
347# define RADEON_TVCLK_TURNOFF (1 << 31) 347# define RADEON_TVCLK_TURNOFF (1 << 31)
348#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ 348#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
349# define RADEON_PM_MODE_SEL (1 << 13)
349# define RADEON_TCL_BYPASS_DISABLE (1 << 20) 350# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
350#define RADEON_CLR_CMP_CLR_3D 0x1a24 351#define RADEON_CLR_CMP_CLR_3D 0x1a24
351#define RADEON_CLR_CMP_CLR_DST 0x15c8 352#define RADEON_CLR_CMP_CLR_DST 0x15c8
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 8f414a5f520f..af0da4ae3f55 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -26,20 +26,16 @@ r600 0x9400
260x00028408 VGT_INDX_OFFSET 260x00028408 VGT_INDX_OFFSET
270x00028AA0 VGT_INSTANCE_STEP_RATE_0 270x00028AA0 VGT_INSTANCE_STEP_RATE_0
280x00028AA4 VGT_INSTANCE_STEP_RATE_1 280x00028AA4 VGT_INSTANCE_STEP_RATE_1
290x000088C0 VGT_LAST_COPY_STATE
300x00028400 VGT_MAX_VTX_INDX 290x00028400 VGT_MAX_VTX_INDX
310x000088D8 VGT_MC_LAT_CNTL
320x00028404 VGT_MIN_VTX_INDX 300x00028404 VGT_MIN_VTX_INDX
330x00028A94 VGT_MULTI_PRIM_IB_RESET_EN 310x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
340x0002840C VGT_MULTI_PRIM_IB_RESET_INDX 320x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
350x00008970 VGT_NUM_INDICES 330x00008970 VGT_NUM_INDICES
360x00008974 VGT_NUM_INSTANCES 340x00008974 VGT_NUM_INSTANCES
370x00028A10 VGT_OUTPUT_PATH_CNTL 350x00028A10 VGT_OUTPUT_PATH_CNTL
380x00028C5C VGT_OUT_DEALLOC_CNTL
390x00028A84 VGT_PRIMITIVEID_EN 360x00028A84 VGT_PRIMITIVEID_EN
400x00008958 VGT_PRIMITIVE_TYPE 370x00008958 VGT_PRIMITIVE_TYPE
410x00028AB4 VGT_REUSE_OFF 380x00028AB4 VGT_REUSE_OFF
420x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
430x00028AB8 VGT_VTX_CNT_EN 390x00028AB8 VGT_VTX_CNT_EN
440x000088B0 VGT_VTX_VECT_EJECT_REG 400x000088B0 VGT_VTX_VECT_EJECT_REG
450x00028810 PA_CL_CLIP_CNTL 410x00028810 PA_CL_CLIP_CNTL
@@ -280,7 +276,6 @@ r600 0x9400
2800x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE 2760x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
2810x00028814 PA_SU_SC_MODE_CNTL 2770x00028814 PA_SU_SC_MODE_CNTL
2820x00028C08 PA_SU_VTX_CNTL 2780x00028C08 PA_SU_VTX_CNTL
2830x00008C00 SQ_CONFIG
2840x00008C04 SQ_GPR_RESOURCE_MGMT_1 2790x00008C04 SQ_GPR_RESOURCE_MGMT_1
2850x00008C08 SQ_GPR_RESOURCE_MGMT_2 2800x00008C08 SQ_GPR_RESOURCE_MGMT_2
2860x00008C10 SQ_STACK_RESOURCE_MGMT_1 2810x00008C10 SQ_STACK_RESOURCE_MGMT_1
@@ -320,18 +315,6 @@ r600 0x9400
3200x000283FC SQ_VTX_SEMANTIC_31 3150x000283FC SQ_VTX_SEMANTIC_31
3210x000288E0 SQ_VTX_SEMANTIC_CLEAR 3160x000288E0 SQ_VTX_SEMANTIC_CLEAR
3220x0003CFF4 SQ_VTX_START_INST_LOC 3170x0003CFF4 SQ_VTX_START_INST_LOC
3230x0003C000 SQ_TEX_SAMPLER_WORD0_0
3240x0003C004 SQ_TEX_SAMPLER_WORD1_0
3250x0003C008 SQ_TEX_SAMPLER_WORD2_0
3260x00030000 SQ_ALU_CONSTANT0_0
3270x00030004 SQ_ALU_CONSTANT1_0
3280x00030008 SQ_ALU_CONSTANT2_0
3290x0003000C SQ_ALU_CONSTANT3_0
3300x0003E380 SQ_BOOL_CONST_0
3310x0003E384 SQ_BOOL_CONST_1
3320x0003E388 SQ_BOOL_CONST_2
3330x0003E200 SQ_LOOP_CONST_0
3340x0003E200 SQ_LOOP_CONST_DX10_0
3350x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 3180x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
3360x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 3190x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
3370x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 3200x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
@@ -380,54 +363,6 @@ r600 0x9400
3800x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 3630x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
3810x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 3640x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
3820x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 3650x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
3830x000289C0 SQ_ALU_CONST_CACHE_GS_0
3840x000289C4 SQ_ALU_CONST_CACHE_GS_1
3850x000289C8 SQ_ALU_CONST_CACHE_GS_2
3860x000289CC SQ_ALU_CONST_CACHE_GS_3
3870x000289D0 SQ_ALU_CONST_CACHE_GS_4
3880x000289D4 SQ_ALU_CONST_CACHE_GS_5
3890x000289D8 SQ_ALU_CONST_CACHE_GS_6
3900x000289DC SQ_ALU_CONST_CACHE_GS_7
3910x000289E0 SQ_ALU_CONST_CACHE_GS_8
3920x000289E4 SQ_ALU_CONST_CACHE_GS_9
3930x000289E8 SQ_ALU_CONST_CACHE_GS_10
3940x000289EC SQ_ALU_CONST_CACHE_GS_11
3950x000289F0 SQ_ALU_CONST_CACHE_GS_12
3960x000289F4 SQ_ALU_CONST_CACHE_GS_13
3970x000289F8 SQ_ALU_CONST_CACHE_GS_14
3980x000289FC SQ_ALU_CONST_CACHE_GS_15
3990x00028940 SQ_ALU_CONST_CACHE_PS_0
4000x00028944 SQ_ALU_CONST_CACHE_PS_1
4010x00028948 SQ_ALU_CONST_CACHE_PS_2
4020x0002894C SQ_ALU_CONST_CACHE_PS_3
4030x00028950 SQ_ALU_CONST_CACHE_PS_4
4040x00028954 SQ_ALU_CONST_CACHE_PS_5
4050x00028958 SQ_ALU_CONST_CACHE_PS_6
4060x0002895C SQ_ALU_CONST_CACHE_PS_7
4070x00028960 SQ_ALU_CONST_CACHE_PS_8
4080x00028964 SQ_ALU_CONST_CACHE_PS_9
4090x00028968 SQ_ALU_CONST_CACHE_PS_10
4100x0002896C SQ_ALU_CONST_CACHE_PS_11
4110x00028970 SQ_ALU_CONST_CACHE_PS_12
4120x00028974 SQ_ALU_CONST_CACHE_PS_13
4130x00028978 SQ_ALU_CONST_CACHE_PS_14
4140x0002897C SQ_ALU_CONST_CACHE_PS_15
4150x00028980 SQ_ALU_CONST_CACHE_VS_0
4160x00028984 SQ_ALU_CONST_CACHE_VS_1
4170x00028988 SQ_ALU_CONST_CACHE_VS_2
4180x0002898C SQ_ALU_CONST_CACHE_VS_3
4190x00028990 SQ_ALU_CONST_CACHE_VS_4
4200x00028994 SQ_ALU_CONST_CACHE_VS_5
4210x00028998 SQ_ALU_CONST_CACHE_VS_6
4220x0002899C SQ_ALU_CONST_CACHE_VS_7
4230x000289A0 SQ_ALU_CONST_CACHE_VS_8
4240x000289A4 SQ_ALU_CONST_CACHE_VS_9
4250x000289A8 SQ_ALU_CONST_CACHE_VS_10
4260x000289AC SQ_ALU_CONST_CACHE_VS_11
4270x000289B0 SQ_ALU_CONST_CACHE_VS_12
4280x000289B4 SQ_ALU_CONST_CACHE_VS_13
4290x000289B8 SQ_ALU_CONST_CACHE_VS_14
4300x000289BC SQ_ALU_CONST_CACHE_VS_15
4310x000288D8 SQ_PGM_CF_OFFSET_ES 3660x000288D8 SQ_PGM_CF_OFFSET_ES
4320x000288DC SQ_PGM_CF_OFFSET_FS 3670x000288DC SQ_PGM_CF_OFFSET_FS
4330x000288D4 SQ_PGM_CF_OFFSET_GS 3680x000288D4 SQ_PGM_CF_OFFSET_GS
@@ -494,12 +429,7 @@ r600 0x9400
4940x00028438 SX_ALPHA_REF 4290x00028438 SX_ALPHA_REF
4950x00028410 SX_ALPHA_TEST_CONTROL 4300x00028410 SX_ALPHA_TEST_CONTROL
4960x00028350 SX_MISC 4310x00028350 SX_MISC
4970x0000A020 SMX_DC_CTL0
4980x0000A024 SMX_DC_CTL1
4990x0000A028 SMX_DC_CTL2
5000x00009608 TC_CNTL
5010x00009604 TC_INVALIDATE 4320x00009604 TC_INVALIDATE
5020x00009490 TD_CNTL
5030x00009400 TD_FILTER4 4330x00009400 TD_FILTER4
5040x00009404 TD_FILTER4_1 4340x00009404 TD_FILTER4_1
5050x00009408 TD_FILTER4_2 4350x00009408 TD_FILTER4_2
@@ -824,14 +754,9 @@ r600 0x9400
8240x00028428 CB_FOG_GREEN 7540x00028428 CB_FOG_GREEN
8250x00028424 CB_FOG_RED 7550x00028424 CB_FOG_RED
8260x00008040 WAIT_UNTIL 7560x00008040 WAIT_UNTIL
8270x00008950 CC_GC_SHADER_PIPE_CONFIG
8280x00008954 GC_USER_SHADER_PIPE_CONFIG
8290x00009714 VC_ENHANCE 7570x00009714 VC_ENHANCE
8300x00009830 DB_DEBUG 7580x00009830 DB_DEBUG
8310x00009838 DB_WATERMARKS 7590x00009838 DB_WATERMARKS
8320x00028D28 DB_SRESULTS_COMPARE_STATE0 7600x00028D28 DB_SRESULTS_COMPARE_STATE0
8330x00028D44 DB_ALPHA_TO_MASK 7610x00028D44 DB_ALPHA_TO_MASK
8340x00009504 TA_CNTL
8350x00009700 VC_CNTL 7620x00009700 VC_CNTL
8360x00009718 VC_CONFIG
8370x0000A02C SMX_DC_MC_INTF_CTL
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 273c7dcd454c..1a41cb268b72 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <drm/drmP.h> 30#include <drm/drmP.h>
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_asic.h"
32#include "rs400d.h" 33#include "rs400d.h"
33 34
34/* This files gather functions specifics to : rs400,rs480 */ 35/* This files gather functions specifics to : rs400,rs480 */
@@ -203,9 +204,9 @@ void rs400_gart_disable(struct radeon_device *rdev)
203 204
204void rs400_gart_fini(struct radeon_device *rdev) 205void rs400_gart_fini(struct radeon_device *rdev)
205{ 206{
207 radeon_gart_fini(rdev);
206 rs400_gart_disable(rdev); 208 rs400_gart_disable(rdev);
207 radeon_gart_table_ram_free(rdev); 209 radeon_gart_table_ram_free(rdev);
208 radeon_gart_fini(rdev);
209} 210}
210 211
211int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 212int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
@@ -265,6 +266,7 @@ void rs400_mc_init(struct radeon_device *rdev)
265 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 266 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
266 radeon_vram_location(rdev, &rdev->mc, base); 267 radeon_vram_location(rdev, &rdev->mc, base);
267 radeon_gtt_location(rdev, &rdev->mc); 268 radeon_gtt_location(rdev, &rdev->mc);
269 radeon_update_bandwidth_info(rdev);
268} 270}
269 271
270uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 272uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -389,6 +391,8 @@ static int rs400_startup(struct radeon_device *rdev)
389{ 391{
390 int r; 392 int r;
391 393
394 r100_set_common_regs(rdev);
395
392 rs400_mc_program(rdev); 396 rs400_mc_program(rdev);
393 /* Resume clock */ 397 /* Resume clock */
394 r300_clock_startup(rdev); 398 r300_clock_startup(rdev);
@@ -454,6 +458,7 @@ int rs400_suspend(struct radeon_device *rdev)
454 458
455void rs400_fini(struct radeon_device *rdev) 459void rs400_fini(struct radeon_device *rdev)
456{ 460{
461 radeon_pm_fini(rdev);
457 r100_cp_fini(rdev); 462 r100_cp_fini(rdev);
458 r100_wb_fini(rdev); 463 r100_wb_fini(rdev);
459 r100_ib_fini(rdev); 464 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 47f046b78c6b..abf824c2123d 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,6 +37,7 @@
37 */ 37 */
38#include "drmP.h" 38#include "drmP.h"
39#include "radeon.h" 39#include "radeon.h"
40#include "radeon_asic.h"
40#include "atom.h" 41#include "atom.h"
41#include "rs600d.h" 42#include "rs600d.h"
42 43
@@ -267,9 +268,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
267 268
268void rs600_gart_fini(struct radeon_device *rdev) 269void rs600_gart_fini(struct radeon_device *rdev)
269{ 270{
271 radeon_gart_fini(rdev);
270 rs600_gart_disable(rdev); 272 rs600_gart_disable(rdev);
271 radeon_gart_table_vram_free(rdev); 273 radeon_gart_table_vram_free(rdev);
272 radeon_gart_fini(rdev);
273} 274}
274 275
275#define R600_PTE_VALID (1 << 0) 276#define R600_PTE_VALID (1 << 0)
@@ -392,10 +393,12 @@ int rs600_irq_process(struct radeon_device *rdev)
392 /* Vertical blank interrupts */ 393 /* Vertical blank interrupts */
393 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { 394 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
394 drm_handle_vblank(rdev->ddev, 0); 395 drm_handle_vblank(rdev->ddev, 0);
396 rdev->pm.vblank_sync = true;
395 wake_up(&rdev->irq.vblank_queue); 397 wake_up(&rdev->irq.vblank_queue);
396 } 398 }
397 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { 399 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
398 drm_handle_vblank(rdev->ddev, 1); 400 drm_handle_vblank(rdev->ddev, 1);
401 rdev->pm.vblank_sync = true;
399 wake_up(&rdev->irq.vblank_queue); 402 wake_up(&rdev->irq.vblank_queue);
400 } 403 }
401 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { 404 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
@@ -472,13 +475,38 @@ void rs600_mc_init(struct radeon_device *rdev)
472 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 475 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
473 base = RREG32_MC(R_000004_MC_FB_LOCATION); 476 base = RREG32_MC(R_000004_MC_FB_LOCATION);
474 base = G_000004_MC_FB_START(base) << 16; 477 base = G_000004_MC_FB_START(base) << 16;
478 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
475 radeon_vram_location(rdev, &rdev->mc, base); 479 radeon_vram_location(rdev, &rdev->mc, base);
476 radeon_gtt_location(rdev, &rdev->mc); 480 radeon_gtt_location(rdev, &rdev->mc);
481 radeon_update_bandwidth_info(rdev);
477} 482}
478 483
479void rs600_bandwidth_update(struct radeon_device *rdev) 484void rs600_bandwidth_update(struct radeon_device *rdev)
480{ 485{
481 /* FIXME: implement, should this be like rs690 ? */ 486 struct drm_display_mode *mode0 = NULL;
487 struct drm_display_mode *mode1 = NULL;
488 u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
489 /* FIXME: implement full support */
490
491 radeon_update_display_priority(rdev);
492
493 if (rdev->mode_info.crtcs[0]->base.enabled)
494 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
495 if (rdev->mode_info.crtcs[1]->base.enabled)
496 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
497
498 rs690_line_buffer_adjust(rdev, mode0, mode1);
499
500 if (rdev->disp_priority == 2) {
501 d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
502 d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
503 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
504 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
505 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
506 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
507 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
508 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
509 }
482} 510}
483 511
484uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 512uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -598,6 +626,7 @@ int rs600_suspend(struct radeon_device *rdev)
598 626
599void rs600_fini(struct radeon_device *rdev) 627void rs600_fini(struct radeon_device *rdev)
600{ 628{
629 radeon_pm_fini(rdev);
601 r100_cp_fini(rdev); 630 r100_cp_fini(rdev);
602 r100_wb_fini(rdev); 631 r100_wb_fini(rdev);
603 r100_ib_fini(rdev); 632 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index c1c8f5885cbb..e52d2695510b 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -535,4 +535,57 @@
535#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) 535#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
536#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF 536#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
537 537
538#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
539#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
540#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
541#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
542#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
543#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
544#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
545#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
546#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
547#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
548#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
549#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
550#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
551#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
552#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
553#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
554#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
555#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
556#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
557#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
558#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
559#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
560#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
561#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
562#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
563#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
564#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
565#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
566#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
567#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
568#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
569#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
570#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
571#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
572#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
573#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
574#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
575#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
576#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
577#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
578#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
579#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
580#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
581#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
582#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
583#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
584#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
585#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
586#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
587#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
588#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
589#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
590
538#endif 591#endif
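The new definitions follow the file's S_/G_/C_ convention: S_REG_FIELD(x) shifts a value into the field, G_REG_FIELD(x) extracts it, and C_REG_FIELD is the AND-mask that clears it; this is how the bandwidth code forces D1MODE_PRIORITY_A_ALWAYS_ON when display priority is set high. A self-contained demonstration using the macros exactly as defined above (the initial register value is made up):

#include <stdio.h>

#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON    0xFFEFFFFF

int main(void)
{
        unsigned int cnt = 0x000001a4;   /* pretend D1MODE_PRIORITY_A_CNT readback */

        cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);   /* force high priority */
        printf("always_on = %u, reg = 0x%08x\n",
               G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(cnt), cnt);

        cnt &= C_006548_D1MODE_PRIORITY_A_ALWAYS_ON;      /* and clear it again */
        printf("always_on = %u, reg = 0x%08x\n",
               G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(cnt), cnt);
        return 0;
}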
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 83b9174f76f2..bbf3da790fd5 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -27,6 +27,7 @@
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "radeon_asic.h"
30#include "atom.h" 31#include "atom.h"
31#include "rs690d.h" 32#include "rs690d.h"
32 33
@@ -57,42 +58,57 @@ static void rs690_gpu_init(struct radeon_device *rdev)
57 } 58 }
58} 59}
59 60
61union igp_info {
62 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
63 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
64};
65
60void rs690_pm_info(struct radeon_device *rdev) 66void rs690_pm_info(struct radeon_device *rdev)
61{ 67{
62 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 68 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
63 struct _ATOM_INTEGRATED_SYSTEM_INFO *info; 69 union igp_info *info;
64 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
65 void *ptr;
66 uint16_t data_offset; 70 uint16_t data_offset;
67 uint8_t frev, crev; 71 uint8_t frev, crev;
68 fixed20_12 tmp; 72 fixed20_12 tmp;
69 73
70 atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, 74 if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
71 &frev, &crev, &data_offset); 75 &frev, &crev, &data_offset)) {
72 ptr = rdev->mode_info.atom_context->bios + data_offset; 76 info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
73 info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; 77
74 info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; 78 /* Get various system informations from bios */
75 /* Get various system informations from bios */ 79 switch (crev) {
76 switch (crev) { 80 case 1:
77 case 1: 81 tmp.full = rfixed_const(100);
78 tmp.full = rfixed_const(100); 82 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
79 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); 83 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
80 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); 84 rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
81 rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); 85 rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
82 rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); 86 rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
83 rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); 87 break;
84 break; 88 case 2:
85 case 2: 89 tmp.full = rfixed_const(100);
86 tmp.full = rfixed_const(100); 90 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
87 rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); 91 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
88 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); 92 rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
89 rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); 93 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
90 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); 94 rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
91 rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); 95 rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
92 rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); 96 rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
93 rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); 97 break;
94 break; 98 default:
95 default: 99 tmp.full = rfixed_const(100);
100 /* We assume the slower possible clock ie worst case */
101 /* DDR 333Mhz */
102 rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
103 /* FIXME: system clock ? */
104 rdev->pm.igp_system_mclk.full = rfixed_const(100);
105 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
106 rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
107 rdev->pm.igp_ht_link_width.full = rfixed_const(8);
108 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
109 break;
110 }
111 } else {
96 tmp.full = rfixed_const(100); 112 tmp.full = rfixed_const(100);
97 /* We assume the slower possible clock ie worst case */ 113 /* We assume the slower possible clock ie worst case */
98 /* DDR 333Mhz */ 114 /* DDR 333Mhz */
@@ -103,7 +119,6 @@ void rs690_pm_info(struct radeon_device *rdev)
103 rdev->pm.igp_ht_link_clk.full = rfixed_const(200); 119 rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
104 rdev->pm.igp_ht_link_width.full = rfixed_const(8); 120 rdev->pm.igp_ht_link_width.full = rfixed_const(8);
105 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 121 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
106 break;
107 } 122 }
108 /* Compute various bandwidth */ 123 /* Compute various bandwidth */
109 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ 124 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
@@ -131,7 +146,6 @@ void rs690_pm_info(struct radeon_device *rdev)
131 146
132void rs690_mc_init(struct radeon_device *rdev) 147void rs690_mc_init(struct radeon_device *rdev)
133{ 148{
134 fixed20_12 a;
135 u64 base; 149 u64 base;
136 150
137 rs400_gart_adjust_size(rdev); 151 rs400_gart_adjust_size(rdev);
@@ -145,18 +159,10 @@ void rs690_mc_init(struct radeon_device *rdev)
145 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 159 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
146 base = G_000100_MC_FB_START(base) << 16; 160 base = G_000100_MC_FB_START(base) << 16;
147 rs690_pm_info(rdev); 161 rs690_pm_info(rdev);
148 /* FIXME: we should enforce default clock in case GPU is not in
149 * default setup
150 */
151 a.full = rfixed_const(100);
152 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
153 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
154 a.full = rfixed_const(16);
155 /* core_bandwidth = sclk(Mhz) * 16 */
156 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
157 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
158 radeon_vram_location(rdev, &rdev->mc, base); 163 radeon_vram_location(rdev, &rdev->mc, base);
159 radeon_gtt_location(rdev, &rdev->mc); 164 radeon_gtt_location(rdev, &rdev->mc);
165 radeon_update_bandwidth_info(rdev);
160} 166}
161 167
162void rs690_line_buffer_adjust(struct radeon_device *rdev, 168void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -394,10 +400,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
394 struct drm_display_mode *mode1 = NULL; 400 struct drm_display_mode *mode1 = NULL;
395 struct rs690_watermark wm0; 401 struct rs690_watermark wm0;
396 struct rs690_watermark wm1; 402 struct rs690_watermark wm1;
397 u32 tmp; 403 u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
398 fixed20_12 priority_mark02, priority_mark12, fill_rate; 404 fixed20_12 priority_mark02, priority_mark12, fill_rate;
399 fixed20_12 a, b; 405 fixed20_12 a, b;
400 406
407 radeon_update_display_priority(rdev);
408
401 if (rdev->mode_info.crtcs[0]->base.enabled) 409 if (rdev->mode_info.crtcs[0]->base.enabled)
402 mode0 = &rdev->mode_info.crtcs[0]->base.mode; 410 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
403 if (rdev->mode_info.crtcs[1]->base.enabled) 411 if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -407,7 +415,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
407 * modes if the user specifies HIGH for displaypriority 415 * modes if the user specifies HIGH for displaypriority
408 * option. 416 * option.
409 */ 417 */
410 if (rdev->disp_priority == 2) { 418 if ((rdev->disp_priority == 2) &&
419 ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
411 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); 420 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
412 tmp &= C_000104_MC_DISP0R_INIT_LAT; 421 tmp &= C_000104_MC_DISP0R_INIT_LAT;
413 tmp &= C_000104_MC_DISP1R_INIT_LAT; 422 tmp &= C_000104_MC_DISP1R_INIT_LAT;
@@ -482,10 +491,16 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
482 priority_mark12.full = 0; 491 priority_mark12.full = 0;
483 if (wm1.priority_mark_max.full > priority_mark12.full) 492 if (wm1.priority_mark_max.full > priority_mark12.full)
484 priority_mark12.full = wm1.priority_mark_max.full; 493 priority_mark12.full = wm1.priority_mark_max.full;
485 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 494 d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
486 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 495 d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
487 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 496 if (rdev->disp_priority == 2) {
488 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 497 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
498 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
499 }
500 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
501 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
502 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
503 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
489 } else if (mode0) { 504 } else if (mode0) {
490 if (rfixed_trunc(wm0.dbpp) > 64) 505 if (rfixed_trunc(wm0.dbpp) > 64)
491 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); 506 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -512,8 +527,11 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
512 priority_mark02.full = 0; 527 priority_mark02.full = 0;
513 if (wm0.priority_mark_max.full > priority_mark02.full) 528 if (wm0.priority_mark_max.full > priority_mark02.full)
514 priority_mark02.full = wm0.priority_mark_max.full; 529 priority_mark02.full = wm0.priority_mark_max.full;
515 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 530 d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
516 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 531 if (rdev->disp_priority == 2)
532 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
533 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
534 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
517 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, 535 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
518 S_006D48_D2MODE_PRIORITY_A_OFF(1)); 536 S_006D48_D2MODE_PRIORITY_A_OFF(1));
519 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, 537 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
@@ -544,12 +562,15 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
544 priority_mark12.full = 0; 562 priority_mark12.full = 0;
545 if (wm1.priority_mark_max.full > priority_mark12.full) 563 if (wm1.priority_mark_max.full > priority_mark12.full)
546 priority_mark12.full = wm1.priority_mark_max.full; 564 priority_mark12.full = wm1.priority_mark_max.full;
565 d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
566 if (rdev->disp_priority == 2)
567 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
547 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, 568 WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
548 S_006548_D1MODE_PRIORITY_A_OFF(1)); 569 S_006548_D1MODE_PRIORITY_A_OFF(1));
549 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, 570 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
550 S_00654C_D1MODE_PRIORITY_B_OFF(1)); 571 S_00654C_D1MODE_PRIORITY_B_OFF(1));
551 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 572 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
552 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 573 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
553 } 574 }
554} 575}
555 576
@@ -657,6 +678,7 @@ int rs690_suspend(struct radeon_device *rdev)
657 678
658void rs690_fini(struct radeon_device *rdev) 679void rs690_fini(struct radeon_device *rdev)
659{ 680{
681 radeon_pm_fini(rdev);
660 r100_cp_fini(rdev); 682 r100_cp_fini(rdev);
661 r100_wb_fini(rdev); 683 r100_wb_fini(rdev);
662 r100_ib_fini(rdev); 684 r100_ib_fini(rdev);
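The watermark code above keeps using the fixed20_12 helpers (rfixed_const, rfixed_div, rfixed_trunc) even though the per-chip sclk setup moves into radeon_update_bandwidth_info(). As a rough user-space sketch only, assuming fixed20_12 holds the value in 20.12 fixed point (integer part shifted left by 12); the type and function names below are stand-ins, not the driver's:

	#include <stdint.h>
	#include <stdio.h>

	typedef struct { uint32_t full; } fx20_12;	/* stand-in for fixed20_12 */

	static fx20_12 fx_const(uint32_t v)		/* like rfixed_const() */
	{
		return (fx20_12){ v << 12 };
	}

	static uint32_t fx_trunc(fx20_12 a)		/* like rfixed_trunc() */
	{
		return a.full >> 12;
	}

	static fx20_12 fx_div(fx20_12 a, fx20_12 b)	/* like rfixed_div() */
	{
		/* widen before the pre-shift so the intermediate cannot overflow */
		return (fx20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
	}

	int main(void)
	{
		fx20_12 sclk = fx_div(fx_const(40000), fx_const(100));

		printf("sclk = %u\n", (unsigned)fx_trunc(sclk));	/* prints 400 */
		return 0;
	}

If default_sclk is given in 10 kHz units (as the divide-by-100 in the removed code suggests), the result is the engine clock in MHz.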
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 62d31e7a897f..36e6398a98ae 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -182,6 +182,9 @@
182#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) 182#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
183#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) 183#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
184#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF 184#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
185#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
186#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
187#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
185#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) 188#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
186#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) 189#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
187#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF 190#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
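The new *_ALWAYS_ON macros follow the usual rs690d.h S_/G_/C_ convention: S_ places a field value at its bit position, G_ extracts it, and C_ is the mask that clears only that field. A small standalone sketch of the same pattern for a hypothetical one-bit field at bit 20:

	#include <stdint.h>
	#include <stdio.h>

	#define S_FIELD(x)	(((uint32_t)(x) & 0x1) << 20)	/* set: shift the value into place */
	#define G_FIELD(x)	(((uint32_t)(x) >> 20) & 0x1)	/* get: shift back down and mask */
	#define C_FIELD		0xFFEFFFFFu			/* clear: every bit except bit 20 */

	int main(void)
	{
		uint32_t reg = 0x12045678;			/* pretend register contents */

		reg = (reg & C_FIELD) | S_FIELD(1);		/* read-modify-write of one field */
		printf("field = %u, reg = 0x%08X\n",
		       (unsigned)G_FIELD(reg), (unsigned)reg);
		return 0;
	}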
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 903b1e496ba4..9035121f4b58 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -30,6 +30,7 @@
30#include "drmP.h" 30#include "drmP.h"
31#include "rv515d.h" 31#include "rv515d.h"
32#include "radeon.h" 32#include "radeon.h"
33#include "radeon_asic.h"
33#include "atom.h" 34#include "atom.h"
34#include "rv515_reg_safe.h" 35#include "rv515_reg_safe.h"
35 36
@@ -280,19 +281,13 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
280 281
281void rv515_mc_init(struct radeon_device *rdev) 282void rv515_mc_init(struct radeon_device *rdev)
282{ 283{
283 fixed20_12 a;
284 284
285 rv515_vram_get_type(rdev); 285 rv515_vram_get_type(rdev);
286 r100_vram_init_sizes(rdev); 286 r100_vram_init_sizes(rdev);
287 radeon_vram_location(rdev, &rdev->mc, 0); 287 radeon_vram_location(rdev, &rdev->mc, 0);
288 if (!(rdev->flags & RADEON_IS_AGP)) 288 if (!(rdev->flags & RADEON_IS_AGP))
289 radeon_gtt_location(rdev, &rdev->mc); 289 radeon_gtt_location(rdev, &rdev->mc);
290 /* FIXME: we should enforce default clock in case GPU is not in 290 radeon_update_bandwidth_info(rdev);
291 * default setup
292 */
293 a.full = rfixed_const(100);
294 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
295 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
296} 291}
297 292
298uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 293uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -540,6 +535,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
540 535
541void rv515_fini(struct radeon_device *rdev) 536void rv515_fini(struct radeon_device *rdev)
542{ 537{
538 radeon_pm_fini(rdev);
543 r100_cp_fini(rdev); 539 r100_cp_fini(rdev);
544 r100_wb_fini(rdev); 540 r100_wb_fini(rdev);
545 r100_ib_fini(rdev); 541 r100_ib_fini(rdev);
@@ -1021,7 +1017,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
1021 struct drm_display_mode *mode1 = NULL; 1017 struct drm_display_mode *mode1 = NULL;
1022 struct rv515_watermark wm0; 1018 struct rv515_watermark wm0;
1023 struct rv515_watermark wm1; 1019 struct rv515_watermark wm1;
1024 u32 tmp; 1020 u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
1025 fixed20_12 priority_mark02, priority_mark12, fill_rate; 1021 fixed20_12 priority_mark02, priority_mark12, fill_rate;
1026 fixed20_12 a, b; 1022 fixed20_12 a, b;
1027 1023
@@ -1089,10 +1085,16 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
1089 priority_mark12.full = 0; 1085 priority_mark12.full = 0;
1090 if (wm1.priority_mark_max.full > priority_mark12.full) 1086 if (wm1.priority_mark_max.full > priority_mark12.full)
1091 priority_mark12.full = wm1.priority_mark_max.full; 1087 priority_mark12.full = wm1.priority_mark_max.full;
1092 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 1088 d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
1093 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 1089 d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
1094 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 1090 if (rdev->disp_priority == 2) {
1095 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 1091 d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1092 d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1093 }
1094 WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
1095 WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
1096 WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
1097 WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
1096 } else if (mode0) { 1098 } else if (mode0) {
1097 if (rfixed_trunc(wm0.dbpp) > 64) 1099 if (rfixed_trunc(wm0.dbpp) > 64)
1098 a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); 1100 a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
@@ -1119,8 +1121,11 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
1119 priority_mark02.full = 0; 1121 priority_mark02.full = 0;
1120 if (wm0.priority_mark_max.full > priority_mark02.full) 1122 if (wm0.priority_mark_max.full > priority_mark02.full)
1121 priority_mark02.full = wm0.priority_mark_max.full; 1123 priority_mark02.full = wm0.priority_mark_max.full;
1122 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 1124 d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
1123 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 1125 if (rdev->disp_priority == 2)
1126 d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1127 WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
1128 WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
1124 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); 1129 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1125 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); 1130 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1126 } else { 1131 } else {
@@ -1149,10 +1154,13 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
1149 priority_mark12.full = 0; 1154 priority_mark12.full = 0;
1150 if (wm1.priority_mark_max.full > priority_mark12.full) 1155 if (wm1.priority_mark_max.full > priority_mark12.full)
1151 priority_mark12.full = wm1.priority_mark_max.full; 1156 priority_mark12.full = wm1.priority_mark_max.full;
1157 d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
1158 if (rdev->disp_priority == 2)
1159 d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
1152 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); 1160 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1153 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); 1161 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1154 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 1162 WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
1155 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 1163 WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
1156 } 1164 }
1157} 1165}
1158 1166
@@ -1162,6 +1170,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
1162 struct drm_display_mode *mode0 = NULL; 1170 struct drm_display_mode *mode0 = NULL;
1163 struct drm_display_mode *mode1 = NULL; 1171 struct drm_display_mode *mode1 = NULL;
1164 1172
1173 radeon_update_display_priority(rdev);
1174
1165 if (rdev->mode_info.crtcs[0]->base.enabled) 1175 if (rdev->mode_info.crtcs[0]->base.enabled)
1166 mode0 = &rdev->mode_info.crtcs[0]->base.mode; 1176 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1167 if (rdev->mode_info.crtcs[1]->base.enabled) 1177 if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -1171,7 +1181,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
1171 * modes if the user specifies HIGH for displaypriority 1181 * modes if the user specifies HIGH for displaypriority
1172 * option. 1182 * option.
1173 */ 1183 */
1174 if (rdev->disp_priority == 2) { 1184 if ((rdev->disp_priority == 2) &&
1185 (rdev->family == CHIP_RV515)) {
1175 tmp = RREG32_MC(MC_MISC_LAT_TIMER); 1186 tmp = RREG32_MC(MC_MISC_LAT_TIMER);
1176 tmp &= ~MC_DISP1R_INIT_LAT_MASK; 1187 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
1177 tmp &= ~MC_DISP0R_INIT_LAT_MASK; 1188 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 188e62d10f8f..97958a64df1a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -30,6 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include "drmP.h" 31#include "drmP.h"
32#include "radeon.h" 32#include "radeon.h"
33#include "radeon_asic.h"
33#include "radeon_drm.h" 34#include "radeon_drm.h"
34#include "rv770d.h" 35#include "rv770d.h"
35#include "atom.h" 36#include "atom.h"
@@ -126,9 +127,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
126 127
127void rv770_pcie_gart_fini(struct radeon_device *rdev) 128void rv770_pcie_gart_fini(struct radeon_device *rdev)
128{ 129{
130 radeon_gart_fini(rdev);
129 rv770_pcie_gart_disable(rdev); 131 rv770_pcie_gart_disable(rdev);
130 radeon_gart_table_vram_free(rdev); 132 radeon_gart_table_vram_free(rdev);
131 radeon_gart_fini(rdev);
132} 133}
133 134
134 135
@@ -648,10 +649,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
648 649
649 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 650 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
650 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 651 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
652 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
651 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 653 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
652 654
653 WREG32(CGTS_SYS_TCC_DISABLE, 0); 655 WREG32(CGTS_SYS_TCC_DISABLE, 0);
654 WREG32(CGTS_TCC_DISABLE, 0); 656 WREG32(CGTS_TCC_DISABLE, 0);
657 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
658 WREG32(CGTS_USER_TCC_DISABLE, 0);
655 659
656 num_qd_pipes = 660 num_qd_pipes =
657 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 661 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
@@ -865,7 +869,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
865 869
866int rv770_mc_init(struct radeon_device *rdev) 870int rv770_mc_init(struct radeon_device *rdev)
867{ 871{
868 fixed20_12 a;
869 u32 tmp; 872 u32 tmp;
870 int chansize, numchan; 873 int chansize, numchan;
871 874
@@ -909,12 +912,8 @@ int rv770_mc_init(struct radeon_device *rdev)
909 rdev->mc.real_vram_size = rdev->mc.aper_size; 912 rdev->mc.real_vram_size = rdev->mc.aper_size;
910 } 913 }
911 r600_vram_gtt_location(rdev, &rdev->mc); 914 r600_vram_gtt_location(rdev, &rdev->mc);
912 /* FIXME: we should enforce default clock in case GPU is not in 915 radeon_update_bandwidth_info(rdev);
913 * default setup 916
914 */
915 a.full = rfixed_const(100);
916 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
917 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
918 return 0; 917 return 0;
919} 918}
920 919
@@ -1014,6 +1013,13 @@ int rv770_resume(struct radeon_device *rdev)
1014 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1013 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1015 return r; 1014 return r;
1016 } 1015 }
1016
1017 r = r600_audio_init(rdev);
1018 if (r) {
1019 dev_err(rdev->dev, "radeon: audio init failed\n");
1020 return r;
1021 }
1022
1017 return r; 1023 return r;
1018 1024
1019} 1025}
@@ -1022,6 +1028,7 @@ int rv770_suspend(struct radeon_device *rdev)
1022{ 1028{
1023 int r; 1029 int r;
1024 1030
1031 r600_audio_fini(rdev);
1025 /* FIXME: we should wait for ring to be empty */ 1032 /* FIXME: we should wait for ring to be empty */
1026 r700_cp_stop(rdev); 1033 r700_cp_stop(rdev);
1027 rdev->cp.ready = false; 1034 rdev->cp.ready = false;
@@ -1145,11 +1152,19 @@ int rv770_init(struct radeon_device *rdev)
1145 } 1152 }
1146 } 1153 }
1147 } 1154 }
1155
1156 r = r600_audio_init(rdev);
1157 if (r) {
1158 dev_err(rdev->dev, "radeon: audio init failed\n");
1159 return r;
1160 }
1161
1148 return 0; 1162 return 0;
1149} 1163}
1150 1164
1151void rv770_fini(struct radeon_device *rdev) 1165void rv770_fini(struct radeon_device *rdev)
1152{ 1166{
1167 radeon_pm_fini(rdev);
1153 r600_blit_fini(rdev); 1168 r600_blit_fini(rdev);
1154 r600_cp_fini(rdev); 1169 r600_cp_fini(rdev);
1155 r600_wb_fini(rdev); 1170 r600_wb_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 89c38c49066f..dd47b2a9a791 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)
1425 1425
1426 atomic_set(&glob->bo_count, 0); 1426 atomic_set(&glob->bo_count, 0);
1427 1427
1428 kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type); 1428 ret = kobject_init_and_add(
1429 ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects"); 1429 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1430 if (unlikely(ret != 0)) 1430 if (unlikely(ret != 0))
1431 kobject_put(&glob->kobj); 1431 kobject_put(&glob->kobj);
1432 return ret; 1432 return ret;
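This hunk and the ttm_memory.c hunks below fold the separate kobject_init() plus kobject_add() calls into a single kobject_init_and_add(). A hedged sketch of the resulting pattern, with hypothetical names rather than the TTM code itself:

	#include <linux/kobject.h>

	/* hypothetical helper, not the TTM code itself */
	static int example_register(struct kobject *kobj, struct kobj_type *ktype,
				    struct kobject *parent)
	{
		int ret;

		/* initialize kobj, set its ktype and parent, and add it to sysfs
		 * under the given name, all in one call */
		ret = kobject_init_and_add(kobj, ktype, parent, "example");
		if (ret)
			/* kobj is already initialized here, so drop the reference
			 * with kobject_put() instead of freeing it directly */
			kobject_put(kobj);
		return ret;
	}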
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e055a3af926d..801b702566e6 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -261,8 +261,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
261 zone->used_mem = 0; 261 zone->used_mem = 0;
262 zone->glob = glob; 262 zone->glob = glob;
263 glob->zone_kernel = zone; 263 glob->zone_kernel = zone;
264 kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); 264 ret = kobject_init_and_add(
265 ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); 265 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
266 if (unlikely(ret != 0)) { 266 if (unlikely(ret != 0)) {
267 kobject_put(&zone->kobj); 267 kobject_put(&zone->kobj);
268 return ret; 268 return ret;
@@ -297,8 +297,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
297 zone->used_mem = 0; 297 zone->used_mem = 0;
298 zone->glob = glob; 298 zone->glob = glob;
299 glob->zone_highmem = zone; 299 glob->zone_highmem = zone;
300 kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); 300 ret = kobject_init_and_add(
301 ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); 301 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
302 if (unlikely(ret != 0)) { 302 if (unlikely(ret != 0)) {
303 kobject_put(&zone->kobj); 303 kobject_put(&zone->kobj);
304 return ret; 304 return ret;
@@ -344,8 +344,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
344 zone->used_mem = 0; 344 zone->used_mem = 0;
345 zone->glob = glob; 345 zone->glob = glob;
346 glob->zone_dma32 = zone; 346 glob->zone_dma32 = zone;
347 kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); 347 ret = kobject_init_and_add(
348 ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); 348 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
349 if (unlikely(ret != 0)) { 349 if (unlikely(ret != 0)) {
350 kobject_put(&zone->kobj); 350 kobject_put(&zone->kobj);
351 return ret; 351 return ret;
@@ -366,10 +366,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
366 glob->swap_queue = create_singlethread_workqueue("ttm_swap"); 366 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
367 INIT_WORK(&glob->work, ttm_shrink_work); 367 INIT_WORK(&glob->work, ttm_shrink_work);
368 init_waitqueue_head(&glob->queue); 368 init_waitqueue_head(&glob->queue);
369 kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type); 369 ret = kobject_init_and_add(
370 ret = kobject_add(&glob->kobj, 370 &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
371 ttm_get_kobj(),
372 "memory_accounting");
373 if (unlikely(ret != 0)) { 371 if (unlikely(ret != 0)) {
374 kobject_put(&glob->kobj); 372 kobject_put(&glob->kobj);
375 return ret; 373 return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0ef7f73ea56c..d5fd5b8faeb3 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30 30
31#include <linux/vmalloc.h>
32#include <linux/sched.h> 31#include <linux/sched.h>
33#include <linux/highmem.h> 32#include <linux/highmem.h>
34#include <linux/pagemap.h> 33#include <linux/pagemap.h>
@@ -36,6 +35,7 @@
36#include <linux/swap.h> 35#include <linux/swap.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38#include "drm_cache.h" 37#include "drm_cache.h"
38#include "drm_mem_util.h"
39#include "ttm/ttm_module.h" 39#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h" 40#include "ttm/ttm_bo_driver.h"
41#include "ttm/ttm_placement.h" 41#include "ttm/ttm_placement.h"
@@ -44,32 +44,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
44 44
45/** 45/**
46 * Allocates storage for pointers to the pages that back the ttm. 46 * Allocates storage for pointers to the pages that back the ttm.
47 *
48 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
49 */ 47 */
50static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) 48static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
51{ 49{
52 unsigned long size = ttm->num_pages * sizeof(*ttm->pages); 50 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
53 ttm->pages = NULL;
54
55 if (size <= PAGE_SIZE)
56 ttm->pages = kzalloc(size, GFP_KERNEL);
57
58 if (!ttm->pages) {
59 ttm->pages = vmalloc_user(size);
60 if (ttm->pages)
61 ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
62 }
63} 51}
64 52
65static void ttm_tt_free_page_directory(struct ttm_tt *ttm) 53static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
66{ 54{
67 if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) { 55 drm_free_large(ttm->pages);
68 vfree(ttm->pages);
69 ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
70 } else {
71 kfree(ttm->pages);
72 }
73 ttm->pages = NULL; 56 ttm->pages = NULL;
74} 57}
75 58
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index f20b8bcbef39..30ad13344f7b 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
1config DRM_VMWGFX 1config DRM_VMWGFX
2 tristate "DRM driver for VMware Virtual GPU" 2 tristate "DRM driver for VMware Virtual GPU"
3 depends on DRM && PCI 3 depends on DRM && PCI && FB
4 select FB_DEFERRED_IO 4 select FB_DEFERRED_IO
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index cab13e8c7d29..62416e6baeca 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
53static int gyration_event(struct hid_device *hdev, struct hid_field *field, 53static int gyration_event(struct hid_device *hdev, struct hid_field *field,
54 struct hid_usage *usage, __s32 value) 54 struct hid_usage *usage, __s32 value)
55{ 55{
56 struct input_dev *input = field->hidinput->input; 56
57 if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
58 return 0;
57 59
58 if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK && 60 if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
59 (usage->hid & 0xff) == 0x82) { 61 (usage->hid & 0xff) == 0x82) {
62 struct input_dev *input = field->hidinput->input;
60 input_event(input, usage->type, usage->code, 1); 63 input_event(input, usage->type, usage->code, 1);
61 input_sync(input); 64 input_sync(input);
62 input_event(input, usage->type, usage->code, 0); 65 input_event(input, usage->type, usage->code, 0);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2cacbe81c4e7..1152f9b5fd44 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -61,6 +61,7 @@ static const struct hid_blacklist {
61 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 61 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
62 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 62 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
63 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
64 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
64 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, 65 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
65 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 66 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
66 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 67 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index fcb6ec1af173..72450237a0f4 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -295,6 +295,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
295 /* On x86 a breakpoint stop requires it to be decremented */ 295 /* On x86 a breakpoint stop requires it to be decremented */
296 if (addr + 1 == kgdbts_regs.ip) 296 if (addr + 1 == kgdbts_regs.ip)
297 offset = -1; 297 offset = -1;
298#elif defined(CONFIG_SUPERH)
299 /* On SUPERH a breakpoint stop requires it to be decremented */
300 if (addr + 2 == kgdbts_regs.pc)
301 offset = -2;
298#endif 302#endif
299 if (strcmp(arg, "silent") && 303 if (strcmp(arg, "silent") &&
300 instruction_pointer(&kgdbts_regs) + offset != addr) { 304 instruction_pointer(&kgdbts_regs) + offset != addr) {
@@ -305,6 +309,8 @@ static int check_and_rewind_pc(char *put_str, char *arg)
305#ifdef CONFIG_X86 309#ifdef CONFIG_X86
306 /* On x86 adjust the instruction pointer if needed */ 310 /* On x86 adjust the instruction pointer if needed */
307 kgdbts_regs.ip += offset; 311 kgdbts_regs.ip += offset;
312#elif defined(CONFIG_SUPERH)
313 kgdbts_regs.pc += offset;
308#endif 314#endif
309 return 0; 315 return 0;
310} 316}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 406757a9d7ea..dee4fb56b094 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -376,8 +376,11 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
376 if (!np->type) 376 if (!np->type)
377 np->type = "<NULL>"; 377 np->type = "<NULL>";
378 } 378 }
379 while (tag == OF_DT_BEGIN_NODE) { 379 while (tag == OF_DT_BEGIN_NODE || tag == OF_DT_NOP) {
380 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); 380 if (tag == OF_DT_NOP)
381 *p += 4;
382 else
383 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
381 tag = be32_to_cpup((__be32 *)(*p)); 384 tag = be32_to_cpup((__be32 *)(*p));
382 } 385 }
383 if (tag != OF_DT_END_NODE) { 386 if (tag != OF_DT_END_NODE) {
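The unflatten loop now tolerates OF_DT_NOP tags between child nodes: a NOP is a 4-byte placeholder with no payload (typically left behind when flat-device-tree tooling deletes or pads entries), so the walker just steps over it. A toy, self-contained walk over a tag stream using the standard FDT tag values, with node payloads omitted for brevity:

	#include <stdint.h>
	#include <stdio.h>

	#define TAG_BEGIN_NODE 0x1	/* standard FDT tag values */
	#define TAG_END_NODE   0x2
	#define TAG_NOP        0x4
	#define TAG_END        0x9

	int main(void)
	{
		uint32_t stream[] = { TAG_BEGIN_NODE, TAG_NOP, TAG_BEGIN_NODE, TAG_NOP,
				      TAG_END_NODE, TAG_END_NODE, TAG_END };
		unsigned int i, depth = 0;

		for (i = 0; stream[i] != TAG_END; i++) {
			switch (stream[i]) {
			case TAG_NOP:		/* no payload: just skip the word */
				break;
			case TAG_BEGIN_NODE:
				depth++;
				break;
			case TAG_END_NODE:
				depth--;
				break;
			}
		}
		printf("nodes balanced, final depth = %u\n", depth);
		return 0;
	}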
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 3ea0b29c0104..27c0e6eb7136 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2123,6 +2123,9 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev)
2123 } 2123 }
2124} 2124}
2125DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); 2125DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2126DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
2127DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
2128DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
2126DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); 2129DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2127 2130
2128/* Go through the list of Hypertransport capabilities and 2131/* Go through the list of Hypertransport capabilities and
@@ -2495,39 +2498,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
2495DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, 2498DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
2496 quirk_msi_intx_disable_bug); 2499 quirk_msi_intx_disable_bug);
2497 2500
2498/*
2499 * MSI does not work with the AMD RS780/RS880 internal graphics and HDMI audio
2500 * devices unless the BIOS has initialized the nb_cntl.strap_msi_enable bit.
2501 */
2502static void __init rs780_int_gfx_disable_msi(struct pci_dev *int_gfx_bridge)
2503{
2504 u32 nb_cntl;
2505
2506 if (!int_gfx_bridge->subordinate)
2507 return;
2508
2509 pci_bus_write_config_dword(int_gfx_bridge->bus, PCI_DEVFN(0, 0),
2510 0x60, 0);
2511 pci_bus_read_config_dword(int_gfx_bridge->bus, PCI_DEVFN(0, 0),
2512 0x64, &nb_cntl);
2513
2514 if (!(nb_cntl & BIT(10))) {
2515 dev_warn(&int_gfx_bridge->dev,
2516 FW_WARN "RS780: MSI for internal graphics disabled\n");
2517 int_gfx_bridge->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2518 }
2519}
2520
2521#define PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX 0x9602
2522
2523DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,
2524 PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX,
2525 rs780_int_gfx_disable_msi);
2526/* wrong vendor ID on M4A785TD motherboard: */
2527DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK,
2528 PCI_DEVICE_ID_AMD_RS780_P2P_INT_GFX,
2529 rs780_int_gfx_disable_msi);
2530
2531#endif /* CONFIG_PCI_MSI */ 2501#endif /* CONFIG_PCI_MSI */
2532 2502
2533#ifdef CONFIG_PCI_IOV 2503#ifdef CONFIG_PCI_IOV
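The dedicated rs780_int_gfx_disable_msi() fixup is dropped in favour of plain quirk_disable_msi entries for device 0x9602 under the AMD, ASUSTeK and AI vendor IDs. For reference, a hedged sketch of how such a final-stage PCI fixup is declared; the hook name is hypothetical and quirk_disable_msi's real body is only partly visible in this hunk:

	#include <linux/pci.h>

	static void example_disable_msi(struct pci_dev *dev)
	{
		/* mark everything behind this bridge as unable to use MSI */
		if (dev->subordinate) {
			dev_warn(&dev->dev, "disabling MSI behind this bridge\n");
			dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
		}
	}
	/* runs once for each matching vendor/device after enumeration */
	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, example_disable_msi);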
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e631dbeafd79..7bec4588c268 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -385,6 +385,16 @@ config EEEPC_LAPTOP
385 385
386 If you have an Eee PC laptop, say Y or M here. 386 If you have an Eee PC laptop, say Y or M here.
387 387
388config EEEPC_WMI
389 tristate "Eee PC WMI Hotkey Driver (EXPERIMENTAL)"
390 depends on ACPI_WMI
391 depends on INPUT
392 depends on EXPERIMENTAL
393 ---help---
394 Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
395
396 To compile this driver as a module, choose M here: the module will
397 be called eeepc-wmi.
388 398
389config ACPI_WMI 399config ACPI_WMI
390 tristate "WMI" 400 tristate "WMI"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9cd9fa0a27e6..a906490e3530 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -4,6 +4,7 @@
4# 4#
5obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o 5obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
6obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o 6obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
7obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
7obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o 8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
8obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o 9obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
9obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o 10obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index c2d4569aef3a..52262b012abb 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -140,7 +140,7 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
140 140
141/* Backlight */ 141/* Backlight */
142static acpi_handle lcd_switch_handle; 142static acpi_handle lcd_switch_handle;
143static const char *lcd_switch_paths[] = { 143static char *lcd_switch_paths[] = {
144 "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ 144 "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
145 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ 145 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
146 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ 146 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
@@ -154,7 +154,7 @@ static const char *lcd_switch_paths[] = {
154#define METHOD_SWITCH_DISPLAY "SDSP" 154#define METHOD_SWITCH_DISPLAY "SDSP"
155 155
156static acpi_handle display_get_handle; 156static acpi_handle display_get_handle;
157static const char *display_get_paths[] = { 157static char *display_get_paths[] = {
158 /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ 158 /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
159 "\\_SB.PCI0.P0P1.VGA.GETD", 159 "\\_SB.PCI0.P0P1.VGA.GETD",
160 /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ 160 /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
new file mode 100644
index 000000000000..2466b7b7fb0e
--- /dev/null
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -0,0 +1,157 @@
1/*
2 * Eee PC WMI hotkey driver
3 *
4 * Copyright(C) 2010 Intel Corporation.
5 *
6 * Portions based on wistron_btns.c:
7 * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
8 * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
9 * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/types.h>
30#include <linux/input.h>
31#include <linux/input/sparse-keymap.h>
32#include <acpi/acpi_bus.h>
33#include <acpi/acpi_drivers.h>
34
35MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
36MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
37MODULE_LICENSE("GPL");
38
39#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
40
41MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
42
43#define NOTIFY_BRNUP_MIN 0x11
44#define NOTIFY_BRNUP_MAX 0x1f
45#define NOTIFY_BRNDOWN_MIN 0x20
46#define NOTIFY_BRNDOWN_MAX 0x2e
47
48static const struct key_entry eeepc_wmi_keymap[] = {
49 /* Sleep already handled via generic ACPI code */
50 { KE_KEY, 0x5d, { KEY_WLAN } },
51 { KE_KEY, 0x32, { KEY_MUTE } },
52 { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
53 { KE_KEY, 0x30, { KEY_VOLUMEUP } },
54 { KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } },
55 { KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } },
56 { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
57 { KE_END, 0},
58};
59
60static struct input_dev *eeepc_wmi_input_dev;
61
62static void eeepc_wmi_notify(u32 value, void *context)
63{
64 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
65 union acpi_object *obj;
66 acpi_status status;
67 int code;
68
69 status = wmi_get_event_data(value, &response);
70 if (status != AE_OK) {
71 pr_err("EEEPC WMI: bad event status 0x%x\n", status);
72 return;
73 }
74
75 obj = (union acpi_object *)response.pointer;
76
77 if (obj && obj->type == ACPI_TYPE_INTEGER) {
78 code = obj->integer.value;
79
80 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
81 code = NOTIFY_BRNUP_MIN;
82 else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
83 code = NOTIFY_BRNDOWN_MIN;
84
85 if (!sparse_keymap_report_event(eeepc_wmi_input_dev,
86 code, 1, true))
87 pr_info("EEEPC WMI: Unknown key %x pressed\n", code);
88 }
89
90 kfree(obj);
91}
92
93static int eeepc_wmi_input_setup(void)
94{
95 int err;
96
97 eeepc_wmi_input_dev = input_allocate_device();
98 if (!eeepc_wmi_input_dev)
99 return -ENOMEM;
100
101 eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys";
102 eeepc_wmi_input_dev->phys = "wmi/input0";
103 eeepc_wmi_input_dev->id.bustype = BUS_HOST;
104
105 err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL);
106 if (err)
107 goto err_free_dev;
108
109 err = input_register_device(eeepc_wmi_input_dev);
110 if (err)
111 goto err_free_keymap;
112
113 return 0;
114
115err_free_keymap:
116 sparse_keymap_free(eeepc_wmi_input_dev);
117err_free_dev:
118 input_free_device(eeepc_wmi_input_dev);
119 return err;
120}
121
122static int __init eeepc_wmi_init(void)
123{
124 int err;
125 acpi_status status;
126
127 if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) {
128 pr_warning("EEEPC WMI: No known WMI GUID found\n");
129 return -ENODEV;
130 }
131
132 err = eeepc_wmi_input_setup();
133 if (err)
134 return err;
135
136 status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
137 eeepc_wmi_notify, NULL);
138 if (ACPI_FAILURE(status)) {
139 sparse_keymap_free(eeepc_wmi_input_dev);
140 input_unregister_device(eeepc_wmi_input_dev);
141 pr_err("EEEPC WMI: Unable to register notify handler - %d\n",
142 status);
143 return -ENODEV;
144 }
145
146 return 0;
147}
148
149static void __exit eeepc_wmi_exit(void)
150{
151 wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
152 sparse_keymap_free(eeepc_wmi_input_dev);
153 input_unregister_device(eeepc_wmi_input_dev);
154}
155
156module_init(eeepc_wmi_init);
157module_exit(eeepc_wmi_exit);
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 81fc269046fd..01f7731e59b8 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1454,8 +1454,10 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
1454 if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) { 1454 if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
1455 err = sunsu_kbd_ms_init(up); 1455 err = sunsu_kbd_ms_init(up);
1456 if (err) { 1456 if (err) {
1457 of_iounmap(&op->resource[0],
1458 up->port.membase, up->reg_size);
1457 kfree(up); 1459 kfree(up);
1458 goto out_unmap; 1460 return err;
1459 } 1461 }
1460 dev_set_drvdata(&op->dev, up); 1462 dev_set_drvdata(&op->dev, up);
1461 1463
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 12ac9cd32a07..df1bae9b048e 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1370,6 +1370,12 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
1370{ 1370{
1371 struct at91_udc *udc = _udc; 1371 struct at91_udc *udc = _udc;
1372 u32 rescans = 5; 1372 u32 rescans = 5;
1373 int disable_clock = 0;
1374
1375 if (!udc->clocked) {
1376 clk_on(udc);
1377 disable_clock = 1;
1378 }
1373 1379
1374 while (rescans--) { 1380 while (rescans--) {
1375 u32 status; 1381 u32 status;
@@ -1458,6 +1464,9 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
1458 } 1464 }
1459 } 1465 }
1460 1466
1467 if (disable_clock)
1468 clk_off(udc);
1469
1461 return IRQ_HANDLED; 1470 return IRQ_HANDLED;
1462} 1471}
1463 1472
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index e848ecb896fd..888d8f166c0b 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -23,7 +23,6 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/err.h>
27#include <linux/io.h> 26#include <linux/io.h>
28#include <linux/platform_device.h> 27#include <linux/platform_device.h>
29#include <linux/clk.h> 28#include <linux/clk.h>
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index 03a03f302cb3..b9c2b948d34d 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -241,11 +241,27 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
241static int __devinit e3d_pci_register(struct pci_dev *pdev, 241static int __devinit e3d_pci_register(struct pci_dev *pdev,
242 const struct pci_device_id *ent) 242 const struct pci_device_id *ent)
243{ 243{
244 struct device_node *of_node;
245 const char *device_type;
244 struct fb_info *info; 246 struct fb_info *info;
245 struct e3d_info *ep; 247 struct e3d_info *ep;
246 unsigned int line_length; 248 unsigned int line_length;
247 int err; 249 int err;
248 250
251 of_node = pci_device_to_OF_node(pdev);
252 if (!of_node) {
253 printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
254 pci_name(pdev));
255 return -ENODEV;
256 }
257
258 device_type = of_get_property(of_node, "device_type", NULL);
259 if (!device_type) {
260 printk(KERN_INFO "e3d: Ignoring secondary output device "
261 "at %s\n", pci_name(pdev));
262 return -ENODEV;
263 }
264
249 err = pci_enable_device(pdev); 265 err = pci_enable_device(pdev);
250 if (err < 0) { 266 if (err < 0) {
251 printk(KERN_ERR "e3d: Cannot enable PCI device %s\n", 267 printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
@@ -264,13 +280,7 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
264 ep->info = info; 280 ep->info = info;
265 ep->pdev = pdev; 281 ep->pdev = pdev;
266 spin_lock_init(&ep->lock); 282 spin_lock_init(&ep->lock);
267 ep->of_node = pci_device_to_OF_node(pdev); 283 ep->of_node = of_node;
268 if (!ep->of_node) {
269 printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
270 pci_name(pdev));
271 err = -ENODEV;
272 goto err_release_fb;
273 }
274 284
275 /* Read the PCI base register of the frame buffer, which we 285 /* Read the PCI base register of the frame buffer, which we
276 * need in order to interpret the RAMDAC_VID_*FB* values in 286 * need in order to interpret the RAMDAC_VID_*FB* values in
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c1ef50154868..6fcc7e71fbaa 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
309{ 309{
310 struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options; 310 struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
311 wchar_t *ip, *ext_start, *end, *name_start; 311 wchar_t *ip, *ext_start, *end, *name_start;
312 unsigned char base[9], ext[4], buf[8], *p; 312 unsigned char base[9], ext[4], buf[5], *p;
313 unsigned char charbuf[NLS_MAX_CHARSET_SIZE]; 313 unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
314 int chl, chi; 314 int chl, chi;
315 int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen; 315 int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
467 return 0; 467 return 0;
468 } 468 }
469 469
470 i = jiffies & 0xffff; 470 i = jiffies;
471 sz = (jiffies >> 16) & 0x7; 471 sz = (jiffies >> 16) & 0x7;
472 if (baselen > 2) { 472 if (baselen > 2) {
473 baselen = numtail2_baselen; 473 baselen = numtail2_baselen;
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
476 name_res[baselen + 4] = '~'; 476 name_res[baselen + 4] = '~';
477 name_res[baselen + 5] = '1' + sz; 477 name_res[baselen + 5] = '1' + sz;
478 while (1) { 478 while (1) {
479 sprintf(buf, "%04X", i); 479 snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
480 memcpy(&name_res[baselen], buf, 4); 480 memcpy(&name_res[baselen], buf, 4);
481 if (vfat_find_form(dir, name_res) < 0) 481 if (vfat_find_form(dir, name_res) < 0)
482 break; 482 break;
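The vfat shortname fix shrinks buf to 5 bytes and replaces the unbounded sprintf() with snprintf() plus a 16-bit mask, so "%04X" always emits exactly four hex digits and a terminating NUL. A minimal user-space illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long i = 0x12345;	/* anything wider than 16 bits */
		char buf[5];			/* four hex digits plus the NUL */

		/* the mask keeps "%04X" at exactly four digits and snprintf()
		 * guarantees NUL termination within sizeof(buf) */
		snprintf(buf, sizeof(buf), "%04X", (unsigned int)(i & 0xffff));
		printf("%s\n", buf);		/* prints 2345 */
		return 0;
	}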
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 18e8c144c7f1..243c00071f76 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -81,6 +81,7 @@ static void writeseg_end_io(struct bio *bio, int err)
81 prefetchw(&bvec->bv_page->flags); 81 prefetchw(&bvec->bv_page->flags);
82 82
83 end_page_writeback(page); 83 end_page_writeback(page);
84 page_cache_release(page);
84 } while (bvec >= bio->bi_io_vec); 85 } while (bvec >= bio->bi_io_vec);
85 bio_put(bio); 86 bio_put(bio);
86 if (atomic_dec_and_test(&super->s_pending_writes)) 87 if (atomic_dec_and_test(&super->s_pending_writes))
@@ -98,8 +99,10 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
98 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); 99 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
99 int i; 100 int i;
100 101
102 if (max_pages > BIO_MAX_PAGES)
103 max_pages = BIO_MAX_PAGES;
101 bio = bio_alloc(GFP_NOFS, max_pages); 104 bio = bio_alloc(GFP_NOFS, max_pages);
102 BUG_ON(!bio); /* FIXME: handle this */ 105 BUG_ON(!bio);
103 106
104 for (i = 0; i < nr_pages; i++) { 107 for (i = 0; i < nr_pages; i++) {
105 if (i >= max_pages) { 108 if (i >= max_pages) {
@@ -192,8 +195,10 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
192 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); 195 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
193 int i; 196 int i;
194 197
198 if (max_pages > BIO_MAX_PAGES)
199 max_pages = BIO_MAX_PAGES;
195 bio = bio_alloc(GFP_NOFS, max_pages); 200 bio = bio_alloc(GFP_NOFS, max_pages);
196 BUG_ON(!bio); /* FIXME: handle this */ 201 BUG_ON(!bio);
197 202
198 for (i = 0; i < nr_pages; i++) { 203 for (i = 0; i < nr_pages; i++) {
199 if (i >= max_pages) { 204 if (i >= max_pages) {
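A queue advertising a very large max_hw_sectors would otherwise make __bdev_writeseg() and do_erase() ask bio_alloc() for more bio_vecs than it can provide (BIO_MAX_PAGES is the upper bound), so the count is clamped first. The same clamp is often written with min_t(); a fragment of that pattern, under the assumption that the surrounding context is unchanged:

	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);

	max_pages = min_t(unsigned int, max_pages, BIO_MAX_PAGES);	/* bio_alloc() limit */
	bio = bio_alloc(GFP_NOFS, max_pages);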
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index e1cb99566100..2396a85c0f55 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -303,12 +303,12 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
303 (filler_t *)logfs_readpage, NULL); 303 (filler_t *)logfs_readpage, NULL);
304 if (IS_ERR(page)) 304 if (IS_ERR(page))
305 return PTR_ERR(page); 305 return PTR_ERR(page);
306 dd = kmap_atomic(page, KM_USER0); 306 dd = kmap(page);
307 BUG_ON(dd->namelen == 0); 307 BUG_ON(dd->namelen == 0);
308 308
309 full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), 309 full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
310 pos, be64_to_cpu(dd->ino), dd->type); 310 pos, be64_to_cpu(dd->ino), dd->type);
311 kunmap_atomic(dd, KM_USER0); 311 kunmap(page);
312 page_cache_release(page); 312 page_cache_release(page);
313 if (full) 313 if (full)
314 break; 314 break;
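The readdir path switches from kmap_atomic() to kmap(), presumably because filldir() copies the entry to user space and may fault, and therefore sleep, which is not allowed inside an atomic kmap section. The sleeping-safe mapping, sketched with a hypothetical helper:

	#include <linux/highmem.h>

	/* hypothetical helper illustrating the sleeping-safe mapping */
	static void use_page_sleeping(struct page *page)
	{
		void *addr = kmap(page);	/* may sleep, and may be held across sleeps */

		/* ... hand addr-based data to a callback that can fault or
		 * copy to user space ... */

		kunmap(page);			/* plain kunmap() pairs with kmap() */
	}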
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index f186043e862a..33bd260b8309 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -801,6 +801,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
801{ 801{
802 struct logfs_super *super = logfs_super(sb); 802 struct logfs_super *super = logfs_super(sb);
803 struct logfs_area *area = super->s_journal_area; 803 struct logfs_area *area = super->s_journal_area;
804 struct btree_head32 *head = &super->s_reserved_segments;
804 u32 segno, ec; 805 u32 segno, ec;
805 int i, err; 806 int i, err;
806 807
@@ -808,6 +809,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
808 /* Drop old segments */ 809 /* Drop old segments */
809 journal_for_each(i) 810 journal_for_each(i)
810 if (super->s_journal_seg[i]) { 811 if (super->s_journal_seg[i]) {
812 btree_remove32(head, super->s_journal_seg[i]);
811 logfs_set_segment_unreserved(sb, 813 logfs_set_segment_unreserved(sb,
812 super->s_journal_seg[i], 814 super->s_journal_seg[i],
813 super->s_journal_ec[i]); 815 super->s_journal_ec[i]);
@@ -820,8 +822,13 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
820 super->s_journal_seg[i] = segno; 822 super->s_journal_seg[i] = segno;
821 super->s_journal_ec[i] = ec; 823 super->s_journal_ec[i] = ec;
822 logfs_set_segment_reserved(sb, segno); 824 logfs_set_segment_reserved(sb, segno);
825 err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
826 BUG_ON(err); /* mempool should prevent this */
827 err = logfs_erase_segment(sb, segno, 1);
828 BUG_ON(err); /* FIXME: remount-ro would be nicer */
823 } 829 }
824 /* Manually move journal_area */ 830 /* Manually move journal_area */
831 freeseg(sb, area->a_segno);
825 area->a_segno = super->s_journal_seg[0]; 832 area->a_segno = super->s_journal_seg[0];
826 area->a_is_open = 0; 833 area->a_is_open = 0;
827 area->a_used_bytes = 0; 834 area->a_used_bytes = 0;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 129779431373..b84b0eec6024 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -587,6 +587,7 @@ void move_page_to_btree(struct page *page);
587int logfs_init_mapping(struct super_block *sb); 587int logfs_init_mapping(struct super_block *sb);
588void logfs_sync_area(struct logfs_area *area); 588void logfs_sync_area(struct logfs_area *area);
589void logfs_sync_segments(struct super_block *sb); 589void logfs_sync_segments(struct super_block *sb);
590void freeseg(struct super_block *sb, u32 segno);
590 591
591/* area handling */ 592/* area handling */
592int logfs_init_areas(struct super_block *sb); 593int logfs_init_areas(struct super_block *sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index d5919af2c7a7..bff40253dfb2 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1595,7 +1595,6 @@ int logfs_delete(struct inode *inode, pgoff_t index,
1595 return ret; 1595 return ret;
1596} 1596}
1597 1597
1598/* Rewrite cannot mark the inode dirty but has to write it immediatly. */
1599int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs, 1598int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
1600 gc_level_t gc_level, long flags) 1599 gc_level_t gc_level, long flags)
1601{ 1600{
@@ -1612,6 +1611,18 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
1612 if (level != 0) 1611 if (level != 0)
1613 alloc_indirect_block(inode, page, 0); 1612 alloc_indirect_block(inode, page, 0);
1614 err = logfs_write_buf(inode, page, flags); 1613 err = logfs_write_buf(inode, page, flags);
1614 if (!err && shrink_level(gc_level) == 0) {
1615 /* Rewrite cannot mark the inode dirty but has to
1616 * write it immediatly.
1617 * Q: Can't we just create an alias for the inode
1618 * instead? And if not, why not?
1619 */
1620 if (inode->i_ino == LOGFS_INO_MASTER)
1621 logfs_write_anchor(inode->i_sb);
1622 else {
1623 err = __logfs_write_inode(inode, flags);
1624 }
1625 }
1615 } 1626 }
1616 logfs_put_write_page(page); 1627 logfs_put_write_page(page);
1617 return err; 1628 return err;
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 614d7a6fda2d..801a3a141625 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -94,50 +94,58 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
94 } while (len); 94 } while (len);
95} 95}
96 96
97/* 97static void pad_partial_page(struct logfs_area *area)
98 * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
99 */
100static void pad_wbuf(struct logfs_area *area, int final)
101{ 98{
102 struct super_block *sb = area->a_sb; 99 struct super_block *sb = area->a_sb;
103 struct logfs_super *super = logfs_super(sb);
104 struct page *page; 100 struct page *page;
105 u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); 101 u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
106 pgoff_t index = ofs >> PAGE_SHIFT; 102 pgoff_t index = ofs >> PAGE_SHIFT;
107 long offset = ofs & (PAGE_SIZE-1); 103 long offset = ofs & (PAGE_SIZE-1);
108 u32 len = PAGE_SIZE - offset; 104 u32 len = PAGE_SIZE - offset;
109 105
110 if (len == PAGE_SIZE) { 106 if (len % PAGE_SIZE) {
111 /* The math in this function can surely use some love */ 107 page = get_mapping_page(sb, index, 0);
112 len = 0;
113 }
114 if (len) {
115 BUG_ON(area->a_used_bytes >= super->s_segsize);
116
117 page = get_mapping_page(area->a_sb, index, 0);
118 BUG_ON(!page); /* FIXME: reserve a pool */ 108 BUG_ON(!page); /* FIXME: reserve a pool */
119 memset(page_address(page) + offset, 0xff, len); 109 memset(page_address(page) + offset, 0xff, len);
120 SetPagePrivate(page); 110 SetPagePrivate(page);
121 page_cache_release(page); 111 page_cache_release(page);
122 } 112 }
113}
123 114
124 if (!final) 115static void pad_full_pages(struct logfs_area *area)
125 return; 116{
117 struct super_block *sb = area->a_sb;
118 struct logfs_super *super = logfs_super(sb);
119 u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
120 u32 len = super->s_segsize - area->a_used_bytes;
121 pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
122 pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
123 struct page *page;
126 124
127 area->a_used_bytes += len; 125 while (no_indizes) {
128 for ( ; area->a_used_bytes < super->s_segsize; 126 page = get_mapping_page(sb, index, 0);
129 area->a_used_bytes += PAGE_SIZE) {
130 /* Memset another page */
131 index++;
132 page = get_mapping_page(area->a_sb, index, 0);
133 BUG_ON(!page); /* FIXME: reserve a pool */ 127 BUG_ON(!page); /* FIXME: reserve a pool */
134 memset(page_address(page), 0xff, PAGE_SIZE); 128 SetPageUptodate(page);
129 memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
135 SetPagePrivate(page); 130 SetPagePrivate(page);
136 page_cache_release(page); 131 page_cache_release(page);
132 index++;
133 no_indizes--;
137 } 134 }
138} 135}
139 136
140/* 137/*
138 * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
139 * Also make sure we allocate (and memset) all pages for final writeout.
140 */
141static void pad_wbuf(struct logfs_area *area, int final)
142{
143 pad_partial_page(area);
144 if (final)
145 pad_full_pages(area);
146}
147
148/*
141 * We have to be careful with the alias tree. Since lookup is done by bix, 149 * We have to be careful with the alias tree. Since lookup is done by bix,
142 * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with 150 * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
143 * indirect blocks. So always use it through accessor functions. 151 * indirect blocks. So always use it through accessor functions.
@@ -684,7 +692,7 @@ int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
684 return 0; 692 return 0;
685} 693}
686 694
687static void freeseg(struct super_block *sb, u32 segno) 695void freeseg(struct super_block *sb, u32 segno)
688{ 696{
689 struct logfs_super *super = logfs_super(sb); 697 struct logfs_super *super = logfs_super(sb);
690 struct address_space *mapping = super->s_mapping_inode->i_mapping; 698 struct address_space *mapping = super->s_mapping_inode->i_mapping;
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 46990eafe052..b60bfac3263c 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -278,7 +278,7 @@ static int logfs_recover_sb(struct super_block *sb)
278 } 278 }
279 if (valid0 && valid1 && ds_cmp(ds0, ds1)) { 279 if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
280 printk(KERN_INFO"Superblocks don't match - fixing.\n"); 280 printk(KERN_INFO"Superblocks don't match - fixing.\n");
281 return write_one_sb(sb, super->s_devops->find_last_sb); 281 return logfs_write_sb(sb);
282 } 282 }
283 /* If neither is valid now, something's wrong. Didn't we properly 283 /* If neither is valid now, something's wrong. Didn't we properly
284 * check them before?!? */ 284 * check them before?!? */
@@ -290,6 +290,10 @@ static int logfs_make_writeable(struct super_block *sb)
290{ 290{
291 int err; 291 int err;
292 292
293 err = logfs_open_segfile(sb);
294 if (err)
295 return err;
296
293 /* Repair any broken superblock copies */ 297 /* Repair any broken superblock copies */
294 err = logfs_recover_sb(sb); 298 err = logfs_recover_sb(sb);
295 if (err) 299 if (err)
@@ -300,10 +304,6 @@ static int logfs_make_writeable(struct super_block *sb)
300 if (err) 304 if (err)
301 return err; 305 return err;
302 306
303 err = logfs_open_segfile(sb);
304 if (err)
305 return err;
306
307 /* Do one GC pass before any data gets dirtied */ 307 /* Do one GC pass before any data gets dirtied */
308 logfs_gc_pass(sb); 308 logfs_gc_pass(sb);
309 309
@@ -329,7 +329,7 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
329 329
330 sb->s_root = d_alloc_root(rootdir); 330 sb->s_root = d_alloc_root(rootdir);
331 if (!sb->s_root) 331 if (!sb->s_root)
332 goto fail; 332 goto fail2;
333 333
334 super->s_erase_page = alloc_pages(GFP_KERNEL, 0); 334 super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
335 if (!super->s_erase_page) 335 if (!super->s_erase_page)
@@ -573,8 +573,7 @@ int logfs_get_sb_device(struct file_system_type *type, int flags,
573 return 0; 573 return 0;
574 574
575err1: 575err1:
576 up_write(&sb->s_umount); 576 deactivate_locked_super(sb);
577 deactivate_super(sb);
578 return err; 577 return err;
579err0: 578err0:
580 kfree(super); 579 kfree(super);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9e82adc37b0c..7621db800a74 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -443,12 +443,13 @@ static const struct file_operations proc_lstats_operations = {
443unsigned long badness(struct task_struct *p, unsigned long uptime); 443unsigned long badness(struct task_struct *p, unsigned long uptime);
444static int proc_oom_score(struct task_struct *task, char *buffer) 444static int proc_oom_score(struct task_struct *task, char *buffer)
445{ 445{
446 unsigned long points; 446 unsigned long points = 0;
447 struct timespec uptime; 447 struct timespec uptime;
448 448
449 do_posix_clock_monotonic_gettime(&uptime); 449 do_posix_clock_monotonic_gettime(&uptime);
450 read_lock(&tasklist_lock); 450 read_lock(&tasklist_lock);
451 points = badness(task->group_leader, uptime.tv_sec); 451 if (pid_alive(task))
452 points = badness(task, uptime.tv_sec);
452 read_unlock(&tasklist_lock); 453 read_unlock(&tasklist_lock);
453 return sprintf(buffer, "%lu\n", points); 454 return sprintf(buffer, "%lu\n", points);
454} 455}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2d45889931f6..caf0337dff73 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -407,6 +407,7 @@ static int show_smap(struct seq_file *m, void *v)
407 407
408 memset(&mss, 0, sizeof mss); 408 memset(&mss, 0, sizeof mss);
409 mss.vma = vma; 409 mss.vma = vma;
410 /* mmap_sem is held in m_start */
410 if (vma->vm_mm && !is_vm_hugetlb_page(vma)) 411 if (vma->vm_mm && !is_vm_hugetlb_page(vma))
411 walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); 412 walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
412 413
@@ -553,7 +554,8 @@ const struct file_operations proc_clear_refs_operations = {
553}; 554};
554 555
555struct pagemapread { 556struct pagemapread {
556 u64 __user *out, *end; 557 int pos, len;
558 u64 *buffer;
557}; 559};
558 560
559#define PM_ENTRY_BYTES sizeof(u64) 561#define PM_ENTRY_BYTES sizeof(u64)
@@ -576,10 +578,8 @@ struct pagemapread {
576static int add_to_pagemap(unsigned long addr, u64 pfn, 578static int add_to_pagemap(unsigned long addr, u64 pfn,
577 struct pagemapread *pm) 579 struct pagemapread *pm)
578{ 580{
579 if (put_user(pfn, pm->out)) 581 pm->buffer[pm->pos++] = pfn;
580 return -EFAULT; 582 if (pm->pos >= pm->len)
581 pm->out++;
582 if (pm->out >= pm->end)
583 return PM_END_OF_BUFFER; 583 return PM_END_OF_BUFFER;
584 return 0; 584 return 0;
585} 585}
@@ -721,21 +721,20 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
721 * determine which areas of memory are actually mapped and llseek to 721 * determine which areas of memory are actually mapped and llseek to
722 * skip over unmapped regions. 722 * skip over unmapped regions.
723 */ 723 */
724#define PAGEMAP_WALK_SIZE (PMD_SIZE)
724static ssize_t pagemap_read(struct file *file, char __user *buf, 725static ssize_t pagemap_read(struct file *file, char __user *buf,
725 size_t count, loff_t *ppos) 726 size_t count, loff_t *ppos)
726{ 727{
727 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 728 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
728 struct page **pages, *page;
729 unsigned long uaddr, uend;
730 struct mm_struct *mm; 729 struct mm_struct *mm;
731 struct pagemapread pm; 730 struct pagemapread pm;
732 int pagecount;
733 int ret = -ESRCH; 731 int ret = -ESRCH;
734 struct mm_walk pagemap_walk = {}; 732 struct mm_walk pagemap_walk = {};
735 unsigned long src; 733 unsigned long src;
736 unsigned long svpfn; 734 unsigned long svpfn;
737 unsigned long start_vaddr; 735 unsigned long start_vaddr;
738 unsigned long end_vaddr; 736 unsigned long end_vaddr;
737 int copied = 0;
739 738
740 if (!task) 739 if (!task)
741 goto out; 740 goto out;
@@ -758,35 +757,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
758 if (!mm) 757 if (!mm)
759 goto out_task; 758 goto out_task;
760 759
761 760 pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
762 uaddr = (unsigned long)buf & PAGE_MASK; 761 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
763 uend = (unsigned long)(buf + count);
764 pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
765 ret = 0;
766 if (pagecount == 0)
767 goto out_mm;
768 pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
769 ret = -ENOMEM; 762 ret = -ENOMEM;
770 if (!pages) 763 if (!pm.buffer)
771 goto out_mm; 764 goto out_mm;
772 765
773 down_read(&current->mm->mmap_sem);
774 ret = get_user_pages(current, current->mm, uaddr, pagecount,
775 1, 0, pages, NULL);
776 up_read(&current->mm->mmap_sem);
777
778 if (ret < 0)
779 goto out_free;
780
781 if (ret != pagecount) {
782 pagecount = ret;
783 ret = -EFAULT;
784 goto out_pages;
785 }
786
787 pm.out = (u64 __user *)buf;
788 pm.end = (u64 __user *)(buf + count);
789
790 pagemap_walk.pmd_entry = pagemap_pte_range; 766 pagemap_walk.pmd_entry = pagemap_pte_range;
791 pagemap_walk.pte_hole = pagemap_pte_hole; 767 pagemap_walk.pte_hole = pagemap_pte_hole;
792 pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; 768 pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
@@ -808,23 +784,36 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
808 * user buffer is tracked in "pm", and the walk 784 * user buffer is tracked in "pm", and the walk
809 * will stop when we hit the end of the buffer. 785 * will stop when we hit the end of the buffer.
810 */ 786 */
811 ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk); 787 ret = 0;
812 if (ret == PM_END_OF_BUFFER) 788 while (count && (start_vaddr < end_vaddr)) {
813 ret = 0; 789 int len;
814 /* don't need mmap_sem for these, but this looks cleaner */ 790 unsigned long end;
815 *ppos += (char __user *)pm.out - buf; 791
816 if (!ret) 792 pm.pos = 0;
817 ret = (char __user *)pm.out - buf; 793 end = start_vaddr + PAGEMAP_WALK_SIZE;
818 794 /* overflow ? */
819out_pages: 795 if (end < start_vaddr || end > end_vaddr)
820 for (; pagecount; pagecount--) { 796 end = end_vaddr;
821 page = pages[pagecount-1]; 797 down_read(&mm->mmap_sem);
822 if (!PageReserved(page)) 798 ret = walk_page_range(start_vaddr, end, &pagemap_walk);
823 SetPageDirty(page); 799 up_read(&mm->mmap_sem);
824 page_cache_release(page); 800 start_vaddr = end;
801
802 len = min(count, PM_ENTRY_BYTES * pm.pos);
803 if (copy_to_user(buf, pm.buffer, len) < 0) {
804 ret = -EFAULT;
805 goto out_free;
806 }
807 copied += len;
808 buf += len;
809 count -= len;
825 } 810 }
811 *ppos += copied;
812 if (!ret || ret == PM_END_OF_BUFFER)
813 ret = copied;
814
826out_free: 815out_free:
827 kfree(pages); 816 kfree(pm.buffer);
828out_mm: 817out_mm:
829 mmput(mm); 818 mmput(mm);
830out_task: 819out_task:
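
The pagemap_read() rewrite above drops the get_user_pages()-based direct write into the caller's buffer and instead walks at most PAGEMAP_WALK_SIZE of address space per iteration into a small kernel buffer (pm.buffer), copying each filled chunk out with copy_to_user() and taking mmap_sem only around the walk itself. Below is a minimal userspace sketch of that chunked bounce-buffer shape, assuming a hypothetical produce_entries() as the data source and memcpy() standing in for copy_to_user(); it illustrates the pattern only, it is not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define ENTRY_BYTES   sizeof(uint64_t)
#define WALK_ENTRIES  512                 /* entries produced per chunk */

/* Hypothetical producer: fills up to 'max' entries starting at *pos, returns the count. */
static size_t produce_entries(uint64_t *buf, size_t max, uint64_t *pos, uint64_t end)
{
	size_t n = 0;

	while (n < max && *pos < end)
		buf[n++] = (*pos)++ * 2;  /* stand-in for one pagemap entry */
	return n;
}

/* Copy entries to 'out' in bounded chunks via a bounce buffer, as pagemap_read() now does. */
static ssize_t chunked_read(uint64_t *out, size_t count_bytes, uint64_t start, uint64_t end)
{
	uint64_t *buffer = malloc(WALK_ENTRIES * ENTRY_BYTES);
	size_t copied = 0;

	if (!buffer)
		return -1;

	while (count_bytes && start < end) {
		size_t produced = produce_entries(buffer, WALK_ENTRIES, &start, end);
		size_t len = produced * ENTRY_BYTES;

		if (len > count_bytes)
			len = count_bytes;
		memcpy(out, buffer, len);          /* copy_to_user() in the kernel */
		out += len / ENTRY_BYTES;
		copied += len;
		count_bytes -= len;
		if (!produced)
			break;
	}
	free(buffer);
	return (ssize_t)copied;
}

int main(void)
{
	uint64_t out[16];
	ssize_t n = chunked_read(out, sizeof(out), 0, 1000);

	printf("copied %zd bytes, first entry %llu\n", n,
	       (unsigned long long)out[0]);
	return 0;
}

Keeping the chunk bounded keeps the scratch buffer small and, in the kernel version, means mmap_sem is never held across a copy to userspace that might fault.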
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d8fd90d83ab3..59125fb36d42 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1619 save_mount_options(s, data); 1619 save_mount_options(s, data);
1620 1620
1621 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); 1621 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
1622 if (!sbi) { 1622 if (!sbi)
1623 errval = -ENOMEM; 1623 return -ENOMEM;
1624 goto error_alloc;
1625 }
1626 s->s_fs_info = sbi; 1624 s->s_fs_info = sbi;
1627 /* Set default values for options: non-aggressive tails, RO on errors */ 1625 /* Set default values for options: non-aggressive tails, RO on errors */
1628 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); 1626 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
@@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1879 return (0); 1877 return (0);
1880 1878
1881error: 1879error:
1882 reiserfs_write_unlock(s);
1883error_alloc:
1884 if (jinit_done) { /* kill the commit thread, free journal ram */ 1880 if (jinit_done) { /* kill the commit thread, free journal ram */
1885 journal_release_error(NULL, s); 1881 journal_release_error(NULL, s);
1886 } 1882 }
1887 1883
1884 reiserfs_write_unlock(s);
1885
1888 reiserfs_free_bitmap_cache(s); 1886 reiserfs_free_bitmap_cache(s);
1889 if (SB_BUFFER_WITH_SB(s)) 1887 if (SB_BUFFER_WITH_SB(s))
1890 brelse(SB_BUFFER_WITH_SB(s)); 1888 brelse(SB_BUFFER_WITH_SB(s));
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index cca2845bca55..2f3b3a00b7a3 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1546,39 +1546,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1546{ 1546{
1547} 1547}
1548 1548
1549 1549#include "drm_mem_util.h"
1550static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
1551{
1552 if (size != 0 && nmemb > ULONG_MAX / size)
1553 return NULL;
1554
1555 if (size * nmemb <= PAGE_SIZE)
1556 return kcalloc(nmemb, size, GFP_KERNEL);
1557
1558 return __vmalloc(size * nmemb,
1559 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
1560}
1561
1562/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
1563static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
1564{
1565 if (size != 0 && nmemb > ULONG_MAX / size)
1566 return NULL;
1567
1568 if (size * nmemb <= PAGE_SIZE)
1569 return kmalloc(nmemb * size, GFP_KERNEL);
1570
1571 return __vmalloc(size * nmemb,
1572 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
1573}
1574
1575static __inline void drm_free_large(void *ptr)
1576{
1577 if (!is_vmalloc_addr(ptr))
1578 return kfree(ptr);
1579
1580 vfree(ptr);
1581}
1582/*@}*/ 1550/*@}*/
1583 1551
1584#endif /* __KERNEL__ */ 1552#endif /* __KERNEL__ */
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
new file mode 100644
index 000000000000..6bd325fedc87
--- /dev/null
+++ b/include/drm/drm_mem_util.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jesse Barnes <jbarnes@virtuousgeek.org>
25 *
26 */
27#ifndef _DRM_MEM_UTIL_H_
28#define _DRM_MEM_UTIL_H_
29
30#include <linux/vmalloc.h>
31
32static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
33{
34 if (size != 0 && nmemb > ULONG_MAX / size)
35 return NULL;
36
37 if (size * nmemb <= PAGE_SIZE)
38 return kcalloc(nmemb, size, GFP_KERNEL);
39
40 return __vmalloc(size * nmemb,
41 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
42}
43
44/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
45static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
46{
47 if (size != 0 && nmemb > ULONG_MAX / size)
48 return NULL;
49
50 if (size * nmemb <= PAGE_SIZE)
51 return kmalloc(nmemb * size, GFP_KERNEL);
52
53 return __vmalloc(size * nmemb,
54 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
55}
56
57static __inline void drm_free_large(void *ptr)
58{
59 if (!is_vmalloc_addr(ptr))
60 return kfree(ptr);
61
62 vfree(ptr);
63}
64
65#endif
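
drm_calloc_large() and drm_malloc_ab() above, now shared via drm_mem_util.h, guard the nmemb * size multiplication against overflow before choosing between the slab allocators and __vmalloc(). A rough userspace analogue of just that overflow guard, assuming SIZE_MAX as the ceiling and plain calloc() in place of the kernel allocators:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Return zeroed storage for nmemb elements of 'size' bytes, or NULL on overflow/failure. */
static void *calloc_large(size_t nmemb, size_t size)
{
	/* Same guard as drm_calloc_large(): reject nmemb * size overflow up front. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;

	return calloc(nmemb, size);	/* kernel code picks kcalloc() or __vmalloc() here */
}

int main(void)
{
	void *ok  = calloc_large(1024, sizeof(uint64_t));
	void *bad = calloc_large(SIZE_MAX, 2);	/* would overflow, must return NULL */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}

Comparing nmemb against MAX / size before multiplying rejects overflow without depending on wrap-around behaviour of the multiplication itself.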
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 676104b7818c..04a6ebc27b96 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -410,6 +410,7 @@
410 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 410 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
411 {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 411 {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
412 {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 412 {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
413 {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
413 {0, 0, 0} 414 {0, 0, 0}
414 415
415#define r128_PCI_IDS \ 416#define r128_PCI_IDS \
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3f1b4a4b601..e929c27ede22 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -115,7 +115,6 @@ struct ttm_backend {
115 struct ttm_backend_func *func; 115 struct ttm_backend_func *func;
116}; 116};
117 117
118#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
119#define TTM_PAGE_FLAG_USER (1 << 1) 118#define TTM_PAGE_FLAG_USER (1 << 1)
120#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) 119#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
121#define TTM_PAGE_FLAG_WRITE (1 << 3) 120#define TTM_PAGE_FLAG_WRITE (1 << 3)
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 6816be6c3f77..8b1038607831 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -14,6 +14,9 @@
14#ifndef ASMARM_AMBA_H 14#ifndef ASMARM_AMBA_H
15#define ASMARM_AMBA_H 15#define ASMARM_AMBA_H
16 16
17#include <linux/device.h>
18#include <linux/resource.h>
19
17#define AMBA_NR_IRQS 2 20#define AMBA_NR_IRQS 2
18 21
19struct amba_device { 22struct amba_device {
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h
index b4fbd9862606..5ddd9ad4b19c 100644
--- a/include/linux/amba/pl061.h
+++ b/include/linux/amba/pl061.h
@@ -1,3 +1,5 @@
1#include <linux/types.h>
2
1/* platform data for the PL061 GPIO driver */ 3/* platform data for the PL061 GPIO driver */
2 4
3struct pl061_platform_data { 5struct pl061_platform_data {
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 5a361f85cfec..da7e52b099f3 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -64,9 +64,12 @@ extern bool freeze_task(struct task_struct *p, bool sig_only);
64extern void cancel_freezing(struct task_struct *p); 64extern void cancel_freezing(struct task_struct *p);
65 65
66#ifdef CONFIG_CGROUP_FREEZER 66#ifdef CONFIG_CGROUP_FREEZER
67extern int cgroup_frozen(struct task_struct *task); 67extern int cgroup_freezing_or_frozen(struct task_struct *task);
68#else /* !CONFIG_CGROUP_FREEZER */ 68#else /* !CONFIG_CGROUP_FREEZER */
69static inline int cgroup_frozen(struct task_struct *task) { return 0; } 69static inline int cgroup_freezing_or_frozen(struct task_struct *task)
70{
71 return 0;
72}
70#endif /* !CONFIG_CGROUP_FREEZER */ 73#endif /* !CONFIG_CGROUP_FREEZER */
71 74
72/* 75/*
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 95477038a72a..c8e375440403 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -842,13 +842,6 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
842 842
843extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); 843extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
844 844
845static inline void
846perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
847{
848 if (atomic_read(&perf_swevent_enabled[event_id]))
849 __perf_sw_event(event_id, nr, nmi, regs, addr);
850}
851
852extern void 845extern void
853perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); 846perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
854 847
@@ -887,6 +880,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
887 return perf_arch_fetch_caller_regs(regs, ip, skip); 880 return perf_arch_fetch_caller_regs(regs, ip, skip);
888} 881}
889 882
883static inline void
884perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
885{
886 if (atomic_read(&perf_swevent_enabled[event_id])) {
887 struct pt_regs hot_regs;
888
889 if (!regs) {
890 perf_fetch_caller_regs(&hot_regs, 1);
891 regs = &hot_regs;
892 }
893 __perf_sw_event(event_id, nr, nmi, regs, addr);
894 }
895}
896
890extern void __perf_event_mmap(struct vm_area_struct *vma); 897extern void __perf_event_mmap(struct vm_area_struct *vma);
891 898
892static inline void perf_event_mmap(struct vm_area_struct *vma) 899static inline void perf_event_mmap(struct vm_area_struct *vma)
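
perf_sw_event() above is now defined after perf_fetch_caller_regs() so that, when a caller passes regs == NULL, it can capture a register snapshot into a local pt_regs and hand that to __perf_sw_event() (the context-switch call site in kernel/perf_event.c relies on this). A tiny sketch of that fallback shape, with a made-up struct ctx and capture_ctx() standing in for pt_regs and perf_fetch_caller_regs():

#include <stdio.h>

struct ctx {			/* stand-in for struct pt_regs */
	unsigned long ip;
};

/* Hypothetical capture of the caller's context, like perf_fetch_caller_regs(). */
static void capture_ctx(struct ctx *c)
{
	c->ip = (unsigned long)__builtin_return_address(0);
}

static void emit_event(unsigned int id, const struct ctx *regs)
{
	printf("event %u from ip=%#lx\n", id, regs->ip);
}

/* If the caller has no context at hand, grab one here instead of passing NULL down. */
static void sw_event(unsigned int id, const struct ctx *regs)
{
	struct ctx hot;

	if (!regs) {
		capture_ctx(&hot);
		regs = &hot;
	}
	emit_event(id, regs);
}

int main(void)
{
	struct ctx c = { .ip = 0x1234 };

	sw_event(1, &c);	/* caller-supplied context */
	sw_event(2, NULL);	/* falls back to a locally captured one */
	return 0;
}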
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index d2ccd2798d7a..da5e13975531 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,17 +48,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
48 struct freezer, css); 48 struct freezer, css);
49} 49}
50 50
51int cgroup_frozen(struct task_struct *task) 51int cgroup_freezing_or_frozen(struct task_struct *task)
52{ 52{
53 struct freezer *freezer; 53 struct freezer *freezer;
54 enum freezer_state state; 54 enum freezer_state state;
55 55
56 task_lock(task); 56 task_lock(task);
57 freezer = task_freezer(task); 57 freezer = task_freezer(task);
58 state = freezer->state; 58 if (!freezer->css.cgroup->parent)
59 state = CGROUP_THAWED; /* root cgroup can't be frozen */
60 else
61 state = freezer->state;
59 task_unlock(task); 62 task_unlock(task);
60 63
61 return state == CGROUP_FROZEN; 64 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
62} 65}
63 66
64/* 67/*
diff --git a/kernel/cred.c b/kernel/cred.c
index d84bdef17c9f..e1dbe9eef800 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -365,7 +365,7 @@ struct cred *prepare_usermodehelper_creds(void)
365 365
366 new = kmem_cache_alloc(cred_jar, GFP_ATOMIC); 366 new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
367 if (!new) 367 if (!new)
368 return NULL; 368 goto free_tgcred;
369 369
370 kdebug("prepare_usermodehelper_creds() alloc %p", new); 370 kdebug("prepare_usermodehelper_creds() alloc %p", new);
371 371
@@ -398,6 +398,10 @@ struct cred *prepare_usermodehelper_creds(void)
398 398
399error: 399error:
400 put_cred(new); 400 put_cred(new);
401free_tgcred:
402#ifdef CONFIG_KEYS
403 kfree(tgcred);
404#endif
401 return NULL; 405 return NULL;
402} 406}
403 407
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c661bb78..31aa9332ef3f 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end)
333 struct early_res *r; 333 struct early_res *r;
334 int i; 334 int i;
335 335
336 if (start == end)
337 return;
338
339 if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
340 return;
341
336try_next: 342try_next:
337 i = find_overlapped_early(start, end); 343 i = find_overlapped_early(start, end);
338 if (i >= max_early_res) 344 if (i >= max_early_res)
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 761fdd2b3034..11f3515ca83f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -69,9 +69,16 @@ struct kgdb_state {
69 struct pt_regs *linux_regs; 69 struct pt_regs *linux_regs;
70}; 70};
71 71
72/* Exception state values */
73#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
74#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
75#define DCPU_IS_SLAVE 0x4 /* Slave cpu enter exception */
76#define DCPU_SSTEP 0x8 /* CPU is single stepping */
77
72static struct debuggerinfo_struct { 78static struct debuggerinfo_struct {
73 void *debuggerinfo; 79 void *debuggerinfo;
74 struct task_struct *task; 80 struct task_struct *task;
81 int exception_state;
75} kgdb_info[NR_CPUS]; 82} kgdb_info[NR_CPUS];
76 83
77/** 84/**
@@ -391,27 +398,22 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
391 398
392/* 399/*
393 * Copy the binary array pointed to by buf into mem. Fix $, #, and 400 * Copy the binary array pointed to by buf into mem. Fix $, #, and
394 * 0x7d escaped with 0x7d. Return a pointer to the character after 401 * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success.
395 * the last byte written. 402 * The input buf is overwritten with the result to write to mem.
396 */ 403 */
397static int kgdb_ebin2mem(char *buf, char *mem, int count) 404static int kgdb_ebin2mem(char *buf, char *mem, int count)
398{ 405{
399 int err = 0; 406 int size = 0;
400 char c; 407 char *c = buf;
401 408
402 while (count-- > 0) { 409 while (count-- > 0) {
403 c = *buf++; 410 c[size] = *buf++;
404 if (c == 0x7d) 411 if (c[size] == 0x7d)
405 c = *buf++ ^ 0x20; 412 c[size] = *buf++ ^ 0x20;
406 413 size++;
407 err = probe_kernel_write(mem, &c, 1);
408 if (err)
409 break;
410
411 mem++;
412 } 414 }
413 415
414 return err; 416 return probe_kernel_write(mem, c, size);
415} 417}
416 418
417/* 419/*
@@ -563,49 +565,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
563} 565}
564 566
565/* 567/*
566 * CPU debug state control:
567 */
568
569#ifdef CONFIG_SMP
570static void kgdb_wait(struct pt_regs *regs)
571{
572 unsigned long flags;
573 int cpu;
574
575 local_irq_save(flags);
576 cpu = raw_smp_processor_id();
577 kgdb_info[cpu].debuggerinfo = regs;
578 kgdb_info[cpu].task = current;
579 /*
580 * Make sure the above info reaches the primary CPU before
581 * our cpu_in_kgdb[] flag setting does:
582 */
583 smp_wmb();
584 atomic_set(&cpu_in_kgdb[cpu], 1);
585
586 /* Disable any cpu specific hw breakpoints */
587 kgdb_disable_hw_debug(regs);
588
589 /* Wait till primary CPU is done with debugging */
590 while (atomic_read(&passive_cpu_wait[cpu]))
591 cpu_relax();
592
593 kgdb_info[cpu].debuggerinfo = NULL;
594 kgdb_info[cpu].task = NULL;
595
596 /* fix up hardware debug registers on local cpu */
597 if (arch_kgdb_ops.correct_hw_break)
598 arch_kgdb_ops.correct_hw_break();
599
600 /* Signal the primary CPU that we are done: */
601 atomic_set(&cpu_in_kgdb[cpu], 0);
602 touch_softlockup_watchdog_sync();
603 clocksource_touch_watchdog();
604 local_irq_restore(flags);
605}
606#endif
607
608/*
609 * Some architectures need cache flushes when we set/clear a 568 * Some architectures need cache flushes when we set/clear a
610 * breakpoint: 569 * breakpoint:
611 */ 570 */
@@ -1400,34 +1359,13 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
1400 return 1; 1359 return 1;
1401} 1360}
1402 1361
1403/* 1362static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
1404 * kgdb_handle_exception() - main entry point from a kernel exception
1405 *
1406 * Locking hierarchy:
1407 * interface locks, if any (begin_session)
1408 * kgdb lock (kgdb_active)
1409 */
1410int
1411kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
1412{ 1363{
1413 struct kgdb_state kgdb_var;
1414 struct kgdb_state *ks = &kgdb_var;
1415 unsigned long flags; 1364 unsigned long flags;
1416 int sstep_tries = 100; 1365 int sstep_tries = 100;
1417 int error = 0; 1366 int error = 0;
1418 int i, cpu; 1367 int i, cpu;
1419 1368 int trace_on = 0;
1420 ks->cpu = raw_smp_processor_id();
1421 ks->ex_vector = evector;
1422 ks->signo = signo;
1423 ks->ex_vector = evector;
1424 ks->err_code = ecode;
1425 ks->kgdb_usethreadid = 0;
1426 ks->linux_regs = regs;
1427
1428 if (kgdb_reenter_check(ks))
1429 return 0; /* Ouch, double exception ! */
1430
1431acquirelock: 1369acquirelock:
1432 /* 1370 /*
1433 * Interrupts will be restored by the 'trap return' code, except when 1371 * Interrupts will be restored by the 'trap return' code, except when
@@ -1435,13 +1373,43 @@ acquirelock:
1435 */ 1373 */
1436 local_irq_save(flags); 1374 local_irq_save(flags);
1437 1375
1438 cpu = raw_smp_processor_id(); 1376 cpu = ks->cpu;
1377 kgdb_info[cpu].debuggerinfo = regs;
1378 kgdb_info[cpu].task = current;
1379 /*
1380 * Make sure the above info reaches the primary CPU before
1381 * our cpu_in_kgdb[] flag setting does:
1382 */
1383 atomic_inc(&cpu_in_kgdb[cpu]);
1439 1384
1440 /* 1385 /*
1441 * Acquire the kgdb_active lock: 1386 * CPU will loop if it is a slave or request to become a kgdb
1387 * master cpu and acquire the kgdb_active lock:
1442 */ 1388 */
1443 while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1) 1389 while (1) {
1390 if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
1391 if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
1392 break;
1393 } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
1394 if (!atomic_read(&passive_cpu_wait[cpu]))
1395 goto return_normal;
1396 } else {
1397return_normal:
1398 /* Return to normal operation by executing any
1399 * hw breakpoint fixup.
1400 */
1401 if (arch_kgdb_ops.correct_hw_break)
1402 arch_kgdb_ops.correct_hw_break();
1403 if (trace_on)
1404 tracing_on();
1405 atomic_dec(&cpu_in_kgdb[cpu]);
1406 touch_softlockup_watchdog_sync();
1407 clocksource_touch_watchdog();
1408 local_irq_restore(flags);
1409 return 0;
1410 }
1444 cpu_relax(); 1411 cpu_relax();
1412 }
1445 1413
1446 /* 1414 /*
1447 * For single stepping, try to only enter on the processor 1415 * For single stepping, try to only enter on the processor
@@ -1475,9 +1443,6 @@ acquirelock:
1475 if (kgdb_io_ops->pre_exception) 1443 if (kgdb_io_ops->pre_exception)
1476 kgdb_io_ops->pre_exception(); 1444 kgdb_io_ops->pre_exception();
1477 1445
1478 kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
1479 kgdb_info[ks->cpu].task = current;
1480
1481 kgdb_disable_hw_debug(ks->linux_regs); 1446 kgdb_disable_hw_debug(ks->linux_regs);
1482 1447
1483 /* 1448 /*
@@ -1486,15 +1451,9 @@ acquirelock:
1486 */ 1451 */
1487 if (!kgdb_single_step) { 1452 if (!kgdb_single_step) {
1488 for (i = 0; i < NR_CPUS; i++) 1453 for (i = 0; i < NR_CPUS; i++)
1489 atomic_set(&passive_cpu_wait[i], 1); 1454 atomic_inc(&passive_cpu_wait[i]);
1490 } 1455 }
1491 1456
1492 /*
1493 * spin_lock code is good enough as a barrier so we don't
1494 * need one here:
1495 */
1496 atomic_set(&cpu_in_kgdb[ks->cpu], 1);
1497
1498#ifdef CONFIG_SMP 1457#ifdef CONFIG_SMP
1499 /* Signal the other CPUs to enter kgdb_wait() */ 1458 /* Signal the other CPUs to enter kgdb_wait() */
1500 if ((!kgdb_single_step) && kgdb_do_roundup) 1459 if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1518,6 +1477,9 @@ acquirelock:
1518 kgdb_single_step = 0; 1477 kgdb_single_step = 0;
1519 kgdb_contthread = current; 1478 kgdb_contthread = current;
1520 exception_level = 0; 1479 exception_level = 0;
1480 trace_on = tracing_is_on();
1481 if (trace_on)
1482 tracing_off();
1521 1483
1522 /* Talk to debugger with gdbserial protocol */ 1484 /* Talk to debugger with gdbserial protocol */
1523 error = gdb_serial_stub(ks); 1485 error = gdb_serial_stub(ks);
@@ -1526,13 +1488,11 @@ acquirelock:
1526 if (kgdb_io_ops->post_exception) 1488 if (kgdb_io_ops->post_exception)
1527 kgdb_io_ops->post_exception(); 1489 kgdb_io_ops->post_exception();
1528 1490
1529 kgdb_info[ks->cpu].debuggerinfo = NULL; 1491 atomic_dec(&cpu_in_kgdb[ks->cpu]);
1530 kgdb_info[ks->cpu].task = NULL;
1531 atomic_set(&cpu_in_kgdb[ks->cpu], 0);
1532 1492
1533 if (!kgdb_single_step) { 1493 if (!kgdb_single_step) {
1534 for (i = NR_CPUS-1; i >= 0; i--) 1494 for (i = NR_CPUS-1; i >= 0; i--)
1535 atomic_set(&passive_cpu_wait[i], 0); 1495 atomic_dec(&passive_cpu_wait[i]);
1536 /* 1496 /*
1537 * Wait till all the CPUs have quit 1497 * Wait till all the CPUs have quit
1538 * from the debugger. 1498 * from the debugger.
@@ -1551,6 +1511,8 @@ kgdb_restore:
1551 else 1511 else
1552 kgdb_sstep_pid = 0; 1512 kgdb_sstep_pid = 0;
1553 } 1513 }
1514 if (trace_on)
1515 tracing_on();
1554 /* Free kgdb_active */ 1516 /* Free kgdb_active */
1555 atomic_set(&kgdb_active, -1); 1517 atomic_set(&kgdb_active, -1);
1556 touch_softlockup_watchdog_sync(); 1518 touch_softlockup_watchdog_sync();
@@ -1560,13 +1522,52 @@ kgdb_restore:
1560 return error; 1522 return error;
1561} 1523}
1562 1524
1525/*
1526 * kgdb_handle_exception() - main entry point from a kernel exception
1527 *
1528 * Locking hierarchy:
1529 * interface locks, if any (begin_session)
1530 * kgdb lock (kgdb_active)
1531 */
1532int
1533kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
1534{
1535 struct kgdb_state kgdb_var;
1536 struct kgdb_state *ks = &kgdb_var;
1537 int ret;
1538
1539 ks->cpu = raw_smp_processor_id();
1540 ks->ex_vector = evector;
1541 ks->signo = signo;
1542 ks->ex_vector = evector;
1543 ks->err_code = ecode;
1544 ks->kgdb_usethreadid = 0;
1545 ks->linux_regs = regs;
1546
1547 if (kgdb_reenter_check(ks))
1548 return 0; /* Ouch, double exception ! */
1549 kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
1550 ret = kgdb_cpu_enter(ks, regs);
1551 kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
1552 return ret;
1553}
1554
1563int kgdb_nmicallback(int cpu, void *regs) 1555int kgdb_nmicallback(int cpu, void *regs)
1564{ 1556{
1565#ifdef CONFIG_SMP 1557#ifdef CONFIG_SMP
1558 struct kgdb_state kgdb_var;
1559 struct kgdb_state *ks = &kgdb_var;
1560
1561 memset(ks, 0, sizeof(struct kgdb_state));
1562 ks->cpu = cpu;
1563 ks->linux_regs = regs;
1564
1566 if (!atomic_read(&cpu_in_kgdb[cpu]) && 1565 if (!atomic_read(&cpu_in_kgdb[cpu]) &&
1567 atomic_read(&kgdb_active) != cpu && 1566 atomic_read(&kgdb_active) != -1 &&
1568 atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) { 1567 atomic_read(&kgdb_active) != cpu) {
1569 kgdb_wait((struct pt_regs *)regs); 1568 kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
1569 kgdb_cpu_enter(ks, regs);
1570 kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
1570 return 0; 1571 return 0;
1571 } 1572 }
1572#endif 1573#endif
@@ -1742,11 +1743,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
1742 */ 1743 */
1743void kgdb_breakpoint(void) 1744void kgdb_breakpoint(void)
1744{ 1745{
1745 atomic_set(&kgdb_setting_breakpoint, 1); 1746 atomic_inc(&kgdb_setting_breakpoint);
1746 wmb(); /* Sync point before breakpoint */ 1747 wmb(); /* Sync point before breakpoint */
1747 arch_kgdb_breakpoint(); 1748 arch_kgdb_breakpoint();
1748 wmb(); /* Sync point after breakpoint */ 1749 wmb(); /* Sync point after breakpoint */
1749 atomic_set(&kgdb_setting_breakpoint, 0); 1750 atomic_dec(&kgdb_setting_breakpoint);
1750} 1751}
1751EXPORT_SYMBOL_GPL(kgdb_breakpoint); 1752EXPORT_SYMBOL_GPL(kgdb_breakpoint);
1752 1753
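
kgdb_ebin2mem() above now unescapes the gdb binary payload in place (0x7d marks an escape; the following byte is stored XORed with 0x20) and then issues a single probe_kernel_write() for the decoded run, instead of one probe per byte. A small userspace sketch of the same in-place decode, with memcpy() standing in for probe_kernel_write():

#include <stdio.h>
#include <string.h>

/*
 * Decode a gdb remote-protocol binary payload: 0x7d escapes the next
 * byte, which is stored XORed with 0x20.  Decodes in place and returns
 * the number of decoded bytes, mirroring the reworked kgdb_ebin2mem().
 */
static int ebin_decode(char *buf, int count)
{
	int size = 0;
	char *c = buf;

	while (count-- > 0) {
		c[size] = *buf++;
		if (c[size] == 0x7d)
			c[size] = *buf++ ^ 0x20;
		size++;
	}
	return size;
}

int main(void)
{
	char payload[] = { 'A', 0x7d, 0x5d, 'B' };  /* 0x7d 0x5d encodes a literal 0x7d */
	char mem[8];
	int n = ebin_decode(payload, 3);   /* the request asks for 3 decoded bytes */

	memcpy(mem, payload, n);           /* probe_kernel_write(mem, c, size) in the kernel */
	printf("decoded %d bytes: %02x %02x %02x\n", n,
	       (unsigned char)mem[0], (unsigned char)mem[1], (unsigned char)mem[2]);
	return 0;
}

The in-place decode is safe because the read pointer never falls behind the write index, and batching the write lets the fault check cover the whole run at once.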
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a77266e3e3e1..2f3fbf84215a 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1165,11 +1165,9 @@ void perf_event_task_sched_out(struct task_struct *task,
1165 struct perf_event_context *ctx = task->perf_event_ctxp; 1165 struct perf_event_context *ctx = task->perf_event_ctxp;
1166 struct perf_event_context *next_ctx; 1166 struct perf_event_context *next_ctx;
1167 struct perf_event_context *parent; 1167 struct perf_event_context *parent;
1168 struct pt_regs *regs;
1169 int do_switch = 1; 1168 int do_switch = 1;
1170 1169
1171 regs = task_pt_regs(task); 1170 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1172 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1173 1171
1174 if (likely(!ctx || !cpuctx->task_ctx)) 1172 if (likely(!ctx || !cpuctx->task_ctx))
1175 return; 1173 return;
@@ -2787,12 +2785,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2787 return NULL; 2785 return NULL;
2788} 2786}
2789 2787
2790#ifdef CONFIG_EVENT_TRACING
2791__weak 2788__weak
2792void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) 2789void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
2793{ 2790{
2794} 2791}
2795#endif 2792
2796 2793
2797/* 2794/*
2798 * Output 2795 * Output
@@ -3379,15 +3376,23 @@ static void perf_event_task_output(struct perf_event *event,
3379 struct perf_task_event *task_event) 3376 struct perf_task_event *task_event)
3380{ 3377{
3381 struct perf_output_handle handle; 3378 struct perf_output_handle handle;
3382 int size;
3383 struct task_struct *task = task_event->task; 3379 struct task_struct *task = task_event->task;
3384 int ret; 3380 unsigned long flags;
3381 int size, ret;
3382
3383 /*
3384 * If this CPU attempts to acquire an rq lock held by a CPU spinning
3385 * in perf_output_lock() from interrupt context, it's game over.
3386 */
3387 local_irq_save(flags);
3385 3388
3386 size = task_event->event_id.header.size; 3389 size = task_event->event_id.header.size;
3387 ret = perf_output_begin(&handle, event, size, 0, 0); 3390 ret = perf_output_begin(&handle, event, size, 0, 0);
3388 3391
3389 if (ret) 3392 if (ret) {
3393 local_irq_restore(flags);
3390 return; 3394 return;
3395 }
3391 3396
3392 task_event->event_id.pid = perf_event_pid(event, task); 3397 task_event->event_id.pid = perf_event_pid(event, task);
3393 task_event->event_id.ppid = perf_event_pid(event, current); 3398 task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3398,6 +3403,7 @@ static void perf_event_task_output(struct perf_event *event,
3398 perf_output_put(&handle, task_event->event_id); 3403 perf_output_put(&handle, task_event->event_id);
3399 3404
3400 perf_output_end(&handle); 3405 perf_output_end(&handle);
3406 local_irq_restore(flags);
3401} 3407}
3402 3408
3403static int perf_event_task_match(struct perf_event *event) 3409static int perf_event_task_match(struct perf_event *event)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5ade1bdcf366..71ae29052ab6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -88,12 +88,11 @@ static int try_to_freeze_tasks(bool sig_only)
88 printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " 88 printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
89 "(%d tasks refusing to freeze):\n", 89 "(%d tasks refusing to freeze):\n",
90 elapsed_csecs / 100, elapsed_csecs % 100, todo); 90 elapsed_csecs / 100, elapsed_csecs % 100, todo);
91 show_state();
92 read_lock(&tasklist_lock); 91 read_lock(&tasklist_lock);
93 do_each_thread(g, p) { 92 do_each_thread(g, p) {
94 task_lock(p); 93 task_lock(p);
95 if (freezing(p) && !freezer_should_skip(p)) 94 if (freezing(p) && !freezer_should_skip(p))
96 printk(KERN_ERR " %s\n", p->comm); 95 sched_show_task(p);
97 cancel_freezing(p); 96 cancel_freezing(p);
98 task_unlock(p); 97 task_unlock(p);
99 } while_each_thread(g, p); 98 } while_each_thread(g, p);
@@ -145,7 +144,7 @@ static void thaw_tasks(bool nosig_only)
145 if (nosig_only && should_send_signal(p)) 144 if (nosig_only && should_send_signal(p))
146 continue; 145 continue;
147 146
148 if (cgroup_frozen(p)) 147 if (cgroup_freezing_or_frozen(p))
149 continue; 148 continue;
150 149
151 thaw_process(p); 150 thaw_process(p);
diff --git a/kernel/sched.c b/kernel/sched.c
index 86c7cc1d7c9d..a3dff1f3f9b0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5388,7 +5388,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5388 5388
5389 get_task_struct(mt); 5389 get_task_struct(mt);
5390 task_rq_unlock(rq, &flags); 5390 task_rq_unlock(rq, &flags);
5391 wake_up_process(rq->migration_thread); 5391 wake_up_process(mt);
5392 put_task_struct(mt); 5392 put_task_struct(mt);
5393 wait_for_completion(&req.done); 5393 wait_for_completion(&req.done);
5394 tlb_migrate_finish(p->mm); 5394 tlb_migrate_finish(p->mm);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 67f95aada4b9..9b49db144037 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -518,8 +518,4 @@ void proc_sched_set_task(struct task_struct *p)
518 p->se.nr_wakeups_idle = 0; 518 p->se.nr_wakeups_idle = 0;
519 p->sched_info.bkl_count = 0; 519 p->sched_info.bkl_count = 0;
520#endif 520#endif
521 p->se.sum_exec_runtime = 0;
522 p->se.prev_sum_exec_runtime = 0;
523 p->nvcsw = 0;
524 p->nivcsw = 0;
525} 521}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2c839ca5e5ce..41ca394feb22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1210,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1210 1210
1211 for (i = 0; i < nr_pages; i++) { 1211 for (i = 0; i < nr_pages; i++) {
1212 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) 1212 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1213 return; 1213 goto out;
1214 p = cpu_buffer->pages->next; 1214 p = cpu_buffer->pages->next;
1215 bpage = list_entry(p, struct buffer_page, list); 1215 bpage = list_entry(p, struct buffer_page, list);
1216 list_del_init(&bpage->list); 1216 list_del_init(&bpage->list);
1217 free_buffer_page(bpage); 1217 free_buffer_page(bpage);
1218 } 1218 }
1219 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) 1219 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1220 return; 1220 goto out;
1221 1221
1222 rb_reset_cpu(cpu_buffer); 1222 rb_reset_cpu(cpu_buffer);
1223 rb_check_pages(cpu_buffer); 1223 rb_check_pages(cpu_buffer);
1224 1224
1225out:
1225 spin_unlock_irq(&cpu_buffer->reader_lock); 1226 spin_unlock_irq(&cpu_buffer->reader_lock);
1226} 1227}
1227 1228
@@ -1238,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1238 1239
1239 for (i = 0; i < nr_pages; i++) { 1240 for (i = 0; i < nr_pages; i++) {
1240 if (RB_WARN_ON(cpu_buffer, list_empty(pages))) 1241 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1241 return; 1242 goto out;
1242 p = pages->next; 1243 p = pages->next;
1243 bpage = list_entry(p, struct buffer_page, list); 1244 bpage = list_entry(p, struct buffer_page, list);
1244 list_del_init(&bpage->list); 1245 list_del_init(&bpage->list);
@@ -1247,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1247 rb_reset_cpu(cpu_buffer); 1248 rb_reset_cpu(cpu_buffer);
1248 rb_check_pages(cpu_buffer); 1249 rb_check_pages(cpu_buffer);
1249 1250
1251out:
1250 spin_unlock_irq(&cpu_buffer->reader_lock); 1252 spin_unlock_irq(&cpu_buffer->reader_lock);
1251} 1253}
1252 1254
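
The two hunks above fix early returns inside the reader_lock critical sections of rb_remove_pages() and rb_insert_pages(): when an RB_WARN_ON() fired, the function returned with spin_lock_irq(&cpu_buffer->reader_lock) still held. The fix funnels those paths through a single out: label that drops the lock. A minimal pthread sketch of that single-exit shape, with check_invariant() as a stand-in for RB_WARN_ON():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pages = 3;

/* Stand-in for RB_WARN_ON(): complain and report failure, but never return directly. */
static bool check_invariant(bool broken)
{
	if (broken)
		fprintf(stderr, "warning: invariant violated\n");
	return broken;
}

static void remove_pages(int nr)
{
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < nr; i++) {
		if (check_invariant(pages == 0))
			goto out;	/* an early 'return' here would leak the lock */
		pages--;
	}
out:
	pthread_mutex_unlock(&lock);	/* single exit: the lock is always dropped */
}

int main(void)
{
	remove_pages(2);
	remove_pages(5);	/* trips the check, still unlocks via out: */
	printf("pages left: %d\n", pages);
	return 0;
}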
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f417b9..9d589d8dcd1a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
84 int this_cpu; 84 int this_cpu;
85 u64 now; 85 u64 now;
86 86
87 raw_local_irq_save(flags); 87 local_irq_save(flags);
88 88
89 this_cpu = raw_smp_processor_id(); 89 this_cpu = raw_smp_processor_id();
90 now = cpu_clock(this_cpu); 90 now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
110 arch_spin_unlock(&trace_clock_struct.lock); 110 arch_spin_unlock(&trace_clock_struct.lock);
111 111
112 out: 112 out:
113 raw_local_irq_restore(flags); 113 local_irq_restore(flags);
114 114
115 return now; 115 return now;
116} 116}
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
17static char *perf_trace_buf; 17static char *perf_trace_buf;
18static char *perf_trace_buf_nmi; 18static char *perf_trace_buf_nmi;
19 19
20typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ; 20/*
21 * Force it to be aligned to unsigned long to avoid misaligned accesses
22 * surprises
23 */
24typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
25 perf_trace_t;
21 26
22/* Count the events in use (per event id, not per instance) */ 27/* Count the events in use (per event id, not per instance) */
23static int total_ref_count; 28static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
130 char *trace_buf, *raw_data; 135 char *trace_buf, *raw_data;
131 int pc, cpu; 136 int pc, cpu;
132 137
138 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
139
133 pc = preempt_count(); 140 pc = preempt_count();
134 141
135 /* Protect the per cpu buffer, begin the rcu read side */ 142 /* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
152 raw_data = per_cpu_ptr(trace_buf, cpu); 159 raw_data = per_cpu_ptr(trace_buf, cpu);
153 160
154 /* zero the dead bytes from align to not leak stack to user */ 161 /* zero the dead bytes from align to not leak stack to user */
155 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 162 memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
156 163
157 entry = (struct trace_entry *)raw_data; 164 entry = (struct trace_entry *)raw_data;
158 tracing_generic_entry_update(entry, *irq_flags, pc); 165 tracing_generic_entry_update(entry, *irq_flags, pc);
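
The perf_trace_t change above keeps the buffer the same number of bytes but types it as an array of unsigned long, so per-cpu allocations are naturally aligned for word-sized stores; it also adds a BUILD_BUG_ON() that PERF_MAX_TRACE_SIZE is a multiple of sizeof(unsigned long) and replaces the direct u64 store on the tail with memset(). A small sketch of the same idea using C11 _Static_assert in place of BUILD_BUG_ON(), with a made-up MAX_TRACE_SIZE:

#include <stdio.h>
#include <string.h>

#define MAX_TRACE_SIZE 2048	/* hypothetical stand-in for PERF_MAX_TRACE_SIZE */

/* Word-typed backing storage: same byte size, but aligned for unsigned long access. */
typedef unsigned long trace_buf_t[MAX_TRACE_SIZE / sizeof(unsigned long)];

/* Compile-time equivalent of the BUILD_BUG_ON() added in perf_trace_buf_prepare(). */
_Static_assert(MAX_TRACE_SIZE % sizeof(unsigned long) == 0,
	       "MAX_TRACE_SIZE must be a multiple of sizeof(unsigned long)");

int main(void)
{
	static trace_buf_t buf;
	char *raw = (char *)buf;

	/* Zero the trailing word with memset() rather than a direct u64 store, as the patch does. */
	memset(&raw[sizeof(buf) - sizeof(unsigned long)], 0, sizeof(unsigned long));
	printf("buffer is %zu bytes of %zu-byte aligned elements\n",
	       sizeof(buf), sizeof(unsigned long));
	return 0;
}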
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 8a8f52db7e38..bc0f670a8338 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -200,7 +200,7 @@ endif
200 200
201CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 201CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
202EXTLIBS = -lpthread -lrt -lelf -lm 202EXTLIBS = -lpthread -lrt -lelf -lm
203ALL_CFLAGS = $(CFLAGS) 203ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
204ALL_LDFLAGS = $(LDFLAGS) 204ALL_LDFLAGS = $(LDFLAGS)
205STRIP ?= strip 205STRIP ?= strip
206 206
@@ -492,19 +492,19 @@ ifeq ($(uname_S),Darwin)
492 PTHREAD_LIBS = 492 PTHREAD_LIBS =
493endif 493endif
494 494
495ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 495ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
496ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 496ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
497 msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); 497 msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
498endif 498endif
499 499
500 ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 500 ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
501 BASIC_CFLAGS += -DLIBELF_NO_MMAP 501 BASIC_CFLAGS += -DLIBELF_NO_MMAP
502 endif 502 endif
503else 503else
504 msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); 504 msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
505endif 505endif
506 506
507ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) 507ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
508 msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev); 508 msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev);
509 BASIC_CFLAGS += -DNO_DWARF_SUPPORT 509 BASIC_CFLAGS += -DNO_DWARF_SUPPORT
510else 510else
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 33a414bbba3e..6a72f14c5986 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -208,7 +208,7 @@ static void python_process_event(int cpu, void *data,
208 int size __unused, 208 int size __unused,
209 unsigned long long nsecs, char *comm) 209 unsigned long long nsecs, char *comm)
210{ 210{
211 PyObject *handler, *retval, *context, *t; 211 PyObject *handler, *retval, *context, *t, *obj;
212 static char handler_name[256]; 212 static char handler_name[256];
213 struct format_field *field; 213 struct format_field *field;
214 unsigned long long val; 214 unsigned long long val;
@@ -256,16 +256,23 @@ static void python_process_event(int cpu, void *data,
256 offset &= 0xffff; 256 offset &= 0xffff;
257 } else 257 } else
258 offset = field->offset; 258 offset = field->offset;
259 PyTuple_SetItem(t, n++, 259 obj = PyString_FromString((char *)data + offset);
260 PyString_FromString((char *)data + offset));
261 } else { /* FIELD_IS_NUMERIC */ 260 } else { /* FIELD_IS_NUMERIC */
262 val = read_size(data + field->offset, field->size); 261 val = read_size(data + field->offset, field->size);
263 if (field->flags & FIELD_IS_SIGNED) { 262 if (field->flags & FIELD_IS_SIGNED) {
264 PyTuple_SetItem(t, n++, PyInt_FromLong(val)); 263 if ((long long)val >= LONG_MIN &&
264 (long long)val <= LONG_MAX)
265 obj = PyInt_FromLong(val);
266 else
267 obj = PyLong_FromLongLong(val);
265 } else { 268 } else {
266 PyTuple_SetItem(t, n++, PyInt_FromLong(val)); 269 if (val <= LONG_MAX)
270 obj = PyInt_FromLong(val);
271 else
272 obj = PyLong_FromUnsignedLongLong(val);
267 } 273 }
268 } 274 }
275 PyTuple_SetItem(t, n++, obj);
269 } 276 }
270 277
271 if (_PyTuple_Resize(&t, n) == -1) 278 if (_PyTuple_Resize(&t, n) == -1)