authorOlof Johansson <olof@lixom.net>2012-09-13 01:34:11 -0400
committerOlof Johansson <olof@lixom.net>2012-09-13 01:34:11 -0400
commit025c95a6826ad8acfe871f33c2fa9208beeb38df (patch)
tree220fe156ff60db13bd3a1319f5e20d6b723de5f4
parent0558d7a8ed44e6e53aadb04d2e23145efb2aa8a4 (diff)
parent7f744b17140af1a9c8804a1c81c9dae6bb52a7fb (diff)
Merge branch 'clk' of git://github.com/hzhuang1/linux into next/cleanup
* 'clk' of git://github.com/hzhuang1/linux:
  ARM: mmp: remove unused definition in APBC and APMU
  ARM: mmp: move mmp2 clock definition to separated file
  arm: mmp: move pxa910 clock definition to separated file
  arm: mmp: move pxa168 clock definition to separated file
  arm: mmp: make private clock definition exclude from common clock
  + Linux 3.6-rc4
-rw-r--r--Documentation/block/00-INDEX10
-rw-r--r--Documentation/block/cfq-iosched.txt77
-rw-r--r--Documentation/block/queue-sysfs.txt64
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt8
-rw-r--r--Documentation/watchdog/src/watchdog-test.c2
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi5
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts4
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts6
-rw-r--r--arch/arm/boot/dts/twl6030.dtsi3
-rw-r--r--arch/arm/configs/u8500_defconfig1
-rw-r--r--arch/arm/mach-dove/common.c3
-rw-r--r--arch/arm/mach-exynos/mach-origen.c7
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c7
-rw-r--r--arch/arm/mach-imx/Makefile10
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c8
-rw-r--r--arch/arm/mach-imx/headsmp.S (renamed from arch/arm/mach-imx/head-v7.S)0
-rw-r--r--arch/arm/mach-imx/hotplug.c23
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c4
-rw-r--r--arch/arm/mach-kirkwood/Makefile.boot3
-rw-r--r--arch/arm/mach-kirkwood/common.c4
-rw-r--r--arch/arm/mach-mmp/Makefile8
-rw-r--r--arch/arm/mach-mmp/clock-mmp2.c111
-rw-r--r--arch/arm/mach-mmp/clock-pxa168.c91
-rw-r--r--arch/arm/mach-mmp/clock-pxa910.c67
-rw-r--r--arch/arm/mach-mmp/common.h3
-rw-r--r--arch/arm/mach-mmp/include/mach/regs-apbc.h95
-rw-r--r--arch/arm/mach-mmp/include/mach/regs-apmu.h15
-rw-r--r--arch/arm/mach-mmp/mmp2.c71
-rw-r--r--arch/arm/mach-mmp/pxa168.c58
-rw-r--r--arch/arm/mach-mmp/pxa910.c40
-rw-r--r--arch/arm/mach-mmp/sram.c2
-rw-r--r--arch/arm/mach-mv78xx0/addr-map.c2
-rw-r--r--arch/arm/mach-mv78xx0/common.c6
-rw-r--r--arch/arm/mach-omap2/Kconfig3
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c1
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c11
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h1
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c3
-rw-r--r--arch/arm/mach-omap2/mux.h1
-rw-r--r--arch/arm/mach-omap2/opp4xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm34xx.c19
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S8
-rw-r--r--arch/arm/mach-omap2/twl-common.c1
-rw-r--r--arch/arm/mach-orion5x/common.c3
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h3
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/board-mop500-msp.c10
-rw-r--r--arch/arm/mach-ux500/board-mop500.c4
-rw-r--r--arch/arm/plat-omap/dmtimer.c6
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h3
-rw-r--r--arch/arm/plat-omap/include/plat/multi.h9
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h4
-rw-r--r--arch/arm/plat-orion/common.c8
-rw-r--r--arch/arm/plat-orion/include/plat/common.h6
-rw-r--r--arch/arm/plat-s3c24xx/dma.c2
-rw-r--r--arch/arm/plat-samsung/devs.c29
-rw-r--r--arch/arm/plat-samsung/include/plat/hdmi.h16
-rw-r--r--arch/arm/plat-samsung/pm.c2
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/alchemy/board-mtx1.c2
-rw-r--r--arch/mips/ath79/dev-usb.c2
-rw-r--r--arch/mips/ath79/gpio.c6
-rw-r--r--arch/mips/bcm63xx/dev-spi.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c89
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h3
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h13
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h10
-rw-r--r--arch/mips/include/asm/module.h1
-rw-r--r--arch/mips/include/asm/r4k-timer.h8
-rw-r--r--arch/mips/kernel/module.c43
-rw-r--r--arch/mips/kernel/smp.c4
-rw-r--r--arch/mips/kernel/sync-r4k.c26
-rw-r--r--arch/mips/mti-malta/malta-pci.c13
-rw-r--r--arch/mips/pci/pci-ar724x.c22
-rw-r--r--arch/parisc/include/asm/atomic.h4
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/sys_parisc.c8
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi7
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig31
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig29
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig1
-rw-r--r--arch/powerpc/configs/g5_defconfig103
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig18
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig33
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig32
-rw-r--r--arch/powerpc/include/asm/cputable.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h12
-rw-r--r--arch/powerpc/include/asm/mpic_msgr.h1
-rw-r--r--arch/powerpc/kernel/dma-iommu.c9
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c2
-rw-r--r--arch/powerpc/kernel/kgdb.c27
-rw-r--r--arch/powerpc/kernel/syscalls.c8
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S12
-rw-r--r--arch/powerpc/kvm/e500_tlb.c11
-rw-r--r--arch/powerpc/lib/copyuser_power7.S35
-rw-r--r--arch/powerpc/lib/memcpy_power7.S4
-rw-r--r--arch/powerpc/mm/mem.c1
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c13
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c3
-rw-r--r--arch/powerpc/xmon/xmon.c84
-rw-r--r--arch/s390/include/asm/elf.h3
-rw-r--r--arch/s390/include/asm/posix_types.h3
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/spinlock.h3
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kernel/microcode_amd.c7
-rw-r--r--arch/x86/kvm/emulate.c30
-rw-r--r--arch/x86/kvm/mmu.c13
-rw-r--r--arch/x86/kvm/x86.c5
-rw-r--r--arch/x86/xen/enlighten.c118
-rw-r--r--arch/x86/xen/p2m.c95
-rw-r--r--arch/x86/xen/setup.c9
-rw-r--r--arch/x86/xen/suspend.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--block/blk-lib.c41
-rw-r--r--block/blk-merge.c117
-rw-r--r--block/genhd.c2
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c8
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/libahci.c3
-rw-r--r--drivers/ata/libata-acpi.c15
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/pata_atiixp.c16
-rw-r--r--drivers/block/drbd/drbd_bitmap.c15
-rw-r--r--drivers/block/drbd/drbd_int.h1
-rw-r--r--drivers/block/drbd/drbd_main.c28
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/drbd/drbd_req.c36
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/crypto/caam/jr.c10
-rw-r--r--drivers/crypto/hifn_795x.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c12
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c36
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c29
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c140
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c30
-rw-r--r--drivers/gpu/drm/radeon/r600d.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hwmon/asus_atk0110.c6
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_ioctl.c7
-rw-r--r--drivers/spi/spi-bcm63xx.c31
-rw-r--r--drivers/watchdog/booke_wdt.c7
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/xen/platform-pci.c15
-rw-r--r--fs/bio.c11
-rw-r--r--fs/block_dev.c3
-rw-r--r--fs/btrfs/backref.c4
-rw-r--r--fs/btrfs/compression.c1
-rw-r--r--fs/btrfs/ctree.c9
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/delayed-inode.c12
-rw-r--r--fs/btrfs/delayed-ref.c163
-rw-r--r--fs/btrfs/delayed-ref.h4
-rw-r--r--fs/btrfs/disk-io.c53
-rw-r--r--fs/btrfs/disk-io.h2
-rw-r--r--fs/btrfs/extent-tree.c123
-rw-r--r--fs/btrfs/extent_io.c17
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/inode.c326
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/locking.c2
-rw-r--r--fs/btrfs/qgroup.c12
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/super.c15
-rw-r--r--fs/btrfs/transaction.c3
-rw-r--r--fs/btrfs/volumes.c33
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/buffer.c66
-rw-r--r--fs/direct-io.c5
-rw-r--r--fs/jbd/journal.c5
-rw-r--r--fs/logfs/dev_bdev.c15
-rw-r--r--fs/logfs/inode.c18
-rw-r--r--fs/logfs/journal.c2
-rw-r--r--fs/logfs/readwrite.c1
-rw-r--r--fs/logfs/segment.c2
-rw-r--r--fs/nfsd/nfs4callback.c4
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/reiserfs/bitmap.c2
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/ubifs/debug.h2
-rw-r--r--fs/ubifs/lpt.c5
-rw-r--r--fs/ubifs/recovery.c2
-rw-r--r--fs/ubifs/replay.c3
-rw-r--r--fs/ubifs/super.c3
-rw-r--r--fs/udf/inode.c5
-rw-r--r--fs/udf/super.c7
-rw-r--r--fs/xfs/xfs_discard.c6
-rw-r--r--fs/xfs/xfs_ialloc.c17
-rw-r--r--fs/xfs/xfs_rtalloc.c2
-rw-r--r--include/drm/drm_crtc.h3
-rw-r--r--include/drm/drm_mode.h5
-rw-r--r--include/linux/blkdev.h14
-rw-r--r--include/linux/cpuidle.h4
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/mv643xx_eth.h2
-rw-r--r--include/linux/time.h29
-rw-r--r--include/xen/events.h2
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/time/timekeeping.c39
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--mm/filemap.c7
-rw-r--r--mm/mmap.c5
-rw-r--r--mm/slab.c1
-rw-r--r--net/sunrpc/svc_xprt.c10
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--tools/perf/util/python-ext-sources2
-rw-r--r--virt/kvm/kvm_main.c7
232 files changed, 2223 insertions, 1617 deletions
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index d111e3b23db0..d18ecd827c40 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -3,15 +3,21 @@
3biodoc.txt 3biodoc.txt
4 - Notes on the Generic Block Layer Rewrite in Linux 2.5 4 - Notes on the Generic Block Layer Rewrite in Linux 2.5
5capability.txt 5capability.txt
6 - Generic Block Device Capability (/sys/block/<disk>/capability) 6 - Generic Block Device Capability (/sys/block/<device>/capability)
7cfq-iosched.txt
8 - CFQ IO scheduler tunables
9data-integrity.txt
10 - Block data integrity
7deadline-iosched.txt 11deadline-iosched.txt
8 - Deadline IO scheduler tunables 12 - Deadline IO scheduler tunables
9ioprio.txt 13ioprio.txt
10 - Block io priorities (in CFQ scheduler) 14 - Block io priorities (in CFQ scheduler)
15queue-sysfs.txt
16 - Queue's sysfs entries
11request.txt 17request.txt
12 - The members of struct request (in include/linux/blkdev.h) 18 - The members of struct request (in include/linux/blkdev.h)
13stat.txt 19stat.txt
14 - Block layer statistics in /sys/block/<dev>/stat 20 - Block layer statistics in /sys/block/<device>/stat
15switching-sched.txt 21switching-sched.txt
16 - Switching I/O schedulers at runtime 22 - Switching I/O schedulers at runtime
17writeback_cache_control.txt 23writeback_cache_control.txt
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index 6d670f570451..d89b4fe724d7 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -1,3 +1,14 @@
1CFQ (Complete Fairness Queueing)
2===============================
3
4The main aim of the CFQ scheduler is to provide a fair allocation of disk
5I/O bandwidth to all the processes which request I/O operations.
6
7CFQ maintains a per-process queue for the processes which request I/O
8operations (synchronous requests). In case of asynchronous requests, all the
9requests from all the processes are batched together according to their
10process's I/O priority.
11
1CFQ ioscheduler tunables 12CFQ ioscheduler tunables
2======================== 13========================
3 14
@@ -25,6 +36,72 @@ there are multiple spindles behind single LUN (Host based hardware RAID
25controller or for storage arrays), setting slice_idle=0 might end up in better 36controller or for storage arrays), setting slice_idle=0 might end up in better
26throughput and acceptable latencies. 37throughput and acceptable latencies.
27 38
39back_seek_max
40-------------
41This specifies, in Kbytes, the maximum "distance" for backward seeking.
42The distance is the amount of space from the current head location to the
43sectors that lie behind the current head location.
44
45This parameter allows the scheduler to anticipate requests in the "backward"
46direction and consider them as being the "next" if they are within this
47distance from the current head location.
48
49back_seek_penalty
50-----------------
51This parameter is used to compute the cost of backward seeking. If the
52backward distance of a request is just 1/back_seek_penalty of the distance to
53a "front" request, the seek costs of the two requests are considered equal.
54
55The scheduler will then not bias toward one or the other request (otherwise it
56would bias toward the front request). The default value of back_seek_penalty is 2.
57
58fifo_expire_async
59-----------------
60This parameter is used to set the timeout of asynchronous requests. The
61default value is 248ms.
62
63fifo_expire_sync
64----------------
65This parameter is used to set the timeout of synchronous requests. The default
66value is 124ms. To favor synchronous requests over asynchronous ones, this
67value should be decreased relative to fifo_expire_async.
68
69slice_async
70-----------
71This parameter is the same as slice_sync but for the asynchronous queue. The
72default value is 40ms.
73
74slice_async_rq
75--------------
76This parameter limits the number of asynchronous requests dispatched to the
77device request queue during a queue's time slice. The maximum number of
78requests that may be dispatched also depends on the I/O priority. The default
79value is 2.
80
81slice_sync
82----------
83When a queue is selected for execution, the queue's IO requests are only
84executed for a certain amount of time (time_slice) before switching to another
85queue. This parameter is used to calculate the time slice of the synchronous
86queue.
87
88time_slice is computed using the following equation:
89time_slice = slice_sync + (slice_sync/5 * (4 - prio)). To increase the
90time_slice of a synchronous queue, increase the value of slice_sync. The
91default value of slice_sync is 100ms.
92
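A small stand-alone illustration (not part of the patch itself) that evaluates
the formula above for the default slice_sync of 100ms across ioprio levels 0-7:

/*
 * Worked example of the CFQ time_slice formula quoted above, using the
 * default slice_sync of 100 ms. Purely illustrative user-space code.
 *
 *   time_slice = slice_sync + (slice_sync / 5 * (4 - prio))
 */
#include <stdio.h>

int main(void)
{
	int slice_sync = 100;	/* default slice_sync, in ms */
	int prio;

	for (prio = 0; prio <= 7; prio++) {
		int time_slice = slice_sync + (slice_sync / 5 * (4 - prio));

		/* prio 0 -> 180 ms, prio 4 -> 100 ms, prio 7 -> 40 ms */
		printf("ioprio %d: time_slice = %d ms\n", prio, time_slice);
	}
	return 0;
}
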
93quantum
94-------
95This specifies the number of requests dispatched to the device queue. In a
96queue's time slice, a request will not be dispatched if the number of requests
97in the device exceeds this parameter. This parameter is used for synchronous
98requests.
99
100For storage with several disks, this setting can limit the parallel
101processing of requests. Therefore, increasing the value can improve the
102performance, although this can cause the latency of some I/O to increase due
103to a larger number of outstanding requests.
104
28CFQ IOPS Mode for group scheduling 105CFQ IOPS Mode for group scheduling
29=================================== 106===================================
30Basic CFQ design is to provide priority based time slices. Higher priority 107Basic CFQ design is to provide priority based time slices. Higher priority
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 6518a55273e7..e54ac1d53403 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -9,20 +9,71 @@ These files are the ones found in the /sys/block/xxx/queue/ directory.
9Files denoted with a RO postfix are readonly and the RW postfix means 9Files denoted with a RO postfix are readonly and the RW postfix means
10read-write. 10read-write.
11 11
12add_random (RW)
13----------------
14This file allows one to turn off the disk entropy contribution. The default
15value of this file is '1' (on).
16
17discard_granularity (RO)
18-----------------------
19This shows the size of the internal allocation of the device in bytes, if
20reported by the device. A value of '0' means the device does not support
21the discard functionality.
22
23discard_max_bytes (RO)
24----------------------
25Devices that support discard functionality may have internal limits on
26the number of bytes that can be trimmed or unmapped in a single operation.
27The discard_max_bytes parameter is set by the device driver to the maximum
28number of bytes that can be discarded in a single operation. Discard
29requests issued to the device must not exceed this limit. A discard_max_bytes
30value of 0 means that the device does not support discard functionality.
31
32discard_zeroes_data (RO)
33------------------------
34When read, this file will show whether the discarded blocks are zeroed by
35the device or not. If its value is '1', the blocks are zeroed; otherwise not.
36
12hw_sector_size (RO) 37hw_sector_size (RO)
13------------------- 38-------------------
14This is the hardware sector size of the device, in bytes. 39This is the hardware sector size of the device, in bytes.
15 40
41iostats (RW)
42-------------
43This file is used to control (on/off) the iostats accounting of the
44disk.
45
46logical_block_size (RO)
47-----------------------
48This is the logical block size of the device, in bytes.
49
16max_hw_sectors_kb (RO) 50max_hw_sectors_kb (RO)
17---------------------- 51----------------------
18This is the maximum number of kilobytes supported in a single data transfer. 52This is the maximum number of kilobytes supported in a single data transfer.
19 53
54max_integrity_segments (RO)
55---------------------------
56When read, this file shows the maximum number of integrity segments, as
57set by the block layer, that the hardware controller can handle.
58
20max_sectors_kb (RW) 59max_sectors_kb (RW)
21------------------- 60-------------------
22This is the maximum number of kilobytes that the block layer will allow 61This is the maximum number of kilobytes that the block layer will allow
23for a filesystem request. Must be smaller than or equal to the maximum 62for a filesystem request. Must be smaller than or equal to the maximum
24size allowed by the hardware. 63size allowed by the hardware.
25 64
65max_segments (RO)
66-----------------
67Maximum number of segments of the device.
68
69max_segment_size (RO)
70---------------------
71Maximum segment size of the device.
72
73minimum_io_size (RO)
74--------------------
75This is the smallest preferred io size reported by the device.
76
26nomerges (RW) 77nomerges (RW)
27------------- 78-------------
28This enables the user to disable the lookup logic involved with IO 79This enables the user to disable the lookup logic involved with IO
@@ -45,11 +96,24 @@ per-block-cgroup request pool. IOW, if there are N block cgroups,
45each request queue may have upto N request pools, each independently 96each request queue may have upto N request pools, each independently
46regulated by nr_requests. 97regulated by nr_requests.
47 98
99optimal_io_size (RO)
100--------------------
101This is the optimal io size reported by the device.
102
103physical_block_size (RO)
104------------------------
105This is the physical block size of device, in bytes.
106
48read_ahead_kb (RW) 107read_ahead_kb (RW)
49------------------ 108------------------
50Maximum number of kilobytes to read-ahead for filesystems on this block 109Maximum number of kilobytes to read-ahead for filesystems on this block
51device. 110device.
52 111
112rotational (RW)
113---------------
114This file is used to state whether the device is of rotational or
115non-rotational type.
116
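As a usage note (not part of the patch itself), here is a minimal user-space
sketch that reads a few of the queue attributes documented above; the device
name "sda" is only an assumed example:

/*
 * Illustrative only: print some /sys/block/<dev>/queue attributes.
 */
#include <stdio.h>

static void show_queue_attr(const char *dev, const char *attr)
{
	char path[256], value[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
	f = fopen(path, "r");
	if (f && fgets(value, sizeof(value), f))
		printf("%s: %s", attr, value);	/* value keeps its trailing newline */
	else
		printf("%s: <unavailable>\n", attr);
	if (f)
		fclose(f);
}

int main(void)
{
	const char *dev = "sda";	/* assumed device name */

	show_queue_attr(dev, "rotational");
	show_queue_attr(dev, "add_random");
	show_queue_attr(dev, "max_sectors_kb");
	show_queue_attr(dev, "discard_max_bytes");
	return 0;
}
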
53rq_affinity (RW) 117rq_affinity (RW)
54---------------- 118----------------
55If this option is '1', the block layer will migrate request completions to the 119If this option is '1', the block layer will migrate request completions to the
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 70cd49b1caa8..1dd622546d06 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -10,8 +10,8 @@ Required properties:
10- compatible : Should be "fsl,<chip>-esdhc" 10- compatible : Should be "fsl,<chip>-esdhc"
11 11
12Optional properties: 12Optional properties:
13- fsl,cd-internal : Indicate to use controller internal card detection 13- fsl,cd-controller : Indicate to use controller internal card detection
14- fsl,wp-internal : Indicate to use controller internal write protection 14- fsl,wp-controller : Indicate to use controller internal write protection
15 15
16Examples: 16Examples:
17 17
@@ -19,8 +19,8 @@ esdhc@70004000 {
19 compatible = "fsl,imx51-esdhc"; 19 compatible = "fsl,imx51-esdhc";
20 reg = <0x70004000 0x4000>; 20 reg = <0x70004000 0x4000>;
21 interrupts = <1>; 21 interrupts = <1>;
22 fsl,cd-internal; 22 fsl,cd-controller;
23 fsl,wp-internal; 23 fsl,wp-controller;
24}; 24};
25 25
26esdhc@70008000 { 26esdhc@70008000 {
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index 73ff5cc93e05..3da822967ee0 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -31,7 +31,7 @@ static void keep_alive(void)
31 * or "-e" to enable the card. 31 * or "-e" to enable the card.
32 */ 32 */
33 33
34void term(int sig) 34static void term(int sig)
35{ 35{
36 close(fd); 36 close(fd);
37 fprintf(stderr, "Stopping watchdog ticks...\n"); 37 fprintf(stderr, "Stopping watchdog ticks...\n");
diff --git a/Makefile b/Makefile
index 354026873b13..371ce8899f5c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc4
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 012016733838..d1799267a01f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2121,6 +2121,7 @@ source "drivers/cpufreq/Kconfig"
2121config CPU_FREQ_IMX 2121config CPU_FREQ_IMX
2122 tristate "CPUfreq driver for i.MX CPUs" 2122 tristate "CPUfreq driver for i.MX CPUs"
2123 depends on ARCH_MXC && CPU_FREQ 2123 depends on ARCH_MXC && CPU_FREQ
2124 select CPU_FREQ_TABLE
2124 help 2125 help
2125 This enables the CPUfreq driver for i.MX CPUs. 2126 This enables the CPUfreq driver for i.MX CPUs.
2126 2127
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 59509c48d7e5..bd0cff3f808c 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -154,5 +154,10 @@
154 #size-cells = <0>; 154 #size-cells = <0>;
155 ti,hwmods = "i2c3"; 155 ti,hwmods = "i2c3";
156 }; 156 };
157
158 wdt2: wdt@44e35000 {
159 compatible = "ti,omap3-wdt";
160 ti,hwmods = "wd_timer2";
161 };
157 }; 162 };
158}; 163};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index cd86177a3ea2..59d9789e5508 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -25,8 +25,8 @@
25 aips@70000000 { /* aips-1 */ 25 aips@70000000 { /* aips-1 */
26 spba@70000000 { 26 spba@70000000 {
27 esdhc@70004000 { /* ESDHC1 */ 27 esdhc@70004000 { /* ESDHC1 */
28 fsl,cd-internal; 28 fsl,cd-controller;
29 fsl,wp-internal; 29 fsl,wp-controller;
30 status = "okay"; 30 status = "okay";
31 }; 31 };
32 32
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 52d947045106..f8ca6fa88192 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -41,9 +41,13 @@
41 }; 41 };
42 power-blue { 42 power-blue {
43 label = "power:blue"; 43 label = "power:blue";
44 gpios = <&gpio1 11 0>; 44 gpios = <&gpio1 10 0>;
45 linux,default-trigger = "timer"; 45 linux,default-trigger = "timer";
46 }; 46 };
47 power-red {
48 label = "power:red";
49 gpios = <&gpio1 11 0>;
50 };
47 usb1 { 51 usb1 {
48 label = "usb1:blue"; 52 label = "usb1:blue";
49 gpios = <&gpio1 12 0>; 53 gpios = <&gpio1 12 0>;
diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi
index 3b2f3510d7eb..d351b27d7213 100644
--- a/arch/arm/boot/dts/twl6030.dtsi
+++ b/arch/arm/boot/dts/twl6030.dtsi
@@ -66,6 +66,7 @@
66 66
67 vcxio: regulator@8 { 67 vcxio: regulator@8 {
68 compatible = "ti,twl6030-vcxio"; 68 compatible = "ti,twl6030-vcxio";
69 regulator-always-on;
69 }; 70 };
70 71
71 vusb: regulator@9 { 72 vusb: regulator@9 {
@@ -74,10 +75,12 @@
74 75
75 v1v8: regulator@10 { 76 v1v8: regulator@10 {
76 compatible = "ti,twl6030-v1v8"; 77 compatible = "ti,twl6030-v1v8";
78 regulator-always-on;
77 }; 79 };
78 80
79 v2v1: regulator@11 { 81 v2v1: regulator@11 {
80 compatible = "ti,twl6030-v2v1"; 82 compatible = "ti,twl6030-v2v1";
83 regulator-always-on;
81 }; 84 };
82 85
83 clk32kg: regulator@12 { 86 clk32kg: regulator@12 {
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 2d4f661d1cf6..da6845493caa 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y
86CONFIG_LEDS_CLASS=y 86CONFIG_LEDS_CLASS=y
87CONFIG_LEDS_LM3530=y 87CONFIG_LEDS_LM3530=y
88CONFIG_LEDS_LP5521=y 88CONFIG_LEDS_LP5521=y
89CONFIG_LEDS_GPIO=y
89CONFIG_RTC_CLASS=y 90CONFIG_RTC_CLASS=y
90CONFIG_RTC_DRV_AB8500=y 91CONFIG_RTC_DRV_AB8500=y
91CONFIG_RTC_DRV_PL031=y 92CONFIG_RTC_DRV_PL031=y
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index ed4fa5f316ea..cc4c6a5a357c 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -92,7 +92,8 @@ void __init dove_ehci1_init(void)
92void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data) 92void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
93{ 93{
94 orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE, 94 orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE,
95 IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR); 95 IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR,
96 1600);
96} 97}
97 98
98/***************************************************************************** 99/*****************************************************************************
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 5ca80307d6d7..4e574c24581c 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -42,6 +42,7 @@
42#include <plat/backlight.h> 42#include <plat/backlight.h>
43#include <plat/fb.h> 43#include <plat/fb.h>
44#include <plat/mfc.h> 44#include <plat/mfc.h>
45#include <plat/hdmi.h>
45 46
46#include <mach/ohci.h> 47#include <mach/ohci.h>
47#include <mach/map.h> 48#include <mach/map.h>
@@ -734,6 +735,11 @@ static void __init origen_bt_setup(void)
734 s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE); 735 s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE);
735} 736}
736 737
738/* I2C module and id for HDMIPHY */
739static struct i2c_board_info hdmiphy_info = {
740 I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
741};
742
737static void s5p_tv_setup(void) 743static void s5p_tv_setup(void)
738{ 744{
739 /* Direct HPD to HDMI chip */ 745 /* Direct HPD to HDMI chip */
@@ -781,6 +787,7 @@ static void __init origen_machine_init(void)
781 787
782 s5p_tv_setup(); 788 s5p_tv_setup();
783 s5p_i2c_hdmiphy_set_platdata(NULL); 789 s5p_i2c_hdmiphy_set_platdata(NULL);
790 s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
784 791
785#ifdef CONFIG_DRM_EXYNOS 792#ifdef CONFIG_DRM_EXYNOS
786 s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata; 793 s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 3cfa688d274a..73f2bce097e1 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -40,6 +40,7 @@
40#include <plat/mfc.h> 40#include <plat/mfc.h>
41#include <plat/ehci.h> 41#include <plat/ehci.h>
42#include <plat/clock.h> 42#include <plat/clock.h>
43#include <plat/hdmi.h>
43 44
44#include <mach/map.h> 45#include <mach/map.h>
45#include <mach/ohci.h> 46#include <mach/ohci.h>
@@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = {
354 .pwm_period_ns = 1000, 355 .pwm_period_ns = 1000,
355}; 356};
356 357
358/* I2C module and id for HDMIPHY */
359static struct i2c_board_info hdmiphy_info = {
360 I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
361};
362
357static void s5p_tv_setup(void) 363static void s5p_tv_setup(void)
358{ 364{
359 /* direct HPD to HDMI chip */ 365 /* direct HPD to HDMI chip */
@@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void)
388 394
389 s5p_tv_setup(); 395 s5p_tv_setup();
390 s5p_i2c_hdmiphy_set_platdata(NULL); 396 s5p_i2c_hdmiphy_set_platdata(NULL);
397 s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
391 398
392 samsung_keypad_set_platdata(&smdkv310_keypad_data); 399 samsung_keypad_set_platdata(&smdkv310_keypad_data);
393 400
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 07f7c226e4cf..d004d37ad9d8 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
9obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o 9obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
10obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o 10obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
11 11
12obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o 12imx5-pm-$(CONFIG_PM) += pm-imx5.o
13obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o
13 14
14obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \ 15obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
15 clk-pfd.o clk-busy.o 16 clk-pfd.o clk-busy.o
@@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o
70obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o 71obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
71obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o 72obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
72obj-$(CONFIG_HAVE_IMX_SRC) += src.o 73obj-$(CONFIG_HAVE_IMX_SRC) += src.o
73obj-$(CONFIG_CPU_V7) += head-v7.o 74AFLAGS_headsmp.o :=-Wa,-march=armv7-a
74AFLAGS_head-v7.o :=-Wa,-march=armv7-a 75obj-$(CONFIG_SMP) += headsmp.o platsmp.o
75obj-$(CONFIG_SMP) += platsmp.o
76obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 76obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
77obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o 77obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
78 78
79ifeq ($(CONFIG_PM),y) 79ifeq ($(CONFIG_PM),y)
80obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o 80obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
81endif 81endif
82 82
83# i.MX5 based machines 83# i.MX5 based machines
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index ea89520b6e22..4233d9e3531d 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -152,7 +152,7 @@ enum mx6q_clks {
152 ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3, 152 ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
153 usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg, 153 usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
154 pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg, 154 pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
155 ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, 155 ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
156 clk_max 156 clk_max
157}; 157};
158 158
@@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)
288 clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); 288 clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
289 clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); 289 clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
290 clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); 290 clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
291 clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1); 291 clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
292 clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1); 292 clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1);
293 clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
294 clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1);
293 clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); 295 clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
294 clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); 296 clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
295 clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); 297 clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
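For context (not part of the patch itself): the new "ldb_di*_div_3_5" clocks are
fixed-factor clocks with mult = 2 and div = 7, which is how an exact
divide-by-3.5 is expressed with integer factors. A stand-alone sketch of the
arithmetic, with the 520 MHz parent rate chosen only as an example:

/*
 * Illustrative only: rate of a fixed-factor clock with mult = 2, div = 7,
 * mirroring the generic recalc rule rate = parent_rate * mult / div.
 */
#include <stdio.h>

static unsigned long fixed_factor_rate(unsigned long parent_rate,
					unsigned int mult, unsigned int div)
{
	return (unsigned long)((unsigned long long)parent_rate * mult / div);
}

int main(void)
{
	unsigned long parent = 520000000UL;	/* example ldb_di mux rate */

	/* 520 MHz * 2 / 7 = ~148.571 MHz, i.e. 520 MHz / 3.5 */
	printf("ldb_di_div_3_5: %lu Hz\n", fixed_factor_rate(parent, 2, 7));
	return 0;
}
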
diff --git a/arch/arm/mach-imx/head-v7.S b/arch/arm/mach-imx/headsmp.S
index 7e49deb128a4..7e49deb128a4 100644
--- a/arch/arm/mach-imx/head-v7.S
+++ b/arch/arm/mach-imx/headsmp.S
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 20ed2d56c1af..f8f7437c83b8 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
42 : "cc"); 42 : "cc");
43} 43}
44 44
45static inline void cpu_leave_lowpower(void)
46{
47 unsigned int v;
48
49 asm volatile(
50 "mrc p15, 0, %0, c1, c0, 0\n"
51 " orr %0, %0, %1\n"
52 " mcr p15, 0, %0, c1, c0, 0\n"
53 " mrc p15, 0, %0, c1, c0, 1\n"
54 " orr %0, %0, %2\n"
55 " mcr p15, 0, %0, c1, c0, 1\n"
56 : "=&r" (v)
57 : "Ir" (CR_C), "Ir" (0x40)
58 : "cc");
59}
60
61/* 45/*
62 * platform-specific code to shutdown a CPU 46 * platform-specific code to shutdown a CPU
63 * 47 *
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
67{ 51{
68 cpu_enter_lowpower(); 52 cpu_enter_lowpower();
69 imx_enable_cpu(cpu, false); 53 imx_enable_cpu(cpu, false);
70 cpu_do_idle();
71 cpu_leave_lowpower();
72 54
73 /* We should never return from idle */ 55 /* spin here until hardware takes it down */
74 panic("cpu %d unexpectedly exit from shutdown\n", cpu); 56 while (1)
57 ;
75} 58}
76 59
77int platform_cpu_disable(unsigned int cpu) 60int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 5ec0608f2a76..045b3f6a387d 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -71,7 +71,7 @@ soft:
71/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */ 71/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
72static int ksz9021rn_phy_fixup(struct phy_device *phydev) 72static int ksz9021rn_phy_fixup(struct phy_device *phydev)
73{ 73{
74 if (IS_ENABLED(CONFIG_PHYLIB)) { 74 if (IS_BUILTIN(CONFIG_PHYLIB)) {
75 /* min rx data delay */ 75 /* min rx data delay */
76 phy_write(phydev, 0x0b, 0x8105); 76 phy_write(phydev, 0x0b, 0x8105);
77 phy_write(phydev, 0x0c, 0x0000); 77 phy_write(phydev, 0x0c, 0x0000);
@@ -112,7 +112,7 @@ put_clk:
112 112
113static void __init imx6q_sabrelite_init(void) 113static void __init imx6q_sabrelite_init(void)
114{ 114{
115 if (IS_ENABLED(CONFIG_PHYLIB)) 115 if (IS_BUILTIN(CONFIG_PHYLIB))
116 phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK, 116 phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
117 ksz9021rn_phy_fixup); 117 ksz9021rn_phy_fixup);
118 imx6q_sabrelite_cko1_setup(); 118 imx6q_sabrelite_cko1_setup();
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index a5717558ee89..a13299d758e1 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -7,7 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb
7dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb 7dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb
8dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb 8dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb
9dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb 9dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb
10dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-qnap-ts219.dtb 10dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6281.dtb
11dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6282.dtb
11dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb 12dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb
12dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb 13dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
13dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb 14dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 31d9f400ed82..936b31df644c 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -291,7 +291,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
291{ 291{
292 orion_ge00_init(eth_data, 292 orion_ge00_init(eth_data,
293 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, 293 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
294 IRQ_KIRKWOOD_GE00_ERR); 294 IRQ_KIRKWOOD_GE00_ERR, 1600);
295 /* The interface forgets the MAC address assigned by u-boot if 295 /* The interface forgets the MAC address assigned by u-boot if
296 the clock is turned off, so claim the clk now. */ 296 the clock is turned off, so claim the clk now. */
297 clk_prepare_enable(ge0); 297 clk_prepare_enable(ge0);
@@ -305,7 +305,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
305{ 305{
306 orion_ge01_init(eth_data, 306 orion_ge01_init(eth_data,
307 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, 307 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
308 IRQ_KIRKWOOD_GE01_ERR); 308 IRQ_KIRKWOOD_GE01_ERR, 1600);
309 clk_prepare_enable(ge1); 309 clk_prepare_enable(ge1);
310} 310}
311 311
diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile
index b786f7e6cd1f..095c155d6fb8 100644
--- a/arch/arm/mach-mmp/Makefile
+++ b/arch/arm/mach-mmp/Makefile
@@ -2,13 +2,19 @@
2# Makefile for Marvell's PXA168 processors line 2# Makefile for Marvell's PXA168 processors line
3# 3#
4 4
5obj-y += common.o clock.o devices.o time.o irq.o 5obj-y += common.o devices.o time.o irq.o
6 6
7# SoC support 7# SoC support
8obj-$(CONFIG_CPU_PXA168) += pxa168.o 8obj-$(CONFIG_CPU_PXA168) += pxa168.o
9obj-$(CONFIG_CPU_PXA910) += pxa910.o 9obj-$(CONFIG_CPU_PXA910) += pxa910.o
10obj-$(CONFIG_CPU_MMP2) += mmp2.o sram.o 10obj-$(CONFIG_CPU_MMP2) += mmp2.o sram.o
11 11
12ifeq ($(CONFIG_COMMON_CLK), )
13obj-y += clock.o
14obj-$(CONFIG_CPU_PXA168) += clock-pxa168.o
15obj-$(CONFIG_CPU_PXA910) += clock-pxa910.o
16obj-$(CONFIG_CPU_MMP2) += clock-mmp2.o
17endif
12ifeq ($(CONFIG_PM),y) 18ifeq ($(CONFIG_PM),y)
13obj-$(CONFIG_CPU_PXA910) += pm-pxa910.o 19obj-$(CONFIG_CPU_PXA910) += pm-pxa910.o
14obj-$(CONFIG_CPU_MMP2) += pm-mmp2.o 20obj-$(CONFIG_CPU_MMP2) += pm-mmp2.o
diff --git a/arch/arm/mach-mmp/clock-mmp2.c b/arch/arm/mach-mmp/clock-mmp2.c
new file mode 100644
index 000000000000..21d22002cd19
--- /dev/null
+++ b/arch/arm/mach-mmp/clock-mmp2.c
@@ -0,0 +1,111 @@
1#include <linux/module.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/list.h>
5#include <linux/io.h>
6#include <linux/clk.h>
7
8#include <mach/addr-map.h>
9
10#include "common.h"
11#include "clock.h"
12
13/*
14 * APB Clock register offsets for MMP2
15 */
16#define APBC_RTC APBC_REG(0x000)
17#define APBC_TWSI1 APBC_REG(0x004)
18#define APBC_TWSI2 APBC_REG(0x008)
19#define APBC_TWSI3 APBC_REG(0x00c)
20#define APBC_TWSI4 APBC_REG(0x010)
21#define APBC_KPC APBC_REG(0x018)
22#define APBC_UART1 APBC_REG(0x02c)
23#define APBC_UART2 APBC_REG(0x030)
24#define APBC_UART3 APBC_REG(0x034)
25#define APBC_GPIO APBC_REG(0x038)
26#define APBC_PWM0 APBC_REG(0x03c)
27#define APBC_PWM1 APBC_REG(0x040)
28#define APBC_PWM2 APBC_REG(0x044)
29#define APBC_PWM3 APBC_REG(0x048)
30#define APBC_SSP0 APBC_REG(0x04c)
31#define APBC_SSP1 APBC_REG(0x050)
32#define APBC_SSP2 APBC_REG(0x054)
33#define APBC_SSP3 APBC_REG(0x058)
34#define APBC_SSP4 APBC_REG(0x05c)
35#define APBC_SSP5 APBC_REG(0x060)
36#define APBC_TWSI5 APBC_REG(0x07c)
37#define APBC_TWSI6 APBC_REG(0x080)
38#define APBC_UART4 APBC_REG(0x088)
39
40#define APMU_USB APMU_REG(0x05c)
41#define APMU_NAND APMU_REG(0x060)
42#define APMU_SDH0 APMU_REG(0x054)
43#define APMU_SDH1 APMU_REG(0x058)
44#define APMU_SDH2 APMU_REG(0x0e8)
45#define APMU_SDH3 APMU_REG(0x0ec)
46
47static void sdhc_clk_enable(struct clk *clk)
48{
49 uint32_t clk_rst;
50
51 clk_rst = __raw_readl(clk->clk_rst);
52 clk_rst |= clk->enable_val;
53 __raw_writel(clk_rst, clk->clk_rst);
54}
55
56static void sdhc_clk_disable(struct clk *clk)
57{
58 uint32_t clk_rst;
59
60 clk_rst = __raw_readl(clk->clk_rst);
61 clk_rst &= ~clk->enable_val;
62 __raw_writel(clk_rst, clk->clk_rst);
63}
64
65struct clkops sdhc_clk_ops = {
66 .enable = sdhc_clk_enable,
67 .disable = sdhc_clk_disable,
68};
69
70/* APB peripheral clocks */
71static APBC_CLK(uart1, UART1, 1, 26000000);
72static APBC_CLK(uart2, UART2, 1, 26000000);
73static APBC_CLK(uart3, UART3, 1, 26000000);
74static APBC_CLK(uart4, UART4, 1, 26000000);
75static APBC_CLK(twsi1, TWSI1, 0, 26000000);
76static APBC_CLK(twsi2, TWSI2, 0, 26000000);
77static APBC_CLK(twsi3, TWSI3, 0, 26000000);
78static APBC_CLK(twsi4, TWSI4, 0, 26000000);
79static APBC_CLK(twsi5, TWSI5, 0, 26000000);
80static APBC_CLK(twsi6, TWSI6, 0, 26000000);
81static APBC_CLK(gpio, GPIO, 0, 26000000);
82
83static APMU_CLK(nand, NAND, 0xbf, 100000000);
84static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops);
85static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops);
86static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops);
87static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops);
88
89static struct clk_lookup mmp2_clkregs[] = {
90 INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
91 INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
92 INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
93 INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL),
94 INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL),
95 INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL),
96 INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL),
97 INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL),
98 INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
99 INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
100 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
101 INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
102 INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
103 INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
104 INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
105 INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"),
106};
107
108void __init mmp2_clk_init(void)
109{
110 clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs));
111}
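As a usage note (not part of the patch itself), the clk_lookup table above is
what lets a driver resolve its clock by device name and connection id. The
probe function below is a hypothetical sketch; the device and connection names
come from the table above:

/*
 * Illustrative only: how the sdhci-pxav3.0 platform device would pick up
 * the "PXA-SDHCLK" entry registered by mmp2_clk_init() via clkdev.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_sdh_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* matches INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK") */
	clk = clk_get(&pdev->dev, "PXA-SDHCLK");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_prepare_enable(clk);
	/* ... controller setup would follow here ... */
	return 0;
}
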
diff --git a/arch/arm/mach-mmp/clock-pxa168.c b/arch/arm/mach-mmp/clock-pxa168.c
new file mode 100644
index 000000000000..5e6c18ccebd4
--- /dev/null
+++ b/arch/arm/mach-mmp/clock-pxa168.c
@@ -0,0 +1,91 @@
1#include <linux/module.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/list.h>
5#include <linux/io.h>
6#include <linux/clk.h>
7
8#include <mach/addr-map.h>
9
10#include "common.h"
11#include "clock.h"
12
13/*
14 * APB clock register offsets for PXA168
15 */
16#define APBC_UART1 APBC_REG(0x000)
17#define APBC_UART2 APBC_REG(0x004)
18#define APBC_GPIO APBC_REG(0x008)
19#define APBC_PWM1 APBC_REG(0x00c)
20#define APBC_PWM2 APBC_REG(0x010)
21#define APBC_PWM3 APBC_REG(0x014)
22#define APBC_PWM4 APBC_REG(0x018)
23#define APBC_RTC APBC_REG(0x028)
24#define APBC_TWSI0 APBC_REG(0x02c)
25#define APBC_KPC APBC_REG(0x030)
26#define APBC_TWSI1 APBC_REG(0x06c)
27#define APBC_UART3 APBC_REG(0x070)
28#define APBC_SSP1 APBC_REG(0x81c)
29#define APBC_SSP2 APBC_REG(0x820)
30#define APBC_SSP3 APBC_REG(0x84c)
31#define APBC_SSP4 APBC_REG(0x858)
32#define APBC_SSP5 APBC_REG(0x85c)
33
34#define APMU_NAND APMU_REG(0x060)
35#define APMU_LCD APMU_REG(0x04c)
36#define APMU_ETH APMU_REG(0x0fc)
37#define APMU_USB APMU_REG(0x05c)
38
39/* APB peripheral clocks */
40static APBC_CLK(uart1, UART1, 1, 14745600);
41static APBC_CLK(uart2, UART2, 1, 14745600);
42static APBC_CLK(uart3, UART3, 1, 14745600);
43static APBC_CLK(twsi0, TWSI0, 1, 33000000);
44static APBC_CLK(twsi1, TWSI1, 1, 33000000);
45static APBC_CLK(pwm1, PWM1, 1, 13000000);
46static APBC_CLK(pwm2, PWM2, 1, 13000000);
47static APBC_CLK(pwm3, PWM3, 1, 13000000);
48static APBC_CLK(pwm4, PWM4, 1, 13000000);
49static APBC_CLK(ssp1, SSP1, 4, 0);
50static APBC_CLK(ssp2, SSP2, 4, 0);
51static APBC_CLK(ssp3, SSP3, 4, 0);
52static APBC_CLK(ssp4, SSP4, 4, 0);
53static APBC_CLK(ssp5, SSP5, 4, 0);
54static APBC_CLK(gpio, GPIO, 0, 13000000);
55static APBC_CLK(keypad, KPC, 0, 32000);
56static APBC_CLK(rtc, RTC, 8, 32768);
57
58static APMU_CLK(nand, NAND, 0x19b, 156000000);
59static APMU_CLK(lcd, LCD, 0x7f, 312000000);
60static APMU_CLK(eth, ETH, 0x09, 0);
61static APMU_CLK(usb, USB, 0x12, 0);
62
63/* device and clock bindings */
64static struct clk_lookup pxa168_clkregs[] = {
65 INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
66 INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
67 INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
68 INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
69 INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
70 INIT_CLKREG(&clk_pwm1, "pxa168-pwm.0", NULL),
71 INIT_CLKREG(&clk_pwm2, "pxa168-pwm.1", NULL),
72 INIT_CLKREG(&clk_pwm3, "pxa168-pwm.2", NULL),
73 INIT_CLKREG(&clk_pwm4, "pxa168-pwm.3", NULL),
74 INIT_CLKREG(&clk_ssp1, "pxa168-ssp.0", NULL),
75 INIT_CLKREG(&clk_ssp2, "pxa168-ssp.1", NULL),
76 INIT_CLKREG(&clk_ssp3, "pxa168-ssp.2", NULL),
77 INIT_CLKREG(&clk_ssp4, "pxa168-ssp.3", NULL),
78 INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL),
79 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
80 INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL),
81 INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
82 INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL),
83 INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"),
84 INIT_CLKREG(&clk_usb, NULL, "PXA168-USBCLK"),
85 INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
86};
87
88void __init pxa168_clk_init(void)
89{
90 clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs));
91}
diff --git a/arch/arm/mach-mmp/clock-pxa910.c b/arch/arm/mach-mmp/clock-pxa910.c
new file mode 100644
index 000000000000..933ea71d0b56
--- /dev/null
+++ b/arch/arm/mach-mmp/clock-pxa910.c
@@ -0,0 +1,67 @@
1#include <linux/module.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/list.h>
5#include <linux/io.h>
6#include <linux/clk.h>
7
8#include <mach/addr-map.h>
9
10#include "common.h"
11#include "clock.h"
12
13/*
14 * APB Clock register offsets for PXA910
15 */
16#define APBC_UART0 APBC_REG(0x000)
17#define APBC_UART1 APBC_REG(0x004)
18#define APBC_GPIO APBC_REG(0x008)
19#define APBC_PWM1 APBC_REG(0x00c)
20#define APBC_PWM2 APBC_REG(0x010)
21#define APBC_PWM3 APBC_REG(0x014)
22#define APBC_PWM4 APBC_REG(0x018)
23#define APBC_SSP1 APBC_REG(0x01c)
24#define APBC_SSP2 APBC_REG(0x020)
25#define APBC_RTC APBC_REG(0x028)
26#define APBC_TWSI0 APBC_REG(0x02c)
27#define APBC_KPC APBC_REG(0x030)
28#define APBC_SSP3 APBC_REG(0x04c)
29#define APBC_TWSI1 APBC_REG(0x06c)
30
31#define APMU_NAND APMU_REG(0x060)
32#define APMU_USB APMU_REG(0x05c)
33
34static APBC_CLK(uart1, UART0, 1, 14745600);
35static APBC_CLK(uart2, UART1, 1, 14745600);
36static APBC_CLK(twsi0, TWSI0, 1, 33000000);
37static APBC_CLK(twsi1, TWSI1, 1, 33000000);
38static APBC_CLK(pwm1, PWM1, 1, 13000000);
39static APBC_CLK(pwm2, PWM2, 1, 13000000);
40static APBC_CLK(pwm3, PWM3, 1, 13000000);
41static APBC_CLK(pwm4, PWM4, 1, 13000000);
42static APBC_CLK(gpio, GPIO, 0, 13000000);
43static APBC_CLK(rtc, RTC, 8, 32768);
44
45static APMU_CLK(nand, NAND, 0x19b, 156000000);
46static APMU_CLK(u2o, USB, 0x1b, 480000000);
47
48/* device and clock bindings */
49static struct clk_lookup pxa910_clkregs[] = {
50 INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
51 INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
52 INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
53 INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
54 INIT_CLKREG(&clk_pwm1, "pxa910-pwm.0", NULL),
55 INIT_CLKREG(&clk_pwm2, "pxa910-pwm.1", NULL),
56 INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL),
57 INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL),
58 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
59 INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
60 INIT_CLKREG(&clk_u2o, NULL, "U2OCLK"),
61 INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
62};
63
64void __init pxa910_clk_init(void)
65{
66 clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs));
67}
diff --git a/arch/arm/mach-mmp/common.h b/arch/arm/mach-mmp/common.h
index 1c9d6c1ea97a..bd453274fca2 100644
--- a/arch/arm/mach-mmp/common.h
+++ b/arch/arm/mach-mmp/common.h
@@ -7,3 +7,6 @@ extern void timer_init(int irq);
7extern void __init icu_init_irq(void); 7extern void __init icu_init_irq(void);
8extern void __init mmp_map_io(void); 8extern void __init mmp_map_io(void);
9extern void mmp_restart(char, const char *); 9extern void mmp_restart(char, const char *);
10extern void __init pxa168_clk_init(void);
11extern void __init pxa910_clk_init(void);
12extern void __init mmp2_clk_init(void);
diff --git a/arch/arm/mach-mmp/include/mach/regs-apbc.h b/arch/arm/mach-mmp/include/mach/regs-apbc.h
index 68b0c93ec6a1..ddc812f40341 100644
--- a/arch/arm/mach-mmp/include/mach/regs-apbc.h
+++ b/arch/arm/mach-mmp/include/mach/regs-apbc.h
@@ -13,101 +13,6 @@
13 13
14#include <mach/addr-map.h> 14#include <mach/addr-map.h>
15 15
16/*
17 * APB clock register offsets for PXA168
18 */
19#define APBC_PXA168_UART1 APBC_REG(0x000)
20#define APBC_PXA168_UART2 APBC_REG(0x004)
21#define APBC_PXA168_GPIO APBC_REG(0x008)
22#define APBC_PXA168_PWM1 APBC_REG(0x00c)
23#define APBC_PXA168_PWM2 APBC_REG(0x010)
24#define APBC_PXA168_PWM3 APBC_REG(0x014)
25#define APBC_PXA168_PWM4 APBC_REG(0x018)
26#define APBC_PXA168_RTC APBC_REG(0x028)
27#define APBC_PXA168_TWSI0 APBC_REG(0x02c)
28#define APBC_PXA168_KPC APBC_REG(0x030)
29#define APBC_PXA168_TIMERS APBC_REG(0x034)
30#define APBC_PXA168_AIB APBC_REG(0x03c)
31#define APBC_PXA168_SW_JTAG APBC_REG(0x040)
32#define APBC_PXA168_ONEWIRE APBC_REG(0x048)
33#define APBC_PXA168_ASFAR APBC_REG(0x050)
34#define APBC_PXA168_ASSAR APBC_REG(0x054)
35#define APBC_PXA168_TWSI1 APBC_REG(0x06c)
36#define APBC_PXA168_UART3 APBC_REG(0x070)
37#define APBC_PXA168_AC97 APBC_REG(0x084)
38#define APBC_PXA168_SSP1 APBC_REG(0x81c)
39#define APBC_PXA168_SSP2 APBC_REG(0x820)
40#define APBC_PXA168_SSP3 APBC_REG(0x84c)
41#define APBC_PXA168_SSP4 APBC_REG(0x858)
42#define APBC_PXA168_SSP5 APBC_REG(0x85c)
43
44/*
45 * APB Clock register offsets for PXA910
46 */
47#define APBC_PXA910_UART0 APBC_REG(0x000)
48#define APBC_PXA910_UART1 APBC_REG(0x004)
49#define APBC_PXA910_GPIO APBC_REG(0x008)
50#define APBC_PXA910_PWM1 APBC_REG(0x00c)
51#define APBC_PXA910_PWM2 APBC_REG(0x010)
52#define APBC_PXA910_PWM3 APBC_REG(0x014)
53#define APBC_PXA910_PWM4 APBC_REG(0x018)
54#define APBC_PXA910_SSP1 APBC_REG(0x01c)
55#define APBC_PXA910_SSP2 APBC_REG(0x020)
56#define APBC_PXA910_IPC APBC_REG(0x024)
57#define APBC_PXA910_RTC APBC_REG(0x028)
58#define APBC_PXA910_TWSI0 APBC_REG(0x02c)
59#define APBC_PXA910_KPC APBC_REG(0x030)
60#define APBC_PXA910_TIMERS APBC_REG(0x034)
61#define APBC_PXA910_TBROT APBC_REG(0x038)
62#define APBC_PXA910_AIB APBC_REG(0x03c)
63#define APBC_PXA910_SW_JTAG APBC_REG(0x040)
64#define APBC_PXA910_TIMERS1 APBC_REG(0x044)
65#define APBC_PXA910_ONEWIRE APBC_REG(0x048)
66#define APBC_PXA910_SSP3 APBC_REG(0x04c)
67#define APBC_PXA910_ASFAR APBC_REG(0x050)
68#define APBC_PXA910_ASSAR APBC_REG(0x054)
69
70/*
71 * APB Clock register offsets for MMP2
72 */
73#define APBC_MMP2_RTC APBC_REG(0x000)
74#define APBC_MMP2_TWSI1 APBC_REG(0x004)
75#define APBC_MMP2_TWSI2 APBC_REG(0x008)
76#define APBC_MMP2_TWSI3 APBC_REG(0x00c)
77#define APBC_MMP2_TWSI4 APBC_REG(0x010)
78#define APBC_MMP2_ONEWIRE APBC_REG(0x014)
79#define APBC_MMP2_KPC APBC_REG(0x018)
80#define APBC_MMP2_TB_ROTARY APBC_REG(0x01c)
81#define APBC_MMP2_SW_JTAG APBC_REG(0x020)
82#define APBC_MMP2_TIMERS APBC_REG(0x024)
83#define APBC_MMP2_UART1 APBC_REG(0x02c)
84#define APBC_MMP2_UART2 APBC_REG(0x030)
85#define APBC_MMP2_UART3 APBC_REG(0x034)
86#define APBC_MMP2_GPIO APBC_REG(0x038)
87#define APBC_MMP2_PWM0 APBC_REG(0x03c)
88#define APBC_MMP2_PWM1 APBC_REG(0x040)
89#define APBC_MMP2_PWM2 APBC_REG(0x044)
90#define APBC_MMP2_PWM3 APBC_REG(0x048)
91#define APBC_MMP2_SSP0 APBC_REG(0x04c)
92#define APBC_MMP2_SSP1 APBC_REG(0x050)
93#define APBC_MMP2_SSP2 APBC_REG(0x054)
94#define APBC_MMP2_SSP3 APBC_REG(0x058)
95#define APBC_MMP2_SSP4 APBC_REG(0x05c)
96#define APBC_MMP2_SSP5 APBC_REG(0x060)
97#define APBC_MMP2_AIB APBC_REG(0x064)
98#define APBC_MMP2_ASFAR APBC_REG(0x068)
99#define APBC_MMP2_ASSAR APBC_REG(0x06c)
100#define APBC_MMP2_USIM APBC_REG(0x070)
101#define APBC_MMP2_MPMU APBC_REG(0x074)
102#define APBC_MMP2_IPC APBC_REG(0x078)
103#define APBC_MMP2_TWSI5 APBC_REG(0x07c)
104#define APBC_MMP2_TWSI6 APBC_REG(0x080)
105#define APBC_MMP2_TWSI_INTSTS APBC_REG(0x084)
106#define APBC_MMP2_UART4 APBC_REG(0x088)
107#define APBC_MMP2_RIPC APBC_REG(0x08c)
108#define APBC_MMP2_THSENS1 APBC_REG(0x090) /* Thermal Sensor */
109#define APBC_MMP2_THSENS_INTSTS APBC_REG(0x0a4)
110
111/* Common APB clock register bit definitions */ 16/* Common APB clock register bit definitions */
112#define APBC_APBCLK (1 << 0) /* APB Bus Clock Enable */ 17#define APBC_APBCLK (1 << 0) /* APB Bus Clock Enable */
113#define APBC_FNCLK (1 << 1) /* Functional Clock Enable */ 18#define APBC_FNCLK (1 << 1) /* Functional Clock Enable */
diff --git a/arch/arm/mach-mmp/include/mach/regs-apmu.h b/arch/arm/mach-mmp/include/mach/regs-apmu.h
index 7af8deb63e83..93c8d0e29bb9 100644
--- a/arch/arm/mach-mmp/include/mach/regs-apmu.h
+++ b/arch/arm/mach-mmp/include/mach/regs-apmu.h
@@ -13,21 +13,6 @@
13 13
14#include <mach/addr-map.h> 14#include <mach/addr-map.h>
15 15
16/* Clock Reset Control */
17#define APMU_IRE APMU_REG(0x048)
18#define APMU_LCD APMU_REG(0x04c)
19#define APMU_CCIC APMU_REG(0x050)
20#define APMU_SDH0 APMU_REG(0x054)
21#define APMU_SDH1 APMU_REG(0x058)
22#define APMU_USB APMU_REG(0x05c)
23#define APMU_NAND APMU_REG(0x060)
24#define APMU_DMA APMU_REG(0x064)
25#define APMU_GEU APMU_REG(0x068)
26#define APMU_BUS APMU_REG(0x06c)
27#define APMU_SDH2 APMU_REG(0x0e8)
28#define APMU_SDH3 APMU_REG(0x0ec)
29#define APMU_ETH APMU_REG(0x0fc)
30
31#define APMU_FNCLK_EN (1 << 4) 16#define APMU_FNCLK_EN (1 << 4)
32#define APMU_AXICLK_EN (1 << 3) 17#define APMU_AXICLK_EN (1 << 3)
33#define APMU_FNRST_DIS (1 << 1) 18#define APMU_FNRST_DIS (1 << 1)
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index c709a24a9d25..c2ce3d05b044 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -20,7 +20,6 @@
20#include <asm/mach/time.h> 20#include <asm/mach/time.h>
21#include <mach/addr-map.h> 21#include <mach/addr-map.h>
22#include <mach/regs-apbc.h> 22#include <mach/regs-apbc.h>
23#include <mach/regs-apmu.h>
24#include <mach/cputype.h> 23#include <mach/cputype.h>
25#include <mach/irqs.h> 24#include <mach/irqs.h>
26#include <mach/dma.h> 25#include <mach/dma.h>
@@ -29,7 +28,6 @@
29#include <mach/mmp2.h> 28#include <mach/mmp2.h>
30 29
31#include "common.h" 30#include "common.h"
32#include "clock.h"
33 31
34#define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000) 32#define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000)
35 33
@@ -98,67 +96,6 @@ void __init mmp2_init_irq(void)
98 mmp2_init_icu(); 96 mmp2_init_icu();
99} 97}
100 98
101static void sdhc_clk_enable(struct clk *clk)
102{
103 uint32_t clk_rst;
104
105 clk_rst = __raw_readl(clk->clk_rst);
106 clk_rst |= clk->enable_val;
107 __raw_writel(clk_rst, clk->clk_rst);
108}
109
110static void sdhc_clk_disable(struct clk *clk)
111{
112 uint32_t clk_rst;
113
114 clk_rst = __raw_readl(clk->clk_rst);
115 clk_rst &= ~clk->enable_val;
116 __raw_writel(clk_rst, clk->clk_rst);
117}
118
119struct clkops sdhc_clk_ops = {
120 .enable = sdhc_clk_enable,
121 .disable = sdhc_clk_disable,
122};
123
124/* APB peripheral clocks */
125static APBC_CLK(uart1, MMP2_UART1, 1, 26000000);
126static APBC_CLK(uart2, MMP2_UART2, 1, 26000000);
127static APBC_CLK(uart3, MMP2_UART3, 1, 26000000);
128static APBC_CLK(uart4, MMP2_UART4, 1, 26000000);
129static APBC_CLK(twsi1, MMP2_TWSI1, 0, 26000000);
130static APBC_CLK(twsi2, MMP2_TWSI2, 0, 26000000);
131static APBC_CLK(twsi3, MMP2_TWSI3, 0, 26000000);
132static APBC_CLK(twsi4, MMP2_TWSI4, 0, 26000000);
133static APBC_CLK(twsi5, MMP2_TWSI5, 0, 26000000);
134static APBC_CLK(twsi6, MMP2_TWSI6, 0, 26000000);
135static APBC_CLK(gpio, MMP2_GPIO, 0, 26000000);
136
137static APMU_CLK(nand, NAND, 0xbf, 100000000);
138static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops);
139static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops);
140static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops);
141static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops);
142
143static struct clk_lookup mmp2_clkregs[] = {
144 INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
145 INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
146 INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
147 INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL),
148 INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL),
149 INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL),
150 INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL),
151 INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL),
152 INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
153 INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
154 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
155 INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
156 INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
157 INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
158 INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
159 INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"),
160};
161
162static int __init mmp2_init(void)
163{
164 if (cpu_is_mmp2()) {
@@ -168,25 +105,27 @@ static int __init mmp2_init(void)
168 mfp_init_base(MFPR_VIRT_BASE);
169 mfp_init_addr(mmp2_addr_map);
170 pxa_init_dma(IRQ_MMP2_DMA_RIQ, 16);
171 clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs)); 108 mmp2_clk_init();
172 }
173
174 return 0;
175}
176postcore_initcall(mmp2_init);
177
115#define APBC_TIMERS APBC_REG(0x024)
116
178static void __init mmp2_timer_init(void)
179{
180 unsigned long clk_rst;
181
182 __raw_writel(APBC_APBCLK | APBC_RST, APBC_MMP2_TIMERS); 121 __raw_writel(APBC_APBCLK | APBC_RST, APBC_TIMERS);
183
184 /*
185 * enable bus/functional clock, enable 6.5MHz (divider 4),
186 * release reset
187 */
188 clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(1);
189 __raw_writel(clk_rst, APBC_MMP2_TIMERS); 128 __raw_writel(clk_rst, APBC_TIMERS);
190
191 timer_init(IRQ_MMP2_TIMER1);
192}
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 62d787c34475..b7f074f15498 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -18,8 +18,8 @@
18
19#include <asm/mach/time.h>
20#include <asm/system_misc.h>
21#include <mach/addr-map.h>
22#include <mach/cputype.h>
22#include <mach/addr-map.h>
23#include <mach/regs-apbc.h>
24#include <mach/regs-apmu.h>
25#include <mach/irqs.h>
@@ -50,62 +50,13 @@ void __init pxa168_init_irq(void)
50 icu_init_irq();
51}
52
53/* APB peripheral clocks */
54static APBC_CLK(uart1, PXA168_UART1, 1, 14745600);
55static APBC_CLK(uart2, PXA168_UART2, 1, 14745600);
56static APBC_CLK(uart3, PXA168_UART3, 1, 14745600);
57static APBC_CLK(twsi0, PXA168_TWSI0, 1, 33000000);
58static APBC_CLK(twsi1, PXA168_TWSI1, 1, 33000000);
59static APBC_CLK(pwm1, PXA168_PWM1, 1, 13000000);
60static APBC_CLK(pwm2, PXA168_PWM2, 1, 13000000);
61static APBC_CLK(pwm3, PXA168_PWM3, 1, 13000000);
62static APBC_CLK(pwm4, PXA168_PWM4, 1, 13000000);
63static APBC_CLK(ssp1, PXA168_SSP1, 4, 0);
64static APBC_CLK(ssp2, PXA168_SSP2, 4, 0);
65static APBC_CLK(ssp3, PXA168_SSP3, 4, 0);
66static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
67static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
68static APBC_CLK(gpio, PXA168_GPIO, 0, 13000000);
69static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
70static APBC_CLK(rtc, PXA168_RTC, 8, 32768);
71
72static APMU_CLK(nand, NAND, 0x19b, 156000000);
73static APMU_CLK(lcd, LCD, 0x7f, 312000000);
74static APMU_CLK(eth, ETH, 0x09, 0);
75static APMU_CLK(usb, USB, 0x12, 0);
76
77/* device and clock bindings */
78static struct clk_lookup pxa168_clkregs[] = {
79 INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
80 INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
81 INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
82 INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
83 INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
84 INIT_CLKREG(&clk_pwm1, "pxa168-pwm.0", NULL),
85 INIT_CLKREG(&clk_pwm2, "pxa168-pwm.1", NULL),
86 INIT_CLKREG(&clk_pwm3, "pxa168-pwm.2", NULL),
87 INIT_CLKREG(&clk_pwm4, "pxa168-pwm.3", NULL),
88 INIT_CLKREG(&clk_ssp1, "pxa168-ssp.0", NULL),
89 INIT_CLKREG(&clk_ssp2, "pxa168-ssp.1", NULL),
90 INIT_CLKREG(&clk_ssp3, "pxa168-ssp.2", NULL),
91 INIT_CLKREG(&clk_ssp4, "pxa168-ssp.3", NULL),
92 INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL),
93 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
94 INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL),
95 INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
96 INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL),
97 INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"),
98 INIT_CLKREG(&clk_usb, NULL, "PXA168-USBCLK"),
99 INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
100};
101
102static int __init pxa168_init(void)
103{
104 if (cpu_is_pxa168()) {
105 mfp_init_base(MFPR_VIRT_BASE);
106 mfp_init_addr(pxa168_mfp_addr_map);
107 pxa_init_dma(IRQ_PXA168_DMA_INT0, 32);
108 clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs)); 59 pxa168_clk_init();
109 }
110
111 return 0;
@@ -114,6 +65,7 @@ postcore_initcall(pxa168_init);
114
115/* system timer - clock enabled, 3.25MHz */
116#define TIMER_CLK_RST (APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(3))
68#define APBC_TIMERS APBC_REG(0x34)
117
118static void __init pxa168_timer_init(void)
119{
@@ -121,10 +73,10 @@ static void __init pxa168_timer_init(void)
121 * ourselves instead of using clk_* API. Clock rate is defined
122 * by APBC_TIMERS_CLK_RST (3.25MHz) and enabled free-running
123 */
124 __raw_writel(APBC_APBCLK | APBC_RST, APBC_PXA168_TIMERS); 76 __raw_writel(APBC_APBCLK | APBC_RST, APBC_TIMERS);
125
126 /* 3.25MHz, bus/functional clock enabled, release reset */
127 __raw_writel(TIMER_CLK_RST, APBC_PXA168_TIMERS); 79 __raw_writel(TIMER_CLK_RST, APBC_TIMERS);
128
129 timer_init(IRQ_PXA168_TIMER1);
130}
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index 6da52e9f2bdc..7d84521bb715 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -17,7 +17,6 @@
17#include <asm/mach/time.h>
18#include <mach/addr-map.h>
19#include <mach/regs-apbc.h>
20#include <mach/regs-apmu.h>
21#include <mach/cputype.h>
22#include <mach/irqs.h>
23#include <mach/dma.h>
@@ -25,7 +24,6 @@
25#include <mach/devices.h>
26
27#include "common.h"
28#include "clock.h"
29
30#define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000)
31
@@ -82,44 +80,13 @@ void __init pxa910_init_irq(void)
82 icu_init_irq();
83}
84
85/* APB peripheral clocks */
86static APBC_CLK(uart1, PXA910_UART0, 1, 14745600);
87static APBC_CLK(uart2, PXA910_UART1, 1, 14745600);
88static APBC_CLK(twsi0, PXA168_TWSI0, 1, 33000000);
89static APBC_CLK(twsi1, PXA168_TWSI1, 1, 33000000);
90static APBC_CLK(pwm1, PXA910_PWM1, 1, 13000000);
91static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
92static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
93static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
94static APBC_CLK(gpio, PXA910_GPIO, 0, 13000000);
95static APBC_CLK(rtc, PXA910_RTC, 8, 32768);
96
97static APMU_CLK(nand, NAND, 0x19b, 156000000);
98static APMU_CLK(u2o, USB, 0x1b, 480000000);
99
100/* device and clock bindings */
101static struct clk_lookup pxa910_clkregs[] = {
102 INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
103 INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
104 INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
105 INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
106 INIT_CLKREG(&clk_pwm1, "pxa910-pwm.0", NULL),
107 INIT_CLKREG(&clk_pwm2, "pxa910-pwm.1", NULL),
108 INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL),
109 INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL),
110 INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
111 INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
112 INIT_CLKREG(&clk_u2o, NULL, "U2OCLK"),
113 INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
114};
115
116static int __init pxa910_init(void)
117{
118 if (cpu_is_pxa910()) {
119 mfp_init_base(MFPR_VIRT_BASE);
120 mfp_init_addr(pxa910_mfp_addr_map);
121 pxa_init_dma(IRQ_PXA910_DMA_INT0, 32);
122 clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs)); 89 pxa910_clk_init();
123 }
124
125 return 0;
@@ -128,12 +95,13 @@ postcore_initcall(pxa910_init);
128
129/* system timer - clock enabled, 3.25MHz */
130#define TIMER_CLK_RST (APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(3))
98#define APBC_TIMERS APBC_REG(0x34)
131
132static void __init pxa910_timer_init(void)
133{
134 /* reset and configure */
135 __raw_writel(APBC_APBCLK | APBC_RST, APBC_PXA910_TIMERS); 103 __raw_writel(APBC_APBCLK | APBC_RST, APBC_TIMERS);
136 __raw_writel(TIMER_CLK_RST, APBC_PXA910_TIMERS); 104 __raw_writel(TIMER_CLK_RST, APBC_TIMERS);
137
138 timer_init(IRQ_PXA910_AP1_TIMER1);
139}
diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
index 4304f9519372..7e8a5a2e1ec7 100644
--- a/arch/arm/mach-mmp/sram.c
+++ b/arch/arm/mach-mmp/sram.c
@@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev)
68 struct resource *res;
69 int ret = 0;
70
71 if (!pdata && !pdata->pool_name) 71 if (!pdata || !pdata->pool_name)
72 return -ENODEV;
73
74 info = kzalloc(sizeof(*info), GFP_KERNEL);
diff --git a/arch/arm/mach-mv78xx0/addr-map.c b/arch/arm/mach-mv78xx0/addr-map.c
index 7764d9386f2a..137e479d15a0 100644
--- a/arch/arm/mach-mv78xx0/addr-map.c
+++ b/arch/arm/mach-mv78xx0/addr-map.c
@@ -38,7 +38,7 @@
38#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4)) 38#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
39#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4)) 39#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
40 40
41static void __init __iomem *win_cfg_base(int win) 41static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
42{ 42{
43 /* 43 /*
44 * Find the control register base address for this window. 44 * Find the control register base address for this window.
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 20826449e61b..6b0c38735527 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -208,7 +208,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
208{ 208{
209 orion_ge00_init(eth_data, 209 orion_ge00_init(eth_data,
210 GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM, 210 GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
211 IRQ_MV78XX0_GE_ERR); 211 IRQ_MV78XX0_GE_ERR,
212 MV643XX_TX_CSUM_DEFAULT_LIMIT);
212} 213}
213 214
214 215
@@ -219,7 +220,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
219{ 220{
220 orion_ge01_init(eth_data, 221 orion_ge01_init(eth_data,
221 GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM, 222 GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
222 NO_IRQ); 223 NO_IRQ,
224 MV643XX_TX_CSUM_DEFAULT_LIMIT);
223} 225}
224 226
225 227
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index dd2db025f778..fcd4e85c4ddc 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,13 +62,14 @@ config ARCH_OMAP4
62 select PM_OPP if PM 62 select PM_OPP if PM
63 select USB_ARCH_HAS_EHCI if USB_SUPPORT 63 select USB_ARCH_HAS_EHCI if USB_SUPPORT
64 select ARM_CPU_SUSPEND if PM 64 select ARM_CPU_SUSPEND if PM
65 select ARCH_NEEDS_CPU_IDLE_COUPLED 65 select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
66 66
67config SOC_OMAP5 67config SOC_OMAP5
68 bool "TI OMAP5" 68 bool "TI OMAP5"
69 select CPU_V7 69 select CPU_V7
70 select ARM_GIC 70 select ARM_GIC
71 select HAVE_SMP 71 select HAVE_SMP
72 select ARM_CPU_SUSPEND if PM
72 73
73comment "OMAP Core Type" 74comment "OMAP Core Type"
74 depends on ARCH_OMAP2 75 depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 74915295482e..28214483aaba 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
554 554
555#ifdef CONFIG_OMAP_MUX 555#ifdef CONFIG_OMAP_MUX
556static struct omap_board_mux board_mux[] __initdata = { 556static struct omap_board_mux board_mux[] __initdata = {
557 /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
558 OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
557 { .reg_offset = OMAP_MUX_TERMINATOR }, 559 { .reg_offset = OMAP_MUX_TERMINATOR },
558}; 560};
559#endif 561#endif
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ef230a0eb5eb..0d362e9f9cb9 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -58,6 +58,7 @@
58#include "hsmmc.h" 58#include "hsmmc.h"
59#include "common-board-devices.h" 59#include "common-board-devices.h"
60 60
61#define OMAP3_EVM_TS_GPIO 175
61#define OMAP3_EVM_EHCI_VBUS 22 62#define OMAP3_EVM_EHCI_VBUS 22
62#define OMAP3_EVM_EHCI_SELECT 61 63#define OMAP3_EVM_EHCI_SELECT 61
63 64
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 14734746457c..c1875862679f 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
35 .turbo_mode = 0, 35 .turbo_mode = 0,
36}; 36};
37 37
38/*
39 * ADS7846 driver maybe request a gpio according to the value
40 * of pdata->get_pendown_state, but we have done this. So set
41 * get_pendown_state to avoid twice gpio requesting.
42 */
43static int omap3_get_pendown_state(void)
44{
45 return !gpio_get_value(OMAP3_EVM_TS_GPIO);
46}
47
48static struct ads7846_platform_data ads7846_config = { 38static struct ads7846_platform_data ads7846_config = {
49 .x_max = 0x0fff, 39 .x_max = 0x0fff,
50 .y_max = 0x0fff, 40 .y_max = 0x0fff,
@@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = {
55 .debounce_rep = 1, 45 .debounce_rep = 1,
56 .gpio_pendown = -EINVAL, 46 .gpio_pendown = -EINVAL,
57 .keep_vref_on = 1, 47 .keep_vref_on = 1,
58 .get_pendown_state = &omap3_get_pendown_state,
59}; 48};
60 49
61static struct spi_board_info ads7846_spi_board_info __initdata = { 50static struct spi_board_info ads7846_spi_board_info __initdata = {
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 4c4ef6a6166b..a0b4a42836ab 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -4,7 +4,6 @@
4#include "twl-common.h" 4#include "twl-common.h"
5 5
6#define NAND_BLOCK_SIZE SZ_128K 6#define NAND_BLOCK_SIZE SZ_128K
7#define OMAP3_EVM_TS_GPIO 175
8 7
9struct mtd_partition; 8struct mtd_partition;
10struct ads7846_platform_data; 9struct ads7846_platform_data;
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index ee05e193fc61..288bee6cbb76 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -238,8 +238,9 @@ int __init omap4_idle_init(void)
238 for_each_cpu(cpu_id, cpu_online_mask) { 238 for_each_cpu(cpu_id, cpu_online_mask) {
239 dev = &per_cpu(omap4_idle_dev, cpu_id); 239 dev = &per_cpu(omap4_idle_dev, cpu_id);
240 dev->cpu = cpu_id; 240 dev->cpu = cpu_id;
241#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
241 dev->coupled_cpus = *cpu_online_mask; 242 dev->coupled_cpus = *cpu_online_mask;
242 243#endif
243 cpuidle_register_driver(&omap4_idle_driver); 244 cpuidle_register_driver(&omap4_idle_driver);
244 245
245 if (cpuidle_register_device(dev)) { 246 if (cpuidle_register_device(dev)) {
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 471e62a74a16..76f9b3c2f586 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -127,7 +127,6 @@ struct omap_mux_partition {
127 * @gpio: GPIO number 127 * @gpio: GPIO number
128 * @muxnames: available signal modes for a ball 128 * @muxnames: available signal modes for a ball
129 * @balls: available balls on the package 129 * @balls: available balls on the package
130 * @partition: mux partition
131 */ 130 */
132struct omap_mux { 131struct omap_mux {
133 u16 reg_offset; 132 u16 reg_offset;
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index 2293ba27101b..c95415da23c2 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -94,7 +94,7 @@ int __init omap4_opp_init(void)
94{ 94{
95 int r = -ENODEV; 95 int r = -ENODEV;
96 96
97 if (!cpu_is_omap44xx()) 97 if (!cpu_is_omap443x())
98 return r; 98 return r;
99 99
100 r = omap_init_opp_table(omap44xx_opp_def_list, 100 r = omap_init_opp_table(omap44xx_opp_def_list,
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index e4fc88c65dbd..05bd8f02723f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -272,21 +272,16 @@ void omap_sram_idle(void)
272 per_next_state = pwrdm_read_next_pwrst(per_pwrdm); 272 per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
273 core_next_state = pwrdm_read_next_pwrst(core_pwrdm); 273 core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
274 274
275 if (mpu_next_state < PWRDM_POWER_ON) { 275 pwrdm_pre_transition(NULL);
276 pwrdm_pre_transition(mpu_pwrdm);
277 pwrdm_pre_transition(neon_pwrdm);
278 }
279 276
280 /* PER */ 277 /* PER */
281 if (per_next_state < PWRDM_POWER_ON) { 278 if (per_next_state < PWRDM_POWER_ON) {
282 pwrdm_pre_transition(per_pwrdm);
283 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0; 279 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
284 omap2_gpio_prepare_for_idle(per_going_off); 280 omap2_gpio_prepare_for_idle(per_going_off);
285 } 281 }
286 282
287 /* CORE */ 283 /* CORE */
288 if (core_next_state < PWRDM_POWER_ON) { 284 if (core_next_state < PWRDM_POWER_ON) {
289 pwrdm_pre_transition(core_pwrdm);
290 if (core_next_state == PWRDM_POWER_OFF) { 285 if (core_next_state == PWRDM_POWER_OFF) {
291 omap3_core_save_context(); 286 omap3_core_save_context();
292 omap3_cm_save_context(); 287 omap3_cm_save_context();
@@ -339,20 +334,14 @@ void omap_sram_idle(void)
339 omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK, 334 omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
340 OMAP3430_GR_MOD, 335 OMAP3430_GR_MOD,
341 OMAP3_PRM_VOLTCTRL_OFFSET); 336 OMAP3_PRM_VOLTCTRL_OFFSET);
342 pwrdm_post_transition(core_pwrdm);
343 } 337 }
344 omap3_intc_resume_idle(); 338 omap3_intc_resume_idle();
345 339
340 pwrdm_post_transition(NULL);
341
346 /* PER */ 342 /* PER */
347 if (per_next_state < PWRDM_POWER_ON) { 343 if (per_next_state < PWRDM_POWER_ON)
348 omap2_gpio_resume_after_idle(); 344 omap2_gpio_resume_after_idle();
349 pwrdm_post_transition(per_pwrdm);
350 }
351
352 if (mpu_next_state < PWRDM_POWER_ON) {
353 pwrdm_post_transition(mpu_pwrdm);
354 pwrdm_post_transition(neon_pwrdm);
355 }
356} 345}
357 346
358static void omap3_pm_idle(void) 347static void omap3_pm_idle(void)
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9f6b83d1b193..91e71d8f46f0 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -56,9 +56,13 @@ ppa_por_params:
56 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. 56 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
57 * It returns to the caller for CPU INACTIVE and ON power states or in case 57 * It returns to the caller for CPU INACTIVE and ON power states or in case
58 * CPU failed to transition to targeted OFF/DORMANT state. 58 * CPU failed to transition to targeted OFF/DORMANT state.
59 *
60 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
61 * stack frame and it expects the caller to take care of it. Hence the entire
62 * stack frame is saved to avoid possible stack corruption.
59 */ 63 */
60ENTRY(omap4_finish_suspend) 64ENTRY(omap4_finish_suspend)
61 stmfd sp!, {lr} 65 stmfd sp!, {r4-r12, lr}
62 cmp r0, #0x0 66 cmp r0, #0x0
63 beq do_WFI @ No lowpower state, jump to WFI 67 beq do_WFI @ No lowpower state, jump to WFI
64 68
@@ -226,7 +230,7 @@ scu_gp_clear:
226skip_scu_gp_clear: 230skip_scu_gp_clear:
227 isb 231 isb
228 dsb 232 dsb
229 ldmfd sp!, {pc} 233 ldmfd sp!, {r4-r12, pc}
230ENDPROC(omap4_finish_suspend) 234ENDPROC(omap4_finish_suspend)
231 235
232/* 236/*
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index de47f170ba50..db5ff6642375 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
67 const char *pmic_type, int pmic_irq, 67 const char *pmic_type, int pmic_irq,
68 struct twl4030_platform_data *pmic_data) 68 struct twl4030_platform_data *pmic_data)
69{ 69{
70 omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
70 strncpy(pmic_i2c_board_info.type, pmic_type, 71 strncpy(pmic_i2c_board_info.type, pmic_type,
71 sizeof(pmic_i2c_board_info.type)); 72 sizeof(pmic_i2c_board_info.type));
72 pmic_i2c_board_info.irq = pmic_irq; 73 pmic_i2c_board_info.irq = pmic_irq;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 70f7d712d6f4..87a6cdabcad5 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -99,7 +99,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
99{ 99{
100 orion_ge00_init(eth_data, 100 orion_ge00_init(eth_data,
101 ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM, 101 ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
102 IRQ_ORION5X_ETH_ERR); 102 IRQ_ORION5X_ETH_ERR,
103 MV643XX_TX_CSUM_DEFAULT_LIMIT);
103} 104}
104 105
105 106
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index 454831b66037..ee99fd56c043 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -24,7 +24,8 @@
24*/ 24*/
25 25
26enum dma_ch { 26enum dma_ch {
27 DMACH_XD0, 27 DMACH_DT_PROP = -1, /* not yet supported, do not use */
28 DMACH_XD0 = 0,
28 DMACH_XD1, 29 DMACH_XD1,
29 DMACH_SDI, 30 DMACH_SDI,
30 DMACH_SPI0, 31 DMACH_SPI0,
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c013bbf79cac..53d3d46dec12 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -41,7 +41,6 @@ config MACH_HREFV60
41config MACH_SNOWBALL 41config MACH_SNOWBALL
42 bool "U8500 Snowball platform" 42 bool "U8500 Snowball platform"
43 select MACH_MOP500 43 select MACH_MOP500
44 select LEDS_GPIO
45 help 44 help
46 Include support for the snowball development platform. 45 Include support for the snowball development platform.
47 46
diff --git a/arch/arm/mach-ux500/board-mop500-msp.c b/arch/arm/mach-ux500/board-mop500-msp.c
index 996048038743..df15646036aa 100644
--- a/arch/arm/mach-ux500/board-mop500-msp.c
+++ b/arch/arm/mach-ux500/board-mop500-msp.c
@@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent,
191 return pdev; 191 return pdev;
192} 192}
193 193
194/* Platform device for ASoC U8500 machine */ 194/* Platform device for ASoC MOP500 machine */
195static struct platform_device snd_soc_u8500 = { 195static struct platform_device snd_soc_mop500 = {
196 .name = "snd-soc-u8500", 196 .name = "snd-soc-mop500",
197 .id = 0, 197 .id = 0,
198 .dev = { 198 .dev = {
199 .platform_data = NULL, 199 .platform_data = NULL,
@@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent)
227{ 227{
228 struct platform_device *msp1; 228 struct platform_device *msp1;
229 229
230 pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__); 230 pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__);
231 platform_device_register(&snd_soc_u8500); 231 platform_device_register(&snd_soc_mop500);
232 232
233 pr_info("Initialize MSP I2S-devices.\n"); 233 pr_info("Initialize MSP I2S-devices.\n");
234 db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0, 234 db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0,
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 8674a890fd1c..a534d8880de1 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -797,6 +797,7 @@ static void __init u8500_init_machine(void)
797 ARRAY_SIZE(mop500_platform_devs)); 797 ARRAY_SIZE(mop500_platform_devs));
798 798
799 mop500_sdi_init(parent); 799 mop500_sdi_init(parent);
800 mop500_msp_init(parent);
800 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); 801 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
801 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); 802 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
802 i2c_register_board_info(2, mop500_i2c2_devices, 803 i2c_register_board_info(2, mop500_i2c2_devices,
@@ -804,6 +805,8 @@ static void __init u8500_init_machine(void)
804 805
805 mop500_uib_init(); 806 mop500_uib_init();
806 807
808 } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
809 mop500_msp_init(parent);
807 } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { 810 } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
808 /* 811 /*
809 * The HREFv60 board removed a GPIO expander and routed 812 * The HREFv60 board removed a GPIO expander and routed
@@ -815,6 +818,7 @@ static void __init u8500_init_machine(void)
815 ARRAY_SIZE(mop500_platform_devs)); 818 ARRAY_SIZE(mop500_platform_devs));
816 819
817 hrefv60_sdi_init(parent); 820 hrefv60_sdi_init(parent);
821 mop500_msp_init(parent);
818 822
819 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); 823 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
820 i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; 824 i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 626ad8cad7a9..938b50a33439 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)
189 timer->reserved = 1; 189 timer->reserved = 1;
190 break; 190 break;
191 } 191 }
192 spin_unlock_irqrestore(&dm_timer_lock, flags);
192 193
193 if (timer) { 194 if (timer) {
194 ret = omap_dm_timer_prepare(timer); 195 ret = omap_dm_timer_prepare(timer);
@@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)
197 timer = NULL; 198 timer = NULL;
198 } 199 }
199 } 200 }
200 spin_unlock_irqrestore(&dm_timer_lock, flags);
201 201
202 if (!timer) 202 if (!timer)
203 pr_debug("%s: timer request failed!\n", __func__); 203 pr_debug("%s: timer request failed!\n", __func__);
@@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
220 break; 220 break;
221 } 221 }
222 } 222 }
223 spin_unlock_irqrestore(&dm_timer_lock, flags);
223 224
224 if (timer) { 225 if (timer) {
225 ret = omap_dm_timer_prepare(timer); 226 ret = omap_dm_timer_prepare(timer);
@@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
228 timer = NULL; 229 timer = NULL;
229 } 230 }
230 } 231 }
231 spin_unlock_irqrestore(&dm_timer_lock, flags);
232 232
233 if (!timer) 233 if (!timer)
234 pr_debug("%s: timer%d request failed!\n", __func__, id); 234 pr_debug("%s: timer%d request failed!\n", __func__, id);
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
258 258
259void omap_dm_timer_disable(struct omap_dm_timer *timer) 259void omap_dm_timer_disable(struct omap_dm_timer *timer)
260{ 260{
261 pm_runtime_put(&timer->pdev->dev); 261 pm_runtime_put_sync(&timer->pdev->dev);
262} 262}
263EXPORT_SYMBOL_GPL(omap_dm_timer_disable); 263EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
264 264
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 68b180edcfff..bb5d08a70dbc 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430)
372#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \ 372#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \
373 cpu_is_omap16xx()) 373 cpu_is_omap16xx())
374#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \ 374#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
375 cpu_is_omap44xx() || soc_is_omap54xx()) 375 cpu_is_omap44xx() || soc_is_omap54xx() || \
376 soc_is_am33xx())
376 377
377/* Various silicon revisions for omap2 */ 378/* Various silicon revisions for omap2 */
378#define OMAP242X_CLASS 0x24200024 379#define OMAP242X_CLASS 0x24200024
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index 045e320f1067..324d31b14852 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -108,4 +108,13 @@
108# endif 108# endif
109#endif 109#endif
110 110
111#ifdef CONFIG_SOC_AM33XX
112# ifdef OMAP_NAME
113# undef MULTI_OMAP2
114# define MULTI_OMAP2
115# else
116# define OMAP_NAME am33xx
117# endif
118#endif
119
111#endif /* __PLAT_OMAP_MULTI_H */ 120#endif /* __PLAT_OMAP_MULTI_H */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index b8d19a136781..7f7b112acccb 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -110,7 +110,7 @@ static inline void flush(void)
110 _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \ 110 _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
111 AM33XXUART##p) 111 AM33XXUART##p)
112 112
113static inline void __arch_decomp_setup(unsigned long arch_id) 113static inline void arch_decomp_setup(void)
114{ 114{
115 int port = 0; 115 int port = 0;
116 116
@@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
198 } while (0); 198 } while (0);
199} 199}
200 200
201#define arch_decomp_setup() __arch_decomp_setup(arch_id)
202
203/* 201/*
204 * nothing to do 202 * nothing to do
205 */ 203 */
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index d245a87dc014..b8b747a9d360 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -291,10 +291,12 @@ static struct platform_device orion_ge00 = {
291void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, 291void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
292 unsigned long mapbase, 292 unsigned long mapbase,
293 unsigned long irq, 293 unsigned long irq,
294 unsigned long irq_err) 294 unsigned long irq_err,
295 unsigned int tx_csum_limit)
295{ 296{
296 fill_resources(&orion_ge00_shared, orion_ge00_shared_resources, 297 fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
297 mapbase + 0x2000, SZ_16K - 1, irq_err); 298 mapbase + 0x2000, SZ_16K - 1, irq_err);
299 orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
298 ge_complete(&orion_ge00_shared_data, 300 ge_complete(&orion_ge00_shared_data,
299 orion_ge00_resources, irq, &orion_ge00_shared, 301 orion_ge00_resources, irq, &orion_ge00_shared,
300 eth_data, &orion_ge00); 302 eth_data, &orion_ge00);
@@ -343,10 +345,12 @@ static struct platform_device orion_ge01 = {
343void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, 345void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
344 unsigned long mapbase, 346 unsigned long mapbase,
345 unsigned long irq, 347 unsigned long irq,
346 unsigned long irq_err) 348 unsigned long irq_err,
349 unsigned int tx_csum_limit)
347{ 350{
348 fill_resources(&orion_ge01_shared, orion_ge01_shared_resources, 351 fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
349 mapbase + 0x2000, SZ_16K - 1, irq_err); 352 mapbase + 0x2000, SZ_16K - 1, irq_err);
353 orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
350 ge_complete(&orion_ge01_shared_data, 354 ge_complete(&orion_ge01_shared_data,
351 orion_ge01_resources, irq, &orion_ge01_shared, 355 orion_ge01_resources, irq, &orion_ge01_shared,
352 eth_data, &orion_ge01); 356 eth_data, &orion_ge01);
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index e00fdb213609..ae2377ef63e5 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase,
39void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, 39void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
40 unsigned long mapbase, 40 unsigned long mapbase,
41 unsigned long irq, 41 unsigned long irq,
42 unsigned long irq_err); 42 unsigned long irq_err,
43 unsigned int tx_csum_limit);
43 44
44void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, 45void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
45 unsigned long mapbase, 46 unsigned long mapbase,
46 unsigned long irq, 47 unsigned long irq,
47 unsigned long irq_err); 48 unsigned long irq_err,
49 unsigned int tx_csum_limit);
48 50
49void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data, 51void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
50 unsigned long mapbase, 52 unsigned long mapbase,
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 28f898f75380..db98e7021f0d 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
430 * when necessary. 430 * when necessary.
431*/ 431*/
432 432
433int s3c2410_dma_enqueue(unsigned int channel, void *id, 433int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
434 dma_addr_t data, int size) 434 dma_addr_t data, int size)
435{ 435{
436 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 436 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 8154fab70de8..6ff45d53362c 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -32,6 +32,8 @@
32#include <linux/platform_data/s3c-hsudc.h> 32#include <linux/platform_data/s3c-hsudc.h>
33#include <linux/platform_data/s3c-hsotg.h> 33#include <linux/platform_data/s3c-hsotg.h>
34 34
35#include <media/s5p_hdmi.h>
36
35#include <asm/irq.h> 37#include <asm/irq.h>
36#include <asm/mach/arch.h> 38#include <asm/mach/arch.h>
37#include <asm/mach/map.h> 39#include <asm/mach/map.h>
@@ -747,7 +749,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
747 if (!pd) { 749 if (!pd) {
748 pd = &default_i2c_data; 750 pd = &default_i2c_data;
749 751
750 if (soc_is_exynos4210()) 752 if (soc_is_exynos4210() ||
753 soc_is_exynos4212() || soc_is_exynos4412())
751 pd->bus_num = 8; 754 pd->bus_num = 8;
752 else if (soc_is_s5pv210()) 755 else if (soc_is_s5pv210())
753 pd->bus_num = 3; 756 pd->bus_num = 3;
@@ -758,6 +761,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
758 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), 761 npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
759 &s5p_device_i2c_hdmiphy); 762 &s5p_device_i2c_hdmiphy);
760} 763}
764
765struct s5p_hdmi_platform_data s5p_hdmi_def_platdata;
766
767void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
768 struct i2c_board_info *mhl_info, int mhl_bus)
769{
770 struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata;
771
772 if (soc_is_exynos4210() ||
773 soc_is_exynos4212() || soc_is_exynos4412())
774 pd->hdmiphy_bus = 8;
775 else if (soc_is_s5pv210())
776 pd->hdmiphy_bus = 3;
777 else
778 pd->hdmiphy_bus = 0;
779
780 pd->hdmiphy_info = hdmiphy_info;
781 pd->mhl_info = mhl_info;
782 pd->mhl_bus = mhl_bus;
783
784 s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data),
785 &s5p_device_hdmi);
786}
787
761#endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */ 788#endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */
762 789
763/* I2S */ 790/* I2S */
diff --git a/arch/arm/plat-samsung/include/plat/hdmi.h b/arch/arm/plat-samsung/include/plat/hdmi.h
new file mode 100644
index 000000000000..331d046ac2c5
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/hdmi.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10#ifndef __PLAT_SAMSUNG_HDMI_H
11#define __PLAT_SAMSUNG_HDMI_H __FILE__
12
13extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
14 struct i2c_board_info *mhl_info, int mhl_bus);
15
16#endif /* __PLAT_SAMSUNG_HDMI_H */
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 64ab65f0fdbc..15070284343e 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot;
74 74
75#ifdef CONFIG_SAMSUNG_PM_DEBUG 75#ifdef CONFIG_SAMSUNG_PM_DEBUG
76 76
77struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; 77static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
78 78
79static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) 79static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
80{ 80{
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 331d574df99c..faf65286574e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -89,6 +89,7 @@ config ATH79
89 select CEVT_R4K 89 select CEVT_R4K
90 select CSRC_R4K 90 select CSRC_R4K
91 select DMA_NONCOHERENT 91 select DMA_NONCOHERENT
92 select HAVE_CLK
92 select IRQ_CPU 93 select IRQ_CPU
93 select MIPS_MACHINE 94 select MIPS_MACHINE
94 select SYS_HAS_CPU_MIPS32_R2 95 select SYS_HAS_CPU_MIPS32_R2
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 99969484c475..a124c251c0c9 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert)
228 * adapter on the mtx-1 "singleboard" variant. It triggers a custom 228 * adapter on the mtx-1 "singleboard" variant. It triggers a custom
229 * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. 229 * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
230 */ 230 */
231 udelay(1);
232
231 if (assert && devsel != 0) 233 if (assert && devsel != 0)
232 /* Suppress signal to Cardbus */ 234 /* Suppress signal to Cardbus */
233 alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ 235 alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
index 36e9570e7bc4..b2a2311ec85b 100644
--- a/arch/mips/ath79/dev-usb.c
+++ b/arch/mips/ath79/dev-usb.c
@@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void)
145 145
146 ath79_ohci_resources[0].start = AR7240_OHCI_BASE; 146 ath79_ohci_resources[0].start = AR7240_OHCI_BASE;
147 ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1; 147 ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1;
148 ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB;
149 ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB;
148 platform_device_register(&ath79_ohci_device); 150 platform_device_register(&ath79_ohci_device);
149} 151}
150 152
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index 29054f211832..48fe762d2526 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -188,8 +188,10 @@ void __init ath79_gpio_init(void)
188 188
189 if (soc_is_ar71xx()) 189 if (soc_is_ar71xx())
190 ath79_gpio_count = AR71XX_GPIO_COUNT; 190 ath79_gpio_count = AR71XX_GPIO_COUNT;
191 else if (soc_is_ar724x()) 191 else if (soc_is_ar7240())
192 ath79_gpio_count = AR724X_GPIO_COUNT; 192 ath79_gpio_count = AR7240_GPIO_COUNT;
193 else if (soc_is_ar7241() || soc_is_ar7242())
194 ath79_gpio_count = AR7241_GPIO_COUNT;
193 else if (soc_is_ar913x()) 195 else if (soc_is_ar913x())
194 ath79_gpio_count = AR913X_GPIO_COUNT; 196 ath79_gpio_count = AR913X_GPIO_COUNT;
195 else if (soc_is_ar933x()) 197 else if (soc_is_ar933x())
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e39f73048d4f..f1c9c3e2f678 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void)
106 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { 106 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
107 spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1; 107 spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
108 spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; 108 spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
109 spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
110 spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
109 } 111 }
110 112
111 if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { 113 if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
112 spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; 114 spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
113 spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; 115 spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
116 spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
117 spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
114 } 118 }
115 119
116 bcm63xx_spi_regs_init(); 120 bcm63xx_spi_regs_init();
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 7fb1f222b8a5..274cd4fad30c 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
61 octeon_irq_ciu_to_irq[line][bit] = irq; 61 octeon_irq_ciu_to_irq[line][bit] = irq;
62} 62}
63 63
64static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
65 int irq, int line, int bit)
66{
67 irq_domain_associate(domain, irq, line << 6 | bit);
68}
69
64static int octeon_coreid_for_cpu(int cpu) 70static int octeon_coreid_for_cpu(int cpu)
65{ 71{
66#ifdef CONFIG_SMP 72#ifdef CONFIG_SMP
@@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void)
183 mutex_init(&cd->core_irq_mutex); 189 mutex_init(&cd->core_irq_mutex);
184 190
185 irq = OCTEON_IRQ_SW0 + i; 191 irq = OCTEON_IRQ_SW0 + i;
186 switch (irq) { 192 irq_set_chip_data(irq, cd);
187 case OCTEON_IRQ_TIMER: 193 irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
188 case OCTEON_IRQ_SW0: 194 handle_percpu_irq);
189 case OCTEON_IRQ_SW1:
190 case OCTEON_IRQ_5:
191 case OCTEON_IRQ_PERF:
192 irq_set_chip_data(irq, cd);
193 irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
194 handle_percpu_irq);
195 break;
196 default:
197 break;
198 }
199 } 195 }
200} 196}
201 197
@@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
890 unsigned int type; 886 unsigned int type;
891 unsigned int pin; 887 unsigned int pin;
892 unsigned int trigger; 888 unsigned int trigger;
893 struct octeon_irq_gpio_domain_data *gpiod;
894 889
895 if (d->of_node != node) 890 if (d->of_node != node)
896 return -EINVAL; 891 return -EINVAL;
@@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
925 break; 920 break;
926 } 921 }
927 *out_type = type; 922 *out_type = type;
928 gpiod = d->host_data; 923 *out_hwirq = pin;
929 *out_hwirq = gpiod->base_hwirq + pin;
930 924
931 return 0; 925 return 0;
932} 926}
@@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
996static int octeon_irq_gpio_map(struct irq_domain *d, 990static int octeon_irq_gpio_map(struct irq_domain *d,
997 unsigned int virq, irq_hw_number_t hw) 991 unsigned int virq, irq_hw_number_t hw)
998{ 992{
999 unsigned int line = hw >> 6; 993 struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
1000 unsigned int bit = hw & 63; 994 unsigned int line, bit;
1001 995
1002 if (!octeon_irq_virq_in_range(virq)) 996 if (!octeon_irq_virq_in_range(virq))
1003 return -EINVAL; 997 return -EINVAL;
1004 998
999 hw += gpiod->base_hwirq;
1000 line = hw >> 6;
1001 bit = hw & 63;
1005 if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) 1002 if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
1006 return -EINVAL; 1003 return -EINVAL;
1007 1004
1008 octeon_irq_set_ciu_mapping(virq, line, bit, 1005 octeon_irq_set_ciu_mapping(virq, line, bit,
1009 octeon_irq_gpio_chip, 1006 octeon_irq_gpio_chip,
1010 octeon_irq_handle_gpio); 1007 octeon_irq_handle_gpio);
1011
1012 return 0; 1008 return 0;
1013} 1009}
1014 1010
@@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void)
1149 struct irq_chip *chip_wd; 1145 struct irq_chip *chip_wd;
1150 struct device_node *gpio_node; 1146 struct device_node *gpio_node;
1151 struct device_node *ciu_node; 1147 struct device_node *ciu_node;
1148 struct irq_domain *ciu_domain = NULL;
1152 1149
1153 octeon_irq_init_ciu_percpu(); 1150 octeon_irq_init_ciu_percpu();
1154 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; 1151 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
@@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void)
1177 /* Mips internal */ 1174 /* Mips internal */
1178 octeon_irq_init_core(); 1175 octeon_irq_init_core();
1179 1176
1180 /* CIU_0 */
1181 for (i = 0; i < 16; i++)
1182 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
1183
1184 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
1185 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
1186
1187 for (i = 0; i < 4; i++)
1188 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
1189 for (i = 0; i < 4; i++)
1190 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
1191
1192 octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
1193 for (i = 0; i < 4; i++)
1194 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq);
1195
1196 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
1197 octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
1198
1199 /* CIU_1 */
1200 for (i = 0; i < 16; i++)
1201 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
1202
1203 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
1204
1205 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 1177 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
1206 if (gpio_node) { 1178 if (gpio_node) {
1207 struct octeon_irq_gpio_domain_data *gpiod; 1179 struct octeon_irq_gpio_domain_data *gpiod;
@@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void)
1219 1191
1220 ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); 1192 ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
1221 if (ciu_node) { 1193 if (ciu_node) {
1222 irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); 1194 ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
1223 of_node_put(ciu_node); 1195 of_node_put(ciu_node);
1224 } else 1196 } else
1225 pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n"); 1197 panic("Cannot find device node for cavium,octeon-3860-ciu.");
1198
1199 /* CIU_0 */
1200 for (i = 0; i < 16; i++)
1201 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
1202
1203 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
1204 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
1205
1206 for (i = 0; i < 4; i++)
1207 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
1208 for (i = 0; i < 4; i++)
1209 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
1210
1211 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
1212 for (i = 0; i < 4; i++)
1213 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
1214
1215 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
1216 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
1217
1218 /* CIU_1 */
1219 for (i = 0; i < 16; i++)
1220 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
1221
1222 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
1226 1223
1227 /* Enable the CIU lines */ 1224 /* Enable the CIU lines */
1228 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1225 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 1caa78ad06d5..dde504477fac 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -393,7 +393,8 @@
393#define AR71XX_GPIO_REG_FUNC 0x28 393#define AR71XX_GPIO_REG_FUNC 0x28
394 394
395#define AR71XX_GPIO_COUNT 16 395#define AR71XX_GPIO_COUNT 16
396#define AR724X_GPIO_COUNT 18 396#define AR7240_GPIO_COUNT 18
397#define AR7241_GPIO_COUNT 20
397#define AR913X_GPIO_COUNT 22 398#define AR913X_GPIO_COUNT 22
398#define AR933X_GPIO_COUNT 30 399#define AR933X_GPIO_COUNT 30
399#define AR934X_GPIO_COUNT 23 400#define AR934X_GPIO_COUNT 23
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 4476fa03bf36..6ddae926bf79 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -42,7 +42,6 @@
42#define cpu_has_mips64r1 0 42#define cpu_has_mips64r1 0
43#define cpu_has_mips64r2 0 43#define cpu_has_mips64r2 0
44 44
45#define cpu_has_dsp 0
46#define cpu_has_mipsmt 0 45#define cpu_has_mipsmt 0
47 46
48#define cpu_has_64bits 0 47#define cpu_has_64bits 0
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index 7d98dbe5d4b5..c9bae1362606 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void);
9 9
10struct bcm63xx_spi_pdata { 10struct bcm63xx_spi_pdata {
11 unsigned int fifo_size; 11 unsigned int fifo_size;
12 unsigned int msg_type_shift;
13 unsigned int msg_ctl_width;
12 int bus_num; 14 int bus_num;
13 int num_chipselect; 15 int num_chipselect;
14 u32 speed_hz; 16 u32 speed_hz;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 4ccc2a748aff..61f2a2a5099d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -1054,7 +1054,8 @@
1054#define SPI_6338_FILL_BYTE 0x07 1054#define SPI_6338_FILL_BYTE 0x07
1055#define SPI_6338_MSG_TAIL 0x09 1055#define SPI_6338_MSG_TAIL 0x09
1056#define SPI_6338_RX_TAIL 0x0b 1056#define SPI_6338_RX_TAIL 0x0b
1057#define SPI_6338_MSG_CTL 0x40 1057#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */
1058#define SPI_6338_MSG_CTL_WIDTH 8
1058#define SPI_6338_MSG_DATA 0x41 1059#define SPI_6338_MSG_DATA 0x41
1059#define SPI_6338_MSG_DATA_SIZE 0x3f 1060#define SPI_6338_MSG_DATA_SIZE 0x3f
1060#define SPI_6338_RX_DATA 0x80 1061#define SPI_6338_RX_DATA 0x80
@@ -1070,7 +1071,8 @@
1070#define SPI_6348_FILL_BYTE 0x07 1071#define SPI_6348_FILL_BYTE 0x07
1071#define SPI_6348_MSG_TAIL 0x09 1072#define SPI_6348_MSG_TAIL 0x09
1072#define SPI_6348_RX_TAIL 0x0b 1073#define SPI_6348_RX_TAIL 0x0b
1073#define SPI_6348_MSG_CTL 0x40 1074#define SPI_6348_MSG_CTL 0x40 /* 8-bits register */
1075#define SPI_6348_MSG_CTL_WIDTH 8
1074#define SPI_6348_MSG_DATA 0x41 1076#define SPI_6348_MSG_DATA 0x41
1075#define SPI_6348_MSG_DATA_SIZE 0x3f 1077#define SPI_6348_MSG_DATA_SIZE 0x3f
1076#define SPI_6348_RX_DATA 0x80 1078#define SPI_6348_RX_DATA 0x80
@@ -1078,6 +1080,7 @@
1078 1080
1079/* BCM 6358 SPI core */ 1081/* BCM 6358 SPI core */
1080#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ 1082#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */
1083#define SPI_6358_MSG_CTL_WIDTH 16
1081#define SPI_6358_MSG_DATA 0x02 1084#define SPI_6358_MSG_DATA 0x02
1082#define SPI_6358_MSG_DATA_SIZE 0x21e 1085#define SPI_6358_MSG_DATA_SIZE 0x21e
1083#define SPI_6358_RX_DATA 0x400 1086#define SPI_6358_RX_DATA 0x400
@@ -1094,6 +1097,7 @@
1094 1097
1095/* BCM 6358 SPI core */ 1098/* BCM 6358 SPI core */
1096#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */ 1099#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */
1100#define SPI_6368_MSG_CTL_WIDTH 16
1097#define SPI_6368_MSG_DATA 0x02 1101#define SPI_6368_MSG_DATA 0x02
1098#define SPI_6368_MSG_DATA_SIZE 0x21e 1102#define SPI_6368_MSG_DATA_SIZE 0x21e
1099#define SPI_6368_RX_DATA 0x400 1103#define SPI_6368_RX_DATA 0x400
@@ -1115,7 +1119,10 @@
1115#define SPI_HD_W 0x01 1119#define SPI_HD_W 0x01
1116#define SPI_HD_R 0x02 1120#define SPI_HD_R 0x02
1117#define SPI_BYTE_CNT_SHIFT 0 1121#define SPI_BYTE_CNT_SHIFT 0
1118#define SPI_MSG_TYPE_SHIFT 14 1122#define SPI_6338_MSG_TYPE_SHIFT 6
1123#define SPI_6348_MSG_TYPE_SHIFT 6
1124#define SPI_6358_MSG_TYPE_SHIFT 14
1125#define SPI_6368_MSG_TYPE_SHIFT 14
1119 1126
1120/* Command */ 1127/* Command */
1121#define SPI_CMD_NOOP 0x00 1128#define SPI_CMD_NOOP 0x00
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 418992042f6f..c22a3078bf11 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -21,14 +21,10 @@ enum octeon_irq {
21 OCTEON_IRQ_TIMER, 21 OCTEON_IRQ_TIMER,
22/* sources in CIU_INTX_EN0 */ 22/* sources in CIU_INTX_EN0 */
23 OCTEON_IRQ_WORKQ0, 23 OCTEON_IRQ_WORKQ0,
24 OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16, 24 OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16,
25 OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
26 OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15, 25 OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
27 OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16, 26 OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
28 OCTEON_IRQ_MBOX1, 27 OCTEON_IRQ_MBOX1,
29 OCTEON_IRQ_UART0,
30 OCTEON_IRQ_UART1,
31 OCTEON_IRQ_UART2,
32 OCTEON_IRQ_PCI_INT0, 28 OCTEON_IRQ_PCI_INT0,
33 OCTEON_IRQ_PCI_INT1, 29 OCTEON_IRQ_PCI_INT1,
34 OCTEON_IRQ_PCI_INT2, 30 OCTEON_IRQ_PCI_INT2,
@@ -38,8 +34,6 @@ enum octeon_irq {
38 OCTEON_IRQ_PCI_MSI2, 34 OCTEON_IRQ_PCI_MSI2,
39 OCTEON_IRQ_PCI_MSI3, 35 OCTEON_IRQ_PCI_MSI3,
40 36
41 OCTEON_IRQ_TWSI,
42 OCTEON_IRQ_TWSI2,
43 OCTEON_IRQ_RML, 37 OCTEON_IRQ_RML,
44 OCTEON_IRQ_TIMER0, 38 OCTEON_IRQ_TIMER0,
45 OCTEON_IRQ_TIMER1, 39 OCTEON_IRQ_TIMER1,
@@ -47,8 +41,6 @@ enum octeon_irq {
47 OCTEON_IRQ_TIMER3, 41 OCTEON_IRQ_TIMER3,
48 OCTEON_IRQ_USB0, 42 OCTEON_IRQ_USB0,
49 OCTEON_IRQ_USB1, 43 OCTEON_IRQ_USB1,
50 OCTEON_IRQ_MII0,
51 OCTEON_IRQ_MII1,
52 OCTEON_IRQ_BOOTDMA, 44 OCTEON_IRQ_BOOTDMA,
53#ifndef CONFIG_PCI_MSI 45#ifndef CONFIG_PCI_MSI
54 OCTEON_IRQ_LAST = 127 46 OCTEON_IRQ_LAST = 127
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7531ecd654d6..dca8bce8c7ab 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -10,6 +10,7 @@ struct mod_arch_specific {
10 struct list_head dbe_list; 10 struct list_head dbe_list;
11 const struct exception_table_entry *dbe_start; 11 const struct exception_table_entry *dbe_start;
12 const struct exception_table_entry *dbe_end; 12 const struct exception_table_entry *dbe_end;
13 struct mips_hi16 *r_mips_hi16_list;
13}; 14};
14 15
15typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ 16typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h
index a37d12b3b61c..afe9e0e03fe9 100644
--- a/arch/mips/include/asm/r4k-timer.h
+++ b/arch/mips/include/asm/r4k-timer.h
@@ -12,16 +12,16 @@
12 12
13#ifdef CONFIG_SYNC_R4K 13#ifdef CONFIG_SYNC_R4K
14 14
15extern void synchronise_count_master(void); 15extern void synchronise_count_master(int cpu);
16extern void synchronise_count_slave(void); 16extern void synchronise_count_slave(int cpu);
17 17
18#else 18#else
19 19
20static inline void synchronise_count_master(void) 20static inline void synchronise_count_master(int cpu)
21{ 21{
22} 22}
23 23
24static inline void synchronise_count_slave(void) 24static inline void synchronise_count_slave(int cpu)
25{ 25{
26} 26}
27 27
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index a5066b1c3de3..4f8c3cba8c0c 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -39,8 +39,6 @@ struct mips_hi16 {
39 Elf_Addr value; 39 Elf_Addr value;
40}; 40};
41 41
42static struct mips_hi16 *mips_hi16_list;
43
44static LIST_HEAD(dbe_list); 42static LIST_HEAD(dbe_list);
45static DEFINE_SPINLOCK(dbe_lock); 43static DEFINE_SPINLOCK(dbe_lock);
46 44
@@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
128 126
129 n->addr = (Elf_Addr *)location; 127 n->addr = (Elf_Addr *)location;
130 n->value = v; 128 n->value = v;
131 n->next = mips_hi16_list; 129 n->next = me->arch.r_mips_hi16_list;
132 mips_hi16_list = n; 130 me->arch.r_mips_hi16_list = n;
133 131
134 return 0; 132 return 0;
135} 133}
@@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
142 return 0; 140 return 0;
143} 141}
144 142
143static void free_relocation_chain(struct mips_hi16 *l)
144{
145 struct mips_hi16 *next;
146
147 while (l) {
148 next = l->next;
149 kfree(l);
150 l = next;
151 }
152}
153
145static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) 154static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
146{ 155{
147 unsigned long insnlo = *location; 156 unsigned long insnlo = *location;
157 struct mips_hi16 *l;
148 Elf_Addr val, vallo; 158 Elf_Addr val, vallo;
149 159
150 /* Sign extend the addend we extract from the lo insn. */ 160 /* Sign extend the addend we extract from the lo insn. */
151 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; 161 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
152 162
153 if (mips_hi16_list != NULL) { 163 if (me->arch.r_mips_hi16_list != NULL) {
154 struct mips_hi16 *l; 164 l = me->arch.r_mips_hi16_list;
155
156 l = mips_hi16_list;
157 while (l != NULL) { 165 while (l != NULL) {
158 struct mips_hi16 *next; 166 struct mips_hi16 *next;
159 unsigned long insn; 167 unsigned long insn;
@@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
188 l = next; 196 l = next;
189 } 197 }
190 198
191 mips_hi16_list = NULL; 199 me->arch.r_mips_hi16_list = NULL;
192 } 200 }
193 201
194 /* 202 /*
@@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
201 return 0; 209 return 0;
202 210
203out_danger: 211out_danger:
212 free_relocation_chain(l);
213 me->arch.r_mips_hi16_list = NULL;
214
204 pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); 215 pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
205 216
206 return -ENOEXEC; 217 return -ENOEXEC;
@@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
273 pr_debug("Applying relocate section %u to %u\n", relsec, 284 pr_debug("Applying relocate section %u to %u\n", relsec,
274 sechdrs[relsec].sh_info); 285 sechdrs[relsec].sh_info);
275 286
287 me->arch.r_mips_hi16_list = NULL;
276 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 288 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
277 /* This is where to make the change */ 289 /* This is where to make the change */
278 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr 290 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
296 return res; 308 return res;
297 } 309 }
298 310
311 /*
312 * Normally the hi16 list should be deallocated at this point. A
313 * malformed binary however could contain a series of R_MIPS_HI16
314 * relocations not followed by a R_MIPS_LO16 relocation. In that
315 * case, free up the list and return an error.
316 */
317 if (me->arch.r_mips_hi16_list) {
318 free_relocation_chain(me->arch.r_mips_hi16_list);
319 me->arch.r_mips_hi16_list = NULL;
320
321 return -ENOEXEC;
322 }
323
299 return 0; 324 return 0;
300} 325}
301 326
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 31637d8c8738..9005bf9fb859 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)
130 130
131 cpu_set(cpu, cpu_callin_map); 131 cpu_set(cpu, cpu_callin_map);
132 132
133 synchronise_count_slave(); 133 synchronise_count_slave(cpu);
134 134
135 /* 135 /*
136 * irq will be enabled in ->smp_finish(), enabling it too early 136 * irq will be enabled in ->smp_finish(), enabling it too early
@@ -173,7 +173,6 @@ void smp_send_stop(void)
173void __init smp_cpus_done(unsigned int max_cpus) 173void __init smp_cpus_done(unsigned int max_cpus)
174{ 174{
175 mp_ops->cpus_done(); 175 mp_ops->cpus_done();
176 synchronise_count_master();
177} 176}
178 177
179/* called from main before smp_init() */ 178/* called from main before smp_init() */
@@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
206 while (!cpu_isset(cpu, cpu_callin_map)) 205 while (!cpu_isset(cpu, cpu_callin_map))
207 udelay(100); 206 udelay(100);
208 207
208 synchronise_count_master(cpu);
209 return 0; 209 return 0;
210} 210}
211 211
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 842d55e411fd..7f1eca3858de 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
28#define COUNTON 100 28#define COUNTON 100
29#define NR_LOOPS 5 29#define NR_LOOPS 5
30 30
31void __cpuinit synchronise_count_master(void) 31void __cpuinit synchronise_count_master(int cpu)
32{ 32{
33 int i; 33 int i;
34 unsigned long flags; 34 unsigned long flags;
35 unsigned int initcount; 35 unsigned int initcount;
36 int nslaves;
37 36
38#ifdef CONFIG_MIPS_MT_SMTC 37#ifdef CONFIG_MIPS_MT_SMTC
39 /* 38 /*
@@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)
43 return; 42 return;
44#endif 43#endif
45 44
46 printk(KERN_INFO "Synchronize counters across %u CPUs: ", 45 printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
47 num_online_cpus());
48 46
49 local_irq_save(flags); 47 local_irq_save(flags);
50 48
@@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)
52 * Notify the slaves that it's time to start 50 * Notify the slaves that it's time to start
53 */ 51 */
54 atomic_set(&count_reference, read_c0_count()); 52 atomic_set(&count_reference, read_c0_count());
55 atomic_set(&count_start_flag, 1); 53 atomic_set(&count_start_flag, cpu);
56 smp_wmb(); 54 smp_wmb();
57 55
58 /* Count will be initialised to current timer for all CPU's */ 56 /* Count will be initialised to current timer for all CPU's */
@@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)
69 * two CPUs. 67 * two CPUs.
70 */ 68 */
71 69
72 nslaves = num_online_cpus()-1;
73 for (i = 0; i < NR_LOOPS; i++) { 70 for (i = 0; i < NR_LOOPS; i++) {
74 /* slaves loop on '!= ncpus' */ 71 /* slaves loop on '!= 2' */
75 while (atomic_read(&count_count_start) != nslaves) 72 while (atomic_read(&count_count_start) != 1)
76 mb(); 73 mb();
77 atomic_set(&count_count_stop, 0); 74 atomic_set(&count_count_stop, 0);
78 smp_wmb(); 75 smp_wmb();
@@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)
89 /* 86 /*
90 * Wait for all slaves to leave the synchronization point: 87 * Wait for all slaves to leave the synchronization point:
91 */ 88 */
92 while (atomic_read(&count_count_stop) != nslaves) 89 while (atomic_read(&count_count_stop) != 1)
93 mb(); 90 mb();
94 atomic_set(&count_count_start, 0); 91 atomic_set(&count_count_start, 0);
95 smp_wmb(); 92 smp_wmb();
@@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)
97 } 94 }
98 /* Arrange for an interrupt in a short while */ 95 /* Arrange for an interrupt in a short while */
99 write_c0_compare(read_c0_count() + COUNTON); 96 write_c0_compare(read_c0_count() + COUNTON);
97 atomic_set(&count_start_flag, 0);
100 98
101 local_irq_restore(flags); 99 local_irq_restore(flags);
102 100
@@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)
108 printk("done.\n"); 106 printk("done.\n");
109} 107}
110 108
111void __cpuinit synchronise_count_slave(void) 109void __cpuinit synchronise_count_slave(int cpu)
112{ 110{
113 int i; 111 int i;
114 unsigned int initcount; 112 unsigned int initcount;
115 int ncpus;
116 113
117#ifdef CONFIG_MIPS_MT_SMTC 114#ifdef CONFIG_MIPS_MT_SMTC
118 /* 115 /*
@@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)
127 * so we first wait for the master to say everyone is ready 124 * so we first wait for the master to say everyone is ready
128 */ 125 */
129 126
130 while (!atomic_read(&count_start_flag)) 127 while (atomic_read(&count_start_flag) != cpu)
131 mb(); 128 mb();
132 129
133 /* Count will be initialised to next expire for all CPU's */ 130 /* Count will be initialised to next expire for all CPU's */
134 initcount = atomic_read(&count_reference); 131 initcount = atomic_read(&count_reference);
135 132
136 ncpus = num_online_cpus();
137 for (i = 0; i < NR_LOOPS; i++) { 133 for (i = 0; i < NR_LOOPS; i++) {
138 atomic_inc(&count_count_start); 134 atomic_inc(&count_count_start);
139 while (atomic_read(&count_count_start) != ncpus) 135 while (atomic_read(&count_count_start) != 2)
140 mb(); 136 mb();
141 137
142 /* 138 /*
@@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)
146 write_c0_count(initcount); 142 write_c0_count(initcount);
147 143
148 atomic_inc(&count_count_stop); 144 atomic_inc(&count_count_stop);
149 while (atomic_read(&count_count_stop) != ncpus) 145 while (atomic_read(&count_count_stop) != 2)
150 mb(); 146 mb();
151 } 147 }
152 /* Arrange for an interrupt in a short while */ 148 /* Arrange for an interrupt in a short while */
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 284dea54faf5..2147cb34e705 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -252,16 +252,3 @@ void __init mips_pcibios_init(void)
252 252
253 register_pci_controller(controller); 253 register_pci_controller(controller);
254} 254}
255
256/* Enable PCI 2.1 compatibility in PIIX4 */
257static void __devinit quirk_dlcsetup(struct pci_dev *dev)
258{
259 u8 odlc, ndlc;
260 (void) pci_read_config_byte(dev, 0x82, &odlc);
261 /* Enable passive releases and delayed transaction */
262 ndlc = odlc | 7;
263 (void) pci_write_config_byte(dev, 0x82, ndlc);
264}
265
266DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
267 quirk_dlcsetup);
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 414a7459858d..86d77a666458 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -23,9 +23,12 @@
23#define AR724X_PCI_MEM_BASE 0x10000000 23#define AR724X_PCI_MEM_BASE 0x10000000
24#define AR724X_PCI_MEM_SIZE 0x08000000 24#define AR724X_PCI_MEM_SIZE 0x08000000
25 25
26#define AR724X_PCI_REG_RESET 0x18
26#define AR724X_PCI_REG_INT_STATUS 0x4c 27#define AR724X_PCI_REG_INT_STATUS 0x4c
27#define AR724X_PCI_REG_INT_MASK 0x50 28#define AR724X_PCI_REG_INT_MASK 0x50
28 29
30#define AR724X_PCI_RESET_LINK_UP BIT(0)
31
29#define AR724X_PCI_INT_DEV0 BIT(14) 32#define AR724X_PCI_INT_DEV0 BIT(14)
30 33
31#define AR724X_PCI_IRQ_COUNT 1 34#define AR724X_PCI_IRQ_COUNT 1
@@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base;
38 41
39static u32 ar724x_pci_bar0_value; 42static u32 ar724x_pci_bar0_value;
40static bool ar724x_pci_bar0_is_cached; 43static bool ar724x_pci_bar0_is_cached;
44static bool ar724x_pci_link_up;
45
46static inline bool ar724x_pci_check_link(void)
47{
48 u32 reset;
49
50 reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
51 return reset & AR724X_PCI_RESET_LINK_UP;
52}
41 53
42static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, 54static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
43 int size, uint32_t *value) 55 int size, uint32_t *value)
@@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
46 void __iomem *base; 58 void __iomem *base;
47 u32 data; 59 u32 data;
48 60
61 if (!ar724x_pci_link_up)
62 return PCIBIOS_DEVICE_NOT_FOUND;
63
49 if (devfn) 64 if (devfn)
50 return PCIBIOS_DEVICE_NOT_FOUND; 65 return PCIBIOS_DEVICE_NOT_FOUND;
51 66
@@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
96 u32 data; 111 u32 data;
97 int s; 112 int s;
98 113
114 if (!ar724x_pci_link_up)
115 return PCIBIOS_DEVICE_NOT_FOUND;
116
99 if (devfn) 117 if (devfn)
100 return PCIBIOS_DEVICE_NOT_FOUND; 118 return PCIBIOS_DEVICE_NOT_FOUND;
101 119
@@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq)
280 if (ar724x_pci_ctrl_base == NULL) 298 if (ar724x_pci_ctrl_base == NULL)
281 goto err_unmap_devcfg; 299 goto err_unmap_devcfg;
282 300
301 ar724x_pci_link_up = ar724x_pci_check_link();
302 if (!ar724x_pci_link_up)
303 pr_warn("ar724x: PCIe link is down\n");
304
283 ar724x_pci_irq_init(irq); 305 ar724x_pci_irq_init(irq);
284 register_pci_controller(&ar724x_pci_controller); 306 register_pci_controller(&ar724x_pci_controller);
285 307
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 6c6defc24619..af9cf30ed474 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
141 141
142#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) 142#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
143 143
144#define ATOMIC_INIT(i) ((atomic_t) { (i) }) 144#define ATOMIC_INIT(i) { (i) }
145 145
146#define smp_mb__before_atomic_dec() smp_mb() 146#define smp_mb__before_atomic_dec() smp_mb()
147#define smp_mb__after_atomic_dec() smp_mb() 147#define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
150 150
151#ifdef CONFIG_64BIT 151#ifdef CONFIG_64BIT
152 152
153#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) 153#define ATOMIC64_INIT(i) { (i) }
154 154
155static __inline__ s64 155static __inline__ s64
156__atomic64_add_return(s64 i, atomic64_t *v) 156__atomic64_add_return(s64 i, atomic64_t *v)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b395c16..2c05a9292a81 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
309 cregs->ksp = (unsigned long)stack 309 cregs->ksp = (unsigned long)stack
310 + (pregs->gr[21] & (THREAD_SIZE - 1)); 310 + (pregs->gr[21] & (THREAD_SIZE - 1));
311 cregs->gr[30] = usp; 311 cregs->gr[30] = usp;
312 if (p->personality == PER_HPUX) { 312 if (personality(p->personality) == PER_HPUX) {
313#ifdef CONFIG_HPUX 313#ifdef CONFIG_HPUX
314 cregs->kpc = (unsigned long) &hpux_child_return; 314 cregs->kpc = (unsigned long) &hpux_child_return;
315#else 315#else
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b932260f47..7426e40699bd 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
225 long err; 225 long err;
226 226
227 if (personality(current->personality) == PER_LINUX32 227 if (personality(current->personality) == PER_LINUX32
228 && personality == PER_LINUX) 228 && personality(personality) == PER_LINUX)
229 personality = PER_LINUX32; 229 personality = (personality & ~PER_MASK) | PER_LINUX32;
230 230
231 err = sys_personality(personality); 231 err = sys_personality(personality);
232 if (err == PER_LINUX32) 232 if (personality(err) == PER_LINUX32)
233 err = PER_LINUX; 233 err = (err & ~PER_MASK) | PER_LINUX;
234 234
235 return err; 235 return err;
236} 236}
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 8d35d2c1f694..4f9c9f682ecf 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -345,6 +345,13 @@
345/include/ "qoriq-duart-1.dtsi" 345/include/ "qoriq-duart-1.dtsi"
346/include/ "qoriq-gpio-0.dtsi" 346/include/ "qoriq-gpio-0.dtsi"
347/include/ "qoriq-usb2-mph-0.dtsi" 347/include/ "qoriq-usb2-mph-0.dtsi"
348 usb@210000 {
349 compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
350 port0;
351 };
348/include/ "qoriq-usb2-dr-0.dtsi" 352/include/ "qoriq-usb2-dr-0.dtsi"
353 usb@211000 {
354 compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
355 };
349/include/ "qoriq-sec4.0-0.dtsi" 356/include/ "qoriq-sec4.0-0.dtsi"
350}; 357};
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index f4337bacd0e7..26e541c4662b 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y 7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_AUDIT=y 8CONFIG_AUDIT=y
9CONFIG_SPARSE_IRQ=y 9CONFIG_IRQ_DOMAIN_DEBUG=y
10CONFIG_NO_HZ=y
11CONFIG_HIGH_RES_TIMERS=y
10CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=14 14CONFIG_LOG_BUF_SHIFT=14
13CONFIG_BLK_DEV_INITRD=y 15CONFIG_BLK_DEV_INITRD=y
14# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
15CONFIG_KALLSYMS_ALL=y 16CONFIG_KALLSYMS_ALL=y
16CONFIG_KALLSYMS_EXTRA_PASS=y
17CONFIG_EMBEDDED=y 17CONFIG_EMBEDDED=y
18CONFIG_MODULES=y 18CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y 19CONFIG_MODULE_UNLOAD=y
20CONFIG_MODULE_FORCE_UNLOAD=y 20CONFIG_MODULE_FORCE_UNLOAD=y
21CONFIG_MODVERSIONS=y 21CONFIG_MODVERSIONS=y
22# CONFIG_BLK_DEV_BSG is not set 22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_PARTITION_ADVANCED=y
24CONFIG_MAC_PARTITION=y
23CONFIG_P1023_RDS=y 25CONFIG_P1023_RDS=y
24CONFIG_QUICC_ENGINE=y 26CONFIG_QUICC_ENGINE=y
25CONFIG_QE_GPIO=y 27CONFIG_QE_GPIO=y
26CONFIG_CPM2=y 28CONFIG_CPM2=y
27CONFIG_GPIO_MPC8XXX=y
28CONFIG_HIGHMEM=y 29CONFIG_HIGHMEM=y
29CONFIG_NO_HZ=y
30CONFIG_HIGH_RES_TIMERS=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 30# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_BINFMT_MISC=m 31CONFIG_BINFMT_MISC=m
33CONFIG_MATH_EMULATION=y 32CONFIG_MATH_EMULATION=y
@@ -63,11 +62,11 @@ CONFIG_INET_ESP=y
63CONFIG_IPV6=y 62CONFIG_IPV6=y
64CONFIG_IP_SCTP=m 63CONFIG_IP_SCTP=m
65CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 64CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
65CONFIG_DEVTMPFS=y
66CONFIG_PROC_DEVICETREE=y 66CONFIG_PROC_DEVICETREE=y
67CONFIG_BLK_DEV_LOOP=y 67CONFIG_BLK_DEV_LOOP=y
68CONFIG_BLK_DEV_RAM=y 68CONFIG_BLK_DEV_RAM=y
69CONFIG_BLK_DEV_RAM_SIZE=131072 69CONFIG_BLK_DEV_RAM_SIZE=131072
70CONFIG_MISC_DEVICES=y
71CONFIG_EEPROM_LEGACY=y 70CONFIG_EEPROM_LEGACY=y
72CONFIG_BLK_DEV_SD=y 71CONFIG_BLK_DEV_SD=y
73CONFIG_CHR_DEV_ST=y 72CONFIG_CHR_DEV_ST=y
@@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y
80CONFIG_SATA_SIL24=y 79CONFIG_SATA_SIL24=y
81CONFIG_NETDEVICES=y 80CONFIG_NETDEVICES=y
82CONFIG_DUMMY=y 81CONFIG_DUMMY=y
82CONFIG_FS_ENET=y
83CONFIG_FSL_PQ_MDIO=y
84CONFIG_E1000E=y
83CONFIG_MARVELL_PHY=y 85CONFIG_MARVELL_PHY=y
84CONFIG_DAVICOM_PHY=y 86CONFIG_DAVICOM_PHY=y
85CONFIG_CICADA_PHY=y 87CONFIG_CICADA_PHY=y
86CONFIG_VITESSE_PHY=y 88CONFIG_VITESSE_PHY=y
87CONFIG_FIXED_PHY=y 89CONFIG_FIXED_PHY=y
88CONFIG_NET_ETHERNET=y
89CONFIG_FS_ENET=y
90CONFIG_E1000E=y
91CONFIG_FSL_PQ_MDIO=y
92CONFIG_INPUT_FF_MEMLESS=m 90CONFIG_INPUT_FF_MEMLESS=m
93# CONFIG_INPUT_MOUSEDEV is not set 91# CONFIG_INPUT_MOUSEDEV is not set
94# CONFIG_INPUT_KEYBOARD is not set 92# CONFIG_INPUT_KEYBOARD is not set
@@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y
98CONFIG_SERIAL_8250_CONSOLE=y 96CONFIG_SERIAL_8250_CONSOLE=y
99CONFIG_SERIAL_8250_NR_UARTS=2 97CONFIG_SERIAL_8250_NR_UARTS=2
100CONFIG_SERIAL_8250_RUNTIME_UARTS=2 98CONFIG_SERIAL_8250_RUNTIME_UARTS=2
101CONFIG_SERIAL_8250_EXTENDED=y
102CONFIG_SERIAL_8250_MANY_PORTS=y 99CONFIG_SERIAL_8250_MANY_PORTS=y
103CONFIG_SERIAL_8250_DETECT_IRQ=y 100CONFIG_SERIAL_8250_DETECT_IRQ=y
104CONFIG_SERIAL_8250_RSA=y 101CONFIG_SERIAL_8250_RSA=y
105CONFIG_SERIAL_QE=m 102CONFIG_SERIAL_QE=m
106CONFIG_HW_RANDOM=y
107CONFIG_NVRAM=y 103CONFIG_NVRAM=y
108CONFIG_I2C=y 104CONFIG_I2C=y
109CONFIG_I2C_CPM=m 105CONFIG_I2C_CPM=m
110CONFIG_I2C_MPC=y 106CONFIG_I2C_MPC=y
107CONFIG_GPIO_MPC8XXX=y
111# CONFIG_HWMON is not set 108# CONFIG_HWMON is not set
112CONFIG_VIDEO_OUTPUT_CONTROL=y 109CONFIG_VIDEO_OUTPUT_CONTROL=y
113CONFIG_SOUND=y 110CONFIG_SOUND=y
@@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y
123CONFIG_FSL_DMA=y 120CONFIG_FSL_DMA=y
124# CONFIG_NET_DMA is not set 121# CONFIG_NET_DMA is not set
125CONFIG_STAGING=y 122CONFIG_STAGING=y
126# CONFIG_STAGING_EXCLUDE_BUILD is not set
127CONFIG_EXT2_FS=y 123CONFIG_EXT2_FS=y
128CONFIG_EXT3_FS=y 124CONFIG_EXT3_FS=y
129# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 125# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m
150CONFIG_SYSV_FS=m 146CONFIG_SYSV_FS=m
151CONFIG_UFS_FS=m 147CONFIG_UFS_FS=m
152CONFIG_NFS_FS=y 148CONFIG_NFS_FS=y
153CONFIG_NFS_V3=y
154CONFIG_NFS_V4=y 149CONFIG_NFS_V4=y
155CONFIG_ROOT_NFS=y 150CONFIG_ROOT_NFS=y
156CONFIG_NFSD=y 151CONFIG_NFSD=y
157CONFIG_PARTITION_ADVANCED=y
158CONFIG_MAC_PARTITION=y
159CONFIG_CRC_T10DIF=y 152CONFIG_CRC_T10DIF=y
160CONFIG_FRAME_WARN=8092 153CONFIG_FRAME_WARN=8092
161CONFIG_DEBUG_FS=y 154CONFIG_DEBUG_FS=y
162CONFIG_DEBUG_KERNEL=y
163CONFIG_DETECT_HUNG_TASK=y 155CONFIG_DETECT_HUNG_TASK=y
164# CONFIG_DEBUG_BUGVERBOSE is not set 156# CONFIG_DEBUG_BUGVERBOSE is not set
165CONFIG_DEBUG_INFO=y 157CONFIG_DEBUG_INFO=y
166# CONFIG_RCU_CPU_STALL_DETECTOR is not set
167CONFIG_SYSCTL_SYSCALL_CHECK=y
168CONFIG_IRQ_DOMAIN_DEBUG=y
169CONFIG_CRYPTO_PCBC=m 158CONFIG_CRYPTO_PCBC=m
170CONFIG_CRYPTO_SHA256=y 159CONFIG_CRYPTO_SHA256=y
171CONFIG_CRYPTO_SHA512=y 160CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index cbb98c1234fd..8b3d57c1ebe8 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y 7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_AUDIT=y 8CONFIG_AUDIT=y
9CONFIG_SPARSE_IRQ=y 9CONFIG_NO_HZ=y
10CONFIG_RCU_TRACE=y 10CONFIG_HIGH_RES_TIMERS=y
11CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=14 13CONFIG_LOG_BUF_SHIFT=14
@@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y
21CONFIG_MODULE_FORCE_UNLOAD=y 21CONFIG_MODULE_FORCE_UNLOAD=y
22CONFIG_MODVERSIONS=y 22CONFIG_MODVERSIONS=y
23# CONFIG_BLK_DEV_BSG is not set 23# CONFIG_BLK_DEV_BSG is not set
24CONFIG_PARTITION_ADVANCED=y
25CONFIG_MAC_PARTITION=y
24CONFIG_P2041_RDB=y 26CONFIG_P2041_RDB=y
25CONFIG_P3041_DS=y 27CONFIG_P3041_DS=y
26CONFIG_P4080_DS=y 28CONFIG_P4080_DS=y
27CONFIG_P5020_DS=y 29CONFIG_P5020_DS=y
28CONFIG_HIGHMEM=y 30CONFIG_HIGHMEM=y
29CONFIG_NO_HZ=y
30CONFIG_HIGH_RES_TIMERS=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_BINFMT_MISC=m 32CONFIG_BINFMT_MISC=m
33CONFIG_KEXEC=y 33CONFIG_KEXEC=y
34CONFIG_IRQ_ALL_CPUS=y 34CONFIG_IRQ_ALL_CPUS=y
35CONFIG_FORCE_MAX_ZONEORDER=13 35CONFIG_FORCE_MAX_ZONEORDER=13
36CONFIG_FSL_LBC=y
37CONFIG_PCI=y 36CONFIG_PCI=y
38CONFIG_PCIEPORTBUS=y 37CONFIG_PCIEPORTBUS=y
39CONFIG_PCI_MSI=y
40# CONFIG_PCIEASPM is not set 38# CONFIG_PCIEASPM is not set
39CONFIG_PCI_MSI=y
41CONFIG_RAPIDIO=y 40CONFIG_RAPIDIO=y
42CONFIG_FSL_RIO=y 41CONFIG_FSL_RIO=y
43CONFIG_NET=y 42CONFIG_NET=y
@@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y
70CONFIG_IPV6=y 69CONFIG_IPV6=y
71CONFIG_IP_SCTP=m 70CONFIG_IP_SCTP=m
72CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 71CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
72CONFIG_DEVTMPFS=y
73CONFIG_MTD=y 73CONFIG_MTD=y
74CONFIG_MTD_CMDLINE_PARTS=y 74CONFIG_MTD_CMDLINE_PARTS=y
75CONFIG_MTD_CHAR=y 75CONFIG_MTD_CHAR=y
@@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y
77CONFIG_MTD_CFI=y 77CONFIG_MTD_CFI=y
78CONFIG_MTD_CFI_AMDSTD=y 78CONFIG_MTD_CFI_AMDSTD=y
79CONFIG_MTD_PHYSMAP_OF=y 79CONFIG_MTD_PHYSMAP_OF=y
80CONFIG_MTD_M25P80=y
80CONFIG_MTD_NAND=y 81CONFIG_MTD_NAND=y
81CONFIG_MTD_NAND_ECC=y
82CONFIG_MTD_NAND_IDS=y
83CONFIG_MTD_NAND_FSL_IFC=y
84CONFIG_MTD_NAND_FSL_ELBC=y 82CONFIG_MTD_NAND_FSL_ELBC=y
85CONFIG_MTD_M25P80=y 83CONFIG_MTD_NAND_FSL_IFC=y
86CONFIG_PROC_DEVICETREE=y 84CONFIG_PROC_DEVICETREE=y
87CONFIG_BLK_DEV_LOOP=y 85CONFIG_BLK_DEV_LOOP=y
88CONFIG_BLK_DEV_RAM=y 86CONFIG_BLK_DEV_RAM=y
89CONFIG_BLK_DEV_RAM_SIZE=131072 87CONFIG_BLK_DEV_RAM_SIZE=131072
90CONFIG_MISC_DEVICES=y
91CONFIG_BLK_DEV_SD=y 88CONFIG_BLK_DEV_SD=y
92CONFIG_CHR_DEV_ST=y 89CONFIG_CHR_DEV_ST=y
93CONFIG_BLK_DEV_SR=y 90CONFIG_BLK_DEV_SR=y
@@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y
115CONFIG_PPC_EPAPR_HV_BYTECHAN=y 112CONFIG_PPC_EPAPR_HV_BYTECHAN=y
116CONFIG_SERIAL_8250=y 113CONFIG_SERIAL_8250=y
117CONFIG_SERIAL_8250_CONSOLE=y 114CONFIG_SERIAL_8250_CONSOLE=y
118CONFIG_SERIAL_8250_EXTENDED=y
119CONFIG_SERIAL_8250_MANY_PORTS=y 115CONFIG_SERIAL_8250_MANY_PORTS=y
120CONFIG_SERIAL_8250_DETECT_IRQ=y 116CONFIG_SERIAL_8250_DETECT_IRQ=y
121CONFIG_SERIAL_8250_RSA=y 117CONFIG_SERIAL_8250_RSA=y
122CONFIG_HW_RANDOM=y
123CONFIG_NVRAM=y 118CONFIG_NVRAM=y
124CONFIG_I2C=y 119CONFIG_I2C=y
125CONFIG_I2C_CHARDEV=y 120CONFIG_I2C_CHARDEV=y
@@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y
132CONFIG_VIDEO_OUTPUT_CONTROL=y 127CONFIG_VIDEO_OUTPUT_CONTROL=y
133CONFIG_USB_HID=m 128CONFIG_USB_HID=m
134CONFIG_USB=y 129CONFIG_USB=y
135CONFIG_USB_DEVICEFS=y
136CONFIG_USB_MON=y 130CONFIG_USB_MON=y
137CONFIG_USB_EHCI_HCD=y 131CONFIG_USB_EHCI_HCD=y
138CONFIG_USB_EHCI_FSL=y 132CONFIG_USB_EHCI_FSL=y
@@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
142CONFIG_USB_STORAGE=y 136CONFIG_USB_STORAGE=y
143CONFIG_MMC=y 137CONFIG_MMC=y
144CONFIG_MMC_SDHCI=y 138CONFIG_MMC_SDHCI=y
145CONFIG_MMC_SDHCI_OF=y
146CONFIG_MMC_SDHCI_OF_ESDHC=y
147CONFIG_EDAC=y 139CONFIG_EDAC=y
148CONFIG_EDAC_MM_EDAC=y 140CONFIG_EDAC_MM_EDAC=y
149CONFIG_EDAC_MPC85XX=y 141CONFIG_EDAC_MPC85XX=y
@@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y
170CONFIG_JFFS2_FS=y 162CONFIG_JFFS2_FS=y
171CONFIG_CRAMFS=y 163CONFIG_CRAMFS=y
172CONFIG_NFS_FS=y 164CONFIG_NFS_FS=y
173CONFIG_NFS_V3=y
174CONFIG_NFS_V4=y 165CONFIG_NFS_V4=y
175CONFIG_ROOT_NFS=y 166CONFIG_ROOT_NFS=y
176CONFIG_NFSD=m 167CONFIG_NFSD=m
177CONFIG_PARTITION_ADVANCED=y
178CONFIG_MAC_PARTITION=y
179CONFIG_NLS_ISO8859_1=y 168CONFIG_NLS_ISO8859_1=y
180CONFIG_NLS_UTF8=m 169CONFIG_NLS_UTF8=m
181CONFIG_MAGIC_SYSRQ=y 170CONFIG_MAGIC_SYSRQ=y
182CONFIG_DEBUG_SHIRQ=y 171CONFIG_DEBUG_SHIRQ=y
183CONFIG_DETECT_HUNG_TASK=y 172CONFIG_DETECT_HUNG_TASK=y
184CONFIG_DEBUG_INFO=y 173CONFIG_DEBUG_INFO=y
185CONFIG_SYSCTL_SYSCALL_CHECK=y 174CONFIG_RCU_TRACE=y
186CONFIG_CRYPTO_NULL=y 175CONFIG_CRYPTO_NULL=y
187CONFIG_CRYPTO_PCBC=m 176CONFIG_CRYPTO_PCBC=m
188CONFIG_CRYPTO_MD4=y 177CONFIG_CRYPTO_MD4=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index dd89de8b0b7f..0516e22ca3de 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -56,6 +56,7 @@ CONFIG_INET_ESP=y
56CONFIG_IPV6=y 56CONFIG_IPV6=y
57CONFIG_IP_SCTP=m 57CONFIG_IP_SCTP=m
58CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 58CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
59CONFIG_DEVTMPFS=y
59CONFIG_MTD=y 60CONFIG_MTD=y
60CONFIG_MTD_CMDLINE_PARTS=y 61CONFIG_MTD_CMDLINE_PARTS=y
61CONFIG_MTD_CHAR=y 62CONFIG_MTD_CHAR=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 15130066e5e2..07b7f2af2dca 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,8 +1,10 @@
1CONFIG_PPC64=y
2CONFIG_ALTIVEC=y
3CONFIG_SMP=y
4CONFIG_NR_CPUS=4
1CONFIG_EXPERIMENTAL=y 5CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 6CONFIG_SYSVIPC=y
3CONFIG_POSIX_MQUEUE=y 7CONFIG_POSIX_MQUEUE=y
4CONFIG_NO_HZ=y
5CONFIG_HIGH_RES_TIMERS=y
6CONFIG_IKCONFIG=y 8CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y 9CONFIG_IKCONFIG_PROC=y
8CONFIG_BLK_DEV_INITRD=y 10CONFIG_BLK_DEV_INITRD=y
@@ -13,15 +15,16 @@ CONFIG_MODULES=y
13CONFIG_MODULE_UNLOAD=y 15CONFIG_MODULE_UNLOAD=y
14CONFIG_MODVERSIONS=y 16CONFIG_MODVERSIONS=y
15CONFIG_MODULE_SRCVERSION_ALL=y 17CONFIG_MODULE_SRCVERSION_ALL=y
16CONFIG_PARTITION_ADVANCED=y 18# CONFIG_PPC_PSERIES is not set
17CONFIG_MAC_PARTITION=y
18CONFIG_SMP=y
19CONFIG_NR_CPUS=4
20CONFIG_KEXEC=y
21# CONFIG_RELOCATABLE is not set
22CONFIG_CPU_FREQ=y 19CONFIG_CPU_FREQ=y
23CONFIG_CPU_FREQ_GOV_POWERSAVE=y 20CONFIG_CPU_FREQ_GOV_POWERSAVE=y
24CONFIG_CPU_FREQ_GOV_USERSPACE=y 21CONFIG_CPU_FREQ_GOV_USERSPACE=y
22CONFIG_CPU_FREQ_PMAC64=y
23CONFIG_NO_HZ=y
24CONFIG_HIGH_RES_TIMERS=y
25CONFIG_KEXEC=y
26CONFIG_IRQ_ALL_CPUS=y
27# CONFIG_MIGRATION is not set
25CONFIG_PCI_MSI=y 28CONFIG_PCI_MSI=y
26CONFIG_NET=y 29CONFIG_NET=y
27CONFIG_PACKET=y 30CONFIG_PACKET=y
@@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m
49CONFIG_NF_CONNTRACK_IPV4=m 52CONFIG_NF_CONNTRACK_IPV4=m
50CONFIG_IP_NF_QUEUE=m 53CONFIG_IP_NF_QUEUE=m
51CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 54CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
55CONFIG_PROC_DEVICETREE=y
52CONFIG_BLK_DEV_LOOP=y 56CONFIG_BLK_DEV_LOOP=y
53CONFIG_BLK_DEV_NBD=m 57CONFIG_BLK_DEV_NBD=m
54CONFIG_BLK_DEV_RAM=y 58CONFIG_BLK_DEV_RAM=y
@@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
56CONFIG_CDROM_PKTCDVD=m 60CONFIG_CDROM_PKTCDVD=m
57CONFIG_IDE=y 61CONFIG_IDE=y
58CONFIG_BLK_DEV_IDECD=y 62CONFIG_BLK_DEV_IDECD=y
63CONFIG_BLK_DEV_IDE_PMAC=y
64CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
59CONFIG_BLK_DEV_SD=y 65CONFIG_BLK_DEV_SD=y
60CONFIG_CHR_DEV_ST=y 66CONFIG_CHR_DEV_ST=y
61CONFIG_BLK_DEV_SR=y 67CONFIG_BLK_DEV_SR=y
@@ -79,24 +85,33 @@ CONFIG_DM_CRYPT=m
79CONFIG_DM_SNAPSHOT=m 85CONFIG_DM_SNAPSHOT=m
80CONFIG_DM_MIRROR=m 86CONFIG_DM_MIRROR=m
81CONFIG_DM_ZERO=m 87CONFIG_DM_ZERO=m
82CONFIG_MACINTOSH_DRIVERS=y 88CONFIG_IEEE1394=y
89CONFIG_IEEE1394_OHCI1394=y
90CONFIG_IEEE1394_SBP2=m
91CONFIG_IEEE1394_ETH1394=m
92CONFIG_IEEE1394_RAWIO=y
93CONFIG_IEEE1394_VIDEO1394=m
94CONFIG_IEEE1394_DV1394=m
95CONFIG_ADB_PMU=y
96CONFIG_PMAC_SMU=y
83CONFIG_MAC_EMUMOUSEBTN=y 97CONFIG_MAC_EMUMOUSEBTN=y
98CONFIG_THERM_PM72=y
99CONFIG_WINDFARM=y
100CONFIG_WINDFARM_PM81=y
101CONFIG_WINDFARM_PM91=y
102CONFIG_WINDFARM_PM112=y
103CONFIG_WINDFARM_PM121=y
84CONFIG_NETDEVICES=y 104CONFIG_NETDEVICES=y
85CONFIG_BONDING=m
86CONFIG_DUMMY=m 105CONFIG_DUMMY=m
87CONFIG_MII=y 106CONFIG_BONDING=m
88CONFIG_TUN=m 107CONFIG_TUN=m
108CONFIG_NET_ETHERNET=y
109CONFIG_MII=y
110CONFIG_SUNGEM=y
89CONFIG_ACENIC=m 111CONFIG_ACENIC=m
90CONFIG_ACENIC_OMIT_TIGON_I=y 112CONFIG_ACENIC_OMIT_TIGON_I=y
91CONFIG_TIGON3=y
92CONFIG_E1000=y 113CONFIG_E1000=y
93CONFIG_SUNGEM=y 114CONFIG_TIGON3=y
94CONFIG_PPP=m
95CONFIG_PPP_BSDCOMP=m
96CONFIG_PPP_DEFLATE=m
97CONFIG_PPPOE=m
98CONFIG_PPP_ASYNC=m
99CONFIG_PPP_SYNC_TTY=m
100CONFIG_USB_CATC=m 115CONFIG_USB_CATC=m
101CONFIG_USB_KAWETH=m 116CONFIG_USB_KAWETH=m
102CONFIG_USB_PEGASUS=m 117CONFIG_USB_PEGASUS=m
@@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m
106# CONFIG_USB_NET_NET1080 is not set 121# CONFIG_USB_NET_NET1080 is not set
107# CONFIG_USB_NET_CDC_SUBSET is not set 122# CONFIG_USB_NET_CDC_SUBSET is not set
108# CONFIG_USB_NET_ZAURUS is not set 123# CONFIG_USB_NET_ZAURUS is not set
124CONFIG_PPP=m
125CONFIG_PPP_ASYNC=m
126CONFIG_PPP_SYNC_TTY=m
127CONFIG_PPP_DEFLATE=m
128CONFIG_PPP_BSDCOMP=m
129CONFIG_PPPOE=m
109# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 130# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
110CONFIG_INPUT_JOYDEV=m 131CONFIG_INPUT_JOYDEV=m
111CONFIG_INPUT_EVDEV=y 132CONFIG_INPUT_EVDEV=y
133# CONFIG_KEYBOARD_ATKBD is not set
112# CONFIG_MOUSE_PS2 is not set 134# CONFIG_MOUSE_PS2 is not set
135# CONFIG_SERIO_I8042 is not set
113# CONFIG_SERIO_SERPORT is not set 136# CONFIG_SERIO_SERPORT is not set
114CONFIG_VT_HW_CONSOLE_BINDING=y
115# CONFIG_HW_RANDOM is not set 137# CONFIG_HW_RANDOM is not set
116CONFIG_GEN_RTC=y 138CONFIG_GEN_RTC=y
117CONFIG_RAW_DRIVER=y 139CONFIG_RAW_DRIVER=y
118CONFIG_I2C_CHARDEV=y 140CONFIG_I2C_CHARDEV=y
119# CONFIG_HWMON is not set 141# CONFIG_HWMON is not set
120CONFIG_AGP=y 142CONFIG_AGP=m
121CONFIG_DRM=y 143CONFIG_AGP_UNINORTH=m
122CONFIG_DRM_NOUVEAU=y
123CONFIG_VIDEO_OUTPUT_CONTROL=m 144CONFIG_VIDEO_OUTPUT_CONTROL=m
145CONFIG_FB=y
124CONFIG_FIRMWARE_EDID=y 146CONFIG_FIRMWARE_EDID=y
125CONFIG_FB_TILEBLITTING=y 147CONFIG_FB_TILEBLITTING=y
148CONFIG_FB_OF=y
149CONFIG_FB_NVIDIA=y
150CONFIG_FB_NVIDIA_I2C=y
126CONFIG_FB_RADEON=y 151CONFIG_FB_RADEON=y
152# CONFIG_VGA_CONSOLE is not set
153CONFIG_FRAMEBUFFER_CONSOLE=y
127CONFIG_LOGO=y 154CONFIG_LOGO=y
128CONFIG_SOUND=m 155CONFIG_SOUND=m
129CONFIG_SND=m 156CONFIG_SND=m
@@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m
131CONFIG_SND_MIXER_OSS=m 158CONFIG_SND_MIXER_OSS=m
132CONFIG_SND_PCM_OSS=m 159CONFIG_SND_PCM_OSS=m
133CONFIG_SND_SEQUENCER_OSS=y 160CONFIG_SND_SEQUENCER_OSS=y
161CONFIG_SND_POWERMAC=m
162CONFIG_SND_AOA=m
163CONFIG_SND_AOA_FABRIC_LAYOUT=m
164CONFIG_SND_AOA_ONYX=m
165CONFIG_SND_AOA_TAS=m
166CONFIG_SND_AOA_TOONIE=m
134CONFIG_SND_USB_AUDIO=m 167CONFIG_SND_USB_AUDIO=m
168CONFIG_HID_PID=y
169CONFIG_USB_HIDDEV=y
135CONFIG_HID_GYRATION=y 170CONFIG_HID_GYRATION=y
136CONFIG_LOGITECH_FF=y 171CONFIG_LOGITECH_FF=y
137CONFIG_HID_PANTHERLORD=y 172CONFIG_HID_PANTHERLORD=y
@@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y
139CONFIG_HID_SAMSUNG=y 174CONFIG_HID_SAMSUNG=y
140CONFIG_HID_SONY=y 175CONFIG_HID_SONY=y
141CONFIG_HID_SUNPLUS=y 176CONFIG_HID_SUNPLUS=y
142CONFIG_HID_PID=y
143CONFIG_USB_HIDDEV=y
144CONFIG_USB=y 177CONFIG_USB=y
178CONFIG_USB_DEVICEFS=y
145CONFIG_USB_MON=y 179CONFIG_USB_MON=y
146CONFIG_USB_EHCI_HCD=y 180CONFIG_USB_EHCI_HCD=y
181# CONFIG_USB_EHCI_HCD_PPC_OF is not set
147CONFIG_USB_OHCI_HCD=y 182CONFIG_USB_OHCI_HCD=y
183CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
148CONFIG_USB_ACM=m 184CONFIG_USB_ACM=m
149CONFIG_USB_PRINTER=y 185CONFIG_USB_PRINTER=y
150CONFIG_USB_STORAGE=y 186CONFIG_USB_STORAGE=y
@@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
208CONFIG_REISERFS_FS_SECURITY=y 244CONFIG_REISERFS_FS_SECURITY=y
209CONFIG_XFS_FS=m 245CONFIG_XFS_FS=m
210CONFIG_XFS_POSIX_ACL=y 246CONFIG_XFS_POSIX_ACL=y
247CONFIG_INOTIFY=y
248CONFIG_AUTOFS_FS=m
211CONFIG_ISO9660_FS=y 249CONFIG_ISO9660_FS=y
212CONFIG_JOLIET=y 250CONFIG_JOLIET=y
213CONFIG_ZISOFS=y 251CONFIG_ZISOFS=y
@@ -221,12 +259,14 @@ CONFIG_HFS_FS=m
221CONFIG_HFSPLUS_FS=m 259CONFIG_HFSPLUS_FS=m
222CONFIG_CRAMFS=y 260CONFIG_CRAMFS=y
223CONFIG_NFS_FS=y 261CONFIG_NFS_FS=y
262CONFIG_NFS_V3=y
224CONFIG_NFS_V3_ACL=y 263CONFIG_NFS_V3_ACL=y
225CONFIG_NFS_V4=y 264CONFIG_NFS_V4=y
226CONFIG_NFSD=y 265CONFIG_NFSD=y
227CONFIG_NFSD_V3_ACL=y 266CONFIG_NFSD_V3_ACL=y
228CONFIG_NFSD_V4=y 267CONFIG_NFSD_V4=y
229CONFIG_CIFS=m 268CONFIG_CIFS=m
269CONFIG_PARTITION_ADVANCED=y
230CONFIG_NLS_CODEPAGE_437=y 270CONFIG_NLS_CODEPAGE_437=y
231CONFIG_NLS_CODEPAGE_1250=y 271CONFIG_NLS_CODEPAGE_1250=y
232CONFIG_NLS_CODEPAGE_1251=y 272CONFIG_NLS_CODEPAGE_1251=y
@@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y
234CONFIG_NLS_ISO8859_1=y 274CONFIG_NLS_ISO8859_1=y
235CONFIG_NLS_ISO8859_15=y 275CONFIG_NLS_ISO8859_15=y
236CONFIG_NLS_UTF8=y 276CONFIG_NLS_UTF8=y
277CONFIG_CRC_T10DIF=y
278CONFIG_LIBCRC32C=m
237CONFIG_MAGIC_SYSRQ=y 279CONFIG_MAGIC_SYSRQ=y
238# CONFIG_UNUSED_SYMBOLS is not set
239CONFIG_DEBUG_FS=y 280CONFIG_DEBUG_FS=y
240CONFIG_DEBUG_KERNEL=y 281CONFIG_DEBUG_KERNEL=y
241CONFIG_DEBUG_MUTEXES=y 282CONFIG_DEBUG_MUTEXES=y
283# CONFIG_RCU_CPU_STALL_DETECTOR is not set
242CONFIG_LATENCYTOP=y 284CONFIG_LATENCYTOP=y
243CONFIG_STRICT_DEVMEM=y 285CONFIG_SYSCTL_SYSCALL_CHECK=y
286CONFIG_BOOTX_TEXT=y
244CONFIG_CRYPTO_NULL=m 287CONFIG_CRYPTO_NULL=m
245CONFIG_CRYPTO_TEST=m 288CONFIG_CRYPTO_TEST=m
289CONFIG_CRYPTO_ECB=m
246CONFIG_CRYPTO_PCBC=m 290CONFIG_CRYPTO_PCBC=m
247CONFIG_CRYPTO_HMAC=y 291CONFIG_CRYPTO_HMAC=y
292CONFIG_CRYPTO_MD4=m
248CONFIG_CRYPTO_MICHAEL_MIC=m 293CONFIG_CRYPTO_MICHAEL_MIC=m
249CONFIG_CRYPTO_SHA256=m 294CONFIG_CRYPTO_SHA256=m
250CONFIG_CRYPTO_SHA512=m 295CONFIG_CRYPTO_SHA512=m
251CONFIG_CRYPTO_WP512=m 296CONFIG_CRYPTO_WP512=m
252CONFIG_CRYPTO_AES=m 297CONFIG_CRYPTO_AES=m
253CONFIG_CRYPTO_ANUBIS=m 298CONFIG_CRYPTO_ANUBIS=m
299CONFIG_CRYPTO_ARC4=m
254CONFIG_CRYPTO_BLOWFISH=m 300CONFIG_CRYPTO_BLOWFISH=m
255CONFIG_CRYPTO_CAST5=m 301CONFIG_CRYPTO_CAST5=m
256CONFIG_CRYPTO_CAST6=m 302CONFIG_CRYPTO_CAST6=m
@@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m
260CONFIG_CRYPTO_TWOFISH=m 306CONFIG_CRYPTO_TWOFISH=m
261# CONFIG_CRYPTO_ANSI_CPRNG is not set 307# CONFIG_CRYPTO_ANSI_CPRNG is not set
262# CONFIG_CRYPTO_HW is not set 308# CONFIG_CRYPTO_HW is not set
263# CONFIG_VIRTUALIZATION is not set
264CONFIG_CRC_T10DIF=y
265CONFIG_LIBCRC32C=m
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5aac9a8bc53b..9352e4430c3b 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_LOG_BUF_SHIFT=14 3CONFIG_LOG_BUF_SHIFT=14
4CONFIG_BLK_DEV_INITRD=y 4CONFIG_BLK_DEV_INITRD=y
5# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
6CONFIG_EXPERT=y 5CONFIG_EXPERT=y
7CONFIG_SLAB=y 6CONFIG_SLAB=y
8CONFIG_MODULES=y 7CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y 8CONFIG_MODULE_UNLOAD=y
10# CONFIG_BLK_DEV_BSG is not set 9# CONFIG_BLK_DEV_BSG is not set
10CONFIG_PARTITION_ADVANCED=y
11# CONFIG_PPC_CHRP is not set 11# CONFIG_PPC_CHRP is not set
12# CONFIG_PPC_PMAC is not set 12# CONFIG_PPC_PMAC is not set
13CONFIG_PPC_83xx=y 13CONFIG_PPC_83xx=y
@@ -25,7 +25,6 @@ CONFIG_ASP834x=y
25CONFIG_QUICC_ENGINE=y 25CONFIG_QUICC_ENGINE=y
26CONFIG_QE_GPIO=y 26CONFIG_QE_GPIO=y
27CONFIG_MATH_EMULATION=y 27CONFIG_MATH_EMULATION=y
28CONFIG_SPARSE_IRQ=y
29CONFIG_PCI=y 28CONFIG_PCI=y
30CONFIG_NET=y 29CONFIG_NET=y
31CONFIG_PACKET=y 30CONFIG_PACKET=y
@@ -42,10 +41,9 @@ CONFIG_INET_ESP=y
42# CONFIG_INET_LRO is not set 41# CONFIG_INET_LRO is not set
43# CONFIG_IPV6 is not set 42# CONFIG_IPV6 is not set
44CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_DEVTMPFS=y
45# CONFIG_FW_LOADER is not set 45# CONFIG_FW_LOADER is not set
46CONFIG_MTD=y 46CONFIG_MTD=y
47CONFIG_MTD_PARTITIONS=y
48CONFIG_MTD_OF_PARTS=y
49CONFIG_MTD_CHAR=y 47CONFIG_MTD_CHAR=y
50CONFIG_MTD_BLOCK=y 48CONFIG_MTD_BLOCK=y
51CONFIG_MTD_CFI=y 49CONFIG_MTD_CFI=y
@@ -64,15 +62,14 @@ CONFIG_ATA=y
64CONFIG_SATA_FSL=y 62CONFIG_SATA_FSL=y
65CONFIG_SATA_SIL=y 63CONFIG_SATA_SIL=y
66CONFIG_NETDEVICES=y 64CONFIG_NETDEVICES=y
65CONFIG_MII=y
66CONFIG_UCC_GETH=y
67CONFIG_GIANFAR=y
67CONFIG_MARVELL_PHY=y 68CONFIG_MARVELL_PHY=y
68CONFIG_DAVICOM_PHY=y 69CONFIG_DAVICOM_PHY=y
69CONFIG_VITESSE_PHY=y 70CONFIG_VITESSE_PHY=y
70CONFIG_ICPLUS_PHY=y 71CONFIG_ICPLUS_PHY=y
71CONFIG_FIXED_PHY=y 72CONFIG_FIXED_PHY=y
72CONFIG_NET_ETHERNET=y
73CONFIG_MII=y
74CONFIG_GIANFAR=y
75CONFIG_UCC_GETH=y
76CONFIG_INPUT_FF_MEMLESS=m 73CONFIG_INPUT_FF_MEMLESS=m
77# CONFIG_INPUT_MOUSEDEV is not set 74# CONFIG_INPUT_MOUSEDEV is not set
78# CONFIG_INPUT_KEYBOARD is not set 75# CONFIG_INPUT_KEYBOARD is not set
@@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y
112CONFIG_EXT2_FS=y 109CONFIG_EXT2_FS=y
113CONFIG_EXT3_FS=y 110CONFIG_EXT3_FS=y
114# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 111# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
115CONFIG_INOTIFY=y
116CONFIG_PROC_KCORE=y 112CONFIG_PROC_KCORE=y
117CONFIG_TMPFS=y 113CONFIG_TMPFS=y
118CONFIG_NFS_FS=y 114CONFIG_NFS_FS=y
119CONFIG_NFS_V3=y
120CONFIG_NFS_V4=y 115CONFIG_NFS_V4=y
121CONFIG_ROOT_NFS=y 116CONFIG_ROOT_NFS=y
122CONFIG_PARTITION_ADVANCED=y
123CONFIG_CRC_T10DIF=y 117CONFIG_CRC_T10DIF=y
124# CONFIG_RCU_CPU_STALL_DETECTOR is not set
125CONFIG_SYSCTL_SYSCALL_CHECK=y
126CONFIG_CRYPTO_ECB=m 118CONFIG_CRYPTO_ECB=m
127CONFIG_CRYPTO_PCBC=m 119CONFIG_CRYPTO_PCBC=m
128CONFIG_CRYPTO_SHA256=y 120CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 03ee911c4577..8b5bda27d248 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
6CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_AUDIT=y 7CONFIG_AUDIT=y
8CONFIG_SPARSE_IRQ=y 8CONFIG_IRQ_DOMAIN_DEBUG=y
9CONFIG_NO_HZ=y
10CONFIG_HIGH_RES_TIMERS=y
9CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
10CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
11CONFIG_LOG_BUF_SHIFT=14 13CONFIG_LOG_BUF_SHIFT=14
@@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y
17CONFIG_MODULE_FORCE_UNLOAD=y 19CONFIG_MODULE_FORCE_UNLOAD=y
18CONFIG_MODVERSIONS=y 20CONFIG_MODVERSIONS=y
19# CONFIG_BLK_DEV_BSG is not set 21# CONFIG_BLK_DEV_BSG is not set
22CONFIG_PARTITION_ADVANCED=y
23CONFIG_MAC_PARTITION=y
20CONFIG_MPC8540_ADS=y 24CONFIG_MPC8540_ADS=y
21CONFIG_MPC8560_ADS=y 25CONFIG_MPC8560_ADS=y
22CONFIG_MPC85xx_CDS=y 26CONFIG_MPC85xx_CDS=y
@@ -40,8 +44,6 @@ CONFIG_SBC8548=y
40CONFIG_QUICC_ENGINE=y 44CONFIG_QUICC_ENGINE=y
41CONFIG_QE_GPIO=y 45CONFIG_QE_GPIO=y
42CONFIG_HIGHMEM=y 46CONFIG_HIGHMEM=y
43CONFIG_NO_HZ=y
44CONFIG_HIGH_RES_TIMERS=y
45CONFIG_BINFMT_MISC=m 47CONFIG_BINFMT_MISC=m
46CONFIG_MATH_EMULATION=y 48CONFIG_MATH_EMULATION=y
47CONFIG_FORCE_MAX_ZONEORDER=12 49CONFIG_FORCE_MAX_ZONEORDER=12
@@ -74,36 +76,25 @@ CONFIG_INET_ESP=y
74CONFIG_IPV6=y 76CONFIG_IPV6=y
75CONFIG_IP_SCTP=m 77CONFIG_IP_SCTP=m
76CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 78CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
79CONFIG_DEVTMPFS=y
77CONFIG_MTD=y 80CONFIG_MTD=y
78CONFIG_MTD_CMDLINE_PARTS=y 81CONFIG_MTD_CMDLINE_PARTS=y
79CONFIG_MTD_CHAR=y 82CONFIG_MTD_CHAR=y
80CONFIG_MTD_BLOCK=y 83CONFIG_MTD_BLOCK=y
81CONFIG_MTD_CFI=y
82CONFIG_FTL=y 84CONFIG_FTL=y
83CONFIG_MTD_GEN_PROBE=y 85CONFIG_MTD_CFI=y
84CONFIG_MTD_MAP_BANK_WIDTH_1=y
85CONFIG_MTD_MAP_BANK_WIDTH_2=y
86CONFIG_MTD_MAP_BANK_WIDTH_4=y
87CONFIG_MTD_CFI_I1=y
88CONFIG_MTD_CFI_I2=y
89CONFIG_MTD_CFI_INTELEXT=y 86CONFIG_MTD_CFI_INTELEXT=y
90CONFIG_MTD_CFI_AMDSTD=y 87CONFIG_MTD_CFI_AMDSTD=y
91CONFIG_MTD_CFI_UTIL=y
92CONFIG_MTD_PHYSMAP_OF=y 88CONFIG_MTD_PHYSMAP_OF=y
93CONFIG_MTD_PARTITIONS=y 89CONFIG_MTD_M25P80=y
94CONFIG_MTD_OF_PARTS=y
95CONFIG_MTD_NAND=y 90CONFIG_MTD_NAND=y
96CONFIG_MTD_NAND_FSL_ELBC=y 91CONFIG_MTD_NAND_FSL_ELBC=y
97CONFIG_MTD_NAND_FSL_IFC=y 92CONFIG_MTD_NAND_FSL_IFC=y
98CONFIG_MTD_NAND_IDS=y
99CONFIG_MTD_NAND_ECC=y
100CONFIG_MTD_M25P80=y
101CONFIG_PROC_DEVICETREE=y 93CONFIG_PROC_DEVICETREE=y
102CONFIG_BLK_DEV_LOOP=y 94CONFIG_BLK_DEV_LOOP=y
103CONFIG_BLK_DEV_NBD=y 95CONFIG_BLK_DEV_NBD=y
104CONFIG_BLK_DEV_RAM=y 96CONFIG_BLK_DEV_RAM=y
105CONFIG_BLK_DEV_RAM_SIZE=131072 97CONFIG_BLK_DEV_RAM_SIZE=131072
106CONFIG_MISC_DEVICES=y
107CONFIG_EEPROM_LEGACY=y 98CONFIG_EEPROM_LEGACY=y
108CONFIG_BLK_DEV_SD=y 99CONFIG_BLK_DEV_SD=y
109CONFIG_CHR_DEV_ST=y 100CONFIG_CHR_DEV_ST=y
@@ -115,6 +106,7 @@ CONFIG_ATA=y
115CONFIG_SATA_AHCI=y 106CONFIG_SATA_AHCI=y
116CONFIG_SATA_FSL=y 107CONFIG_SATA_FSL=y
117CONFIG_PATA_ALI=y 108CONFIG_PATA_ALI=y
109CONFIG_PATA_VIA=y
118CONFIG_NETDEVICES=y 110CONFIG_NETDEVICES=y
119CONFIG_DUMMY=y 111CONFIG_DUMMY=y
120CONFIG_FS_ENET=y 112CONFIG_FS_ENET=y
@@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y
134CONFIG_SERIAL_8250_CONSOLE=y 126CONFIG_SERIAL_8250_CONSOLE=y
135CONFIG_SERIAL_8250_NR_UARTS=2 127CONFIG_SERIAL_8250_NR_UARTS=2
136CONFIG_SERIAL_8250_RUNTIME_UARTS=2 128CONFIG_SERIAL_8250_RUNTIME_UARTS=2
137CONFIG_SERIAL_8250_EXTENDED=y
138CONFIG_SERIAL_8250_MANY_PORTS=y 129CONFIG_SERIAL_8250_MANY_PORTS=y
139CONFIG_SERIAL_8250_DETECT_IRQ=y 130CONFIG_SERIAL_8250_DETECT_IRQ=y
140CONFIG_SERIAL_8250_RSA=y 131CONFIG_SERIAL_8250_RSA=y
@@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y
183CONFIG_HID_SONY=y 174CONFIG_HID_SONY=y
184CONFIG_HID_SUNPLUS=y 175CONFIG_HID_SUNPLUS=y
185CONFIG_USB=y 176CONFIG_USB=y
186CONFIG_USB_DEVICEFS=y
187CONFIG_USB_MON=y 177CONFIG_USB_MON=y
188CONFIG_USB_EHCI_HCD=y 178CONFIG_USB_EHCI_HCD=y
189CONFIG_USB_EHCI_FSL=y 179CONFIG_USB_EHCI_FSL=y
@@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m
229CONFIG_SYSV_FS=m 219CONFIG_SYSV_FS=m
230CONFIG_UFS_FS=m 220CONFIG_UFS_FS=m
231CONFIG_NFS_FS=y 221CONFIG_NFS_FS=y
232CONFIG_NFS_V3=y
233CONFIG_NFS_V4=y 222CONFIG_NFS_V4=y
234CONFIG_ROOT_NFS=y 223CONFIG_ROOT_NFS=y
235CONFIG_NFSD=y 224CONFIG_NFSD=y
236CONFIG_PARTITION_ADVANCED=y
237CONFIG_MAC_PARTITION=y
238CONFIG_CRC_T10DIF=y 225CONFIG_CRC_T10DIF=y
239CONFIG_DEBUG_FS=y 226CONFIG_DEBUG_FS=y
240CONFIG_DETECT_HUNG_TASK=y 227CONFIG_DETECT_HUNG_TASK=y
241CONFIG_DEBUG_INFO=y 228CONFIG_DEBUG_INFO=y
242CONFIG_SYSCTL_SYSCALL_CHECK=y
243CONFIG_IRQ_DOMAIN_DEBUG=y
244CONFIG_CRYPTO_PCBC=m 229CONFIG_CRYPTO_PCBC=m
245CONFIG_CRYPTO_SHA256=y 230CONFIG_CRYPTO_SHA256=y
246CONFIG_CRYPTO_SHA512=y 231CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index fdfa84dc908f..b0974e7e98ae 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y 7CONFIG_POSIX_MQUEUE=y
8CONFIG_BSD_PROCESS_ACCT=y 8CONFIG_BSD_PROCESS_ACCT=y
9CONFIG_AUDIT=y 9CONFIG_AUDIT=y
10CONFIG_SPARSE_IRQ=y 10CONFIG_IRQ_DOMAIN_DEBUG=y
11CONFIG_NO_HZ=y
12CONFIG_HIGH_RES_TIMERS=y
11CONFIG_IKCONFIG=y 13CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 14CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=14 15CONFIG_LOG_BUF_SHIFT=14
@@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y
19CONFIG_MODULE_FORCE_UNLOAD=y 21CONFIG_MODULE_FORCE_UNLOAD=y
20CONFIG_MODVERSIONS=y 22CONFIG_MODVERSIONS=y
21# CONFIG_BLK_DEV_BSG is not set 23# CONFIG_BLK_DEV_BSG is not set
24CONFIG_PARTITION_ADVANCED=y
25CONFIG_MAC_PARTITION=y
22CONFIG_MPC8540_ADS=y 26CONFIG_MPC8540_ADS=y
23CONFIG_MPC8560_ADS=y 27CONFIG_MPC8560_ADS=y
24CONFIG_MPC85xx_CDS=y 28CONFIG_MPC85xx_CDS=y
@@ -42,8 +46,6 @@ CONFIG_SBC8548=y
42CONFIG_QUICC_ENGINE=y 46CONFIG_QUICC_ENGINE=y
43CONFIG_QE_GPIO=y 47CONFIG_QE_GPIO=y
44CONFIG_HIGHMEM=y 48CONFIG_HIGHMEM=y
45CONFIG_NO_HZ=y
46CONFIG_HIGH_RES_TIMERS=y
47CONFIG_BINFMT_MISC=m 49CONFIG_BINFMT_MISC=m
48CONFIG_MATH_EMULATION=y 50CONFIG_MATH_EMULATION=y
49CONFIG_IRQ_ALL_CPUS=y 51CONFIG_IRQ_ALL_CPUS=y
@@ -77,36 +79,25 @@ CONFIG_INET_ESP=y
77CONFIG_IPV6=y 79CONFIG_IPV6=y
78CONFIG_IP_SCTP=m 80CONFIG_IP_SCTP=m
79CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 81CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
82CONFIG_DEVTMPFS=y
80CONFIG_MTD=y 83CONFIG_MTD=y
81CONFIG_MTD_CMDLINE_PARTS=y 84CONFIG_MTD_CMDLINE_PARTS=y
82CONFIG_MTD_CHAR=y 85CONFIG_MTD_CHAR=y
83CONFIG_MTD_BLOCK=y 86CONFIG_MTD_BLOCK=y
84CONFIG_MTD_CFI=y
85CONFIG_FTL=y 87CONFIG_FTL=y
86CONFIG_MTD_GEN_PROBE=y 88CONFIG_MTD_CFI=y
87CONFIG_MTD_MAP_BANK_WIDTH_1=y
88CONFIG_MTD_MAP_BANK_WIDTH_2=y
89CONFIG_MTD_MAP_BANK_WIDTH_4=y
90CONFIG_MTD_CFI_I1=y
91CONFIG_MTD_CFI_I2=y
92CONFIG_MTD_CFI_INTELEXT=y 89CONFIG_MTD_CFI_INTELEXT=y
93CONFIG_MTD_CFI_AMDSTD=y 90CONFIG_MTD_CFI_AMDSTD=y
94CONFIG_MTD_CFI_UTIL=y
95CONFIG_MTD_PHYSMAP_OF=y 91CONFIG_MTD_PHYSMAP_OF=y
96CONFIG_MTD_PARTITIONS=y 92CONFIG_MTD_M25P80=y
97CONFIG_MTD_OF_PARTS=y
98CONFIG_MTD_NAND=y 93CONFIG_MTD_NAND=y
99CONFIG_MTD_NAND_FSL_ELBC=y 94CONFIG_MTD_NAND_FSL_ELBC=y
100CONFIG_MTD_NAND_FSL_IFC=y 95CONFIG_MTD_NAND_FSL_IFC=y
101CONFIG_MTD_NAND_IDS=y
102CONFIG_MTD_NAND_ECC=y
103CONFIG_MTD_M25P80=y
104CONFIG_PROC_DEVICETREE=y 96CONFIG_PROC_DEVICETREE=y
105CONFIG_BLK_DEV_LOOP=y 97CONFIG_BLK_DEV_LOOP=y
106CONFIG_BLK_DEV_NBD=y 98CONFIG_BLK_DEV_NBD=y
107CONFIG_BLK_DEV_RAM=y 99CONFIG_BLK_DEV_RAM=y
108CONFIG_BLK_DEV_RAM_SIZE=131072 100CONFIG_BLK_DEV_RAM_SIZE=131072
109CONFIG_MISC_DEVICES=y
110CONFIG_EEPROM_LEGACY=y 101CONFIG_EEPROM_LEGACY=y
111CONFIG_BLK_DEV_SD=y 102CONFIG_BLK_DEV_SD=y
112CONFIG_CHR_DEV_ST=y 103CONFIG_CHR_DEV_ST=y
@@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y
137CONFIG_SERIAL_8250_CONSOLE=y 128CONFIG_SERIAL_8250_CONSOLE=y
138CONFIG_SERIAL_8250_NR_UARTS=2 129CONFIG_SERIAL_8250_NR_UARTS=2
139CONFIG_SERIAL_8250_RUNTIME_UARTS=2 130CONFIG_SERIAL_8250_RUNTIME_UARTS=2
140CONFIG_SERIAL_8250_EXTENDED=y
141CONFIG_SERIAL_8250_MANY_PORTS=y 131CONFIG_SERIAL_8250_MANY_PORTS=y
142CONFIG_SERIAL_8250_DETECT_IRQ=y 132CONFIG_SERIAL_8250_DETECT_IRQ=y
143CONFIG_SERIAL_8250_RSA=y 133CONFIG_SERIAL_8250_RSA=y
@@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y
186CONFIG_HID_SONY=y 176CONFIG_HID_SONY=y
187CONFIG_HID_SUNPLUS=y 177CONFIG_HID_SUNPLUS=y
188CONFIG_USB=y 178CONFIG_USB=y
189CONFIG_USB_DEVICEFS=y
190CONFIG_USB_MON=y 179CONFIG_USB_MON=y
191CONFIG_USB_EHCI_HCD=y 180CONFIG_USB_EHCI_HCD=y
192CONFIG_USB_EHCI_FSL=y 181CONFIG_USB_EHCI_FSL=y
@@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m
232CONFIG_SYSV_FS=m 221CONFIG_SYSV_FS=m
233CONFIG_UFS_FS=m 222CONFIG_UFS_FS=m
234CONFIG_NFS_FS=y 223CONFIG_NFS_FS=y
235CONFIG_NFS_V3=y
236CONFIG_NFS_V4=y 224CONFIG_NFS_V4=y
237CONFIG_ROOT_NFS=y 225CONFIG_ROOT_NFS=y
238CONFIG_NFSD=y 226CONFIG_NFSD=y
239CONFIG_PARTITION_ADVANCED=y
240CONFIG_MAC_PARTITION=y
241CONFIG_CRC_T10DIF=y 227CONFIG_CRC_T10DIF=y
242CONFIG_DEBUG_FS=y 228CONFIG_DEBUG_FS=y
243CONFIG_DETECT_HUNG_TASK=y 229CONFIG_DETECT_HUNG_TASK=y
244CONFIG_DEBUG_INFO=y 230CONFIG_DEBUG_INFO=y
245CONFIG_SYSCTL_SYSCALL_CHECK=y
246CONFIG_IRQ_DOMAIN_DEBUG=y
247CONFIG_CRYPTO_PCBC=m 231CONFIG_CRYPTO_PCBC=m
248CONFIG_CRYPTO_SHA256=y 232CONFIG_CRYPTO_SHA256=y
249CONFIG_CRYPTO_SHA512=y 233CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 50d82c8a037f..b3c083de17ad 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature)
553 & feature); 553 & feature);
554} 554}
555 555
556#ifdef CONFIG_HAVE_HW_BREAKPOINT
557#define HBP_NUM 1 556#define HBP_NUM 1
558#endif /* CONFIG_HAVE_HW_BREAKPOINT */
559 557
560#endif /* !__ASSEMBLY__ */ 558#endif /* !__ASSEMBLY__ */
561 559
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 50ea12fd7bf5..a8bf5c673a3c 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
33#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/processor.h> 34#include <asm/processor.h>
35#include <asm/page.h> 35#include <asm/page.h>
36#include <asm/cacheflush.h>
36 37
37#define KVM_MAX_VCPUS NR_CPUS 38#define KVM_MAX_VCPUS NR_CPUS
38#define KVM_MAX_VCORES NR_CPUS 39#define KVM_MAX_VCORES NR_CPUS
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0124937a23b9..e006f0bdea95 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);
219void kvmppc_free_lpid(long lpid); 219void kvmppc_free_lpid(long lpid);
220void kvmppc_init_lpid(unsigned long nr_lpids); 220void kvmppc_init_lpid(unsigned long nr_lpids);
221 221
222static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
223{
224 /* Clear i-cache for new pages */
225 struct page *page;
226 page = pfn_to_page(pfn);
227 if (!test_bit(PG_arch_1, &page->flags)) {
228 flush_dcache_icache_page(page);
229 set_bit(PG_arch_1, &page->flags);
230 }
231}
232
233
222#endif /* __POWERPC_KVM_PPC_H__ */ 234#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 326d33ca55cd..d4f471fb1031 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <asm/smp.h> 16#include <asm/smp.h>
17#include <asm/io.h>
17 18
18struct mpic_msgr { 19struct mpic_msgr {
19 u32 __iomem *base; 20 u32 __iomem *base;
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2d7bb8ced136..e4897523de41 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
83 return 0; 83 return 0;
84 } 84 }
85 85
86 if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { 86 if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
87 dev_info(dev, "Warning: IOMMU window too big for device mask\n"); 87 dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
88 dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", 88 dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
89 mask, (tbl->it_offset + tbl->it_size) << 89 mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
90 IOMMU_PAGE_SHIFT);
91 return 0; 90 return 0;
92 } else 91 } else
93 return 1; 92 return 1;
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f3a82dde61db..956a4c496de9 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
253 253
254 /* Do not emulate user-space instructions, instead single-step them */ 254 /* Do not emulate user-space instructions, instead single-step them */
255 if (user_mode(regs)) { 255 if (user_mode(regs)) {
256 bp->ctx->task->thread.last_hit_ubp = bp; 256 current->thread.last_hit_ubp = bp;
257 regs->msr |= MSR_SE; 257 regs->msr |= MSR_SE;
258 goto out; 258 goto out;
259 } 259 }
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a3c2f0..c470a40b29f5 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
25#include <asm/processor.h> 25#include <asm/processor.h>
26#include <asm/machdep.h> 26#include <asm/machdep.h>
27#include <asm/debug.h> 27#include <asm/debug.h>
28#include <linux/slab.h>
28 29
29/* 30/*
30 * This table contains the mapping between PowerPC hardware trap types, and 31 * This table contains the mapping between PowerPC hardware trap types, and
@@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)
101 return SIGHUP; /* default for things we don't know about */ 102 return SIGHUP; /* default for things we don't know about */
102} 103}
103 104
105/**
106 *
107 * kgdb_skipexception - Bail out of KGDB when we've been triggered.
108 * @exception: Exception vector number
109 * @regs: Current &struct pt_regs.
110 *
111 * On some architectures we need to skip a breakpoint exception when
112 * it occurs after a breakpoint has been removed.
113 *
114 */
115int kgdb_skipexception(int exception, struct pt_regs *regs)
116{
117 return kgdb_isremovedbreak(regs->nip);
118}
119
104static int kgdb_call_nmi_hook(struct pt_regs *regs) 120static int kgdb_call_nmi_hook(struct pt_regs *regs)
105{ 121{
106 kgdb_nmicallback(raw_smp_processor_id(), regs); 122 kgdb_nmicallback(raw_smp_processor_id(), regs);
@@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
138static int kgdb_singlestep(struct pt_regs *regs) 154static int kgdb_singlestep(struct pt_regs *regs)
139{ 155{
140 struct thread_info *thread_info, *exception_thread_info; 156 struct thread_info *thread_info, *exception_thread_info;
157 struct thread_info *backup_current_thread_info = \
158 (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
141 159
142 if (user_mode(regs)) 160 if (user_mode(regs))
143 return 0; 161 return 0;
@@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
155 thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1)); 173 thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
156 exception_thread_info = current_thread_info(); 174 exception_thread_info = current_thread_info();
157 175
158 if (thread_info != exception_thread_info) 176 if (thread_info != exception_thread_info) {
177 /* Save the original current_thread_info. */
178 memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
159 memcpy(exception_thread_info, thread_info, sizeof *thread_info); 179 memcpy(exception_thread_info, thread_info, sizeof *thread_info);
180 }
160 181
161 kgdb_handle_exception(0, SIGTRAP, 0, regs); 182 kgdb_handle_exception(0, SIGTRAP, 0, regs);
162 183
163 if (thread_info != exception_thread_info) 184 if (thread_info != exception_thread_info)
164 memcpy(thread_info, exception_thread_info, sizeof *thread_info); 185 /* Restore current_thread_info lastly. */
186 memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
165 187
166 return 1; 188 return 1;
167} 189}
@@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
410#else 432#else
411 linux_regs->msr |= MSR_SE; 433 linux_regs->msr |= MSR_SE;
412#endif 434#endif
413 kgdb_single_step = 1;
414 atomic_set(&kgdb_cpu_doing_single_step, 435 atomic_set(&kgdb_cpu_doing_single_step,
415 raw_smp_processor_id()); 436 raw_smp_processor_id());
416 } 437 }
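The kgdb_singlestep() change above saves the live thread_info into a heap backup before overwriting it, so the state clobbered by the first memcpy() can be put back after kgdb_handle_exception() returns. A minimal userspace sketch of that save/swap/restore pattern (struct ti and handle_exception() are stand-ins, not kernel symbols):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ti { int cpu; int flags; };          /* stand-in for thread_info */

    static void handle_exception(struct ti *t)  /* stand-in for the debugger call */
    {
        t->flags |= 0x1;                        /* the debugger may modify the active ti */
    }

    int main(void)
    {
        struct ti stack_ti   = { .cpu = 1, .flags = 0 }; /* ti derived from the stack */
        struct ti current_ti = { .cpu = 0, .flags = 8 }; /* ti of the exception task */
        struct ti *backup = malloc(sizeof(*backup));

        if (!backup)
            return 1;
        memcpy(backup, &current_ti, sizeof(current_ti)); /* save the original first */
        memcpy(&current_ti, &stack_ti, sizeof(stack_ti));/* swap in the stack ti */

        handle_exception(&current_ti);

        memcpy(&current_ti, backup, sizeof(current_ti)); /* restore the original */
        free(backup);
        printf("cpu=%d flags=%d\n", current_ti.cpu, current_ti.flags); /* cpu=0 flags=8 */
        return 0;
    }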
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index f2496f2faecc..4e3cc47f26b9 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
107 long ret; 107 long ret;
108 108
109 if (personality(current->personality) == PER_LINUX32 109 if (personality(current->personality) == PER_LINUX32
110 && personality == PER_LINUX) 110 && personality(personality) == PER_LINUX)
111 personality = PER_LINUX32; 111 personality = (personality & ~PER_MASK) | PER_LINUX32;
112 ret = sys_personality(personality); 112 ret = sys_personality(personality);
113 if (ret == PER_LINUX32) 113 if (personality(ret) == PER_LINUX32)
114 ret = PER_LINUX; 114 ret = (ret & ~PER_MASK) | PER_LINUX;
115 return ret; 115 return ret;
116} 116}
117#endif 117#endif
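The ppc64_personality() hunk compares and rewrites only the low PER_* bits, so flag bits such as ADDR_NO_RANDOMIZE survive the PER_LINUX32 translation. A standalone sketch of the masking, with the constants reproduced here only for illustration:

    #include <stdio.h>

    /* Values mirror include/linux/personality.h (reproduced only for illustration). */
    #define PER_MASK           0x00ffU
    #define PER_LINUX          0x0000U
    #define PER_LINUX32        0x0008U
    #define ADDR_NO_RANDOMIZE  0x0040000U

    #define personality(pers)  ((pers) & PER_MASK)

    int main(void)
    {
        unsigned int p = PER_LINUX | ADDR_NO_RANDOMIZE;  /* personality plus a flag bit */

        /* Old code: p = PER_LINUX32;  -- drops ADDR_NO_RANDOMIZE.
         * New code: only the low PER_* bits are replaced, the flags survive. */
        if (personality(p) == PER_LINUX)
            p = (p & ~PER_MASK) | PER_LINUX32;

        printf("personality=%#x flags-preserved=%s\n",
               personality(p), (p & ADDR_NO_RANDOMIZE) ? "yes" : "no");
        return 0;
    }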
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index f922c29bb234..837f13e7b6bf 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -211,6 +211,9 @@ next_pteg:
211 pteg1 |= PP_RWRX; 211 pteg1 |= PP_RWRX;
212 } 212 }
213 213
214 if (orig_pte->may_execute)
215 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
216
214 local_irq_disable(); 217 local_irq_disable();
215 218
216 if (pteg[rr]) { 219 if (pteg[rr]) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 10fc8ec9d2a8..0688b6b39585 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
126 126
127 if (!orig_pte->may_execute) 127 if (!orig_pte->may_execute)
128 rflags |= HPTE_R_N; 128 rflags |= HPTE_R_N;
129 else
130 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
129 131
130 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); 132 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
131 133
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5a84c8d3d040..44b72feaff7d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)
1421 sync /* order setting ceded vs. testing prodded */ 1421 sync /* order setting ceded vs. testing prodded */
1422 lbz r5,VCPU_PRODDED(r3) 1422 lbz r5,VCPU_PRODDED(r3)
1423 cmpwi r5,0 1423 cmpwi r5,0
1424 bne 1f 1424 bne kvm_cede_prodded
1425 li r0,0 /* set trap to 0 to say hcall is handled */ 1425 li r0,0 /* set trap to 0 to say hcall is handled */
1426 stw r0,VCPU_TRAP(r3) 1426 stw r0,VCPU_TRAP(r3)
1427 li r0,H_SUCCESS 1427 li r0,H_SUCCESS
1428 std r0,VCPU_GPR(R3)(r3) 1428 std r0,VCPU_GPR(R3)(r3)
1429BEGIN_FTR_SECTION 1429BEGIN_FTR_SECTION
1430 b 2f /* just send it up to host on 970 */ 1430 b kvm_cede_exit /* just send it up to host on 970 */
1431END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) 1431END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1432 1432
1433 /* 1433 /*
@@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1446 or r4,r4,r0 1446 or r4,r4,r0
1447 PPC_POPCNTW(R7,R4) 1447 PPC_POPCNTW(R7,R4)
1448 cmpw r7,r8 1448 cmpw r7,r8
1449 bge 2f 1449 bge kvm_cede_exit
1450 stwcx. r4,0,r6 1450 stwcx. r4,0,r6
1451 bne 31b 1451 bne 31b
1452 li r0,1 1452 li r0,1
@@ -1555,7 +1555,8 @@ kvm_end_cede:
1555 b hcall_real_fallback 1555 b hcall_real_fallback
1556 1556
1557 /* cede when already previously prodded case */ 1557 /* cede when already previously prodded case */
15581: li r0,0 1558kvm_cede_prodded:
1559 li r0,0
1559 stb r0,VCPU_PRODDED(r3) 1560 stb r0,VCPU_PRODDED(r3)
1560 sync /* order testing prodded vs. clearing ceded */ 1561 sync /* order testing prodded vs. clearing ceded */
1561 stb r0,VCPU_CEDED(r3) 1562 stb r0,VCPU_CEDED(r3)
@@ -1563,7 +1564,8 @@ kvm_end_cede:
1563 blr 1564 blr
1564 1565
1565 /* we've ceded but we want to give control to the host */ 1566 /* we've ceded but we want to give control to the host */
15662: li r3,H_TOO_HARD 1567kvm_cede_exit:
1568 li r3,H_TOO_HARD
1567 blr 1569 blr
1568 1570
1569secondary_too_late: 1571secondary_too_late:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc961302..a2b66717813d 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
322static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) 322static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
323{ 323{
324 if (vcpu_e500->g2h_tlb1_map) 324 if (vcpu_e500->g2h_tlb1_map)
325 memset(vcpu_e500->g2h_tlb1_map, 325 memset(vcpu_e500->g2h_tlb1_map, 0,
326 sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0); 326 sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
327 if (vcpu_e500->h2g_tlb1_rmap) 327 if (vcpu_e500->h2g_tlb1_rmap)
328 memset(vcpu_e500->h2g_tlb1_rmap, 328 memset(vcpu_e500->h2g_tlb1_rmap, 0,
329 sizeof(unsigned int) * host_tlb_params[1].entries, 0); 329 sizeof(unsigned int) * host_tlb_params[1].entries);
330} 330}
331 331
332static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) 332static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
539 539
540 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 540 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
541 ref, gvaddr, stlbe); 541 ref, gvaddr, stlbe);
542
543 /* Clear i-cache for new pages */
544 kvmppc_mmu_flush_icache(pfn);
542} 545}
543 546
544/* XXX only map the one-one case, for now use TLB0 */ 547/* XXX only map the one-one case, for now use TLB0 */
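The first e500_tlb.c hunk fixes transposed memset() arguments: with the buffer size passed as the fill value and 0 as the length, the maps were never actually cleared. A tiny demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned long long map[4] = { 1, 2, 3, 4 };

        /* Buggy call shape from the old code: value and length swapped,
         * so the length is 0 and nothing is written. */
        memset(map, sizeof(map), 0);
        printf("after swapped args: map[0]=%llu\n", map[0]);   /* still 1 */

        /* Fixed call: memset(ptr, value, length). */
        memset(map, 0, sizeof(map));
        printf("after fixed call:   map[0]=%llu\n", map[0]);   /* 0 */
        return 0;
    }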
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index f9ede7c6606e..0d24ff15f5f6 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -288,7 +288,7 @@ err1; stb r0,0(r3)
288 std r0,16(r1) 288 std r0,16(r1)
289 stdu r1,-STACKFRAMESIZE(r1) 289 stdu r1,-STACKFRAMESIZE(r1)
290 bl .enter_vmx_usercopy 290 bl .enter_vmx_usercopy
291 cmpwi r3,0 291 cmpwi cr1,r3,0
292 ld r0,STACKFRAMESIZE+16(r1) 292 ld r0,STACKFRAMESIZE+16(r1)
293 ld r3,STACKFRAMESIZE+48(r1) 293 ld r3,STACKFRAMESIZE+48(r1)
294 ld r4,STACKFRAMESIZE+56(r1) 294 ld r4,STACKFRAMESIZE+56(r1)
@@ -326,38 +326,7 @@ err1; stb r0,0(r3)
326 dcbt r0,r8,0b01010 /* GO */ 326 dcbt r0,r8,0b01010 /* GO */
327.machine pop 327.machine pop
328 328
329 /* 329 beq cr1,.Lunwind_stack_nonvmx_copy
330 * We prefetch both the source and destination using enhanced touch
331 * instructions. We use a stream ID of 0 for the load side and
332 * 1 for the store side.
333 */
334 clrrdi r6,r4,7
335 clrrdi r9,r3,7
336 ori r9,r9,1 /* stream=1 */
337
338 srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
339 cmpldi cr1,r7,0x3FF
340 ble cr1,1f
341 li r7,0x3FF
3421: lis r0,0x0E00 /* depth=7 */
343 sldi r7,r7,7
344 or r7,r7,r0
345 ori r10,r7,1 /* stream=1 */
346
347 lis r8,0x8000 /* GO=1 */
348 clrldi r8,r8,32
349
350.machine push
351.machine "power4"
352 dcbt r0,r6,0b01000
353 dcbt r0,r7,0b01010
354 dcbtst r0,r9,0b01000
355 dcbtst r0,r10,0b01010
356 eieio
357 dcbt r0,r8,0b01010 /* GO */
358.machine pop
359
360 beq .Lunwind_stack_nonvmx_copy
361 330
362 /* 331 /*
363 * If source and destination are not relatively aligned we use a 332 * If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0efdc51bc716..7ba6c96de778 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)
222 std r0,16(r1) 222 std r0,16(r1)
223 stdu r1,-STACKFRAMESIZE(r1) 223 stdu r1,-STACKFRAMESIZE(r1)
224 bl .enter_vmx_copy 224 bl .enter_vmx_copy
225 cmpwi r3,0 225 cmpwi cr1,r3,0
226 ld r0,STACKFRAMESIZE+16(r1) 226 ld r0,STACKFRAMESIZE+16(r1)
227 ld r3,STACKFRAMESIZE+48(r1) 227 ld r3,STACKFRAMESIZE+48(r1)
228 ld r4,STACKFRAMESIZE+56(r1) 228 ld r4,STACKFRAMESIZE+56(r1)
@@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)
260 dcbt r0,r8,0b01010 /* GO */ 260 dcbt r0,r8,0b01010 /* GO */
261.machine pop 261.machine pop
262 262
263 beq .Lunwind_stack_nonvmx_copy 263 beq cr1,.Lunwind_stack_nonvmx_copy
264 264
265 /* 265 /*
266 * If source and destination are not relatively aligned we use a 266 * If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index baaafde7d135..fbdad0e3929a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)
469 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); 469 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
470#endif 470#endif
471} 471}
472EXPORT_SYMBOL(flush_dcache_icache_page);
472 473
473void clear_user_page(void *page, unsigned long vaddr, struct page *pg) 474void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
474{ 475{
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 77b49ddda9d3..7cd2dbd6e4c4 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
1431 if (!event->hw.idx || is_limited_pmc(event->hw.idx)) 1431 if (!event->hw.idx || is_limited_pmc(event->hw.idx))
1432 continue; 1432 continue;
1433 val = read_pmc(event->hw.idx); 1433 val = read_pmc(event->hw.idx);
1434 if ((int)val < 0) { 1434 if (pmc_overflow(val)) {
1435 /* event has overflowed */ 1435 /* event has overflowed */
1436 found = 1; 1436 found = 1;
1437 record_and_restart(event, val, regs); 1437 record_and_restart(event, val, regs);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a7b2a600d0a4..c37f46136321 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
465 iounmap(hose->cfg_data); 465 iounmap(hose->cfg_data);
466 iounmap(hose->cfg_addr); 466 iounmap(hose->cfg_addr);
467 pcibios_free_controller(hose); 467 pcibios_free_controller(hose);
468 return 0; 468 return -ENODEV;
469 } 469 }
470 470
471 setup_pci_cmd(hose); 471 setup_pci_cmd(hose);
@@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary;
827 827
828void __devinit fsl_pci_init(void) 828void __devinit fsl_pci_init(void)
829{ 829{
830 int ret;
830 struct device_node *node; 831 struct device_node *node;
831 struct pci_controller *hose; 832 struct pci_controller *hose;
832 dma_addr_t max = 0xffffffff; 833 dma_addr_t max = 0xffffffff;
@@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void)
855 if (!fsl_pci_primary) 856 if (!fsl_pci_primary)
856 fsl_pci_primary = node; 857 fsl_pci_primary = node;
857 858
858 fsl_add_bridge(node, fsl_pci_primary == node); 859 ret = fsl_add_bridge(node, fsl_pci_primary == node);
859 hose = pci_find_hose_for_OF_device(node); 860 if (ret == 0) {
860 max = min(max, hose->dma_window_base_cur + 861 hose = pci_find_hose_for_OF_device(node);
861 hose->dma_window_size); 862 max = min(max, hose->dma_window_base_cur +
863 hose->dma_window_size);
864 }
862 } 865 }
863 } 866 }
864 867
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 483d8fa72e8b..e961f8c4a8f0 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -14,6 +14,9 @@
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/of_platform.h> 15#include <linux/of_platform.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/err.h>
18#include <linux/export.h>
19#include <linux/slab.h>
17#include <asm/prom.h> 20#include <asm/prom.h>
18#include <asm/hw_irq.h> 21#include <asm/hw_irq.h>
19#include <asm/ppc-pci.h> 22#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index eab3492a45c5..9b49c65ee7a4 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -17,6 +17,7 @@
17#include <linux/reboot.h> 17#include <linux/reboot.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/kallsyms.h> 19#include <linux/kallsyms.h>
20#include <linux/kmsg_dump.h>
20#include <linux/cpumask.h> 21#include <linux/cpumask.h>
21#include <linux/export.h> 22#include <linux/export.h>
22#include <linux/sysrq.h> 23#include <linux/sysrq.h>
@@ -894,13 +895,13 @@ cmds(struct pt_regs *excp)
894#endif 895#endif
895 default: 896 default:
896 printf("Unrecognized command: "); 897 printf("Unrecognized command: ");
897 do { 898 do {
898 if (' ' < cmd && cmd <= '~') 899 if (' ' < cmd && cmd <= '~')
899 putchar(cmd); 900 putchar(cmd);
900 else 901 else
901 printf("\\x%x", cmd); 902 printf("\\x%x", cmd);
902 cmd = inchar(); 903 cmd = inchar();
903 } while (cmd != '\n'); 904 } while (cmd != '\n');
904 printf(" (type ? for help)\n"); 905 printf(" (type ? for help)\n");
905 break; 906 break;
906 } 907 }
@@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr)
1097 return 1; 1098 return 1;
1098} 1099}
1099 1100
1100static char *breakpoint_help_string = 1101static char *breakpoint_help_string =
1101 "Breakpoint command usage:\n" 1102 "Breakpoint command usage:\n"
1102 "b show breakpoints\n" 1103 "b show breakpoints\n"
1103 "b <addr> [cnt] set breakpoint at given instr addr\n" 1104 "b <addr> [cnt] set breakpoint at given instr addr\n"
@@ -1193,7 +1194,7 @@ bpt_cmds(void)
1193 1194
1194 default: 1195 default:
1195 termch = cmd; 1196 termch = cmd;
1196 cmd = skipbl(); 1197 cmd = skipbl();
1197 if (cmd == '?') { 1198 if (cmd == '?') {
1198 printf(breakpoint_help_string); 1199 printf(breakpoint_help_string);
1199 break; 1200 break;
@@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1359 sp + REGS_OFFSET); 1360 sp + REGS_OFFSET);
1360 break; 1361 break;
1361 } 1362 }
1362 printf("--- Exception: %lx %s at ", regs.trap, 1363 printf("--- Exception: %lx %s at ", regs.trap,
1363 getvecname(TRAP(&regs))); 1364 getvecname(TRAP(&regs)));
1364 pc = regs.nip; 1365 pc = regs.nip;
1365 lr = regs.link; 1366 lr = regs.link;
@@ -1623,14 +1624,14 @@ static void super_regs(void)
1623 1624
1624 cmd = skipbl(); 1625 cmd = skipbl();
1625 if (cmd == '\n') { 1626 if (cmd == '\n') {
1626 unsigned long sp, toc; 1627 unsigned long sp, toc;
1627 asm("mr %0,1" : "=r" (sp) :); 1628 asm("mr %0,1" : "=r" (sp) :);
1628 asm("mr %0,2" : "=r" (toc) :); 1629 asm("mr %0,2" : "=r" (toc) :);
1629 1630
1630 printf("msr = "REG" sprg0= "REG"\n", 1631 printf("msr = "REG" sprg0= "REG"\n",
1631 mfmsr(), mfspr(SPRN_SPRG0)); 1632 mfmsr(), mfspr(SPRN_SPRG0));
1632 printf("pvr = "REG" sprg1= "REG"\n", 1633 printf("pvr = "REG" sprg1= "REG"\n",
1633 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); 1634 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
1634 printf("dec = "REG" sprg2= "REG"\n", 1635 printf("dec = "REG" sprg2= "REG"\n",
1635 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); 1636 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
1636 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); 1637 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
@@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size)
1783static int brev; 1784static int brev;
1784static int mnoread; 1785static int mnoread;
1785 1786
1786static char *memex_help_string = 1787static char *memex_help_string =
1787 "Memory examine command usage:\n" 1788 "Memory examine command usage:\n"
1788 "m [addr] [flags] examine/change memory\n" 1789 "m [addr] [flags] examine/change memory\n"
1789 " addr is optional. will start where left off.\n" 1790 " addr is optional. will start where left off.\n"
@@ -1798,7 +1799,7 @@ static char *memex_help_string =
1798 "NOTE: flags are saved as defaults\n" 1799 "NOTE: flags are saved as defaults\n"
1799 ""; 1800 "";
1800 1801
1801static char *memex_subcmd_help_string = 1802static char *memex_subcmd_help_string =
1802 "Memory examine subcommands:\n" 1803 "Memory examine subcommands:\n"
1803 " hexval write this val to current location\n" 1804 " hexval write this val to current location\n"
1804 " 'string' write chars from string to this location\n" 1805 " 'string' write chars from string to this location\n"
@@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump)
2064 nr = mread(adrs, temp, r); 2065 nr = mread(adrs, temp, r);
2065 adrs += nr; 2066 adrs += nr;
2066 for (m = 0; m < r; ++m) { 2067 for (m = 0; m < r; ++m) {
2067 if ((m & (sizeof(long) - 1)) == 0 && m > 0) 2068 if ((m & (sizeof(long) - 1)) == 0 && m > 0)
2068 putchar(' '); 2069 putchar(' ');
2069 if (m < nr) 2070 if (m < nr)
2070 printf("%.2x", temp[m]); 2071 printf("%.2x", temp[m]);
@@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump)
2072 printf("%s", fault_chars[fault_type]); 2073 printf("%s", fault_chars[fault_type]);
2073 } 2074 }
2074 for (; m < 16; ++m) { 2075 for (; m < 16; ++m) {
2075 if ((m & (sizeof(long) - 1)) == 0) 2076 if ((m & (sizeof(long) - 1)) == 0)
2076 putchar(' '); 2077 putchar(' ');
2077 printf(" "); 2078 printf(" ");
2078 } 2079 }
@@ -2148,45 +2149,28 @@ print_address(unsigned long addr)
2148void 2149void
2149dump_log_buf(void) 2150dump_log_buf(void)
2150{ 2151{
2151 const unsigned long size = 128; 2152 struct kmsg_dumper dumper = { .active = 1 };
2152 unsigned long end, addr; 2153 unsigned char buf[128];
2153 unsigned char buf[size + 1]; 2154 size_t len;
2154 2155
2155 addr = 0; 2156 if (setjmp(bus_error_jmp) != 0) {
2156 buf[size] = '\0'; 2157 printf("Error dumping printk buffer!\n");
2157 2158 return;
2158 if (setjmp(bus_error_jmp) != 0) { 2159 }
2159 printf("Unable to lookup symbol __log_buf!\n"); 2160
2160 return; 2161 catch_memory_errors = 1;
2161 } 2162 sync();
2162 2163
2163 catch_memory_errors = 1; 2164 kmsg_dump_rewind_nolock(&dumper);
2164 sync(); 2165 while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
2165 addr = kallsyms_lookup_name("__log_buf"); 2166 buf[len] = '\0';
2166 2167 printf("%s", buf);
2167 if (! addr) 2168 }
2168 printf("Symbol __log_buf not found!\n"); 2169
2169 else { 2170 sync();
2170 end = addr + (1 << CONFIG_LOG_BUF_SHIFT); 2171 /* wait a little while to see if we get a machine check */
2171 while (addr < end) { 2172 __delay(200);
2172 if (! mread(addr, buf, size)) { 2173 catch_memory_errors = 0;
2173 printf("Can't read memory at address 0x%lx\n", addr);
2174 break;
2175 }
2176
2177 printf("%s", buf);
2178
2179 if (strlen(buf) < size)
2180 break;
2181
2182 addr += size;
2183 }
2184 }
2185
2186 sync();
2187 /* wait a little while to see if we get a machine check */
2188 __delay(200);
2189 catch_memory_errors = 0;
2190} 2174}
2191 2175
2192/* 2176/*
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 32e8449640fa..9b94a160fe7f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,7 +180,8 @@ extern char elf_platform[];
180#define ELF_PLATFORM (elf_platform) 180#define ELF_PLATFORM (elf_platform)
181 181
182#ifndef CONFIG_64BIT 182#ifndef CONFIG_64BIT
183#define SET_PERSONALITY(ex) set_personality(PER_LINUX) 183#define SET_PERSONALITY(ex) \
184 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
184#else /* CONFIG_64BIT */ 185#else /* CONFIG_64BIT */
185#define SET_PERSONALITY(ex) \ 186#define SET_PERSONALITY(ex) \
186do { \ 187do { \
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7bcc14e395f0..bf2a2ad2f800 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15typedef unsigned long __kernel_size_t; 15typedef unsigned long __kernel_size_t;
16typedef long __kernel_ssize_t;
16#define __kernel_size_t __kernel_size_t 17#define __kernel_size_t __kernel_size_t
17 18
18typedef unsigned short __kernel_old_dev_t; 19typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
25typedef unsigned short __kernel_ipc_pid_t; 26typedef unsigned short __kernel_ipc_pid_t;
26typedef unsigned short __kernel_uid_t; 27typedef unsigned short __kernel_uid_t;
27typedef unsigned short __kernel_gid_t; 28typedef unsigned short __kernel_gid_t;
28typedef int __kernel_ssize_t;
29typedef int __kernel_ptrdiff_t; 29typedef int __kernel_ptrdiff_t;
30 30
31#else /* __s390x__ */ 31#else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
35typedef int __kernel_ipc_pid_t; 35typedef int __kernel_ipc_pid_t;
36typedef unsigned int __kernel_uid_t; 36typedef unsigned int __kernel_uid_t;
37typedef unsigned int __kernel_gid_t; 37typedef unsigned int __kernel_gid_t;
38typedef long __kernel_ssize_t;
39typedef long __kernel_ptrdiff_t; 38typedef long __kernel_ptrdiff_t;
40typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ 39typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
41 40
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a0a8340daafa..ce26ac3cb162 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
44} 44}
45 45
46static inline int smp_find_processor_id(int address) { return 0; } 46static inline int smp_find_processor_id(int address) { return 0; }
47static inline int smp_store_status(int cpu) { return 0; }
47static inline int smp_vcpu_scheduled(int cpu) { return 1; } 48static inline int smp_vcpu_scheduled(int cpu) { return 1; }
48static inline void smp_yield_cpu(int cpu) { } 49static inline void smp_yield_cpu(int cpu) { }
49static inline void smp_yield(void) { } 50static inline void smp_yield(void) { }
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b315a33867f2..33692eaabab5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -12,8 +12,7 @@
12 * Simple spin lock operations. There are two variants, one clears IRQ's 12 * Simple spin lock operations. There are two variants, one clears IRQ's
13 * on the local processor, one does not. 13 * on the local processor, one does not.
14 * 14 *
15 * These are fair FIFO ticket locks, which are currently limited to 256 15 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
16 * CPUs.
17 * 16 *
18 * (the type definitions are in asm/spinlock_types.h) 17 * (the type definitions are in asm/spinlock_types.h)
19 */ 18 */
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index afb7ff79a29f..ced4534baed5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
165#endif 165#endif
166 166
167#ifdef P6_NOP1 167#ifdef P6_NOP1
168static const unsigned char __initconst_or_module p6nops[] = 168static const unsigned char p6nops[] =
169{ 169{
170 P6_NOP1, 170 P6_NOP1,
171 P6_NOP2, 171 P6_NOP2,
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7ad683d78645..d44f7829968e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -270,7 +270,7 @@ void fixup_irqs(void)
270 270
271 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { 271 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
272 break_affinity = 1; 272 break_affinity = 1;
273 affinity = cpu_all_mask; 273 affinity = cpu_online_mask;
274 } 274 }
275 275
276 chip = irq_data_get_irq_chip(data); 276 chip = irq_data_get_irq_chip(data);
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 8a2ce8fd41c0..82746f942cd8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
143 unsigned int *current_size) 143 unsigned int *current_size)
144{ 144{
145 struct microcode_header_amd *mc_hdr; 145 struct microcode_header_amd *mc_hdr;
146 unsigned int actual_size; 146 unsigned int actual_size, patch_size;
147 u16 equiv_cpu_id; 147 u16 equiv_cpu_id;
148 148
149 /* size of the current patch we're staring at */ 149 /* size of the current patch we're staring at */
150 *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE; 150 patch_size = *(u32 *)(ucode_ptr + 4);
151 *current_size = patch_size + SECTION_HDR_SIZE;
151 152
152 equiv_cpu_id = find_equiv_id(); 153 equiv_cpu_id = find_equiv_id();
153 if (!equiv_cpu_id) 154 if (!equiv_cpu_id)
@@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
174 /* 175 /*
175 * now that the header looks sane, verify its size 176 * now that the header looks sane, verify its size
176 */ 177 */
177 actual_size = verify_ucode_size(cpu, *current_size, leftover_size); 178 actual_size = verify_ucode_size(cpu, patch_size, leftover_size);
178 if (!actual_size) 179 if (!actual_size)
179 return 0; 180 return 0;
180 181
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba8..a3b57a27be88 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
475 return address_mask(ctxt, reg); 475 return address_mask(ctxt, reg);
476} 476}
477 477
478static void masked_increment(ulong *reg, ulong mask, int inc)
479{
480 assign_masked(reg, *reg + inc, mask);
481}
482
478static inline void 483static inline void
479register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) 484register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
480{ 485{
486 ulong mask;
487
481 if (ctxt->ad_bytes == sizeof(unsigned long)) 488 if (ctxt->ad_bytes == sizeof(unsigned long))
482 *reg += inc; 489 mask = ~0UL;
483 else 490 else
484 *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt)); 491 mask = ad_mask(ctxt);
492 masked_increment(reg, mask, inc);
493}
494
495static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
496{
497 masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
485} 498}
486 499
487static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) 500static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1522{ 1535{
1523 struct segmented_address addr; 1536 struct segmented_address addr;
1524 1537
1525 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes); 1538 rsp_increment(ctxt, -bytes);
1526 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); 1539 addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
1527 addr.seg = VCPU_SREG_SS; 1540 addr.seg = VCPU_SREG_SS;
1528 1541
1529 return segmented_write(ctxt, addr, data, bytes); 1542 return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1542 int rc; 1555 int rc;
1543 struct segmented_address addr; 1556 struct segmented_address addr;
1544 1557
1545 addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); 1558 addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
1546 addr.seg = VCPU_SREG_SS; 1559 addr.seg = VCPU_SREG_SS;
1547 rc = segmented_read(ctxt, addr, dest, len); 1560 rc = segmented_read(ctxt, addr, dest, len);
1548 if (rc != X86EMUL_CONTINUE) 1561 if (rc != X86EMUL_CONTINUE)
1549 return rc; 1562 return rc;
1550 1563
1551 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len); 1564 rsp_increment(ctxt, len);
1552 return rc; 1565 return rc;
1553} 1566}
1554 1567
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
1688 1701
1689 while (reg >= VCPU_REGS_RAX) { 1702 while (reg >= VCPU_REGS_RAX) {
1690 if (reg == VCPU_REGS_RSP) { 1703 if (reg == VCPU_REGS_RSP) {
1691 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], 1704 rsp_increment(ctxt, ctxt->op_bytes);
1692 ctxt->op_bytes);
1693 --reg; 1705 --reg;
1694 } 1706 }
1695 1707
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2825 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 2837 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2826 if (rc != X86EMUL_CONTINUE) 2838 if (rc != X86EMUL_CONTINUE)
2827 return rc; 2839 return rc;
2828 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); 2840 rsp_increment(ctxt, ctxt->src.val);
2829 return X86EMUL_CONTINUE; 2841 return X86EMUL_CONTINUE;
2830} 2842}
2831 2843
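The emulate.c hunk routes stack-pointer updates through masked_increment(), so only the bits covered by the current stack mask change and a wrap cannot carry into the upper register bits. A standalone sketch; assign_masked() here is a plausible stand-in, not necessarily the kernel's exact helper:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Keep the bits outside the mask, replace the bits inside it. */
    static void assign_masked(uint64_t *dest, uint64_t src, uint64_t mask)
    {
        *dest = (*dest & ~mask) | (src & mask);
    }

    static void masked_increment(uint64_t *reg, uint64_t mask, int inc)
    {
        assign_masked(reg, *reg + inc, mask);
    }

    int main(void)
    {
        /* A 16-bit stack pointer living in a 64-bit register: only the low
         * 16 bits wrap, the upper bits stay untouched. */
        uint64_t rsp = 0xdead00000000ffffULL;

        masked_increment(&rsp, 0xffff, 2);              /* e.g. popping 2 bytes */
        printf("rsp = %#" PRIx64 "\n", rsp);            /* 0xdead000000000001 */
        return 0;
    }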
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca00423938..7fbd0d273ea8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
4113 LIST_HEAD(invalid_list); 4113 LIST_HEAD(invalid_list);
4114 4114
4115 /* 4115 /*
4116 * Never scan more than sc->nr_to_scan VM instances.
4117 * Will not hit this condition practically since we do not try
4118 * to shrink more than one VM and it is very unlikely to see
4119 * !n_used_mmu_pages so many times.
4120 */
4121 if (!nr_to_scan--)
4122 break;
4123 /*
4116 * n_used_mmu_pages is accessed without holding kvm->mmu_lock 4124 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
4117 * here. We may skip a VM instance erroneously, but we do not 4125 * here. We may skip a VM instance erroneously, but we do not
4118 * want to shrink a VM that only started to populate its MMU 4126 * want to shrink a VM that only started to populate its MMU
4119 * anyway. 4127 * anyway.
4120 */ 4128 */
4121 if (kvm->arch.n_used_mmu_pages > 0) { 4129 if (!kvm->arch.n_used_mmu_pages)
4122 if (!nr_to_scan--)
4123 break;
4124 continue; 4130 continue;
4125 }
4126 4131
4127 idx = srcu_read_lock(&kvm->srcu); 4132 idx = srcu_read_lock(&kvm->srcu);
4128 spin_lock(&kvm->mmu_lock); 4133 spin_lock(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48f6928..148ed666e311 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
806 * kvm-specific. Those are put in the beginning of the list. 806 * kvm-specific. Those are put in the beginning of the list.
807 */ 807 */
808 808
809#define KVM_SAVE_MSRS_BEGIN 9 809#define KVM_SAVE_MSRS_BEGIN 10
810static u32 msrs_to_save[] = { 810static u32 msrs_to_save[] = {
811 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 811 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
812 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, 812 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2000 case MSR_KVM_STEAL_TIME: 2000 case MSR_KVM_STEAL_TIME:
2001 data = vcpu->arch.st.msr_val; 2001 data = vcpu->arch.st.msr_val;
2002 break; 2002 break;
2003 case MSR_KVM_PV_EOI_EN:
2004 data = vcpu->arch.pv_eoi.msr_val;
2005 break;
2003 case MSR_IA32_P5_MC_ADDR: 2006 case MSR_IA32_P5_MC_ADDR:
2004 case MSR_IA32_P5_MC_TYPE: 2007 case MSR_IA32_P5_MC_TYPE:
2005 case MSR_IA32_MCG_CAP: 2008 case MSR_IA32_MCG_CAP:
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bf4bda6d3e9a..9642d4a38602 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,7 +31,6 @@
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/syscore_ops.h>
35 34
36#include <xen/xen.h> 35#include <xen/xen.h>
37#include <xen/interface/xen.h> 36#include <xen/interface/xen.h>
@@ -1470,130 +1469,38 @@ asmlinkage void __init xen_start_kernel(void)
1470#endif 1469#endif
1471} 1470}
1472 1471
1473#ifdef CONFIG_XEN_PVHVM 1472void __ref xen_hvm_init_shared_info(void)
1474/*
1475 * The pfn containing the shared_info is located somewhere in RAM. This
1476 * will cause trouble if the current kernel is doing a kexec boot into a
1477 * new kernel. The new kernel (and its startup code) can not know where
1478 * the pfn is, so it can not reserve the page. The hypervisor will
1479 * continue to update the pfn, and as a result memory corruption occurs
1480 * in the new kernel.
1481 *
1482 * One way to work around this issue is to allocate a page in the
1483 * xen-platform pci device's BAR memory range. But pci init is done very
1484 * late and the shared_info page is already in use very early to read
1485 * the pvclock. So moving the pfn from RAM to MMIO is racy because some
1486 * code paths on other vcpus could access the pfn during the small
1487 * window when the old pfn is moved to the new pfn. There is even a
1488 * small window where the old pfn is not backed by a mfn, and during that
1489 * time all reads return -1.
1490 *
1491 * Because it is not known upfront where the MMIO region is located it
1492 * can not be used right from the start in xen_hvm_init_shared_info.
1493 *
1494 * To minimise trouble the move of the pfn is done shortly before kexec.
1495 * This does not eliminate the race because all vcpus are still online
1496 * when the syscore_ops will be called. But hopefully there is no work
1497 * pending at this point in time. Also the syscore_op is run last which
1498 * reduces the risk further.
1499 */
1500
1501static struct shared_info *xen_hvm_shared_info;
1502
1503static void xen_hvm_connect_shared_info(unsigned long pfn)
1504{ 1473{
1474 int cpu;
1505 struct xen_add_to_physmap xatp; 1475 struct xen_add_to_physmap xatp;
1476 static struct shared_info *shared_info_page = 0;
1506 1477
1478 if (!shared_info_page)
1479 shared_info_page = (struct shared_info *)
1480 extend_brk(PAGE_SIZE, PAGE_SIZE);
1507 xatp.domid = DOMID_SELF; 1481 xatp.domid = DOMID_SELF;
1508 xatp.idx = 0; 1482 xatp.idx = 0;
1509 xatp.space = XENMAPSPACE_shared_info; 1483 xatp.space = XENMAPSPACE_shared_info;
1510 xatp.gpfn = pfn; 1484 xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
1511 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 1485 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
1512 BUG(); 1486 BUG();
1513 1487
1514} 1488 HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
1515static void xen_hvm_set_shared_info(struct shared_info *sip)
1516{
1517 int cpu;
1518
1519 HYPERVISOR_shared_info = sip;
1520 1489
1521 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info 1490 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
1522 * page, we use it in the event channel upcall and in some pvclock 1491 * page, we use it in the event channel upcall and in some pvclock
1523 * related functions. We don't need the vcpu_info placement 1492 * related functions. We don't need the vcpu_info placement
1524 * optimizations because we don't use any pv_mmu or pv_irq op on 1493 * optimizations because we don't use any pv_mmu or pv_irq op on
1525 * HVM. 1494 * HVM.
1526 * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is 1495 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
1527 * online but xen_hvm_set_shared_info is run at resume time too and 1496 * online but xen_hvm_init_shared_info is run at resume time too and
1528 * in that case multiple vcpus might be online. */ 1497 * in that case multiple vcpus might be online. */
1529 for_each_online_cpu(cpu) { 1498 for_each_online_cpu(cpu) {
1530 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1499 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
1531 } 1500 }
1532} 1501}
1533 1502
1534/* Reconnect the shared_info pfn to a mfn */ 1503#ifdef CONFIG_XEN_PVHVM
1535void xen_hvm_resume_shared_info(void)
1536{
1537 xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
1538}
1539
1540#ifdef CONFIG_KEXEC
1541static struct shared_info *xen_hvm_shared_info_kexec;
1542static unsigned long xen_hvm_shared_info_pfn_kexec;
1543
1544/* Remember a pfn in MMIO space for kexec reboot */
1545void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn)
1546{
1547 xen_hvm_shared_info_kexec = sip;
1548 xen_hvm_shared_info_pfn_kexec = pfn;
1549}
1550
1551static void xen_hvm_syscore_shutdown(void)
1552{
1553 struct xen_memory_reservation reservation = {
1554 .domid = DOMID_SELF,
1555 .nr_extents = 1,
1556 };
1557 unsigned long prev_pfn;
1558 int rc;
1559
1560 if (!xen_hvm_shared_info_kexec)
1561 return;
1562
1563 prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT;
1564 set_xen_guest_handle(reservation.extent_start, &prev_pfn);
1565
1566 /* Move pfn to MMIO, disconnects previous pfn from mfn */
1567 xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec);
1568
1569 /* Update pointers, following hypercall is also a memory barrier */
1570 xen_hvm_set_shared_info(xen_hvm_shared_info_kexec);
1571
1572 /* Allocate new mfn for previous pfn */
1573 do {
1574 rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
1575 if (rc == 0)
1576 msleep(123);
1577 } while (rc == 0);
1578
1579 /* Make sure the previous pfn is really connected to a (new) mfn */
1580 BUG_ON(rc != 1);
1581}
1582
1583static struct syscore_ops xen_hvm_syscore_ops = {
1584 .shutdown = xen_hvm_syscore_shutdown,
1585};
1586#endif
1587
1588/* Use a pfn in RAM, may move to MMIO before kexec. */
1589static void __init xen_hvm_init_shared_info(void)
1590{
1591 /* Remember pointer for resume */
1592 xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
1593 xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
1594 xen_hvm_set_shared_info(xen_hvm_shared_info);
1595}
1596
1597static void __init init_hvm_pv_info(void) 1504static void __init init_hvm_pv_info(void)
1598{ 1505{
1599 int major, minor; 1506 int major, minor;
@@ -1644,9 +1551,6 @@ static void __init xen_hvm_guest_init(void)
1644 init_hvm_pv_info(); 1551 init_hvm_pv_info();
1645 1552
1646 xen_hvm_init_shared_info(); 1553 xen_hvm_init_shared_info();
1647#ifdef CONFIG_KEXEC
1648 register_syscore_ops(&xen_hvm_syscore_ops);
1649#endif
1650 1554
1651 if (xen_feature(XENFEAT_hvm_callback_vector)) 1555 if (xen_feature(XENFEAT_hvm_callback_vector))
1652 xen_have_vector_callback = 1; 1556 xen_have_vector_callback = 1;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b2e91d40a4cb..d4b255463253 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
196 196
197/* When we populate back during bootup, the amount of pages can vary. The 197/* When we populate back during bootup, the amount of pages can vary. The
198 * max we have is seen is 395979, but that does not mean it can't be more. 198 * max we have is seen is 395979, but that does not mean it can't be more.
199 * But some machines can have 3GB I/O holes even. So lets reserve enough 199 * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
200 * for 4GB of I/O and E820 holes. */ 200 * it can re-use Xen provided mfn_list array, so we only need to allocate at
201RESERVE_BRK(p2m_populated, PMD_SIZE * 4); 201 * most three P2M top nodes. */
202RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
203
202static inline unsigned p2m_top_index(unsigned long pfn) 204static inline unsigned p2m_top_index(unsigned long pfn)
203{ 205{
204 BUG_ON(pfn >= MAX_P2M_PFN); 206 BUG_ON(pfn >= MAX_P2M_PFN);
@@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn)
575 } 577 }
576 return true; 578 return true;
577} 579}
580
581/*
582 * Skim over the P2M tree looking at pages that are either filled with
583 * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
584 * replace the P2M leaf with a p2m_missing or p2m_identity.
585 * Stick the old page in the new P2M tree location.
586 */
587bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
588{
589 unsigned topidx;
590 unsigned mididx;
591 unsigned ident_pfns;
592 unsigned inv_pfns;
593 unsigned long *p2m;
594 unsigned long *mid_mfn_p;
595 unsigned idx;
596 unsigned long pfn;
597
598 /* We only look when this entails a P2M middle layer */
599 if (p2m_index(set_pfn))
600 return false;
601
602 for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
603 topidx = p2m_top_index(pfn);
604
605 if (!p2m_top[topidx])
606 continue;
607
608 if (p2m_top[topidx] == p2m_mid_missing)
609 continue;
610
611 mididx = p2m_mid_index(pfn);
612 p2m = p2m_top[topidx][mididx];
613 if (!p2m)
614 continue;
615
616 if ((p2m == p2m_missing) || (p2m == p2m_identity))
617 continue;
618
619 if ((unsigned long)p2m == INVALID_P2M_ENTRY)
620 continue;
621
622 ident_pfns = 0;
623 inv_pfns = 0;
624 for (idx = 0; idx < P2M_PER_PAGE; idx++) {
625 /* IDENTITY_PFNs are 1:1 */
626 if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
627 ident_pfns++;
628 else if (p2m[idx] == INVALID_P2M_ENTRY)
629 inv_pfns++;
630 else
631 break;
632 }
633 if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
634 goto found;
635 }
636 return false;
637found:
638 /* Found one, replace old with p2m_identity or p2m_missing */
639 p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
640 /* And the other for save/restore.. */
641 mid_mfn_p = p2m_top_mfn_p[topidx];
642 /* NOTE: Even if it is a p2m_identity it should still point to
643 * a page filled with INVALID_P2M_ENTRY entries. */
644 mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
645
646 /* Reset where we want to stick the old page in. */
647 topidx = p2m_top_index(set_pfn);
648 mididx = p2m_mid_index(set_pfn);
649
650 /* This shouldn't happen */
651 if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
652 early_alloc_p2m(set_pfn);
653
654 if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
655 return false;
656
657 p2m_init(p2m);
658 p2m_top[topidx][mididx] = p2m;
659 mid_mfn_p = p2m_top_mfn_p[topidx];
660 mid_mfn_p[mididx] = virt_to_mfn(p2m);
661
662 return true;
663}
578bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) 664bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
579{ 665{
580 if (unlikely(!__set_phys_to_machine(pfn, mfn))) { 666 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
581 if (!early_alloc_p2m(pfn)) 667 if (!early_alloc_p2m(pfn))
582 return false; 668 return false;
583 669
670 if (early_can_reuse_p2m_middle(pfn, mfn))
671 return __set_phys_to_machine(pfn, mfn);
672
584 if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) 673 if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
585 return false; 674 return false;
586 675
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ead85576d54a..d11ca11d14fc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
78 memblock_reserve(start, size); 78 memblock_reserve(start, size);
79 79
80 xen_max_p2m_pfn = PFN_DOWN(start + size); 80 xen_max_p2m_pfn = PFN_DOWN(start + size);
81 for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
82 unsigned long mfn = pfn_to_mfn(pfn);
83
84 if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
85 continue;
86 WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
87 pfn, mfn);
81 88
82 for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
83 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 89 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
90 }
84} 91}
85 92
86static unsigned long __init xen_do_chunk(unsigned long start, 93static unsigned long __init xen_do_chunk(unsigned long start,
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index ae8a00c39de4..45329c8c226e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
30{ 30{
31#ifdef CONFIG_XEN_PVHVM 31#ifdef CONFIG_XEN_PVHVM
32 int cpu; 32 int cpu;
33 xen_hvm_resume_shared_info(); 33 xen_hvm_init_shared_info();
34 xen_callback_vector(); 34 xen_callback_vector();
35 xen_unplug_emulated_devices(); 35 xen_unplug_emulated_devices();
36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) { 36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1e4329e04e0f..202d4c150154 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -41,7 +41,7 @@ void xen_enable_syscall(void);
41void xen_vcpu_restore(void); 41void xen_vcpu_restore(void);
42 42
43void xen_callback_vector(void); 43void xen_callback_vector(void);
44void xen_hvm_resume_shared_info(void); 44void xen_hvm_init_shared_info(void);
45void xen_unplug_emulated_devices(void); 45void xen_unplug_emulated_devices(void);
46 46
47void __init xen_build_dynamic_phys_to_machine(void); 47void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a78..19cc761cacb2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
44 struct request_queue *q = bdev_get_queue(bdev); 44 struct request_queue *q = bdev_get_queue(bdev);
45 int type = REQ_WRITE | REQ_DISCARD; 45 int type = REQ_WRITE | REQ_DISCARD;
46 unsigned int max_discard_sectors; 46 unsigned int max_discard_sectors;
47 unsigned int granularity, alignment, mask;
47 struct bio_batch bb; 48 struct bio_batch bb;
48 struct bio *bio; 49 struct bio *bio;
49 int ret = 0; 50 int ret = 0;
@@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
54 if (!blk_queue_discard(q)) 55 if (!blk_queue_discard(q))
55 return -EOPNOTSUPP; 56 return -EOPNOTSUPP;
56 57
58 /* Zero-sector (unknown) and one-sector granularities are the same. */
59 granularity = max(q->limits.discard_granularity >> 9, 1U);
60 mask = granularity - 1;
61 alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
62
57 /* 63 /*
58 * Ensure that max_discard_sectors is of the proper 64 * Ensure that max_discard_sectors is of the proper
59 * granularity 65 * granularity, so that requests stay aligned after a split.
60 */ 66 */
61 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); 67 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
68 max_discard_sectors = round_down(max_discard_sectors, granularity);
62 if (unlikely(!max_discard_sectors)) { 69 if (unlikely(!max_discard_sectors)) {
63 /* Avoid infinite loop below. Being cautious never hurts. */ 70 /* Avoid infinite loop below. Being cautious never hurts. */
64 return -EOPNOTSUPP; 71 return -EOPNOTSUPP;
65 } else if (q->limits.discard_granularity) {
66 unsigned int disc_sects = q->limits.discard_granularity >> 9;
67
68 max_discard_sectors &= ~(disc_sects - 1);
69 } 72 }
70 73
71 if (flags & BLKDEV_DISCARD_SECURE) { 74 if (flags & BLKDEV_DISCARD_SECURE) {
@@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
79 bb.wait = &wait; 82 bb.wait = &wait;
80 83
81 while (nr_sects) { 84 while (nr_sects) {
85 unsigned int req_sects;
86 sector_t end_sect;
87
82 bio = bio_alloc(gfp_mask, 1); 88 bio = bio_alloc(gfp_mask, 1);
83 if (!bio) { 89 if (!bio) {
84 ret = -ENOMEM; 90 ret = -ENOMEM;
85 break; 91 break;
86 } 92 }
87 93
94 req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
95
96 /*
97 * If splitting a request, and the next starting sector would be
98 * misaligned, stop the discard at the previous aligned sector.
99 */
100 end_sect = sector + req_sects;
101 if (req_sects < nr_sects && (end_sect & mask) != alignment) {
102 end_sect =
103 round_down(end_sect - alignment, granularity)
104 + alignment;
105 req_sects = end_sect - sector;
106 }
107
88 bio->bi_sector = sector; 108 bio->bi_sector = sector;
89 bio->bi_end_io = bio_batch_end_io; 109 bio->bi_end_io = bio_batch_end_io;
90 bio->bi_bdev = bdev; 110 bio->bi_bdev = bdev;
91 bio->bi_private = &bb; 111 bio->bi_private = &bb;
92 112
93 if (nr_sects > max_discard_sectors) { 113 bio->bi_size = req_sects << 9;
94 bio->bi_size = max_discard_sectors << 9; 114 nr_sects -= req_sects;
95 nr_sects -= max_discard_sectors; 115 sector = end_sect;
96 sector += max_discard_sectors;
97 } else {
98 bio->bi_size = nr_sects << 9;
99 nr_sects = 0;
100 }
101 116
102 atomic_inc(&bb.done); 117 atomic_inc(&bb.done);
103 submit_bio(type, bio); 118 submit_bio(type, bio);
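The rewritten blkdev_issue_discard() loop shortens a split request so that every following chunk starts on a discard granule plus the device's alignment offset. The same arithmetic with made-up numbers (granularity assumed to be a power of two, as the mask usage implies):

    #include <stdio.h>

    /* Kernel-style round_down() for a power-of-two multiple. */
    #define round_down(x, y)  ((x) & ~((unsigned long long)(y) - 1))

    int main(void)
    {
        /* Illustrative numbers, all in 512-byte sectors: a 1 MiB discard
         * granule (2048 sectors), a 7-sector alignment offset, and a range
         * large enough to need splitting. */
        unsigned long long granularity = 2048, alignment = 7;
        unsigned long long mask = granularity - 1;
        unsigned long long sector = 10000, nr_sects = 1ULL << 22;
        unsigned long long max_discard_sectors = round_down(65535, granularity);

        unsigned long long req_sects = nr_sects < max_discard_sectors ?
                                       nr_sects : max_discard_sectors;
        unsigned long long end_sect = sector + req_sects;

        /* Same arithmetic as the hunk above: if this is a split and the next
         * chunk would start misaligned, pull the end back to the previous
         * granule + alignment boundary. */
        if (req_sects < nr_sects && (end_sect & mask) != alignment) {
            end_sect = round_down(end_sect - alignment, granularity) + alignment;
            req_sects = end_sect - sector;
        }

        printf("first chunk: %llu sectors, next starts at %llu (offset in granule %llu)\n",
               req_sects, end_sect, end_sect & mask);
        return 0;
    }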
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f54882..e76279e41162 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
110 return 0; 110 return 0;
111} 111}
112 112
113static void
114__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
115 struct scatterlist *sglist, struct bio_vec **bvprv,
116 struct scatterlist **sg, int *nsegs, int *cluster)
117{
118
119 int nbytes = bvec->bv_len;
120
121 if (*bvprv && *cluster) {
122 if ((*sg)->length + nbytes > queue_max_segment_size(q))
123 goto new_segment;
124
125 if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
126 goto new_segment;
127 if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
128 goto new_segment;
129
130 (*sg)->length += nbytes;
131 } else {
132new_segment:
133 if (!*sg)
134 *sg = sglist;
135 else {
136 /*
137 * If the driver previously mapped a shorter
138 * list, we could see a termination bit
139 * prematurely unless it fully inits the sg
140 * table on each mapping. We KNOW that there
141 * must be more entries here or the driver
142 * would be buggy, so force clear the
143 * termination bit to avoid doing a full
144 * sg_init_table() in drivers for each command.
145 */
146 (*sg)->page_link &= ~0x02;
147 *sg = sg_next(*sg);
148 }
149
150 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
151 (*nsegs)++;
152 }
153 *bvprv = bvec;
154}
155
113/* 156/*
114 * map a request to scatterlist, return number of sg entries setup. Caller 157 * map a request to scatterlist, return number of sg entries setup. Caller
115 * must make sure sg can hold rq->nr_phys_segments entries 158 * must make sure sg can hold rq->nr_phys_segments entries
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
131 bvprv = NULL; 174 bvprv = NULL;
132 sg = NULL; 175 sg = NULL;
133 rq_for_each_segment(bvec, rq, iter) { 176 rq_for_each_segment(bvec, rq, iter) {
134 int nbytes = bvec->bv_len; 177 __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
135 178 &nsegs, &cluster);
136 if (bvprv && cluster) {
137 if (sg->length + nbytes > queue_max_segment_size(q))
138 goto new_segment;
139
140 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
141 goto new_segment;
142 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
143 goto new_segment;
144
145 sg->length += nbytes;
146 } else {
147new_segment:
148 if (!sg)
149 sg = sglist;
150 else {
151 /*
152 * If the driver previously mapped a shorter
153 * list, we could see a termination bit
154 * prematurely unless it fully inits the sg
155 * table on each mapping. We KNOW that there
156 * must be more entries here or the driver
157 * would be buggy, so force clear the
158 * termination bit to avoid doing a full
159 * sg_init_table() in drivers for each command.
160 */
161 sg->page_link &= ~0x02;
162 sg = sg_next(sg);
163 }
164
165 sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
166 nsegs++;
167 }
168 bvprv = bvec;
169 } /* segments in rq */ 179 } /* segments in rq */
170 180
171 181
@@ -199,6 +209,43 @@ new_segment:
199} 209}
200EXPORT_SYMBOL(blk_rq_map_sg); 210EXPORT_SYMBOL(blk_rq_map_sg);
201 211
212/**
213 * blk_bio_map_sg - map a bio to a scatterlist
214 * @q: request_queue in question
215 * @bio: bio being mapped
216 * @sglist: scatterlist being mapped
217 *
218 * Note:
219 * Caller must make sure sg can hold bio->bi_phys_segments entries
220 *
221 * Will return the number of sg entries setup
222 */
223int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
224 struct scatterlist *sglist)
225{
226 struct bio_vec *bvec, *bvprv;
227 struct scatterlist *sg;
228 int nsegs, cluster;
229 unsigned long i;
230
231 nsegs = 0;
232 cluster = blk_queue_cluster(q);
233
234 bvprv = NULL;
235 sg = NULL;
236 bio_for_each_segment(bvec, bio, i) {
237 __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
238 &nsegs, &cluster);
239 } /* segments in bio */
240
241 if (sg)
242 sg_mark_end(sg);
243
244 BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
245 return nsegs;
246}
247EXPORT_SYMBOL(blk_bio_map_sg);
248
202static inline int ll_new_hw_segment(struct request_queue *q, 249static inline int ll_new_hw_segment(struct request_queue *q,
203 struct request *req, 250 struct request *req,
204 struct bio *bio) 251 struct bio *bio)
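This blk-merge.c refactor moves the per-bvec append-or-merge decision into __blk_segment_map_sg(), with the running state passed by pointer so both the request walker and the new blk_bio_map_sg() can share it. A toy model of that shape, using stand-in names rather than the block-layer API:

    #include <stdio.h>

    struct seg { long start, len; };

    /* The shared per-element step: merge with the previous segment when
     * contiguous, otherwise open a new one.  State lives in the caller. */
    static void map_one(long start, long len,
                        struct seg *segs, int *nsegs, long *prev_end)
    {
        if (*nsegs && start == *prev_end) {
            segs[*nsegs - 1].len += len;        /* merge */
        } else {
            segs[*nsegs].start = start;         /* new segment */
            segs[*nsegs].len = len;
            (*nsegs)++;
        }
        *prev_end = start + len;
    }

    int main(void)
    {
        long ranges[][2] = { {0, 4}, {4, 4}, {16, 8}, {24, 4} };
        struct seg segs[4];
        int nsegs = 0;
        long prev_end = -1;

        for (unsigned i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
            map_one(ranges[i][0], ranges[i][1], segs, &nsegs, &prev_end);

        for (int i = 0; i < nsegs; i++)
            printf("segment %d: start=%ld len=%ld\n", i, segs[i].start, segs[i].len);
        return 0;
    }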
diff --git a/block/genhd.c b/block/genhd.c
index cac7366957c3..d839723303c8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
835 835
836static void *show_partition_start(struct seq_file *seqf, loff_t *pos) 836static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
837{ 837{
838 static void *p; 838 void *p;
839 839
840 p = disk_seqf_start(seqf, pos); 840 p = disk_seqf_start(seqf, pos);
841 if (!IS_ERR_OR_NULL(p) && !*pos) 841 if (!IS_ERR_OR_NULL(p) && !*pos)
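The genhd.c one-liner drops a static qualifier: a static local in show_partition_start() keeps its value across calls, so concurrent or repeated readers could observe each other's pointer. A minimal illustration of why such a function is not reentrant:

    #include <stdio.h>

    /* Non-reentrant: the static local survives across calls, so state from one
     * reader leaks into the next (the bug class behind the removed static). */
    static int next_broken(void)
    {
        static int cursor;
        return cursor++;
    }

    /* Reentrant: every caller owns its own cursor. */
    static int next_fixed(int *cursor)
    {
        return (*cursor)++;
    }

    int main(void)
    {
        int a = 0, b = 0;
        int ra, rb;

        ra = next_broken();     /* a fresh iteration for reader A */
        rb = next_broken();     /* ...but reader B inherits A's cursor */
        printf("broken: A=%d B=%d\n", ra, rb);   /* A=0 B=1 */

        ra = next_fixed(&a);
        rb = next_fixed(&b);
        printf("fixed:  A=%d B=%d\n", ra, rb);   /* A=0 B=0 */
        return 0;
    }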
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2be8ef1d3093..27cecd313e75 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -115,7 +115,7 @@ config SATA_SIL24
115 If unsure, say N. 115 If unsure, say N.
116 116
117config ATA_SFF 117config ATA_SFF
118 bool "ATA SFF support" 118 bool "ATA SFF support (for legacy IDE and PATA)"
119 default y 119 default y
120 help 120 help
121 This option adds support for ATA controllers with SFF 121 This option adds support for ATA controllers with SFF
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 062e6a1a248f..50d5dea0ff59 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -256,6 +256,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
256 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ 256 { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
257 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ 257 { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
258 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ 258 { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
259 { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
260 { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
261 { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
262 { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
263 { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
264 { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
265 { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
266 { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
259 267
260 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 268 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
261 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 269 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index c2594ddf25b0..57eb1c212a4c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
320extern struct ata_port_operations ahci_ops; 320extern struct ata_port_operations ahci_ops;
321extern struct ata_port_operations ahci_pmp_retry_srst_ops; 321extern struct ata_port_operations ahci_pmp_retry_srst_ops;
322 322
323unsigned int ahci_dev_classify(struct ata_port *ap);
323void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 324void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
324 u32 opts); 325 u32 opts);
325void ahci_save_initial_config(struct device *dev, 326void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3c809bfbccf5..ef773e12af79 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
329 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 329 { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
330 /* SATA Controller IDE (Lynx Point) */ 330 /* SATA Controller IDE (Lynx Point) */
331 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 331 { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
332 /* SATA Controller IDE (Lynx Point-LP) */
333 { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
334 /* SATA Controller IDE (Lynx Point-LP) */
335 { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
336 /* SATA Controller IDE (Lynx Point-LP) */
337 { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
338 /* SATA Controller IDE (Lynx Point-LP) */
339 { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
332 /* SATA Controller IDE (DH89xxCC) */ 340 /* SATA Controller IDE (DH89xxCC) */
333 { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 341 { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
334 { } /* terminate list */ 342 { } /* terminate list */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f9eaa82311a9..555c07afa05b 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev)
1139 } 1139 }
1140} 1140}
1141 1141
1142static unsigned int ahci_dev_classify(struct ata_port *ap) 1142unsigned int ahci_dev_classify(struct ata_port *ap)
1143{ 1143{
1144 void __iomem *port_mmio = ahci_port_base(ap); 1144 void __iomem *port_mmio = ahci_port_base(ap);
1145 struct ata_taskfile tf; 1145 struct ata_taskfile tf;
@@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
1153 1153
1154 return ata_dev_classify(&tf); 1154 return ata_dev_classify(&tf);
1155} 1155}
1156EXPORT_SYMBOL_GPL(ahci_dev_classify);
1156 1157
1157void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 1158void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1158 u32 opts) 1159 u32 opts)
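The libahci.c hunks above un-static ahci_dev_classify() and export it GPL-only, with the matching declaration added to ahci.h earlier in this diff, so AHCI glue drivers outside libahci can reuse the signature-based classification. A hypothetical caller in such a driver; only ahci_dev_classify() and the ATA_DEV_* constants come from the source, the wrapper is illustrative:

#include "ahci.h"

static bool example_port_has_pmp(struct ata_port *ap)
{
        /* reads the port's signature taskfile and classifies the attached device */
        return ahci_dev_classify(ap) == ATA_DEV_PMP;
}
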
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 902b5a457170..fd9ecf74e631 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -60,17 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap)
60 if (ap->flags & ATA_FLAG_ACPI_SATA) 60 if (ap->flags & ATA_FLAG_ACPI_SATA)
61 return NULL; 61 return NULL;
62 62
63 /* 63 return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);
64 * If acpi bind operation has already happened, we can get the handle
65 * for the port by checking the corresponding scsi_host device's
66 * firmware node, otherwise we will need to find out the handle from
67 * its parent's acpi node.
68 */
69 if (ap->scsi_host)
70 return DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev);
71 else
72 return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev),
73 ap->port_no);
74} 64}
75EXPORT_SYMBOL(ata_ap_acpi_handle); 65EXPORT_SYMBOL(ata_ap_acpi_handle);
76 66
@@ -1101,6 +1091,9 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
1101 if (!*handle) 1091 if (!*handle)
1102 return -ENODEV; 1092 return -ENODEV;
1103 1093
1094 if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
1095 ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
1096
1104 return 0; 1097 return 0;
1105} 1098}
1106 1099
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fadd5866d40f..8e1039c8e159 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4062,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4062 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4062 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4063 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4063 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4064 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4064 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4065 { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4065 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4066 /* Odd clown on sil3726/4726 PMPs */ 4066 /* Odd clown on sil3726/4726 PMPs */
4067 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4067 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4068 4068
@@ -4128,6 +4128,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4128 4128
4129 /* Devices that do not need bridging limits applied */ 4129 /* Devices that do not need bridging limits applied */
4130 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4130 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4131 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4131 4132
4132 /* Devices which aren't very happy with higher link speeds */ 4133 /* Devices which aren't very happy with higher link speeds */
4133 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4134 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 361c75cea57b..24e51056ac26 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <scsi/scsi_host.h> 21#include <scsi/scsi_host.h>
22#include <linux/libata.h> 22#include <linux/libata.h>
23#include <linux/dmi.h>
23 24
24#define DRV_NAME "pata_atiixp" 25#define DRV_NAME "pata_atiixp"
25#define DRV_VERSION "0.4.6" 26#define DRV_VERSION "0.4.6"
@@ -33,11 +34,26 @@ enum {
33 ATIIXP_IDE_UDMA_MODE = 0x56 34 ATIIXP_IDE_UDMA_MODE = 0x56
34}; 35};
35 36
37static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
38 {
39 /* Board has onboard PATA<->SATA converters */
40 .ident = "MSI E350DM-E33",
41 .matches = {
42 DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
43 DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
44 },
45 },
46 { }
47};
48
36static int atiixp_cable_detect(struct ata_port *ap) 49static int atiixp_cable_detect(struct ata_port *ap)
37{ 50{
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 51 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 u8 udma; 52 u8 udma;
40 53
54 if (dmi_check_system(attixp_cable_override_dmi_table))
55 return ATA_CBL_PATA40_SHORT;
56
41 /* Hack from drivers/ide/pci. Really we want to know how to do the 57 /* Hack from drivers/ide/pci. Really we want to know how to do the
42 raw detection not play follow the bios mode guess */ 58 raw detection not play follow the bios mode guess */
43 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); 59 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
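The pata_atiixp change above is a standard DMI quirk: the MSI E350DM-E33 routes its PATA port through onboard PATA<->SATA converters, so the usual BIOS-mode cable guess is wrong and the board is pinned to ATA_CBL_PATA40_SHORT. The general shape of such a check, with the table contents taken from the hunk and the wrapper name illustrative:

#include <linux/dmi.h>

static const struct dmi_system_id example_quirk_table[] = {
        {
                .ident = "MSI E350DM-E33",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
                        DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
                },
        },
        { }     /* terminator */
};

static bool example_needs_quirk(void)
{
        /* nonzero when the running board matches an entry */
        return dmi_check_system(example_quirk_table) != 0;
}
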
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index ba91b408abad..d84566496746 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -889,6 +889,7 @@ struct bm_aio_ctx {
889 unsigned int done; 889 unsigned int done;
890 unsigned flags; 890 unsigned flags;
891#define BM_AIO_COPY_PAGES 1 891#define BM_AIO_COPY_PAGES 1
892#define BM_WRITE_ALL_PAGES 2
892 int error; 893 int error;
893 struct kref kref; 894 struct kref kref;
894}; 895};
@@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
1059 if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) 1060 if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
1060 break; 1061 break;
1061 if (rw & WRITE) { 1062 if (rw & WRITE) {
1062 if (bm_test_page_unchanged(b->bm_pages[i])) { 1063 if (!(flags & BM_WRITE_ALL_PAGES) &&
1064 bm_test_page_unchanged(b->bm_pages[i])) {
1063 dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); 1065 dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
1064 continue; 1066 continue;
1065 } 1067 }
@@ -1141,6 +1143,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
1141} 1143}
1142 1144
1143/** 1145/**
1146 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
1147 * @mdev: DRBD device.
1148 *
1149 * Will write all pages.
1150 */
1151int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
1152{
1153 return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
1154}
1155
1156/**
1144 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. 1157 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1145 * @mdev: DRBD device. 1158 * @mdev: DRBD device.
1146 * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages 1159 * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b2ca143d0053..b953cc7c9c00 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1469,6 +1469,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
1469extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); 1469extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
1470extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); 1470extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
1471extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); 1471extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
1472extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
1472extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local); 1473extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
1473extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, 1474extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
1474 unsigned long al_enr); 1475 unsigned long al_enr);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index dbe6135a2abe..f93a0320e952 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data); 79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); 80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused); 81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82static void _tl_clear(struct drbd_conf *mdev);
82 83
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " 84MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>"); 85 "Lars Ellenberg <lars@linbit.com>");
@@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
432 433
433 /* Actions operating on the disk state, also want to work on 434 /* Actions operating on the disk state, also want to work on
434 requests that got barrier acked. */ 435 requests that got barrier acked. */
435 switch (what) {
436 case fail_frozen_disk_io:
437 case restart_frozen_disk_io:
438 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
439 req = list_entry(le, struct drbd_request, tl_requests);
440 _req_mod(req, what);
441 }
442 436
443 case connection_lost_while_pending: 437 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
444 case resend: 438 req = list_entry(le, struct drbd_request, tl_requests);
445 break; 439 _req_mod(req, what);
446 default:
447 dev_err(DEV, "what = %d in _tl_restart()\n", what);
448 } 440 }
449} 441}
450 442
@@ -459,11 +451,16 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
459 */ 451 */
460void tl_clear(struct drbd_conf *mdev) 452void tl_clear(struct drbd_conf *mdev)
461{ 453{
454 spin_lock_irq(&mdev->req_lock);
455 _tl_clear(mdev);
456 spin_unlock_irq(&mdev->req_lock);
457}
458
459static void _tl_clear(struct drbd_conf *mdev)
460{
462 struct list_head *le, *tle; 461 struct list_head *le, *tle;
463 struct drbd_request *r; 462 struct drbd_request *r;
464 463
465 spin_lock_irq(&mdev->req_lock);
466
467 _tl_restart(mdev, connection_lost_while_pending); 464 _tl_restart(mdev, connection_lost_while_pending);
468 465
469 /* we expect this list to be empty. */ 466 /* we expect this list to be empty. */
@@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev)
482 479
483 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); 480 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
484 481
485 spin_unlock_irq(&mdev->req_lock);
486} 482}
487 483
488void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) 484void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
@@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1476 if (ns.susp_fen) { 1472 if (ns.susp_fen) {
1477 /* case1: The outdate peer handler is successful: */ 1473 /* case1: The outdate peer handler is successful: */
1478 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) { 1474 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1479 tl_clear(mdev);
1480 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1475 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1481 drbd_uuid_new_current(mdev); 1476 drbd_uuid_new_current(mdev);
1482 clear_bit(NEW_CUR_UUID, &mdev->flags); 1477 clear_bit(NEW_CUR_UUID, &mdev->flags);
1483 } 1478 }
1484 spin_lock_irq(&mdev->req_lock); 1479 spin_lock_irq(&mdev->req_lock);
1480 _tl_clear(mdev);
1485 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); 1481 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1486 spin_unlock_irq(&mdev->req_lock); 1482 spin_unlock_irq(&mdev->req_lock);
1487 } 1483 }
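The drbd_main.c rework above splits tl_clear() into a locking wrapper and an _tl_clear() helper that assumes req_lock is already held, so after_state_ch() can clear the transfer log inside the same critical section as the state change. The locked/unlocked pairing in general form, sketched with illustrative names:

struct example_dev {
        spinlock_t lock;
        /* ... protected state ... */
};

static void __example_clear(struct example_dev *dev)
{
        /* caller holds dev->lock */
        /* ... manipulate protected state ... */
}

static void example_clear(struct example_dev *dev)
{
        spin_lock_irq(&dev->lock);
        __example_clear(dev);
        spin_unlock_irq(&dev->lock);
}
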
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index fb9dce8daa24..edb490aad8b4 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -674,8 +674,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
674 la_size_changed && md_moved ? "size changed and md moved" : 674 la_size_changed && md_moved ? "size changed and md moved" :
675 la_size_changed ? "size changed" : "md moved"); 675 la_size_changed ? "size changed" : "md moved");
676 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ 676 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
677 err = drbd_bitmap_io(mdev, &drbd_bm_write, 677 err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
678 "size changed", BM_LOCKED_MASK); 678 "size changed", BM_LOCKED_MASK);
679 if (err) { 679 if (err) {
680 rv = dev_size_error; 680 rv = dev_size_error;
681 goto out; 681 goto out;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 910335c30927..01b2ac641c7b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -695,6 +695,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
695 break; 695 break;
696 696
697 case resend: 697 case resend:
698 /* Simply complete (local only) READs. */
699 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
700 _req_may_be_done(req, m);
701 break;
702 }
703
698 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK 704 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
699 before the connection loss (B&C only); only P_BARRIER_ACK was missing. 705 before the connection loss (B&C only); only P_BARRIER_ACK was missing.
700 Trowing them out of the TL here by pretending we got a BARRIER_ACK 706 Trowing them out of the TL here by pretending we got a BARRIER_ACK
@@ -834,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
834 req->private_bio = NULL; 840 req->private_bio = NULL;
835 } 841 }
836 if (rw == WRITE) { 842 if (rw == WRITE) {
837 remote = 1; 843 /* Need to replicate writes. Unless it is an empty flush,
844 * which is better mapped to a DRBD P_BARRIER packet,
845 * also for drbd wire protocol compatibility reasons. */
846 if (unlikely(size == 0)) {
847 /* The only size==0 bios we expect are empty flushes. */
848 D_ASSERT(bio->bi_rw & REQ_FLUSH);
849 remote = 0;
850 } else
851 remote = 1;
838 } else { 852 } else {
839 /* READ || READA */ 853 /* READ || READA */
840 if (local) { 854 if (local) {
@@ -870,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
870 * extent. This waits for any resync activity in the corresponding 884 * extent. This waits for any resync activity in the corresponding
871 * resync extent to finish, and, if necessary, pulls in the target 885 * resync extent to finish, and, if necessary, pulls in the target
872 * extent into the activity log, which involves further disk io because 886 * extent into the activity log, which involves further disk io because
873 * of transactional on-disk meta data updates. */ 887 * of transactional on-disk meta data updates.
874 if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { 888 * Empty flushes don't need to go into the activity log, they can only
889 * flush data for pending writes which are already in there. */
890 if (rw == WRITE && local && size
891 && !test_bit(AL_SUSPENDED, &mdev->flags)) {
875 req->rq_state |= RQ_IN_ACT_LOG; 892 req->rq_state |= RQ_IN_ACT_LOG;
876 drbd_al_begin_io(mdev, sector); 893 drbd_al_begin_io(mdev, sector);
877 } 894 }
@@ -994,7 +1011,10 @@ allocate_barrier:
994 if (rw == WRITE && _req_conflicts(req)) 1011 if (rw == WRITE && _req_conflicts(req))
995 goto fail_conflicting; 1012 goto fail_conflicting;
996 1013
997 list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); 1014 /* no point in adding empty flushes to the transfer log,
1015 * they are mapped to drbd barriers already. */
1016 if (likely(size!=0))
1017 list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
998 1018
999 /* NOTE remote first: to get the concurrent write detection right, 1019 /* NOTE remote first: to get the concurrent write detection right,
1000 * we must register the request before start of local IO. */ 1020 * we must register the request before start of local IO. */
@@ -1014,6 +1034,14 @@ allocate_barrier:
1014 mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) 1034 mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
1015 maybe_pull_ahead(mdev); 1035 maybe_pull_ahead(mdev);
1016 1036
1037 /* If this was a flush, queue a drbd barrier/start a new epoch.
1038 * Unless the current epoch was empty anyways, or we are not currently
1039 * replicating, in which case there is no point. */
1040 if (unlikely(bio->bi_rw & REQ_FLUSH)
1041 && mdev->newest_tle->n_writes
1042 && drbd_should_do_remote(mdev->state))
1043 queue_barrier(mdev);
1044
1017 spin_unlock_irq(&mdev->req_lock); 1045 spin_unlock_irq(&mdev->req_lock);
1018 kfree(b); /* if someone else has beaten us to it... */ 1046 kfree(b); /* if someone else has beaten us to it... */
1019 1047
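The drbd_req.c hunks above special-case empty flushes: a size-zero REQ_FLUSH bio is not replicated as a write or added to the transfer log, it is mapped to a DRBD P_BARRIER (a new epoch) instead. A sketch of just the detection, using the 3.6-era bio fields; the helper name is illustrative:

static bool example_is_empty_flush(struct bio *bio)
{
        return (bio->bi_rw & REQ_FLUSH) && bio->bi_size == 0;
}
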
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 17fa04d08be9..b47034e650a5 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
218 218
219 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); 219 policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
220 220
221 if (atomic_inc_return(&freq_table_users) == 1) 221 if (!freq_table)
222 result = opp_init_cpufreq_table(mpu_dev, &freq_table); 222 result = opp_init_cpufreq_table(mpu_dev, &freq_table);
223 223
224 if (result) { 224 if (result) {
@@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
227 goto fail_ck; 227 goto fail_ck;
228 } 228 }
229 229
230 atomic_inc_return(&freq_table_users);
231
230 result = cpufreq_frequency_table_cpuinfo(policy, freq_table); 232 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
231 if (result) 233 if (result)
232 goto fail_table; 234 goto fail_table;
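The omap-cpufreq fix above stops using atomic_inc_return() as the "first caller builds the table" test and keys the one-time opp_init_cpufreq_table() call off the freq_table pointer instead, bumping the user count only after the table exists. The corrected init-once shape, sketched with the names from the hunk; the surrounding function is illustrative:

static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);

static int example_policy_init(struct device *mpu_dev)
{
        int result = 0;

        if (!freq_table)        /* build the shared table only once */
                result = opp_init_cpufreq_table(mpu_dev, &freq_table);
        if (result)
                return result;

        atomic_inc(&freq_table_users);  /* count a user only on success */
        return 0;
}
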
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 53c8c51d5881..93d14070141a 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -63,7 +63,7 @@ static void caam_jr_dequeue(unsigned long devarg)
63 63
64 head = ACCESS_ONCE(jrp->head); 64 head = ACCESS_ONCE(jrp->head);
65 65
66 spin_lock_bh(&jrp->outlock); 66 spin_lock(&jrp->outlock);
67 67
68 sw_idx = tail = jrp->tail; 68 sw_idx = tail = jrp->tail;
69 hw_idx = jrp->out_ring_read_index; 69 hw_idx = jrp->out_ring_read_index;
@@ -115,7 +115,7 @@ static void caam_jr_dequeue(unsigned long devarg)
115 jrp->tail = tail; 115 jrp->tail = tail;
116 } 116 }
117 117
118 spin_unlock_bh(&jrp->outlock); 118 spin_unlock(&jrp->outlock);
119 119
120 /* Finally, execute user's callback */ 120 /* Finally, execute user's callback */
121 usercall(dev, userdesc, userstatus, userarg); 121 usercall(dev, userdesc, userstatus, userarg);
@@ -236,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
236 return -EIO; 236 return -EIO;
237 } 237 }
238 238
239 spin_lock(&jrp->inplock); 239 spin_lock_bh(&jrp->inplock);
240 240
241 head = jrp->head; 241 head = jrp->head;
242 tail = ACCESS_ONCE(jrp->tail); 242 tail = ACCESS_ONCE(jrp->tail);
243 243
244 if (!rd_reg32(&jrp->rregs->inpring_avail) || 244 if (!rd_reg32(&jrp->rregs->inpring_avail) ||
245 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { 245 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
246 spin_unlock(&jrp->inplock); 246 spin_unlock_bh(&jrp->inplock);
247 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); 247 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
248 return -EBUSY; 248 return -EBUSY;
249 } 249 }
@@ -265,7 +265,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
265 265
266 wr_reg32(&jrp->rregs->inpring_jobadd, 1); 266 wr_reg32(&jrp->rregs->inpring_jobadd, 1);
267 267
268 spin_unlock(&jrp->inplock); 268 spin_unlock_bh(&jrp->inplock);
269 269
270 return 0; 270 return 0;
271} 271}
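The caam/jr.c change moves the _bh variants to where they belong: caam_jr_dequeue() already runs in a tasklet, so a plain spin_lock() on outlock is enough there, while caam_jr_enqueue() takes inplock with spin_lock_bh() so bottom halves stay off while it holds the lock. The usual pattern for a lock shared between a tasklet and non-tasklet context, as a sketch with illustrative names:

static DEFINE_SPINLOCK(example_lock);

static void example_tasklet_fn(unsigned long data)
{
        spin_lock(&example_lock);       /* already in BH context */
        /* ... consume ring entries ... */
        spin_unlock(&example_lock);
}

static int example_enqueue(void)
{
        spin_lock_bh(&example_lock);    /* process context: keep BHs out while held */
        /* ... produce a ring entry ... */
        spin_unlock_bh(&example_lock);
        return 0;
}
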
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index c9c4befb5a8d..df14358d7fa1 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev)
821 /* 821 /*
822 * We must wait at least 256 Pk_clk cycles between two reads of the rng. 822 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
823 */ 823 */
824 dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) * 824 dev->rng_wait_time = DIV_ROUND_UP_ULL(NSEC_PER_SEC,
825 256; 825 dev->pk_clk_freq) * 256;
826 826
827 dev->rng.name = dev->name; 827 dev->rng.name = dev->name;
828 dev->rng.data_present = hifn_rng_data_present, 828 dev->rng.data_present = hifn_rng_data_present,
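DIV_ROUND_UP() open-codes a '/' on its dividend; DIV_ROUND_UP_ULL() is the variant intended for 64-bit dividends, rounding up through do_div() so 32-bit kernels are not left with an unresolved libgcc division call. A minimal sketch of the computation above, assuming pk_clk_freq is the PK engine clock in Hz; the function name is illustrative:

#include <linux/kernel.h>
#include <linux/time.h>

static u64 example_rng_wait_ns(u32 pk_clk_freq)
{
        /* wait at least 256 Pk_clk cycles between two RNG reads */
        return DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC, pk_clk_freq) * 256;
}
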
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 08a7aa722d6b..6fbfc244748f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
1981 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1981 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1982 return -EINVAL; 1982 return -EINVAL;
1983 1983
1984 if (!req->flags) 1984 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
1985 return -EINVAL; 1985 return -EINVAL;
1986 1986
1987 mutex_lock(&dev->mode_config.mutex); 1987 mutex_lock(&dev->mode_config.mutex);
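drm_mode_cursor_ioctl() now rejects requests that set bits outside DRM_MODE_CURSOR_FLAGS as well as requests with no flags at all, so unknown flag bits fail loudly instead of being silently ignored. The validation pattern in isolation, as a sketch; 'valid' stands for whatever mask of bits the interface currently defines:

static bool example_flags_valid(u32 flags, u32 valid)
{
        /* at least one known bit set, and nothing outside the known set */
        return flags != 0 && (flags & ~valid) == 0;
}
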
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a8743c399e83..b7ee230572b7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -87,6 +87,9 @@ static struct edid_quirk {
87 int product_id; 87 int product_id;
88 u32 quirks; 88 u32 quirks;
89} edid_quirk_list[] = { 89} edid_quirk_list[] = {
90 /* ASUS VW222S */
91 { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
92
90 /* Acer AL1706 */ 93 /* Acer AL1706 */
91 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, 94 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
92 /* Acer F51 */ 95 /* Acer F51 */
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 30dc22a7156c..8033526bb53b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
1362 (struct drm_connector **) (psb_intel_crtc + 1); 1362 (struct drm_connector **) (psb_intel_crtc + 1);
1363 psb_intel_crtc->mode_set.num_connectors = 0; 1363 psb_intel_crtc->mode_set.num_connectors = 0;
1364 psb_intel_cursor_init(dev, psb_intel_crtc); 1364 psb_intel_cursor_init(dev, psb_intel_crtc);
1365
1366 /* Set to true so that the pipe is forced off on initial config. */
1367 psb_intel_crtc->active = true;
1365} 1368}
1366 1369
1367int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 1370int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d9a5372ec56f..60815b861ec2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
72 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 72 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
73 * entries. For aliasing ppgtt support we just steal them at the end for 73 * entries. For aliasing ppgtt support we just steal them at the end for
74 * now. */ 74 * now. */
75 first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; 75 first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
76 76
77 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 77 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
78 if (!ppgtt) 78 if (!ppgtt)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a69a3d0d3acf..2dfa6cf4886b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1384,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1384 enum pipe pipe, int reg) 1384 enum pipe pipe, int reg)
1385{ 1385{
1386 u32 val = I915_READ(reg); 1386 u32 val = I915_READ(reg);
1387 WARN(hdmi_pipe_enabled(dev_priv, val, pipe), 1387 WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1388 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1388 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1389 reg, pipe_name(pipe)); 1389 reg, pipe_name(pipe));
1390 1390
@@ -1404,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1404 1404
1405 reg = PCH_ADPA; 1405 reg = PCH_ADPA;
1406 val = I915_READ(reg); 1406 val = I915_READ(reg);
1407 WARN(adpa_pipe_enabled(dev_priv, val, pipe), 1407 WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1408 "PCH VGA enabled on transcoder %c, should be disabled\n", 1408 "PCH VGA enabled on transcoder %c, should be disabled\n",
1409 pipe_name(pipe)); 1409 pipe_name(pipe));
1410 1410
1411 reg = PCH_LVDS; 1411 reg = PCH_LVDS;
1412 val = I915_READ(reg); 1412 val = I915_READ(reg);
1413 WARN(lvds_pipe_enabled(dev_priv, val, pipe), 1413 WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1414 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1414 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1415 pipe_name(pipe)); 1415 pipe_name(pipe));
1416 1416
@@ -1872,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1872 enum pipe pipe, int reg) 1872 enum pipe pipe, int reg)
1873{ 1873{
1874 u32 val = I915_READ(reg); 1874 u32 val = I915_READ(reg);
1875 if (hdmi_pipe_enabled(dev_priv, val, pipe)) { 1875 if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1876 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", 1876 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1877 reg, pipe); 1877 reg, pipe);
1878 I915_WRITE(reg, val & ~PORT_ENABLE); 1878 I915_WRITE(reg, val & ~PORT_ENABLE);
@@ -1894,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1894 1894
1895 reg = PCH_ADPA; 1895 reg = PCH_ADPA;
1896 val = I915_READ(reg); 1896 val = I915_READ(reg);
1897 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1897 if (adpa_pipe_enabled(dev_priv, pipe, val))
1898 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1898 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1899 1899
1900 reg = PCH_LVDS; 1900 reg = PCH_LVDS;
1901 val = I915_READ(reg); 1901 val = I915_READ(reg);
1902 if (lvds_pipe_enabled(dev_priv, val, pipe)) { 1902 if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1903 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); 1903 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1904 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1904 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1905 POSTING_READ(reg); 1905 POSTING_READ(reg);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e05c0d3e3440..e9a6f6aaed85 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -780,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
780 DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), 780 DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
781 }, 781 },
782 }, 782 },
783 {
784 .callback = intel_no_lvds_dmi_callback,
785 .ident = "Gigabyte GA-D525TUD",
786 .matches = {
787 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
788 DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
789 },
790 },
783 791
784 { } /* terminating entry */ 792 { } /* terminating entry */
785}; 793};
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index cc8df4de2d92..7644f31a3778 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
60 60
61 switch (fb->pixel_format) { 61 switch (fb->pixel_format) {
62 case DRM_FORMAT_XBGR8888: 62 case DRM_FORMAT_XBGR8888:
63 sprctl |= SPRITE_FORMAT_RGBX888; 63 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
64 pixel_size = 4; 64 pixel_size = 4;
65 break; 65 break;
66 case DRM_FORMAT_XRGB8888: 66 case DRM_FORMAT_XRGB8888:
67 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; 67 sprctl |= SPRITE_FORMAT_RGBX888;
68 pixel_size = 4; 68 pixel_size = 4;
69 break; 69 break;
70 case DRM_FORMAT_YUYV: 70 case DRM_FORMAT_YUYV:
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 1866dbb49979..c61014442aa9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -736,9 +736,11 @@ nouveau_card_init(struct drm_device *dev)
736 } 736 }
737 break; 737 break;
738 case NV_C0: 738 case NV_C0:
739 nvc0_copy_create(dev, 1); 739 if (!(nv_rd32(dev, 0x022500) & 0x00000200))
740 nvc0_copy_create(dev, 1);
740 case NV_D0: 741 case NV_D0:
741 nvc0_copy_create(dev, 0); 742 if (!(nv_rd32(dev, 0x022500) & 0x00000100))
743 nvc0_copy_create(dev, 0);
742 break; 744 break;
743 default: 745 default:
744 break; 746 break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index f4d4505fe831..2817101fb167 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -258,7 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
258 radeon_crtc->enabled = true; 258 radeon_crtc->enabled = true;
259 /* adjust pm to dpms changes BEFORE enabling crtcs */ 259 /* adjust pm to dpms changes BEFORE enabling crtcs */
260 radeon_pm_compute_clocks(rdev); 260 radeon_pm_compute_clocks(rdev);
261 /* disable crtc pair power gating before programming */
262 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) 261 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
263 atombios_powergate_crtc(crtc, ATOM_DISABLE); 262 atombios_powergate_crtc(crtc, ATOM_DISABLE);
264 atombios_enable_crtc(crtc, ATOM_ENABLE); 263 atombios_enable_crtc(crtc, ATOM_ENABLE);
@@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
278 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); 277 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
279 atombios_enable_crtc(crtc, ATOM_DISABLE); 278 atombios_enable_crtc(crtc, ATOM_DISABLE);
280 radeon_crtc->enabled = false; 279 radeon_crtc->enabled = false;
281 /* power gating is per-pair */ 280 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
282 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { 281 atombios_powergate_crtc(crtc, ATOM_ENABLE);
283 struct drm_crtc *other_crtc;
284 struct radeon_crtc *other_radeon_crtc;
285 list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
286 other_radeon_crtc = to_radeon_crtc(other_crtc);
287 if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) ||
288 ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) ||
289 ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) ||
290 ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) ||
291 ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) ||
292 ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) {
293 /* if both crtcs in the pair are off, enable power gating */
294 if (other_radeon_crtc->enabled == false)
295 atombios_powergate_crtc(crtc, ATOM_ENABLE);
296 break;
297 }
298 }
299 }
300 /* adjust pm to dpms changes AFTER disabling crtcs */ 282 /* adjust pm to dpms changes AFTER disabling crtcs */
301 radeon_pm_compute_clocks(rdev); 283 radeon_pm_compute_clocks(rdev);
302 break; 284 break;
@@ -1682,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1682 struct drm_device *dev = crtc->dev; 1664 struct drm_device *dev = crtc->dev;
1683 struct radeon_device *rdev = dev->dev_private; 1665 struct radeon_device *rdev = dev->dev_private;
1684 struct radeon_atom_ss ss; 1666 struct radeon_atom_ss ss;
1667 int i;
1685 1668
1686 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1669 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1687 1670
1671 for (i = 0; i < rdev->num_crtc; i++) {
1672 if (rdev->mode_info.crtcs[i] &&
1673 rdev->mode_info.crtcs[i]->enabled &&
1674 i != radeon_crtc->crtc_id &&
1675 radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
1676 /* one other crtc is using this pll don't turn
1677 * off the pll
1678 */
1679 goto done;
1680 }
1681 }
1682
1688 switch (radeon_crtc->pll_id) { 1683 switch (radeon_crtc->pll_id) {
1689 case ATOM_PPLL1: 1684 case ATOM_PPLL1:
1690 case ATOM_PPLL2: 1685 case ATOM_PPLL2:
@@ -1701,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1701 default: 1696 default:
1702 break; 1697 break;
1703 } 1698 }
1699done:
1704 radeon_crtc->pll_id = -1; 1700 radeon_crtc->pll_id = -1;
1705} 1701}
1706 1702
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7712cf5ab33b..3623b98ed3fe 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
577 struct radeon_device *rdev = dev->dev_private; 577 struct radeon_device *rdev = dev->dev_private;
578 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 578 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
579 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 579 int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
580 u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
581 u8 tmp;
580 582
581 if (!ASIC_IS_DCE4(rdev)) 583 if (!ASIC_IS_DCE4(rdev))
582 return panel_mode; 584 return panel_mode;
583 585
584 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == 586 if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
585 ENCODER_OBJECT_ID_NUTMEG) 587 /* DP bridge chips */
586 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 588 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
587 else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == 589 if (tmp & 1)
588 ENCODER_OBJECT_ID_TRAVIS) { 590 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
589 u8 id[6]; 591 else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
590 int i; 592 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
591 for (i = 0; i < 6; i++)
592 id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
593 if (id[0] == 0x73 &&
594 id[1] == 0x69 &&
595 id[2] == 0x76 &&
596 id[3] == 0x61 &&
597 id[4] == 0x72 &&
598 id[5] == 0x54)
599 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 593 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
600 else 594 else
601 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 595 panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
602 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 596 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
603 u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); 597 /* eDP */
598 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
604 if (tmp & 1) 599 if (tmp & 1)
605 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 600 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
606 } 601 }
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index f9bc27fe269a..6e8803a1170c 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1379 struct drm_device *dev = encoder->dev; 1379 struct drm_device *dev = encoder->dev;
1380 struct radeon_device *rdev = dev->dev_private; 1380 struct radeon_device *rdev = dev->dev_private;
1381 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1381 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1382 struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
1383 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1382 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1384 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1383 struct radeon_connector *radeon_connector = NULL; 1385 struct radeon_connector *radeon_connector = NULL;
1384 struct radeon_connector_atom_dig *radeon_dig_connector = NULL; 1386 struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
@@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1390 1392
1391 switch (mode) { 1393 switch (mode) {
1392 case DRM_MODE_DPMS_ON: 1394 case DRM_MODE_DPMS_ON:
1393 /* some early dce3.2 boards have a bug in their transmitter control table */ 1395 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1394 if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || 1396 if (!connector)
1395 ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { 1397 dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
1396 if (ASIC_IS_DCE6(rdev)) { 1398 else
1397 /* It seems we need to call ATOM_ENCODER_CMD_SETUP again 1399 dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
1398 * before reenabling encoder on DPMS ON, otherwise we never 1400
1399 * get picture 1401 /* setup and enable the encoder */
1400 */ 1402 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1401 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); 1403 atombios_dig_encoder_setup(encoder,
1404 ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
1405 dig->panel_mode);
1406 if (ext_encoder) {
1407 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1408 atombios_external_encoder_setup(encoder, ext_encoder,
1409 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1402 } 1410 }
1403 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1411 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1404 } else { 1412 } else if (ASIC_IS_DCE4(rdev)) {
1413 /* setup and enable the encoder */
1414 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1415 /* enable the transmitter */
1416 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1405 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1417 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1418 } else {
1419 /* setup and enable the encoder and transmitter */
1420 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1421 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1422 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1423 /* some early dce3.2 boards have a bug in their transmitter control table */
1424 if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
1425 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1406 } 1426 }
1407 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1427 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1408 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1428 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1420 case DRM_MODE_DPMS_STANDBY: 1440 case DRM_MODE_DPMS_STANDBY:
1421 case DRM_MODE_DPMS_SUSPEND: 1441 case DRM_MODE_DPMS_SUSPEND:
1422 case DRM_MODE_DPMS_OFF: 1442 case DRM_MODE_DPMS_OFF:
1423 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) 1443 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1444 /* disable the transmitter */
1424 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1445 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1425 else 1446 } else if (ASIC_IS_DCE4(rdev)) {
1447 /* disable the transmitter */
1448 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1449 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1450 } else {
1451 /* disable the encoder and transmitter */
1426 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1452 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1453 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1454 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1455 }
1427 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1456 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1428 if (ASIC_IS_DCE4(rdev)) 1457 if (ASIC_IS_DCE4(rdev))
1429 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); 1458 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1740 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 1769 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1741 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1770 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1742 struct drm_encoder *test_encoder; 1771 struct drm_encoder *test_encoder;
1743 struct radeon_encoder_atom_dig *dig; 1772 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1744 uint32_t dig_enc_in_use = 0; 1773 uint32_t dig_enc_in_use = 0;
1745 1774
1746 /* DCE4/5 */ 1775 if (ASIC_IS_DCE6(rdev)) {
1747 if (ASIC_IS_DCE4(rdev)) { 1776 /* DCE6 */
1748 dig = radeon_encoder->enc_priv; 1777 switch (radeon_encoder->encoder_id) {
1749 if (ASIC_IS_DCE41(rdev)) { 1778 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1779 if (dig->linkb)
1780 return 1;
1781 else
1782 return 0;
1783 break;
1784 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1785 if (dig->linkb)
1786 return 3;
1787 else
1788 return 2;
1789 break;
1790 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1791 if (dig->linkb)
1792 return 5;
1793 else
1794 return 4;
1795 break;
1796 }
1797 } else if (ASIC_IS_DCE4(rdev)) {
1798 /* DCE4/5 */
1799 if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
1750 /* ontario follows DCE4 */ 1800 /* ontario follows DCE4 */
1751 if (rdev->family == CHIP_PALM) { 1801 if (rdev->family == CHIP_PALM) {
1752 if (dig->linkb) 1802 if (dig->linkb)
@@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1848 struct drm_device *dev = encoder->dev; 1898 struct drm_device *dev = encoder->dev;
1849 struct radeon_device *rdev = dev->dev_private; 1899 struct radeon_device *rdev = dev->dev_private;
1850 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1900 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1851 struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
1852 1901
1853 radeon_encoder->pixel_clock = adjusted_mode->clock; 1902 radeon_encoder->pixel_clock = adjusted_mode->clock;
1854 1903
1904 /* need to call this here rather than in prepare() since we need some crtc info */
1905 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1906
1855 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { 1907 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
1856 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) 1908 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
1857 atombios_yuv_setup(encoder, true); 1909 atombios_yuv_setup(encoder, true);
@@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1870 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1922 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1871 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1923 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1872 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1924 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1873 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { 1925 /* handled in dpms */
1874 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1875 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1876
1877 if (!connector)
1878 dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
1879 else
1880 dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
1881
1882 /* setup and enable the encoder */
1883 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1884 atombios_dig_encoder_setup(encoder,
1885 ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
1886 dig->panel_mode);
1887 } else if (ASIC_IS_DCE4(rdev)) {
1888 /* disable the transmitter */
1889 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1890 /* setup and enable the encoder */
1891 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1892
1893 /* enable the transmitter */
1894 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1895 } else {
1896 /* disable the encoder and transmitter */
1897 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1898 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1899
1900 /* setup and enable the encoder and transmitter */
1901 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1902 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1903 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1904 }
1905 break; 1926 break;
1906 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1927 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1907 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1928 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1922 break; 1943 break;
1923 } 1944 }
1924 1945
1925 if (ext_encoder) {
1926 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1927 atombios_external_encoder_setup(encoder, ext_encoder,
1928 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1929 else
1930 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1931 }
1932
1933 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1946 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1934 1947
1935 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 1948 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
2116 } 2129 }
2117 2130
2118 radeon_atom_output_lock(encoder, true); 2131 radeon_atom_output_lock(encoder, true);
2119 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
2120 2132
2121 if (connector) { 2133 if (connector) {
2122 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 2134 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
2137 2149
2138static void radeon_atom_encoder_commit(struct drm_encoder *encoder) 2150static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
2139{ 2151{
2152 /* need to call this here as we need the crtc set up */
2140 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 2153 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
2141 radeon_atom_output_lock(encoder, false); 2154 radeon_atom_output_lock(encoder, false);
2142} 2155}
@@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
2177 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2190 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2178 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2191 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2179 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 2192 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2180 if (ASIC_IS_DCE4(rdev)) 2193 /* handled in dpms */
2181 /* disable the transmitter */
2182 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2183 else {
2184 /* disable the encoder and transmitter */
2185 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2186 atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
2187 }
2188 break; 2194 break;
2189 case ENCODER_OBJECT_ID_INTERNAL_DDI: 2195 case ENCODER_OBJECT_ID_INTERNAL_DDI:
2190 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 2196 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ab74e6b149e7..f37676d7f217 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -63,6 +63,7 @@ struct r600_cs_track {
63 u32 cb_color_size_idx[8]; /* unused */ 63 u32 cb_color_size_idx[8]; /* unused */
64 u32 cb_target_mask; 64 u32 cb_target_mask;
65 u32 cb_shader_mask; /* unused */ 65 u32 cb_shader_mask; /* unused */
66 bool is_resolve;
66 u32 cb_color_size[8]; 67 u32 cb_color_size[8];
67 u32 vgt_strmout_en; 68 u32 vgt_strmout_en;
68 u32 vgt_strmout_buffer_en; 69 u32 vgt_strmout_buffer_en;
@@ -315,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track)
315 track->cb_color_bo[i] = NULL; 316 track->cb_color_bo[i] = NULL;
316 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 317 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
317 track->cb_color_bo_mc[i] = 0xFFFFFFFF; 318 track->cb_color_bo_mc[i] = 0xFFFFFFFF;
318 } 319 track->cb_color_frag_bo[i] = NULL;
320 track->cb_color_frag_offset[i] = 0xFFFFFFFF;
321 track->cb_color_tile_bo[i] = NULL;
322 track->cb_color_tile_offset[i] = 0xFFFFFFFF;
323 track->cb_color_mask[i] = 0xFFFFFFFF;
324 }
325 track->is_resolve = false;
326 track->nsamples = 16;
327 track->log_nsamples = 4;
319 track->cb_target_mask = 0xFFFFFFFF; 328 track->cb_target_mask = 0xFFFFFFFF;
320 track->cb_shader_mask = 0xFFFFFFFF; 329 track->cb_shader_mask = 0xFFFFFFFF;
321 track->cb_dirty = true; 330 track->cb_dirty = true;
@@ -352,6 +361,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
352 volatile u32 *ib = p->ib.ptr; 361 volatile u32 *ib = p->ib.ptr;
353 unsigned array_mode; 362 unsigned array_mode;
354 u32 format; 363 u32 format;
364 /* When resolve is used, the second colorbuffer has always 1 sample. */
365 unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
355 366
356 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 367 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
357 format = G_0280A0_FORMAT(track->cb_color_info[i]); 368 format = G_0280A0_FORMAT(track->cb_color_info[i]);
@@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
375 array_check.group_size = track->group_size; 386 array_check.group_size = track->group_size;
376 array_check.nbanks = track->nbanks; 387 array_check.nbanks = track->nbanks;
377 array_check.npipes = track->npipes; 388 array_check.npipes = track->npipes;
378 array_check.nsamples = track->nsamples; 389 array_check.nsamples = nsamples;
379 array_check.blocksize = r600_fmt_get_blocksize(format); 390 array_check.blocksize = r600_fmt_get_blocksize(format);
380 if (r600_get_array_mode_alignment(&array_check, 391 if (r600_get_array_mode_alignment(&array_check,
381 &pitch_align, &height_align, &depth_align, &base_align)) { 392 &pitch_align, &height_align, &depth_align, &base_align)) {
@@ -421,7 +432,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
421 432
422 /* check offset */ 433 /* check offset */
423 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * 434 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
424 r600_fmt_get_blocksize(format) * track->nsamples; 435 r600_fmt_get_blocksize(format) * nsamples;
425 switch (array_mode) { 436 switch (array_mode) {
426 default: 437 default:
427 case V_0280A0_ARRAY_LINEAR_GENERAL: 438 case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -792,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
792 */ 803 */
793 if (track->cb_dirty) { 804 if (track->cb_dirty) {
794 tmp = track->cb_target_mask; 805 tmp = track->cb_target_mask;
806
807 /* We must check both colorbuffers for RESOLVE. */
808 if (track->is_resolve) {
809 tmp |= 0xff;
810 }
811
795 for (i = 0; i < 8; i++) { 812 for (i = 0; i < 8; i++) {
796 if ((tmp >> (i * 4)) & 0xF) { 813 if ((tmp >> (i * 4)) & 0xF) {
797 /* at least one component is enabled */ 814 /* at least one component is enabled */
@@ -1281,6 +1298,11 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1281 track->nsamples = 1 << tmp; 1298 track->nsamples = 1 << tmp;
1282 track->cb_dirty = true; 1299 track->cb_dirty = true;
1283 break; 1300 break;
1301 case R_028808_CB_COLOR_CONTROL:
1302 tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
1303 track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
1304 track->cb_dirty = true;
1305 break;
1284 case R_0280A0_CB_COLOR0_INFO: 1306 case R_0280A0_CB_COLOR0_INFO:
1285 case R_0280A4_CB_COLOR1_INFO: 1307 case R_0280A4_CB_COLOR1_INFO:
1286 case R_0280A8_CB_COLOR2_INFO: 1308 case R_0280A8_CB_COLOR2_INFO:
@@ -1416,7 +1438,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1416 case R_028118_CB_COLOR6_MASK: 1438 case R_028118_CB_COLOR6_MASK:
1417 case R_02811C_CB_COLOR7_MASK: 1439 case R_02811C_CB_COLOR7_MASK:
1418 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; 1440 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1419 track->cb_color_mask[tmp] = ib[idx]; 1441 track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
1420 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1442 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1421 track->cb_dirty = true; 1443 track->cb_dirty = true;
1422 } 1444 }
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bdb69a63062f..fa6f37099ba9 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -66,6 +66,14 @@
66#define CC_RB_BACKEND_DISABLE 0x98F4 66#define CC_RB_BACKEND_DISABLE 0x98F4
67#define BACKEND_DISABLE(x) ((x) << 16) 67#define BACKEND_DISABLE(x) ((x) << 16)
68 68
69#define R_028808_CB_COLOR_CONTROL 0x28808
70#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
71#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
72#define C_028808_SPECIAL_OP 0xFFFFFF8F
73#define V_028808_SPECIAL_NORMAL 0x00
74#define V_028808_SPECIAL_DISABLE 0x01
75#define V_028808_SPECIAL_RESOLVE_BOX 0x07
76
69#define CB_COLOR0_BASE 0x28040 77#define CB_COLOR0_BASE 0x28040
70#define CB_COLOR1_BASE 0x28044 78#define CB_COLOR1_BASE 0x28044
71#define CB_COLOR2_BASE 0x28048 79#define CB_COLOR2_BASE 0x28048
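The new r600d.h entries follow the header's S_/G_/C_/V_ convention: S_ shifts a value into the field, G_ extracts it, C_ is the clear mask, and V_ names the defined field values. They let the CS checker read SPECIAL_OP out of CB_COLOR_CONTROL now that the register is validated in code rather than whitelisted in reg_srcs/r600 (removed below). How the macros compose, mirroring the r600_cs.c check above:

static bool example_is_resolve(u32 cb_color_control)
{
        return G_028808_SPECIAL_OP(cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX;
}
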
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d2e243867ac6..7a3daebd732d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1051,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev,
1051 if (rdev->flags & RADEON_IS_AGP) 1051 if (rdev->flags & RADEON_IS_AGP)
1052 rdev->need_dma32 = true; 1052 rdev->need_dma32 = true;
1053 if ((rdev->flags & RADEON_IS_PCI) && 1053 if ((rdev->flags & RADEON_IS_PCI) &&
1054 (rdev->family < CHIP_RS400)) 1054 (rdev->family <= CHIP_RS740))
1055 rdev->need_dma32 = true; 1055 rdev->need_dma32 = true;
1056 1056
1057 dma_bits = rdev->need_dma32 ? 32 : 40; 1057 dma_bits = rdev->need_dma32 ? 32 : 40;
@@ -1346,12 +1346,15 @@ retry:
1346 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1346 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1347 radeon_ring_restore(rdev, &rdev->ring[i], 1347 radeon_ring_restore(rdev, &rdev->ring[i],
1348 ring_sizes[i], ring_data[i]); 1348 ring_sizes[i], ring_data[i]);
1349 ring_sizes[i] = 0;
1350 ring_data[i] = NULL;
1349 } 1351 }
1350 1352
1351 r = radeon_ib_ring_tests(rdev); 1353 r = radeon_ib_ring_tests(rdev);
1352 if (r) { 1354 if (r) {
1353 dev_err(rdev->dev, "ib ring test failed (%d).\n", r); 1355 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1354 if (saved) { 1356 if (saved) {
1357 saved = false;
1355 radeon_suspend(rdev); 1358 radeon_suspend(rdev);
1356 goto retry; 1359 goto retry;
1357 } 1360 }
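The radeon_device.c hunk above makes the GPU-reset retry path safe to take twice: each ring's saved copy is handed to radeon_ring_restore() exactly once, the local pointers are cleared immediately afterwards, and "saved" is dropped so the retry fires only once. A minimal userspace sketch of that ownership-handoff pattern follows; restore_ring(), ring_test_failed() and reset_with_retry() are illustrative names, not the driver's API.

    #include <stdlib.h>

    /* Hypothetical stand-in for radeon_ring_restore(): consumes (and here
     * simply frees) the caller's saved copy of the ring contents. */
    static void restore_ring(unsigned int ring, void *data, unsigned int size)
    {
        (void)ring; (void)size;
        free(data);
    }

    static int ring_test_failed(void) { return 0; }   /* pretend the test passes */

    static int reset_with_retry(void *saved_data[], unsigned int saved_size[],
                                unsigned int nrings)
    {
        int retried = 0;

    retry:
        for (unsigned int i = 0; i < nrings; i++) {
            restore_ring(i, saved_data[i], saved_size[i]);
            saved_data[i] = NULL;   /* drop our reference so a retry cannot  */
            saved_size[i] = 0;      /* hand the same buffer over twice       */
        }

        if (ring_test_failed() && !retried) {
            retried = 1;            /* like clearing 'saved': retry only once */
            goto retry;             /* second pass sees NULLs, nothing to double-free */
        }
        return 0;
    }

    int main(void)
    {
        void *data[2] = { malloc(8), malloc(8) };
        unsigned int size[2] = { 8, 8 };
        return reset_with_retry(data, size, 2);
    }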
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 27d22d709c90..8c593ea82c41 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -63,9 +63,10 @@
63 * 2.19.0 - r600-eg: MSAA textures 63 * 2.19.0 - r600-eg: MSAA textures
64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query 64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
65 * 2.21.0 - r600-r700: FMASK and CMASK 65 * 2.21.0 - r600-r700: FMASK and CMASK
66 * 2.22.0 - r600 only: RESOLVE_BOX allowed
66 */ 67 */
67#define KMS_DRIVER_MAJOR 2 68#define KMS_DRIVER_MAJOR 2
68#define KMS_DRIVER_MINOR 21 69#define KMS_DRIVER_MINOR 22
69#define KMS_DRIVER_PATCHLEVEL 0 70#define KMS_DRIVER_PATCHLEVEL 0
70int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 71int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
71int radeon_driver_unload_kms(struct drm_device *dev); 72int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index f93e45d869f4..20bfbda7b3f1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -744,7 +744,6 @@ r600 0x9400
7440x00028C38 CB_CLRCMP_DST 7440x00028C38 CB_CLRCMP_DST
7450x00028C3C CB_CLRCMP_MSK 7450x00028C3C CB_CLRCMP_MSK
7460x00028C34 CB_CLRCMP_SRC 7460x00028C34 CB_CLRCMP_SRC
7470x00028808 CB_COLOR_CONTROL
7480x0002842C CB_FOG_BLUE 7470x0002842C CB_FOG_BLUE
7490x00028428 CB_FOG_GREEN 7480x00028428 CB_FOG_GREEN
7500x00028424 CB_FOG_RED 7490x00028424 CB_FOG_RED
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 60ea284407ce..8bf8a64e5115 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1624,7 +1624,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
1624 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 1624 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
1625 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 1625 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
1626 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, 1626 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
1627 { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
1628 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, 1627 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
1629 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, 1628 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
1630 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, 1629 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 351d1f4593e7..4ee578948723 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {
34 .matches = { 34 .matches = {
35 DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") 35 DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
36 } 36 }
37 }, {
38 /* Old interface reads the same sensor for fan0 and fan1 */
39 .ident = "Asus M5A78L",
40 .matches = {
41 DMI_MATCH(DMI_BOARD_NAME, "M5A78L")
42 }
37 }, 43 },
38 { } 44 { }
39}; 45};
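The asus_atk0110.c change above appends one more entry to the sentinel-terminated DMI quirk table so boards whose DMI board name contains "M5A78L" are forced onto the new interface. Below is a small userspace analog of how such a table is scanned, assuming a substring match like DMI_MATCH(); struct board_quirk and match_quirk() are made-up names.

    #include <stdio.h>
    #include <string.h>

    /* Userspace analog of a dmi_system_id quirk table: sentinel-terminated,
     * each entry forces the new ATK0110 interface when the board matches. */
    struct board_quirk {
        const char *ident;
        const char *board_substr;   /* DMI_MATCH() is a substring match */
    };

    static const struct board_quirk force_new_if[] = {
        { "Asus Sabertooth X58", "SABERTOOTH X58" },
        { "Asus M5A78L",         "M5A78L" },    /* also hits M5A78L-M/USB3 etc. */
        { NULL, NULL }                          /* sentinel, like the { } entry */
    };

    static const char *match_quirk(const char *dmi_board_name)
    {
        for (const struct board_quirk *q = force_new_if; q->ident; q++)
            if (strstr(dmi_board_name, q->board_substr))
                return q->ident;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", match_quirk("M5A78L-M/USB3") ? "use new interface"
                                                    : "use old interface");
        return 0;
    }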
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 92406097efeb..8d1e32d7cd97 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -4,7 +4,7 @@
4 4
5int generic_ide_suspend(struct device *dev, pm_message_t mesg) 5int generic_ide_suspend(struct device *dev, pm_message_t mesg)
6{ 6{
7 ide_drive_t *drive = dev_get_drvdata(dev); 7 ide_drive_t *drive = to_ide_device(dev);
8 ide_drive_t *pair = ide_get_pair_dev(drive); 8 ide_drive_t *pair = ide_get_pair_dev(drive);
9 ide_hwif_t *hwif = drive->hwif; 9 ide_hwif_t *hwif = drive->hwif;
10 struct request *rq; 10 struct request *rq;
@@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
40 40
41int generic_ide_resume(struct device *dev) 41int generic_ide_resume(struct device *dev)
42{ 42{
43 ide_drive_t *drive = dev_get_drvdata(dev); 43 ide_drive_t *drive = to_ide_device(dev);
44 ide_drive_t *pair = ide_get_pair_dev(drive); 44 ide_drive_t *pair = ide_get_pair_dev(drive);
45 ide_hwif_t *hwif = drive->hwif; 45 ide_hwif_t *hwif = drive->hwif;
46 struct request *rq; 46 struct request *rq;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 40a826a7295f..2fb2b9ea97ec 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
3804 case BIODASDSYMMIO: 3804 case BIODASDSYMMIO:
3805 return dasd_symm_io(device, argp); 3805 return dasd_symm_io(device, argp);
3806 default: 3806 default:
3807 return -ENOIOCTLCMD; 3807 return -ENOTTY;
3808 } 3808 }
3809} 3809}
3810 3810
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index cceae70279f6..654c6921a6d4 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
498 break; 498 break;
499 default: 499 default:
500 /* if the discipline has an ioctl method try it. */ 500 /* if the discipline has an ioctl method try it. */
501 if (base->discipline->ioctl) { 501 rc = -ENOTTY;
502 if (base->discipline->ioctl)
502 rc = base->discipline->ioctl(block, cmd, argp); 503 rc = base->discipline->ioctl(block, cmd, argp);
503 if (rc == -ENOIOCTLCMD)
504 rc = -EINVAL;
505 } else
506 rc = -EINVAL;
507 } 504 }
508 dasd_put_device(base); 505 dasd_put_device(base);
509 return rc; 506 return rc;
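The dasd_ioctl.c hunk replaces the old -ENOIOCTLCMD/-EINVAL translation with the usual convention: preload -ENOTTY for an unrecognised ioctl and let the optional discipline hook override it. A compact sketch of that shape, with a hypothetical struct discipline and dispatch_ioctl() that are not the kernel's types:

    #include <errno.h>
    #include <stddef.h>

    /* Hypothetical discipline with an optional ioctl hook; the shape mirrors
     * the dasd code above but none of these names are the kernel's. */
    struct discipline {
        int (*ioctl)(unsigned int cmd, void *argp);
    };

    static int dispatch_ioctl(struct discipline *d, unsigned int cmd, void *argp)
    {
        int rc;

        switch (cmd) {
        /* ... recognised commands handled in their own cases ... */
        default:
            /* Preload the conventional "not a valid ioctl for this object"
             * error, then let the optional hook override it if present. */
            rc = -ENOTTY;
            if (d->ioctl)
                rc = d->ioctl(cmd, argp);
        }
        return rc;
    }

    int main(void)
    {
        struct discipline d = { NULL };
        return dispatch_ioctl(&d, 0xdeadU, NULL) == -ENOTTY ? 0 : 1;
    }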
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index ea0aaa3f13d0..a9f4049c6769 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -47,6 +47,8 @@ struct bcm63xx_spi {
47 /* Platform data */ 47 /* Platform data */
48 u32 speed_hz; 48 u32 speed_hz;
49 unsigned fifo_size; 49 unsigned fifo_size;
50 unsigned int msg_type_shift;
51 unsigned int msg_ctl_width;
50 52
51 /* Data buffers */ 53 /* Data buffers */
52 const unsigned char *tx_ptr; 54 const unsigned char *tx_ptr;
@@ -221,13 +223,20 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
221 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); 223 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
222 224
223 if (t->rx_buf && t->tx_buf) 225 if (t->rx_buf && t->tx_buf)
224 msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT); 226 msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
225 else if (t->rx_buf) 227 else if (t->rx_buf)
226 msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT); 228 msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
227 else if (t->tx_buf) 229 else if (t->tx_buf)
228 msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT); 230 msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
229 231
230 bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); 232 switch (bs->msg_ctl_width) {
233 case 8:
234 bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL);
235 break;
236 case 16:
237 bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
238 break;
239 }
231 240
232 /* Issue the transfer */ 241 /* Issue the transfer */
233 cmd = SPI_CMD_START_IMMEDIATE; 242 cmd = SPI_CMD_START_IMMEDIATE;
@@ -406,9 +415,21 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
406 master->transfer_one_message = bcm63xx_spi_transfer_one; 415 master->transfer_one_message = bcm63xx_spi_transfer_one;
407 master->mode_bits = MODEBITS; 416 master->mode_bits = MODEBITS;
408 bs->speed_hz = pdata->speed_hz; 417 bs->speed_hz = pdata->speed_hz;
418 bs->msg_type_shift = pdata->msg_type_shift;
419 bs->msg_ctl_width = pdata->msg_ctl_width;
409 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); 420 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
410 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); 421 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
411 422
423 switch (bs->msg_ctl_width) {
424 case 8:
425 case 16:
426 break;
427 default:
428 dev_err(dev, "unsupported MSG_CTL width: %d\n",
429 bs->msg_ctl_width);
430 goto out_clk_disable;
431 }
432
412 /* Initialize hardware */ 433 /* Initialize hardware */
413 clk_enable(bs->clk); 434 clk_enable(bs->clk);
414 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); 435 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
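The spi-bcm63xx.c changes stop hard-coding the MSG_CTL layout: both the message-type shift and the register width now come from platform data, the write is dispatched on that width, and probe rejects anything other than 8 or 16 bits. A userspace sketch of the width dispatch, assuming a flat byte array standing in for the register window (write_msg_ctl() and the layout are illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t regs[0x100];   /* flat stand-in for the SPI register window */

    /* Width-dispatched write of MSG_CTL: the controller variant (platform
     * data in the driver) decides whether the register is 8 or 16 bits wide. */
    static int write_msg_ctl(unsigned int width, uint16_t msg_ctl, unsigned int off)
    {
        switch (width) {
        case 8:
            regs[off] = (uint8_t)msg_ctl;                   /* writeb analog */
            return 0;
        case 16:
            memcpy(&regs[off], &msg_ctl, sizeof(msg_ctl));  /* writew analog */
            return 0;
        default:
            fprintf(stderr, "unsupported MSG_CTL width: %u\n", width);
            return -1;                                      /* probe bails out */
        }
    }

    int main(void)
    {
        return write_msg_ctl(16, 0x1234, 0x10) || write_msg_ctl(8, 0x56, 0x10);
    }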
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 3fe82d0e8caa..5b06d31ab6a9 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -166,18 +166,17 @@ static long booke_wdt_ioctl(struct file *file,
166 166
167 switch (cmd) { 167 switch (cmd) {
168 case WDIOC_GETSUPPORT: 168 case WDIOC_GETSUPPORT:
169 if (copy_to_user((void *)arg, &ident, sizeof(ident))) 169 return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0;
170 return -EFAULT;
171 case WDIOC_GETSTATUS: 170 case WDIOC_GETSTATUS:
172 return put_user(0, p); 171 return put_user(0, p);
173 case WDIOC_GETBOOTSTATUS: 172 case WDIOC_GETBOOTSTATUS:
174 /* XXX: something is clearing TSR */ 173 /* XXX: something is clearing TSR */
175 tmp = mfspr(SPRN_TSR) & TSR_WRS(3); 174 tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
176 /* returns CARDRESET if last reset was caused by the WDT */ 175 /* returns CARDRESET if last reset was caused by the WDT */
177 return (tmp ? WDIOF_CARDRESET : 0); 176 return put_user((tmp ? WDIOF_CARDRESET : 0), p);
178 case WDIOC_SETOPTIONS: 177 case WDIOC_SETOPTIONS:
179 if (get_user(tmp, p)) 178 if (get_user(tmp, p))
180 return -EINVAL; 179 return -EFAULT;
181 if (tmp == WDIOS_ENABLECARD) { 180 if (tmp == WDIOS_ENABLECARD) {
182 booke_wdt_ping(); 181 booke_wdt_ping();
183 break; 182 break;
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 3f75129eb0a9..f7abbaeebcaf 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -21,7 +21,6 @@
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/jiffies.h> 23#include <linux/jiffies.h>
24#include <linux/delay.h>
25 24
26#include <linux/mfd/da9052/reg.h> 25#include <linux/mfd/da9052/reg.h>
27#include <linux/mfd/da9052/da9052.h> 26#include <linux/mfd/da9052/da9052.h>
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index d4c50d63acbc..97ca359ae2bd 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -101,19 +101,6 @@ static int platform_pci_resume(struct pci_dev *pdev)
101 return 0; 101 return 0;
102} 102}
103 103
104static void __devinit prepare_shared_info(void)
105{
106#ifdef CONFIG_KEXEC
107 unsigned long addr;
108 struct shared_info *hvm_shared_info;
109
110 addr = alloc_xen_mmio(PAGE_SIZE);
111 hvm_shared_info = ioremap(addr, PAGE_SIZE);
112 memset(hvm_shared_info, 0, PAGE_SIZE);
113 xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT);
114#endif
115}
116
117static int __devinit platform_pci_init(struct pci_dev *pdev, 104static int __devinit platform_pci_init(struct pci_dev *pdev,
118 const struct pci_device_id *ent) 105 const struct pci_device_id *ent)
119{ 106{
@@ -151,8 +138,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
151 platform_mmio = mmio_addr; 138 platform_mmio = mmio_addr;
152 platform_mmiolen = mmio_len; 139 platform_mmiolen = mmio_len;
153 140
154 prepare_shared_info();
155
156 if (!xen_have_vector_callback) { 141 if (!xen_have_vector_callback) {
157 ret = xen_allocate_irq(pdev); 142 ret = xen_allocate_irq(pdev);
158 if (ret) { 143 if (ret) {
diff --git a/fs/bio.c b/fs/bio.c
index 5eaa70c9d96e..71072ab99128 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -73,7 +73,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
73{ 73{
74 unsigned int sz = sizeof(struct bio) + extra_size; 74 unsigned int sz = sizeof(struct bio) + extra_size;
75 struct kmem_cache *slab = NULL; 75 struct kmem_cache *slab = NULL;
76 struct bio_slab *bslab; 76 struct bio_slab *bslab, *new_bio_slabs;
77 unsigned int i, entry = -1; 77 unsigned int i, entry = -1;
78 78
79 mutex_lock(&bio_slab_lock); 79 mutex_lock(&bio_slab_lock);
@@ -97,11 +97,12 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
97 97
98 if (bio_slab_nr == bio_slab_max && entry == -1) { 98 if (bio_slab_nr == bio_slab_max && entry == -1) {
99 bio_slab_max <<= 1; 99 bio_slab_max <<= 1;
100 bio_slabs = krealloc(bio_slabs, 100 new_bio_slabs = krealloc(bio_slabs,
101 bio_slab_max * sizeof(struct bio_slab), 101 bio_slab_max * sizeof(struct bio_slab),
102 GFP_KERNEL); 102 GFP_KERNEL);
103 if (!bio_slabs) 103 if (!new_bio_slabs)
104 goto out_unlock; 104 goto out_unlock;
105 bio_slabs = new_bio_slabs;
105 } 106 }
106 if (entry == -1) 107 if (entry == -1)
107 entry = bio_slab_nr++; 108 entry = bio_slab_nr++;
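The fs/bio.c fix is the classic realloc pattern: krealloc()'s result must land in a temporary so the original bio_slabs allocation is neither lost nor leaked if the grow fails. The same idea in plain C, with a hypothetical grow_table() helper:

    #include <stdlib.h>

    /* Growing a table the way the bio_slabs fix does: keep the old pointer
     * until realloc() is known to have succeeded, otherwise the original
     * block is lost (and leaked) when the allocation fails. */
    static int grow_table(int **table, size_t *cap)
    {
        size_t new_cap = *cap ? *cap * 2 : 8;
        int *new_table = realloc(*table, new_cap * sizeof(**table));

        if (!new_table)
            return -1;          /* *table is still valid, caller keeps it */

        *table = new_table;     /* commit only after success */
        *cap = new_cap;
        return 0;
    }

    int main(void)
    {
        int *table = NULL;
        size_t cap = 0;
        int rc = grow_table(&table, &cap);

        free(table);
        return rc;
    }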
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1e519195d45b..38e721b35d45 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1578,10 +1578,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
1578 unsigned long nr_segs, loff_t pos) 1578 unsigned long nr_segs, loff_t pos)
1579{ 1579{
1580 struct file *file = iocb->ki_filp; 1580 struct file *file = iocb->ki_filp;
1581 struct blk_plug plug;
1581 ssize_t ret; 1582 ssize_t ret;
1582 1583
1583 BUG_ON(iocb->ki_pos != pos); 1584 BUG_ON(iocb->ki_pos != pos);
1584 1585
1586 blk_start_plug(&plug);
1585 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 1587 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
1586 if (ret > 0 || ret == -EIOCBQUEUED) { 1588 if (ret > 0 || ret == -EIOCBQUEUED) {
1587 ssize_t err; 1589 ssize_t err;
@@ -1590,6 +1592,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
1590 if (err < 0 && ret > 0) 1592 if (err < 0 && ret > 0)
1591 ret = err; 1593 ret = err;
1592 } 1594 }
1595 blk_finish_plug(&plug);
1593 return ret; 1596 return ret;
1594} 1597}
1595EXPORT_SYMBOL_GPL(blkdev_aio_write); 1598EXPORT_SYMBOL_GPL(blkdev_aio_write);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index a256f3b2a845..ff6475f409d6 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1438 ret = extent_from_logical(fs_info, logical, path, 1438 ret = extent_from_logical(fs_info, logical, path,
1439 &found_key); 1439 &found_key);
1440 btrfs_release_path(path); 1440 btrfs_release_path(path);
1441 if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1442 ret = -EINVAL;
1443 if (ret < 0) 1441 if (ret < 0)
1444 return ret; 1442 return ret;
1443 if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1444 return -EINVAL;
1445 1445
1446 extent_item_pos = logical - found_key.objectid; 1446 extent_item_pos = logical - found_key.objectid;
1447 ret = iterate_extent_inodes(fs_info, found_key.objectid, 1447 ret = iterate_extent_inodes(fs_info, found_key.objectid,
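The backref.c reorder matters because extent_from_logical() returns either a negative errno or a bitmask of extent flags; the negative case has to be rejected before the flag test, otherwise a real error can be misread as flags and clobbered with -EINVAL. A standalone sketch of that ordering, using an illustrative flag value rather than the real BTRFS_EXTENT_FLAG_TREE_BLOCK:

    #include <errno.h>

    #define FLAG_TREE_BLOCK 0x2   /* illustrative flag bit, not the btrfs value */

    /* Like extent_from_logical(): negative errno on failure, flag bits on
     * success. Purely a sketch. */
    static int lookup(int fail)
    {
        return fail ? -EIO : FLAG_TREE_BLOCK;
    }

    static int iterate(int fail)
    {
        int ret = lookup(fail);

        /* Reject real errors first; only then may ret be read as flags.
         * Testing the flag bit on a negative errno (the old order) can both
         * misinterpret the value and clobber the original error code. */
        if (ret < 0)
            return ret;
        if (ret & FLAG_TREE_BLOCK)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        return iterate(0) == -EINVAL ? 0 : 1;
    }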
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 86eff48dab78..43d1c5a3a030 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace)
818 btrfs_compress_op[idx]->free_workspace(workspace); 818 btrfs_compress_op[idx]->free_workspace(workspace);
819 atomic_dec(alloc_workspace); 819 atomic_dec(alloc_workspace);
820wake: 820wake:
821 smp_mb();
821 if (waitqueue_active(workspace_wait)) 822 if (waitqueue_active(workspace_wait))
822 wake_up(workspace_wait); 823 wake_up(workspace_wait);
823} 824}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 9d7621f271ff..6d183f60d63a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -421,12 +421,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
421 spin_unlock(&fs_info->tree_mod_seq_lock); 421 spin_unlock(&fs_info->tree_mod_seq_lock);
422 422
423 /* 423 /*
424 * we removed the lowest blocker from the blocker list, so there may be
425 * more processible delayed refs.
426 */
427 wake_up(&fs_info->tree_mod_seq_wait);
428
429 /*
430 * anything that's lower than the lowest existing (read: blocked) 424 * anything that's lower than the lowest existing (read: blocked)
431 * sequence number can be removed from the tree. 425 * sequence number can be removed from the tree.
432 */ 426 */
@@ -631,6 +625,9 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
631 u32 nritems; 625 u32 nritems;
632 int ret; 626 int ret;
633 627
628 if (btrfs_header_level(eb) == 0)
629 return;
630
634 nritems = btrfs_header_nritems(eb); 631 nritems = btrfs_header_nritems(eb);
635 for (i = nritems - 1; i >= 0; i--) { 632 for (i = nritems - 1; i >= 0; i--) {
636 ret = tree_mod_log_insert_key_locked(fs_info, eb, i, 633 ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4bab807227ad..0d195b507660 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1252,7 +1252,6 @@ struct btrfs_fs_info {
1252 atomic_t tree_mod_seq; 1252 atomic_t tree_mod_seq;
1253 struct list_head tree_mod_seq_list; 1253 struct list_head tree_mod_seq_list;
1254 struct seq_list tree_mod_seq_elem; 1254 struct seq_list tree_mod_seq_elem;
1255 wait_queue_head_t tree_mod_seq_wait;
1256 1255
1257 /* this protects tree_mod_log */ 1256 /* this protects tree_mod_log */
1258 rwlock_t tree_mod_log_lock; 1257 rwlock_t tree_mod_log_lock;
@@ -3192,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
3192int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, 3191int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
3193 struct bio *bio, u32 *dst); 3192 struct bio *bio, u32 *dst);
3194int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, 3193int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
3195 struct bio *bio, u64 logical_offset, u32 *dst); 3194 struct bio *bio, u64 logical_offset);
3196int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 3195int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
3197 struct btrfs_root *root, 3196 struct btrfs_root *root,
3198 u64 objectid, u64 pos, 3197 u64 objectid, u64 pos,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 335605c8ceab..07d5eeb1e6f1 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
512 512
513 rb_erase(&delayed_item->rb_node, root); 513 rb_erase(&delayed_item->rb_node, root);
514 delayed_item->delayed_node->count--; 514 delayed_item->delayed_node->count--;
515 atomic_dec(&delayed_root->items); 515 if (atomic_dec_return(&delayed_root->items) <
516 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && 516 BTRFS_DELAYED_BACKGROUND &&
517 waitqueue_active(&delayed_root->wait)) 517 waitqueue_active(&delayed_root->wait))
518 wake_up(&delayed_root->wait); 518 wake_up(&delayed_root->wait);
519} 519}
@@ -1028,9 +1028,10 @@ do_again:
1028 btrfs_release_delayed_item(prev); 1028 btrfs_release_delayed_item(prev);
1029 ret = 0; 1029 ret = 0;
1030 btrfs_release_path(path); 1030 btrfs_release_path(path);
1031 if (curr) 1031 if (curr) {
1032 mutex_unlock(&node->mutex);
1032 goto do_again; 1033 goto do_again;
1033 else 1034 } else
1034 goto delete_fail; 1035 goto delete_fail;
1035 } 1036 }
1036 1037
@@ -1055,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
1055 delayed_node->count--; 1056 delayed_node->count--;
1056 1057
1057 delayed_root = delayed_node->root->fs_info->delayed_root; 1058 delayed_root = delayed_node->root->fs_info->delayed_root;
1058 atomic_dec(&delayed_root->items); 1059 if (atomic_dec_return(&delayed_root->items) <
1059 if (atomic_read(&delayed_root->items) <
1060 BTRFS_DELAYED_BACKGROUND && 1060 BTRFS_DELAYED_BACKGROUND &&
1061 waitqueue_active(&delayed_root->wait)) 1061 waitqueue_active(&delayed_root->wait))
1062 wake_up(&delayed_root->wait); 1062 wake_up(&delayed_root->wait);
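Both delayed-inode.c hunks switch from "atomic_dec then atomic_read" to atomic_dec_return(), so the wakeup decision is made on the value this CPU's own decrement produced rather than on a counter another CPU may already have changed. A C11-atomics sketch of the same idea; BACKGROUND_THRESHOLD and wake_waiters() are stand-ins, not btrfs symbols:

    #include <stdatomic.h>
    #include <stdio.h>

    #define BACKGROUND_THRESHOLD 128   /* stand-in for BTRFS_DELAYED_BACKGROUND */

    static atomic_int items = ATOMIC_VAR_INIT(129);

    static void wake_waiters(void) { puts("wake"); }   /* wake_up() stand-in */

    /* Decrement and test in one step: the decision uses the value this
     * thread's own decrement produced, not a later re-read that another
     * thread may already have changed. */
    static void release_item(void)
    {
        if (atomic_fetch_sub(&items, 1) - 1 < BACKGROUND_THRESHOLD)
            wake_waiters();
    }

    int main(void)
    {
        release_item();   /* 129 -> 128: still at the threshold, no wake */
        release_item();   /* 128 -> 127: below the threshold, wake */
        return 0;
    }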
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index da7419ed01bb..ae9411773397 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -38,17 +38,14 @@
38static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, 38static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
39 struct btrfs_delayed_tree_ref *ref1) 39 struct btrfs_delayed_tree_ref *ref1)
40{ 40{
41 if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) { 41 if (ref1->root < ref2->root)
42 if (ref1->root < ref2->root) 42 return -1;
43 return -1; 43 if (ref1->root > ref2->root)
44 if (ref1->root > ref2->root) 44 return 1;
45 return 1; 45 if (ref1->parent < ref2->parent)
46 } else { 46 return -1;
47 if (ref1->parent < ref2->parent) 47 if (ref1->parent > ref2->parent)
48 return -1; 48 return 1;
49 if (ref1->parent > ref2->parent)
50 return 1;
51 }
52 return 0; 49 return 0;
53} 50}
54 51
@@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
85 * type of the delayed backrefs and content of delayed backrefs. 82 * type of the delayed backrefs and content of delayed backrefs.
86 */ 83 */
87static int comp_entry(struct btrfs_delayed_ref_node *ref2, 84static int comp_entry(struct btrfs_delayed_ref_node *ref2,
88 struct btrfs_delayed_ref_node *ref1) 85 struct btrfs_delayed_ref_node *ref1,
86 bool compare_seq)
89{ 87{
90 if (ref1->bytenr < ref2->bytenr) 88 if (ref1->bytenr < ref2->bytenr)
91 return -1; 89 return -1;
@@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
102 if (ref1->type > ref2->type) 100 if (ref1->type > ref2->type)
103 return 1; 101 return 1;
104 /* merging of sequenced refs is not allowed */ 102 /* merging of sequenced refs is not allowed */
105 if (ref1->seq < ref2->seq) 103 if (compare_seq) {
106 return -1; 104 if (ref1->seq < ref2->seq)
107 if (ref1->seq > ref2->seq) 105 return -1;
108 return 1; 106 if (ref1->seq > ref2->seq)
107 return 1;
108 }
109 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || 109 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
110 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { 110 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
111 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), 111 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
139 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, 139 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
140 rb_node); 140 rb_node);
141 141
142 cmp = comp_entry(entry, ins); 142 cmp = comp_entry(entry, ins, 1);
143 if (cmp < 0) 143 if (cmp < 0)
144 p = &(*p)->rb_left; 144 p = &(*p)->rb_left;
145 else if (cmp > 0) 145 else if (cmp > 0)
@@ -233,6 +233,114 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
233 return 0; 233 return 0;
234} 234}
235 235
236static void inline drop_delayed_ref(struct btrfs_trans_handle *trans,
237 struct btrfs_delayed_ref_root *delayed_refs,
238 struct btrfs_delayed_ref_node *ref)
239{
240 rb_erase(&ref->rb_node, &delayed_refs->root);
241 ref->in_tree = 0;
242 btrfs_put_delayed_ref(ref);
243 delayed_refs->num_entries--;
244 if (trans->delayed_ref_updates)
245 trans->delayed_ref_updates--;
246}
247
248static int merge_ref(struct btrfs_trans_handle *trans,
249 struct btrfs_delayed_ref_root *delayed_refs,
250 struct btrfs_delayed_ref_node *ref, u64 seq)
251{
252 struct rb_node *node;
253 int merged = 0;
254 int mod = 0;
255 int done = 0;
256
257 node = rb_prev(&ref->rb_node);
258 while (node) {
259 struct btrfs_delayed_ref_node *next;
260
261 next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
262 node = rb_prev(node);
263 if (next->bytenr != ref->bytenr)
264 break;
265 if (seq && next->seq >= seq)
266 break;
267 if (comp_entry(ref, next, 0))
268 continue;
269
270 if (ref->action == next->action) {
271 mod = next->ref_mod;
272 } else {
273 if (ref->ref_mod < next->ref_mod) {
274 struct btrfs_delayed_ref_node *tmp;
275
276 tmp = ref;
277 ref = next;
278 next = tmp;
279 done = 1;
280 }
281 mod = -next->ref_mod;
282 }
283
284 merged++;
285 drop_delayed_ref(trans, delayed_refs, next);
286 ref->ref_mod += mod;
287 if (ref->ref_mod == 0) {
288 drop_delayed_ref(trans, delayed_refs, ref);
289 break;
290 } else {
291 /*
292 * You can't have multiples of the same ref on a tree
293 * block.
294 */
295 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
296 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
297 }
298
299 if (done)
300 break;
301 node = rb_prev(&ref->rb_node);
302 }
303
304 return merged;
305}
306
307void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
308 struct btrfs_fs_info *fs_info,
309 struct btrfs_delayed_ref_root *delayed_refs,
310 struct btrfs_delayed_ref_head *head)
311{
312 struct rb_node *node;
313 u64 seq = 0;
314
315 spin_lock(&fs_info->tree_mod_seq_lock);
316 if (!list_empty(&fs_info->tree_mod_seq_list)) {
317 struct seq_list *elem;
318
319 elem = list_first_entry(&fs_info->tree_mod_seq_list,
320 struct seq_list, list);
321 seq = elem->seq;
322 }
323 spin_unlock(&fs_info->tree_mod_seq_lock);
324
325 node = rb_prev(&head->node.rb_node);
326 while (node) {
327 struct btrfs_delayed_ref_node *ref;
328
329 ref = rb_entry(node, struct btrfs_delayed_ref_node,
330 rb_node);
331 if (ref->bytenr != head->node.bytenr)
332 break;
333
334 /* We can't merge refs that are outside of our seq count */
335 if (seq && ref->seq >= seq)
336 break;
337 if (merge_ref(trans, delayed_refs, ref, seq))
338 node = rb_prev(&head->node.rb_node);
339 else
340 node = rb_prev(node);
341 }
342}
343
236int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, 344int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
237 struct btrfs_delayed_ref_root *delayed_refs, 345 struct btrfs_delayed_ref_root *delayed_refs,
238 u64 seq) 346 u64 seq)
@@ -336,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans,
336 * every changing the extent allocation tree. 444 * every changing the extent allocation tree.
337 */ 445 */
338 existing->ref_mod--; 446 existing->ref_mod--;
339 if (existing->ref_mod == 0) { 447 if (existing->ref_mod == 0)
340 rb_erase(&existing->rb_node, 448 drop_delayed_ref(trans, delayed_refs, existing);
341 &delayed_refs->root); 449 else
342 existing->in_tree = 0;
343 btrfs_put_delayed_ref(existing);
344 delayed_refs->num_entries--;
345 if (trans->delayed_ref_updates)
346 trans->delayed_ref_updates--;
347 } else {
348 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || 450 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
349 existing->type == BTRFS_SHARED_BLOCK_REF_KEY); 451 existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
350 }
351 } else { 452 } else {
352 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || 453 WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
353 existing->type == BTRFS_SHARED_BLOCK_REF_KEY); 454 existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
@@ -662,9 +763,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
662 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, 763 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
663 num_bytes, parent, ref_root, level, action, 764 num_bytes, parent, ref_root, level, action,
664 for_cow); 765 for_cow);
665 if (!need_ref_seq(for_cow, ref_root) &&
666 waitqueue_active(&fs_info->tree_mod_seq_wait))
667 wake_up(&fs_info->tree_mod_seq_wait);
668 spin_unlock(&delayed_refs->lock); 766 spin_unlock(&delayed_refs->lock);
669 if (need_ref_seq(for_cow, ref_root)) 767 if (need_ref_seq(for_cow, ref_root))
670 btrfs_qgroup_record_ref(trans, &ref->node, extent_op); 768 btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -713,9 +811,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
713 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, 811 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
714 num_bytes, parent, ref_root, owner, offset, 812 num_bytes, parent, ref_root, owner, offset,
715 action, for_cow); 813 action, for_cow);
716 if (!need_ref_seq(for_cow, ref_root) &&
717 waitqueue_active(&fs_info->tree_mod_seq_wait))
718 wake_up(&fs_info->tree_mod_seq_wait);
719 spin_unlock(&delayed_refs->lock); 814 spin_unlock(&delayed_refs->lock);
720 if (need_ref_seq(for_cow, ref_root)) 815 if (need_ref_seq(for_cow, ref_root))
721 btrfs_qgroup_record_ref(trans, &ref->node, extent_op); 816 btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -744,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
744 num_bytes, BTRFS_UPDATE_DELAYED_HEAD, 839 num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
745 extent_op->is_data); 840 extent_op->is_data);
746 841
747 if (waitqueue_active(&fs_info->tree_mod_seq_wait))
748 wake_up(&fs_info->tree_mod_seq_wait);
749 spin_unlock(&delayed_refs->lock); 842 spin_unlock(&delayed_refs->lock);
750 return 0; 843 return 0;
751} 844}
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 0d7c90c366b6..ab5300595847 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -167,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
167 struct btrfs_trans_handle *trans, 167 struct btrfs_trans_handle *trans,
168 u64 bytenr, u64 num_bytes, 168 u64 bytenr, u64 num_bytes,
169 struct btrfs_delayed_extent_op *extent_op); 169 struct btrfs_delayed_extent_op *extent_op);
170void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
171 struct btrfs_fs_info *fs_info,
172 struct btrfs_delayed_ref_root *delayed_refs,
173 struct btrfs_delayed_ref_head *head);
170 174
171struct btrfs_delayed_ref_head * 175struct btrfs_delayed_ref_head *
172btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); 176btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 62e0cafd6e25..22e98e04c2ea 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
377 ret = read_extent_buffer_pages(io_tree, eb, start, 377 ret = read_extent_buffer_pages(io_tree, eb, start,
378 WAIT_COMPLETE, 378 WAIT_COMPLETE,
379 btree_get_extent, mirror_num); 379 btree_get_extent, mirror_num);
380 if (!ret && !verify_parent_transid(io_tree, eb, 380 if (!ret) {
381 if (!verify_parent_transid(io_tree, eb,
381 parent_transid, 0)) 382 parent_transid, 0))
382 break; 383 break;
384 else
385 ret = -EIO;
386 }
383 387
384 /* 388 /*
385 * This buffer's crc is fine, but its contents are corrupted, so 389 * This buffer's crc is fine, but its contents are corrupted, so
@@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work)
754 limit = btrfs_async_submit_limit(fs_info); 758 limit = btrfs_async_submit_limit(fs_info);
755 limit = limit * 2 / 3; 759 limit = limit * 2 / 3;
756 760
757 atomic_dec(&fs_info->nr_async_submits); 761 if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
758
759 if (atomic_read(&fs_info->nr_async_submits) < limit &&
760 waitqueue_active(&fs_info->async_submit_wait)) 762 waitqueue_active(&fs_info->async_submit_wait))
761 wake_up(&fs_info->async_submit_wait); 763 wake_up(&fs_info->async_submit_wait);
762 764
@@ -2032,8 +2034,6 @@ int open_ctree(struct super_block *sb,
2032 fs_info->free_chunk_space = 0; 2034 fs_info->free_chunk_space = 0;
2033 fs_info->tree_mod_log = RB_ROOT; 2035 fs_info->tree_mod_log = RB_ROOT;
2034 2036
2035 init_waitqueue_head(&fs_info->tree_mod_seq_wait);
2036
2037 /* readahead state */ 2037 /* readahead state */
2038 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); 2038 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2039 spin_lock_init(&fs_info->reada_lock); 2039 spin_lock_init(&fs_info->reada_lock);
@@ -2528,8 +2528,7 @@ retry_root_backup:
2528 goto fail_trans_kthread; 2528 goto fail_trans_kthread;
2529 2529
2530 /* do not make disk changes in broken FS */ 2530 /* do not make disk changes in broken FS */
2531 if (btrfs_super_log_root(disk_super) != 0 && 2531 if (btrfs_super_log_root(disk_super) != 0) {
2532 !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2533 u64 bytenr = btrfs_super_log_root(disk_super); 2532 u64 bytenr = btrfs_super_log_root(disk_super);
2534 2533
2535 if (fs_devices->rw_devices == 0) { 2534 if (fs_devices->rw_devices == 0) {
@@ -3189,30 +3188,14 @@ int close_ctree(struct btrfs_root *root)
3189 /* clear out the rbtree of defraggable inodes */ 3188 /* clear out the rbtree of defraggable inodes */
3190 btrfs_run_defrag_inodes(fs_info); 3189 btrfs_run_defrag_inodes(fs_info);
3191 3190
3192 /*
3193 * Here come 2 situations when btrfs is broken to flip readonly:
3194 *
3195 * 1. when btrfs flips readonly somewhere else before
3196 * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
3197 * and btrfs will skip to write sb directly to keep
3198 * ERROR state on disk.
3199 *
3200 * 2. when btrfs flips readonly just in btrfs_commit_super,
3201 * and in such case, btrfs cannot write sb via btrfs_commit_super,
3202 * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
3203 * btrfs will cleanup all FS resources first and write sb then.
3204 */
3205 if (!(fs_info->sb->s_flags & MS_RDONLY)) { 3191 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3206 ret = btrfs_commit_super(root); 3192 ret = btrfs_commit_super(root);
3207 if (ret) 3193 if (ret)
3208 printk(KERN_ERR "btrfs: commit super ret %d\n", ret); 3194 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3209 } 3195 }
3210 3196
3211 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 3197 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
3212 ret = btrfs_error_commit_super(root); 3198 btrfs_error_commit_super(root);
3213 if (ret)
3214 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3215 }
3216 3199
3217 btrfs_put_block_group_cache(fs_info); 3200 btrfs_put_block_group_cache(fs_info);
3218 3201
@@ -3434,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3434 if (read_only) 3417 if (read_only)
3435 return 0; 3418 return 0;
3436 3419
3437 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
3438 printk(KERN_WARNING "warning: mount fs with errors, "
3439 "running btrfsck is recommended\n");
3440 }
3441
3442 return 0; 3420 return 0;
3443} 3421}
3444 3422
3445int btrfs_error_commit_super(struct btrfs_root *root) 3423void btrfs_error_commit_super(struct btrfs_root *root)
3446{ 3424{
3447 int ret;
3448
3449 mutex_lock(&root->fs_info->cleaner_mutex); 3425 mutex_lock(&root->fs_info->cleaner_mutex);
3450 btrfs_run_delayed_iputs(root); 3426 btrfs_run_delayed_iputs(root);
3451 mutex_unlock(&root->fs_info->cleaner_mutex); 3427 mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -3455,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root)
3455 3431
3456 /* cleanup FS via transaction */ 3432 /* cleanup FS via transaction */
3457 btrfs_cleanup_transaction(root); 3433 btrfs_cleanup_transaction(root);
3458
3459 ret = write_ctree_super(NULL, root, 0);
3460
3461 return ret;
3462} 3434}
3463 3435
3464static void btrfs_destroy_ordered_operations(struct btrfs_root *root) 3436static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
@@ -3782,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
3782 /* FIXME: cleanup wait for commit */ 3754 /* FIXME: cleanup wait for commit */
3783 t->in_commit = 1; 3755 t->in_commit = 1;
3784 t->blocked = 1; 3756 t->blocked = 1;
3757 smp_mb();
3785 if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) 3758 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3786 wake_up(&root->fs_info->transaction_blocked_wait); 3759 wake_up(&root->fs_info->transaction_blocked_wait);
3787 3760
3788 t->blocked = 0; 3761 t->blocked = 0;
3762 smp_mb();
3789 if (waitqueue_active(&root->fs_info->transaction_wait)) 3763 if (waitqueue_active(&root->fs_info->transaction_wait))
3790 wake_up(&root->fs_info->transaction_wait); 3764 wake_up(&root->fs_info->transaction_wait);
3791 3765
3792 t->commit_done = 1; 3766 t->commit_done = 1;
3767 smp_mb();
3793 if (waitqueue_active(&t->commit_wait)) 3768 if (waitqueue_active(&t->commit_wait))
3794 wake_up(&t->commit_wait); 3769 wake_up(&t->commit_wait);
3795 3770
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 95e147eea239..c5b00a735fef 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
54 struct btrfs_root *root, int max_mirrors); 54 struct btrfs_root *root, int max_mirrors);
55struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); 55struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
56int btrfs_commit_super(struct btrfs_root *root); 56int btrfs_commit_super(struct btrfs_root *root);
57int btrfs_error_commit_super(struct btrfs_root *root); 57void btrfs_error_commit_super(struct btrfs_root *root);
58struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, 58struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
59 u64 bytenr, u32 blocksize); 59 u64 bytenr, u32 blocksize);
60struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, 60struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4e1b153b7c47..ba58024d40d3 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2252,6 +2252,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2252 } 2252 }
2253 2253
2254 /* 2254 /*
2255 * We need to try and merge add/drops of the same ref since we
2256 * can run into issues with relocate dropping the implicit ref
2257 * and then it being added back again before the drop can
2258 * finish. If we merged anything we need to re-loop so we can
2259 * get a good ref.
2260 */
2261 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2262 locked_ref);
2263
2264 /*
2255 * locked_ref is the head node, so we have to go one 2265 * locked_ref is the head node, so we have to go one
2256 * node back for any delayed ref updates 2266 * node back for any delayed ref updates
2257 */ 2267 */
@@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2318 ref->in_tree = 0; 2328 ref->in_tree = 0;
2319 rb_erase(&ref->rb_node, &delayed_refs->root); 2329 rb_erase(&ref->rb_node, &delayed_refs->root);
2320 delayed_refs->num_entries--; 2330 delayed_refs->num_entries--;
2321 /* 2331 if (locked_ref) {
2322 * we modified num_entries, but as we're currently running 2332 /*
2323 * delayed refs, skip 2333 * when we play the delayed ref, also correct the
2324 * wake_up(&delayed_refs->seq_wait); 2334 * ref_mod on head
2325 * here. 2335 */
2326 */ 2336 switch (ref->action) {
2337 case BTRFS_ADD_DELAYED_REF:
2338 case BTRFS_ADD_DELAYED_EXTENT:
2339 locked_ref->node.ref_mod -= ref->ref_mod;
2340 break;
2341 case BTRFS_DROP_DELAYED_REF:
2342 locked_ref->node.ref_mod += ref->ref_mod;
2343 break;
2344 default:
2345 WARN_ON(1);
2346 }
2347 }
2327 spin_unlock(&delayed_refs->lock); 2348 spin_unlock(&delayed_refs->lock);
2328 2349
2329 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2350 ret = run_one_delayed_ref(trans, root, ref, extent_op,
@@ -2350,22 +2371,6 @@ next:
2350 return count; 2371 return count;
2351} 2372}
2352 2373
2353static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
2354 struct btrfs_delayed_ref_root *delayed_refs,
2355 unsigned long num_refs,
2356 struct list_head *first_seq)
2357{
2358 spin_unlock(&delayed_refs->lock);
2359 pr_debug("waiting for more refs (num %ld, first %p)\n",
2360 num_refs, first_seq);
2361 wait_event(fs_info->tree_mod_seq_wait,
2362 num_refs != delayed_refs->num_entries ||
2363 fs_info->tree_mod_seq_list.next != first_seq);
2364 pr_debug("done waiting for more refs (num %ld, first %p)\n",
2365 delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
2366 spin_lock(&delayed_refs->lock);
2367}
2368
2369#ifdef SCRAMBLE_DELAYED_REFS 2374#ifdef SCRAMBLE_DELAYED_REFS
2370/* 2375/*
2371 * Normally delayed refs get processed in ascending bytenr order. This 2376 * Normally delayed refs get processed in ascending bytenr order. This
@@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2460 struct btrfs_delayed_ref_root *delayed_refs; 2465 struct btrfs_delayed_ref_root *delayed_refs;
2461 struct btrfs_delayed_ref_node *ref; 2466 struct btrfs_delayed_ref_node *ref;
2462 struct list_head cluster; 2467 struct list_head cluster;
2463 struct list_head *first_seq = NULL;
2464 int ret; 2468 int ret;
2465 u64 delayed_start; 2469 u64 delayed_start;
2466 int run_all = count == (unsigned long)-1; 2470 int run_all = count == (unsigned long)-1;
2467 int run_most = 0; 2471 int run_most = 0;
2468 unsigned long num_refs = 0; 2472 int loops;
2469 int consider_waiting;
2470 2473
2471 /* We'll clean this up in btrfs_cleanup_transaction */ 2474 /* We'll clean this up in btrfs_cleanup_transaction */
2472 if (trans->aborted) 2475 if (trans->aborted)
@@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2484 delayed_refs = &trans->transaction->delayed_refs; 2487 delayed_refs = &trans->transaction->delayed_refs;
2485 INIT_LIST_HEAD(&cluster); 2488 INIT_LIST_HEAD(&cluster);
2486again: 2489again:
2487 consider_waiting = 0; 2490 loops = 0;
2488 spin_lock(&delayed_refs->lock); 2491 spin_lock(&delayed_refs->lock);
2489 2492
2490#ifdef SCRAMBLE_DELAYED_REFS 2493#ifdef SCRAMBLE_DELAYED_REFS
@@ -2512,31 +2515,6 @@ again:
2512 if (ret) 2515 if (ret)
2513 break; 2516 break;
2514 2517
2515 if (delayed_start >= delayed_refs->run_delayed_start) {
2516 if (consider_waiting == 0) {
2517 /*
2518 * btrfs_find_ref_cluster looped. let's do one
2519 * more cycle. if we don't run any delayed ref
2520 * during that cycle (because we can't because
2521 * all of them are blocked) and if the number of
2522 * refs doesn't change, we avoid busy waiting.
2523 */
2524 consider_waiting = 1;
2525 num_refs = delayed_refs->num_entries;
2526 first_seq = root->fs_info->tree_mod_seq_list.next;
2527 } else {
2528 wait_for_more_refs(root->fs_info, delayed_refs,
2529 num_refs, first_seq);
2530 /*
2531 * after waiting, things have changed. we
2532 * dropped the lock and someone else might have
2533 * run some refs, built new clusters and so on.
2534 * therefore, we restart staleness detection.
2535 */
2536 consider_waiting = 0;
2537 }
2538 }
2539
2540 ret = run_clustered_refs(trans, root, &cluster); 2518 ret = run_clustered_refs(trans, root, &cluster);
2541 if (ret < 0) { 2519 if (ret < 0) {
2542 spin_unlock(&delayed_refs->lock); 2520 spin_unlock(&delayed_refs->lock);
@@ -2549,9 +2527,26 @@ again:
2549 if (count == 0) 2527 if (count == 0)
2550 break; 2528 break;
2551 2529
2552 if (ret || delayed_refs->run_delayed_start == 0) { 2530 if (delayed_start >= delayed_refs->run_delayed_start) {
2531 if (loops == 0) {
2532 /*
2533 * btrfs_find_ref_cluster looped. let's do one
2534 * more cycle. if we don't run any delayed ref
2535 * during that cycle (because we can't because
2536 * all of them are blocked), bail out.
2537 */
2538 loops = 1;
2539 } else {
2540 /*
2541 * no runnable refs left, stop trying
2542 */
2543 BUG_ON(run_all);
2544 break;
2545 }
2546 }
2547 if (ret) {
2553 /* refs were run, let's reset staleness detection */ 2548 /* refs were run, let's reset staleness detection */
2554 consider_waiting = 0; 2549 loops = 0;
2555 } 2550 }
2556 } 2551 }
2557 2552
@@ -3007,17 +3002,16 @@ again:
3007 } 3002 }
3008 spin_unlock(&block_group->lock); 3003 spin_unlock(&block_group->lock);
3009 3004
3010 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); 3005 /*
3006 * Try to preallocate enough space based on how big the block group is.
3007 * Keep in mind this has to include any pinned space which could end up
3008 * taking up quite a bit since it's not folded into the other space
3009 * cache.
3010 */
3011 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3011 if (!num_pages) 3012 if (!num_pages)
3012 num_pages = 1; 3013 num_pages = 1;
3013 3014
3014 /*
3015 * Just to make absolutely sure we have enough space, we're going to
3016 * preallocate 12 pages worth of space for each block group. In
3017 * practice we ought to use at most 8, but we need extra space so we can
3018 * add our header and have a terminator between the extents and the
3019 * bitmaps.
3020 */
3021 num_pages *= 16; 3015 num_pages *= 16;
3022 num_pages *= PAGE_CACHE_SIZE; 3016 num_pages *= PAGE_CACHE_SIZE;
3023 3017
@@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4571 if (root->fs_info->quota_enabled) { 4565 if (root->fs_info->quota_enabled) {
4572 ret = btrfs_qgroup_reserve(root, num_bytes + 4566 ret = btrfs_qgroup_reserve(root, num_bytes +
4573 nr_extents * root->leafsize); 4567 nr_extents * root->leafsize);
4574 if (ret) 4568 if (ret) {
4569 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4575 return ret; 4570 return ret;
4571 }
4576 } 4572 }
4577 4573
4578 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 4574 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5294 rb_erase(&head->node.rb_node, &delayed_refs->root); 5290 rb_erase(&head->node.rb_node, &delayed_refs->root);
5295 5291
5296 delayed_refs->num_entries--; 5292 delayed_refs->num_entries--;
5297 smp_mb();
5298 if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
5299 wake_up(&root->fs_info->tree_mod_seq_wait);
5300 5293
5301 /* 5294 /*
5302 * we don't take a ref on the node because we're removing it from the 5295 * we don't take a ref on the node because we're removing it from the
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 45c81bb4ac82..4c878476bb91 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2330 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 2330 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2331 ret = tree->ops->readpage_end_io_hook(page, start, end, 2331 ret = tree->ops->readpage_end_io_hook(page, start, end,
2332 state, mirror); 2332 state, mirror);
2333 if (ret) { 2333 if (ret)
2334 /* no IO indicated but software detected errors
2335 * in the block, either checksum errors or
2336 * issues with the contents */
2337 struct btrfs_root *root =
2338 BTRFS_I(page->mapping->host)->root;
2339 struct btrfs_device *device;
2340
2341 uptodate = 0; 2334 uptodate = 0;
2342 device = btrfs_find_device_for_logical( 2335 else
2343 root, start, mirror);
2344 if (device)
2345 btrfs_dev_stat_inc_and_print(device,
2346 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2347 } else {
2348 clean_io_failure(start, page); 2336 clean_io_failure(start, page);
2349 }
2350 } 2337 }
2351 2338
2352 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { 2339 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b45b9de0c21d..857d93cd01dc 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
272} 272}
273 273
274int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, 274int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
275 struct bio *bio, u64 offset, u32 *dst) 275 struct bio *bio, u64 offset)
276{ 276{
277 return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); 277 return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
278} 278}
279 279
280int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 280int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6e8f416773d4..ec154f954646 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1008,9 +1008,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
1008 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> 1008 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1009 PAGE_CACHE_SHIFT; 1009 PAGE_CACHE_SHIFT;
1010 1010
1011 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); 1011 if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
1012
1013 if (atomic_read(&root->fs_info->async_delalloc_pages) <
1014 5 * 1024 * 1024 && 1012 5 * 1024 * 1024 &&
1015 waitqueue_active(&root->fs_info->async_submit_wait)) 1013 waitqueue_active(&root->fs_info->async_submit_wait))
1016 wake_up(&root->fs_info->async_submit_wait); 1014 wake_up(&root->fs_info->async_submit_wait);
@@ -1885,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
1885 trans = btrfs_join_transaction_nolock(root); 1883 trans = btrfs_join_transaction_nolock(root);
1886 else 1884 else
1887 trans = btrfs_join_transaction(root); 1885 trans = btrfs_join_transaction(root);
1888 if (IS_ERR(trans)) 1886 if (IS_ERR(trans)) {
1889 return PTR_ERR(trans); 1887 ret = PTR_ERR(trans);
1888 trans = NULL;
1889 goto out;
1890 }
1890 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1891 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1891 ret = btrfs_update_inode_fallback(trans, root, inode); 1892 ret = btrfs_update_inode_fallback(trans, root, inode);
1892 if (ret) /* -ENOMEM or corruption */ 1893 if (ret) /* -ENOMEM or corruption */
@@ -3174,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3174 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3175 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3175 inode_inc_iversion(dir); 3176 inode_inc_iversion(dir);
3176 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3177 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3177 ret = btrfs_update_inode(trans, root, dir); 3178 ret = btrfs_update_inode_fallback(trans, root, dir);
3178 if (ret) 3179 if (ret)
3179 btrfs_abort_transaction(trans, root, ret); 3180 btrfs_abort_transaction(trans, root, ret);
3180out: 3181out:
@@ -5774,18 +5775,112 @@ out:
5774 return ret; 5775 return ret;
5775} 5776}
5776 5777
5778static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
5779 struct extent_state **cached_state, int writing)
5780{
5781 struct btrfs_ordered_extent *ordered;
5782 int ret = 0;
5783
5784 while (1) {
5785 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5786 0, cached_state);
5787 /*
5788 * We're concerned with the entire range that we're going to be
5789 * doing DIO to, so we need to make sure there's no ordered
5790 * extents in this range.
5791 */
5792 ordered = btrfs_lookup_ordered_range(inode, lockstart,
5793 lockend - lockstart + 1);
5794
5795 /*
5796 * We need to make sure there are no buffered pages in this
5797 * range either, we could have raced between the invalidate in
5798 * generic_file_direct_write and locking the extent. The
5799 * invalidate needs to happen so that reads after a write do not
5800 * get stale data.
5801 */
5802 if (!ordered && (!writing ||
5803 !test_range_bit(&BTRFS_I(inode)->io_tree,
5804 lockstart, lockend, EXTENT_UPTODATE, 0,
5805 *cached_state)))
5806 break;
5807
5808 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5809 cached_state, GFP_NOFS);
5810
5811 if (ordered) {
5812 btrfs_start_ordered_extent(inode, ordered, 1);
5813 btrfs_put_ordered_extent(ordered);
5814 } else {
5815 /* Screw you mmap */
5816 ret = filemap_write_and_wait_range(inode->i_mapping,
5817 lockstart,
5818 lockend);
5819 if (ret)
5820 break;
5821
5822 /*
5823 * If we found a page that couldn't be invalidated just
5824 * fall back to buffered.
5825 */
5826 ret = invalidate_inode_pages2_range(inode->i_mapping,
5827 lockstart >> PAGE_CACHE_SHIFT,
5828 lockend >> PAGE_CACHE_SHIFT);
5829 if (ret)
5830 break;
5831 }
5832
5833 cond_resched();
5834 }
5835
5836 return ret;
5837}
5838
5777static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 5839static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5778 struct buffer_head *bh_result, int create) 5840 struct buffer_head *bh_result, int create)
5779{ 5841{
5780 struct extent_map *em; 5842 struct extent_map *em;
5781 struct btrfs_root *root = BTRFS_I(inode)->root; 5843 struct btrfs_root *root = BTRFS_I(inode)->root;
5844 struct extent_state *cached_state = NULL;
5782 u64 start = iblock << inode->i_blkbits; 5845 u64 start = iblock << inode->i_blkbits;
5846 u64 lockstart, lockend;
5783 u64 len = bh_result->b_size; 5847 u64 len = bh_result->b_size;
5784 struct btrfs_trans_handle *trans; 5848 struct btrfs_trans_handle *trans;
5849 int unlock_bits = EXTENT_LOCKED;
5850 int ret;
5851
5852 if (create) {
5853 ret = btrfs_delalloc_reserve_space(inode, len);
5854 if (ret)
5855 return ret;
5856 unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
5857 } else {
5858 len = min_t(u64, len, root->sectorsize);
5859 }
5860
5861 lockstart = start;
5862 lockend = start + len - 1;
5863
5864 /*
5865 * If this errors out it's because we couldn't invalidate pagecache for
5866 * this range and we need to fallback to buffered.
5867 */
5868 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
5869 return -ENOTBLK;
5870
5871 if (create) {
5872 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
5873 lockend, EXTENT_DELALLOC, NULL,
5874 &cached_state, GFP_NOFS);
5875 if (ret)
5876 goto unlock_err;
5877 }
5785 5878
5786 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 5879 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5787 if (IS_ERR(em)) 5880 if (IS_ERR(em)) {
5788 return PTR_ERR(em); 5881 ret = PTR_ERR(em);
5882 goto unlock_err;
5883 }
5789 5884
5790 /* 5885 /*
5791 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 5886 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
@@ -5804,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5804 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 5899 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5805 em->block_start == EXTENT_MAP_INLINE) { 5900 em->block_start == EXTENT_MAP_INLINE) {
5806 free_extent_map(em); 5901 free_extent_map(em);
5807 return -ENOTBLK; 5902 ret = -ENOTBLK;
5903 goto unlock_err;
5808 } 5904 }
5809 5905
5810 /* Just a good old fashioned hole, return */ 5906 /* Just a good old fashioned hole, return */
5811 if (!create && (em->block_start == EXTENT_MAP_HOLE || 5907 if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5812 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 5908 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5813 free_extent_map(em); 5909 free_extent_map(em);
5814 /* DIO will do one hole at a time, so just unlock a sector */ 5910 ret = 0;
5815 unlock_extent(&BTRFS_I(inode)->io_tree, start, 5911 goto unlock_err;
5816 start + root->sectorsize - 1);
5817 return 0;
5818 } 5912 }
5819 5913
5820 /* 5914 /*
@@ -5827,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5827 * 5921 *
5828 */ 5922 */
5829 if (!create) { 5923 if (!create) {
5830 len = em->len - (start - em->start); 5924 len = min(len, em->len - (start - em->start));
5831 goto map; 5925 lockstart = start + len;
5926 goto unlock;
5832 } 5927 }
5833 5928
5834 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 5929 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
@@ -5860,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5860 btrfs_end_transaction(trans, root); 5955 btrfs_end_transaction(trans, root);
5861 if (ret) { 5956 if (ret) {
5862 free_extent_map(em); 5957 free_extent_map(em);
5863 return ret; 5958 goto unlock_err;
5864 } 5959 }
5865 goto unlock; 5960 goto unlock;
5866 } 5961 }
@@ -5873,14 +5968,12 @@ must_cow:
5873 */ 5968 */
5874 len = bh_result->b_size; 5969 len = bh_result->b_size;
5875 em = btrfs_new_extent_direct(inode, em, start, len); 5970 em = btrfs_new_extent_direct(inode, em, start, len);
5876 if (IS_ERR(em)) 5971 if (IS_ERR(em)) {
5877 return PTR_ERR(em); 5972 ret = PTR_ERR(em);
5973 goto unlock_err;
5974 }
5878 len = min(len, em->len - (start - em->start)); 5975 len = min(len, em->len - (start - em->start));
5879unlock: 5976unlock:
5880 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
5881 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
5882 0, NULL, GFP_NOFS);
5883map:
5884 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 5977 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5885 inode->i_blkbits; 5978 inode->i_blkbits;
5886 bh_result->b_size = len; 5979 bh_result->b_size = len;
@@ -5898,9 +5991,44 @@ map:
5898 i_size_write(inode, start + len); 5991 i_size_write(inode, start + len);
5899 } 5992 }
5900 5993
5994 /*
5995 * In the case of write we need to clear and unlock the entire range,
5996 * in the case of read we need to unlock only the end area that we
5997 * aren't using if there is any left over space.
5998 */
5999 if (lockstart < lockend) {
6000 if (create && len < lockend - lockstart) {
6001 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6002 lockstart + len - 1, unlock_bits, 1, 0,
6003 &cached_state, GFP_NOFS);
6004 /*
 6005 * Besides unlocking, we also need to clean up the reserved space
 6006 * for the leftover range by attaching EXTENT_DO_ACCOUNTING.
6007 */
6008 clear_extent_bit(&BTRFS_I(inode)->io_tree,
6009 lockstart + len, lockend,
6010 unlock_bits | EXTENT_DO_ACCOUNTING,
6011 1, 0, NULL, GFP_NOFS);
6012 } else {
6013 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6014 lockend, unlock_bits, 1, 0,
6015 &cached_state, GFP_NOFS);
6016 }
6017 } else {
6018 free_extent_state(cached_state);
6019 }
6020
5901 free_extent_map(em); 6021 free_extent_map(em);
5902 6022
5903 return 0; 6023 return 0;
6024
6025unlock_err:
6026 if (create)
6027 unlock_bits |= EXTENT_DO_ACCOUNTING;
6028
6029 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6030 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6031 return ret;
5904} 6032}
5905 6033
5906struct btrfs_dio_private { 6034struct btrfs_dio_private {
@@ -5908,7 +6036,6 @@ struct btrfs_dio_private {
5908 u64 logical_offset; 6036 u64 logical_offset;
5909 u64 disk_bytenr; 6037 u64 disk_bytenr;
5910 u64 bytes; 6038 u64 bytes;
5911 u32 *csums;
5912 void *private; 6039 void *private;
5913 6040
5914 /* number of bios pending for this dio */ 6041 /* number of bios pending for this dio */
@@ -5928,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5928 struct inode *inode = dip->inode; 6055 struct inode *inode = dip->inode;
5929 struct btrfs_root *root = BTRFS_I(inode)->root; 6056 struct btrfs_root *root = BTRFS_I(inode)->root;
5930 u64 start; 6057 u64 start;
5931 u32 *private = dip->csums;
5932 6058
5933 start = dip->logical_offset; 6059 start = dip->logical_offset;
5934 do { 6060 do {
@@ -5936,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5936 struct page *page = bvec->bv_page; 6062 struct page *page = bvec->bv_page;
5937 char *kaddr; 6063 char *kaddr;
5938 u32 csum = ~(u32)0; 6064 u32 csum = ~(u32)0;
6065 u64 private = ~(u32)0;
5939 unsigned long flags; 6066 unsigned long flags;
5940 6067
6068 if (get_state_private(&BTRFS_I(inode)->io_tree,
6069 start, &private))
6070 goto failed;
5941 local_irq_save(flags); 6071 local_irq_save(flags);
5942 kaddr = kmap_atomic(page); 6072 kaddr = kmap_atomic(page);
5943 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, 6073 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
@@ -5947,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5947 local_irq_restore(flags); 6077 local_irq_restore(flags);
5948 6078
5949 flush_dcache_page(bvec->bv_page); 6079 flush_dcache_page(bvec->bv_page);
5950 if (csum != *private) { 6080 if (csum != private) {
6081failed:
5951 printk(KERN_ERR "btrfs csum failed ino %llu off" 6082 printk(KERN_ERR "btrfs csum failed ino %llu off"
5952 " %llu csum %u private %u\n", 6083 " %llu csum %u private %u\n",
5953 (unsigned long long)btrfs_ino(inode), 6084 (unsigned long long)btrfs_ino(inode),
5954 (unsigned long long)start, 6085 (unsigned long long)start,
5955 csum, *private); 6086 csum, (unsigned)private);
5956 err = -EIO; 6087 err = -EIO;
5957 } 6088 }
5958 } 6089 }
5959 6090
5960 start += bvec->bv_len; 6091 start += bvec->bv_len;
5961 private++;
5962 bvec++; 6092 bvec++;
5963 } while (bvec <= bvec_end); 6093 } while (bvec <= bvec_end);
5964 6094
@@ -5966,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5966 dip->logical_offset + dip->bytes - 1); 6096 dip->logical_offset + dip->bytes - 1);
5967 bio->bi_private = dip->private; 6097 bio->bi_private = dip->private;
5968 6098
5969 kfree(dip->csums);
5970 kfree(dip); 6099 kfree(dip);
5971 6100
5972 /* If we had a csum failure make sure to clear the uptodate flag */ 6101 /* If we had a csum failure make sure to clear the uptodate flag */
@@ -6072,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6072 6201
6073static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 6202static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6074 int rw, u64 file_offset, int skip_sum, 6203 int rw, u64 file_offset, int skip_sum,
6075 u32 *csums, int async_submit) 6204 int async_submit)
6076{ 6205{
6077 int write = rw & REQ_WRITE; 6206 int write = rw & REQ_WRITE;
6078 struct btrfs_root *root = BTRFS_I(inode)->root; 6207 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -6105,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6105 if (ret) 6234 if (ret)
6106 goto err; 6235 goto err;
6107 } else if (!skip_sum) { 6236 } else if (!skip_sum) {
6108 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, 6237 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
6109 file_offset, csums);
6110 if (ret) 6238 if (ret)
6111 goto err; 6239 goto err;
6112 } 6240 }
@@ -6132,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6132 u64 submit_len = 0; 6260 u64 submit_len = 0;
6133 u64 map_length; 6261 u64 map_length;
6134 int nr_pages = 0; 6262 int nr_pages = 0;
6135 u32 *csums = dip->csums;
6136 int ret = 0; 6263 int ret = 0;
6137 int async_submit = 0; 6264 int async_submit = 0;
6138 int write = rw & REQ_WRITE;
6139 6265
6140 map_length = orig_bio->bi_size; 6266 map_length = orig_bio->bi_size;
6141 ret = btrfs_map_block(map_tree, READ, start_sector << 9, 6267 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
@@ -6171,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6171 atomic_inc(&dip->pending_bios); 6297 atomic_inc(&dip->pending_bios);
6172 ret = __btrfs_submit_dio_bio(bio, inode, rw, 6298 ret = __btrfs_submit_dio_bio(bio, inode, rw,
6173 file_offset, skip_sum, 6299 file_offset, skip_sum,
6174 csums, async_submit); 6300 async_submit);
6175 if (ret) { 6301 if (ret) {
6176 bio_put(bio); 6302 bio_put(bio);
6177 atomic_dec(&dip->pending_bios); 6303 atomic_dec(&dip->pending_bios);
6178 goto out_err; 6304 goto out_err;
6179 } 6305 }
6180 6306
6181 /* Write's use the ordered csums */
6182 if (!write && !skip_sum)
6183 csums = csums + nr_pages;
6184 start_sector += submit_len >> 9; 6307 start_sector += submit_len >> 9;
6185 file_offset += submit_len; 6308 file_offset += submit_len;
6186 6309
@@ -6210,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6210 6333
6211submit: 6334submit:
6212 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 6335 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6213 csums, async_submit); 6336 async_submit);
6214 if (!ret) 6337 if (!ret)
6215 return 0; 6338 return 0;
6216 6339
@@ -6246,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6246 ret = -ENOMEM; 6369 ret = -ENOMEM;
6247 goto free_ordered; 6370 goto free_ordered;
6248 } 6371 }
6249 dip->csums = NULL;
6250
6251 /* Write's use the ordered csum stuff, so we don't need dip->csums */
6252 if (!write && !skip_sum) {
6253 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6254 if (!dip->csums) {
6255 kfree(dip);
6256 ret = -ENOMEM;
6257 goto free_ordered;
6258 }
6259 }
6260 6372
6261 dip->private = bio->bi_private; 6373 dip->private = bio->bi_private;
6262 dip->inode = inode; 6374 dip->inode = inode;
@@ -6341,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
6341out: 6453out:
6342 return retval; 6454 return retval;
6343} 6455}
6456
6344static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, 6457static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6345 const struct iovec *iov, loff_t offset, 6458 const struct iovec *iov, loff_t offset,
6346 unsigned long nr_segs) 6459 unsigned long nr_segs)
6347{ 6460{
6348 struct file *file = iocb->ki_filp; 6461 struct file *file = iocb->ki_filp;
6349 struct inode *inode = file->f_mapping->host; 6462 struct inode *inode = file->f_mapping->host;
6350 struct btrfs_ordered_extent *ordered;
6351 struct extent_state *cached_state = NULL;
6352 u64 lockstart, lockend;
6353 ssize_t ret;
6354 int writing = rw & WRITE;
6355 int write_bits = 0;
6356 size_t count = iov_length(iov, nr_segs);
6357 6463
6358 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, 6464 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6359 offset, nr_segs)) { 6465 offset, nr_segs))
6360 return 0; 6466 return 0;
6361 }
6362
6363 lockstart = offset;
6364 lockend = offset + count - 1;
6365
6366 if (writing) {
6367 ret = btrfs_delalloc_reserve_space(inode, count);
6368 if (ret)
6369 goto out;
6370 }
6371
6372 while (1) {
6373 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6374 0, &cached_state);
6375 /*
6376 * We're concerned with the entire range that we're going to be
6377 * doing DIO to, so we need to make sure theres no ordered
6378 * extents in this range.
6379 */
6380 ordered = btrfs_lookup_ordered_range(inode, lockstart,
6381 lockend - lockstart + 1);
6382
6383 /*
6384 * We need to make sure there are no buffered pages in this
6385 * range either, we could have raced between the invalidate in
6386 * generic_file_direct_write and locking the extent. The
6387 * invalidate needs to happen so that reads after a write do not
6388 * get stale data.
6389 */
6390 if (!ordered && (!writing ||
6391 !test_range_bit(&BTRFS_I(inode)->io_tree,
6392 lockstart, lockend, EXTENT_UPTODATE, 0,
6393 cached_state)))
6394 break;
6395
6396 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6397 &cached_state, GFP_NOFS);
6398
6399 if (ordered) {
6400 btrfs_start_ordered_extent(inode, ordered, 1);
6401 btrfs_put_ordered_extent(ordered);
6402 } else {
6403 /* Screw you mmap */
6404 ret = filemap_write_and_wait_range(file->f_mapping,
6405 lockstart,
6406 lockend);
6407 if (ret)
6408 goto out;
6409
6410 /*
6411 * If we found a page that couldn't be invalidated just
6412 * fall back to buffered.
6413 */
6414 ret = invalidate_inode_pages2_range(file->f_mapping,
6415 lockstart >> PAGE_CACHE_SHIFT,
6416 lockend >> PAGE_CACHE_SHIFT);
6417 if (ret) {
6418 if (ret == -EBUSY)
6419 ret = 0;
6420 goto out;
6421 }
6422 }
6423
6424 cond_resched();
6425 }
6426 6467
6427 /* 6468 return __blockdev_direct_IO(rw, iocb, inode,
6428 * we don't use btrfs_set_extent_delalloc because we don't want
6429 * the dirty or uptodate bits
6430 */
6431 if (writing) {
6432 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6433 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6434 EXTENT_DELALLOC, NULL, &cached_state,
6435 GFP_NOFS);
6436 if (ret) {
6437 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6438 lockend, EXTENT_LOCKED | write_bits,
6439 1, 0, &cached_state, GFP_NOFS);
6440 goto out;
6441 }
6442 }
6443
6444 free_extent_state(cached_state);
6445 cached_state = NULL;
6446
6447 ret = __blockdev_direct_IO(rw, iocb, inode,
6448 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 6469 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6449 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, 6470 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6450 btrfs_submit_direct, 0); 6471 btrfs_submit_direct, 0);
6451
6452 if (ret < 0 && ret != -EIOCBQUEUED) {
6453 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
6454 offset + iov_length(iov, nr_segs) - 1,
6455 EXTENT_LOCKED | write_bits, 1, 0,
6456 &cached_state, GFP_NOFS);
6457 } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
6458 /*
6459 * We're falling back to buffered, unlock the section we didn't
6460 * do IO on.
6461 */
6462 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
6463 offset + iov_length(iov, nr_segs) - 1,
6464 EXTENT_LOCKED | write_bits, 1, 0,
6465 &cached_state, GFP_NOFS);
6466 }
6467out:
6468 free_extent_state(cached_state);
6469 return ret;
6470} 6472}
6471 6473
6472static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 6474static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
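The inode.c hunks above move the extent locking for direct IO out of btrfs_direct_IO() and into btrfs_get_blocks_direct(): the requested range is locked up front, and once only part of it has actually been mapped, the unused tail is unlocked again (for writes, together with its reserved-space accounting). A rough standalone sketch of that split, using plain integer ranges; nothing here is btrfs API, and lockstart/lockend/len are only borrowed as names from the hunk above.

#include <stdio.h>

struct range { long start; long end; };        /* inclusive byte range */

static void unlock_range(struct range r, const char *why)
{
    if (r.start <= r.end)
        printf("unlock [%ld, %ld] (%s)\n", r.start, r.end, why);
}

int main(void)
{
    long lockstart = 0, lockend = 16383;       /* requested: 16 KiB */
    long len = 4096;                           /* mapped: one 4 KiB block */

    /* Used part: cleared and unlocked normally. */
    unlock_range((struct range){ lockstart, lockstart + len - 1 }, "mapped");
    /* Unused remainder: also drop the space reservation. */
    unlock_range((struct range){ lockstart + len, lockend }, "leftover + accounting");
    return 0;
}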
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7bb755677a22..9df50fa8a078 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root,
424 uuid_le_gen(&new_uuid); 424 uuid_le_gen(&new_uuid);
425 memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); 425 memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
426 root_item.otime.sec = cpu_to_le64(cur_time.tv_sec); 426 root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
427 root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec); 427 root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
428 root_item.ctime = root_item.otime; 428 root_item.ctime = root_item.otime;
429 btrfs_set_root_ctransid(&root_item, trans->transid); 429 btrfs_set_root_ctransid(&root_item, trans->transid);
430 btrfs_set_root_otransid(&root_item, trans->transid); 430 btrfs_set_root_otransid(&root_item, trans->transid);
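The one-line ioctl.c change above (repeated later for root-tree.c and transaction.c) narrows the nanoseconds conversion from cpu_to_le64() to cpu_to_le32(), because otime.nsec is a 32-bit little-endian field on disk. A minimal userspace sketch of the underlying width bug, with glibc's htole32()/htole64() standing in for the kernel helpers and an arbitrary sample value:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t nsec = 123456789;

    /* Old pattern: 64-bit conversion truncated into a 32-bit field. On a
     * big-endian host the significant bytes land in the discarded half,
     * so the stored value becomes 0; on little-endian it only works by
     * accident. */
    uint32_t wrong = (uint32_t)htole64((uint64_t)nsec);

    /* Fixed pattern: convert at the field's real width. */
    uint32_t right = htole32(nsec);

    printf("wrong=0x%08x right=0x%08x\n", wrong, right);
    return 0;
}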
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index a44eff074805..2a1762c66041 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
67{ 67{
68 if (eb->lock_nested) { 68 if (eb->lock_nested) {
69 read_lock(&eb->lock); 69 read_lock(&eb->lock);
70 if (&eb->lock_nested && current->pid == eb->lock_owner) { 70 if (eb->lock_nested && current->pid == eb->lock_owner) {
71 read_unlock(&eb->lock); 71 read_unlock(&eb->lock);
72 return; 72 return;
73 } 73 }
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index bc424ae5a81a..38b42e7bc91d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1364,13 +1364,17 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1364 spin_lock(&fs_info->qgroup_lock); 1364 spin_lock(&fs_info->qgroup_lock);
1365 1365
1366 dstgroup = add_qgroup_rb(fs_info, objectid); 1366 dstgroup = add_qgroup_rb(fs_info, objectid);
1367 if (!dstgroup) 1367 if (IS_ERR(dstgroup)) {
1368 ret = PTR_ERR(dstgroup);
1368 goto unlock; 1369 goto unlock;
1370 }
1369 1371
1370 if (srcid) { 1372 if (srcid) {
1371 srcgroup = find_qgroup_rb(fs_info, srcid); 1373 srcgroup = find_qgroup_rb(fs_info, srcid);
1372 if (!srcgroup) 1374 if (!srcgroup) {
1375 ret = -EINVAL;
1373 goto unlock; 1376 goto unlock;
1377 }
1374 dstgroup->rfer = srcgroup->rfer - level_size; 1378 dstgroup->rfer = srcgroup->rfer - level_size;
1375 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size; 1379 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
1376 srcgroup->excl = level_size; 1380 srcgroup->excl = level_size;
@@ -1379,8 +1383,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1379 qgroup_dirty(fs_info, srcgroup); 1383 qgroup_dirty(fs_info, srcgroup);
1380 } 1384 }
1381 1385
1382 if (!inherit) 1386 if (!inherit) {
1387 ret = -EINVAL;
1383 goto unlock; 1388 goto unlock;
1389 }
1384 1390
1385 i_qgroups = (u64 *)(inherit + 1); 1391 i_qgroups = (u64 *)(inherit + 1);
1386 for (i = 0; i < inherit->num_qgroups; ++i) { 1392 for (i = 0; i < inherit->num_qgroups; ++i) {
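The qgroup.c hunk switches the add_qgroup_rb() failure check from a NULL test to IS_ERR()/PTR_ERR(), and makes the other early exits set a real error code. A minimal standalone sketch of the ERR_PTR convention this relies on, with simplified stand-ins for the macros from include/linux/err.h (alloc_group() and the 4095 bound are illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static inline void *err_ptr(long error)   { return (void *)error; }
static inline int   is_err(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }
static inline long  ptr_err(const void *p){ return (long)p; }

static void *alloc_group(int fail)
{
    if (fail)
        return err_ptr(-ENOMEM);     /* errno encoded in the pointer, not NULL */
    return malloc(16);
}

int main(void)
{
    void *grp = alloc_group(1);

    if (is_err(grp)) {               /* a plain NULL check would miss this */
        printf("allocation failed: %ld\n", ptr_err(grp));
        return 1;
    }
    free(grp);
    return 0;
}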
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 6bb465cca20f..10d8e4d88071 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
544 struct timespec ct = CURRENT_TIME; 544 struct timespec ct = CURRENT_TIME;
545 545
546 spin_lock(&root->root_times_lock); 546 spin_lock(&root->root_times_lock);
547 item->ctransid = trans->transid; 547 item->ctransid = cpu_to_le64(trans->transid);
548 item->ctime.sec = cpu_to_le64(ct.tv_sec); 548 item->ctime.sec = cpu_to_le64(ct.tv_sec);
549 item->ctime.nsec = cpu_to_le64(ct.tv_nsec); 549 item->ctime.nsec = cpu_to_le32(ct.tv_nsec);
550 spin_unlock(&root->root_times_lock); 550 spin_unlock(&root->root_times_lock);
551} 551}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f2eb24c477a3..83d6f9f9c220 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -838,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
838 struct btrfs_trans_handle *trans; 838 struct btrfs_trans_handle *trans;
839 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 839 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
840 struct btrfs_root *root = fs_info->tree_root; 840 struct btrfs_root *root = fs_info->tree_root;
841 int ret;
842 841
843 trace_btrfs_sync_fs(wait); 842 trace_btrfs_sync_fs(wait);
844 843
@@ -849,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
849 848
850 btrfs_wait_ordered_extents(root, 0, 0); 849 btrfs_wait_ordered_extents(root, 0, 0);
851 850
852 trans = btrfs_start_transaction(root, 0); 851 spin_lock(&fs_info->trans_lock);
852 if (!fs_info->running_transaction) {
853 spin_unlock(&fs_info->trans_lock);
854 return 0;
855 }
856 spin_unlock(&fs_info->trans_lock);
857
858 trans = btrfs_join_transaction(root);
853 if (IS_ERR(trans)) 859 if (IS_ERR(trans))
854 return PTR_ERR(trans); 860 return PTR_ERR(trans);
855 ret = btrfs_commit_transaction(trans, root); 861 return btrfs_commit_transaction(trans, root);
856 return ret;
857} 862}
858 863
859static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) 864static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1530,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
1530 while (cur_devices) { 1535 while (cur_devices) {
1531 head = &cur_devices->devices; 1536 head = &cur_devices->devices;
1532 list_for_each_entry(dev, head, dev_list) { 1537 list_for_each_entry(dev, head, dev_list) {
1538 if (dev->missing)
1539 continue;
1533 if (!first_dev || dev->devid < first_dev->devid) 1540 if (!first_dev || dev->devid < first_dev->devid)
1534 first_dev = dev; 1541 first_dev = dev;
1535 } 1542 }
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 17be3dedacba..27c26004e050 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1031,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1031 1031
1032 btrfs_i_size_write(parent_inode, parent_inode->i_size + 1032 btrfs_i_size_write(parent_inode, parent_inode->i_size +
1033 dentry->d_name.len * 2); 1033 dentry->d_name.len * 2);
1034 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1034 ret = btrfs_update_inode(trans, parent_root, parent_inode); 1035 ret = btrfs_update_inode(trans, parent_root, parent_inode);
1035 if (ret) 1036 if (ret)
1036 goto abort_trans_dput; 1037 goto abort_trans_dput;
@@ -1066,7 +1067,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1066 memcpy(new_root_item->parent_uuid, root->root_item.uuid, 1067 memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1067 BTRFS_UUID_SIZE); 1068 BTRFS_UUID_SIZE);
1068 new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec); 1069 new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
1069 new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec); 1070 new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
1070 btrfs_set_root_otransid(new_root_item, trans->transid); 1071 btrfs_set_root_otransid(new_root_item, trans->transid);
1071 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); 1072 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1072 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); 1073 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e86ae04abe6a..88b969aeeb71 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -227,9 +227,8 @@ loop_lock:
227 cur = pending; 227 cur = pending;
228 pending = pending->bi_next; 228 pending = pending->bi_next;
229 cur->bi_next = NULL; 229 cur->bi_next = NULL;
230 atomic_dec(&fs_info->nr_async_bios);
231 230
232 if (atomic_read(&fs_info->nr_async_bios) < limit && 231 if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
233 waitqueue_active(&fs_info->async_submit_wait)) 232 waitqueue_active(&fs_info->async_submit_wait))
234 wake_up(&fs_info->async_submit_wait); 233 wake_up(&fs_info->async_submit_wait);
235 234
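The volumes.c hunk above folds a separate atomic_dec() plus atomic_read() into a single atomic_dec_return(), so the wakeup decision uses the value produced by the decrement itself rather than a possibly stale re-read. A small C11 sketch of the same idea; pending and limit are illustrative names, not btrfs state:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending = 8;
static const int limit = 4;

static void complete_one(void)
{
    /* Racy pattern (old code): decrement, then re-read; another thread may
     * change the counter in between, so the check sees a stale value.
     *
     *     atomic_fetch_sub(&pending, 1);
     *     if (atomic_load(&pending) < limit) wake_up();
     *
     * Fixed pattern: use the value the decrement returns. */
    int now = atomic_fetch_sub(&pending, 1) - 1;   /* like atomic_dec_return() */
    if (now < limit)
        printf("wake up waiters (pending=%d)\n", now);
}

int main(void)
{
    for (int i = 0; i < 6; i++)
        complete_one();
    return 0;
}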
@@ -569,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
569 memcpy(new_device, device, sizeof(*new_device)); 568 memcpy(new_device, device, sizeof(*new_device));
570 569
571 /* Safe because we are under uuid_mutex */ 570 /* Safe because we are under uuid_mutex */
572 name = rcu_string_strdup(device->name->str, GFP_NOFS); 571 if (device->name) {
573 BUG_ON(device->name && !name); /* -ENOMEM */ 572 name = rcu_string_strdup(device->name->str, GFP_NOFS);
574 rcu_assign_pointer(new_device->name, name); 573 BUG_ON(device->name && !name); /* -ENOMEM */
574 rcu_assign_pointer(new_device->name, name);
575 }
575 new_device->bdev = NULL; 576 new_device->bdev = NULL;
576 new_device->writeable = 0; 577 new_device->writeable = 0;
577 new_device->in_fs_metadata = 0; 578 new_device->in_fs_metadata = 0;
@@ -4605,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root)
4605 return ret; 4606 return ret;
4606} 4607}
4607 4608
4608struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
4609 u64 logical, int mirror_num)
4610{
4611 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4612 int ret;
4613 u64 map_length = 0;
4614 struct btrfs_bio *bbio = NULL;
4615 struct btrfs_device *device;
4616
4617 BUG_ON(mirror_num == 0);
4618 ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
4619 mirror_num);
4620 if (ret) {
4621 BUG_ON(bbio != NULL);
4622 return NULL;
4623 }
4624 BUG_ON(mirror_num != bbio->mirror_num);
4625 device = bbio->stripes[mirror_num - 1].dev;
4626 kfree(bbio);
4627 return device;
4628}
4629
4630int btrfs_read_chunk_tree(struct btrfs_root *root) 4609int btrfs_read_chunk_tree(struct btrfs_root *root)
4631{ 4610{
4632 struct btrfs_path *path; 4611 struct btrfs_path *path;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5479325987b3..53c06af92e8d 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -289,8 +289,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
289int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); 289int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
290int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 290int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
291 u64 *start, u64 *max_avail); 291 u64 *start, u64 *max_avail);
292struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
293 u64 logical, int mirror_num);
294void btrfs_dev_stat_print_on_error(struct btrfs_device *device); 292void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
295void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); 293void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
296int btrfs_get_dev_stats(struct btrfs_root *root, 294int btrfs_get_dev_stats(struct btrfs_root *root,
diff --git a/fs/buffer.c b/fs/buffer.c
index 9f6d2e41281d..58e2e7b77372 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
914/* 914/*
915 * Initialise the state of a blockdev page's buffers. 915 * Initialise the state of a blockdev page's buffers.
916 */ 916 */
917static void 917static sector_t
918init_page_buffers(struct page *page, struct block_device *bdev, 918init_page_buffers(struct page *page, struct block_device *bdev,
919 sector_t block, int size) 919 sector_t block, int size)
920{ 920{
@@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
936 block++; 936 block++;
937 bh = bh->b_this_page; 937 bh = bh->b_this_page;
938 } while (bh != head); 938 } while (bh != head);
939
940 /*
941 * Caller needs to validate requested block against end of device.
942 */
943 return end_block;
939} 944}
940 945
941/* 946/*
942 * Create the page-cache page that contains the requested block. 947 * Create the page-cache page that contains the requested block.
943 * 948 *
944 * This is user purely for blockdev mappings. 949 * This is used purely for blockdev mappings.
945 */ 950 */
946static struct page * 951static int
947grow_dev_page(struct block_device *bdev, sector_t block, 952grow_dev_page(struct block_device *bdev, sector_t block,
948 pgoff_t index, int size) 953 pgoff_t index, int size, int sizebits)
949{ 954{
950 struct inode *inode = bdev->bd_inode; 955 struct inode *inode = bdev->bd_inode;
951 struct page *page; 956 struct page *page;
952 struct buffer_head *bh; 957 struct buffer_head *bh;
958 sector_t end_block;
959 int ret = 0; /* Will call free_more_memory() */
953 960
954 page = find_or_create_page(inode->i_mapping, index, 961 page = find_or_create_page(inode->i_mapping, index,
955 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); 962 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
956 if (!page) 963 if (!page)
957 return NULL; 964 return ret;
958 965
959 BUG_ON(!PageLocked(page)); 966 BUG_ON(!PageLocked(page));
960 967
961 if (page_has_buffers(page)) { 968 if (page_has_buffers(page)) {
962 bh = page_buffers(page); 969 bh = page_buffers(page);
963 if (bh->b_size == size) { 970 if (bh->b_size == size) {
964 init_page_buffers(page, bdev, block, size); 971 end_block = init_page_buffers(page, bdev,
965 return page; 972 index << sizebits, size);
973 goto done;
966 } 974 }
967 if (!try_to_free_buffers(page)) 975 if (!try_to_free_buffers(page))
968 goto failed; 976 goto failed;
@@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
982 */ 990 */
983 spin_lock(&inode->i_mapping->private_lock); 991 spin_lock(&inode->i_mapping->private_lock);
984 link_dev_buffers(page, bh); 992 link_dev_buffers(page, bh);
985 init_page_buffers(page, bdev, block, size); 993 end_block = init_page_buffers(page, bdev, index << sizebits, size);
986 spin_unlock(&inode->i_mapping->private_lock); 994 spin_unlock(&inode->i_mapping->private_lock);
987 return page; 995done:
988 996 ret = (block < end_block) ? 1 : -ENXIO;
989failed: 997failed:
990 unlock_page(page); 998 unlock_page(page);
991 page_cache_release(page); 999 page_cache_release(page);
992 return NULL; 1000 return ret;
993} 1001}
994 1002
995/* 1003/*
@@ -999,7 +1007,6 @@ failed:
999static int 1007static int
1000grow_buffers(struct block_device *bdev, sector_t block, int size) 1008grow_buffers(struct block_device *bdev, sector_t block, int size)
1001{ 1009{
1002 struct page *page;
1003 pgoff_t index; 1010 pgoff_t index;
1004 int sizebits; 1011 int sizebits;
1005 1012
@@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
1023 bdevname(bdev, b)); 1030 bdevname(bdev, b));
1024 return -EIO; 1031 return -EIO;
1025 } 1032 }
1026 block = index << sizebits; 1033
1027 /* Create a page with the proper size buffers.. */ 1034 /* Create a page with the proper size buffers.. */
1028 page = grow_dev_page(bdev, block, index, size); 1035 return grow_dev_page(bdev, block, index, size, sizebits);
1029 if (!page)
1030 return 0;
1031 unlock_page(page);
1032 page_cache_release(page);
1033 return 1;
1034} 1036}
1035 1037
1036static struct buffer_head * 1038static struct buffer_head *
1037__getblk_slow(struct block_device *bdev, sector_t block, int size) 1039__getblk_slow(struct block_device *bdev, sector_t block, int size)
1038{ 1040{
1039 int ret;
1040 struct buffer_head *bh;
1041
1042 /* Size must be multiple of hard sectorsize */ 1041 /* Size must be multiple of hard sectorsize */
1043 if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 1042 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1044 (size < 512 || size > PAGE_SIZE))) { 1043 (size < 512 || size > PAGE_SIZE))) {
@@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
1051 return NULL; 1050 return NULL;
1052 } 1051 }
1053 1052
1054retry: 1053 for (;;) {
1055 bh = __find_get_block(bdev, block, size); 1054 struct buffer_head *bh;
1056 if (bh) 1055 int ret;
1057 return bh;
1058 1056
1059 ret = grow_buffers(bdev, block, size);
1060 if (ret == 0) {
1061 free_more_memory();
1062 goto retry;
1063 } else if (ret > 0) {
1064 bh = __find_get_block(bdev, block, size); 1057 bh = __find_get_block(bdev, block, size);
1065 if (bh) 1058 if (bh)
1066 return bh; 1059 return bh;
1060
1061 ret = grow_buffers(bdev, block, size);
1062 if (ret < 0)
1063 return NULL;
1064 if (ret == 0)
1065 free_more_memory();
1067 } 1066 }
1068 return NULL;
1069} 1067}
1070 1068
1071/* 1069/*
@@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block);
1321 * which corresponds to the passed block_device, block and size. The 1319 * which corresponds to the passed block_device, block and size. The
1322 * returned buffer has its reference count incremented. 1320 * returned buffer has its reference count incremented.
1323 * 1321 *
1324 * __getblk() cannot fail - it just keeps trying. If you pass it an
1325 * illegal block number, __getblk() will happily return a buffer_head
1326 * which represents the non-existent block. Very weird.
1327 *
1328 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() 1322 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1329 * attempt is failing. FIXME, perhaps? 1323 * attempt is failing. FIXME, perhaps?
1330 */ 1324 */
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1faf4cb56f39..f86c720dba0e 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1062,6 +1062,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1062 unsigned long user_addr; 1062 unsigned long user_addr;
1063 size_t bytes; 1063 size_t bytes;
1064 struct buffer_head map_bh = { 0, }; 1064 struct buffer_head map_bh = { 0, };
1065 struct blk_plug plug;
1065 1066
1066 if (rw & WRITE) 1067 if (rw & WRITE)
1067 rw = WRITE_ODIRECT; 1068 rw = WRITE_ODIRECT;
@@ -1177,6 +1178,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1177 PAGE_SIZE - user_addr / PAGE_SIZE); 1178 PAGE_SIZE - user_addr / PAGE_SIZE);
1178 } 1179 }
1179 1180
1181 blk_start_plug(&plug);
1182
1180 for (seg = 0; seg < nr_segs; seg++) { 1183 for (seg = 0; seg < nr_segs; seg++) {
1181 user_addr = (unsigned long)iov[seg].iov_base; 1184 user_addr = (unsigned long)iov[seg].iov_base;
1182 sdio.size += bytes = iov[seg].iov_len; 1185 sdio.size += bytes = iov[seg].iov_len;
@@ -1235,6 +1238,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1235 if (sdio.bio) 1238 if (sdio.bio)
1236 dio_bio_submit(dio, &sdio); 1239 dio_bio_submit(dio, &sdio);
1237 1240
1241 blk_finish_plug(&plug);
1242
1238 /* 1243 /*
1239 * It is possible that, we return short IO due to end of file. 1244 * It is possible that, we return short IO due to end of file.
1240 * In that case, we need to release all the pages we got hold on. 1245 * In that case, we need to release all the pages we got hold on.

diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 09357508ec9a..a2862339323b 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1113,6 +1113,11 @@ static void mark_journal_empty(journal_t *journal)
1113 1113
1114 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); 1114 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
1115 spin_lock(&journal->j_state_lock); 1115 spin_lock(&journal->j_state_lock);
1116 /* Is it already empty? */
1117 if (sb->s_start == 0) {
1118 spin_unlock(&journal->j_state_lock);
1119 return;
1120 }
1116 jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n", 1121 jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
1117 journal->j_tail_sequence); 1122 journal->j_tail_sequence);
1118 1123
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index df0de27c2733..e784a217b500 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
26 struct completion complete; 26 struct completion complete;
27 27
28 bio_init(&bio); 28 bio_init(&bio);
29 bio.bi_max_vecs = 1;
29 bio.bi_io_vec = &bio_vec; 30 bio.bi_io_vec = &bio_vec;
30 bio_vec.bv_page = page; 31 bio_vec.bv_page = page;
31 bio_vec.bv_len = PAGE_SIZE; 32 bio_vec.bv_len = PAGE_SIZE;
@@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
95 struct address_space *mapping = super->s_mapping_inode->i_mapping; 96 struct address_space *mapping = super->s_mapping_inode->i_mapping;
96 struct bio *bio; 97 struct bio *bio;
97 struct page *page; 98 struct page *page;
98 struct request_queue *q = bdev_get_queue(sb->s_bdev); 99 unsigned int max_pages;
99 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
100 int i; 100 int i;
101 101
102 if (max_pages > BIO_MAX_PAGES) 102 max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
103 max_pages = BIO_MAX_PAGES; 103
104 bio = bio_alloc(GFP_NOFS, max_pages); 104 bio = bio_alloc(GFP_NOFS, max_pages);
105 BUG_ON(!bio); 105 BUG_ON(!bio);
106 106
@@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
190{ 190{
191 struct logfs_super *super = logfs_super(sb); 191 struct logfs_super *super = logfs_super(sb);
192 struct bio *bio; 192 struct bio *bio;
193 struct request_queue *q = bdev_get_queue(sb->s_bdev); 193 unsigned int max_pages;
194 unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
195 int i; 194 int i;
196 195
197 if (max_pages > BIO_MAX_PAGES) 196 max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
198 max_pages = BIO_MAX_PAGES; 197
199 bio = bio_alloc(GFP_NOFS, max_pages); 198 bio = bio_alloc(GFP_NOFS, max_pages);
200 BUG_ON(!bio); 199 BUG_ON(!bio);
201 200
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index a422f42238b2..6984562738d3 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode)
156 call_rcu(&inode->i_rcu, logfs_i_callback); 156 call_rcu(&inode->i_rcu, logfs_i_callback);
157} 157}
158 158
159static void __logfs_destroy_meta_inode(struct inode *inode)
160{
161 struct logfs_inode *li = logfs_inode(inode);
162 BUG_ON(li->li_block);
163 call_rcu(&inode->i_rcu, logfs_i_callback);
164}
165
159static void logfs_destroy_inode(struct inode *inode) 166static void logfs_destroy_inode(struct inode *inode)
160{ 167{
161 struct logfs_inode *li = logfs_inode(inode); 168 struct logfs_inode *li = logfs_inode(inode);
162 169
170 if (inode->i_ino < LOGFS_RESERVED_INOS) {
171 /*
172 * The reserved inodes are never destroyed unless we are in
 173 * the unmount path.
174 */
175 __logfs_destroy_meta_inode(inode);
176 return;
177 }
178
163 BUG_ON(list_empty(&li->li_freeing_list)); 179 BUG_ON(list_empty(&li->li_freeing_list));
164 spin_lock(&logfs_inode_lock); 180 spin_lock(&logfs_inode_lock);
165 li->li_refcount--; 181 li->li_refcount--;
@@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb)
373{ 389{
374 struct logfs_super *super = logfs_super(sb); 390 struct logfs_super *super = logfs_super(sb);
375 /* kill the meta-inodes */ 391 /* kill the meta-inodes */
376 iput(super->s_master_inode);
377 iput(super->s_segfile_inode); 392 iput(super->s_segfile_inode);
393 iput(super->s_master_inode);
378 iput(super->s_mapping_inode); 394 iput(super->s_mapping_inode);
379} 395}
380 396
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 1e1c369df22b..2a09b8d73989 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area,
565 index = ofs >> PAGE_SHIFT; 565 index = ofs >> PAGE_SHIFT;
566 page_ofs = ofs & (PAGE_SIZE - 1); 566 page_ofs = ofs & (PAGE_SIZE - 1);
567 567
568 page = find_lock_page(mapping, index); 568 page = find_or_create_page(mapping, index, GFP_NOFS);
569 BUG_ON(!page); 569 BUG_ON(!page);
570 memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); 570 memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
571 unlock_page(page); 571 unlock_page(page);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index f1cb512c5019..5be0abef603d 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode)
2189 return; 2189 return;
2190 } 2190 }
2191 2191
2192 BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
2193 page = inode_to_page(inode); 2192 page = inode_to_page(inode);
2194 BUG_ON(!page); /* FIXME: Use emergency page */ 2193 BUG_ON(!page); /* FIXME: Use emergency page */
2195 logfs_put_write_page(page); 2194 logfs_put_write_page(page);
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index e28d090c98d6..038da0991794 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)
886 886
887static void map_invalidatepage(struct page *page, unsigned long l) 887static void map_invalidatepage(struct page *page, unsigned long l)
888{ 888{
889 BUG(); 889 return;
890} 890}
891 891
892static int map_releasepage(struct page *page, gfp_t g) 892static int map_releasepage(struct page *page, gfp_t g)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index cbaf4f8bb7b7..4c7bd35b1876 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -651,12 +651,12 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
651 651
652 if (clp->cl_minorversion == 0) { 652 if (clp->cl_minorversion == 0) {
653 if (!clp->cl_cred.cr_principal && 653 if (!clp->cl_cred.cr_principal &&
654 (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) 654 (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
655 return -EINVAL; 655 return -EINVAL;
656 args.client_name = clp->cl_cred.cr_principal; 656 args.client_name = clp->cl_cred.cr_principal;
657 args.prognumber = conn->cb_prog, 657 args.prognumber = conn->cb_prog,
658 args.protocol = XPRT_TRANSPORT_TCP; 658 args.protocol = XPRT_TRANSPORT_TCP;
659 args.authflavor = clp->cl_flavor; 659 args.authflavor = clp->cl_cred.cr_flavor;
660 clp->cl_cb_ident = conn->cb_ident; 660 clp->cl_cb_ident = conn->cb_ident;
661 } else { 661 } else {
662 if (!conn->cb_xprt) 662 if (!conn->cb_xprt)
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e6173147f982..22bd0a66c356 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -231,7 +231,6 @@ struct nfs4_client {
231 nfs4_verifier cl_verifier; /* generated by client */ 231 nfs4_verifier cl_verifier; /* generated by client */
232 time_t cl_time; /* time of last lease renewal */ 232 time_t cl_time; /* time of last lease renewal */
233 struct sockaddr_storage cl_addr; /* client ipaddress */ 233 struct sockaddr_storage cl_addr; /* client ipaddress */
234 u32 cl_flavor; /* setclientid pseudoflavor */
235 struct svc_cred cl_cred; /* setclientid principal */ 234 struct svc_cred cl_cred; /* setclientid principal */
236 clientid_t cl_clientid; /* generated by server */ 235 clientid_t cl_clientid; /* generated by server */
237 nfs4_verifier cl_confirm; /* generated by server */ 236 nfs4_verifier cl_confirm; /* generated by server */
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 36a29b753c79..c495a3055e2a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1589,10 +1589,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1589 goto out; 1589 goto out;
1590 } 1590 }
1591 1591
1592 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1593 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1592 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1594 warn[cnt].w_type = QUOTA_NL_NOWARN; 1593 warn[cnt].w_type = QUOTA_NL_NOWARN;
1595 1594
1595 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1596 spin_lock(&dq_data_lock); 1596 spin_lock(&dq_data_lock);
1597 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1597 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1598 if (!dquots[cnt]) 1598 if (!dquots[cnt])
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4c0c7d163d15..a98b7740a0fc 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1334,9 +1334,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
1334 else if (bitmap == 0) 1334 else if (bitmap == 0)
1335 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1; 1335 block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
1336 1336
1337 reiserfs_write_unlock(sb);
1338 bh = sb_bread(sb, block); 1337 bh = sb_bread(sb, block);
1339 reiserfs_write_lock(sb);
1340 if (bh == NULL) 1338 if (bh == NULL)
1341 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) " 1339 reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
1342 "reading failed", __func__, block); 1340 "reading failed", __func__, block);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a6d4268fb6c1..855da58db145 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,10 +76,10 @@ void reiserfs_evict_inode(struct inode *inode)
76 ; 76 ;
77 } 77 }
78 out: 78 out:
79 reiserfs_write_unlock_once(inode->i_sb, depth);
79 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */ 80 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
80 dquot_drop(inode); 81 dquot_drop(inode);
81 inode->i_blocks = 0; 82 inode->i_blocks = 0;
82 reiserfs_write_unlock_once(inode->i_sb, depth);
83 return; 83 return;
84 84
85no_delete: 85no_delete:
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 8b8cc4e945f4..760de723dadb 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -167,7 +167,7 @@ struct ubifs_global_debug_info {
167#define ubifs_dbg_msg(type, fmt, ...) \ 167#define ubifs_dbg_msg(type, fmt, ...) \
168 pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__) 168 pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
169 169
170#define DBG_KEY_BUF_LEN 32 170#define DBG_KEY_BUF_LEN 48
171#define ubifs_dbg_msg_key(type, key, fmt, ...) do { \ 171#define ubifs_dbg_msg_key(type, key, fmt, ...) do { \
172 char __tmp_key_buf[DBG_KEY_BUF_LEN]; \ 172 char __tmp_key_buf[DBG_KEY_BUF_LEN]; \
173 pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \ 173 pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index ce33b2beb151..8640920766ed 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1749,7 +1749,10 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
1749 return 0; 1749 return 0;
1750 1750
1751out_err: 1751out_err:
1752 ubifs_lpt_free(c, 0); 1752 if (wr)
1753 ubifs_lpt_free(c, 1);
1754 if (rd)
1755 ubifs_lpt_free(c, 0);
1753 return err; 1756 return err;
1754} 1757}
1755 1758
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index c30d976b4be8..edeec499c048 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -788,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
788 788
789corrupted_rescan: 789corrupted_rescan:
790 /* Re-scan the corrupted data with verbose messages */ 790 /* Re-scan the corrupted data with verbose messages */
791 ubifs_err("corruptio %d", ret); 791 ubifs_err("corruption %d", ret);
792 ubifs_scan_a_node(c, buf, len, lnum, offs, 1); 792 ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
793corrupted: 793corrupted:
794 ubifs_scanned_corruption(c, lnum, offs, buf); 794 ubifs_scanned_corruption(c, lnum, offs, buf);
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index eba46d4a7619..94d78fc5d4e0 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1026,7 +1026,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
1026 c->replaying = 1; 1026 c->replaying = 1;
1027 lnum = c->ltail_lnum = c->lhead_lnum; 1027 lnum = c->ltail_lnum = c->lhead_lnum;
1028 1028
1029 lnum = UBIFS_LOG_LNUM;
1030 do { 1029 do {
1031 err = replay_log_leb(c, lnum, 0, c->sbuf); 1030 err = replay_log_leb(c, lnum, 0, c->sbuf);
1032 if (err == 1) 1031 if (err == 1)
@@ -1035,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
1035 if (err) 1034 if (err)
1036 goto out; 1035 goto out;
1037 lnum = ubifs_next_log_lnum(c, lnum); 1036 lnum = ubifs_next_log_lnum(c, lnum);
1038 } while (lnum != UBIFS_LOG_LNUM); 1037 } while (lnum != c->ltail_lnum);
1039 1038
1040 err = replay_buds(c); 1039 err = replay_buds(c);
1041 if (err) 1040 if (err)
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index c3fa6c5327a3..71a197f0f93d 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1157,9 +1157,6 @@ static int check_free_space(struct ubifs_info *c)
1157 * 1157 *
1158 * This function mounts UBIFS file system. Returns zero in case of success and 1158 * This function mounts UBIFS file system. Returns zero in case of success and
1159 * a negative error code in case of failure. 1159 * a negative error code in case of failure.
1160 *
1161 * Note, the function does not de-allocate resources it it fails half way
1162 * through, and the caller has to do this instead.
1163 */ 1160 */
1164static int mount_ubifs(struct ubifs_info *c) 1161static int mount_ubifs(struct ubifs_info *c)
1165{ 1162{
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fafaad795cd6..aa233469b3c1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1124,14 +1124,17 @@ int udf_setsize(struct inode *inode, loff_t newsize)
1124 if (err) 1124 if (err)
1125 return err; 1125 return err;
1126 down_write(&iinfo->i_data_sem); 1126 down_write(&iinfo->i_data_sem);
1127 } else 1127 } else {
1128 iinfo->i_lenAlloc = newsize; 1128 iinfo->i_lenAlloc = newsize;
1129 goto set_size;
1130 }
1129 } 1131 }
1130 err = udf_extend_file(inode, newsize); 1132 err = udf_extend_file(inode, newsize);
1131 if (err) { 1133 if (err) {
1132 up_write(&iinfo->i_data_sem); 1134 up_write(&iinfo->i_data_sem);
1133 return err; 1135 return err;
1134 } 1136 }
1137set_size:
1135 truncate_setsize(inode, newsize); 1138 truncate_setsize(inode, newsize);
1136 up_write(&iinfo->i_data_sem); 1139 up_write(&iinfo->i_data_sem);
1137 } else { 1140 } else {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index dcbf98722afc..18fc038a438d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1344,6 +1344,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1344 udf_err(sb, "error loading logical volume descriptor: " 1344 udf_err(sb, "error loading logical volume descriptor: "
1345 "Partition table too long (%u > %lu)\n", table_len, 1345 "Partition table too long (%u > %lu)\n", table_len,
1346 sb->s_blocksize - sizeof(*lvd)); 1346 sb->s_blocksize - sizeof(*lvd));
1347 ret = 1;
1347 goto out_bh; 1348 goto out_bh;
1348 } 1349 }
1349 1350
@@ -1388,8 +1389,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1388 UDF_ID_SPARABLE, 1389 UDF_ID_SPARABLE,
1389 strlen(UDF_ID_SPARABLE))) { 1390 strlen(UDF_ID_SPARABLE))) {
1390 if (udf_load_sparable_map(sb, map, 1391 if (udf_load_sparable_map(sb, map,
1391 (struct sparablePartitionMap *)gpm) < 0) 1392 (struct sparablePartitionMap *)gpm) < 0) {
1393 ret = 1;
1392 goto out_bh; 1394 goto out_bh;
1395 }
1393 } else if (!strncmp(upm2->partIdent.ident, 1396 } else if (!strncmp(upm2->partIdent.ident,
1394 UDF_ID_METADATA, 1397 UDF_ID_METADATA,
1395 strlen(UDF_ID_METADATA))) { 1398 strlen(UDF_ID_METADATA))) {
@@ -2000,6 +2003,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2000 if (!silent) 2003 if (!silent)
2001 pr_notice("Rescanning with blocksize %d\n", 2004 pr_notice("Rescanning with blocksize %d\n",
2002 UDF_DEFAULT_BLOCKSIZE); 2005 UDF_DEFAULT_BLOCKSIZE);
2006 brelse(sbi->s_lvid_bh);
2007 sbi->s_lvid_bh = NULL;
2003 uopt.blocksize = UDF_DEFAULT_BLOCKSIZE; 2008 uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
2004 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2009 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2005 } 2010 }
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index f9c3fe304a17..69cf4fcde03e 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -179,12 +179,14 @@ xfs_ioc_trim(
179 * used by the fstrim application. In the end it really doesn't 179 * used by the fstrim application. In the end it really doesn't
180 * matter as trimming blocks is an advisory interface. 180 * matter as trimming blocks is an advisory interface.
181 */ 181 */
182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
183 range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
184 return -XFS_ERROR(EINVAL);
185
182 start = BTOBB(range.start); 186 start = BTOBB(range.start);
183 end = start + BTOBBT(range.len) - 1; 187 end = start + BTOBBT(range.len) - 1;
184 minlen = BTOBB(max_t(u64, granularity, range.minlen)); 188 minlen = BTOBB(max_t(u64, granularity, range.minlen));
185 189
186 if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
187 return -XFS_ERROR(EINVAL);
188 if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1) 190 if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
189 end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1; 191 end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
190 192
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 21e37b55f7e5..5aceb3f8ecd6 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -962,23 +962,22 @@ xfs_dialloc(
962 if (!pag->pagi_freecount && !okalloc) 962 if (!pag->pagi_freecount && !okalloc)
963 goto nextag; 963 goto nextag;
964 964
965 /*
966 * Then read in the AGI buffer and recheck with the AGI buffer
967 * lock held.
968 */
965 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 969 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
966 if (error) 970 if (error)
967 goto out_error; 971 goto out_error;
968 972
969 /*
970 * Once the AGI has been read in we have to recheck
971 * pagi_freecount with the AGI buffer lock held.
972 */
973 if (pag->pagi_freecount) { 973 if (pag->pagi_freecount) {
974 xfs_perag_put(pag); 974 xfs_perag_put(pag);
975 goto out_alloc; 975 goto out_alloc;
976 } 976 }
977 977
978 if (!okalloc) { 978 if (!okalloc)
979 xfs_trans_brelse(tp, agbp); 979 goto nextag_relse_buffer;
980 goto nextag; 980
981 }
982 981
983 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced); 982 error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
984 if (error) { 983 if (error) {
@@ -1007,6 +1006,8 @@ xfs_dialloc(
1007 return 0; 1006 return 0;
1008 } 1007 }
1009 1008
1009nextag_relse_buffer:
1010 xfs_trans_brelse(tp, agbp);
1010nextag: 1011nextag:
1011 xfs_perag_put(pag); 1012 xfs_perag_put(pag);
1012 if (++agno == mp->m_sb.sb_agcount) 1013 if (++agno == mp->m_sb.sb_agcount)
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 92d4331cd4f1..ca28a4ba4b54 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -857,7 +857,7 @@ xfs_rtbuf_get(
857 xfs_buf_t *bp; /* block buffer, result */ 857 xfs_buf_t *bp; /* block buffer, result */
858 xfs_inode_t *ip; /* bitmap or summary inode */ 858 xfs_inode_t *ip; /* bitmap or summary inode */
859 xfs_bmbt_irec_t map; 859 xfs_bmbt_irec_t map;
860 int nmap; 860 int nmap = 1;
861 int error; /* error value */ 861 int error; /* error value */
862 862
863 ip = issum ? mp->m_rsumip : mp->m_rbmip; 863 ip = issum ? mp->m_rsumip : mp->m_rbmip;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index ced362533e3c..bfacf0d5a225 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -118,7 +118,8 @@ enum drm_mode_status {
118 .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ 118 .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
119 .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ 119 .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
120 .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ 120 .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
121 .vscan = (vs), .flags = (f), .vrefresh = 0 121 .vscan = (vs), .flags = (f), .vrefresh = 0, \
122 .base.type = DRM_MODE_OBJECT_MODE
122 123
123#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ 124#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
124 125
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 5581980b14f6..3d6301b6ec16 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -359,8 +359,9 @@ struct drm_mode_mode_cmd {
359 struct drm_mode_modeinfo mode; 359 struct drm_mode_modeinfo mode;
360}; 360};
361 361
362#define DRM_MODE_CURSOR_BO (1<<0) 362#define DRM_MODE_CURSOR_BO 0x01
363#define DRM_MODE_CURSOR_MOVE (1<<1) 363#define DRM_MODE_CURSOR_MOVE 0x02
364#define DRM_MODE_CURSOR_FLAGS 0x03
364 365
365/* 366/*
366 * depending on the value in flags different members are used. 367 * depending on the value in flags different members are used.
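The drm_mode.h hunk adds DRM_MODE_CURSOR_FLAGS as the union of the two defined cursor flag bits, which is the kind of mask an ioctl handler can use to reject requests carrying unknown flags. A small illustrative check (the surrounding handler is assumed, not part of this diff):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DRM_MODE_CURSOR_BO    0x01
#define DRM_MODE_CURSOR_MOVE  0x02
#define DRM_MODE_CURSOR_FLAGS 0x03    /* union of all defined cursor flags */

static int check_cursor_flags(uint32_t flags)
{
    if (flags & ~(uint32_t)DRM_MODE_CURSOR_FLAGS)
        return -EINVAL;               /* unknown flag bits set */
    return 0;
}

int main(void)
{
    printf("%d %d\n", check_cursor_flags(DRM_MODE_CURSOR_MOVE),
           check_cursor_flags(0x10));
    return 0;
}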
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4e72a9d48232..4a2ab7c85393 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -601,7 +601,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
601 * it already be started by driver. 601 * it already be started by driver.
602 */ 602 */
603#define RQ_NOMERGE_FLAGS \ 603#define RQ_NOMERGE_FLAGS \
604 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) 604 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
605#define rq_mergeable(rq) \ 605#define rq_mergeable(rq) \
606 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 606 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
607 (((rq)->cmd_flags & REQ_DISCARD) || \ 607 (((rq)->cmd_flags & REQ_DISCARD) || \
@@ -894,6 +894,8 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
894extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 894extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
895 895
896extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 896extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
897extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
898 struct scatterlist *sglist);
897extern void blk_dump_rq_flags(struct request *, char *); 899extern void blk_dump_rq_flags(struct request *, char *);
898extern long nr_blockdev_pages(void); 900extern long nr_blockdev_pages(void);
899 901
@@ -1139,6 +1141,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
1139 & (lim->discard_granularity - 1); 1141 & (lim->discard_granularity - 1);
1140} 1142}
1141 1143
1144static inline int bdev_discard_alignment(struct block_device *bdev)
1145{
1146 struct request_queue *q = bdev_get_queue(bdev);
1147
1148 if (bdev != bdev->bd_contains)
1149 return bdev->bd_part->discard_alignment;
1150
1151 return q->limits.discard_alignment;
1152}
1153
1142static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) 1154static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1143{ 1155{
1144 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) 1156 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 040b13b5c14a..279b1eaa8b73 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -194,6 +194,10 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; }
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#else
+static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+}
 #endif
 
 /******************************
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 603bec2913b0..06177ba10a16 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -58,13 +58,6 @@ union ktime {
 
 typedef union ktime ktime_t;		/* Kill this */
 
-#define KTIME_MAX			((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX			LONG_MAX
-#endif
-
 /*
  * ktime_t definitions when using the 64-bit scalar representation:
  */
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 51bf8ada6dc0..49258e0ed1c6 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -15,6 +15,8 @@
 #define MV643XX_ETH_SIZE_REG_4		0x2224
 #define MV643XX_ETH_BASE_ADDR_ENABLE_REG	0x2290
 
+#define MV643XX_TX_CSUM_DEFAULT_LIMIT	0
+
 struct mv643xx_eth_shared_platform_data {
 	struct mbus_dram_target_info	*dram;
 	struct platform_device	*shared_smi;
diff --git a/include/linux/time.h b/include/linux/time.h
index c81c5e40fcb5..b51e664c83e7 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 	return ts_delta;
 }
 
+#define KTIME_MAX			((s64)~((u64)1 << 63))
+#if (BITS_PER_LONG == 64)
+# define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC)
+#else
+# define KTIME_SEC_MAX			LONG_MAX
+#endif
+
 /*
  * Returns true if the timespec is norm, false if denorm:
  */
-#define timespec_valid(ts) \
-	(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
+static inline bool timespec_valid(const struct timespec *ts)
+{
+	/* Dates before 1970 are bogus */
+	if (ts->tv_sec < 0)
+		return false;
+	/* Can't have more nanoseconds then a second */
+	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+		return false;
+	return true;
+}
+
+static inline bool timespec_valid_strict(const struct timespec *ts)
+{
+	if (!timespec_valid(ts))
+		return false;
+	/* Disallow values that could overflow ktime_t */
+	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+		return false;
+	return true;
+}
 
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
diff --git a/include/xen/events.h b/include/xen/events.h
index 9c641deb65d2..04399b28e821 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -58,8 +58,6 @@ void notify_remote_via_irq(int irq);
 
 void xen_irq_resume(void);
 
-void xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn);
-
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq);
 void xen_set_irq_pending(int irq);
diff --git a/kernel/fork.c b/kernel/fork.c
index 3bd2280d79f6..2c8857e12855 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -455,8 +455,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (retval)
 			goto out;
 
-		if (file && uprobe_mmap(tmp))
-			goto out;
+		if (file)
+			uprobe_mmap(tmp);
 	}
 	/* a new mm has just been created */
 	arch_dup_mmap(oldmm, mm);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e16af197a2bc..34e5eac81424 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -115,6 +115,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
 {
 	tk->xtime_sec += ts->tv_sec;
 	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+	tk_normalize_xtime(tk);
 }
 
 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
@@ -276,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	tk->xtime_nsec += cycle_delta * tk->mult;
 
 	/* If arch requires, add in gettimeoffset() */
-	tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
+	tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
 
 	tk_normalize_xtime(tk);
 
@@ -427,7 +428,7 @@ int do_settimeofday(const struct timespec *tv)
 	struct timespec ts_delta, xt;
 	unsigned long flags;
 
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+	if (!timespec_valid_strict(tv))
 		return -EINVAL;
 
 	write_seqlock_irqsave(&tk->lock, flags);
@@ -463,6 +464,8 @@ int timekeeping_inject_offset(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
+	struct timespec tmp;
+	int ret = 0;
 
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
@@ -471,10 +474,17 @@ int timekeeping_inject_offset(struct timespec *ts)
 
 	timekeeping_forward_now(tk);
 
+	/* Make sure the proposed value is valid */
+	tmp = timespec_add(tk_xtime(tk), *ts);
+	if (!timespec_valid_strict(&tmp)) {
+		ret = -EINVAL;
+		goto error;
+	}
+
 	tk_xtime_add(tk, ts);
 	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
 
+error: /* even if we error out, we forwarded the time, so call update */
 	timekeeping_update(tk, true);
 
 	write_sequnlock_irqrestore(&tk->lock, flags);
@@ -482,7 +492,7 @@ int timekeeping_inject_offset(struct timespec *ts)
 	/* signal hrtimers about time change */
 	clock_was_set();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(timekeeping_inject_offset);
 
@@ -649,7 +659,20 @@ void __init timekeeping_init(void)
 	struct timespec now, boot, tmp;
 
 	read_persistent_clock(&now);
+	if (!timespec_valid_strict(&now)) {
+		pr_warn("WARNING: Persistent clock returned invalid value!\n"
+			"         Check your CMOS/BIOS settings.\n");
+		now.tv_sec = 0;
+		now.tv_nsec = 0;
+	}
+
 	read_boot_clock(&boot);
+	if (!timespec_valid_strict(&boot)) {
+		pr_warn("WARNING: Boot clock returned invalid value!\n"
+			"         Check your CMOS/BIOS settings.\n");
+		boot.tv_sec = 0;
+		boot.tv_nsec = 0;
+	}
 
 	seqlock_init(&tk->lock);
 
@@ -690,7 +713,7 @@ static struct timespec timekeeping_suspend_time;
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
 					   struct timespec *delta)
 {
-	if (!timespec_valid(delta)) {
+	if (!timespec_valid_strict(delta)) {
 		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
 					"sleep delta value!\n");
 		return;
@@ -1129,6 +1152,10 @@ static void update_wall_time(void)
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
 
+	/* Check if there's really nothing to do */
+	if (offset < tk->cycle_interval)
+		goto out;
+
 	/*
 	 * With NO_HZ we may have to accumulate many cycle_intervals
 	 * (think "ticks") worth of time at once. To do this efficiently,
@@ -1161,9 +1188,9 @@ static void update_wall_time(void)
 	 * the vsyscall implementations are converted to use xtime_nsec
 	 * (shifted nanoseconds), this can be killed.
 	 */
-	remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
+	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
 	tk->xtime_nsec -= remainder;
-	tk->xtime_nsec += 1 << tk->shift;
+	tk->xtime_nsec += 1ULL << tk->shift;
 	tk->ntp_error += remainder << tk->ntp_error_shift;
 
 	/*
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 60e4d7875672..6b245f64c8dd 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
+	if (syscall_nr < 0)
+		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
@@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
+	if (syscall_nr < 0)
+		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
diff --git a/mm/filemap.c b/mm/filemap.c
index fa5ca304148e..384344575c37 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1412,12 +1412,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		retval = filemap_write_and_wait_range(mapping, pos,
 					pos + iov_length(iov, nr_segs) - 1);
 		if (!retval) {
-			struct blk_plug plug;
-
-			blk_start_plug(&plug);
 			retval = mapping->a_ops->direct_IO(READ, iocb,
 							iov, pos, nr_segs);
-			blk_finish_plug(&plug);
 		}
 		if (retval > 0) {
 			*ppos = pos + retval;
@@ -2527,14 +2523,12 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
-	struct blk_plug plug;
 	ssize_t ret;
 
 	BUG_ON(iocb->ki_pos != pos);
 
 	sb_start_write(inode->i_sb);
 	mutex_lock(&inode->i_mutex);
-	blk_start_plug(&plug);
 	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
 	mutex_unlock(&inode->i_mutex);
 
@@ -2545,7 +2539,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 		if (err < 0 && ret > 0)
 			ret = err;
 	}
-	blk_finish_plug(&plug);
 	sb_end_write(inode->i_sb);
 	return ret;
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 9adee9fc0d8a..ae18a48e7e4e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1356,9 +1356,8 @@ out:
 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 
-	if (file && uprobe_mmap(vma))
-		/* matching probes but cannot insert */
-		goto unmap_and_free_vma;
+	if (file)
+		uprobe_mmap(vma);
 
 	return addr;
 
diff --git a/mm/slab.c b/mm/slab.c
index f8b0d539b482..811af03a14ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3260,6 +3260,7 @@ force_grow:
 
 	/* cache_grow can reenable interrupts, then ac could change. */
 	ac = cpu_cache_get(cachep);
+	node = numa_mem_id();
 
 	/* no objects in sight? abort */
 	if (!x && (ac->avail == 0 || force_refill))
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 88f2bf671960..bac973a31367 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
  */
 void svc_xprt_enqueue(struct svc_xprt *xprt)
 {
-	struct svc_serv *serv = xprt->xpt_server;
 	struct svc_pool *pool;
 	struct svc_rqst *rqstp;
 	int cpu;
@@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 			rqstp, rqstp->rq_xprt);
 		rqstp->rq_xprt = xprt;
 		svc_xprt_get(xprt);
-		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		pool->sp_stats.threads_woken++;
 		wake_up(&rqstp->rq_wait);
 	} else {
@@ -640,8 +637,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	if (xprt) {
 		rqstp->rq_xprt = xprt;
 		svc_xprt_get(xprt);
-		rqstp->rq_reserved = serv->sv_max_mesg;
-		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 
 		/* As there is a shortage of threads and this request
 		 * had to be queued, don't allow the thread to wait so
@@ -738,6 +733,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	else
 		len = xprt->xpt_ops->xpo_recvfrom(rqstp);
 	dprintk("svc: got len=%d\n", len);
+	rqstp->rq_reserved = serv->sv_max_mesg;
+	atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 	}
 	svc_xprt_received(xprt);
 
@@ -794,7 +791,8 @@ int svc_send(struct svc_rqst *rqstp)
 
 	/* Grab mutex to serialize outgoing data. */
 	mutex_lock(&xprt->xpt_mutex);
-	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
+			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
 		len = -ENOTCONN;
 	else
 		len = xprt->xpt_ops->xpo_sendto(rqstp);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 18bc130255a7..998aa8c1807c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1129,9 +1129,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	if (len >= 0)
 		svsk->sk_tcplen += len;
 	if (len != want) {
+		svc_tcp_save_pages(svsk, rqstp);
 		if (len < 0 && len != -EAGAIN)
 			goto err_other;
-		svc_tcp_save_pages(svsk, rqstp);
 		dprintk("svc: incomplete TCP record (%d of %d)\n",
 			svsk->sk_tcplen, svsk->sk_reclen);
 		goto err_noclose;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 2884e67ee625..213362850abd 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,10 +10,12 @@ util/ctype.c
 util/evlist.c
 util/evsel.c
 util/cpumap.c
+util/hweight.c
 util/thread_map.c
 util/util.c
 util/xyarray.c
 util/cgroup.c
 util/debugfs.c
+util/rblist.c
 util/strlist.c
 ../../lib/rbtree.c
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 246852397e30..d617f69131d7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
 		if (copy_from_user(&csigset, sigmask_arg->sigset,
 				   sizeof csigset))
 			goto out;
-		}
-		sigset_from_compat(&sigset, &csigset);
-		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+			sigset_from_compat(&sigset, &csigset);
+			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+		} else
+			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
 		break;
 	}
 	default: