aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/unistd.h6
-rw-r--r--arch/alpha/kernel/systbls.S12
-rw-r--r--arch/alpha/kernel/time.c3
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c19
-rw-r--r--arch/arm/mach-omap2/board-rx51.c18
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c436
-rw-r--r--arch/arm/mach-omap2/pm.h17
-rw-r--r--arch/arm/mach-omap2/pm34xx.c14
-rw-r--r--arch/mips/ar7/gpio.c4
-rw-r--r--arch/mips/include/asm/dma-mapping.h2
-rw-r--r--arch/mips/kernel/traps.c6
-rw-r--r--arch/mips/rb532/gpio.c2
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c7
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c7
-rw-r--r--arch/sparc/kernel/apc.c2
-rw-r--r--arch/sparc/kernel/pci_sabre.c5
-rw-r--r--arch/sparc/kernel/pci_schizo.c8
-rw-r--r--arch/sparc/kernel/pmc.c2
-rw-r--r--arch/sparc/kernel/smp_32.c10
-rw-r--r--arch/sparc/kernel/time_32.c2
-rw-r--r--arch/sparc/lib/checksum_32.S12
-rw-r--r--arch/um/os-Linux/util.c23
-rw-r--r--arch/x86/include/asm/apicdef.h1
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h17
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h2
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h16
-rw-r--r--arch/x86/include/asm/x86_init.h12
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c48
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c1
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c12
-rw-r--r--arch/x86/kernel/kprobes.c5
-rw-r--r--arch/x86/kernel/x86_init.c4
-rw-r--r--arch/x86/mm/init.c24
-rw-r--r--arch/x86/platform/uv/tlb_uv.c92
-rw-r--r--arch/x86/xen/mmu.c138
-rw-r--r--block/blk-cgroup.c7
-rw-r--r--block/blk-cgroup.h3
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-throttle.c9
-rw-r--r--block/cfq-iosched.c11
-rw-r--r--drivers/ata/libahci.c21
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/atm/fore200e.c7
-rw-r--r--drivers/block/DAC960.c1
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/block/ataflop.c1
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/paride/pcd.c1
-rw-r--r--drivers/block/paride/pd.c1
-rw-r--r--drivers/block/paride/pf.c1
-rw-r--r--drivers/block/rbd.c177
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/ub.c1
-rw-r--r--drivers/block/xsysace.c1
-rw-r--r--drivers/cdrom/cdrom.c6
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/cdrom/viocd.c1
-rw-r--r--drivers/char/hw_random/n2-drv.c7
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c7
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c14
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c26
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c14
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen1
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c6
-rw-r--r--drivers/i2c/busses/i2c-mpc.c9
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c13
-rw-r--r--drivers/leds/leds-lm3530.c1
-rw-r--r--drivers/media/video/cx88/cx88-input.c2
-rw-r--r--drivers/media/video/soc_camera.c48
-rw-r--r--drivers/media/video/v4l2-device.c5
-rw-r--r--drivers/media/video/v4l2-subdev.c14
-rw-r--r--drivers/message/i2o/i2o_block.c1
-rw-r--r--drivers/mmc/core/host.c9
-rw-r--r--drivers/mmc/host/sdhci-of-core.c7
-rw-r--r--drivers/mtd/maps/physmap_of.c7
-rw-r--r--drivers/net/Makefile6
-rw-r--r--drivers/net/bonding/bond_3ad.h10
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c7
-rw-r--r--drivers/net/ehea/ehea_main.c6
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c9
-rw-r--r--drivers/net/fs_enet/mii-fec.c7
-rw-r--r--drivers/net/hydra.c14
-rw-r--r--drivers/net/ne-h8300.c16
-rw-r--r--drivers/net/sfc/mcdi.c49
-rw-r--r--drivers/net/sfc/nic.c7
-rw-r--r--drivers/net/sfc/nic.h2
-rw-r--r--drivers/net/sfc/siena.c25
-rw-r--r--drivers/net/sunhme.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c8
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c7
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h6
-rw-r--r--drivers/net/wireless/libertas/cmd.c6
-rw-r--r--drivers/net/zorro8390.c12
-rw-r--r--drivers/pci/setup-bus.c4
-rw-r--r--drivers/rapidio/switches/idt_gen2.c9
-rw-r--r--drivers/rapidio/switches/idtcps.c6
-rw-r--r--drivers/rapidio/switches/tsi57x.c6
-rw-r--r--drivers/rtc/rtc-davinci.c5
-rw-r--r--drivers/rtc/rtc-ds1286.c2
-rw-r--r--drivers/rtc/rtc-ep93xx.c5
-rw-r--r--drivers/rtc/rtc-m41t80.c5
-rw-r--r--drivers/rtc/rtc-max8925.c5
-rw-r--r--drivers/rtc/rtc-max8998.c5
-rw-r--r--drivers/rtc/rtc-mc13xxx.c8
-rw-r--r--drivers/rtc/rtc-msm6242.c3
-rw-r--r--drivers/rtc/rtc-mxc.c19
-rw-r--r--drivers/rtc/rtc-pcap.c4
-rw-r--r--drivers/rtc/rtc-rp5c01.c5
-rw-r--r--drivers/s390/char/tape_block.c1
-rw-r--r--drivers/scsi/qlogicpti.c7
-rw-r--r--drivers/scsi/scsi_lib.c20
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/tty/serial/of_serial.c7
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c7
-rw-r--r--drivers/video/fbmem.c223
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c7
-rw-r--r--fs/block_dev.c27
-rw-r--r--fs/btrfs/acl.c5
-rw-r--r--fs/btrfs/extent-tree.c37
-rw-r--r--fs/btrfs/ioctl.c24
-rw-r--r--fs/cifs/cifs_unicode.c14
-rw-r--r--fs/cifs/connect.c5
-rw-r--r--fs/configfs/dir.c39
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfs/nfs4filelayout.c27
-rw-r--r--fs/nfs/nfs4filelayout.h2
-rw-r--r--fs/nfs/nfs4filelayoutdev.c34
-rw-r--r--fs/nfs/nfs4proc.c6
-rw-r--r--fs/nfs/pnfs.c34
-rw-r--r--fs/nfs/pnfs.h6
-rw-r--r--fs/nfs/read.c4
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/ocfs2/cluster/heartbeat.c61
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c3
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c3
-rw-r--r--fs/ocfs2/file.c12
-rw-r--r--fs/ocfs2/journal.c3
-rw-r--r--include/drm/drm_fb_helper.h2
-rw-r--r--include/linux/capability.h13
-rw-r--r--include/linux/cred.h10
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/of_device.h8
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/net/inet_ecn.h16
-rw-r--r--include/net/llc_pdu.h8
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--kernel/capability.c12
-rw-r--r--kernel/cred.c12
-rw-r--r--kernel/time/clocksource.c4
-rw-r--r--kernel/time/tick-broadcast.c12
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/page_alloc.c1
-rw-r--r--mm/shmem.c10
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/9p/protocol.c1
-rw-r--r--net/bridge/br_netfilter.c2
-rw-r--r--net/core/dev.c22
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c6
-rw-r--r--security/selinux/ss/policydb.c4
-rw-r--r--sound/soc/codecs/ssm2602.c10
-rw-r--r--sound/soc/codecs/uda134x.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c2
-rw-r--r--sound/soc/mid-x86/sst_platform.c6
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--tools/perf/builtin-record.c2
-rw-r--r--tools/perf/builtin-test.c2
-rw-r--r--tools/perf/builtin-top.c8
-rw-r--r--tools/perf/util/evlist.c153
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/python.c2
192 files changed, 1648 insertions, 1101 deletions
diff --git a/Makefile b/Makefile
index 41ea6fbec55a..123d858dae03 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 39 3SUBLEVEL = 39
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Flesh-Eating Bats with Fangs 5NAME = Flesh-Eating Bats with Fangs
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 058937bf5a77..b1834166922d 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -452,10 +452,14 @@
452#define __NR_fanotify_init 494 452#define __NR_fanotify_init 494
453#define __NR_fanotify_mark 495 453#define __NR_fanotify_mark 495
454#define __NR_prlimit64 496 454#define __NR_prlimit64 496
455#define __NR_name_to_handle_at 497
456#define __NR_open_by_handle_at 498
457#define __NR_clock_adjtime 499
458#define __NR_syncfs 500
455 459
456#ifdef __KERNEL__ 460#ifdef __KERNEL__
457 461
458#define NR_SYSCALLS 497 462#define NR_SYSCALLS 501
459 463
460#define __ARCH_WANT_IPC_PARSE_VERSION 464#define __ARCH_WANT_IPC_PARSE_VERSION
461#define __ARCH_WANT_OLD_READDIR 465#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index a6a1de9db16f..15f999d41c75 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -498,23 +498,27 @@ sys_call_table:
498 .quad sys_ni_syscall /* sys_timerfd */ 498 .quad sys_ni_syscall /* sys_timerfd */
499 .quad sys_eventfd 499 .quad sys_eventfd
500 .quad sys_recvmmsg 500 .quad sys_recvmmsg
501 .quad sys_fallocate /* 480 */ 501 .quad sys_fallocate /* 480 */
502 .quad sys_timerfd_create 502 .quad sys_timerfd_create
503 .quad sys_timerfd_settime 503 .quad sys_timerfd_settime
504 .quad sys_timerfd_gettime 504 .quad sys_timerfd_gettime
505 .quad sys_signalfd4 505 .quad sys_signalfd4
506 .quad sys_eventfd2 /* 485 */ 506 .quad sys_eventfd2 /* 485 */
507 .quad sys_epoll_create1 507 .quad sys_epoll_create1
508 .quad sys_dup3 508 .quad sys_dup3
509 .quad sys_pipe2 509 .quad sys_pipe2
510 .quad sys_inotify_init1 510 .quad sys_inotify_init1
511 .quad sys_preadv /* 490 */ 511 .quad sys_preadv /* 490 */
512 .quad sys_pwritev 512 .quad sys_pwritev
513 .quad sys_rt_tgsigqueueinfo 513 .quad sys_rt_tgsigqueueinfo
514 .quad sys_perf_event_open 514 .quad sys_perf_event_open
515 .quad sys_fanotify_init 515 .quad sys_fanotify_init
516 .quad sys_fanotify_mark /* 495 */ 516 .quad sys_fanotify_mark /* 495 */
517 .quad sys_prlimit64 517 .quad sys_prlimit64
518 .quad sys_name_to_handle_at
519 .quad sys_open_by_handle_at
520 .quad sys_clock_adjtime
521 .quad sys_syncfs /* 500 */
518 522
519 .size sys_call_table, . - sys_call_table 523 .size sys_call_table, . - sys_call_table
520 .type sys_call_table, @object 524 .type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 918e8e0b72ff..818e74ed45dc 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = {
375 375
376static inline void register_rpcc_clocksource(long cycle_freq) 376static inline void register_rpcc_clocksource(long cycle_freq)
377{ 377{
378 clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); 378 clocksource_register_hz(&clocksource_rpcc, cycle_freq);
379 clocksource_register(&clocksource_rpcc);
380} 379}
381#else /* !CONFIG_SMP */ 380#else /* !CONFIG_SMP */
382static inline void register_rpcc_clocksource(long cycle_freq) 381static inline void register_rpcc_clocksource(long cycle_freq)
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 99218a5299ca..52dbdf3ab66c 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -59,24 +59,6 @@
59 59
60#define TWL4030_MSECURE_GPIO 22 60#define TWL4030_MSECURE_GPIO 22
61 61
62/* FIXME: These values need to be updated based on more profiling on 3430sdp*/
63static struct cpuidle_params omap3_cpuidle_params_table[] = {
64 /* C1 */
65 {1, 2, 2, 5},
66 /* C2 */
67 {1, 10, 10, 30},
68 /* C3 */
69 {1, 50, 50, 300},
70 /* C4 */
71 {1, 1500, 1800, 4000},
72 /* C5 */
73 {1, 2500, 7500, 12000},
74 /* C6 */
75 {1, 3000, 8500, 15000},
76 /* C7 */
77 {1, 10000, 30000, 300000},
78};
79
80static uint32_t board_keymap[] = { 62static uint32_t board_keymap[] = {
81 KEY(0, 0, KEY_LEFT), 63 KEY(0, 0, KEY_LEFT),
82 KEY(0, 1, KEY_RIGHT), 64 KEY(0, 1, KEY_RIGHT),
@@ -800,7 +782,6 @@ static void __init omap_3430sdp_init(void)
800 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 782 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
801 omap_board_config = sdp3430_config; 783 omap_board_config = sdp3430_config;
802 omap_board_config_size = ARRAY_SIZE(sdp3430_config); 784 omap_board_config_size = ARRAY_SIZE(sdp3430_config);
803 omap3_pm_init_cpuidle(omap3_cpuidle_params_table);
804 omap3430_i2c_init(); 785 omap3430_i2c_init();
805 omap_display_init(&sdp3430_dss_data); 786 omap_display_init(&sdp3430_dss_data);
806 if (omap_rev() > OMAP3430_REV_ES1_0) 787 if (omap_rev() > OMAP3430_REV_ES1_0)
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index f8ba20a14e62..fec4cac8fa0a 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -58,21 +58,25 @@ static struct platform_device leds_gpio = {
58 }, 58 },
59}; 59};
60 60
61/*
62 * cpuidle C-states definition override from the default values.
63 * The 'exit_latency' field is the sum of sleep and wake-up latencies.
64 */
61static struct cpuidle_params rx51_cpuidle_params[] = { 65static struct cpuidle_params rx51_cpuidle_params[] = {
62 /* C1 */ 66 /* C1 */
63 {1, 110, 162, 5}, 67 {110 + 162, 5 , 1},
64 /* C2 */ 68 /* C2 */
65 {1, 106, 180, 309}, 69 {106 + 180, 309, 1},
66 /* C3 */ 70 /* C3 */
67 {0, 107, 410, 46057}, 71 {107 + 410, 46057, 0},
68 /* C4 */ 72 /* C4 */
69 {0, 121, 3374, 46057}, 73 {121 + 3374, 46057, 0},
70 /* C5 */ 74 /* C5 */
71 {1, 855, 1146, 46057}, 75 {855 + 1146, 46057, 1},
72 /* C6 */ 76 /* C6 */
73 {0, 7580, 4134, 484329}, 77 {7580 + 4134, 484329, 0},
74 /* C7 */ 78 /* C7 */
75 {1, 7505, 15274, 484329}, 79 {7505 + 15274, 484329, 1},
76}; 80};
77 81
78static struct omap_lcd_config rx51_lcd_config = { 82static struct omap_lcd_config rx51_lcd_config = {
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 1c240eff3918..4bf6e6e8b100 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -36,36 +36,6 @@
36 36
37#ifdef CONFIG_CPU_IDLE 37#ifdef CONFIG_CPU_IDLE
38 38
39#define OMAP3_MAX_STATES 7
40#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
41#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
42#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
43#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core iactive */
44#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
45#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
46#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */
47
48#define OMAP3_STATE_MAX OMAP3_STATE_C7
49
50#define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */
51
52struct omap3_processor_cx {
53 u8 valid;
54 u8 type;
55 u32 sleep_latency;
56 u32 wakeup_latency;
57 u32 mpu_state;
58 u32 core_state;
59 u32 threshold;
60 u32 flags;
61 const char *desc;
62};
63
64struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
65struct omap3_processor_cx current_cx_state;
66struct powerdomain *mpu_pd, *core_pd, *per_pd;
67struct powerdomain *cam_pd;
68
69/* 39/*
70 * The latencies/thresholds for various C states have 40 * The latencies/thresholds for various C states have
71 * to be configured from the respective board files. 41 * to be configured from the respective board files.
@@ -75,27 +45,31 @@ struct powerdomain *cam_pd;
75 */ 45 */
76static struct cpuidle_params cpuidle_params_table[] = { 46static struct cpuidle_params cpuidle_params_table[] = {
77 /* C1 */ 47 /* C1 */
78 {1, 2, 2, 5}, 48 {2 + 2, 5, 1},
79 /* C2 */ 49 /* C2 */
80 {1, 10, 10, 30}, 50 {10 + 10, 30, 1},
81 /* C3 */ 51 /* C3 */
82 {1, 50, 50, 300}, 52 {50 + 50, 300, 1},
83 /* C4 */ 53 /* C4 */
84 {1, 1500, 1800, 4000}, 54 {1500 + 1800, 4000, 1},
85 /* C5 */ 55 /* C5 */
86 {1, 2500, 7500, 12000}, 56 {2500 + 7500, 12000, 1},
87 /* C6 */ 57 /* C6 */
88 {1, 3000, 8500, 15000}, 58 {3000 + 8500, 15000, 1},
89 /* C7 */ 59 /* C7 */
90 {1, 10000, 30000, 300000}, 60 {10000 + 30000, 300000, 1},
91}; 61};
62#define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
92 63
93static int omap3_idle_bm_check(void) 64/* Mach specific information to be recorded in the C-state driver_data */
94{ 65struct omap3_idle_statedata {
95 if (!omap3_can_sleep()) 66 u32 mpu_state;
96 return 1; 67 u32 core_state;
97 return 0; 68 u8 valid;
98} 69};
70struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];
71
72struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
99 73
100static int _cpuidle_allow_idle(struct powerdomain *pwrdm, 74static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
101 struct clockdomain *clkdm) 75 struct clockdomain *clkdm)
@@ -122,12 +96,10 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
122static int omap3_enter_idle(struct cpuidle_device *dev, 96static int omap3_enter_idle(struct cpuidle_device *dev,
123 struct cpuidle_state *state) 97 struct cpuidle_state *state)
124{ 98{
125 struct omap3_processor_cx *cx = cpuidle_get_statedata(state); 99 struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
126 struct timespec ts_preidle, ts_postidle, ts_idle; 100 struct timespec ts_preidle, ts_postidle, ts_idle;
127 u32 mpu_state = cx->mpu_state, core_state = cx->core_state; 101 u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
128 102
129 current_cx_state = *cx;
130
131 /* Used to keep track of the total time in idle */ 103 /* Used to keep track of the total time in idle */
132 getnstimeofday(&ts_preidle); 104 getnstimeofday(&ts_preidle);
133 105
@@ -140,7 +112,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
140 if (omap_irq_pending() || need_resched()) 112 if (omap_irq_pending() || need_resched())
141 goto return_sleep_time; 113 goto return_sleep_time;
142 114
143 if (cx->type == OMAP3_STATE_C1) { 115 /* Deny idle for C1 */
116 if (state == &dev->states[0]) {
144 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); 117 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
145 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); 118 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
146 } 119 }
@@ -148,7 +121,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
148 /* Execute ARM wfi */ 121 /* Execute ARM wfi */
149 omap_sram_idle(); 122 omap_sram_idle();
150 123
151 if (cx->type == OMAP3_STATE_C1) { 124 /* Re-allow idle for C1 */
125 if (state == &dev->states[0]) {
152 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); 126 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
153 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); 127 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
154 } 128 }
@@ -164,41 +138,53 @@ return_sleep_time:
164} 138}
165 139
166/** 140/**
167 * next_valid_state - Find next valid c-state 141 * next_valid_state - Find next valid C-state
168 * @dev: cpuidle device 142 * @dev: cpuidle device
169 * @state: Currently selected c-state 143 * @state: Currently selected C-state
170 * 144 *
171 * If the current state is valid, it is returned back to the caller. 145 * If the current state is valid, it is returned back to the caller.
172 * Else, this function searches for a lower c-state which is still 146 * Else, this function searches for a lower c-state which is still
173 * valid (as defined in omap3_power_states[]). 147 * valid.
148 *
149 * A state is valid if the 'valid' field is enabled and
150 * if it satisfies the enable_off_mode condition.
174 */ 151 */
175static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, 152static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
176 struct cpuidle_state *curr) 153 struct cpuidle_state *curr)
177{ 154{
178 struct cpuidle_state *next = NULL; 155 struct cpuidle_state *next = NULL;
179 struct omap3_processor_cx *cx; 156 struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
157 u32 mpu_deepest_state = PWRDM_POWER_RET;
158 u32 core_deepest_state = PWRDM_POWER_RET;
180 159
181 cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr); 160 if (enable_off_mode) {
161 mpu_deepest_state = PWRDM_POWER_OFF;
162 /*
163 * Erratum i583: valable for ES rev < Es1.2 on 3630.
164 * CORE OFF mode is not supported in a stable form, restrict
165 * instead the CORE state to RET.
166 */
167 if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
168 core_deepest_state = PWRDM_POWER_OFF;
169 }
182 170
183 /* Check if current state is valid */ 171 /* Check if current state is valid */
184 if (cx->valid) { 172 if ((cx->valid) &&
173 (cx->mpu_state >= mpu_deepest_state) &&
174 (cx->core_state >= core_deepest_state)) {
185 return curr; 175 return curr;
186 } else { 176 } else {
187 u8 idx = OMAP3_STATE_MAX; 177 int idx = OMAP3_NUM_STATES - 1;
188 178
189 /* 179 /* Reach the current state starting at highest C-state */
190 * Reach the current state starting at highest C-state 180 for (; idx >= 0; idx--) {
191 */
192 for (; idx >= OMAP3_STATE_C1; idx--) {
193 if (&dev->states[idx] == curr) { 181 if (&dev->states[idx] == curr) {
194 next = &dev->states[idx]; 182 next = &dev->states[idx];
195 break; 183 break;
196 } 184 }
197 } 185 }
198 186
199 /* 187 /* Should never hit this condition */
200 * Should never hit this condition.
201 */
202 WARN_ON(next == NULL); 188 WARN_ON(next == NULL);
203 189
204 /* 190 /*
@@ -206,17 +192,17 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
206 * Start search from the next (lower) state. 192 * Start search from the next (lower) state.
207 */ 193 */
208 idx--; 194 idx--;
209 for (; idx >= OMAP3_STATE_C1; idx--) { 195 for (; idx >= 0; idx--) {
210 struct omap3_processor_cx *cx;
211
212 cx = cpuidle_get_statedata(&dev->states[idx]); 196 cx = cpuidle_get_statedata(&dev->states[idx]);
213 if (cx->valid) { 197 if ((cx->valid) &&
198 (cx->mpu_state >= mpu_deepest_state) &&
199 (cx->core_state >= core_deepest_state)) {
214 next = &dev->states[idx]; 200 next = &dev->states[idx];
215 break; 201 break;
216 } 202 }
217 } 203 }
218 /* 204 /*
219 * C1 and C2 are always valid. 205 * C1 is always valid.
220 * So, no need to check for 'next==NULL' outside this loop. 206 * So, no need to check for 'next==NULL' outside this loop.
221 */ 207 */
222 } 208 }
@@ -229,36 +215,22 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
229 * @dev: cpuidle device 215 * @dev: cpuidle device
230 * @state: The target state to be programmed 216 * @state: The target state to be programmed
231 * 217 *
232 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This 218 * This function checks for any pending activity and then programs
233 * function checks for any pending activity and then programs the 219 * the device to the specified or a safer state.
234 * device to the specified or a safer state.
235 */ 220 */
236static int omap3_enter_idle_bm(struct cpuidle_device *dev, 221static int omap3_enter_idle_bm(struct cpuidle_device *dev,
237 struct cpuidle_state *state) 222 struct cpuidle_state *state)
238{ 223{
239 struct cpuidle_state *new_state = next_valid_state(dev, state); 224 struct cpuidle_state *new_state;
240 u32 core_next_state, per_next_state = 0, per_saved_state = 0; 225 u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
241 u32 cam_state; 226 struct omap3_idle_statedata *cx;
242 struct omap3_processor_cx *cx;
243 int ret; 227 int ret;
244 228
245 if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) { 229 if (!omap3_can_sleep()) {
246 BUG_ON(!dev->safe_state);
247 new_state = dev->safe_state; 230 new_state = dev->safe_state;
248 goto select_state; 231 goto select_state;
249 } 232 }
250 233
251 cx = cpuidle_get_statedata(state);
252 core_next_state = cx->core_state;
253
254 /*
255 * FIXME: we currently manage device-specific idle states
256 * for PER and CORE in combination with CPU-specific
257 * idle states. This is wrong, and device-specific
258 * idle management needs to be separated out into
259 * its own code.
260 */
261
262 /* 234 /*
263 * Prevent idle completely if CAM is active. 235 * Prevent idle completely if CAM is active.
264 * CAM does not have wakeup capability in OMAP3. 236 * CAM does not have wakeup capability in OMAP3.
@@ -270,9 +242,19 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
270 } 242 }
271 243
272 /* 244 /*
245 * FIXME: we currently manage device-specific idle states
246 * for PER and CORE in combination with CPU-specific
247 * idle states. This is wrong, and device-specific
248 * idle management needs to be separated out into
249 * its own code.
250 */
251
252 /*
273 * Prevent PER off if CORE is not in retention or off as this 253 * Prevent PER off if CORE is not in retention or off as this
274 * would disable PER wakeups completely. 254 * would disable PER wakeups completely.
275 */ 255 */
256 cx = cpuidle_get_statedata(state);
257 core_next_state = cx->core_state;
276 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); 258 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
277 if ((per_next_state == PWRDM_POWER_OFF) && 259 if ((per_next_state == PWRDM_POWER_OFF) &&
278 (core_next_state > PWRDM_POWER_RET)) 260 (core_next_state > PWRDM_POWER_RET))
@@ -282,6 +264,8 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
282 if (per_next_state != per_saved_state) 264 if (per_next_state != per_saved_state)
283 pwrdm_set_next_pwrst(per_pd, per_next_state); 265 pwrdm_set_next_pwrst(per_pd, per_next_state);
284 266
267 new_state = next_valid_state(dev, state);
268
285select_state: 269select_state:
286 dev->last_state = new_state; 270 dev->last_state = new_state;
287 ret = omap3_enter_idle(dev, new_state); 271 ret = omap3_enter_idle(dev, new_state);
@@ -295,31 +279,6 @@ select_state:
295 279
296DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); 280DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
297 281
298/**
299 * omap3_cpuidle_update_states() - Update the cpuidle states
300 * @mpu_deepest_state: Enable states up to and including this for mpu domain
301 * @core_deepest_state: Enable states up to and including this for core domain
302 *
303 * This goes through the list of states available and enables and disables the
304 * validity of C states based on deepest state that can be achieved for the
305 * variable domain
306 */
307void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
308{
309 int i;
310
311 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
312 struct omap3_processor_cx *cx = &omap3_power_states[i];
313
314 if ((cx->mpu_state >= mpu_deepest_state) &&
315 (cx->core_state >= core_deepest_state)) {
316 cx->valid = 1;
317 } else {
318 cx->valid = 0;
319 }
320 }
321}
322
323void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params) 282void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
324{ 283{
325 int i; 284 int i;
@@ -327,212 +286,109 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
327 if (!cpuidle_board_params) 286 if (!cpuidle_board_params)
328 return; 287 return;
329 288
330 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { 289 for (i = 0; i < OMAP3_NUM_STATES; i++) {
331 cpuidle_params_table[i].valid = 290 cpuidle_params_table[i].valid = cpuidle_board_params[i].valid;
332 cpuidle_board_params[i].valid; 291 cpuidle_params_table[i].exit_latency =
333 cpuidle_params_table[i].sleep_latency = 292 cpuidle_board_params[i].exit_latency;
334 cpuidle_board_params[i].sleep_latency; 293 cpuidle_params_table[i].target_residency =
335 cpuidle_params_table[i].wake_latency = 294 cpuidle_board_params[i].target_residency;
336 cpuidle_board_params[i].wake_latency;
337 cpuidle_params_table[i].threshold =
338 cpuidle_board_params[i].threshold;
339 } 295 }
340 return; 296 return;
341} 297}
342 298
343/* omap3_init_power_states - Initialises the OMAP3 specific C states.
344 *
345 * Below is the desciption of each C state.
346 * C1 . MPU WFI + Core active
347 * C2 . MPU WFI + Core inactive
348 * C3 . MPU CSWR + Core inactive
349 * C4 . MPU OFF + Core inactive
350 * C5 . MPU CSWR + Core CSWR
351 * C6 . MPU OFF + Core CSWR
352 * C7 . MPU OFF + Core OFF
353 */
354void omap_init_power_states(void)
355{
356 /* C1 . MPU WFI + Core active */
357 omap3_power_states[OMAP3_STATE_C1].valid =
358 cpuidle_params_table[OMAP3_STATE_C1].valid;
359 omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
360 omap3_power_states[OMAP3_STATE_C1].sleep_latency =
361 cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
362 omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
363 cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
364 omap3_power_states[OMAP3_STATE_C1].threshold =
365 cpuidle_params_table[OMAP3_STATE_C1].threshold;
366 omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
367 omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
368 omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
369 omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";
370
371 /* C2 . MPU WFI + Core inactive */
372 omap3_power_states[OMAP3_STATE_C2].valid =
373 cpuidle_params_table[OMAP3_STATE_C2].valid;
374 omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
375 omap3_power_states[OMAP3_STATE_C2].sleep_latency =
376 cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
377 omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
378 cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
379 omap3_power_states[OMAP3_STATE_C2].threshold =
380 cpuidle_params_table[OMAP3_STATE_C2].threshold;
381 omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
382 omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
383 omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
384 CPUIDLE_FLAG_CHECK_BM;
385 omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";
386
387 /* C3 . MPU CSWR + Core inactive */
388 omap3_power_states[OMAP3_STATE_C3].valid =
389 cpuidle_params_table[OMAP3_STATE_C3].valid;
390 omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
391 omap3_power_states[OMAP3_STATE_C3].sleep_latency =
392 cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
393 omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
394 cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
395 omap3_power_states[OMAP3_STATE_C3].threshold =
396 cpuidle_params_table[OMAP3_STATE_C3].threshold;
397 omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
398 omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
399 omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
400 CPUIDLE_FLAG_CHECK_BM;
401 omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";
402
403 /* C4 . MPU OFF + Core inactive */
404 omap3_power_states[OMAP3_STATE_C4].valid =
405 cpuidle_params_table[OMAP3_STATE_C4].valid;
406 omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
407 omap3_power_states[OMAP3_STATE_C4].sleep_latency =
408 cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
409 omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
410 cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
411 omap3_power_states[OMAP3_STATE_C4].threshold =
412 cpuidle_params_table[OMAP3_STATE_C4].threshold;
413 omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
414 omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
415 omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
416 CPUIDLE_FLAG_CHECK_BM;
417 omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";
418
419 /* C5 . MPU CSWR + Core CSWR*/
420 omap3_power_states[OMAP3_STATE_C5].valid =
421 cpuidle_params_table[OMAP3_STATE_C5].valid;
422 omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
423 omap3_power_states[OMAP3_STATE_C5].sleep_latency =
424 cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
425 omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
426 cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
427 omap3_power_states[OMAP3_STATE_C5].threshold =
428 cpuidle_params_table[OMAP3_STATE_C5].threshold;
429 omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
430 omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
431 omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
432 CPUIDLE_FLAG_CHECK_BM;
433 omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";
434
435 /* C6 . MPU OFF + Core CSWR */
436 omap3_power_states[OMAP3_STATE_C6].valid =
437 cpuidle_params_table[OMAP3_STATE_C6].valid;
438 omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
439 omap3_power_states[OMAP3_STATE_C6].sleep_latency =
440 cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
441 omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
442 cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
443 omap3_power_states[OMAP3_STATE_C6].threshold =
444 cpuidle_params_table[OMAP3_STATE_C6].threshold;
445 omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
446 omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
447 omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
448 CPUIDLE_FLAG_CHECK_BM;
449 omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";
450
451 /* C7 . MPU OFF + Core OFF */
452 omap3_power_states[OMAP3_STATE_C7].valid =
453 cpuidle_params_table[OMAP3_STATE_C7].valid;
454 omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
455 omap3_power_states[OMAP3_STATE_C7].sleep_latency =
456 cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
457 omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
458 cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
459 omap3_power_states[OMAP3_STATE_C7].threshold =
460 cpuidle_params_table[OMAP3_STATE_C7].threshold;
461 omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
462 omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
463 omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
464 CPUIDLE_FLAG_CHECK_BM;
465 omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";
466
467 /*
468 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
469 * enable OFF mode in a stable form for previous revisions.
470 * we disable C7 state as a result.
471 */
472 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
473 omap3_power_states[OMAP3_STATE_C7].valid = 0;
474 cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
475 pr_warn("%s: core off state C7 disabled due to i583\n",
476 __func__);
477 }
478}
479
480struct cpuidle_driver omap3_idle_driver = { 299struct cpuidle_driver omap3_idle_driver = {
481 .name = "omap3_idle", 300 .name = "omap3_idle",
482 .owner = THIS_MODULE, 301 .owner = THIS_MODULE,
483}; 302};
484 303
304/* Helper to fill the C-state common data and register the driver_data */
305static inline struct omap3_idle_statedata *_fill_cstate(
306 struct cpuidle_device *dev,
307 int idx, const char *descr)
308{
309 struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
310 struct cpuidle_state *state = &dev->states[idx];
311
312 state->exit_latency = cpuidle_params_table[idx].exit_latency;
313 state->target_residency = cpuidle_params_table[idx].target_residency;
314 state->flags = CPUIDLE_FLAG_TIME_VALID;
315 state->enter = omap3_enter_idle_bm;
316 cx->valid = cpuidle_params_table[idx].valid;
317 sprintf(state->name, "C%d", idx + 1);
318 strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
319 cpuidle_set_statedata(state, cx);
320
321 return cx;
322}
323
485/** 324/**
486 * omap3_idle_init - Init routine for OMAP3 idle 325 * omap3_idle_init - Init routine for OMAP3 idle
487 * 326 *
488 * Registers the OMAP3 specific cpuidle driver with the cpuidle 327 * Registers the OMAP3 specific cpuidle driver to the cpuidle
489 * framework with the valid set of states. 328 * framework with the valid set of states.
490 */ 329 */
491int __init omap3_idle_init(void) 330int __init omap3_idle_init(void)
492{ 331{
493 int i, count = 0;
494 struct omap3_processor_cx *cx;
495 struct cpuidle_state *state;
496 struct cpuidle_device *dev; 332 struct cpuidle_device *dev;
333 struct omap3_idle_statedata *cx;
497 334
498 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 335 mpu_pd = pwrdm_lookup("mpu_pwrdm");
499 core_pd = pwrdm_lookup("core_pwrdm"); 336 core_pd = pwrdm_lookup("core_pwrdm");
500 per_pd = pwrdm_lookup("per_pwrdm"); 337 per_pd = pwrdm_lookup("per_pwrdm");
501 cam_pd = pwrdm_lookup("cam_pwrdm"); 338 cam_pd = pwrdm_lookup("cam_pwrdm");
502 339
503 omap_init_power_states();
504 cpuidle_register_driver(&omap3_idle_driver); 340 cpuidle_register_driver(&omap3_idle_driver);
505
506 dev = &per_cpu(omap3_idle_dev, smp_processor_id()); 341 dev = &per_cpu(omap3_idle_dev, smp_processor_id());
507 342
508 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { 343 /* C1 . MPU WFI + Core active */
509 cx = &omap3_power_states[i]; 344 cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
510 state = &dev->states[count]; 345 (&dev->states[0])->enter = omap3_enter_idle;
511 346 dev->safe_state = &dev->states[0];
512 if (!cx->valid) 347 cx->valid = 1; /* C1 is always valid */
513 continue; 348 cx->mpu_state = PWRDM_POWER_ON;
514 cpuidle_set_statedata(state, cx); 349 cx->core_state = PWRDM_POWER_ON;
515 state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
516 state->target_residency = cx->threshold;
517 state->flags = cx->flags;
518 state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
519 omap3_enter_idle_bm : omap3_enter_idle;
520 if (cx->type == OMAP3_STATE_C1)
521 dev->safe_state = state;
522 sprintf(state->name, "C%d", count+1);
523 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
524 count++;
525 }
526 350
527 if (!count) 351 /* C2 . MPU WFI + Core inactive */
528 return -EINVAL; 352 cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
529 dev->state_count = count; 353 cx->mpu_state = PWRDM_POWER_ON;
354 cx->core_state = PWRDM_POWER_ON;
355
356 /* C3 . MPU CSWR + Core inactive */
357 cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
358 cx->mpu_state = PWRDM_POWER_RET;
359 cx->core_state = PWRDM_POWER_ON;
530 360
531 if (enable_off_mode) 361 /* C4 . MPU OFF + Core inactive */
532 omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF); 362 cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
533 else 363 cx->mpu_state = PWRDM_POWER_OFF;
534 omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET); 364 cx->core_state = PWRDM_POWER_ON;
365
366 /* C5 . MPU RET + Core RET */
367 cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
368 cx->mpu_state = PWRDM_POWER_RET;
369 cx->core_state = PWRDM_POWER_RET;
370
371 /* C6 . MPU OFF + Core RET */
372 cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
373 cx->mpu_state = PWRDM_POWER_OFF;
374 cx->core_state = PWRDM_POWER_RET;
375
376 /* C7 . MPU OFF + Core OFF */
377 cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
378 /*
379 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
380 * enable OFF mode in a stable form for previous revisions.
381 * We disable C7 state as a result.
382 */
383 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
384 cx->valid = 0;
385 pr_warn("%s: core off state C7 disabled due to i583\n",
386 __func__);
387 }
388 cx->mpu_state = PWRDM_POWER_OFF;
389 cx->core_state = PWRDM_POWER_OFF;
535 390
391 dev->state_count = OMAP3_NUM_STATES;
536 if (cpuidle_register_device(dev)) { 392 if (cpuidle_register_device(dev)) {
537 printk(KERN_ERR "%s: CPUidle register device failed\n", 393 printk(KERN_ERR "%s: CPUidle register device failed\n",
538 __func__); 394 __func__);
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 797bfd12b643..45bcfce77352 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -36,11 +36,16 @@ static inline int omap4_opp_init(void)
36} 36}
37#endif 37#endif
38 38
39/*
40 * cpuidle mach specific parameters
41 *
42 * The board code can override the default C-states definition using
43 * omap3_pm_init_cpuidle
44 */
39struct cpuidle_params { 45struct cpuidle_params {
40 u8 valid; 46 u32 exit_latency; /* exit_latency = sleep + wake-up latencies */
41 u32 sleep_latency; 47 u32 target_residency;
42 u32 wake_latency; 48 u8 valid; /* validates the C-state */
43 u32 threshold;
44}; 49};
45 50
46#if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE) 51#if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE)
@@ -73,10 +78,6 @@ extern u32 sleep_while_idle;
73#define sleep_while_idle 0 78#define sleep_while_idle 0
74#endif 79#endif
75 80
76#if defined(CONFIG_CPU_IDLE)
77extern void omap3_cpuidle_update_states(u32, u32);
78#endif
79
80#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) 81#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
81extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev); 82extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
82extern int pm_dbg_regset_save(int reg_set); 83extern int pm_dbg_regset_save(int reg_set);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 0c5e3a46a3ad..c155c9d1c82c 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -779,18 +779,6 @@ void omap3_pm_off_mode_enable(int enable)
779 else 779 else
780 state = PWRDM_POWER_RET; 780 state = PWRDM_POWER_RET;
781 781
782#ifdef CONFIG_CPU_IDLE
783 /*
784 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
785 * enable OFF mode in a stable form for previous revisions, restrict
786 * instead to RET
787 */
788 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
789 omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
790 else
791 omap3_cpuidle_update_states(state, state);
792#endif
793
794 list_for_each_entry(pwrst, &pwrst_list, node) { 782 list_for_each_entry(pwrst, &pwrst_list, node) {
795 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) && 783 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
796 pwrst->pwrdm == core_pwrdm && 784 pwrst->pwrdm == core_pwrdm &&
@@ -895,8 +883,6 @@ static int __init omap3_pm_init(void)
895 883
896 pm_errata_configure(); 884 pm_errata_configure();
897 885
898 printk(KERN_ERR "Power Management for TI OMAP3.\n");
899
900 /* XXX prcm_setup_regs needs to be before enabling hw 886 /* XXX prcm_setup_regs needs to be before enabling hw
901 * supervised mode for powerdomains */ 887 * supervised mode for powerdomains */
902 prcm_setup_regs(); 888 prcm_setup_regs();
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index 425dfa5d6e12..bb571bcdb8f2 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -325,9 +325,7 @@ int __init ar7_gpio_init(void)
325 size = 0x1f; 325 size = 0x1f;
326 } 326 }
327 327
328 gpch->regs = ioremap_nocache(AR7_REGS_GPIO, 328 gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size);
329 AR7_REGS_GPIO + 0x10);
330
331 if (!gpch->regs) { 329 if (!gpch->regs) {
332 printk(KERN_ERR "%s: failed to ioremap regs\n", 330 printk(KERN_ERR "%s: failed to ioremap regs\n",
333 gpch->chip.label); 331 gpch->chip.label);
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 655f849bd08d..7aa37ddfca4b 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,7 +5,9 @@
5#include <asm/cache.h> 5#include <asm/cache.h>
6#include <asm-generic/dma-coherent.h> 6#include <asm-generic/dma-coherent.h>
7 7
8#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
8#include <dma-coherence.h> 9#include <dma-coherence.h>
10#endif
9 11
10extern struct dma_map_ops *mips_dma_map_ops; 12extern struct dma_map_ops *mips_dma_map_ops;
11 13
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 71350f7f2d88..e9b3af27d844 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
374 unsigned long dvpret = dvpe(); 374 unsigned long dvpret = dvpe();
375#endif /* CONFIG_MIPS_MT_SMTC */ 375#endif /* CONFIG_MIPS_MT_SMTC */
376 376
377 notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); 377 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
378 sig = 0;
378 379
379 console_verbose(); 380 console_verbose();
380 spin_lock_irq(&die_lock); 381 spin_lock_irq(&die_lock);
@@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
383 mips_mt_regdump(dvpret); 384 mips_mt_regdump(dvpret);
384#endif /* CONFIG_MIPS_MT_SMTC */ 385#endif /* CONFIG_MIPS_MT_SMTC */
385 386
386 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
387 sig = 0;
388
389 printk("%s[#%d]:\n", str, ++die_counter); 387 printk("%s[#%d]:\n", str, ++die_counter);
390 show_registers(regs); 388 show_registers(regs);
391 add_taint(TAINT_DIE); 389 add_taint(TAINT_DIE);
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 37de05d595e7..6c47dfeb7be3 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -185,7 +185,7 @@ int __init rb532_gpio_init(void)
185 struct resource *r; 185 struct resource *r;
186 186
187 r = rb532_gpio_reg0_res; 187 r = rb532_gpio_reg0_res;
188 rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); 188 rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r));
189 189
190 if (!rb532_gpio_chip->regbase) { 190 if (!rb532_gpio_chip->regbase) {
191 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); 191 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 188272934cfb..104faa8aa23c 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
318 .end = mpc83xx_suspend_end, 318 .end = mpc83xx_suspend_end,
319}; 319};
320 320
321static struct of_device_id pmc_match[];
321static int pmc_probe(struct platform_device *ofdev) 322static int pmc_probe(struct platform_device *ofdev)
322{ 323{
324 const struct of_device_id *match;
323 struct device_node *np = ofdev->dev.of_node; 325 struct device_node *np = ofdev->dev.of_node;
324 struct resource res; 326 struct resource res;
325 struct pmc_type *type; 327 struct pmc_type *type;
326 int ret = 0; 328 int ret = 0;
327 329
328 if (!ofdev->dev.of_match) 330 match = of_match_device(pmc_match, &ofdev->dev);
331 if (!match)
329 return -EINVAL; 332 return -EINVAL;
330 333
331 type = ofdev->dev.of_match->data; 334 type = match->data;
332 335
333 if (!of_device_is_available(np)) 336 if (!of_device_is_available(np))
334 return -ENODEV; 337 return -ENODEV;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index d5679dc1e20f..01cd2f089512 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
304 return 0; 304 return 0;
305} 305}
306 306
307static const struct of_device_id fsl_of_msi_ids[];
307static int __devinit fsl_of_msi_probe(struct platform_device *dev) 308static int __devinit fsl_of_msi_probe(struct platform_device *dev)
308{ 309{
310 const struct of_device_id *match;
309 struct fsl_msi *msi; 311 struct fsl_msi *msi;
310 struct resource res; 312 struct resource res;
311 int err, i, j, irq_index, count; 313 int err, i, j, irq_index, count;
@@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
316 u32 offset; 318 u32 offset;
317 static const u32 all_avail[] = { 0, NR_MSI_IRQS }; 319 static const u32 all_avail[] = { 0, NR_MSI_IRQS };
318 320
319 if (!dev->dev.of_match) 321 match = of_match_device(fsl_of_msi_ids, &dev->dev);
322 if (!match)
320 return -EINVAL; 323 return -EINVAL;
321 features = dev->dev.of_match->data; 324 features = match->data;
322 325
323 printk(KERN_DEBUG "Setting up Freescale MSI support\n"); 326 printk(KERN_DEBUG "Setting up Freescale MSI support\n");
324 327
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index f679c57644d5..1e34f29e58bb 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op)
165 return 0; 165 return 0;
166} 166}
167 167
168static struct of_device_id __initdata apc_match[] = { 168static struct of_device_id apc_match[] = {
169 { 169 {
170 .name = APC_OBPNAME, 170 .name = APC_OBPNAME,
171 }, 171 },
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 948068a083fc..d1840dbdaa2f 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
452 sabre_scan_bus(pbm, &op->dev); 452 sabre_scan_bus(pbm, &op->dev);
453} 453}
454 454
455static const struct of_device_id sabre_match[];
455static int __devinit sabre_probe(struct platform_device *op) 456static int __devinit sabre_probe(struct platform_device *op)
456{ 457{
458 const struct of_device_id *match;
457 const struct linux_prom64_registers *pr_regs; 459 const struct linux_prom64_registers *pr_regs;
458 struct device_node *dp = op->dev.of_node; 460 struct device_node *dp = op->dev.of_node;
459 struct pci_pbm_info *pbm; 461 struct pci_pbm_info *pbm;
@@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op)
463 const u32 *vdma; 465 const u32 *vdma;
464 u64 clear_irq; 466 u64 clear_irq;
465 467
466 hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); 468 match = of_match_device(sabre_match, &op->dev);
469 hummingbird_p = match && (match->data != NULL);
467 if (!hummingbird_p) { 470 if (!hummingbird_p) {
468 struct device_node *cpu_dp; 471 struct device_node *cpu_dp;
469 472
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index fecfcb2063c8..283fbc329a43 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1458,11 +1458,15 @@ out_err:
1458 return err; 1458 return err;
1459} 1459}
1460 1460
1461static const struct of_device_id schizo_match[];
1461static int __devinit schizo_probe(struct platform_device *op) 1462static int __devinit schizo_probe(struct platform_device *op)
1462{ 1463{
1463 if (!op->dev.of_match) 1464 const struct of_device_id *match;
1465
1466 match = of_match_device(schizo_match, &op->dev);
1467 if (!match)
1464 return -EINVAL; 1468 return -EINVAL;
1465 return __schizo_init(op, (unsigned long) op->dev.of_match->data); 1469 return __schizo_init(op, (unsigned long)match->data);
1466} 1470}
1467 1471
1468/* The ordering of this table is very important. Some Tomatillo 1472/* The ordering of this table is very important. Some Tomatillo
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 93d7b4465f8d..6a585d393580 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op)
69 return 0; 69 return 0;
70} 70}
71 71
72static struct of_device_id __initdata pmc_match[] = { 72static struct of_device_id pmc_match[] = {
73 { 73 {
74 .name = PMC_OBPNAME, 74 .name = PMC_OBPNAME,
75 }, 75 },
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 91c10fb70858..850a1360c0d6 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
53void __cpuinit smp_store_cpu_info(int id) 53void __cpuinit smp_store_cpu_info(int id)
54{ 54{
55 int cpu_node; 55 int cpu_node;
56 int mid;
56 57
57 cpu_data(id).udelay_val = loops_per_jiffy; 58 cpu_data(id).udelay_val = loops_per_jiffy;
58 59
@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id)
60 cpu_data(id).clock_tick = prom_getintdefault(cpu_node, 61 cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
61 "clock-frequency", 0); 62 "clock-frequency", 0);
62 cpu_data(id).prom_node = cpu_node; 63 cpu_data(id).prom_node = cpu_node;
63 cpu_data(id).mid = cpu_get_hwmid(cpu_node); 64 mid = cpu_get_hwmid(cpu_node);
64 65
65 if (cpu_data(id).mid < 0) 66 if (mid < 0) {
66 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); 67 printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
68 mid = 0;
69 }
70 cpu_data(id).mid = mid;
67} 71}
68 72
69void __init smp_cpus_done(unsigned int max_cpus) 73void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 4e236391b635..96046a4024c2 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op)
168 return 0; 168 return 0;
169} 169}
170 170
171static struct of_device_id __initdata clock_match[] = { 171static struct of_device_id clock_match[] = {
172 { 172 {
173 .name = "eeprom", 173 .name = "eeprom",
174 }, 174 },
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 3632cb34e914..0084c3361e15 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -289,10 +289,16 @@ cc_end_cruft:
289 289
290 /* Also, handle the alignment code out of band. */ 290 /* Also, handle the alignment code out of band. */
291cc_dword_align: 291cc_dword_align:
292 cmp %g1, 6 292 cmp %g1, 16
293 bl,a ccte 293 bge 1f
294 srl %g1, 1, %o3
2952: cmp %o3, 0
296 be,a ccte
294 andcc %g1, 0xf, %o3 297 andcc %g1, 0xf, %o3
295 andcc %o0, 0x1, %g0 298 andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits)
299 be,a 2b
300 srl %o3, 1, %o3
3011: andcc %o0, 0x1, %g0
296 bne ccslow 302 bne ccslow
297 andcc %o0, 0x2, %g0 303 andcc %o0, 0x2, %g0
298 be 1f 304 be 1f
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index 6ea77979531c..42827cafa6af 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -5,6 +5,7 @@
5 5
6#include <stdio.h> 6#include <stdio.h>
7#include <stdlib.h> 7#include <stdlib.h>
8#include <unistd.h>
8#include <errno.h> 9#include <errno.h>
9#include <signal.h> 10#include <signal.h>
10#include <string.h> 11#include <string.h>
@@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len)
75 host.release, host.version, host.machine); 76 host.release, host.version, host.machine);
76} 77}
77 78
79/*
80 * We cannot use glibc's abort(). It makes use of tgkill() which
81 * has no effect within UML's kernel threads.
82 * After that glibc would execute an invalid instruction to kill
83 * the calling process and UML crashes with SIGSEGV.
84 */
85static inline void __attribute__ ((noreturn)) uml_abort(void)
86{
87 sigset_t sig;
88
89 fflush(NULL);
90
91 if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT))
92 sigprocmask(SIG_UNBLOCK, &sig, 0);
93
94 for (;;)
95 if (kill(getpid(), SIGABRT) < 0)
96 exit(127);
97}
98
78void os_dump_core(void) 99void os_dump_core(void)
79{ 100{
80 int pid; 101 int pid;
@@ -116,5 +137,5 @@ void os_dump_core(void)
116 while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) 137 while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0)
117 os_kill_ptraced_process(pid, 0); 138 os_kill_ptraced_process(pid, 0);
118 139
119 abort(); 140 uml_abort();
120} 141}
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index d87988bacf3e..34595d5e1038 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -78,6 +78,7 @@
78#define APIC_DEST_LOGICAL 0x00800 78#define APIC_DEST_LOGICAL 0x00800
79#define APIC_DEST_PHYSICAL 0x00000 79#define APIC_DEST_PHYSICAL 0x00000
80#define APIC_DM_FIXED 0x00000 80#define APIC_DM_FIXED 0x00000
81#define APIC_DM_FIXED_MASK 0x00700
81#define APIC_DM_LOWEST 0x00100 82#define APIC_DM_LOWEST 0x00100
82#define APIC_DM_SMI 0x00200 83#define APIC_DM_SMI 0x00200
83#define APIC_DM_REMRD 0x00300 84#define APIC_DM_REMRD 0x00300
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7db7723d1f32..d56187c6b838 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
299/* Install a pte for a particular vaddr in kernel space. */ 299/* Install a pte for a particular vaddr in kernel space. */
300void set_pte_vaddr(unsigned long vaddr, pte_t pte); 300void set_pte_vaddr(unsigned long vaddr, pte_t pte);
301 301
302extern void native_pagetable_reserve(u64 start, u64 end);
302#ifdef CONFIG_X86_32 303#ifdef CONFIG_X86_32
303extern void native_pagetable_setup_start(pgd_t *base); 304extern void native_pagetable_setup_start(pgd_t *base);
304extern void native_pagetable_setup_done(pgd_t *base); 305extern void native_pagetable_setup_done(pgd_t *base);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 3e094af443c3..130f1eeee5fe 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -94,6 +94,8 @@
94/* after this # consecutive successes, bump up the throttle if it was lowered */ 94/* after this # consecutive successes, bump up the throttle if it was lowered */
95#define COMPLETE_THRESHOLD 5 95#define COMPLETE_THRESHOLD 5
96 96
97#define UV_LB_SUBNODEID 0x10
98
97/* 99/*
98 * number of entries in the destination side payload queue 100 * number of entries in the destination side payload queue
99 */ 101 */
@@ -124,7 +126,7 @@
124 * The distribution specification (32 bytes) is interpreted as a 256-bit 126 * The distribution specification (32 bytes) is interpreted as a 256-bit
125 * distribution vector. Adjacent bits correspond to consecutive even numbered 127 * distribution vector. Adjacent bits correspond to consecutive even numbered
126 * nodeIDs. The result of adding the index of a given bit to the 15-bit 128 * nodeIDs. The result of adding the index of a given bit to the 15-bit
127 * 'base_dest_nodeid' field of the header corresponds to the 129 * 'base_dest_nasid' field of the header corresponds to the
128 * destination nodeID associated with that specified bit. 130 * destination nodeID associated with that specified bit.
129 */ 131 */
130struct bau_target_uvhubmask { 132struct bau_target_uvhubmask {
@@ -176,7 +178,7 @@ struct bau_msg_payload {
176struct bau_msg_header { 178struct bau_msg_header {
177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 179 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
178 /* bits 5:0 */ 180 /* bits 5:0 */
179 unsigned int base_dest_nodeid:15; /* nasid of the */ 181 unsigned int base_dest_nasid:15; /* nasid of the */
180 /* bits 20:6 */ /* first bit in uvhub map */ 182 /* bits 20:6 */ /* first bit in uvhub map */
181 unsigned int command:8; /* message type */ 183 unsigned int command:8; /* message type */
182 /* bits 28:21 */ 184 /* bits 28:21 */
@@ -378,6 +380,10 @@ struct ptc_stats {
378 unsigned long d_rcanceled; /* number of messages canceled by resets */ 380 unsigned long d_rcanceled; /* number of messages canceled by resets */
379}; 381};
380 382
383struct hub_and_pnode {
384 short uvhub;
385 short pnode;
386};
381/* 387/*
382 * one per-cpu; to locate the software tables 388 * one per-cpu; to locate the software tables
383 */ 389 */
@@ -399,10 +405,12 @@ struct bau_control {
399 int baudisabled; 405 int baudisabled;
400 int set_bau_off; 406 int set_bau_off;
401 short cpu; 407 short cpu;
408 short osnode;
402 short uvhub_cpu; 409 short uvhub_cpu;
403 short uvhub; 410 short uvhub;
404 short cpus_in_socket; 411 short cpus_in_socket;
405 short cpus_in_uvhub; 412 short cpus_in_uvhub;
413 short partition_base_pnode;
406 unsigned short message_number; 414 unsigned short message_number;
407 unsigned short uvhub_quiesce; 415 unsigned short uvhub_quiesce;
408 short socket_acknowledge_count[DEST_Q_SIZE]; 416 short socket_acknowledge_count[DEST_Q_SIZE];
@@ -422,15 +430,16 @@ struct bau_control {
422 int congested_period; 430 int congested_period;
423 cycles_t period_time; 431 cycles_t period_time;
424 long period_requests; 432 long period_requests;
433 struct hub_and_pnode *target_hub_and_pnode;
425}; 434};
426 435
427static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) 436static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
428{ 437{
429 return constant_test_bit(uvhub, &dstp->bits[0]); 438 return constant_test_bit(uvhub, &dstp->bits[0]);
430} 439}
431static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) 440static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
432{ 441{
433 __set_bit(uvhub, &dstp->bits[0]); 442 __set_bit(pnode, &dstp->bits[0]);
434} 443}
435static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, 444static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
436 int nbits) 445 int nbits)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index a501741c2335..4298002d0c83 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -398,6 +398,8 @@ struct uv_blade_info {
398 unsigned short nr_online_cpus; 398 unsigned short nr_online_cpus;
399 unsigned short pnode; 399 unsigned short pnode;
400 short memory_nid; 400 short memory_nid;
401 spinlock_t nmi_lock;
402 unsigned long nmi_count;
401}; 403};
402extern struct uv_blade_info *uv_blade_info; 404extern struct uv_blade_info *uv_blade_info;
403extern short *uv_node_to_blade; 405extern short *uv_node_to_blade;
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 20cafeac7455..f5bb64a823d7 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV MMR definitions 6 * SGI UV MMR definitions
7 * 7 *
8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_MMRS_H 11#ifndef _ASM_X86_UV_UV_MMRS_H
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u {
1099 } s; 1099 } s;
1100}; 1100};
1101 1101
1102/* ========================================================================= */
1103/* UVH_SCRATCH5 */
1104/* ========================================================================= */
1105#define UVH_SCRATCH5 0x2d0200UL
1106#define UVH_SCRATCH5_32 0x00778
1107
1108#define UVH_SCRATCH5_SCRATCH5_SHFT 0
1109#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
1110union uvh_scratch5_u {
1111 unsigned long v;
1112 struct uvh_scratch5_s {
1113 unsigned long scratch5 : 64; /* RW, W1CS */
1114 } s;
1115};
1102 1116
1103#endif /* __ASM_UV_MMRS_X86_H__ */ 1117#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 643ebf2e2ad8..d3d859035af9 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -68,6 +68,17 @@ struct x86_init_oem {
68}; 68};
69 69
70/** 70/**
71 * struct x86_init_mapping - platform specific initial kernel pagetable setup
72 * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
73 *
74 * For more details on the purpose of this hook, look in
75 * init_memory_mapping and the commit that added it.
76 */
77struct x86_init_mapping {
78 void (*pagetable_reserve)(u64 start, u64 end);
79};
80
81/**
71 * struct x86_init_paging - platform specific paging functions 82 * struct x86_init_paging - platform specific paging functions
72 * @pagetable_setup_start: platform specific pre paging_init() call 83 * @pagetable_setup_start: platform specific pre paging_init() call
73 * @pagetable_setup_done: platform specific post paging_init() call 84 * @pagetable_setup_done: platform specific post paging_init() call
@@ -123,6 +134,7 @@ struct x86_init_ops {
123 struct x86_init_mpparse mpparse; 134 struct x86_init_mpparse mpparse;
124 struct x86_init_irqs irqs; 135 struct x86_init_irqs irqs;
125 struct x86_init_oem oem; 136 struct x86_init_oem oem;
137 struct x86_init_mapping mapping;
126 struct x86_init_paging paging; 138 struct x86_init_paging paging;
127 struct x86_init_timers timers; 139 struct x86_init_timers timers;
128 struct x86_init_iommu iommu; 140 struct x86_init_iommu iommu;
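
The new x86_init.mapping.pagetable_reserve member follows the usual x86_init pattern: a function pointer with a native default that a platform (Xen, further down in this diff) can override at init time. A minimal userspace sketch of that override pattern, with made-up names standing in for the kernel structures:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for x86_init_ops / native_pagetable_reserve; the names
 * are illustrative only, not the kernel's. */
struct mapping_ops {
	void (*pagetable_reserve)(uint64_t start, uint64_t end);
};

static void native_reserve(uint64_t start, uint64_t end)
{
	printf("native: reserve [%llx, %llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

static void xen_like_reserve(uint64_t start, uint64_t end)
{
	native_reserve(start, end);		/* still reserve the used part */
	printf("xen-like: then mark the unused tail RW again\n");
}

static struct mapping_ops ops = { .pagetable_reserve = native_reserve };

int main(void)
{
	ops.pagetable_reserve(0x100000, 0x180000);	/* default path */

	ops.pagetable_reserve = xen_like_reserve;	/* platform override */
	ops.pagetable_reserve(0x100000, 0x180000);
	return 0;
}
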
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 33b10a0fc095..7acd2d2ac965 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -37,6 +37,13 @@
37#include <asm/smp.h> 37#include <asm/smp.h>
38#include <asm/x86_init.h> 38#include <asm/x86_init.h>
39#include <asm/emergency-restart.h> 39#include <asm/emergency-restart.h>
40#include <asm/nmi.h>
41
42/* BMC sets a bit in this MMR before sending an NMI */
43#define UVH_NMI_MMR UVH_SCRATCH5
44#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8)
45#define UV_NMI_PENDING_MASK (1UL << 63)
46DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
40 47
41DEFINE_PER_CPU(int, x2apic_extra_bits); 48DEFINE_PER_CPU(int, x2apic_extra_bits);
42 49
@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void)
642 */ 649 */
643int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) 650int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
644{ 651{
652 unsigned long real_uv_nmi;
653 int bid;
654
645 if (reason != DIE_NMIUNKNOWN) 655 if (reason != DIE_NMIUNKNOWN)
646 return NOTIFY_OK; 656 return NOTIFY_OK;
647 657
648 if (in_crash_kexec) 658 if (in_crash_kexec)
649 /* do nothing if entering the crash kernel */ 659 /* do nothing if entering the crash kernel */
650 return NOTIFY_OK; 660 return NOTIFY_OK;
661
651 /* 662 /*
652 * Use a lock so only one cpu prints at a time 663 * Each blade has an MMR that indicates when an NMI has been sent
653 * to prevent intermixed output. 664 * to cpus on the blade. If an NMI is detected, atomically
665 * clear the MMR and update a per-blade NMI count used to
666 * cause each cpu on the blade to notice a new NMI.
667 */
668 bid = uv_numa_blade_id();
669 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
670
671 if (unlikely(real_uv_nmi)) {
672 spin_lock(&uv_blade_info[bid].nmi_lock);
673 real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
674 if (real_uv_nmi) {
675 uv_blade_info[bid].nmi_count++;
676 uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
677 }
678 spin_unlock(&uv_blade_info[bid].nmi_lock);
679 }
680
681 if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
682 return NOTIFY_DONE;
683
684 __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
685
686 /*
687 * Use a lock so only one cpu prints at a time.
688 * This prevents intermixed output.
654 */ 689 */
655 spin_lock(&uv_nmi_lock); 690 spin_lock(&uv_nmi_lock);
656 pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); 691 pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
657 dump_stack(); 692 dump_stack();
658 spin_unlock(&uv_nmi_lock); 693 spin_unlock(&uv_nmi_lock);
659 694
@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
661} 696}
662 697
663static struct notifier_block uv_dump_stack_nmi_nb = { 698static struct notifier_block uv_dump_stack_nmi_nb = {
664 .notifier_call = uv_handle_nmi 699 .notifier_call = uv_handle_nmi,
700 .priority = NMI_LOCAL_LOW_PRIOR - 1,
665}; 701};
666 702
667void uv_register_nmi_notifier(void) 703void uv_register_nmi_notifier(void)
@@ -720,8 +756,9 @@ void __init uv_system_init(void)
720 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); 756 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
721 757
722 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); 758 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
723 uv_blade_info = kmalloc(bytes, GFP_KERNEL); 759 uv_blade_info = kzalloc(bytes, GFP_KERNEL);
724 BUG_ON(!uv_blade_info); 760 BUG_ON(!uv_blade_info);
761
725 for (blade = 0; blade < uv_num_possible_blades(); blade++) 762 for (blade = 0; blade < uv_num_possible_blades(); blade++)
726 uv_blade_info[blade].memory_nid = -1; 763 uv_blade_info[blade].memory_nid = -1;
727 764
@@ -747,6 +784,7 @@ void __init uv_system_init(void)
747 uv_blade_info[blade].pnode = pnode; 784 uv_blade_info[blade].pnode = pnode;
748 uv_blade_info[blade].nr_possible_cpus = 0; 785 uv_blade_info[blade].nr_possible_cpus = 0;
749 uv_blade_info[blade].nr_online_cpus = 0; 786 uv_blade_info[blade].nr_online_cpus = 0;
787 spin_lock_init(&uv_blade_info[blade].nmi_lock);
750 max_pnode = max(pnode, max_pnode); 788 max_pnode = max(pnode, max_pnode);
751 blade++; 789 blade++;
752 } 790 }
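
The new handler only treats an unknown NMI as a UV BMC NMI when bit 63 of the SCRATCH5 MMR is set; the MMR is re-read under the blade's nmi_lock, the per-blade nmi_count is bumped and the MMR cleared, and each cpu compares its own cpu_last_nmi_count against the blade count so every cpu on the blade dumps its stack exactly once per event. A single-threaded userspace model of that bookkeeping (plain variables stand in for the MMR, the lock, and the per-cpu data; this is only an illustration of the logic):

#include <stdio.h>

#define NMI_PENDING (1UL << 63)

static unsigned long fake_mmr;		/* stands in for UVH_SCRATCH5 */
static unsigned long blade_nmi_count;	/* per-blade count */
static unsigned long last_seen[4];	/* per-cpu cpu_last_nmi_count */

/* Returns 1 if this cpu should dump its stack for the current event. */
static int uv_nmi_should_dump(int cpu)
{
	if (fake_mmr & NMI_PENDING) {
		/* in the kernel this re-check happens under nmi_lock */
		blade_nmi_count++;
		fake_mmr &= ~NMI_PENDING;	/* "write UVH_NMI_MMR_CLEAR" */
	}
	if (last_seen[cpu] == blade_nmi_count)
		return 0;			/* nothing new for this cpu */
	last_seen[cpu] = blade_nmi_count;
	return 1;
}

int main(void)
{
	fake_mmr = NMI_PENDING;			/* BMC signals one NMI */
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu %d dump=%d\n", cpu, uv_nmi_should_dump(cpu));
	/* second pass: no new NMI, so nobody dumps again */
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu %d dump=%d\n", cpu, uv_nmi_should_dump(cpu));
	return 0;
}
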
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bb9eb29a52dd..6f9d1f6063e9 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
613#endif 613#endif
614 614
615 /* As a rule processors have APIC timer running in deep C states */ 615 /* As a rule processors have APIC timer running in deep C states */
616 if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) 616 if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
617 set_cpu_cap(c, X86_FEATURE_ARAT); 617 set_cpu_cap(c, X86_FEATURE_ARAT);
618 618
619 /* 619 /*
@@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
698 */ 698 */
699 699
700const int amd_erratum_400[] = 700const int amd_erratum_400[] =
701 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf), 701 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
702 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 702 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
703EXPORT_SYMBOL_GPL(amd_erratum_400); 703EXPORT_SYMBOL_GPL(amd_erratum_400);
704 704
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 167f97b5596e..bb0adad35143 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -509,6 +509,7 @@ recurse:
509out_free: 509out_free:
510 if (b) { 510 if (b) {
511 kobject_put(&b->kobj); 511 kobject_put(&b->kobj);
512 list_del(&b->miscj);
512 kfree(b); 513 kfree(b);
513 } 514 }
514 return err; 515 return err;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 6f8c5e9da97f..0f034460260d 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
446 */ 446 */
447 rdmsr(MSR_IA32_MISC_ENABLE, l, h); 447 rdmsr(MSR_IA32_MISC_ENABLE, l, h);
448 448
449 h = lvtthmr_init;
449 /* 450 /*
450 * The initial value of thermal LVT entries on all APs always reads 451 * The initial value of thermal LVT entries on all APs always reads
451 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI 452 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
452 * sequence to them and LVT registers are reset to 0s except for 453 * sequence to them and LVT registers are reset to 0s except for
453 * the mask bits which are set to 1s when APs receive INIT IPI. 454 * the mask bits which are set to 1s when APs receive INIT IPI.
454 * Always restore the value that BIOS has programmed on AP based on 455 * If BIOS takes over the thermal interrupt and sets its interrupt
455 * BSP's info we saved since BIOS is always setting the same value 456 * delivery mode to SMI (not fixed), it restores the value that the
456 * for all threads/cores 457 * BIOS has programmed on AP based on BSP's info we saved since BIOS
458 * is always setting the same value for all threads/cores.
457 */ 459 */
458 apic_write(APIC_LVTTHMR, lvtthmr_init); 460 if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
461 apic_write(APIC_LVTTHMR, lvtthmr_init);
459 462
460 h = lvtthmr_init;
461 463
462 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { 464 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
463 printk(KERN_DEBUG 465 printk(KERN_DEBUG
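
With the APIC_DM_FIXED_MASK constant (0x00700) added earlier in this diff, the handler can tell how the BIOS left the thermal LVT programmed: the delivery-mode field sits in bits 10:8, so a masked value of 0x000 means fixed delivery while 0x200 means SMI, and only in the non-fixed case is the saved LVTTHMR written back on the AP. A small worked check of that mask (the sample register value is illustrative only):

#include <stdio.h>

#define APIC_DM_FIXED_MASK 0x00700
#define APIC_DM_FIXED      0x00000
#define APIC_DM_SMI        0x00200

int main(void)
{
	unsigned int lvtthmr = 0x10200;	/* example: masked, SMI delivery */

	if ((lvtthmr & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
		printf("BIOS took over the thermal interrupt (mode %#x); "
		       "restore the saved LVTTHMR on this AP\n",
		       lvtthmr & APIC_DM_FIXED_MASK);
	else
		printf("fixed delivery mode; leave the AP's LVTTHMR alone\n");
	return 0;
}
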
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index c969fd9d1566..f1a6244d7d93 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
1183 struct pt_regs *regs) 1183 struct pt_regs *regs)
1184{ 1184{
1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1185 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1186 unsigned long flags;
1186 1187
1187 /* This is possible if op is under delayed unoptimizing */ 1188 /* This is possible if op is under delayed unoptimizing */
1188 if (kprobe_disabled(&op->kp)) 1189 if (kprobe_disabled(&op->kp))
1189 return; 1190 return;
1190 1191
1191 preempt_disable(); 1192 local_irq_save(flags);
1192 if (kprobe_running()) { 1193 if (kprobe_running()) {
1193 kprobes_inc_nmissed_count(&op->kp); 1194 kprobes_inc_nmissed_count(&op->kp);
1194 } else { 1195 } else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
1207 opt_pre_handler(&op->kp, regs); 1208 opt_pre_handler(&op->kp, regs);
1208 __this_cpu_write(current_kprobe, NULL); 1209 __this_cpu_write(current_kprobe, NULL);
1209 } 1210 }
1210 preempt_enable_no_resched(); 1211 local_irq_restore(flags);
1211} 1212}
1212 1213
1213static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) 1214static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index c11514e9128b..75ef4b18e9b7 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = {
61 .banner = default_banner, 61 .banner = default_banner,
62 }, 62 },
63 63
64 .mapping = {
65 .pagetable_reserve = native_pagetable_reserve,
66 },
67
64 .paging = { 68 .paging = {
65 .pagetable_setup_start = native_pagetable_setup_start, 69 .pagetable_setup_start = native_pagetable_setup_start,
66 .pagetable_setup_done = native_pagetable_setup_done, 70 .pagetable_setup_done = native_pagetable_setup_done,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 286d289b039b..37b8b0fe8320 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
81 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); 81 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
82} 82}
83 83
84void __init native_pagetable_reserve(u64 start, u64 end)
85{
86 memblock_x86_reserve_range(start, end, "PGTABLE");
87}
88
84struct map_range { 89struct map_range {
85 unsigned long start; 90 unsigned long start;
86 unsigned long end; 91 unsigned long end;
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
272 277
273 __flush_tlb_all(); 278 __flush_tlb_all();
274 279
280 /*
281 * Reserve the kernel pagetable pages we used (pgt_buf_start -
282 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
283 * so that they can be reused for other purposes.
284 *
285 * On native it just means calling memblock_x86_reserve_range, on Xen it
286 * also means marking RW the pagetable pages that we allocated before
287 * but that haven't been used.
288 *
289 * In fact on xen we mark RO the whole range pgt_buf_start -
290 * pgt_buf_top, because we have to make sure that when
291 * init_memory_mapping reaches the pagetable pages area, it maps
292 * RO all the pagetable pages, including the ones that are beyond
293 * pgt_buf_end at that time.
294 */
275 if (!after_bootmem && pgt_buf_end > pgt_buf_start) 295 if (!after_bootmem && pgt_buf_end > pgt_buf_start)
276 memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, 296 x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
277 pgt_buf_end << PAGE_SHIFT, "PGTABLE"); 297 PFN_PHYS(pgt_buf_end));
278 298
279 if (!after_bootmem) 299 if (!after_bootmem)
280 early_memtest(start, end); 300 early_memtest(start, end);
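
The reserve now goes through the new hook and converts page-frame numbers with PFN_PHYS(), i.e. pfn << PAGE_SHIFT, instead of open-coded shifts: the used part pgt_buf_start..pgt_buf_end is reserved, and pgt_buf_end..pgt_buf_top stays free for reuse, as the comment above describes. A tiny check of that arithmetic, assuming 4 KiB pages and invented example values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_PHYS(pfn) ((unsigned long long)(pfn) << PAGE_SHIFT)

int main(void)
{
	/* example pfn values only; the real ones come from early boot */
	unsigned long pgt_buf_start = 0x1000, pgt_buf_end = 0x1040,
		      pgt_buf_top = 0x1100;

	printf("reserve    [%llx, %llx)\n",
	       PFN_PHYS(pgt_buf_start), PFN_PHYS(pgt_buf_end));
	printf("leave free [%llx, %llx) for other use\n",
	       PFN_PHYS(pgt_buf_end), PFN_PHYS(pgt_buf_top));
	return 0;
}
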
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 7cb6424317f6..c58e0ea39ef5 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
699 struct mm_struct *mm, 699 struct mm_struct *mm,
700 unsigned long va, unsigned int cpu) 700 unsigned long va, unsigned int cpu)
701{ 701{
702 int tcpu;
703 int uvhub;
704 int locals = 0; 702 int locals = 0;
705 int remotes = 0; 703 int remotes = 0;
706 int hubs = 0; 704 int hubs = 0;
705 int tcpu;
706 int tpnode;
707 struct bau_desc *bau_desc; 707 struct bau_desc *bau_desc;
708 struct cpumask *flush_mask; 708 struct cpumask *flush_mask;
709 struct ptc_stats *stat; 709 struct ptc_stats *stat;
710 struct bau_control *bcp; 710 struct bau_control *bcp;
711 struct bau_control *tbcp; 711 struct bau_control *tbcp;
712 struct hub_and_pnode *hpp;
712 713
713 /* kernel was booted 'nobau' */ 714 /* kernel was booted 'nobau' */
714 if (nobau) 715 if (nobau)
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
750 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; 751 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
751 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 752 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
752 753
753 /* cpu statistics */
754 for_each_cpu(tcpu, flush_mask) { 754 for_each_cpu(tcpu, flush_mask) {
755 uvhub = uv_cpu_to_blade_id(tcpu); 755 /*
756 bau_uvhub_set(uvhub, &bau_desc->distribution); 756 * The distribution vector is a bit map of pnodes, relative
757 if (uvhub == bcp->uvhub) 757 * to the partition base pnode (and the partition base nasid
758 * in the header).
759 * Translate cpu to pnode and hub using an array stored
760 * in local memory.
761 */
762 hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
763 tpnode = hpp->pnode - bcp->partition_base_pnode;
764 bau_uvhub_set(tpnode, &bau_desc->distribution);
765 if (hpp->uvhub == bcp->uvhub)
758 locals++; 766 locals++;
759 else 767 else
760 remotes++; 768 remotes++;
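
The per-cpu lookup above replaces uv_cpu_to_blade_id(): each target cpu is translated to its pnode via the locally stored target_hub_and_pnode array, the partition base pnode is subtracted, and that relative pnode selects a bit in the 256-bit distribution vector whose base nasid the header's base_dest_nasid supplies. A short userspace model of the bitmap indexing, with invented example pnodes:

#include <stdio.h>
#include <string.h>

#define DIST_BITS 256

static unsigned char distribution[DIST_BITS / 8];	/* 32-byte vector */

static void set_target(int pnode, int base_pnode)
{
	int bit = pnode - base_pnode;	/* bit index is relative to the base */

	if (bit < 0 || bit >= DIST_BITS) {
		printf("pnode %d out of range of the vector\n", pnode);
		return;
	}
	distribution[bit / 8] |= 1u << (bit % 8);
}

int main(void)
{
	int base_pnode = 32;			/* example partition base */
	int targets[] = { 32, 34, 40 };		/* example target pnodes */

	memset(distribution, 0, sizeof(distribution));
	for (unsigned i = 0; i < sizeof(targets) / sizeof(targets[0]); i++)
		set_target(targets[i], base_pnode);

	for (int bit = 0; bit < DIST_BITS; bit++)
		if (distribution[bit / 8] & (1u << (bit % 8)))
			printf("bit %d set -> pnode %d\n", bit, base_pnode + bit);
	return 0;
}
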
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
855 * an interrupt, but causes an error message to be returned to 863 * an interrupt, but causes an error message to be returned to
856 * the sender. 864 * the sender.
857 */ 865 */
858static void uv_enable_timeouts(void) 866static void __init uv_enable_timeouts(void)
859{ 867{
860 int uvhub; 868 int uvhub;
861 int nuvhubs; 869 int nuvhubs;
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void)
1326} 1334}
1327 1335
1328/* 1336/*
1329 * initialize the sending side's sending buffers 1337 * Initialize the sending side's sending buffers.
1330 */ 1338 */
1331static void 1339static void
1332uv_activation_descriptor_init(int node, int pnode) 1340uv_activation_descriptor_init(int node, int pnode, int base_pnode)
1333{ 1341{
1334 int i; 1342 int i;
1335 int cpu; 1343 int cpu;
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode)
1352 n = pa >> uv_nshift; 1360 n = pa >> uv_nshift;
1353 m = pa & uv_mmask; 1361 m = pa & uv_mmask;
1354 1362
1363 /* the 14-bit pnode */
1355 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, 1364 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
1356 (n << UV_DESC_BASE_PNODE_SHIFT | m)); 1365 (n << UV_DESC_BASE_PNODE_SHIFT | m));
1357
1358 /* 1366 /*
1359 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each 1367 * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
1360 * cpu even though we only use the first one; one descriptor can 1368 * cpu even though we only use the first one; one descriptor can
1361 * describe a broadcast to 256 uv hubs. 1369 * describe a broadcast to 256 uv hubs.
1362 */ 1370 */
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode)
1365 memset(bd2, 0, sizeof(struct bau_desc)); 1373 memset(bd2, 0, sizeof(struct bau_desc));
1366 bd2->header.sw_ack_flag = 1; 1374 bd2->header.sw_ack_flag = 1;
1367 /* 1375 /*
1368 * base_dest_nodeid is the nasid of the first uvhub 1376 * The base_dest_nasid set in the message header is the nasid
1369 * in the partition. The bit map will indicate uvhub numbers, 1377 * of the first uvhub in the partition. The bit map will
1370 * which are 0-N in a partition. Pnodes are unique system-wide. 1378 * indicate destination pnode numbers relative to that base.
1379 * They may not be consecutive if nasid striding is being used.
1371 */ 1380 */
1372 bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); 1381 bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
1373 bd2->header.dest_subnodeid = 0x10; /* the LB */ 1382 bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
1374 bd2->header.command = UV_NET_ENDPOINT_INTD; 1383 bd2->header.command = UV_NET_ENDPOINT_INTD;
1375 bd2->header.int_both = 1; 1384 bd2->header.int_both = 1;
1376 /* 1385 /*
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode)
1442/* 1451/*
1443 * Initialization of each UV hub's structures 1452 * Initialization of each UV hub's structures
1444 */ 1453 */
1445static void __init uv_init_uvhub(int uvhub, int vector) 1454static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
1446{ 1455{
1447 int node; 1456 int node;
1448 int pnode; 1457 int pnode;
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector)
1450 1459
1451 node = uvhub_to_first_node(uvhub); 1460 node = uvhub_to_first_node(uvhub);
1452 pnode = uv_blade_to_pnode(uvhub); 1461 pnode = uv_blade_to_pnode(uvhub);
1453 uv_activation_descriptor_init(node, pnode); 1462 uv_activation_descriptor_init(node, pnode, base_pnode);
1454 uv_payload_queue_init(node, pnode); 1463 uv_payload_queue_init(node, pnode);
1455 /* 1464 /*
1456 * the below initialization can't be in firmware because the 1465 * The below initialization can't be in firmware because the
1457 * messaging IRQ will be determined by the OS 1466 * messaging IRQ will be determined by the OS.
1458 */ 1467 */
1459 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; 1468 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1460 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 1469 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void)
1491/* 1500/*
1492 * initialize the bau_control structure for each cpu 1501 * initialize the bau_control structure for each cpu
1493 */ 1502 */
1494static int __init uv_init_per_cpu(int nuvhubs) 1503static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
1495{ 1504{
1496 int i; 1505 int i;
1497 int cpu; 1506 int cpu;
1507 int tcpu;
1498 int pnode; 1508 int pnode;
1499 int uvhub; 1509 int uvhub;
1500 int have_hmaster; 1510 int have_hmaster;
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs)
1528 bcp = &per_cpu(bau_control, cpu); 1538 bcp = &per_cpu(bau_control, cpu);
1529 memset(bcp, 0, sizeof(struct bau_control)); 1539 memset(bcp, 0, sizeof(struct bau_control));
1530 pnode = uv_cpu_hub_info(cpu)->pnode; 1540 pnode = uv_cpu_hub_info(cpu)->pnode;
1541 if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
1542 printk(KERN_EMERG
1543 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
1544 cpu, pnode, base_part_pnode,
1545 UV_DISTRIBUTION_SIZE);
1546 return 1;
1547 }
1548 bcp->osnode = cpu_to_node(cpu);
1549 bcp->partition_base_pnode = uv_partition_base_pnode;
1531 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; 1550 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1532 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); 1551 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
1533 bdp = &uvhub_descs[uvhub]; 1552 bdp = &uvhub_descs[uvhub];
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs)
1536 bdp->pnode = pnode; 1555 bdp->pnode = pnode;
1537 /* kludge: 'assuming' one node per socket, and assuming that 1556 /* kludge: 'assuming' one node per socket, and assuming that
1538 disabling a socket just leaves a gap in node numbers */ 1557 disabling a socket just leaves a gap in node numbers */
1539 socket = (cpu_to_node(cpu) & 1); 1558 socket = bcp->osnode & 1;
1540 bdp->socket_mask |= (1 << socket); 1559 bdp->socket_mask |= (1 << socket);
1541 sdp = &bdp->socket[socket]; 1560 sdp = &bdp->socket[socket];
1542 sdp->cpu_number[sdp->num_cpus] = cpu; 1561 sdp->cpu_number[sdp->num_cpus] = cpu;
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs)
1585nextsocket: 1604nextsocket:
1586 socket++; 1605 socket++;
1587 socket_mask = (socket_mask >> 1); 1606 socket_mask = (socket_mask >> 1);
1607 /* each socket gets a local array of pnodes/hubs */
1608 bcp = smaster;
1609 bcp->target_hub_and_pnode = kmalloc_node(
1610 sizeof(struct hub_and_pnode) *
1611 num_possible_cpus(), GFP_KERNEL, bcp->osnode);
1612 memset(bcp->target_hub_and_pnode, 0,
1613 sizeof(struct hub_and_pnode) *
1614 num_possible_cpus());
1615 for_each_present_cpu(tcpu) {
1616 bcp->target_hub_and_pnode[tcpu].pnode =
1617 uv_cpu_hub_info(tcpu)->pnode;
1618 bcp->target_hub_and_pnode[tcpu].uvhub =
1619 uv_cpu_hub_info(tcpu)->numa_blade_id;
1620 }
1588 } 1621 }
1589 } 1622 }
1590 kfree(uvhub_descs); 1623 kfree(uvhub_descs);
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void)
1637 spin_lock_init(&disable_lock); 1670 spin_lock_init(&disable_lock);
1638 congested_cycles = microsec_2_cycles(congested_response_us); 1671 congested_cycles = microsec_2_cycles(congested_response_us);
1639 1672
1640 if (uv_init_per_cpu(nuvhubs)) {
1641 nobau = 1;
1642 return 0;
1643 }
1644
1645 uv_partition_base_pnode = 0x7fffffff; 1673 uv_partition_base_pnode = 0x7fffffff;
1646 for (uvhub = 0; uvhub < nuvhubs; uvhub++) 1674 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1647 if (uv_blade_nr_possible_cpus(uvhub) && 1675 if (uv_blade_nr_possible_cpus(uvhub) &&
1648 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) 1676 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
1649 uv_partition_base_pnode = uv_blade_to_pnode(uvhub); 1677 uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
1678 }
1679
1680 if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
1681 nobau = 1;
1682 return 0;
1683 }
1650 1684
1651 vector = UV_BAU_MESSAGE; 1685 vector = UV_BAU_MESSAGE;
1652 for_each_possible_blade(uvhub) 1686 for_each_possible_blade(uvhub)
1653 if (uv_blade_nr_possible_cpus(uvhub)) 1687 if (uv_blade_nr_possible_cpus(uvhub))
1654 uv_init_uvhub(uvhub, vector); 1688 uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
1655 1689
1656 uv_enable_timeouts(); 1690 uv_enable_timeouts();
1657 alloc_intr_gate(vector, uv_bau_message_intr1); 1691 alloc_intr_gate(vector, uv_bau_message_intr1);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 55c965b38c27..0684f3c74d53 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
1275{ 1275{
1276} 1276}
1277 1277
1278static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1279{
1280 /* reserve the range used */
1281 native_pagetable_reserve(start, end);
1282
1283 /* set as RW the rest */
1284 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1285 PFN_PHYS(pgt_buf_top));
1286 while (end < PFN_PHYS(pgt_buf_top)) {
1287 make_lowmem_page_readwrite(__va(end));
1288 end += PAGE_SIZE;
1289 }
1290}
1291
1278static void xen_post_allocator_init(void); 1292static void xen_post_allocator_init(void);
1279 1293
1280static __init void xen_pagetable_setup_done(pgd_t *base) 1294static __init void xen_pagetable_setup_done(pgd_t *base)
@@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm)
1463 return ret; 1477 return ret;
1464} 1478}
1465 1479
1466#ifdef CONFIG_X86_64
1467static __initdata u64 __last_pgt_set_rw = 0;
1468static __initdata u64 __pgt_buf_start = 0;
1469static __initdata u64 __pgt_buf_end = 0;
1470static __initdata u64 __pgt_buf_top = 0;
1471/*
1472 * As a consequence of the commit:
1473 *
1474 * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
1475 * Author: Yinghai Lu <yinghai@kernel.org>
1476 * Date: Fri Dec 17 16:58:28 2010 -0800
1477 *
1478 * x86-64, mm: Put early page table high
1479 *
1480 * at some point init_memory_mapping is going to reach the pagetable pages
1481 * area and map those pages too (mapping them as normal memory that falls
1482 * in the range of addresses passed to init_memory_mapping as argument).
1483 * Some of those pages are already pagetable pages (they are in the range
1484 * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
1485 * everything is fine.
1486 * Some of these pages are not pagetable pages yet (they fall in the range
1487 * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
1488 * are going to be mapped RW. When these pages become pagetable pages and
1489 * are hooked into the pagetable, xen will find that the guest has already
1490 * a RW mapping of them somewhere and fail the operation.
1491 * The reason Xen requires pagetables to be RO is that the hypervisor needs
1492 * to verify that the pagetables are valid before using them. The validation
1493 * operations are called "pinning".
1494 *
1495 * In order to fix the issue we mark all the pages in the entire range
1496 * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
1497 * is completed only the range pgt_buf_start-pgt_buf_end is reserved by
1498 * init_memory_mapping. Hence the kernel is going to crash as soon as one
1499 * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
1500 * ranges are RO).
1501 *
1502 * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
1503 * the init_memory_mapping has completed (in a perfect world we would
1504 * call this function from init_memory_mapping, but lets ignore that).
1505 *
1506 * Because we are called _after_ init_memory_mapping the pgt_buf_[start,
1507 * end,top] have all changed to new values (b/c init_memory_mapping
1508 * is called and setting up another new page-table). Hence, the first time
1509 * we enter this function, we save away the pgt_buf_start value and update
1510 * the pgt_buf_[end,top].
1511 *
1512 * When we detect that the "old" pgt_buf_start through pgt_buf_end
1513 * PFNs have been reserved (so memblock_x86_reserve_range has been called),
1514 * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
1515 *
1516 * And then we update those "old" pgt_buf_[end|top] with the new ones
1517 * so that we can redo this on the next pagetable.
1518 */
1519static __init void mark_rw_past_pgt(void) {
1520
1521 if (pgt_buf_end > pgt_buf_start) {
1522 u64 addr, size;
1523
1524 /* Save it away. */
1525 if (!__pgt_buf_start) {
1526 __pgt_buf_start = pgt_buf_start;
1527 __pgt_buf_end = pgt_buf_end;
1528 __pgt_buf_top = pgt_buf_top;
1529 return;
1530 }
1531 /* If we get the range that starts at __pgt_buf_end that means
1532 * the range is reserved, and that in 'init_memory_mapping'
1533 * the 'memblock_x86_reserve_range' has been called with the
1534 * outdated __pgt_buf_start, __pgt_buf_end (the "new"
1535 * pgt_buf_[start|end|top] refer now to a new pagetable.
1536 * Note: we are called _after_ the pgt_buf_[..] have been
1537 * updated.*/
1538
1539 addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
1540 &size, PAGE_SIZE);
1541
1542 /* Still not reserved, meaning 'memblock_x86_reserve_range'
1543 * hasn't been called yet. Update the _end and _top.*/
1544 if (addr == PFN_PHYS(__pgt_buf_start)) {
1545 __pgt_buf_end = pgt_buf_end;
1546 __pgt_buf_top = pgt_buf_top;
1547 return;
1548 }
1549
1550 /* OK, the area is reserved, meaning it is time for us to
1551 * set RW for the old end->top PFNs. */
1552
1553 /* ..unless we had already done this. */
1554 if (__pgt_buf_end == __last_pgt_set_rw)
1555 return;
1556
1557 addr = PFN_PHYS(__pgt_buf_end);
1558
1559 /* set as RW the rest */
1560 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
1561 PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));
1562
1563 while (addr < PFN_PHYS(__pgt_buf_top)) {
1564 make_lowmem_page_readwrite(__va(addr));
1565 addr += PAGE_SIZE;
1566 }
1567 /* And update everything so that we are ready for the next
1568 * pagetable (the one created for regions past 4GB) */
1569 __last_pgt_set_rw = __pgt_buf_end;
1570 __pgt_buf_start = pgt_buf_start;
1571 __pgt_buf_end = pgt_buf_end;
1572 __pgt_buf_top = pgt_buf_top;
1573 }
1574 return;
1575}
1576#else
1577static __init void mark_rw_past_pgt(void) { }
1578#endif
1579static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) 1480static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1580{ 1481{
1581#ifdef CONFIG_X86_64 1482#ifdef CONFIG_X86_64
@@ -1602,14 +1503,6 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1602 unsigned long pfn = pte_pfn(pte); 1503 unsigned long pfn = pte_pfn(pte);
1603 1504
1604 /* 1505 /*
1605 * A bit of optimization. We do not need to call the workaround
1606 * when xen_set_pte_init is called with a PTE with 0 as PFN.
1607 * That is b/c the pagetable at that point are just being populated
1608 * with empty values and we can save some cycles by not calling
1609 * the 'memblock' code.*/
1610 if (pfn)
1611 mark_rw_past_pgt();
1612 /*
1613 * If the new pfn is within the range of the newly allocated 1506 * If the new pfn is within the range of the newly allocated
1614 * kernel pagetable, and it isn't being mapped into an 1507 * kernel pagetable, and it isn't being mapped into an
1615 * early_ioremap fixmap slot as a freshly allocated page, make sure 1508 * early_ioremap fixmap slot as a freshly allocated page, make sure
@@ -2118,8 +2011,6 @@ __init void xen_ident_map_ISA(void)
2118 2011
2119static __init void xen_post_allocator_init(void) 2012static __init void xen_post_allocator_init(void)
2120{ 2013{
2121 mark_rw_past_pgt();
2122
2123#ifdef CONFIG_XEN_DEBUG 2014#ifdef CONFIG_XEN_DEBUG
2124 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); 2015 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
2125#endif 2016#endif
@@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
2228 2119
2229void __init xen_init_mmu_ops(void) 2120void __init xen_init_mmu_ops(void)
2230{ 2121{
2122 x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
2231 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; 2123 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2232 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; 2124 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2233 pv_mmu_ops = xen_mmu_ops; 2125 pv_mmu_ops = xen_mmu_ops;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f0605ab2a761..471fdcc5df85 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
114} 114}
115EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); 115EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
116 116
117struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
118{
119 return container_of(task_subsys_state(tsk, blkio_subsys_id),
120 struct blkio_cgroup, css);
121}
122EXPORT_SYMBOL_GPL(task_blkio_cgroup);
123
117static inline void 124static inline void
118blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) 125blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
119{ 126{
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 10919fae2d3a..c774930cc206 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
291#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) 291#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
292extern struct blkio_cgroup blkio_root_cgroup; 292extern struct blkio_cgroup blkio_root_cgroup;
293extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); 293extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
294extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
294extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 295extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
295 struct blkio_group *blkg, void *key, dev_t dev, 296 struct blkio_group *blkg, void *key, dev_t dev,
296 enum blkio_policy_id plid); 297 enum blkio_policy_id plid);
@@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
314struct cgroup; 315struct cgroup;
315static inline struct blkio_cgroup * 316static inline struct blkio_cgroup *
316cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } 317cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
318static inline struct blkio_cgroup *
319task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
317 320
318static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 321static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
319 struct blkio_group *blkg, void *key, dev_t dev, 322 struct blkio_group *blkg, void *key, dev_t dev,
diff --git a/block/blk-core.c b/block/blk-core.c
index a2e58eeb3549..3fe00a14822a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -316,8 +316,10 @@ EXPORT_SYMBOL(__blk_run_queue);
316 */ 316 */
317void blk_run_queue_async(struct request_queue *q) 317void blk_run_queue_async(struct request_queue *q)
318{ 318{
319 if (likely(!blk_queue_stopped(q))) 319 if (likely(!blk_queue_stopped(q))) {
320 __cancel_delayed_work(&q->delay_work);
320 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); 321 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
322 }
321} 323}
322EXPORT_SYMBOL(blk_run_queue_async); 324EXPORT_SYMBOL(blk_run_queue_async);
323 325
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0475a22a420d..252a81a306f7 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg)
160} 160}
161 161
162static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, 162static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
163 struct cgroup *cgroup) 163 struct blkio_cgroup *blkcg)
164{ 164{
165 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
166 struct throtl_grp *tg = NULL; 165 struct throtl_grp *tg = NULL;
167 void *key = td; 166 void *key = td;
168 struct backing_dev_info *bdi = &td->queue->backing_dev_info; 167 struct backing_dev_info *bdi = &td->queue->backing_dev_info;
@@ -229,12 +228,12 @@ done:
229 228
230static struct throtl_grp * throtl_get_tg(struct throtl_data *td) 229static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
231{ 230{
232 struct cgroup *cgroup;
233 struct throtl_grp *tg = NULL; 231 struct throtl_grp *tg = NULL;
232 struct blkio_cgroup *blkcg;
234 233
235 rcu_read_lock(); 234 rcu_read_lock();
236 cgroup = task_cgroup(current, blkio_subsys_id); 235 blkcg = task_blkio_cgroup(current);
237 tg = throtl_find_alloc_tg(td, cgroup); 236 tg = throtl_find_alloc_tg(td, blkcg);
238 if (!tg) 237 if (!tg)
239 tg = &td->root_tg; 238 tg = &td->root_tg;
240 rcu_read_unlock(); 239 rcu_read_unlock();
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5b52011e3a40..ab7a9e6a9b1c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
1014 cfqg->needs_update = true; 1014 cfqg->needs_update = true;
1015} 1015}
1016 1016
1017static struct cfq_group * 1017static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
1018cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) 1018 struct blkio_cgroup *blkcg, int create)
1019{ 1019{
1020 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1021 struct cfq_group *cfqg = NULL; 1020 struct cfq_group *cfqg = NULL;
1022 void *key = cfqd; 1021 void *key = cfqd;
1023 int i, j; 1022 int i, j;
@@ -1079,12 +1078,12 @@ done:
1079 */ 1078 */
1080static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) 1079static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1081{ 1080{
1082 struct cgroup *cgroup; 1081 struct blkio_cgroup *blkcg;
1083 struct cfq_group *cfqg = NULL; 1082 struct cfq_group *cfqg = NULL;
1084 1083
1085 rcu_read_lock(); 1084 rcu_read_lock();
1086 cgroup = task_cgroup(current, blkio_subsys_id); 1085 blkcg = task_blkio_cgroup(current);
1087 cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); 1086 cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
1088 if (!cfqg && create) 1087 if (!cfqg && create)
1089 cfqg = &cfqd->root_group; 1088 cfqg = &cfqd->root_group;
1090 rcu_read_unlock(); 1089 rcu_read_unlock();
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index ff9d832a163d..d38c40fe4ddb 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap)
561{ 561{
562 void __iomem *port_mmio = ahci_port_base(ap); 562 void __iomem *port_mmio = ahci_port_base(ap);
563 u32 tmp; 563 u32 tmp;
564 u8 status;
565
566 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
567
568 /*
569 * At end of section 10.1 of AHCI spec (rev 1.3), it states
570 * Software shall not set PxCMD.ST to 1 until it is determined
571 * that a functional device is present on the port as determined by
572 * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h
573 *
574 * Even though most AHCI host controllers work without this check,
575 * specific controller will fail under this condition
576 */
577 if (status & (ATA_BUSY | ATA_DRQ))
578 return;
579 else {
580 ahci_scr_read(&ap->link, SCR_STATUS, &tmp);
581
582 if ((tmp & 0xf) != 0x3)
583 return;
584 }
585 564
586 /* start DMA */ 565 /* start DMA */
587 tmp = readl(port_mmio + PORT_CMD); 566 tmp = readl(port_mmio + PORT_CMD);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f26f2fe3480a..dad9fd660f37 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3316 struct ata_eh_context *ehc = &link->eh_context; 3316 struct ata_eh_context *ehc = &link->eh_context;
3317 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3317 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3318 enum ata_lpm_policy old_policy = link->lpm_policy; 3318 enum ata_lpm_policy old_policy = link->lpm_policy;
3319 bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; 3319 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3320 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 3320 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3321 unsigned int err_mask; 3321 unsigned int err_mask;
3322 int rc; 3322 int rc;
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bdd2719f3f68..bc9e702186dd 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent)
2643} 2643}
2644 2644
2645#ifdef CONFIG_SBUS 2645#ifdef CONFIG_SBUS
2646static const struct of_device_id fore200e_sba_match[];
2646static int __devinit fore200e_sba_probe(struct platform_device *op) 2647static int __devinit fore200e_sba_probe(struct platform_device *op)
2647{ 2648{
2649 const struct of_device_id *match;
2648 const struct fore200e_bus *bus; 2650 const struct fore200e_bus *bus;
2649 struct fore200e *fore200e; 2651 struct fore200e *fore200e;
2650 static int index = 0; 2652 static int index = 0;
2651 int err; 2653 int err;
2652 2654
2653 if (!op->dev.of_match) 2655 match = of_match_device(fore200e_sba_match, &op->dev);
2656 if (!match)
2654 return -EINVAL; 2657 return -EINVAL;
2655 bus = op->dev.of_match->data; 2658 bus = match->data;
2656 2659
2657 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2660 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2658 if (!fore200e) 2661 if (!fore200e)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 8066d086578a..e086fbbbe853 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
2547 disk->major = MajorNumber; 2547 disk->major = MajorNumber;
2548 disk->first_minor = n << DAC960_MaxPartitionsBits; 2548 disk->first_minor = n << DAC960_MaxPartitionsBits;
2549 disk->fops = &DAC960_BlockDeviceOperations; 2549 disk->fops = &DAC960_BlockDeviceOperations;
2550 disk->events = DISK_EVENT_MEDIA_CHANGE;
2551 } 2550 }
2552 /* 2551 /*
2553 Indicate the Block Device Registration completed successfully, 2552 Indicate the Block Device Registration completed successfully,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 456c0cc90dcf..8eba86bba599 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void)
1736 disk->major = FLOPPY_MAJOR; 1736 disk->major = FLOPPY_MAJOR;
1737 disk->first_minor = drive; 1737 disk->first_minor = drive;
1738 disk->fops = &floppy_fops; 1738 disk->fops = &floppy_fops;
1739 disk->events = DISK_EVENT_MEDIA_CHANGE;
1740 sprintf(disk->disk_name, "fd%d", drive); 1739 sprintf(disk->disk_name, "fd%d", drive);
1741 disk->private_data = &unit[drive]; 1740 disk->private_data = &unit[drive];
1742 set_capacity(disk, 880*2); 1741 set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index c871eae14120..ede16c64ff07 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void)
1964 unit[i].disk->first_minor = i; 1964 unit[i].disk->first_minor = i;
1965 sprintf(unit[i].disk->disk_name, "fd%d", i); 1965 sprintf(unit[i].disk->disk_name, "fd%d", i);
1966 unit[i].disk->fops = &floppy_fops; 1966 unit[i].disk->fops = &floppy_fops;
1967 unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
1968 unit[i].disk->private_data = &unit[i]; 1967 unit[i].disk->private_data = &unit[i];
1969 unit[i].disk->queue = blk_init_queue(do_fd_request, 1968 unit[i].disk->queue = blk_init_queue(do_fd_request,
1970 &ataflop_lock); 1969 &ataflop_lock);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 301d7a9a41a6..db8f88586c8d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4205,7 +4205,6 @@ static int __init floppy_init(void)
4205 disks[dr]->major = FLOPPY_MAJOR; 4205 disks[dr]->major = FLOPPY_MAJOR;
4206 disks[dr]->first_minor = TOMINOR(dr); 4206 disks[dr]->first_minor = TOMINOR(dr);
4207 disks[dr]->fops = &floppy_fops; 4207 disks[dr]->fops = &floppy_fops;
4208 disks[dr]->events = DISK_EVENT_MEDIA_CHANGE;
4209 sprintf(disks[dr]->disk_name, "fd%d", dr); 4208 sprintf(disks[dr]->disk_name, "fd%d", dr);
4210 4209
4211 init_timer(&motor_off_timer[dr]); 4210 init_timer(&motor_off_timer[dr]);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 2f2ccf686251..8690e31d9932 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -320,7 +320,6 @@ static void pcd_init_units(void)
320 disk->first_minor = unit; 320 disk->first_minor = unit;
321 strcpy(disk->disk_name, cd->name); /* umm... */ 321 strcpy(disk->disk_name, cd->name); /* umm... */
322 disk->fops = &pcd_bdops; 322 disk->fops = &pcd_bdops;
323 disk->events = DISK_EVENT_MEDIA_CHANGE;
324 } 323 }
325} 324}
326 325
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 21dfdb776869..869e7676d46f 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk)
837 p->fops = &pd_fops; 837 p->fops = &pd_fops;
838 p->major = major; 838 p->major = major;
839 p->first_minor = (disk - pd) << PD_BITS; 839 p->first_minor = (disk - pd) << PD_BITS;
840 p->events = DISK_EVENT_MEDIA_CHANGE;
841 disk->gd = p; 840 disk->gd = p;
842 p->private_data = disk; 841 p->private_data = disk;
843 p->queue = pd_queue; 842 p->queue = pd_queue;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 7adeb1edbf43..f21b520ef419 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -294,7 +294,6 @@ static void __init pf_init_units(void)
294 disk->first_minor = unit; 294 disk->first_minor = unit;
295 strcpy(disk->disk_name, pf->name); 295 strcpy(disk->disk_name, pf->name);
296 disk->fops = &pf_fops; 296 disk->fops = &pf_fops;
297 disk->events = DISK_EVENT_MEDIA_CHANGE;
298 if (!(*drives[unit])[D_PRT]) 297 if (!(*drives[unit])[D_PRT])
299 pf_drive_count++; 298 pf_drive_count++;
300 } 299 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3e904717c1c0..9712fad82bc6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -92,6 +92,8 @@ struct rbd_client {
92 struct list_head node; 92 struct list_head node;
93}; 93};
94 94
95struct rbd_req_coll;
96
95/* 97/*
96 * a single io request 98 * a single io request
97 */ 99 */
@@ -100,6 +102,24 @@ struct rbd_request {
100 struct bio *bio; /* cloned bio */ 102 struct bio *bio; /* cloned bio */
101 struct page **pages; /* list of used pages */ 103 struct page **pages; /* list of used pages */
102 u64 len; 104 u64 len;
105 int coll_index;
106 struct rbd_req_coll *coll;
107};
108
109struct rbd_req_status {
110 int done;
111 int rc;
112 u64 bytes;
113};
114
115/*
116 * a collection of requests
117 */
118struct rbd_req_coll {
119 int total;
120 int num_done;
121 struct kref kref;
122 struct rbd_req_status status[0];
103}; 123};
104 124
105struct rbd_snap { 125struct rbd_snap {
@@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev)
416 rbd_dev->client = NULL; 436 rbd_dev->client = NULL;
417} 437}
418 438
439/*
440 * Destroy requests collection
441 */
442static void rbd_coll_release(struct kref *kref)
443{
444 struct rbd_req_coll *coll =
445 container_of(kref, struct rbd_req_coll, kref);
446
447 dout("rbd_coll_release %p\n", coll);
448 kfree(coll);
449}
419 450
420/* 451/*
421 * Create a new header structure, translate header format from the on-disk 452 * Create a new header structure, translate header format from the on-disk
@@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header,
590 return len; 621 return len;
591} 622}
592 623
624static int rbd_get_num_segments(struct rbd_image_header *header,
625 u64 ofs, u64 len)
626{
627 u64 start_seg = ofs >> header->obj_order;
628 u64 end_seg = (ofs + len - 1) >> header->obj_order;
629 return end_seg - start_seg + 1;
630}
631
593/* 632/*
594 * bio helpers 633 * bio helpers
595 */ 634 */
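
rbd_get_num_segments(), added above, counts how many fixed-size objects a request touches: with obj_order bits per object, the first and last byte offsets are shifted down and the difference plus one is the segment count. For instance, with obj_order = 22 (4 MiB objects), ofs = 6 MiB and len = 4 MiB span objects 1 and 2, so the result is 2. A one-line check of that arithmetic (the values are illustrative):

#include <stdio.h>

static int num_segments(unsigned long long ofs, unsigned long long len,
			int obj_order)
{
	unsigned long long start_seg = ofs >> obj_order;
	unsigned long long end_seg = (ofs + len - 1) >> obj_order;

	return (int)(end_seg - start_seg + 1);
}

int main(void)
{
	/* 4 MiB objects: offset 6 MiB, length 4 MiB -> objects 1 and 2 */
	printf("%d\n", num_segments(6ULL << 20, 4ULL << 20, 22));
	return 0;
}
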
@@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
735 kfree(ops); 774 kfree(ops);
736} 775}
737 776
777static void rbd_coll_end_req_index(struct request *rq,
778 struct rbd_req_coll *coll,
779 int index,
780 int ret, u64 len)
781{
782 struct request_queue *q;
783 int min, max, i;
784
785 dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n",
786 coll, index, ret, len);
787
788 if (!rq)
789 return;
790
791 if (!coll) {
792 blk_end_request(rq, ret, len);
793 return;
794 }
795
796 q = rq->q;
797
798 spin_lock_irq(q->queue_lock);
799 coll->status[index].done = 1;
800 coll->status[index].rc = ret;
801 coll->status[index].bytes = len;
802 max = min = coll->num_done;
803 while (max < coll->total && coll->status[max].done)
804 max++;
805
806 for (i = min; i<max; i++) {
807 __blk_end_request(rq, coll->status[i].rc,
808 coll->status[i].bytes);
809 coll->num_done++;
810 kref_put(&coll->kref, rbd_coll_release);
811 }
812 spin_unlock_irq(q->queue_lock);
813}
814
815static void rbd_coll_end_req(struct rbd_request *req,
816 int ret, u64 len)
817{
818 rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
819}
820
738/* 821/*
739 * Send ceph osd request 822 * Send ceph osd request
740 */ 823 */
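
rbd_coll_end_req_index(), added above, completes the original block request strictly in order: it marks its own slot done, then walks forward from num_done over every contiguous completed slot, ending that many byte ranges and dropping one collection reference per slot. A compact userspace model of that in-order completion (the kref and queue lock are reduced to plain counters here; the names are illustrative):

#include <stdio.h>

#define TOTAL 3

struct status { int done; int rc; unsigned long long bytes; };

static struct status status[TOTAL];
static int num_done;
static int refs = TOTAL;	/* stands in for the collection kref */

static void coll_end_index(int index, int rc, unsigned long long bytes)
{
	status[index].done = 1;
	status[index].rc = rc;
	status[index].bytes = bytes;

	/* complete every contiguous finished slot starting at num_done */
	while (num_done < TOTAL && status[num_done].done) {
		printf("end request chunk %d: rc=%d bytes=%llu\n",
		       num_done, status[num_done].rc, status[num_done].bytes);
		num_done++;
		if (--refs == 0)
			printf("collection released\n");
	}
}

int main(void)
{
	coll_end_index(1, 0, 4096);	/* out of order: nothing ends yet */
	coll_end_index(0, 0, 4096);	/* now chunks 0 and 1 complete in order */
	coll_end_index(2, 0, 4096);	/* last chunk, collection is released */
	return 0;
}
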
@@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq,
749 int flags, 832 int flags,
750 struct ceph_osd_req_op *ops, 833 struct ceph_osd_req_op *ops,
751 int num_reply, 834 int num_reply,
835 struct rbd_req_coll *coll,
836 int coll_index,
752 void (*rbd_cb)(struct ceph_osd_request *req, 837 void (*rbd_cb)(struct ceph_osd_request *req,
753 struct ceph_msg *msg), 838 struct ceph_msg *msg),
754 struct ceph_osd_request **linger_req, 839 struct ceph_osd_request **linger_req,
@@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq,
763 struct ceph_osd_request_head *reqhead; 848 struct ceph_osd_request_head *reqhead;
764 struct rbd_image_header *header = &dev->header; 849 struct rbd_image_header *header = &dev->header;
765 850
766 ret = -ENOMEM;
767 req_data = kzalloc(sizeof(*req_data), GFP_NOIO); 851 req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
768 if (!req_data) 852 if (!req_data) {
769 goto done; 853 if (coll)
854 rbd_coll_end_req_index(rq, coll, coll_index,
855 -ENOMEM, len);
856 return -ENOMEM;
857 }
770 858
771 dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); 859 if (coll) {
860 req_data->coll = coll;
861 req_data->coll_index = coll_index;
862 }
863
864 dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs);
772 865
773 down_read(&header->snap_rwsem); 866 down_read(&header->snap_rwsem);
774 867
@@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq,
828 ret = ceph_osdc_wait_request(&dev->client->osdc, req); 921 ret = ceph_osdc_wait_request(&dev->client->osdc, req);
829 if (ver) 922 if (ver)
830 *ver = le64_to_cpu(req->r_reassert_version.version); 923 *ver = le64_to_cpu(req->r_reassert_version.version);
831 dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); 924 dout("reassert_ver=%lld\n",
925 le64_to_cpu(req->r_reassert_version.version));
832 ceph_osdc_put_request(req); 926 ceph_osdc_put_request(req);
833 } 927 }
834 return ret; 928 return ret;
@@ -837,10 +931,8 @@ done_err:
837 bio_chain_put(req_data->bio); 931 bio_chain_put(req_data->bio);
838 ceph_osdc_put_request(req); 932 ceph_osdc_put_request(req);
839done_pages: 933done_pages:
934 rbd_coll_end_req(req_data, ret, len);
840 kfree(req_data); 935 kfree(req_data);
841done:
842 if (rq)
843 blk_end_request(rq, ret, len);
844 return ret; 936 return ret;
845} 937}
846 938
@@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
874 bytes = req_data->len; 966 bytes = req_data->len;
875 } 967 }
876 968
877 blk_end_request(req_data->rq, rc, bytes); 969 rbd_coll_end_req(req_data, rc, bytes);
878 970
879 if (req_data->bio) 971 if (req_data->bio)
880 bio_chain_put(req_data->bio); 972 bio_chain_put(req_data->bio);
@@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev,
934 flags, 1026 flags,
935 ops, 1027 ops,
936 2, 1028 2,
1029 NULL, 0,
937 NULL, 1030 NULL,
938 linger_req, ver); 1031 linger_req, ver);
939 if (ret < 0) 1032 if (ret < 0)
@@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq,
959 u64 snapid, 1052 u64 snapid,
960 int opcode, int flags, int num_reply, 1053 int opcode, int flags, int num_reply,
961 u64 ofs, u64 len, 1054 u64 ofs, u64 len,
962 struct bio *bio) 1055 struct bio *bio,
1056 struct rbd_req_coll *coll,
1057 int coll_index)
963{ 1058{
964 char *seg_name; 1059 char *seg_name;
965 u64 seg_ofs; 1060 u64 seg_ofs;
@@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq,
995 flags, 1090 flags,
996 ops, 1091 ops,
997 num_reply, 1092 num_reply,
1093 coll, coll_index,
998 rbd_req_cb, 0, NULL); 1094 rbd_req_cb, 0, NULL);
1095
1096 rbd_destroy_ops(ops);
999done: 1097done:
1000 kfree(seg_name); 1098 kfree(seg_name);
1001 return ret; 1099 return ret;
@@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq,
1008 struct rbd_device *rbd_dev, 1106 struct rbd_device *rbd_dev,
1009 struct ceph_snap_context *snapc, 1107 struct ceph_snap_context *snapc,
1010 u64 ofs, u64 len, 1108 u64 ofs, u64 len,
1011 struct bio *bio) 1109 struct bio *bio,
1110 struct rbd_req_coll *coll,
1111 int coll_index)
1012{ 1112{
1013 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, 1113 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
1014 CEPH_OSD_OP_WRITE, 1114 CEPH_OSD_OP_WRITE,
1015 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1115 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1016 2, 1116 2,
1017 ofs, len, bio); 1117 ofs, len, bio, coll, coll_index);
1018} 1118}
1019 1119
1020/* 1120/*
@@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq,
1024 struct rbd_device *rbd_dev, 1124 struct rbd_device *rbd_dev,
1025 u64 snapid, 1125 u64 snapid,
1026 u64 ofs, u64 len, 1126 u64 ofs, u64 len,
1027 struct bio *bio) 1127 struct bio *bio,
1128 struct rbd_req_coll *coll,
1129 int coll_index)
1028{ 1130{
1029 return rbd_do_op(rq, rbd_dev, NULL, 1131 return rbd_do_op(rq, rbd_dev, NULL,
1030 (snapid ? snapid : CEPH_NOSNAP), 1132 (snapid ? snapid : CEPH_NOSNAP),
1031 CEPH_OSD_OP_READ, 1133 CEPH_OSD_OP_READ,
1032 CEPH_OSD_FLAG_READ, 1134 CEPH_OSD_FLAG_READ,
1033 2, 1135 2,
1034 ofs, len, bio); 1136 ofs, len, bio, coll, coll_index);
1035} 1137}
1036 1138
1037/* 1139/*
@@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1063{ 1165{
1064 struct ceph_osd_req_op *ops; 1166 struct ceph_osd_req_op *ops;
1065 struct page **pages = NULL; 1167 struct page **pages = NULL;
1066 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); 1168 int ret;
1169
1170 ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
1067 if (ret < 0) 1171 if (ret < 0)
1068 return ret; 1172 return ret;
1069 1173
@@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1077 CEPH_OSD_FLAG_READ, 1181 CEPH_OSD_FLAG_READ,
1078 ops, 1182 ops,
1079 1, 1183 1,
1184 NULL, 0,
1080 rbd_simple_req_cb, 0, NULL); 1185 rbd_simple_req_cb, 0, NULL);
1081 1186
1082 rbd_destroy_ops(ops); 1187 rbd_destroy_ops(ops);
@@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
1274 return ret; 1379 return ret;
1275} 1380}
1276 1381
1382static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
1383{
1384 struct rbd_req_coll *coll =
1385 kzalloc(sizeof(struct rbd_req_coll) +
1386 sizeof(struct rbd_req_status) * num_reqs,
1387 GFP_ATOMIC);
1388
1389 if (!coll)
1390 return NULL;
1391 coll->total = num_reqs;
1392 kref_init(&coll->kref);
1393 return coll;
1394}
1395
1277/* 1396/*
1278 * block device queue callback 1397 * block device queue callback
1279 */ 1398 */
@@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q)
1291 bool do_write; 1410 bool do_write;
1292 int size, op_size = 0; 1411 int size, op_size = 0;
1293 u64 ofs; 1412 u64 ofs;
1413 int num_segs, cur_seg = 0;
1414 struct rbd_req_coll *coll;
1294 1415
1295 /* peek at request from block layer */ 1416 /* peek at request from block layer */
1296 if (!rq) 1417 if (!rq)
@@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q)
1321 do_write ? "write" : "read", 1442 do_write ? "write" : "read",
1322 size, blk_rq_pos(rq) * 512ULL); 1443 size, blk_rq_pos(rq) * 512ULL);
1323 1444
1445 num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
1446 coll = rbd_alloc_coll(num_segs);
1447 if (!coll) {
1448 spin_lock_irq(q->queue_lock);
1449 __blk_end_request_all(rq, -ENOMEM);
1450 goto next;
1451 }
1452
1324 do { 1453 do {
1325 /* a bio clone to be passed down to OSD req */ 1454 /* a bio clone to be passed down to OSD req */
1326 dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); 1455 dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
@@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q)
1328 rbd_dev->header.block_name, 1457 rbd_dev->header.block_name,
1329 ofs, size, 1458 ofs, size,
1330 NULL, NULL); 1459 NULL, NULL);
1460 kref_get(&coll->kref);
1331 bio = bio_chain_clone(&rq_bio, &next_bio, &bp, 1461 bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
1332 op_size, GFP_ATOMIC); 1462 op_size, GFP_ATOMIC);
1333 if (!bio) { 1463 if (!bio) {
1334 spin_lock_irq(q->queue_lock); 1464 rbd_coll_end_req_index(rq, coll, cur_seg,
1335 __blk_end_request_all(rq, -ENOMEM); 1465 -ENOMEM, op_size);
1336 goto next; 1466 goto next_seg;
1337 } 1467 }
1338 1468
1469
1339 /* init OSD command: write or read */ 1470 /* init OSD command: write or read */
1340 if (do_write) 1471 if (do_write)
1341 rbd_req_write(rq, rbd_dev, 1472 rbd_req_write(rq, rbd_dev,
1342 rbd_dev->header.snapc, 1473 rbd_dev->header.snapc,
1343 ofs, 1474 ofs,
1344 op_size, bio); 1475 op_size, bio,
1476 coll, cur_seg);
1345 else 1477 else
1346 rbd_req_read(rq, rbd_dev, 1478 rbd_req_read(rq, rbd_dev,
1347 cur_snap_id(rbd_dev), 1479 cur_snap_id(rbd_dev),
1348 ofs, 1480 ofs,
1349 op_size, bio); 1481 op_size, bio,
1482 coll, cur_seg);
1350 1483
1484next_seg:
1351 size -= op_size; 1485 size -= op_size;
1352 ofs += op_size; 1486 ofs += op_size;
1353 1487
1488 cur_seg++;
1354 rq_bio = next_bio; 1489 rq_bio = next_bio;
1355 } while (size > 0); 1490 } while (size > 0);
1491 kref_put(&coll->kref, rbd_coll_release);
1356 1492
1357 if (bp) 1493 if (bp)
1358 bio_pair_release(bp); 1494 bio_pair_release(bp);
1359
1360 spin_lock_irq(q->queue_lock); 1495 spin_lock_irq(q->queue_lock);
1361next: 1496next:
1362 rq = blk_fetch_request(q); 1497 rq = blk_fetch_request(q);
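
The rbd change above splits one block-layer request into per-object segments and retires it through a kref-counted collection, so the request completes only after every segment has reported back. A minimal sketch of that pattern, with hypothetical names (not the driver's actual structures):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct seg_coll {
		struct kref kref;
		int total;			/* segments expected		*/
		int num_done;			/* segments already retired	*/
	};

	static void seg_coll_release(struct kref *kref)
	{
		kfree(container_of(kref, struct seg_coll, kref));
	}

	static struct seg_coll *seg_coll_alloc(int num_segs)
	{
		struct seg_coll *coll = kzalloc(sizeof(*coll), GFP_ATOMIC);

		if (!coll)
			return NULL;
		coll->total = num_segs;
		kref_init(&coll->kref);		/* submitter's reference	*/
		return coll;
	}

The submitter takes one extra kref_get() per in-flight segment and drops its own reference after the submission loop; each completion path does a kref_put(), so whichever context finishes last frees the collection exactly once.
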
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 24a482f2fbd6..fd5adcd55944 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd)
858 swd->unit[drive].disk->first_minor = drive; 858 swd->unit[drive].disk->first_minor = drive;
859 sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); 859 sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
860 swd->unit[drive].disk->fops = &floppy_fops; 860 swd->unit[drive].disk->fops = &floppy_fops;
861 swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
862 swd->unit[drive].disk->private_data = &swd->unit[drive]; 861 swd->unit[drive].disk->private_data = &swd->unit[drive];
863 swd->unit[drive].disk->queue = swd->queue; 862 swd->unit[drive].disk->queue = swd->queue;
864 set_capacity(swd->unit[drive].disk, 2880); 863 set_capacity(swd->unit[drive].disk, 2880);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 4c10f56facbf..773bfa792777 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device
1163 disk->major = FLOPPY_MAJOR; 1163 disk->major = FLOPPY_MAJOR;
1164 disk->first_minor = i; 1164 disk->first_minor = i;
1165 disk->fops = &floppy_fops; 1165 disk->fops = &floppy_fops;
1166 disk->events = DISK_EVENT_MEDIA_CHANGE;
1167 disk->private_data = &floppy_states[i]; 1166 disk->private_data = &floppy_states[i];
1168 disk->queue = swim3_queue; 1167 disk->queue = swim3_queue;
1169 disk->flags |= GENHD_FL_REMOVABLE; 1168 disk->flags |= GENHD_FL_REMOVABLE;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 68b9430c7cfe..0e376d46bdd1 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
2334 disk->major = UB_MAJOR; 2334 disk->major = UB_MAJOR;
2335 disk->first_minor = lun->id * UB_PARTS_PER_LUN; 2335 disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2336 disk->fops = &ub_bd_fops; 2336 disk->fops = &ub_bd_fops;
2337 disk->events = DISK_EVENT_MEDIA_CHANGE;
2338 disk->private_data = lun; 2337 disk->private_data = lun;
2339 disk->driverfs_dev = &sc->intf->dev; 2338 disk->driverfs_dev = &sc->intf->dev;
2340 2339
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 645ff765cd12..6c7fd7db6dff 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace)
1005 ace->gd->major = ace_major; 1005 ace->gd->major = ace_major;
1006 ace->gd->first_minor = ace->id * ACE_NUM_MINORS; 1006 ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
1007 ace->gd->fops = &ace_fops; 1007 ace->gd->fops = &ace_fops;
1008 ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
1009 ace->gd->queue = ace->queue; 1008 ace->gd->queue = ace->queue;
1010 ace->gd->private_data = ace; 1009 ace->gd->private_data = ace;
1011 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); 1010 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 514dd8efaf73..75fb965b8f72 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t
986 986
987 cdinfo(CD_OPEN, "entering cdrom_open\n"); 987 cdinfo(CD_OPEN, "entering cdrom_open\n");
988 988
989 /* open is event synchronization point, check events first */
990 check_disk_change(bdev);
991
989 /* if this was a O_NONBLOCK open and we should honor the flags, 992 /* if this was a O_NONBLOCK open and we should honor the flags,
990 * do a quick open without drive/disc integrity checks. */ 993 * do a quick open without drive/disc integrity checks. */
991 cdi->use_count++; 994 cdi->use_count++;
@@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t
1012 1015
1013 cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", 1016 cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
1014 cdi->name, cdi->use_count); 1017 cdi->name, cdi->use_count);
1015 /* Do this on open. Don't wait for mount, because they might
1016 not be mounting, but opening with O_NONBLOCK */
1017 check_disk_change(bdev);
1018 return 0; 1018 return 0;
1019err_release: 1019err_release:
1020 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { 1020 if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
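
The cdrom_open() reordering above makes open(2) the media-event synchronization point: the disk-change check now runs before any early return, including the O_NONBLOCK quick-open path. The shape of the pattern, sketched for a hypothetical block driver:

	static int foo_open(struct block_device *bdev, fmode_t mode)
	{
		/*
		 * Revalidate media first: open(2) is where pending
		 * media-change events are consumed, so this must not be
		 * skipped by any early-return path (e.g. O_NONBLOCK).
		 */
		check_disk_change(bdev);

		/* ... the driver's normal open path follows ... */
		return 0;
	}
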
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index b2b034fea34e..3ceaf006e7f0 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
803 goto probe_fail_cdrom_register; 803 goto probe_fail_cdrom_register;
804 } 804 }
805 gd.disk->fops = &gdrom_bdops; 805 gd.disk->fops = &gdrom_bdops;
806 gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
807 /* latch on to the interrupt */ 806 /* latch on to the interrupt */
808 err = gdrom_set_interrupt_handlers(); 807 err = gdrom_set_interrupt_handlers();
809 if (err) 808 if (err)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 4e874c5fa605..e427fbe45999 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
626 gendisk->queue = q; 626 gendisk->queue = q;
627 gendisk->fops = &viocd_fops; 627 gendisk->fops = &viocd_fops;
628 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; 628 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
629 gendisk->events = DISK_EVENT_MEDIA_CHANGE;
630 set_capacity(gendisk, 0); 629 set_capacity(gendisk, 0);
631 gendisk->private_data = d; 630 gendisk->private_data = d;
632 d->viocd_disk = gendisk; 631 d->viocd_disk = gendisk;
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 43ac61978d8b..ac6739e085e3 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void)
619 pr_info("%s", version); 619 pr_info("%s", version);
620} 620}
621 621
622static const struct of_device_id n2rng_match[];
622static int __devinit n2rng_probe(struct platform_device *op) 623static int __devinit n2rng_probe(struct platform_device *op)
623{ 624{
625 const struct of_device_id *match;
624 int victoria_falls; 626 int victoria_falls;
625 int err = -ENOMEM; 627 int err = -ENOMEM;
626 struct n2rng *np; 628 struct n2rng *np;
627 629
628 if (!op->dev.of_match) 630 match = of_match_device(n2rng_match, &op->dev);
631 if (!match)
629 return -EINVAL; 632 return -EINVAL;
630 victoria_falls = (op->dev.of_match->data != NULL); 633 victoria_falls = (match->data != NULL);
631 634
632 n2rng_driver_version(); 635 n2rng_driver_version();
633 np = kzalloc(sizeof(*np), GFP_KERNEL); 636 np = kzalloc(sizeof(*np), GFP_KERNEL);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index cc6c9b2546a3..64c6b8530615 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = {
2554}; 2554};
2555#endif /* CONFIG_PCI */ 2555#endif /* CONFIG_PCI */
2556 2556
2557static struct of_device_id ipmi_match[];
2557static int __devinit ipmi_probe(struct platform_device *dev) 2558static int __devinit ipmi_probe(struct platform_device *dev)
2558{ 2559{
2559#ifdef CONFIG_OF 2560#ifdef CONFIG_OF
2561 const struct of_device_id *match;
2560 struct smi_info *info; 2562 struct smi_info *info;
2561 struct resource resource; 2563 struct resource resource;
2562 const __be32 *regsize, *regspacing, *regshift; 2564 const __be32 *regsize, *regspacing, *regshift;
@@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev)
2566 2568
2567 dev_info(&dev->dev, "probing via device tree\n"); 2569 dev_info(&dev->dev, "probing via device tree\n");
2568 2570
2569 if (!dev->dev.of_match) 2571 match = of_match_device(ipmi_match, &dev->dev);
2572 if (!match)
2570 return -EINVAL; 2573 return -EINVAL;
2571 2574
2572 ret = of_address_to_resource(np, 0, &resource); 2575 ret = of_address_to_resource(np, 0, &resource);
@@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev)
2601 return -ENOMEM; 2604 return -ENOMEM;
2602 } 2605 }
2603 2606
2604 info->si_type = (enum si_type) dev->dev.of_match->data; 2607 info->si_type = (enum si_type) match->data;
2605 info->addr_source = SI_DEVICETREE; 2608 info->addr_source = SI_DEVICETREE;
2606 info->irq_setup = std_irq_setup; 2609 info->irq_setup = std_irq_setup;
2607 2610
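
The n2rng and ipmi_si probes above (and several more below) are converted from dereferencing dev.of_match to calling of_match_device() at probe time. The general shape of that conversion, assuming a hypothetical platform driver:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static const struct of_device_id foo_match[];	/* table defined later in the file */

	static int __devinit foo_probe(struct platform_device *op)
	{
		const struct of_device_id *match;

		/* Re-match against the table instead of trusting dev->of_match. */
		match = of_match_device(foo_match, &op->dev);
		if (!match)
			return -EINVAL;

		/* Per-compatible driver data rides along in match->data. */
		platform_set_drvdata(op, (void *)match->data);
		return 0;
	}
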
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index d6412c16385f..39ccdeada791 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev)
715} 715}
716 716
717#ifdef CONFIG_OF 717#ifdef CONFIG_OF
718static int __devinit hwicap_of_probe(struct platform_device *op) 718static int __devinit hwicap_of_probe(struct platform_device *op,
719 const struct hwicap_driver_config *config)
719{ 720{
720 struct resource res; 721 struct resource res;
721 const unsigned int *id; 722 const unsigned int *id;
722 const char *family; 723 const char *family;
723 int rc; 724 int rc;
724 const struct hwicap_driver_config *config = op->dev.of_match->data;
725 const struct config_registers *regs; 725 const struct config_registers *regs;
726 726
727 727
@@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op)
751 regs); 751 regs);
752} 752}
753#else 753#else
754static inline int hwicap_of_probe(struct platform_device *op) 754static inline int hwicap_of_probe(struct platform_device *op,
755 const struct hwicap_driver_config *config)
755{ 756{
756 return -EINVAL; 757 return -EINVAL;
757} 758}
758#endif /* CONFIG_OF */ 759#endif /* CONFIG_OF */
759 760
761static const struct of_device_id __devinitconst hwicap_of_match[];
760static int __devinit hwicap_drv_probe(struct platform_device *pdev) 762static int __devinit hwicap_drv_probe(struct platform_device *pdev)
761{ 763{
764 const struct of_device_id *match;
762 struct resource *res; 765 struct resource *res;
763 const struct config_registers *regs; 766 const struct config_registers *regs;
764 const char *family; 767 const char *family;
765 768
766 if (pdev->dev.of_match) 769 match = of_match_device(hwicap_of_match, &pdev->dev);
767 return hwicap_of_probe(pdev); 770 if (match)
771 return hwicap_of_probe(pdev, match->data);
768 772
769 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 773 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
770 if (!res) 774 if (!res)
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index c1f0045ceb8e..af8e7b1aa290 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
1019 struct ppc4xx_edac_pdata *pdata = NULL; 1019 struct ppc4xx_edac_pdata *pdata = NULL;
1020 const struct device_node *np = op->dev.of_node; 1020 const struct device_node *np = op->dev.of_node;
1021 1021
1022 if (op->dev.of_match == NULL) 1022 if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL)
1023 return -EINVAL; 1023 return -EINVAL;
1024 1024
1025 /* Initial driver pointers and private data */ 1025 /* Initial driver pointers and private data */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 11d7a72c22d9..140b9525b48a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1516} 1516}
1517EXPORT_SYMBOL(drm_fb_helper_initial_config); 1517EXPORT_SYMBOL(drm_fb_helper_initial_config);
1518 1518
1519bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 1519/**
1520 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
1521 * probing all the outputs attached to the fb.
1522 * @fb_helper: the drm_fb_helper
1523 *
1524 * LOCKING:
1525 * Called at runtime, must take mode config lock.
1526 *
1527 * Scan the connectors attached to the fb_helper and try to put together a
 1528 * setup after notification of a change in output configuration.
1529 *
1530 * RETURNS:
1531 * 0 on success and a non-zero error code otherwise.
1532 */
1533int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1520{ 1534{
1535 struct drm_device *dev = fb_helper->dev;
1521 int count = 0; 1536 int count = 0;
1522 u32 max_width, max_height, bpp_sel; 1537 u32 max_width, max_height, bpp_sel;
1523 bool bound = false, crtcs_bound = false; 1538 bool bound = false, crtcs_bound = false;
1524 struct drm_crtc *crtc; 1539 struct drm_crtc *crtc;
1525 1540
1526 if (!fb_helper->fb) 1541 if (!fb_helper->fb)
1527 return false; 1542 return 0;
1528 1543
1529 list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { 1544 mutex_lock(&dev->mode_config.mutex);
1545 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1530 if (crtc->fb) 1546 if (crtc->fb)
1531 crtcs_bound = true; 1547 crtcs_bound = true;
1532 if (crtc->fb == fb_helper->fb) 1548 if (crtc->fb == fb_helper->fb)
@@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1535 1551
1536 if (!bound && crtcs_bound) { 1552 if (!bound && crtcs_bound) {
1537 fb_helper->delayed_hotplug = true; 1553 fb_helper->delayed_hotplug = true;
1538 return false; 1554 mutex_unlock(&dev->mode_config.mutex);
1555 return 0;
1539 } 1556 }
1540 DRM_DEBUG_KMS("\n"); 1557 DRM_DEBUG_KMS("\n");
1541 1558
@@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1546 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1563 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1547 max_height); 1564 max_height);
1548 drm_setup_crtcs(fb_helper); 1565 drm_setup_crtcs(fb_helper);
1566 mutex_unlock(&dev->mode_config.mutex);
1549 1567
1550 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1568 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
1551} 1569}
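
drm_fb_helper_hotplug_event() now takes the mode config lock itself and reports an int result; a driver typically just forwards its output-poll/hotplug notification to it. A minimal caller sketch, with hypothetical driver structures:

	#include <drm/drmP.h>
	#include <drm/drm_fb_helper.h>

	struct foo_private {
		struct drm_fb_helper fb_helper;
		/* ... */
	};

	/* Forward the core's hotplug notification to the fbdev emulation so
	 * it can re-probe connectors and rebuild its setup. */
	static void foo_output_poll_changed(struct drm_device *dev)
	{
		struct foo_private *priv = dev->dev_private;	/* hypothetical */

		drm_fb_helper_hotplug_event(&priv->fb_helper);
	}

	static const struct drm_mode_config_funcs foo_mode_funcs = {
		.output_poll_changed = foo_output_poll_changed,
	};
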
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c34a8dd31d02..32d1b3e829c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
49unsigned int i915_powersave = 1; 49unsigned int i915_powersave = 1;
50module_param_named(powersave, i915_powersave, int, 0600); 50module_param_named(powersave, i915_powersave, int, 0600);
51 51
52unsigned int i915_semaphores = 1; 52unsigned int i915_semaphores = 0;
53module_param_named(semaphores, i915_semaphores, int, 0600); 53module_param_named(semaphores, i915_semaphores, int, 0600);
54 54
55unsigned int i915_enable_rc6 = 0; 55unsigned int i915_enable_rc6 = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 373c2a005ec1..2166ee071ddb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5154,6 +5154,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5154 5154
5155 I915_WRITE(DSPCNTR(plane), dspcntr); 5155 I915_WRITE(DSPCNTR(plane), dspcntr);
5156 POSTING_READ(DSPCNTR(plane)); 5156 POSTING_READ(DSPCNTR(plane));
5157 if (!HAS_PCH_SPLIT(dev))
5158 intel_enable_plane(dev_priv, plane, pipe);
5157 5159
5158 ret = intel_pipe_set_base(crtc, x, y, old_fb); 5160 ret = intel_pipe_set_base(crtc, x, y, old_fb);
5159 5161
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c20eac3379e6..9073e3bfb08c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1780,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1780 1780
1781 1781
1782 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1782 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1783 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1783 if (rdev->flags & RADEON_IS_IGP)
1784 mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
1785 else
1786 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1784 1787
1785 switch (rdev->config.evergreen.max_tile_pipes) { 1788 switch (rdev->config.evergreen.max_tile_pipes) {
1786 case 1: 1789 case 1:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 94533849927e..fc40e0cc3451 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -200,6 +200,7 @@
200#define BURSTLENGTH_SHIFT 9 200#define BURSTLENGTH_SHIFT 9
201#define BURSTLENGTH_MASK 0x00000200 201#define BURSTLENGTH_MASK 0x00000200
202#define CHANSIZE_OVERRIDE (1 << 11) 202#define CHANSIZE_OVERRIDE (1 << 11)
203#define FUS_MC_ARB_RAMCFG 0x2768
203#define MC_VM_AGP_TOP 0x2028 204#define MC_VM_AGP_TOP 0x2028
204#define MC_VM_AGP_BOT 0x202C 205#define MC_VM_AGP_BOT 0x202C
205#define MC_VM_AGP_BASE 0x2030 206#define MC_VM_AGP_BASE 0x2030
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index dd881d035f09..90dfb2b8cf03 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1574 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; 1574 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
1575 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; 1575 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
1576 bool bad_record = false; 1576 bool bad_record = false;
1577 u8 *record = (u8 *)(mode_info->atom_context->bios + 1577 u8 *record;
1578 data_offset + 1578
1579 le16_to_cpu(lvds_info->info.usModePatchTableOffset)); 1579 if ((frev == 1) && (crev < 2))
1580 /* absolute */
1581 record = (u8 *)(mode_info->atom_context->bios +
1582 le16_to_cpu(lvds_info->info.usModePatchTableOffset));
1583 else
1584 /* relative */
1585 record = (u8 *)(mode_info->atom_context->bios +
1586 data_offset +
1587 le16_to_cpu(lvds_info->info.usModePatchTableOffset));
1580 while (*record != ATOM_RECORD_END_TYPE) { 1588 while (*record != ATOM_RECORD_END_TYPE) {
1581 switch (*record) { 1589 switch (*record) {
1582 case LCD_MODE_PATCH_RECORD_MODE_TYPE: 1590 case LCD_MODE_PATCH_RECORD_MODE_TYPE:
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 6334f8ac1209..0aa8e85a9457 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -33,6 +33,7 @@ cayman 0x9400
330x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 330x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
340x00009100 SPI_CONFIG_CNTL 340x00009100 SPI_CONFIG_CNTL
350x0000913C SPI_CONFIG_CNTL_1 350x0000913C SPI_CONFIG_CNTL_1
360x00009508 TA_CNTL_AUX
360x00009830 DB_DEBUG 370x00009830 DB_DEBUG
370x00009834 DB_DEBUG2 380x00009834 DB_DEBUG2
380x00009838 DB_DEBUG3 390x00009838 DB_DEBUG3
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 7e1637176e08..0e28cae7ea43 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -46,6 +46,7 @@ evergreen 0x9400
460x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 460x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
470x00009100 SPI_CONFIG_CNTL 470x00009100 SPI_CONFIG_CNTL
480x0000913C SPI_CONFIG_CNTL_1 480x0000913C SPI_CONFIG_CNTL_1
490x00009508 TA_CNTL_AUX
490x00009700 VC_CNTL 500x00009700 VC_CNTL
500x00009714 VC_ENHANCE 510x00009714 VC_ENHANCE
510x00009830 DB_DEBUG 520x00009830 DB_DEBUG
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e01cacba685f..498b284e5ef9 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
219 int i; 219 int i;
220 struct vga_switcheroo_client *active = NULL; 220 struct vga_switcheroo_client *active = NULL;
221 221
222 if (new_client->active == true)
223 return 0;
224
225 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 222 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
226 if (vgasr_priv.clients[i].active == true) { 223 if (vgasr_priv.clients[i].active == true) {
227 active = &vgasr_priv.clients[i]; 224 active = &vgasr_priv.clients[i];
@@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
372 goto out; 369 goto out;
373 } 370 }
374 371
372 if (client->active == true)
373 goto out;
374
375 /* okay we want a switch - test if devices are willing to switch */ 375 /* okay we want a switch - test if devices are willing to switch */
376 can_switch = true; 376 can_switch = true;
377 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 377 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 75b984c519ac..107397a606b4 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = {
560 .timeout = HZ, 560 .timeout = HZ,
561}; 561};
562 562
563static const struct of_device_id mpc_i2c_of_match[];
563static int __devinit fsl_i2c_probe(struct platform_device *op) 564static int __devinit fsl_i2c_probe(struct platform_device *op)
564{ 565{
566 const struct of_device_id *match;
565 struct mpc_i2c *i2c; 567 struct mpc_i2c *i2c;
566 const u32 *prop; 568 const u32 *prop;
567 u32 clock = MPC_I2C_CLOCK_LEGACY; 569 u32 clock = MPC_I2C_CLOCK_LEGACY;
568 int result = 0; 570 int result = 0;
569 int plen; 571 int plen;
570 572
571 if (!op->dev.of_match) 573 match = of_match_device(mpc_i2c_of_match, &op->dev);
574 if (!match)
572 return -EINVAL; 575 return -EINVAL;
573 576
574 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); 577 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
@@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op)
605 clock = *prop; 608 clock = *prop;
606 } 609 }
607 610
608 if (op->dev.of_match->data) { 611 if (match->data) {
609 struct mpc_i2c_data *data = op->dev.of_match->data; 612 struct mpc_i2c_data *data = match->data;
610 data->setup(op->dev.of_node, i2c, clock, data->prescaler); 613 data->setup(op->dev.of_node, i2c, clock, data->prescaler);
611 } else { 614 } else {
612 /* Backwards compatibility */ 615 /* Backwards compatibility */
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index a97e3fec8148..04be9f82e14b 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
65 jiffies, expires); 65 jiffies, expires);
66 66
67 timer->expires = jiffies + expires; 67 timer->expires = jiffies + expires;
68 timer->data = (unsigned long)&alg_data; 68 timer->data = (unsigned long)alg_data;
69 69
70 add_timer(timer); 70 add_timer(timer);
71} 71}
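
The i2c-pnx fix above stores the algorithm data pointer itself in timer->data rather than the address of the local pointer variable, which would dangle as soon as the arming function returned. A sketch of the corrected pattern with hypothetical names (pre-timer_setup() timer API, as used in this tree):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct foo_adapter {
		struct timer_list timer;	/* assumed set up with init_timer() */
		/* ... */
	};

	static void foo_timeout(unsigned long data)
	{
		struct foo_adapter *adap = (struct foo_adapter *)data;

		/* ... abort the transfer, wake the waiter ... */
	}

	static void foo_arm_timer(struct foo_adapter *adap)
	{
		/* Pass the object, not &adap: the local pointer goes out of scope. */
		adap->timer.data = (unsigned long)adap;
		adap->timer.function = foo_timeout;
		mod_timer(&adap->timer, jiffies + msecs_to_jiffies(100));
	}
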
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index c24946f51256..1de1c19dad30 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -281,17 +281,24 @@ struct ser_req {
281 u8 command; 281 u8 command;
282 u8 ref_off; 282 u8 ref_off;
283 u16 scratch; 283 u16 scratch;
284 __be16 sample;
285 struct spi_message msg; 284 struct spi_message msg;
286 struct spi_transfer xfer[6]; 285 struct spi_transfer xfer[6];
286 /*
287 * DMA (thus cache coherency maintenance) requires the
288 * transfer buffers to live in their own cache lines.
289 */
290 __be16 sample ____cacheline_aligned;
287}; 291};
288 292
289struct ads7845_ser_req { 293struct ads7845_ser_req {
290 u8 command[3]; 294 u8 command[3];
291 u8 pwrdown[3];
292 u8 sample[3];
293 struct spi_message msg; 295 struct spi_message msg;
294 struct spi_transfer xfer[2]; 296 struct spi_transfer xfer[2];
297 /*
298 * DMA (thus cache coherency maintenance) requires the
299 * transfer buffers to live in their own cache lines.
300 */
301 u8 sample[3] ____cacheline_aligned;
295}; 302};
296 303
297static int ads7846_read12_ser(struct device *dev, unsigned command) 304static int ads7846_read12_ser(struct device *dev, unsigned command)
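
The ads7846 change above moves the SPI receive buffers to the end of their structures and tags them ____cacheline_aligned, so DMA never shares a cache line with CPU-written fields. The idea in isolation, with a hypothetical request structure:

	#include <linux/cache.h>
	#include <linux/spi/spi.h>

	struct foo_req {
		u8			command;
		struct spi_message	msg;
		struct spi_transfer	xfer[2];
		/*
		 * DMA-safe RX buffer: placed last and cacheline aligned so
		 * the CPU never dirties the cache line the controller fills.
		 */
		__be16			sample ____cacheline_aligned;
	};
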
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index e7089a1f6cb6..b37e6186d0fa 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = {
349 {LM3530_NAME, 0}, 349 {LM3530_NAME, 0},
350 {} 350 {}
351}; 351};
352MODULE_DEVICE_TABLE(i2c, lm3530_id);
352 353
353static struct i2c_driver lm3530_i2c_driver = { 354static struct i2c_driver lm3530_i2c_driver = {
354 .probe = lm3530_probe, 355 .probe = lm3530_probe,
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index c820e2f53527..3f442003623d 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core)
524 for (todo = 32; todo > 0; todo -= bits) { 524 for (todo = 32; todo > 0; todo -= bits) {
525 ev.pulse = samples & 0x80000000 ? false : true; 525 ev.pulse = samples & 0x80000000 ? false : true;
526 bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); 526 bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples));
527 ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); 527 ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;
528 ir_raw_event_store_with_filter(ir->dev, &ev); 528 ir_raw_event_store_with_filter(ir->dev, &ev);
529 samples <<= bits; 529 samples <<= bits;
530 } 530 }
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 3973f9a94753..ddb4c091dedc 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
136} 136}
137EXPORT_SYMBOL(soc_camera_apply_sensor_flags); 137EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
138 138
139#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
140 ((x) >> 24) & 0xff
141
142static int soc_camera_try_fmt(struct soc_camera_device *icd,
143 struct v4l2_format *f)
144{
145 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
146 struct v4l2_pix_format *pix = &f->fmt.pix;
147 int ret;
148
149 dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
150 pixfmtstr(pix->pixelformat), pix->width, pix->height);
151
152 pix->bytesperline = 0;
153 pix->sizeimage = 0;
154
155 ret = ici->ops->try_fmt(icd, f);
156 if (ret < 0)
157 return ret;
158
159 if (!pix->sizeimage) {
160 if (!pix->bytesperline) {
161 const struct soc_camera_format_xlate *xlate;
162
163 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
164 if (!xlate)
165 return -EINVAL;
166
167 ret = soc_mbus_bytes_per_line(pix->width,
168 xlate->host_fmt);
169 if (ret > 0)
170 pix->bytesperline = ret;
171 }
172 if (pix->bytesperline)
173 pix->sizeimage = pix->bytesperline * pix->height;
174 }
175
176 return 0;
177}
178
139static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, 179static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
140 struct v4l2_format *f) 180 struct v4l2_format *f)
141{ 181{
142 struct soc_camera_device *icd = file->private_data; 182 struct soc_camera_device *icd = file->private_data;
143 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
144 183
145 WARN_ON(priv != file->private_data); 184 WARN_ON(priv != file->private_data);
146 185
@@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
149 return -EINVAL; 188 return -EINVAL;
150 189
151 /* limit format to hardware capabilities */ 190 /* limit format to hardware capabilities */
152 return ici->ops->try_fmt(icd, f); 191 return soc_camera_try_fmt(icd, f);
153} 192}
154 193
155static int soc_camera_enum_input(struct file *file, void *priv, 194static int soc_camera_enum_input(struct file *file, void *priv,
@@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
362 icd->user_formats = NULL; 401 icd->user_formats = NULL;
363} 402}
364 403
365#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
366 ((x) >> 24) & 0xff
367
368/* Called with .vb_lock held, or from the first open(2), see comment there */ 404/* Called with .vb_lock held, or from the first open(2), see comment there */
369static int soc_camera_set_fmt(struct soc_camera_device *icd, 405static int soc_camera_set_fmt(struct soc_camera_device *icd,
370 struct v4l2_format *f) 406 struct v4l2_format *f)
@@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
377 pixfmtstr(pix->pixelformat), pix->width, pix->height); 413 pixfmtstr(pix->pixelformat), pix->width, pix->height);
378 414
379 /* We always call try_fmt() before set_fmt() or set_crop() */ 415 /* We always call try_fmt() before set_fmt() or set_crop() */
380 ret = ici->ops->try_fmt(icd, f); 416 ret = soc_camera_try_fmt(icd, f);
381 if (ret < 0) 417 if (ret < 0)
382 return ret; 418 return ret;
383 419
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 5aeaf876ba9b..4aae501f02d0 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
155 sd->v4l2_dev = v4l2_dev; 155 sd->v4l2_dev = v4l2_dev;
156 if (sd->internal_ops && sd->internal_ops->registered) { 156 if (sd->internal_ops && sd->internal_ops->registered) {
157 err = sd->internal_ops->registered(sd); 157 err = sd->internal_ops->registered(sd);
158 if (err) 158 if (err) {
159 module_put(sd->owner);
159 return err; 160 return err;
161 }
160 } 162 }
161 163
162 /* This just returns 0 if either of the two args is NULL */ 164 /* This just returns 0 if either of the two args is NULL */
@@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
164 if (err) { 166 if (err) {
165 if (sd->internal_ops && sd->internal_ops->unregistered) 167 if (sd->internal_ops && sd->internal_ops->unregistered)
166 sd->internal_ops->unregistered(sd); 168 sd->internal_ops->unregistered(sd);
169 module_put(sd->owner);
167 return err; 170 return err;
168 } 171 }
169 172
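
The v4l2_device_register_subdev() fix above drops the module reference it took earlier on every later failure path. The general pattern, sketched with hypothetical helpers:

	#include <linux/module.h>

	struct foo_dev {
		struct module *owner;
		/* ... */
	};

	static int foo_hw_init(struct foo_dev *dev)
	{
		return 0;	/* hypothetical hardware bring-up */
	}

	static int foo_register(struct foo_dev *dev)
	{
		int err;

		if (!try_module_get(dev->owner))
			return -ENODEV;

		err = foo_hw_init(dev);
		if (err) {
			/* Balance the reference taken above on failure. */
			module_put(dev->owner);
			return err;
		}
		return 0;
	}
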
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 0b8064490676..812729ebf09e 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
155 155
156 switch (cmd) { 156 switch (cmd) {
157 case VIDIOC_QUERYCTRL: 157 case VIDIOC_QUERYCTRL:
158 return v4l2_subdev_queryctrl(sd, arg); 158 return v4l2_queryctrl(sd->ctrl_handler, arg);
159 159
160 case VIDIOC_QUERYMENU: 160 case VIDIOC_QUERYMENU:
161 return v4l2_subdev_querymenu(sd, arg); 161 return v4l2_querymenu(sd->ctrl_handler, arg);
162 162
163 case VIDIOC_G_CTRL: 163 case VIDIOC_G_CTRL:
164 return v4l2_subdev_g_ctrl(sd, arg); 164 return v4l2_g_ctrl(sd->ctrl_handler, arg);
165 165
166 case VIDIOC_S_CTRL: 166 case VIDIOC_S_CTRL:
167 return v4l2_subdev_s_ctrl(sd, arg); 167 return v4l2_s_ctrl(sd->ctrl_handler, arg);
168 168
169 case VIDIOC_G_EXT_CTRLS: 169 case VIDIOC_G_EXT_CTRLS:
170 return v4l2_subdev_g_ext_ctrls(sd, arg); 170 return v4l2_g_ext_ctrls(sd->ctrl_handler, arg);
171 171
172 case VIDIOC_S_EXT_CTRLS: 172 case VIDIOC_S_EXT_CTRLS:
173 return v4l2_subdev_s_ext_ctrls(sd, arg); 173 return v4l2_s_ext_ctrls(sd->ctrl_handler, arg);
174 174
175 case VIDIOC_TRY_EXT_CTRLS: 175 case VIDIOC_TRY_EXT_CTRLS:
176 return v4l2_subdev_try_ext_ctrls(sd, arg); 176 return v4l2_try_ext_ctrls(sd->ctrl_handler, arg);
177 177
178 case VIDIOC_DQEVENT: 178 case VIDIOC_DQEVENT:
179 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) 179 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 643ad52e3ca2..4796bbf0ae4e 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
1000 gd->major = I2O_MAJOR; 1000 gd->major = I2O_MAJOR;
1001 gd->queue = queue; 1001 gd->queue = queue;
1002 gd->fops = &i2o_block_fops; 1002 gd->fops = &i2o_block_fops;
1003 gd->events = DISK_EVENT_MEDIA_CHANGE;
1004 gd->private_data = dev; 1003 gd->private_data = dev;
1005 1004
1006 dev->gd = gd; 1005 dev->gd = gd;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2b200c1cfbba..461e6a17fb90 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
94 spin_unlock_irqrestore(&host->clk_lock, flags); 94 spin_unlock_irqrestore(&host->clk_lock, flags);
95 return; 95 return;
96 } 96 }
97 mmc_claim_host(host); 97 mutex_lock(&host->clk_gate_mutex);
98 spin_lock_irqsave(&host->clk_lock, flags); 98 spin_lock_irqsave(&host->clk_lock, flags);
99 if (!host->clk_requests) { 99 if (!host->clk_requests) {
100 spin_unlock_irqrestore(&host->clk_lock, flags); 100 spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); 104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
105 } 105 }
106 spin_unlock_irqrestore(&host->clk_lock, flags); 106 spin_unlock_irqrestore(&host->clk_lock, flags);
107 mmc_release_host(host); 107 mutex_unlock(&host->clk_gate_mutex);
108} 108}
109 109
110/* 110/*
@@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
130{ 130{
131 unsigned long flags; 131 unsigned long flags;
132 132
133 mmc_claim_host(host); 133 mutex_lock(&host->clk_gate_mutex);
134 spin_lock_irqsave(&host->clk_lock, flags); 134 spin_lock_irqsave(&host->clk_lock, flags);
135 if (host->clk_gated) { 135 if (host->clk_gated) {
136 spin_unlock_irqrestore(&host->clk_lock, flags); 136 spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
140 } 140 }
141 host->clk_requests++; 141 host->clk_requests++;
142 spin_unlock_irqrestore(&host->clk_lock, flags); 142 spin_unlock_irqrestore(&host->clk_lock, flags);
143 mmc_release_host(host); 143 mutex_unlock(&host->clk_gate_mutex);
144} 144}
145 145
146/** 146/**
@@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
215 host->clk_gated = false; 215 host->clk_gated = false;
216 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); 216 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
217 spin_lock_init(&host->clk_lock); 217 spin_lock_init(&host->clk_lock);
218 mutex_init(&host->clk_gate_mutex);
218} 219}
219 220
220/** 221/**
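
The mmc core change above stops using mmc_claim_host()/mmc_release_host() for clock-gating serialization and introduces a dedicated clk_gate_mutex, initialized with the rest of the clock-gating state. A simplified sketch of the locking shape, with hypothetical names:

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct foo_host {
		struct mutex	gate_mutex;	/* serializes gate/ungate	*/
		spinlock_t	lock;		/* protects requests/gated	*/
		int		requests;
		bool		gated;
	};

	static void foo_clk_enable(struct foo_host *host)
	{
		/* hypothetical: re-enable the controller clock here */
	}

	static void foo_clk_ungate(struct foo_host *host)
	{
		unsigned long flags;

		mutex_lock(&host->gate_mutex);	/* sleeping lock taken first */
		spin_lock_irqsave(&host->lock, flags);
		if (host->gated) {
			/* Drop the spinlock while (re)starting the clock. */
			spin_unlock_irqrestore(&host->lock, flags);
			foo_clk_enable(host);
			spin_lock_irqsave(&host->lock, flags);
			host->gated = false;
		}
		host->requests++;
		spin_unlock_irqrestore(&host->lock, flags);
		mutex_unlock(&host->gate_mutex);
	}
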
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index f9b611fc773e..60e4186a4345 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
124#endif 124#endif
125} 125}
126 126
127static const struct of_device_id sdhci_of_match[];
127static int __devinit sdhci_of_probe(struct platform_device *ofdev) 128static int __devinit sdhci_of_probe(struct platform_device *ofdev)
128{ 129{
130 const struct of_device_id *match;
129 struct device_node *np = ofdev->dev.of_node; 131 struct device_node *np = ofdev->dev.of_node;
130 struct sdhci_of_data *sdhci_of_data; 132 struct sdhci_of_data *sdhci_of_data;
131 struct sdhci_host *host; 133 struct sdhci_host *host;
@@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev)
134 int size; 136 int size;
135 int ret; 137 int ret;
136 138
137 if (!ofdev->dev.of_match) 139 match = of_match_device(sdhci_of_match, &ofdev->dev);
140 if (!match)
138 return -EINVAL; 141 return -EINVAL;
139 sdhci_of_data = ofdev->dev.of_match->data; 142 sdhci_of_data = match->data;
140 143
141 if (!of_device_is_available(np)) 144 if (!of_device_is_available(np))
142 return -ENODEV; 145 return -ENODEV;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index bd483f0c57e1..c1d33464aee8 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes)
214} 214}
215#endif 215#endif
216 216
217static struct of_device_id of_flash_match[];
217static int __devinit of_flash_probe(struct platform_device *dev) 218static int __devinit of_flash_probe(struct platform_device *dev)
218{ 219{
219#ifdef CONFIG_MTD_PARTITIONS 220#ifdef CONFIG_MTD_PARTITIONS
220 const char **part_probe_types; 221 const char **part_probe_types;
221#endif 222#endif
223 const struct of_device_id *match;
222 struct device_node *dp = dev->dev.of_node; 224 struct device_node *dp = dev->dev.of_node;
223 struct resource res; 225 struct resource res;
224 struct of_flash *info; 226 struct of_flash *info;
@@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
232 struct mtd_info **mtd_list = NULL; 234 struct mtd_info **mtd_list = NULL;
233 resource_size_t res_size; 235 resource_size_t res_size;
234 236
235 if (!dev->dev.of_match) 237 match = of_match_device(of_flash_match, &dev->dev);
238 if (!match)
236 return -EINVAL; 239 return -EINVAL;
237 probe_type = dev->dev.of_match->data; 240 probe_type = match->data;
238 241
239 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); 242 reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
240 243
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 01b604ad155e..e5a7375685ad 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o
144obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o 144obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
145obj-$(CONFIG_B44) += b44.o 145obj-$(CONFIG_B44) += b44.o
146obj-$(CONFIG_FORCEDETH) += forcedeth.o 146obj-$(CONFIG_FORCEDETH) += forcedeth.o
147obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o 147obj-$(CONFIG_NE_H8300) += ne-h8300.o
148obj-$(CONFIG_AX88796) += ax88796.o 148obj-$(CONFIG_AX88796) += ax88796.o
149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o 149obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
150obj-$(CONFIG_FTMAC100) += ftmac100.o 150obj-$(CONFIG_FTMAC100) += ftmac100.o
@@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o
219obj-$(CONFIG_LP486E) += lp486e.o 219obj-$(CONFIG_LP486E) += lp486e.o
220 220
221obj-$(CONFIG_ETH16I) += eth16i.o 221obj-$(CONFIG_ETH16I) += eth16i.o
222obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o 222obj-$(CONFIG_ZORRO8390) += zorro8390.o
223obj-$(CONFIG_HPLANCE) += hplance.o 7990.o 223obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
224obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o 224obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
225obj-$(CONFIG_EQUALIZER) += eql.o 225obj-$(CONFIG_EQUALIZER) += eql.o
@@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
231obj-$(CONFIG_DECLANCE) += declance.o 231obj-$(CONFIG_DECLANCE) += declance.o
232obj-$(CONFIG_ATARILANCE) += atarilance.o 232obj-$(CONFIG_ATARILANCE) += atarilance.o
233obj-$(CONFIG_A2065) += a2065.o 233obj-$(CONFIG_A2065) += a2065.o
234obj-$(CONFIG_HYDRA) += hydra.o 8390.o 234obj-$(CONFIG_HYDRA) += hydra.o
235obj-$(CONFIG_ARIADNE) += ariadne.o 235obj-$(CONFIG_ARIADNE) += ariadne.o
236obj-$(CONFIG_CS89x0) += cs89x0.o 236obj-$(CONFIG_CS89x0) += cs89x0.o
237obj-$(CONFIG_MACSONIC) += macsonic.o 237obj-$(CONFIG_MACSONIC) += macsonic.o
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index b28baff70864..01b8a6af275b 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -39,7 +39,7 @@
39 39
40typedef struct mac_addr { 40typedef struct mac_addr {
41 u8 mac_addr_value[ETH_ALEN]; 41 u8 mac_addr_value[ETH_ALEN];
42} mac_addr_t; 42} __packed mac_addr_t;
43 43
44enum { 44enum {
45 BOND_AD_STABLE = 0, 45 BOND_AD_STABLE = 0,
@@ -134,12 +134,12 @@ typedef struct lacpdu {
134 u8 tlv_type_terminator; // = terminator 134 u8 tlv_type_terminator; // = terminator
135 u8 terminator_length; // = 0 135 u8 terminator_length; // = 0
136 u8 reserved_50[50]; // = 0 136 u8 reserved_50[50]; // = 0
137} lacpdu_t; 137} __packed lacpdu_t;
138 138
139typedef struct lacpdu_header { 139typedef struct lacpdu_header {
140 struct ethhdr hdr; 140 struct ethhdr hdr;
141 struct lacpdu lacpdu; 141 struct lacpdu lacpdu;
142} lacpdu_header_t; 142} __packed lacpdu_header_t;
143 143
144// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) 144// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
145typedef struct bond_marker { 145typedef struct bond_marker {
@@ -155,12 +155,12 @@ typedef struct bond_marker {
155 u8 tlv_type_terminator; // = 0x00 155 u8 tlv_type_terminator; // = 0x00
156 u8 terminator_length; // = 0x00 156 u8 terminator_length; // = 0x00
157 u8 reserved_90[90]; // = 0 157 u8 reserved_90[90]; // = 0
158} bond_marker_t; 158} __packed bond_marker_t;
159 159
160typedef struct bond_marker_header { 160typedef struct bond_marker_header {
161 struct ethhdr hdr; 161 struct ethhdr hdr;
162 struct bond_marker marker; 162 struct bond_marker marker;
163} bond_marker_header_t; 163} __packed bond_marker_header_t;
164 164
165#pragma pack() 165#pragma pack()
166 166
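
The bonding header marks its on-wire LACP and marker structures __packed (in addition to the surrounding #pragma pack region), so the compiler cannot insert padding and sizeof() matches the frame layout. A minimal illustration with a hypothetical PDU:

	#include <linux/if_ether.h>
	#include <linux/compiler.h>

	/* Hypothetical on-wire PDU: fields at their protocol-defined offsets,
	 * no implicit padding, so it can be mapped straight onto frame data. */
	typedef struct foo_pdu {
		struct ethhdr hdr;
		u8 subtype;
		u8 version;
		u8 reserved[50];
	} __packed foo_pdu_t;
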
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index bd1d811c204f..5fedc3375562 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
247} 247}
248#endif /* CONFIG_PPC_MPC512x */ 248#endif /* CONFIG_PPC_MPC512x */
249 249
250static struct of_device_id mpc5xxx_can_table[];
250static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) 251static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
251{ 252{
253 const struct of_device_id *match;
252 struct mpc5xxx_can_data *data; 254 struct mpc5xxx_can_data *data;
253 struct device_node *np = ofdev->dev.of_node; 255 struct device_node *np = ofdev->dev.of_node;
254 struct net_device *dev; 256 struct net_device *dev;
@@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
258 int irq, mscan_clksrc = 0; 260 int irq, mscan_clksrc = 0;
259 int err = -ENOMEM; 261 int err = -ENOMEM;
260 262
261 if (!ofdev->dev.of_match) 263 match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
264 if (!match)
262 return -EINVAL; 265 return -EINVAL;
263 data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; 266 data = match->data;
264 267
265 base = of_iomap(np, 0); 268 base = of_iomap(np, 0);
266 if (!base) { 269 if (!base) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 53c0f04b1b23..cf79cf759e13 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev)
2688 netif_start_queue(dev); 2688 netif_start_queue(dev);
2689 } 2689 }
2690 2690
2691 init_waitqueue_head(&port->swqe_avail_wq);
2692 init_waitqueue_head(&port->restart_wq);
2693
2694 mutex_unlock(&port->port_lock); 2691 mutex_unlock(&port->port_lock);
2695 2692
2696 return ret; 2693 return ret;
@@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3276 3273
3277 INIT_WORK(&port->reset_task, ehea_reset_port); 3274 INIT_WORK(&port->reset_task, ehea_reset_port);
3278 3275
3276 init_waitqueue_head(&port->swqe_avail_wq);
3277 init_waitqueue_head(&port->restart_wq);
3278
3279 ret = register_netdev(dev); 3279 ret = register_netdev(dev);
3280 if (ret) { 3280 if (ret) {
3281 pr_err("register_netdev failed. ret=%d\n", ret); 3281 pr_err("register_netdev failed. ret=%d\n", ret);
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 24cb953900dd..5131e61c358c 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = {
998#endif 998#endif
999}; 999};
1000 1000
1001static struct of_device_id fs_enet_match[];
1001static int __devinit fs_enet_probe(struct platform_device *ofdev) 1002static int __devinit fs_enet_probe(struct platform_device *ofdev)
1002{ 1003{
1004 const struct of_device_id *match;
1003 struct net_device *ndev; 1005 struct net_device *ndev;
1004 struct fs_enet_private *fep; 1006 struct fs_enet_private *fep;
1005 struct fs_platform_info *fpi; 1007 struct fs_platform_info *fpi;
@@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
1007 const u8 *mac_addr; 1009 const u8 *mac_addr;
1008 int privsize, len, ret = -ENODEV; 1010 int privsize, len, ret = -ENODEV;
1009 1011
1010 if (!ofdev->dev.of_match) 1012 match = of_match_device(fs_enet_match, &ofdev->dev);
1013 if (!match)
1011 return -EINVAL; 1014 return -EINVAL;
1012 1015
1013 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); 1016 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
1014 if (!fpi) 1017 if (!fpi)
1015 return -ENOMEM; 1018 return -ENOMEM;
1016 1019
1017 if (!IS_FEC(ofdev->dev.of_match)) { 1020 if (!IS_FEC(match)) {
1018 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); 1021 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
1019 if (!data || len != 4) 1022 if (!data || len != 4)
1020 goto out_free_fpi; 1023 goto out_free_fpi;
@@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
1049 fep->dev = &ofdev->dev; 1052 fep->dev = &ofdev->dev;
1050 fep->ndev = ndev; 1053 fep->ndev = ndev;
1051 fep->fpi = fpi; 1054 fep->fpi = fpi;
1052 fep->ops = ofdev->dev.of_match->data; 1055 fep->ops = match->data;
1053 1056
1054 ret = fep->ops->setup_data(ndev); 1057 ret = fep->ops->setup_data(ndev);
1055 if (ret) 1058 if (ret)
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 7e840d373ab3..6a2e150e75bb 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
101 return 0; 101 return 0;
102} 102}
103 103
104static struct of_device_id fs_enet_mdio_fec_match[];
104static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) 105static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
105{ 106{
107 const struct of_device_id *match;
106 struct resource res; 108 struct resource res;
107 struct mii_bus *new_bus; 109 struct mii_bus *new_bus;
108 struct fec_info *fec; 110 struct fec_info *fec;
109 int (*get_bus_freq)(struct device_node *); 111 int (*get_bus_freq)(struct device_node *);
110 int ret = -ENOMEM, clock, speed; 112 int ret = -ENOMEM, clock, speed;
111 113
112 if (!ofdev->dev.of_match) 114 match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
115 if (!match)
113 return -EINVAL; 116 return -EINVAL;
114 get_bus_freq = ofdev->dev.of_match->data; 117 get_bus_freq = match->data;
115 118
116 new_bus = mdiobus_alloc(); 119 new_bus = mdiobus_alloc();
117 if (!new_bus) 120 if (!new_bus)
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index c5ef62ceb840..1cd481c04202 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = {
98 .ndo_open = hydra_open, 98 .ndo_open = hydra_open,
99 .ndo_stop = hydra_close, 99 .ndo_stop = hydra_close,
100 100
101 .ndo_start_xmit = ei_start_xmit, 101 .ndo_start_xmit = __ei_start_xmit,
102 .ndo_tx_timeout = ei_tx_timeout, 102 .ndo_tx_timeout = __ei_tx_timeout,
103 .ndo_get_stats = ei_get_stats, 103 .ndo_get_stats = __ei_get_stats,
104 .ndo_set_multicast_list = ei_set_multicast_list, 104 .ndo_set_multicast_list = __ei_set_multicast_list,
105 .ndo_validate_addr = eth_validate_addr, 105 .ndo_validate_addr = eth_validate_addr,
106 .ndo_set_mac_address = eth_mac_addr, 106 .ndo_set_mac_address = eth_mac_addr,
107 .ndo_change_mtu = eth_change_mtu, 107 .ndo_change_mtu = eth_change_mtu,
108#ifdef CONFIG_NET_POLL_CONTROLLER 108#ifdef CONFIG_NET_POLL_CONTROLLER
109 .ndo_poll_controller = ei_poll, 109 .ndo_poll_controller = __ei_poll,
110#endif 110#endif
111}; 111};
112 112
@@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
125 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 125 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
126 }; 126 };
127 127
128 dev = alloc_ei_netdev(); 128 dev = ____alloc_ei_netdev(0);
129 if (!dev) 129 if (!dev)
130 return -ENOMEM; 130 return -ENOMEM;
131 131
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 30be8c634ebd..7298a34bc795 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev)
167#ifndef MODULE 167#ifndef MODULE
168struct net_device * __init ne_probe(int unit) 168struct net_device * __init ne_probe(int unit)
169{ 169{
170 struct net_device *dev = alloc_ei_netdev(); 170 struct net_device *dev = ____alloc_ei_netdev(0);
171 int err; 171 int err;
172 172
173 if (!dev) 173 if (!dev)
@@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = {
197 .ndo_open = ne_open, 197 .ndo_open = ne_open,
198 .ndo_stop = ne_close, 198 .ndo_stop = ne_close,
199 199
200 .ndo_start_xmit = ei_start_xmit, 200 .ndo_start_xmit = __ei_start_xmit,
201 .ndo_tx_timeout = ei_tx_timeout, 201 .ndo_tx_timeout = __ei_tx_timeout,
202 .ndo_get_stats = ei_get_stats, 202 .ndo_get_stats = __ei_get_stats,
203 .ndo_set_multicast_list = ei_set_multicast_list, 203 .ndo_set_multicast_list = __ei_set_multicast_list,
204 .ndo_validate_addr = eth_validate_addr, 204 .ndo_validate_addr = eth_validate_addr,
205 .ndo_set_mac_address = eth_mac_addr, 205 .ndo_set_mac_address = eth_mac_addr,
206 .ndo_change_mtu = eth_change_mtu, 206 .ndo_change_mtu = eth_change_mtu,
207#ifdef CONFIG_NET_POLL_CONTROLLER 207#ifdef CONFIG_NET_POLL_CONTROLLER
208 .ndo_poll_controller = ei_poll, 208 .ndo_poll_controller = __ei_poll,
209#endif 209#endif
210}; 210};
211 211
@@ -637,7 +637,7 @@ int init_module(void)
637 int err; 637 int err;
638 638
639 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { 639 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
640 struct net_device *dev = alloc_ei_netdev(); 640 struct net_device *dev = ____alloc_ei_netdev(0);
641 if (!dev) 641 if (!dev)
642 break; 642 break;
643 if (io[this_dev]) { 643 if (io[this_dev]) {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index d98479030ef2..3dd45ed61f0a 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
50 return &nic_data->mcdi; 50 return &nic_data->mcdi;
51} 51}
52 52
53static inline void
54efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
55{
56 struct siena_nic_data *nic_data = efx->nic_data;
57 value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
58}
59
60static inline void
61efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
62{
63 struct siena_nic_data *nic_data = efx->nic_data;
64 __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
65}
66
 53void efx_mcdi_init(struct efx_nic *efx) 67void efx_mcdi_init(struct efx_nic *efx)
54{ 68{
55 struct efx_mcdi_iface *mcdi; 69 struct efx_mcdi_iface *mcdi;
@@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
70 const u8 *inbuf, size_t inlen) 84 const u8 *inbuf, size_t inlen)
71{ 85{
72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 87 unsigned pdu = MCDI_PDU(efx);
74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 88 unsigned doorbell = MCDI_DOORBELL(efx);
75 unsigned int i; 89 unsigned int i;
76 efx_dword_t hdr; 90 efx_dword_t hdr;
77 u32 xflags, seqno; 91 u32 xflags, seqno;
@@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
92 MCDI_HEADER_SEQ, seqno, 106 MCDI_HEADER_SEQ, seqno,
93 MCDI_HEADER_XFLAGS, xflags); 107 MCDI_HEADER_XFLAGS, xflags);
94 108
95 efx_writed(efx, &hdr, pdu); 109 efx_mcdi_writed(efx, &hdr, pdu);
96 110
97 for (i = 0; i < inlen; i += 4) { 111 for (i = 0; i < inlen; i += 4)
98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 112 efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
99 /* use wmb() within loop to inhibit write combining */ 113 pdu + 4 + i);
100 wmb();
101 }
102 114
103 /* ring the doorbell with a distinctive value */ 115 /* ring the doorbell with a distinctive value */
104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 116 EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
105 wmb(); 117 efx_mcdi_writed(efx, &hdr, doorbell);
106} 118}
107 119
108static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 120static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
109{ 121{
110 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
111 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 123 unsigned int pdu = MCDI_PDU(efx);
112 int i; 124 int i;
113 125
114 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 126 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
115 BUG_ON(outlen & 3 || outlen >= 0x100); 127 BUG_ON(outlen & 3 || outlen >= 0x100);
116 128
117 for (i = 0; i < outlen; i += 4) 129 for (i = 0; i < outlen; i += 4)
118 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 130 efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
119} 131}
120 132
121static int efx_mcdi_poll(struct efx_nic *efx) 133static int efx_mcdi_poll(struct efx_nic *efx)
@@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
123 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
124 unsigned int time, finish; 136 unsigned int time, finish;
125 unsigned int respseq, respcmd, error; 137 unsigned int respseq, respcmd, error;
126 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 138 unsigned int pdu = MCDI_PDU(efx);
127 unsigned int rc, spins; 139 unsigned int rc, spins;
128 efx_dword_t reg; 140 efx_dword_t reg;
129 141
@@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
149 161
150 time = get_seconds(); 162 time = get_seconds();
151 163
152 rmb(); 164 efx_mcdi_readd(efx, &reg, pdu);
153 efx_readd(efx, &reg, pdu);
154 165
155 /* All 1's indicates that shared memory is in reset (and is 166 /* All 1's indicates that shared memory is in reset (and is
156 * not a valid header). Wait for it to come out reset before 167 * not a valid header). Wait for it to come out reset before
@@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
177 respseq, mcdi->seqno); 188 respseq, mcdi->seqno);
178 rc = EIO; 189 rc = EIO;
179 } else if (error) { 190 } else if (error) {
180 efx_readd(efx, &reg, pdu + 4); 191 efx_mcdi_readd(efx, &reg, pdu + 4);
181 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
182#define TRANSLATE_ERROR(name) \ 193#define TRANSLATE_ERROR(name) \
183 case MC_CMD_ERR_ ## name: \ 194 case MC_CMD_ERR_ ## name: \
@@ -211,21 +222,21 @@ out:
211/* Test and clear MC-rebooted flag for this port/function */ 222/* Test and clear MC-rebooted flag for this port/function */
212int efx_mcdi_poll_reboot(struct efx_nic *efx) 223int efx_mcdi_poll_reboot(struct efx_nic *efx)
213{ 224{
214 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 225 unsigned int addr = MCDI_REBOOT_FLAG(efx);
215 efx_dword_t reg; 226 efx_dword_t reg;
216 uint32_t value; 227 uint32_t value;
217 228
218 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
219 return false; 230 return false;
220 231
221 efx_readd(efx, &reg, addr); 232 efx_mcdi_readd(efx, &reg, addr);
222 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
223 234
224 if (value == 0) 235 if (value == 0)
225 return 0; 236 return 0;
226 237
227 EFX_ZERO_DWORD(reg); 238 EFX_ZERO_DWORD(reg);
228 efx_writed(efx, &reg, addr); 239 efx_mcdi_writed(efx, &reg, addr);
229 240
230 if (value == MC_STATUS_DWORD_ASSERT) 241 if (value == MC_STATUS_DWORD_ASSERT)
231 return -EINTR; 242 return -EINTR;
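The mcdi.c hunks above stop addressing MC_TREG_SMEM through the main register map (efx_readd/efx_writed plus explicit barriers) and instead go through a pair of accessors bound to a separately mapped MCDI shared-memory window. A minimal sketch of that accessor shape, using hypothetical demo_* names that are not part of the patch:

#include <linux/io.h>
#include <linux/types.h>

struct demo_nic {
	void __iomem *smem;	/* window mapped once with ioremap_nocache() at probe */
};

/* __raw_readl/__raw_writel: native byte order, no implied barriers;
 * callers order their own accesses, as the patch does. */
static inline u32 demo_smem_readl(struct demo_nic *nic, unsigned int reg)
{
	return __raw_readl(nic->smem + reg);
}

static inline void demo_smem_writel(struct demo_nic *nic, u32 val, unsigned int reg)
{
	__raw_writel(val, nic->smem + reg);
}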
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 10f1cb79c147..9b29a8d7c449 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1937 1937
1938 size = min_t(size_t, table->step, 16); 1938 size = min_t(size_t, table->step, 16);
1939 1939
1940 if (table->offset >= efx->type->mem_map_size) {
1941 /* No longer mapped; return dummy data */
1942 memcpy(buf, "\xde\xc0\xad\xde", 4);
1943 buf += table->rows * size;
1944 continue;
1945 }
1946
1940 for (i = 0; i < table->rows; i++) { 1947 for (i = 0; i < table->rows; i++) {
1941 switch (table->step) { 1948 switch (table->step) {
1942 case 4: /* 32-bit register or SRAM */ 1949 case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index a42db6e35be3..d91701abd331 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
143/** 143/**
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
146 * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
146 * @wol_filter_id: Wake-on-LAN packet filter id 147 * @wol_filter_id: Wake-on-LAN packet filter id
147 */ 148 */
148struct siena_nic_data { 149struct siena_nic_data {
149 struct efx_mcdi_iface mcdi; 150 struct efx_mcdi_iface mcdi;
151 void __iomem *mcdi_smem;
150 int wol_filter_id; 152 int wol_filter_id;
151}; 153};
152 154
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index e4dd8986b1fe..837869b71db9 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx)
220 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 220 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
221 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 221 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
222 222
223 /* Initialise MCDI */
224 nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
225 FR_CZ_MC_TREG_SMEM,
226 FR_CZ_MC_TREG_SMEM_STEP *
227 FR_CZ_MC_TREG_SMEM_ROWS);
228 if (!nic_data->mcdi_smem) {
229 netif_err(efx, probe, efx->net_dev,
230 "could not map MCDI at %llx+%x\n",
231 (unsigned long long)efx->membase_phys +
232 FR_CZ_MC_TREG_SMEM,
233 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
234 rc = -ENOMEM;
235 goto fail1;
236 }
223 efx_mcdi_init(efx); 237 efx_mcdi_init(efx);
224 238
225 /* Recover from a failed assertion before probing */ 239 /* Recover from a failed assertion before probing */
226 rc = efx_mcdi_handle_assertion(efx); 240 rc = efx_mcdi_handle_assertion(efx);
227 if (rc) 241 if (rc)
228 goto fail1; 242 goto fail2;
229 243
230 /* Let the BMC know that the driver is now in charge of link and 244 /* Let the BMC know that the driver is now in charge of link and
231 * filter settings. We must do this before we reset the NIC */ 245 * filter settings. We must do this before we reset the NIC */
@@ -280,6 +294,7 @@ fail4:
280fail3: 294fail3:
281 efx_mcdi_drv_attach(efx, false, NULL); 295 efx_mcdi_drv_attach(efx, false, NULL);
282fail2: 296fail2:
297 iounmap(nic_data->mcdi_smem);
283fail1: 298fail1:
284 kfree(efx->nic_data); 299 kfree(efx->nic_data);
285 return rc; 300 return rc;
@@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx)
359 374
360static void siena_remove_nic(struct efx_nic *efx) 375static void siena_remove_nic(struct efx_nic *efx)
361{ 376{
377 struct siena_nic_data *nic_data = efx->nic_data;
378
362 efx_nic_free_buffer(efx, &efx->irq_status); 379 efx_nic_free_buffer(efx, &efx->irq_status);
363 380
364 siena_reset_hw(efx, RESET_TYPE_ALL); 381 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx)
368 efx_mcdi_drv_attach(efx, false, NULL); 385 efx_mcdi_drv_attach(efx, false, NULL);
369 386
370 /* Tear down the private nic state */ 387 /* Tear down the private nic state */
371 kfree(efx->nic_data); 388 iounmap(nic_data->mcdi_smem);
389 kfree(nic_data);
372 efx->nic_data = NULL; 390 efx->nic_data = NULL;
373} 391}
374 392
@@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = {
606 .default_mac_ops = &efx_mcdi_mac_operations, 624 .default_mac_ops = &efx_mcdi_mac_operations,
607 625
608 .revision = EFX_REV_SIENA_A0, 626 .revision = EFX_REV_SIENA_A0,
609 .mem_map_size = (FR_CZ_MC_TREG_SMEM + 627 .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
610 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
611 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 628 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
612 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 629 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
613 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 630 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index eb4f59fb01e9..bff2f7999ff0 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void)
3237#endif 3237#endif
3238 3238
3239#ifdef CONFIG_SBUS 3239#ifdef CONFIG_SBUS
3240static const struct of_device_id hme_sbus_match[];
3240static int __devinit hme_sbus_probe(struct platform_device *op) 3241static int __devinit hme_sbus_probe(struct platform_device *op)
3241{ 3242{
3243 const struct of_device_id *match;
3242 struct device_node *dp = op->dev.of_node; 3244 struct device_node *dp = op->dev.of_node;
3243 const char *model = of_get_property(dp, "model", NULL); 3245 const char *model = of_get_property(dp, "model", NULL);
3244 int is_qfe; 3246 int is_qfe;
3245 3247
3246 if (!op->dev.of_match) 3248 match = of_match_device(hme_sbus_match, &op->dev);
3249 if (!match)
3247 return -EINVAL; 3250 return -EINVAL;
3248 is_qfe = (op->dev.of_match->data != NULL); 3251 is_qfe = (match->data != NULL);
3249 3252
3250 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) 3253 if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
3251 is_qfe = 1; 3254 is_qfe = 1;
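Several probes in this series (the fs_enet mdio fragment at the top, sunhme above, and qlogicpti, of_serial and fsl_qe_udc below) are converted from dereferencing ofdev->dev.of_match to calling of_match_device() at probe time. A minimal sketch of that idiom, with hypothetical acme/demo names rather than any real driver:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_match[] = {
	{ .compatible = "acme,demo-a", .data = (void *)1 },
	{ .compatible = "acme,demo-b", .data = (void *)2 },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	unsigned long variant;

	/* Look up the match explicitly instead of trusting dev.of_match. */
	match = of_match_device(demo_match, &pdev->dev);
	if (!match)
		return -EINVAL;

	variant = (unsigned long)match->data;
	/* variant-specific setup would follow here */
	return 0;
}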
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 51f2ef142a5b..976467253d20 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
311 /* toggle the LRO feature*/ 311 /* toggle the LRO feature*/
312 netdev->features ^= NETIF_F_LRO; 312 netdev->features ^= NETIF_F_LRO;
313 313
314 /* Update private LRO flag */
315 adapter->lro = lro_requested;
316
314 /* update harware LRO capability accordingly */ 317 /* update harware LRO capability accordingly */
315 if (lro_requested) 318 if (lro_requested)
316 adapter->shared->devRead.misc.uptFeatures |= 319 adapter->shared->devRead.misc.uptFeatures |=
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 17d04ff8d678..1482fa650833 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2141static void ath9k_flush(struct ieee80211_hw *hw, bool drop) 2141static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2142{ 2142{
2143 struct ath_softc *sc = hw->priv; 2143 struct ath_softc *sc = hw->priv;
2144 struct ath_hw *ah = sc->sc_ah;
2145 struct ath_common *common = ath9k_hw_common(ah);
2144 int timeout = 200; /* ms */ 2146 int timeout = 200; /* ms */
2145 int i, j; 2147 int i, j;
2146 2148
@@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2149 2151
2150 cancel_delayed_work_sync(&sc->tx_complete_work); 2152 cancel_delayed_work_sync(&sc->tx_complete_work);
2151 2153
2154 if (sc->sc_flags & SC_OP_INVALID) {
2155 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
2156 mutex_unlock(&sc->mutex);
2157 return;
2158 }
2159
2152 if (drop) 2160 if (drop)
2153 timeout = 1; 2161 timeout = 1;
2154 2162
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index c1511b14b239..42db0fc8b921 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2155 goto set_ch_out; 2155 goto set_ch_out;
2156 } 2156 }
2157 2157
2158 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2159 !iwl_legacy_is_channel_ibss(ch_info)) {
2160 IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
2161 ret = -EINVAL;
2162 goto set_ch_out;
2163 }
2164
2158 spin_lock_irqsave(&priv->lock, flags); 2165 spin_lock_irqsave(&priv->lock, flags);
2159 2166
2160 for_each_context(priv, ctx) { 2167 for_each_context(priv, ctx) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index 9ee849d669f3..f43ac1eb9014 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; 1411 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1412} 1412}
1413 1413
1414static inline int
1415iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
1416{
1417 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1418}
1419
1414static inline void 1420static inline void
1415__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) 1421__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
1416{ 1422{
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 7e8a658b7670..f3ac62431a30 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
1339 cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { 1339 cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
1340 lbs_deb_host( 1340 lbs_deb_host(
1341 "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); 1341 "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n");
1342 list_del(&cmdnode->list);
1343 spin_lock_irqsave(&priv->driver_lock, flags); 1342 spin_lock_irqsave(&priv->driver_lock, flags);
1343 list_del(&cmdnode->list);
1344 lbs_complete_command(priv, cmdnode, 0); 1344 lbs_complete_command(priv, cmdnode, 0);
1345 spin_unlock_irqrestore(&priv->driver_lock, flags); 1345 spin_unlock_irqrestore(&priv->driver_lock, flags);
1346 1346
@@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
1352 (priv->psstate == PS_STATE_PRE_SLEEP)) { 1352 (priv->psstate == PS_STATE_PRE_SLEEP)) {
1353 lbs_deb_host( 1353 lbs_deb_host(
1354 "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); 1354 "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n");
1355 list_del(&cmdnode->list);
1356 spin_lock_irqsave(&priv->driver_lock, flags); 1355 spin_lock_irqsave(&priv->driver_lock, flags);
1356 list_del(&cmdnode->list);
1357 lbs_complete_command(priv, cmdnode, 0); 1357 lbs_complete_command(priv, cmdnode, 0);
1358 spin_unlock_irqrestore(&priv->driver_lock, flags); 1358 spin_unlock_irqrestore(&priv->driver_lock, flags);
1359 priv->needtowakeup = 1; 1359 priv->needtowakeup = 1;
@@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv)
1366 "EXEC_NEXT_CMD: sending EXIT_PS\n"); 1366 "EXEC_NEXT_CMD: sending EXIT_PS\n");
1367 } 1367 }
1368 } 1368 }
1369 spin_lock_irqsave(&priv->driver_lock, flags);
1369 list_del(&cmdnode->list); 1370 list_del(&cmdnode->list);
1371 spin_unlock_irqrestore(&priv->driver_lock, flags);
1370 lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", 1372 lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
1371 le16_to_cpu(cmd->command)); 1373 le16_to_cpu(cmd->command));
1372 lbs_submit_command(priv, cmdnode); 1374 lbs_submit_command(priv, cmdnode);
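The three libertas hunks above move list_del() inside the driver_lock critical section, so a command node is never unlinked while another context may be walking the same list. A small sketch of that rule, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_cmd {
	struct list_head list;
	/* command payload would live here */
};

static void demo_dequeue(spinlock_t *lock, struct demo_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(&cmd->list);		/* unlink only while the list is protected */
	spin_unlock_irqrestore(lock, flags);
}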
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index b78a38d9172a..8c7c522a056a 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
126 126
127 board = z->resource.start; 127 board = z->resource.start;
128 ioaddr = board+cards[i].offset; 128 ioaddr = board+cards[i].offset;
129 dev = alloc_ei_netdev(); 129 dev = ____alloc_ei_netdev(0);
130 if (!dev) 130 if (!dev)
131 return -ENOMEM; 131 return -ENOMEM;
132 if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { 132 if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
@@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
146static const struct net_device_ops zorro8390_netdev_ops = { 146static const struct net_device_ops zorro8390_netdev_ops = {
147 .ndo_open = zorro8390_open, 147 .ndo_open = zorro8390_open,
148 .ndo_stop = zorro8390_close, 148 .ndo_stop = zorro8390_close,
149 .ndo_start_xmit = ei_start_xmit, 149 .ndo_start_xmit = __ei_start_xmit,
150 .ndo_tx_timeout = ei_tx_timeout, 150 .ndo_tx_timeout = __ei_tx_timeout,
151 .ndo_get_stats = ei_get_stats, 151 .ndo_get_stats = __ei_get_stats,
152 .ndo_set_multicast_list = ei_set_multicast_list, 152 .ndo_set_multicast_list = __ei_set_multicast_list,
153 .ndo_validate_addr = eth_validate_addr, 153 .ndo_validate_addr = eth_validate_addr,
154 .ndo_set_mac_address = eth_mac_addr, 154 .ndo_set_mac_address = eth_mac_addr,
155 .ndo_change_mtu = eth_change_mtu, 155 .ndo_change_mtu = eth_change_mtu,
156#ifdef CONFIG_NET_POLL_CONTROLLER 156#ifdef CONFIG_NET_POLL_CONTROLLER
157 .ndo_poll_controller = ei_poll, 157 .ndo_poll_controller = __ei_poll,
158#endif 158#endif
159}; 159};
160 160
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index ebf51ad1b714..a806cb321d2e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
579 } 579 }
580 size0 = calculate_iosize(size, min_size, size1, 580 size0 = calculate_iosize(size, min_size, size1,
581 resource_size(b_res), 4096); 581 resource_size(b_res), 4096);
582 size1 = !add_size? size0: 582 size1 = (!add_head || (add_head && !add_size)) ? size0 :
583 calculate_iosize(size, min_size+add_size, size1, 583 calculate_iosize(size, min_size+add_size, size1,
584 resource_size(b_res), 4096); 584 resource_size(b_res), 4096);
585 if (!size0 && !size1) { 585 if (!size0 && !size1) {
@@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
677 align += aligns[order]; 677 align += aligns[order];
678 } 678 }
679 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 679 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
680 size1 = !add_size ? size : 680 size1 = (!add_head || (add_head && !add_size)) ? size0 :
681 calculate_memsize(size, min_size+add_size, 0, 681 calculate_memsize(size, min_size+add_size, 0,
682 resource_size(b_res), min_align); 682 resource_size(b_res), min_align);
683 if (!size0 && !size1) { 683 if (!size0 && !size1) {
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index ac2701b22e71..043ee3136e40 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
95 else 95 else
96 table++; 96 table++;
97 97
98 if (route_port == RIO_INVALID_ROUTE)
99 route_port = IDT_DEFAULT_ROUTE;
100
98 rio_mport_write_config_32(mport, destid, hopcount, 101 rio_mport_write_config_32(mport, destid, hopcount,
99 LOCAL_RTE_CONF_DESTID_SEL, table); 102 LOCAL_RTE_CONF_DESTID_SEL, table);
100 103
@@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
411 rdev->rswitch->em_handle = idtg2_em_handler; 414 rdev->rswitch->em_handle = idtg2_em_handler;
412 rdev->rswitch->sw_sysfs = idtg2_sysfs; 415 rdev->rswitch->sw_sysfs = idtg2_sysfs;
413 416
417 if (do_enum) {
418 /* Ensure that default routing is disabled on startup */
419 rio_write_config_32(rdev,
420 RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
421 }
422
414 return 0; 423 return 0;
415} 424}
416 425
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index 3a971077e7bf..d06ee2d44b44 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
26{ 26{
27 u32 result; 27 u32 result;
28 28
29 if (route_port == RIO_INVALID_ROUTE)
30 route_port = CPS_DEFAULT_ROUTE;
31
29 if (table == RIO_GLOBAL_TABLE) { 32 if (table == RIO_GLOBAL_TABLE) {
30 rio_mport_write_config_32(mport, destid, hopcount, 33 rio_mport_write_config_32(mport, destid, hopcount,
31 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); 34 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
@@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
130 /* set TVAL = ~50us */ 133 /* set TVAL = ~50us */
131 rio_write_config_32(rdev, 134 rio_write_config_32(rdev,
132 rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); 135 rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
136 /* Ensure that default routing is disabled on startup */
137 rio_write_config_32(rdev,
138 RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE);
133 } 139 }
134 140
135 return 0; 141 return 0;
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 1a62934bfebc..db8b8028988d 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
303 rdev->rswitch->em_init = tsi57x_em_init; 303 rdev->rswitch->em_init = tsi57x_em_init;
304 rdev->rswitch->em_handle = tsi57x_em_handler; 304 rdev->rswitch->em_handle = tsi57x_em_handler;
305 305
306 if (do_enum) {
307 /* Ensure that default routing is disabled on startup */
308 rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT,
309 RIO_INVALID_ROUTE);
310 }
311
306 return 0; 312 return 0;
307} 313}
308 314
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 8d46838dff8a..755e1fe914af 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
524 goto fail2; 524 goto fail2;
525 } 525 }
526 526
527 platform_set_drvdata(pdev, davinci_rtc);
528
527 davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, 529 davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
528 &davinci_rtc_ops, THIS_MODULE); 530 &davinci_rtc_ops, THIS_MODULE);
529 if (IS_ERR(davinci_rtc->rtc)) { 531 if (IS_ERR(davinci_rtc->rtc)) {
@@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
553 555
554 rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); 556 rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);
555 557
556 platform_set_drvdata(pdev, davinci_rtc);
557
558 device_init_wakeup(&pdev->dev, 0); 558 device_init_wakeup(&pdev->dev, 0);
559 559
560 return 0; 560 return 0;
@@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
562fail4: 562fail4:
563 rtc_device_unregister(davinci_rtc->rtc); 563 rtc_device_unregister(davinci_rtc->rtc);
564fail3: 564fail3:
565 platform_set_drvdata(pdev, NULL);
565 iounmap(davinci_rtc->base); 566 iounmap(davinci_rtc->base);
566fail2: 567fail2:
567 release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); 568 release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
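The RTC hunks in this part of the series all move platform_set_drvdata() (or i2c_set_clientdata()) ahead of rtc_device_register(), and clear it again on the error path; the usual motivation is that registration may call back into the driver's rtc ops, which fetch their state through drvdata. A sketch of the resulting probe shape, with hypothetical demo_* names and the real callbacks omitted:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>

struct demo_rtc {
	struct rtc_device *rtc;
	/* register base, locks, ... */
};

static const struct rtc_class_ops demo_rtc_ops;	/* callbacks omitted in this sketch */

static int demo_rtc_probe(struct platform_device *pdev)
{
	struct demo_rtc *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);	/* reachable before the core calls back */

	priv->rtc = rtc_device_register(pdev->name, &pdev->dev,
					&demo_rtc_ops, THIS_MODULE);
	if (IS_ERR(priv->rtc)) {
		platform_set_drvdata(pdev, NULL);
		kfree(priv);
		return PTR_ERR(priv->rtc);
	}
	return 0;
}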
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 60ce69600828..47e681df31e2 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev)
355 goto out; 355 goto out;
356 } 356 }
357 spin_lock_init(&priv->lock); 357 spin_lock_init(&priv->lock);
358 platform_set_drvdata(pdev, priv);
358 rtc = rtc_device_register("ds1286", &pdev->dev, 359 rtc = rtc_device_register("ds1286", &pdev->dev,
359 &ds1286_ops, THIS_MODULE); 360 &ds1286_ops, THIS_MODULE);
360 if (IS_ERR(rtc)) { 361 if (IS_ERR(rtc)) {
@@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev)
362 goto out; 363 goto out;
363 } 364 }
364 priv->rtc = rtc; 365 priv->rtc = rtc;
365 platform_set_drvdata(pdev, priv);
366 return 0; 366 return 0;
367 367
368out: 368out:
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 11ae64dcbf3c..335551d333b2 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
151 return -ENXIO; 151 return -ENXIO;
152 152
153 pdev->dev.platform_data = ep93xx_rtc; 153 pdev->dev.platform_data = ep93xx_rtc;
154 platform_set_drvdata(pdev, rtc);
154 155
155 rtc = rtc_device_register(pdev->name, 156 rtc = rtc_device_register(pdev->name,
156 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
@@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
159 goto exit; 160 goto exit;
160 } 161 }
161 162
162 platform_set_drvdata(pdev, rtc);
163
164 err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 163 err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
165 if (err) 164 if (err)
166 goto fail; 165 goto fail;
@@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
168 return 0; 167 return 0;
169 168
170fail: 169fail:
171 platform_set_drvdata(pdev, NULL);
172 rtc_device_unregister(rtc); 170 rtc_device_unregister(rtc);
173exit: 171exit:
172 platform_set_drvdata(pdev, NULL);
174 pdev->dev.platform_data = NULL; 173 pdev->dev.platform_data = NULL;
175 return err; 174 return err;
176} 175}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 69fe664a2228..eda128fc1d38 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client,
783 goto exit; 783 goto exit;
784 } 784 }
785 785
786 clientdata->features = id->driver_data;
787 i2c_set_clientdata(client, clientdata);
788
786 rtc = rtc_device_register(client->name, &client->dev, 789 rtc = rtc_device_register(client->name, &client->dev,
787 &m41t80_rtc_ops, THIS_MODULE); 790 &m41t80_rtc_ops, THIS_MODULE);
788 if (IS_ERR(rtc)) { 791 if (IS_ERR(rtc)) {
@@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client,
792 } 795 }
793 796
794 clientdata->rtc = rtc; 797 clientdata->rtc = rtc;
795 clientdata->features = id->driver_data;
796 i2c_set_clientdata(client, clientdata);
797 798
798 /* Make sure HT (Halt Update) bit is cleared */ 799 /* Make sure HT (Halt Update) bit is cleared */
799 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); 800 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 20494b5edc3c..3bc046f427e0 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -258,6 +258,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
258 } 258 }
259 259
260 dev_set_drvdata(&pdev->dev, info); 260 dev_set_drvdata(&pdev->dev, info);
261 /* XXX - isn't this redundant? */
262 platform_set_drvdata(pdev, info);
261 263
262 info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, 264 info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev,
263 &max8925_rtc_ops, THIS_MODULE); 265 &max8925_rtc_ops, THIS_MODULE);
@@ -267,10 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
267 goto out_rtc; 269 goto out_rtc;
268 } 270 }
269 271
270 platform_set_drvdata(pdev, info);
271
272 return 0; 272 return 0;
273out_rtc: 273out_rtc:
274 platform_set_drvdata(pdev, NULL);
274 free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); 275 free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info);
275out_irq: 276out_irq:
276 kfree(info); 277 kfree(info);
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index 3f7bc6b9fefa..2e48aa604273 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
265 info->rtc = max8998->rtc; 265 info->rtc = max8998->rtc;
266 info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; 266 info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0;
267 267
268 platform_set_drvdata(pdev, info);
269
268 info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, 270 info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev,
269 &max8998_rtc_ops, THIS_MODULE); 271 &max8998_rtc_ops, THIS_MODULE);
270 272
@@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
274 goto out_rtc; 276 goto out_rtc;
275 } 277 }
276 278
277 platform_set_drvdata(pdev, info);
278
279 ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, 279 ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
280 "rtc-alarm0", info); 280 "rtc-alarm0", info);
281 281
@@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
293 return 0; 293 return 0;
294 294
295out_rtc: 295out_rtc:
296 platform_set_drvdata(pdev, NULL);
296 kfree(info); 297 kfree(info);
297 return ret; 298 return ret;
298} 299}
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index c5ac03793e79..a1a278bc340d 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev)
349 if (ret) 349 if (ret)
350 goto err_alarm_irq_request; 350 goto err_alarm_irq_request;
351 351
352 mc13xxx_unlock(mc13xxx);
353
352 priv->rtc = rtc_device_register(pdev->name, 354 priv->rtc = rtc_device_register(pdev->name,
353 &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); 355 &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE);
354 if (IS_ERR(priv->rtc)) { 356 if (IS_ERR(priv->rtc)) {
355 ret = PTR_ERR(priv->rtc); 357 ret = PTR_ERR(priv->rtc);
356 358
359 mc13xxx_lock(mc13xxx);
360
357 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); 361 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
358err_alarm_irq_request: 362err_alarm_irq_request:
359 363
@@ -365,12 +369,12 @@ err_reset_irq_status:
365 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); 369 mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
366err_reset_irq_request: 370err_reset_irq_request:
367 371
372 mc13xxx_unlock(mc13xxx);
373
368 platform_set_drvdata(pdev, NULL); 374 platform_set_drvdata(pdev, NULL);
369 kfree(priv); 375 kfree(priv);
370 } 376 }
371 377
372 mc13xxx_unlock(mc13xxx);
373
374 return ret; 378 return ret;
375} 379}
376 380
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 67820626e18f..fcb113c11122 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev)
214 error = -ENOMEM; 214 error = -ENOMEM;
215 goto out_free_priv; 215 goto out_free_priv;
216 } 216 }
217 platform_set_drvdata(dev, priv);
217 218
218 rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, 219 rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops,
219 THIS_MODULE); 220 THIS_MODULE);
@@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev)
223 } 224 }
224 225
225 priv->rtc = rtc; 226 priv->rtc = rtc;
226 platform_set_drvdata(dev, priv);
227 return 0; 227 return 0;
228 228
229out_unmap: 229out_unmap:
230 platform_set_drvdata(dev, NULL);
230 iounmap(priv->regs); 231 iounmap(priv->regs);
231out_free_priv: 232out_free_priv:
232 kfree(priv); 233 kfree(priv);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 826ab64a8fa9..d814417bee8c 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
418 goto exit_put_clk; 418 goto exit_put_clk;
419 } 419 }
420 420
421 rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
422 THIS_MODULE);
423 if (IS_ERR(rtc)) {
424 ret = PTR_ERR(rtc);
425 goto exit_put_clk;
426 }
427
428 pdata->rtc = rtc;
429 platform_set_drvdata(pdev, pdata); 421 platform_set_drvdata(pdev, pdata);
430 422
431 /* Configure and enable the RTC */ 423 /* Configure and enable the RTC */
@@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
438 pdata->irq = -1; 430 pdata->irq = -1;
439 } 431 }
440 432
433 rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
434 THIS_MODULE);
435 if (IS_ERR(rtc)) {
436 ret = PTR_ERR(rtc);
437 goto exit_clr_drvdata;
438 }
439
440 pdata->rtc = rtc;
441
441 return 0; 442 return 0;
442 443
444exit_clr_drvdata:
445 platform_set_drvdata(pdev, NULL);
443exit_put_clk: 446exit_put_clk:
444 clk_disable(pdata->clk); 447 clk_disable(pdata->clk);
445 clk_put(pdata->clk); 448 clk_put(pdata->clk);
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index a633abc42896..cd4f198cc2ef 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev)
151 151
152 pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); 152 pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent);
153 153
154 platform_set_drvdata(pdev, pcap_rtc);
155
154 pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, 156 pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev,
155 &pcap_rtc_ops, THIS_MODULE); 157 &pcap_rtc_ops, THIS_MODULE);
156 if (IS_ERR(pcap_rtc->rtc)) { 158 if (IS_ERR(pcap_rtc->rtc)) {
@@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev)
158 goto fail_rtc; 160 goto fail_rtc;
159 } 161 }
160 162
161 platform_set_drvdata(pdev, pcap_rtc);
162 163
163 timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); 164 timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ);
164 alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); 165 alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA);
@@ -177,6 +178,7 @@ fail_alarm:
177fail_timer: 178fail_timer:
178 rtc_device_unregister(pcap_rtc->rtc); 179 rtc_device_unregister(pcap_rtc->rtc);
179fail_rtc: 180fail_rtc:
181 platform_set_drvdata(pdev, NULL);
180 kfree(pcap_rtc); 182 kfree(pcap_rtc);
181 return err; 183 return err;
182} 184}
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 694da39b6dd2..359da6d020b9 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
249 249
250 spin_lock_init(&priv->lock); 250 spin_lock_init(&priv->lock);
251 251
252 platform_set_drvdata(dev, priv);
253
252 rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, 254 rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops,
253 THIS_MODULE); 255 THIS_MODULE);
254 if (IS_ERR(rtc)) { 256 if (IS_ERR(rtc)) {
255 error = PTR_ERR(rtc); 257 error = PTR_ERR(rtc);
256 goto out_unmap; 258 goto out_unmap;
257 } 259 }
258
259 priv->rtc = rtc; 260 priv->rtc = rtc;
260 platform_set_drvdata(dev, priv);
261 261
262 error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); 262 error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr);
263 if (error) 263 if (error)
@@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
268out_unregister: 268out_unregister:
269 rtc_device_unregister(rtc); 269 rtc_device_unregister(rtc);
270out_unmap: 270out_unmap:
271 platform_set_drvdata(dev, NULL);
271 iounmap(priv->regs); 272 iounmap(priv->regs);
272out_free_priv: 273out_free_priv:
273 kfree(priv); 274 kfree(priv);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 83cea9a55e2f..1b3924c2fffd 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device)
236 disk->major = tapeblock_major; 236 disk->major = tapeblock_major;
237 disk->first_minor = device->first_minor; 237 disk->first_minor = device->first_minor;
238 disk->fops = &tapeblock_fops; 238 disk->fops = &tapeblock_fops;
239 disk->events = DISK_EVENT_MEDIA_CHANGE;
240 disk->private_data = tape_get_device(device); 239 disk->private_data = tape_get_device(device);
241 disk->queue = blkdat->request_queue; 240 disk->queue = blkdat->request_queue;
242 set_capacity(disk, 0); 241 set_capacity(disk, 0);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index e2d45c91b8e8..9689d41c7888 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = {
1292 .use_clustering = ENABLE_CLUSTERING, 1292 .use_clustering = ENABLE_CLUSTERING,
1293}; 1293};
1294 1294
1295static const struct of_device_id qpti_match[];
1295static int __devinit qpti_sbus_probe(struct platform_device *op) 1296static int __devinit qpti_sbus_probe(struct platform_device *op)
1296{ 1297{
1298 const struct of_device_id *match;
1297 struct scsi_host_template *tpnt; 1299 struct scsi_host_template *tpnt;
1298 struct device_node *dp = op->dev.of_node; 1300 struct device_node *dp = op->dev.of_node;
1299 struct Scsi_Host *host; 1301 struct Scsi_Host *host;
@@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op)
1301 static int nqptis; 1303 static int nqptis;
1302 const char *fcode; 1304 const char *fcode;
1303 1305
1304 if (!op->dev.of_match) 1306 match = of_match_device(qpti_match, &op->dev);
1307 if (!match)
1305 return -EINVAL; 1308 return -EINVAL;
1306 tpnt = op->dev.of_match->data; 1309 tpnt = match->data;
1307 1310
1308 /* Sometimes Antares cards come up not completely 1311 /* Sometimes Antares cards come up not completely
1309 * setup, and we get a report of a zero IRQ. 1312 * setup, and we get a report of a zero IRQ.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0bac91e72370..ec1803a48723 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache;
74 */ 74 */
75#define SCSI_QUEUE_DELAY 3 75#define SCSI_QUEUE_DELAY 3
76 76
77static void scsi_run_queue(struct request_queue *q);
78
79/* 77/*
80 * Function: scsi_unprep_request() 78 * Function: scsi_unprep_request()
81 * 79 *
@@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
161 blk_requeue_request(q, cmd->request); 159 blk_requeue_request(q, cmd->request);
162 spin_unlock_irqrestore(q->queue_lock, flags); 160 spin_unlock_irqrestore(q->queue_lock, flags);
163 161
164 scsi_run_queue(q); 162 kblockd_schedule_work(q, &device->requeue_work);
165 163
166 return 0; 164 return 0;
167} 165}
@@ -438,7 +436,11 @@ static void scsi_run_queue(struct request_queue *q)
438 continue; 436 continue;
439 } 437 }
440 438
441 blk_run_queue_async(sdev->request_queue); 439 spin_unlock(shost->host_lock);
440 spin_lock(sdev->request_queue->queue_lock);
441 __blk_run_queue(sdev->request_queue);
442 spin_unlock(sdev->request_queue->queue_lock);
443 spin_lock(shost->host_lock);
442 } 444 }
443 /* put any unprocessed entries back */ 445 /* put any unprocessed entries back */
444 list_splice(&starved_list, &shost->starved_list); 446 list_splice(&starved_list, &shost->starved_list);
@@ -447,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q)
447 blk_run_queue(q); 449 blk_run_queue(q);
448} 450}
449 451
452void scsi_requeue_run_queue(struct work_struct *work)
453{
454 struct scsi_device *sdev;
455 struct request_queue *q;
456
457 sdev = container_of(work, struct scsi_device, requeue_work);
458 q = sdev->request_queue;
459 scsi_run_queue(q);
460}
461
450/* 462/*
451 * Function: scsi_requeue_command() 463 * Function: scsi_requeue_command()
452 * 464 *
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 087821fac8fe..58584dc0724a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
242 int display_failure_msg = 1, ret; 242 int display_failure_msg = 1, ret;
243 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 243 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
244 extern void scsi_evt_thread(struct work_struct *work); 244 extern void scsi_evt_thread(struct work_struct *work);
245 extern void scsi_requeue_run_queue(struct work_struct *work);
245 246
246 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, 247 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
247 GFP_ATOMIC); 248 GFP_ATOMIC);
@@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
264 INIT_LIST_HEAD(&sdev->event_list); 265 INIT_LIST_HEAD(&sdev->event_list);
265 spin_lock_init(&sdev->list_lock); 266 spin_lock_init(&sdev->list_lock);
266 INIT_WORK(&sdev->event_work, scsi_evt_thread); 267 INIT_WORK(&sdev->event_work, scsi_evt_thread);
268 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
267 269
268 sdev->sdev_gendev.parent = get_device(&starget->dev); 270 sdev->sdev_gendev.parent = get_device(&starget->dev);
269 sdev->sdev_target = starget; 271 sdev->sdev_target = starget;
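The scsi_lib.c and scsi_scan.c hunks above stop re-running the queue directly from the requeue path and instead schedule a per-device work item (requeue_work) that does it later, from process context. A generic sketch of that embed-a-work_struct pattern, with hypothetical names and plain schedule_work() standing in for the kblockd queue used by the patch:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_device {
	struct work_struct requeue_work;
	/* queue pointer, state, ... */
};

static void demo_requeue_fn(struct work_struct *work)
{
	struct demo_device *dev =
		container_of(work, struct demo_device, requeue_work);

	/* Runs later in process context with no caller locks held:
	 * a safe place to re-run the request queue. */
	(void)dev;
}

static struct demo_device *demo_alloc(void)
{
	struct demo_device *dev = kzalloc(sizeof(*dev), GFP_ATOMIC);

	if (dev)
		INIT_WORK(&dev->requeue_work, demo_requeue_fn);
	return dev;
}

/* From the hot path: schedule_work(&dev->requeue_work); */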
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 0e8eec516df4..c911b2419abb 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
80/* 80/*
81 * Try to register a serial port 81 * Try to register a serial port
82 */ 82 */
83static struct of_device_id of_platform_serial_table[];
83static int __devinit of_platform_serial_probe(struct platform_device *ofdev) 84static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
84{ 85{
86 const struct of_device_id *match;
85 struct of_serial_info *info; 87 struct of_serial_info *info;
86 struct uart_port port; 88 struct uart_port port;
87 int port_type; 89 int port_type;
88 int ret; 90 int ret;
89 91
90 if (!ofdev->dev.of_match) 92 match = of_match_device(of_platform_serial_table, &ofdev->dev);
93 if (!match)
91 return -EINVAL; 94 return -EINVAL;
92 95
93 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) 96 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
@@ -97,7 +100,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
97 if (info == NULL) 100 if (info == NULL)
98 return -ENOMEM; 101 return -ENOMEM;
99 102
100 port_type = (unsigned long)ofdev->dev.of_match->data; 103 port_type = (unsigned long)match->data;
101 ret = of_platform_serial_setup(ofdev, port_type, &port); 104 ret = of_platform_serial_setup(ofdev, port_type, &port);
102 if (ret) 105 if (ret)
103 goto out; 106 goto out;
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 36613b37c504..3a68e09309f7 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2539,15 +2539,18 @@ static void qe_udc_release(struct device *dev)
2539} 2539}
2540 2540
2541/* Driver probe functions */ 2541/* Driver probe functions */
2542static const struct of_device_id qe_udc_match[];
2542static int __devinit qe_udc_probe(struct platform_device *ofdev) 2543static int __devinit qe_udc_probe(struct platform_device *ofdev)
2543{ 2544{
2545 const struct of_device_id *match;
2544 struct device_node *np = ofdev->dev.of_node; 2546 struct device_node *np = ofdev->dev.of_node;
2545 struct qe_ep *ep; 2547 struct qe_ep *ep;
2546 unsigned int ret = 0; 2548 unsigned int ret = 0;
2547 unsigned int i; 2549 unsigned int i;
2548 const void *prop; 2550 const void *prop;
2549 2551
2550 if (!ofdev->dev.of_match) 2552 match = of_match_device(qe_udc_match, &ofdev->dev);
2553 if (!match)
2551 return -EINVAL; 2554 return -EINVAL;
2552 2555
2553 prop = of_get_property(np, "mode", NULL); 2556 prop = of_get_property(np, "mode", NULL);
@@ -2561,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev)
2561 return -ENOMEM; 2564 return -ENOMEM;
2562 } 2565 }
2563 2566
2564 udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; 2567 udc_controller->soc_type = (unsigned long)match->data;
2565 udc_controller->usb_regs = of_iomap(np, 0); 2568 udc_controller->usb_regs = of_iomap(np, 0);
2566 if (!udc_controller->usb_regs) { 2569 if (!udc_controller->usb_regs) {
2567 ret = -ENOMEM; 2570 ret = -ENOMEM;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index e0c2284924b6..5aac00eb1830 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -42,9 +42,34 @@
42 42
43#define FBPIXMAPSIZE (1024 * 8) 43#define FBPIXMAPSIZE (1024 * 8)
44 44
45static DEFINE_MUTEX(registration_lock);
45struct fb_info *registered_fb[FB_MAX] __read_mostly; 46struct fb_info *registered_fb[FB_MAX] __read_mostly;
46int num_registered_fb __read_mostly; 47int num_registered_fb __read_mostly;
47 48
49static struct fb_info *get_fb_info(unsigned int idx)
50{
51 struct fb_info *fb_info;
52
53 if (idx >= FB_MAX)
54 return ERR_PTR(-ENODEV);
55
56 mutex_lock(&registration_lock);
57 fb_info = registered_fb[idx];
58 if (fb_info)
59 atomic_inc(&fb_info->count);
60 mutex_unlock(&registration_lock);
61
62 return fb_info;
63}
64
65static void put_fb_info(struct fb_info *fb_info)
66{
67 if (!atomic_dec_and_test(&fb_info->count))
68 return;
69 if (fb_info->fbops->fb_destroy)
70 fb_info->fbops->fb_destroy(fb_info);
71}
72
48int lock_fb_info(struct fb_info *info) 73int lock_fb_info(struct fb_info *info)
49{ 74{
50 mutex_lock(&info->lock); 75 mutex_lock(&info->lock);
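The hunk above adds a registration_lock plus a per-fb_info reference count: lookups hold the lock only long enough to find the entry and bump its count, and the final put is what runs fb_destroy(). A generic sketch of that lookup-and-refcount shape, with hypothetical demo_* names:

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>

#define DEMO_MAX 32

struct demo_obj {
	atomic_t count;
	void (*destroy)(struct demo_obj *obj);
};

static DEFINE_MUTEX(demo_lock);
static struct demo_obj *demo_table[DEMO_MAX];

static struct demo_obj *demo_get(unsigned int idx)
{
	struct demo_obj *obj;

	if (idx >= DEMO_MAX)
		return ERR_PTR(-ENODEV);

	mutex_lock(&demo_lock);
	obj = demo_table[idx];
	if (obj)
		atomic_inc(&obj->count);	/* take the reference under the lock */
	mutex_unlock(&demo_lock);

	return obj;	/* NULL if the slot was empty */
}

static void demo_put(struct demo_obj *obj)
{
	if (!atomic_dec_and_test(&obj->count))
		return;
	if (obj->destroy)
		obj->destroy(obj);	/* last reference dropped */
}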
@@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
647 672
648static void *fb_seq_start(struct seq_file *m, loff_t *pos) 673static void *fb_seq_start(struct seq_file *m, loff_t *pos)
649{ 674{
675 mutex_lock(&registration_lock);
650 return (*pos < FB_MAX) ? pos : NULL; 676 return (*pos < FB_MAX) ? pos : NULL;
651} 677}
652 678
@@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
658 684
659static void fb_seq_stop(struct seq_file *m, void *v) 685static void fb_seq_stop(struct seq_file *m, void *v)
660{ 686{
687 mutex_unlock(&registration_lock);
661} 688}
662 689
663static int fb_seq_show(struct seq_file *m, void *v) 690static int fb_seq_show(struct seq_file *m, void *v)
@@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = {
690 .release = seq_release, 717 .release = seq_release,
691}; 718};
692 719
693static ssize_t 720/*
694fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 721 * We hold a reference to the fb_info in file->private_data,
722 * but if the current registered fb has changed, we don't
723 * actually want to use it.
724 *
725 * So look up the fb_info using the inode minor number,
726 * and just verify it against the reference we have.
727 */
728static struct fb_info *file_fb_info(struct file *file)
695{ 729{
696 unsigned long p = *ppos;
697 struct inode *inode = file->f_path.dentry->d_inode; 730 struct inode *inode = file->f_path.dentry->d_inode;
698 int fbidx = iminor(inode); 731 int fbidx = iminor(inode);
699 struct fb_info *info = registered_fb[fbidx]; 732 struct fb_info *info = registered_fb[fbidx];
733
734 if (info != file->private_data)
735 info = NULL;
736 return info;
737}
738
739static ssize_t
740fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
741{
742 unsigned long p = *ppos;
743 struct fb_info *info = file_fb_info(file);
700 u8 *buffer, *dst; 744 u8 *buffer, *dst;
701 u8 __iomem *src; 745 u8 __iomem *src;
702 int c, cnt = 0, err = 0; 746 int c, cnt = 0, err = 0;
@@ -761,9 +805,7 @@ static ssize_t
761fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 805fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
762{ 806{
763 unsigned long p = *ppos; 807 unsigned long p = *ppos;
764 struct inode *inode = file->f_path.dentry->d_inode; 808 struct fb_info *info = file_fb_info(file);
765 int fbidx = iminor(inode);
766 struct fb_info *info = registered_fb[fbidx];
767 u8 *buffer, *src; 809 u8 *buffer, *src;
768 u8 __iomem *dst; 810 u8 __iomem *dst;
769 int c, cnt = 0, err = 0; 811 int c, cnt = 0, err = 0;
@@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1141 1183
1142static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1184static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1143{ 1185{
1144 struct inode *inode = file->f_path.dentry->d_inode; 1186 struct fb_info *info = file_fb_info(file);
1145 int fbidx = iminor(inode);
1146 struct fb_info *info = registered_fb[fbidx];
1147 1187
1188 if (!info)
1189 return -ENODEV;
1148 return do_fb_ioctl(info, cmd, arg); 1190 return do_fb_ioctl(info, cmd, arg);
1149} 1191}
1150 1192
@@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
1265static long fb_compat_ioctl(struct file *file, unsigned int cmd, 1307static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1266 unsigned long arg) 1308 unsigned long arg)
1267{ 1309{
1268 struct inode *inode = file->f_path.dentry->d_inode; 1310 struct fb_info *info = file_fb_info(file);
1269 int fbidx = iminor(inode); 1311 struct fb_ops *fb;
1270 struct fb_info *info = registered_fb[fbidx];
1271 struct fb_ops *fb = info->fbops;
1272 long ret = -ENOIOCTLCMD; 1312 long ret = -ENOIOCTLCMD;
1273 1313
1314 if (!info)
1315 return -ENODEV;
1316 fb = info->fbops;
1274 switch(cmd) { 1317 switch(cmd) {
1275 case FBIOGET_VSCREENINFO: 1318 case FBIOGET_VSCREENINFO:
1276 case FBIOPUT_VSCREENINFO: 1319 case FBIOPUT_VSCREENINFO:
@@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
1303static int 1346static int
1304fb_mmap(struct file *file, struct vm_area_struct * vma) 1347fb_mmap(struct file *file, struct vm_area_struct * vma)
1305{ 1348{
1306 int fbidx = iminor(file->f_path.dentry->d_inode); 1349 struct fb_info *info = file_fb_info(file);
1307 struct fb_info *info = registered_fb[fbidx]; 1350 struct fb_ops *fb;
1308 struct fb_ops *fb = info->fbops;
1309 unsigned long off; 1351 unsigned long off;
1310 unsigned long start; 1352 unsigned long start;
1311 u32 len; 1353 u32 len;
1312 1354
1355 if (!info)
1356 return -ENODEV;
1313 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) 1357 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
1314 return -EINVAL; 1358 return -EINVAL;
1315 off = vma->vm_pgoff << PAGE_SHIFT; 1359 off = vma->vm_pgoff << PAGE_SHIFT;
1360 fb = info->fbops;
1316 if (!fb) 1361 if (!fb)
1317 return -ENODEV; 1362 return -ENODEV;
1318 mutex_lock(&info->mm_lock); 1363 mutex_lock(&info->mm_lock);
@@ -1361,14 +1406,16 @@ __releases(&info->lock)
1361 struct fb_info *info; 1406 struct fb_info *info;
1362 int res = 0; 1407 int res = 0;
1363 1408
1364 if (fbidx >= FB_MAX) 1409 info = get_fb_info(fbidx);
1365 return -ENODEV; 1410 if (!info) {
1366 info = registered_fb[fbidx];
1367 if (!info)
1368 request_module("fb%d", fbidx); 1411 request_module("fb%d", fbidx);
1369 info = registered_fb[fbidx]; 1412 info = get_fb_info(fbidx);
1370 if (!info) 1413 if (!info)
1371 return -ENODEV; 1414 return -ENODEV;
1415 }
1416 if (IS_ERR(info))
1417 return PTR_ERR(info);
1418
1372 mutex_lock(&info->lock); 1419 mutex_lock(&info->lock);
1373 if (!try_module_get(info->fbops->owner)) { 1420 if (!try_module_get(info->fbops->owner)) {
1374 res = -ENODEV; 1421 res = -ENODEV;
@@ -1386,6 +1433,8 @@ __releases(&info->lock)
1386#endif 1433#endif
1387out: 1434out:
1388 mutex_unlock(&info->lock); 1435 mutex_unlock(&info->lock);
1436 if (res)
1437 put_fb_info(info);
1389 return res; 1438 return res;
1390} 1439}
1391 1440
@@ -1401,6 +1450,7 @@ __releases(&info->lock)
1401 info->fbops->fb_release(info,1); 1450 info->fbops->fb_release(info,1);
1402 module_put(info->fbops->owner); 1451 module_put(info->fbops->owner);
1403 mutex_unlock(&info->lock); 1452 mutex_unlock(&info->lock);
1453 put_fb_info(info);
1404 return 0; 1454 return 0;
1405} 1455}
1406 1456
@@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
1487 return false; 1537 return false;
1488} 1538}
1489 1539
1540static int do_unregister_framebuffer(struct fb_info *fb_info);
1541
1490#define VGA_FB_PHYS 0xA0000 1542#define VGA_FB_PHYS 0xA0000
1491void remove_conflicting_framebuffers(struct apertures_struct *a, 1543static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
1492 const char *name, bool primary) 1544 const char *name, bool primary)
1493{ 1545{
1494 int i; 1546 int i;
@@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a,
1510 printk(KERN_INFO "fb: conflicting fb hw usage " 1562 printk(KERN_INFO "fb: conflicting fb hw usage "
1511 "%s vs %s - removing generic driver\n", 1563 "%s vs %s - removing generic driver\n",
1512 name, registered_fb[i]->fix.id); 1564 name, registered_fb[i]->fix.id);
1513 unregister_framebuffer(registered_fb[i]); 1565 do_unregister_framebuffer(registered_fb[i]);
1514 } 1566 }
1515 } 1567 }
1516} 1568}
1517EXPORT_SYMBOL(remove_conflicting_framebuffers);
1518 1569
1519/** 1570static int do_register_framebuffer(struct fb_info *fb_info)
1520 * register_framebuffer - registers a frame buffer device
1521 * @fb_info: frame buffer info structure
1522 *
1523 * Registers a frame buffer device @fb_info.
1524 *
1525 * Returns negative errno on error, or zero for success.
1526 *
1527 */
1528
1529int
1530register_framebuffer(struct fb_info *fb_info)
1531{ 1571{
1532 int i; 1572 int i;
1533 struct fb_event event; 1573 struct fb_event event;
1534 struct fb_videomode mode; 1574 struct fb_videomode mode;
1535 1575
1536 if (num_registered_fb == FB_MAX)
1537 return -ENXIO;
1538
1539 if (fb_check_foreignness(fb_info)) 1576 if (fb_check_foreignness(fb_info))
1540 return -ENOSYS; 1577 return -ENOSYS;
1541 1578
1542 remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, 1579 do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
1543 fb_is_primary_device(fb_info)); 1580 fb_is_primary_device(fb_info));
1544 1581
1582 if (num_registered_fb == FB_MAX)
1583 return -ENXIO;
1584
1545 num_registered_fb++; 1585 num_registered_fb++;
1546 for (i = 0 ; i < FB_MAX; i++) 1586 for (i = 0 ; i < FB_MAX; i++)
1547 if (!registered_fb[i]) 1587 if (!registered_fb[i])
1548 break; 1588 break;
1549 fb_info->node = i; 1589 fb_info->node = i;
1590 atomic_set(&fb_info->count, 1);
1550 mutex_init(&fb_info->lock); 1591 mutex_init(&fb_info->lock);
1551 mutex_init(&fb_info->mm_lock); 1592 mutex_init(&fb_info->mm_lock);
1552 1593
@@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info)
1592 return 0; 1633 return 0;
1593} 1634}
1594 1635
1595 1636static int do_unregister_framebuffer(struct fb_info *fb_info)
1596/**
1597 * unregister_framebuffer - releases a frame buffer device
1598 * @fb_info: frame buffer info structure
1599 *
1600 * Unregisters a frame buffer device @fb_info.
1601 *
1602 * Returns negative errno on error, or zero for success.
1603 *
1604 * This function will also notify the framebuffer console
1605 * to release the driver.
1606 *
1607 * This is meant to be called within a driver's module_exit()
1608 * function. If this is called outside module_exit(), ensure
1609 * that the driver implements fb_open() and fb_release() to
1610 * check that no processes are using the device.
1611 */
1612
1613int
1614unregister_framebuffer(struct fb_info *fb_info)
1615{ 1637{
1616 struct fb_event event; 1638 struct fb_event event;
1617 int i, ret = 0; 1639 int i, ret = 0;
1618 1640
1619 i = fb_info->node; 1641 i = fb_info->node;
1620 if (!registered_fb[i]) { 1642 if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
1621 ret = -EINVAL; 1643 return -EINVAL;
1622 goto done;
1623 }
1624
1625 1644
1626 if (!lock_fb_info(fb_info)) 1645 if (!lock_fb_info(fb_info))
1627 return -ENODEV; 1646 return -ENODEV;
@@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info)
1629 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); 1648 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
1630 unlock_fb_info(fb_info); 1649 unlock_fb_info(fb_info);
1631 1650
1632 if (ret) { 1651 if (ret)
1633 ret = -EINVAL; 1652 return -EINVAL;
1634 goto done;
1635 }
1636 1653
1637 if (fb_info->pixmap.addr && 1654 if (fb_info->pixmap.addr &&
1638 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) 1655 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
1639 kfree(fb_info->pixmap.addr); 1656 kfree(fb_info->pixmap.addr);
1640 fb_destroy_modelist(&fb_info->modelist); 1657 fb_destroy_modelist(&fb_info->modelist);
1641 registered_fb[i]=NULL; 1658 registered_fb[i] = NULL;
1642 num_registered_fb--; 1659 num_registered_fb--;
1643 fb_cleanup_device(fb_info); 1660 fb_cleanup_device(fb_info);
1644 device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1661 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
@@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info)
1646 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1663 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1647 1664
1648 /* this may free fb info */ 1665 /* this may free fb info */
1649 if (fb_info->fbops->fb_destroy) 1666 put_fb_info(fb_info);
1650 fb_info->fbops->fb_destroy(fb_info); 1667 return 0;
1651done: 1668}
1669
1670void remove_conflicting_framebuffers(struct apertures_struct *a,
1671 const char *name, bool primary)
1672{
1673 mutex_lock(&registration_lock);
1674 do_remove_conflicting_framebuffers(a, name, primary);
1675 mutex_unlock(&registration_lock);
1676}
1677EXPORT_SYMBOL(remove_conflicting_framebuffers);
1678
1679/**
1680 * register_framebuffer - registers a frame buffer device
1681 * @fb_info: frame buffer info structure
1682 *
1683 * Registers a frame buffer device @fb_info.
1684 *
1685 * Returns negative errno on error, or zero for success.
1686 *
1687 */
1688int
1689register_framebuffer(struct fb_info *fb_info)
1690{
1691 int ret;
1692
1693 mutex_lock(&registration_lock);
1694 ret = do_register_framebuffer(fb_info);
1695 mutex_unlock(&registration_lock);
1696
1697 return ret;
1698}
1699
1700/**
1701 * unregister_framebuffer - releases a frame buffer device
1702 * @fb_info: frame buffer info structure
1703 *
1704 * Unregisters a frame buffer device @fb_info.
1705 *
1706 * Returns negative errno on error, or zero for success.
1707 *
1708 * This function will also notify the framebuffer console
1709 * to release the driver.
1710 *
1711 * This is meant to be called within a driver's module_exit()
1712 * function. If this is called outside module_exit(), ensure
1713 * that the driver implements fb_open() and fb_release() to
1714 * check that no processes are using the device.
1715 */
1716int
1717unregister_framebuffer(struct fb_info *fb_info)
1718{
1719 int ret;
1720
1721 mutex_lock(&registration_lock);
1722 ret = do_unregister_framebuffer(fb_info);
1723 mutex_unlock(&registration_lock);
1724
1652 return ret; 1725 return ret;
1653} 1726}
1654 1727
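The fbmem hunks above wrap registration, unregistration and conflicting-framebuffer removal in a single registration_lock and switch teardown to a reference count: registration does atomic_set(&fb_info->count, 1), while open, release and unregister drop references through put_fb_info(). The count field and the get/put helpers are introduced earlier in the same patch and are not visible in this excerpt; a minimal sketch of what they presumably look like, for orientation only:

static DEFINE_MUTEX(registration_lock);

/* drop a reference; the final put runs the driver's destructor */
static void put_fb_info(struct fb_info *fb_info)
{
	if (!atomic_dec_and_test(&fb_info->count))
		return;
	if (fb_info->fbops->fb_destroy)
		fb_info->fbops->fb_destroy(fb_info);
}

/* look up a registered fb and take a reference under the lock */
static struct fb_info *get_fb_info(unsigned int idx)
{
	struct fb_info *fb_info;

	if (idx >= FB_MAX)
		return ERR_PTR(-ENODEV);

	mutex_lock(&registration_lock);
	fb_info = registered_fb[idx];
	if (fb_info)
		atomic_inc(&fb_info->count);
	mutex_unlock(&registration_lock);

	return fb_info;
}

Under this scheme do_unregister_framebuffer() only drops the registration's own reference ("this may free fb info"), so an open file descriptor keeps the fb_info alive until its release path calls put_fb_info().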
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 528bceb220fd..eed5436ffb51 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -185,17 +185,20 @@ static struct miscdevice mpc8xxx_wdt_miscdev = {
185 .fops = &mpc8xxx_wdt_fops, 185 .fops = &mpc8xxx_wdt_fops,
186}; 186};
187 187
188static const struct of_device_id mpc8xxx_wdt_match[];
188static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) 189static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev)
189{ 190{
190 int ret; 191 int ret;
192 const struct of_device_id *match;
191 struct device_node *np = ofdev->dev.of_node; 193 struct device_node *np = ofdev->dev.of_node;
192 struct mpc8xxx_wdt_type *wdt_type; 194 struct mpc8xxx_wdt_type *wdt_type;
193 u32 freq = fsl_get_sys_freq(); 195 u32 freq = fsl_get_sys_freq();
194 bool enabled; 196 bool enabled;
195 197
196 if (!ofdev->dev.of_match) 198 match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
199 if (!match)
197 return -EINVAL; 200 return -EINVAL;
198 wdt_type = ofdev->dev.of_match->data; 201 wdt_type = match->data;
199 202
200 if (!freq || freq == -1) 203 if (!freq || freq == -1)
201 return -EINVAL; 204 return -EINVAL;
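The watchdog change replaces the removed ofdev->dev.of_match back-pointer with an explicit of_match_device() lookup against a forward-declared match table. A minimal sketch of the idiom, using a hypothetical compatible string and data pointer (the real mpc8xxx_wdt_match entries are defined later in the driver and are not part of this hunk):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static struct mpc8xxx_wdt_type example_type;	/* placeholder per-SoC data */

static const struct of_device_id example_wdt_match[] = {
	{ .compatible = "fsl,example-wdt", .data = &example_type },
	{},
};

static int example_wdt_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct mpc8xxx_wdt_type *wdt_type;

	match = of_match_device(example_wdt_match, &ofdev->dev);
	if (!match)
		return -EINVAL;
	wdt_type = match->data;		/* same role as in the hunk above */
	return 0;
}

of_match_device() redoes the device-tree matching at probe time, which is why the table only needs to be declared, not defined, before the probe function, exactly as the added forward declaration does.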
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 5147bdd3b8e1..257b00e98428 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1102,6 +1102,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1102 if (!bdev->bd_part) 1102 if (!bdev->bd_part)
1103 goto out_clear; 1103 goto out_clear;
1104 1104
1105 ret = 0;
1105 if (disk->fops->open) { 1106 if (disk->fops->open) {
1106 ret = disk->fops->open(bdev, mode); 1107 ret = disk->fops->open(bdev, mode);
1107 if (ret == -ERESTARTSYS) { 1108 if (ret == -ERESTARTSYS) {
@@ -1118,9 +1119,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1118 put_disk(disk); 1119 put_disk(disk);
1119 goto restart; 1120 goto restart;
1120 } 1121 }
1121 if (ret)
1122 goto out_clear;
1123 } 1122 }
1123 /*
1124 * If the device is invalidated, rescan partition
1125 * if open succeeded or failed with -ENOMEDIUM.
1126 * The latter is necessary to prevent ghost
1127 * partitions on a removed medium.
1128 */
1129 if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
1130 rescan_partitions(disk, bdev);
1131 if (ret)
1132 goto out_clear;
1133
1124 if (!bdev->bd_openers) { 1134 if (!bdev->bd_openers) {
1125 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 1135 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
1126 bdi = blk_get_backing_dev_info(bdev); 1136 bdi = blk_get_backing_dev_info(bdev);
@@ -1128,8 +1138,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1128 bdi = &default_backing_dev_info; 1138 bdi = &default_backing_dev_info;
1129 bdev_inode_switch_bdi(bdev->bd_inode, bdi); 1139 bdev_inode_switch_bdi(bdev->bd_inode, bdi);
1130 } 1140 }
1131 if (bdev->bd_invalidated)
1132 rescan_partitions(disk, bdev);
1133 } else { 1141 } else {
1134 struct block_device *whole; 1142 struct block_device *whole;
1135 whole = bdget_disk(disk, 0); 1143 whole = bdget_disk(disk, 0);
@@ -1153,13 +1161,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1153 } 1161 }
1154 } else { 1162 } else {
1155 if (bdev->bd_contains == bdev) { 1163 if (bdev->bd_contains == bdev) {
1156 if (bdev->bd_disk->fops->open) { 1164 ret = 0;
1165 if (bdev->bd_disk->fops->open)
1157 ret = bdev->bd_disk->fops->open(bdev, mode); 1166 ret = bdev->bd_disk->fops->open(bdev, mode);
1158 if (ret) 1167 /* the same as first opener case, read comment there */
1159 goto out_unlock_bdev; 1168 if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
1160 }
1161 if (bdev->bd_invalidated)
1162 rescan_partitions(bdev->bd_disk, bdev); 1169 rescan_partitions(bdev->bd_disk, bdev);
1170 if (ret)
1171 goto out_unlock_bdev;
1163 } 1172 }
1164 /* only one opener holds refs to the module and disk */ 1173 /* only one opener holds refs to the module and disk */
1165 module_put(disk->fops->owner); 1174 module_put(disk->fops->owner);
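In __blkdev_get() the partition rescan now depends on the open result: an invalidated device is rescanned when open succeeds and also when it fails with -ENOMEDIUM, so stale ("ghost") partitions of a removed medium are dropped before the error is propagated. The predicate, pulled out into a hypothetical helper purely for illustration:

static bool example_should_rescan(struct block_device *bdev, int open_ret)
{
	/* rescan an invalidated bdev on success or on "no medium" */
	return bdev->bd_invalidated &&
	       (open_ret == 0 || open_ret == -ENOMEDIUM);
}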
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 5d505aaa72fb..44ea5b92e1ba 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
178 178
179 if (value) { 179 if (value) {
180 acl = posix_acl_from_xattr(value, size); 180 acl = posix_acl_from_xattr(value, size);
181 if (IS_ERR(acl))
182 return PTR_ERR(acl);
183
181 if (acl) { 184 if (acl) {
182 ret = posix_acl_valid(acl); 185 ret = posix_acl_valid(acl);
183 if (ret) 186 if (ret)
184 goto out; 187 goto out;
185 } else if (IS_ERR(acl)) {
186 return PTR_ERR(acl);
187 } 188 }
188 } 189 }
189 190
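The ACL hunk reorders the error handling so IS_ERR() is checked immediately after posix_acl_from_xattr(), before the pointer is otherwise used; previously the error branch was only reached after first testing for a non-NULL value. A minimal sketch of the safe ordering (the helper name is illustrative; the calls match the signatures used in the hunk):

static int example_parse_acl(const void *value, size_t size,
			     struct posix_acl **aclp)
{
	struct posix_acl *acl = posix_acl_from_xattr(value, size);
	int ret;

	if (IS_ERR(acl))		/* handle the error pointer first */
		return PTR_ERR(acl);
	if (acl) {			/* NULL simply means "remove the ACL" */
		ret = posix_acl_valid(acl);
		if (ret) {
			posix_acl_release(acl);
			return ret;
		}
	}
	*aclp = acl;
	return 0;
}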
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cd52f7f556ef..9ee6bd55e16c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8856,23 +8856,38 @@ out:
8856int btrfs_init_space_info(struct btrfs_fs_info *fs_info) 8856int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8857{ 8857{
8858 struct btrfs_space_info *space_info; 8858 struct btrfs_space_info *space_info;
8859 struct btrfs_super_block *disk_super;
8860 u64 features;
8861 u64 flags;
8862 int mixed = 0;
8859 int ret; 8863 int ret;
8860 8864
8861 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, 8865 disk_super = &fs_info->super_copy;
8862 &space_info); 8866 if (!btrfs_super_root(disk_super))
8863 if (ret) 8867 return 1;
8864 return ret;
8865 8868
8866 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, 8869 features = btrfs_super_incompat_flags(disk_super);
8867 &space_info); 8870 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8868 if (ret) 8871 mixed = 1;
8869 return ret;
8870 8872
8871 ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, 8873 flags = BTRFS_BLOCK_GROUP_SYSTEM;
8872 &space_info); 8874 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8873 if (ret) 8875 if (ret)
8874 return ret; 8876 goto out;
8875 8877
8878 if (mixed) {
8879 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8880 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8881 } else {
8882 flags = BTRFS_BLOCK_GROUP_METADATA;
8883 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8884 if (ret)
8885 goto out;
8886
8887 flags = BTRFS_BLOCK_GROUP_DATA;
8888 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8889 }
8890out:
8876 return ret; 8891 return ret;
8877} 8892}
8878 8893
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ffb48d6c5433..2616f7ed4799 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -81,6 +81,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
81 iflags |= FS_NOATIME_FL; 81 iflags |= FS_NOATIME_FL;
82 if (flags & BTRFS_INODE_DIRSYNC) 82 if (flags & BTRFS_INODE_DIRSYNC)
83 iflags |= FS_DIRSYNC_FL; 83 iflags |= FS_DIRSYNC_FL;
84 if (flags & BTRFS_INODE_NODATACOW)
85 iflags |= FS_NOCOW_FL;
86
87 if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
88 iflags |= FS_COMPR_FL;
89 else if (flags & BTRFS_INODE_NOCOMPRESS)
90 iflags |= FS_NOCOMP_FL;
84 91
85 return iflags; 92 return iflags;
86} 93}
@@ -144,16 +151,13 @@ static int check_flags(unsigned int flags)
144 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ 151 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
145 FS_NOATIME_FL | FS_NODUMP_FL | \ 152 FS_NOATIME_FL | FS_NODUMP_FL | \
146 FS_SYNC_FL | FS_DIRSYNC_FL | \ 153 FS_SYNC_FL | FS_DIRSYNC_FL | \
147 FS_NOCOMP_FL | FS_COMPR_FL | \ 154 FS_NOCOMP_FL | FS_COMPR_FL |
148 FS_NOCOW_FL | FS_COW_FL)) 155 FS_NOCOW_FL))
149 return -EOPNOTSUPP; 156 return -EOPNOTSUPP;
150 157
151 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) 158 if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
152 return -EINVAL; 159 return -EINVAL;
153 160
154 if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL))
155 return -EINVAL;
156
157 return 0; 161 return 0;
158} 162}
159 163
@@ -218,6 +222,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
218 ip->flags |= BTRFS_INODE_DIRSYNC; 222 ip->flags |= BTRFS_INODE_DIRSYNC;
219 else 223 else
220 ip->flags &= ~BTRFS_INODE_DIRSYNC; 224 ip->flags &= ~BTRFS_INODE_DIRSYNC;
225 if (flags & FS_NOCOW_FL)
226 ip->flags |= BTRFS_INODE_NODATACOW;
227 else
228 ip->flags &= ~BTRFS_INODE_NODATACOW;
221 229
222 /* 230 /*
223 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS 231 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
@@ -230,11 +238,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
230 } else if (flags & FS_COMPR_FL) { 238 } else if (flags & FS_COMPR_FL) {
231 ip->flags |= BTRFS_INODE_COMPRESS; 239 ip->flags |= BTRFS_INODE_COMPRESS;
232 ip->flags &= ~BTRFS_INODE_NOCOMPRESS; 240 ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
241 } else {
242 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
233 } 243 }
234 if (flags & FS_NOCOW_FL)
235 ip->flags |= BTRFS_INODE_NODATACOW;
236 else if (flags & FS_COW_FL)
237 ip->flags &= ~BTRFS_INODE_NODATACOW;
238 244
239 trans = btrfs_join_transaction(root, 1); 245 trans = btrfs_join_transaction(root, 1);
240 BUG_ON(IS_ERR(trans)); 246 BUG_ON(IS_ERR(trans));
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 23d43cde4306..1b2e180b018d 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -277,6 +277,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
277 277
278 for (i = 0, j = 0; i < srclen; j++) { 278 for (i = 0, j = 0; i < srclen; j++) {
279 src_char = source[i]; 279 src_char = source[i];
280 charlen = 1;
280 switch (src_char) { 281 switch (src_char) {
281 case 0: 282 case 0:
282 put_unaligned(0, &target[j]); 283 put_unaligned(0, &target[j]);
@@ -316,16 +317,13 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
316 dst_char = cpu_to_le16(0x003f); 317 dst_char = cpu_to_le16(0x003f);
317 charlen = 1; 318 charlen = 1;
318 } 319 }
319 /*
320 * character may take more than one byte in the source
321 * string, but will take exactly two bytes in the
322 * target string
323 */
324 i += charlen;
325 continue;
326 } 320 }
321 /*
322 * character may take more than one byte in the source string,
323 * but will take exactly two bytes in the target string
324 */
325 i += charlen;
327 put_unaligned(dst_char, &target[j]); 326 put_unaligned(dst_char, &target[j]);
328 i++; /* move to next char in source string */
329 } 327 }
330 328
331ctoUCS_out: 329ctoUCS_out:
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 05f1dcf7d79a..277262a8e82f 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2673,6 +2673,11 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon,
2673 0 /* not legacy */, cifs_sb->local_nls, 2673 0 /* not legacy */, cifs_sb->local_nls,
2674 cifs_sb->mnt_cifs_flags & 2674 cifs_sb->mnt_cifs_flags &
2675 CIFS_MOUNT_MAP_SPECIAL_CHR); 2675 CIFS_MOUNT_MAP_SPECIAL_CHR);
2676
2677 if (rc == -EOPNOTSUPP || rc == -EINVAL)
2678 rc = SMBQueryInformation(xid, tcon, full_path, pfile_info,
2679 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
2680 CIFS_MOUNT_MAP_SPECIAL_CHR);
2676 kfree(pfile_info); 2681 kfree(pfile_info);
2677 return rc; 2682 return rc;
2678} 2683}
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 3313dd19f543..9a37a9b6de3a 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -53,11 +53,14 @@ DEFINE_SPINLOCK(configfs_dirent_lock);
53static void configfs_d_iput(struct dentry * dentry, 53static void configfs_d_iput(struct dentry * dentry,
54 struct inode * inode) 54 struct inode * inode)
55{ 55{
56 struct configfs_dirent * sd = dentry->d_fsdata; 56 struct configfs_dirent *sd = dentry->d_fsdata;
57 57
58 if (sd) { 58 if (sd) {
59 BUG_ON(sd->s_dentry != dentry); 59 BUG_ON(sd->s_dentry != dentry);
60 /* Coordinate with configfs_readdir */
61 spin_lock(&configfs_dirent_lock);
60 sd->s_dentry = NULL; 62 sd->s_dentry = NULL;
63 spin_unlock(&configfs_dirent_lock);
61 configfs_put(sd); 64 configfs_put(sd);
62 } 65 }
63 iput(inode); 66 iput(inode);
@@ -689,7 +692,8 @@ static int create_default_group(struct config_group *parent_group,
689 sd = child->d_fsdata; 692 sd = child->d_fsdata;
690 sd->s_type |= CONFIGFS_USET_DEFAULT; 693 sd->s_type |= CONFIGFS_USET_DEFAULT;
691 } else { 694 } else {
692 d_delete(child); 695 BUG_ON(child->d_inode);
696 d_drop(child);
693 dput(child); 697 dput(child);
694 } 698 }
695 } 699 }
@@ -1545,7 +1549,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1545 struct configfs_dirent * parent_sd = dentry->d_fsdata; 1549 struct configfs_dirent * parent_sd = dentry->d_fsdata;
1546 struct configfs_dirent *cursor = filp->private_data; 1550 struct configfs_dirent *cursor = filp->private_data;
1547 struct list_head *p, *q = &cursor->s_sibling; 1551 struct list_head *p, *q = &cursor->s_sibling;
1548 ino_t ino; 1552 ino_t ino = 0;
1549 int i = filp->f_pos; 1553 int i = filp->f_pos;
1550 1554
1551 switch (i) { 1555 switch (i) {
@@ -1573,6 +1577,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1573 struct configfs_dirent *next; 1577 struct configfs_dirent *next;
1574 const char * name; 1578 const char * name;
1575 int len; 1579 int len;
1580 struct inode *inode = NULL;
1576 1581
1577 next = list_entry(p, struct configfs_dirent, 1582 next = list_entry(p, struct configfs_dirent,
1578 s_sibling); 1583 s_sibling);
@@ -1581,9 +1586,28 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
1581 1586
1582 name = configfs_get_name(next); 1587 name = configfs_get_name(next);
1583 len = strlen(name); 1588 len = strlen(name);
1584 if (next->s_dentry) 1589
1585 ino = next->s_dentry->d_inode->i_ino; 1590 /*
1586 else 1591 * We'll have a dentry and an inode for
1592 * PINNED items and for open attribute
1593 * files. We lock here to prevent a race
1594 * with configfs_d_iput() clearing
1595 * s_dentry before calling iput().
1596 *
1597 * Why do we go to the trouble? If
1598 * someone has an attribute file open,
1599 * the inode number should match until
1600 * they close it. Beyond that, we don't
1601 * care.
1602 */
1603 spin_lock(&configfs_dirent_lock);
1604 dentry = next->s_dentry;
1605 if (dentry)
1606 inode = dentry->d_inode;
1607 if (inode)
1608 ino = inode->i_ino;
1609 spin_unlock(&configfs_dirent_lock);
1610 if (!inode)
1587 ino = iunique(configfs_sb, 2); 1611 ino = iunique(configfs_sb, 2);
1588 1612
1589 if (filldir(dirent, name, len, filp->f_pos, ino, 1613 if (filldir(dirent, name, len, filp->f_pos, ino,
@@ -1683,7 +1707,8 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
1683 err = configfs_attach_group(sd->s_element, &group->cg_item, 1707 err = configfs_attach_group(sd->s_element, &group->cg_item,
1684 dentry); 1708 dentry);
1685 if (err) { 1709 if (err) {
1686 d_delete(dentry); 1710 BUG_ON(dentry->d_inode);
1711 d_drop(dentry);
1687 dput(dentry); 1712 dput(dentry);
1688 } else { 1713 } else {
1689 spin_lock(&configfs_dirent_lock); 1714 spin_lock(&configfs_dirent_lock);
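The configfs changes close a race between configfs_d_iput() clearing sd->s_dentry and configfs_readdir() chasing that pointer to report an inode number: both sides now touch s_dentry only under configfs_dirent_lock, and readdir falls back to iunique() when no pinned dentry exists. The same logic, restated as a stand-alone helper with an illustrative name:

static ino_t example_dirent_ino(struct configfs_dirent *sd,
				struct super_block *sb)
{
	struct inode *inode = NULL;
	ino_t ino = 0;

	spin_lock(&configfs_dirent_lock);
	if (sd->s_dentry)
		inode = sd->s_dentry->d_inode;
	if (inode)
		ino = inode->i_ino;
	spin_unlock(&configfs_dirent_lock);

	if (!inode)			/* nothing pinned: hand out a unique ino */
		ino = iunique(sb, 2);
	return ino;
}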
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c6ba49bd95b3..b32eb29a4e6f 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
174 if (!inode) 174 if (!inode)
175 return 0; 175 return 0;
176 176
177 if (nd->flags & LOOKUP_RCU) 177 if (nd && (nd->flags & LOOKUP_RCU))
178 return -ECHILD; 178 return -ECHILD;
179 179
180 fc = get_fuse_conn(inode); 180 fc = get_fuse_conn(inode);
diff --git a/fs/namei.c b/fs/namei.c
index 54fc993e3027..e3c4f112ebf7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname);
179static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, 179static int acl_permission_check(struct inode *inode, int mask, unsigned int flags,
180 int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) 180 int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
181{ 181{
182 umode_t mode = inode->i_mode; 182 unsigned int mode = inode->i_mode;
183 183
184 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 184 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
185 185
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 6f8192f4cfc7..be79dc9f386d 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -117,6 +117,8 @@ static int filelayout_async_handle_error(struct rpc_task *task,
117 case -EKEYEXPIRED: 117 case -EKEYEXPIRED:
118 rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); 118 rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
119 break; 119 break;
120 case -NFS4ERR_RETRY_UNCACHED_REP:
121 break;
120 default: 122 default:
121 dprintk("%s DS error. Retry through MDS %d\n", __func__, 123 dprintk("%s DS error. Retry through MDS %d\n", __func__,
122 task->tk_status); 124 task->tk_status);
@@ -416,7 +418,8 @@ static int
416filelayout_check_layout(struct pnfs_layout_hdr *lo, 418filelayout_check_layout(struct pnfs_layout_hdr *lo,
417 struct nfs4_filelayout_segment *fl, 419 struct nfs4_filelayout_segment *fl,
418 struct nfs4_layoutget_res *lgr, 420 struct nfs4_layoutget_res *lgr,
419 struct nfs4_deviceid *id) 421 struct nfs4_deviceid *id,
422 gfp_t gfp_flags)
420{ 423{
421 struct nfs4_file_layout_dsaddr *dsaddr; 424 struct nfs4_file_layout_dsaddr *dsaddr;
422 int status = -EINVAL; 425 int status = -EINVAL;
@@ -439,7 +442,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
439 /* find and reference the deviceid */ 442 /* find and reference the deviceid */
440 dsaddr = nfs4_fl_find_get_deviceid(id); 443 dsaddr = nfs4_fl_find_get_deviceid(id);
441 if (dsaddr == NULL) { 444 if (dsaddr == NULL) {
442 dsaddr = get_device_info(lo->plh_inode, id); 445 dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
443 if (dsaddr == NULL) 446 if (dsaddr == NULL)
444 goto out; 447 goto out;
445 } 448 }
@@ -500,7 +503,8 @@ static int
500filelayout_decode_layout(struct pnfs_layout_hdr *flo, 503filelayout_decode_layout(struct pnfs_layout_hdr *flo,
501 struct nfs4_filelayout_segment *fl, 504 struct nfs4_filelayout_segment *fl,
502 struct nfs4_layoutget_res *lgr, 505 struct nfs4_layoutget_res *lgr,
503 struct nfs4_deviceid *id) 506 struct nfs4_deviceid *id,
507 gfp_t gfp_flags)
504{ 508{
505 struct xdr_stream stream; 509 struct xdr_stream stream;
506 struct xdr_buf buf = { 510 struct xdr_buf buf = {
@@ -516,7 +520,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
516 520
517 dprintk("%s: set_layout_map Begin\n", __func__); 521 dprintk("%s: set_layout_map Begin\n", __func__);
518 522
519 scratch = alloc_page(GFP_KERNEL); 523 scratch = alloc_page(gfp_flags);
520 if (!scratch) 524 if (!scratch)
521 return -ENOMEM; 525 return -ENOMEM;
522 526
@@ -554,13 +558,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
554 goto out_err; 558 goto out_err;
555 559
556 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), 560 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
557 GFP_KERNEL); 561 gfp_flags);
558 if (!fl->fh_array) 562 if (!fl->fh_array)
559 goto out_err; 563 goto out_err;
560 564
561 for (i = 0; i < fl->num_fh; i++) { 565 for (i = 0; i < fl->num_fh; i++) {
562 /* Do we want to use a mempool here? */ 566 /* Do we want to use a mempool here? */
563 fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); 567 fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
564 if (!fl->fh_array[i]) 568 if (!fl->fh_array[i])
565 goto out_err_free; 569 goto out_err_free;
566 570
@@ -605,19 +609,20 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
605 609
606static struct pnfs_layout_segment * 610static struct pnfs_layout_segment *
607filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, 611filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
608 struct nfs4_layoutget_res *lgr) 612 struct nfs4_layoutget_res *lgr,
613 gfp_t gfp_flags)
609{ 614{
610 struct nfs4_filelayout_segment *fl; 615 struct nfs4_filelayout_segment *fl;
611 int rc; 616 int rc;
612 struct nfs4_deviceid id; 617 struct nfs4_deviceid id;
613 618
614 dprintk("--> %s\n", __func__); 619 dprintk("--> %s\n", __func__);
615 fl = kzalloc(sizeof(*fl), GFP_KERNEL); 620 fl = kzalloc(sizeof(*fl), gfp_flags);
616 if (!fl) 621 if (!fl)
617 return NULL; 622 return NULL;
618 623
619 rc = filelayout_decode_layout(layoutid, fl, lgr, &id); 624 rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
620 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) { 625 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
621 _filelayout_free_lseg(fl); 626 _filelayout_free_lseg(fl);
622 return NULL; 627 return NULL;
623 } 628 }
@@ -633,7 +638,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
633 int size = (fl->stripe_type == STRIPE_SPARSE) ? 638 int size = (fl->stripe_type == STRIPE_SPARSE) ?
634 fl->dsaddr->ds_num : fl->dsaddr->stripe_count; 639 fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
635 640
636 fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL); 641 fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags);
637 if (!fl->commit_buckets) { 642 if (!fl->commit_buckets) {
638 filelayout_free_lseg(&fl->generic_hdr); 643 filelayout_free_lseg(&fl->generic_hdr);
639 return NULL; 644 return NULL;
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 7c44579f5832..2b461d77b43a 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr *
104nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); 104nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id);
105extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); 105extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
106struct nfs4_file_layout_dsaddr * 106struct nfs4_file_layout_dsaddr *
107get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id); 107get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
108 108
109#endif /* FS_NFS_NFS4FILELAYOUT_H */ 109#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index de5350f2b249..db07c7af1395 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
225} 225}
226 226
227static struct nfs4_pnfs_ds * 227static struct nfs4_pnfs_ds *
228nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port) 228nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags)
229{ 229{
230 struct nfs4_pnfs_ds *tmp_ds, *ds; 230 struct nfs4_pnfs_ds *tmp_ds, *ds;
231 231
232 ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL); 232 ds = kzalloc(sizeof(*tmp_ds), gfp_flags);
233 if (!ds) 233 if (!ds)
234 goto out; 234 goto out;
235 235
@@ -261,7 +261,7 @@ out:
261 * Currently only support ipv4, and one multi-path address. 261 * Currently only support ipv4, and one multi-path address.
262 */ 262 */
263static struct nfs4_pnfs_ds * 263static struct nfs4_pnfs_ds *
264decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) 264decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags)
265{ 265{
266 struct nfs4_pnfs_ds *ds = NULL; 266 struct nfs4_pnfs_ds *ds = NULL;
267 char *buf; 267 char *buf;
@@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
303 rlen); 303 rlen);
304 goto out_err; 304 goto out_err;
305 } 305 }
306 buf = kmalloc(rlen + 1, GFP_KERNEL); 306 buf = kmalloc(rlen + 1, gfp_flags);
307 if (!buf) { 307 if (!buf) {
308 dprintk("%s: Not enough memory\n", __func__); 308 dprintk("%s: Not enough memory\n", __func__);
309 goto out_err; 309 goto out_err;
@@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
333 sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); 333 sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
334 port = htons((tmp[0] << 8) | (tmp[1])); 334 port = htons((tmp[0] << 8) | (tmp[1]));
335 335
336 ds = nfs4_pnfs_ds_add(inode, ip_addr, port); 336 ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags);
337 dprintk("%s: Decoded address and port %s\n", __func__, buf); 337 dprintk("%s: Decoded address and port %s\n", __func__, buf);
338out_free: 338out_free:
339 kfree(buf); 339 kfree(buf);
@@ -343,7 +343,7 @@ out_err:
343 343
344/* Decode opaque device data and return the result */ 344/* Decode opaque device data and return the result */
345static struct nfs4_file_layout_dsaddr* 345static struct nfs4_file_layout_dsaddr*
346decode_device(struct inode *ino, struct pnfs_device *pdev) 346decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
347{ 347{
348 int i; 348 int i;
349 u32 cnt, num; 349 u32 cnt, num;
@@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
362 struct page *scratch; 362 struct page *scratch;
363 363
364 /* set up xdr stream */ 364 /* set up xdr stream */
365 scratch = alloc_page(GFP_KERNEL); 365 scratch = alloc_page(gfp_flags);
366 if (!scratch) 366 if (!scratch)
367 goto out_err; 367 goto out_err;
368 368
@@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
384 } 384 }
385 385
386 /* read stripe indices */ 386 /* read stripe indices */
387 stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL); 387 stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
388 if (!stripe_indices) 388 if (!stripe_indices)
389 goto out_err_free_scratch; 389 goto out_err_free_scratch;
390 390
@@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
423 423
424 dsaddr = kzalloc(sizeof(*dsaddr) + 424 dsaddr = kzalloc(sizeof(*dsaddr) +
425 (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), 425 (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
426 GFP_KERNEL); 426 gfp_flags);
427 if (!dsaddr) 427 if (!dsaddr)
428 goto out_err_free_stripe_indices; 428 goto out_err_free_stripe_indices;
429 429
@@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
452 for (j = 0; j < mp_count; j++) { 452 for (j = 0; j < mp_count; j++) {
453 if (j == 0) { 453 if (j == 0) {
454 dsaddr->ds_list[i] = decode_and_add_ds(&stream, 454 dsaddr->ds_list[i] = decode_and_add_ds(&stream,
455 ino); 455 ino, gfp_flags);
456 if (dsaddr->ds_list[i] == NULL) 456 if (dsaddr->ds_list[i] == NULL)
457 goto out_err_free_deviceid; 457 goto out_err_free_deviceid;
458 } else { 458 } else {
@@ -503,12 +503,12 @@ out_err:
503 * available devices. 503 * available devices.
504 */ 504 */
505static struct nfs4_file_layout_dsaddr * 505static struct nfs4_file_layout_dsaddr *
506decode_and_add_device(struct inode *inode, struct pnfs_device *dev) 506decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
507{ 507{
508 struct nfs4_file_layout_dsaddr *d, *new; 508 struct nfs4_file_layout_dsaddr *d, *new;
509 long hash; 509 long hash;
510 510
511 new = decode_device(inode, dev); 511 new = decode_device(inode, dev, gfp_flags);
512 if (!new) { 512 if (!new) {
513 printk(KERN_WARNING "%s: Could not decode or add device\n", 513 printk(KERN_WARNING "%s: Could not decode or add device\n",
514 __func__); 514 __func__);
@@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
537 * of available devices, and return it. 537 * of available devices, and return it.
538 */ 538 */
539struct nfs4_file_layout_dsaddr * 539struct nfs4_file_layout_dsaddr *
540get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) 540get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
541{ 541{
542 struct pnfs_device *pdev = NULL; 542 struct pnfs_device *pdev = NULL;
543 u32 max_resp_sz; 543 u32 max_resp_sz;
@@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
556 dprintk("%s inode %p max_resp_sz %u max_pages %d\n", 556 dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
557 __func__, inode, max_resp_sz, max_pages); 557 __func__, inode, max_resp_sz, max_pages);
558 558
559 pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL); 559 pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags);
560 if (pdev == NULL) 560 if (pdev == NULL)
561 return NULL; 561 return NULL;
562 562
563 pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); 563 pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
564 if (pages == NULL) { 564 if (pages == NULL) {
565 kfree(pdev); 565 kfree(pdev);
566 return NULL; 566 return NULL;
567 } 567 }
568 for (i = 0; i < max_pages; i++) { 568 for (i = 0; i < max_pages; i++) {
569 pages[i] = alloc_page(GFP_KERNEL); 569 pages[i] = alloc_page(gfp_flags);
570 if (!pages[i]) 570 if (!pages[i])
571 goto out_free; 571 goto out_free;
572 } 572 }
@@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
587 * Found new device, need to decode it and then add it to the 587 * Found new device, need to decode it and then add it to the
588 * list of known devices for this mountpoint. 588 * list of known devices for this mountpoint.
589 */ 589 */
590 dsaddr = decode_and_add_device(inode, pdev); 590 dsaddr = decode_and_add_device(inode, pdev, gfp_flags);
591out_free: 591out_free:
592 for (i = 0; i < max_pages; i++) 592 for (i = 0; i < max_pages; i++)
593 __free_page(pages[i]); 593 __free_page(pages[i]);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 69c0f3c5ee7a..cf1b339c3937 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -300,6 +300,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
300 ret = nfs4_delay(server->client, &exception->timeout); 300 ret = nfs4_delay(server->client, &exception->timeout);
301 if (ret != 0) 301 if (ret != 0)
302 break; 302 break;
303 case -NFS4ERR_RETRY_UNCACHED_REP:
303 case -NFS4ERR_OLD_STATEID: 304 case -NFS4ERR_OLD_STATEID:
304 exception->retry = 1; 305 exception->retry = 1;
305 break; 306 break;
@@ -3695,6 +3696,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3695 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3696 rpc_delay(task, NFS4_POLL_RETRY_MAX);
3696 task->tk_status = 0; 3697 task->tk_status = 0;
3697 return -EAGAIN; 3698 return -EAGAIN;
3699 case -NFS4ERR_RETRY_UNCACHED_REP:
3698 case -NFS4ERR_OLD_STATEID: 3700 case -NFS4ERR_OLD_STATEID:
3699 task->tk_status = 0; 3701 task->tk_status = 0;
3700 return -EAGAIN; 3702 return -EAGAIN;
@@ -4844,6 +4846,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
4844 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 4846 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
4845 rpc_delay(task, NFS4_POLL_RETRY_MIN); 4847 rpc_delay(task, NFS4_POLL_RETRY_MIN);
4846 task->tk_status = 0; 4848 task->tk_status = 0;
4849 /* fall through */
4850 case -NFS4ERR_RETRY_UNCACHED_REP:
4847 nfs_restart_rpc(task, data->clp); 4851 nfs_restart_rpc(task, data->clp);
4848 return; 4852 return;
4849 } 4853 }
@@ -5479,6 +5483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
5479 break; 5483 break;
5480 case -NFS4ERR_DELAY: 5484 case -NFS4ERR_DELAY:
5481 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5485 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5486 /* fall through */
5487 case -NFS4ERR_RETRY_UNCACHED_REP:
5482 return -EAGAIN; 5488 return -EAGAIN;
5483 default: 5489 default:
5484 nfs4_schedule_lease_recovery(clp); 5490 nfs4_schedule_lease_recovery(clp);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index ff681ab65d31..f57f5281a520 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -383,6 +383,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
383 plh_layouts); 383 plh_layouts);
384 dprintk("%s freeing layout for inode %lu\n", __func__, 384 dprintk("%s freeing layout for inode %lu\n", __func__,
385 lo->plh_inode->i_ino); 385 lo->plh_inode->i_ino);
386 list_del_init(&lo->plh_layouts);
386 pnfs_destroy_layout(NFS_I(lo->plh_inode)); 387 pnfs_destroy_layout(NFS_I(lo->plh_inode));
387 } 388 }
388} 389}
@@ -466,7 +467,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
466static struct pnfs_layout_segment * 467static struct pnfs_layout_segment *
467send_layoutget(struct pnfs_layout_hdr *lo, 468send_layoutget(struct pnfs_layout_hdr *lo,
468 struct nfs_open_context *ctx, 469 struct nfs_open_context *ctx,
469 u32 iomode) 470 u32 iomode,
471 gfp_t gfp_flags)
470{ 472{
471 struct inode *ino = lo->plh_inode; 473 struct inode *ino = lo->plh_inode;
472 struct nfs_server *server = NFS_SERVER(ino); 474 struct nfs_server *server = NFS_SERVER(ino);
@@ -479,7 +481,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
479 dprintk("--> %s\n", __func__); 481 dprintk("--> %s\n", __func__);
480 482
481 BUG_ON(ctx == NULL); 483 BUG_ON(ctx == NULL);
482 lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); 484 lgp = kzalloc(sizeof(*lgp), gfp_flags);
483 if (lgp == NULL) 485 if (lgp == NULL)
484 return NULL; 486 return NULL;
485 487
@@ -487,12 +489,12 @@ send_layoutget(struct pnfs_layout_hdr *lo,
487 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 489 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
488 max_pages = max_resp_sz >> PAGE_SHIFT; 490 max_pages = max_resp_sz >> PAGE_SHIFT;
489 491
490 pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); 492 pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
491 if (!pages) 493 if (!pages)
492 goto out_err_free; 494 goto out_err_free;
493 495
494 for (i = 0; i < max_pages; i++) { 496 for (i = 0; i < max_pages; i++) {
495 pages[i] = alloc_page(GFP_KERNEL); 497 pages[i] = alloc_page(gfp_flags);
496 if (!pages[i]) 498 if (!pages[i])
497 goto out_err_free; 499 goto out_err_free;
498 } 500 }
@@ -508,6 +510,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
508 lgp->args.layout.pages = pages; 510 lgp->args.layout.pages = pages;
509 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 511 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
510 lgp->lsegpp = &lseg; 512 lgp->lsegpp = &lseg;
513 lgp->gfp_flags = gfp_flags;
511 514
512 /* Synchronously retrieve layout information from server and 515 /* Synchronously retrieve layout information from server and
513 * store in lseg. 516 * store in lseg.
@@ -665,11 +668,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
665} 668}
666 669
667static struct pnfs_layout_hdr * 670static struct pnfs_layout_hdr *
668alloc_init_layout_hdr(struct inode *ino) 671alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
669{ 672{
670 struct pnfs_layout_hdr *lo; 673 struct pnfs_layout_hdr *lo;
671 674
672 lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); 675 lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
673 if (!lo) 676 if (!lo)
674 return NULL; 677 return NULL;
675 atomic_set(&lo->plh_refcount, 1); 678 atomic_set(&lo->plh_refcount, 1);
@@ -681,7 +684,7 @@ alloc_init_layout_hdr(struct inode *ino)
681} 684}
682 685
683static struct pnfs_layout_hdr * 686static struct pnfs_layout_hdr *
684pnfs_find_alloc_layout(struct inode *ino) 687pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
685{ 688{
686 struct nfs_inode *nfsi = NFS_I(ino); 689 struct nfs_inode *nfsi = NFS_I(ino);
687 struct pnfs_layout_hdr *new = NULL; 690 struct pnfs_layout_hdr *new = NULL;
@@ -696,7 +699,7 @@ pnfs_find_alloc_layout(struct inode *ino)
696 return nfsi->layout; 699 return nfsi->layout;
697 } 700 }
698 spin_unlock(&ino->i_lock); 701 spin_unlock(&ino->i_lock);
699 new = alloc_init_layout_hdr(ino); 702 new = alloc_init_layout_hdr(ino, gfp_flags);
700 spin_lock(&ino->i_lock); 703 spin_lock(&ino->i_lock);
701 704
702 if (likely(nfsi->layout == NULL)) /* Won the race? */ 705 if (likely(nfsi->layout == NULL)) /* Won the race? */
@@ -756,7 +759,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
756struct pnfs_layout_segment * 759struct pnfs_layout_segment *
757pnfs_update_layout(struct inode *ino, 760pnfs_update_layout(struct inode *ino,
758 struct nfs_open_context *ctx, 761 struct nfs_open_context *ctx,
759 enum pnfs_iomode iomode) 762 enum pnfs_iomode iomode,
763 gfp_t gfp_flags)
760{ 764{
761 struct nfs_inode *nfsi = NFS_I(ino); 765 struct nfs_inode *nfsi = NFS_I(ino);
762 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; 766 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
@@ -767,7 +771,7 @@ pnfs_update_layout(struct inode *ino,
767 if (!pnfs_enabled_sb(NFS_SERVER(ino))) 771 if (!pnfs_enabled_sb(NFS_SERVER(ino)))
768 return NULL; 772 return NULL;
769 spin_lock(&ino->i_lock); 773 spin_lock(&ino->i_lock);
770 lo = pnfs_find_alloc_layout(ino); 774 lo = pnfs_find_alloc_layout(ino, gfp_flags);
771 if (lo == NULL) { 775 if (lo == NULL) {
772 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); 776 dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
773 goto out_unlock; 777 goto out_unlock;
@@ -807,7 +811,7 @@ pnfs_update_layout(struct inode *ino,
807 spin_unlock(&clp->cl_lock); 811 spin_unlock(&clp->cl_lock);
808 } 812 }
809 813
810 lseg = send_layoutget(lo, ctx, iomode); 814 lseg = send_layoutget(lo, ctx, iomode, gfp_flags);
811 if (!lseg && first) { 815 if (!lseg && first) {
812 spin_lock(&clp->cl_lock); 816 spin_lock(&clp->cl_lock);
813 list_del_init(&lo->plh_layouts); 817 list_del_init(&lo->plh_layouts);
@@ -846,7 +850,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
846 goto out; 850 goto out;
847 } 851 }
848 /* Inject layout blob into I/O device driver */ 852 /* Inject layout blob into I/O device driver */
849 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); 853 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
850 if (!lseg || IS_ERR(lseg)) { 854 if (!lseg || IS_ERR(lseg)) {
851 if (!lseg) 855 if (!lseg)
852 status = -ENOMEM; 856 status = -ENOMEM;
@@ -899,7 +903,8 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
899 /* This is first coelesce call for a series of nfs_pages */ 903 /* This is first coelesce call for a series of nfs_pages */
900 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 904 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
901 prev->wb_context, 905 prev->wb_context,
902 IOMODE_READ); 906 IOMODE_READ,
907 GFP_KERNEL);
903 } 908 }
904 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); 909 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
905} 910}
@@ -921,7 +926,8 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
921 /* This is first coelesce call for a series of nfs_pages */ 926 /* This is first coelesce call for a series of nfs_pages */
922 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 927 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
923 prev->wb_context, 928 prev->wb_context,
924 IOMODE_RW); 929 IOMODE_RW,
930 GFP_NOFS);
925 } 931 }
926 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); 932 return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
927} 933}
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index bc4827202e7a..0c015bad9e7a 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type {
70 const u32 id; 70 const u32 id;
71 const char *name; 71 const char *name;
72 struct module *owner; 72 struct module *owner;
73 struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr); 73 struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
74 void (*free_lseg) (struct pnfs_layout_segment *lseg); 74 void (*free_lseg) (struct pnfs_layout_segment *lseg);
75 75
76 /* test for nfs page cache coalescing */ 76 /* test for nfs page cache coalescing */
@@ -126,7 +126,7 @@ void get_layout_hdr(struct pnfs_layout_hdr *lo);
126void put_lseg(struct pnfs_layout_segment *lseg); 126void put_lseg(struct pnfs_layout_segment *lseg);
127struct pnfs_layout_segment * 127struct pnfs_layout_segment *
128pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 128pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
129 enum pnfs_iomode access_type); 129 enum pnfs_iomode access_type, gfp_t gfp_flags);
130void set_pnfs_layoutdriver(struct nfs_server *, u32 id); 130void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
131void unset_pnfs_layoutdriver(struct nfs_server *); 131void unset_pnfs_layoutdriver(struct nfs_server *);
132enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, 132enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
@@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg)
245 245
246static inline struct pnfs_layout_segment * 246static inline struct pnfs_layout_segment *
247pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 247pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
248 enum pnfs_iomode access_type) 248 enum pnfs_iomode access_type, gfp_t gfp_flags)
249{ 249{
250 return NULL; 250 return NULL;
251} 251}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 7cded2b12a05..2bcf0dc306a1 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc)
288 atomic_set(&req->wb_complete, requests); 288 atomic_set(&req->wb_complete, requests);
289 289
290 BUG_ON(desc->pg_lseg != NULL); 290 BUG_ON(desc->pg_lseg != NULL);
291 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); 291 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
292 ClearPageError(page); 292 ClearPageError(page);
293 offset = 0; 293 offset = 0;
294 nbytes = desc->pg_count; 294 nbytes = desc->pg_count;
@@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
351 } 351 }
352 req = nfs_list_entry(data->pages.next); 352 req = nfs_list_entry(data->pages.next);
353 if ((!lseg) && list_is_singular(&data->pages)) 353 if ((!lseg) && list_is_singular(&data->pages))
354 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); 354 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
355 355
356 ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, 356 ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count,
357 0, lseg); 357 0, lseg);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3bd5d7e80f6c..49c715b4ac92 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -939,7 +939,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
939 atomic_set(&req->wb_complete, requests); 939 atomic_set(&req->wb_complete, requests);
940 940
941 BUG_ON(desc->pg_lseg); 941 BUG_ON(desc->pg_lseg);
942 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); 942 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
943 ClearPageError(page); 943 ClearPageError(page);
944 offset = 0; 944 offset = 0;
945 nbytes = desc->pg_count; 945 nbytes = desc->pg_count;
@@ -1013,7 +1013,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
1013 } 1013 }
1014 req = nfs_list_entry(data->pages.next); 1014 req = nfs_list_entry(data->pages.next);
1015 if ((!lseg) && list_is_singular(&data->pages)) 1015 if ((!lseg) && list_is_singular(&data->pages))
1016 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); 1016 lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
1017 1017
1018 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && 1018 if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1019 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) 1019 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
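The pNFS hunks thread a gfp_t argument from the read and write paths down through pnfs_update_layout(), send_layoutget(), the layout-header allocation and the file-layout driver's decode helpers. The point is that the write path (nfs_flush_*) may run under writeback, so it passes GFP_NOFS to avoid recursing back into the filesystem, while the read path keeps GFP_KERNEL. A tiny sketch of the pattern, with an invented structure name:

struct example_layout {
	struct list_head buckets;
};

static struct example_layout *example_alloc_layout(gfp_t gfp_flags)
{
	/* the caller chooses the allocation context: GFP_KERNEL on the
	 * read path, GFP_NOFS on the write path */
	return kzalloc(sizeof(struct example_layout), gfp_flags);
}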
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 643720209a98..9a3e6bbff27b 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -539,25 +539,41 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
539 539
540/* We want to make sure that nobody is heartbeating on top of us -- 540/* We want to make sure that nobody is heartbeating on top of us --
541 * this will help detect an invalid configuration. */ 541 * this will help detect an invalid configuration. */
542static int o2hb_check_last_timestamp(struct o2hb_region *reg) 542static void o2hb_check_last_timestamp(struct o2hb_region *reg)
543{ 543{
544 int node_num, ret;
545 struct o2hb_disk_slot *slot; 544 struct o2hb_disk_slot *slot;
546 struct o2hb_disk_heartbeat_block *hb_block; 545 struct o2hb_disk_heartbeat_block *hb_block;
546 char *errstr;
547 547
548 node_num = o2nm_this_node(); 548 slot = &reg->hr_slots[o2nm_this_node()];
549
550 ret = 1;
551 slot = &reg->hr_slots[node_num];
552 /* Don't check on our 1st timestamp */ 549 /* Don't check on our 1st timestamp */
553 if (slot->ds_last_time) { 550 if (!slot->ds_last_time)
554 hb_block = slot->ds_raw_block; 551 return;
555 552
556 if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time) 553 hb_block = slot->ds_raw_block;
557 ret = 0; 554 if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
558 } 555 le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
556 hb_block->hb_node == slot->ds_node_num)
557 return;
559 558
560 return ret; 559#define ERRSTR1 "Another node is heartbeating on device"
560#define ERRSTR2 "Heartbeat generation mismatch on device"
561#define ERRSTR3 "Heartbeat sequence mismatch on device"
562
563 if (hb_block->hb_node != slot->ds_node_num)
564 errstr = ERRSTR1;
565 else if (le64_to_cpu(hb_block->hb_generation) !=
566 slot->ds_last_generation)
567 errstr = ERRSTR2;
568 else
569 errstr = ERRSTR3;
570
571 mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), "
572 "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name,
573 slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
574 (unsigned long long)slot->ds_last_time, hb_block->hb_node,
575 (unsigned long long)le64_to_cpu(hb_block->hb_generation),
576 (unsigned long long)le64_to_cpu(hb_block->hb_seq));
561} 577}
562 578
563static inline void o2hb_prepare_block(struct o2hb_region *reg, 579static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -983,9 +999,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
983 /* With an up to date view of the slots, we can check that no 999 /* With an up to date view of the slots, we can check that no
984 * other node has been improperly configured to heartbeat in 1000 * other node has been improperly configured to heartbeat in
985 * our slot. */ 1001 * our slot. */
986 if (!o2hb_check_last_timestamp(reg)) 1002 o2hb_check_last_timestamp(reg);
987 mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
988 "in our slot!\n", reg->hr_dev_name);
989 1003
990 /* fill in the proper info for our next heartbeat */ 1004 /* fill in the proper info for our next heartbeat */
991 o2hb_prepare_block(reg, reg->hr_generation); 1005 o2hb_prepare_block(reg, reg->hr_generation);
@@ -999,8 +1013,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
999 } 1013 }
1000 1014
1001 i = -1; 1015 i = -1;
1002 while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { 1016 while((i = find_next_bit(configured_nodes,
1003 1017 O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
1004 change |= o2hb_check_slot(reg, &reg->hr_slots[i]); 1018 change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
1005 } 1019 }
1006 1020
@@ -1690,6 +1704,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1690 struct file *filp = NULL; 1704 struct file *filp = NULL;
1691 struct inode *inode = NULL; 1705 struct inode *inode = NULL;
1692 ssize_t ret = -EINVAL; 1706 ssize_t ret = -EINVAL;
1707 int live_threshold;
1693 1708
1694 if (reg->hr_bdev) 1709 if (reg->hr_bdev)
1695 goto out; 1710 goto out;
@@ -1766,8 +1781,18 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1766 * A node is considered live after it has beat LIVE_THRESHOLD 1781 * A node is considered live after it has beat LIVE_THRESHOLD
1767 * times. We're not steady until we've given them a chance 1782 * times. We're not steady until we've given them a chance
1768 * _after_ our first read. 1783 * _after_ our first read.
1784 * The default threshold is bare minimum so as to limit the delay
1785 * during mounts. For global heartbeat, the threshold doubled for the
1786 * first region.
1769 */ 1787 */
1770 atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1); 1788 live_threshold = O2HB_LIVE_THRESHOLD;
1789 if (o2hb_global_heartbeat_active()) {
1790 spin_lock(&o2hb_live_lock);
1791 if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
1792 live_threshold <<= 1;
1793 spin_unlock(&o2hb_live_lock);
1794 }
1795 atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
1771 1796
1772 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", 1797 hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
1773 reg->hr_item.ci_name); 1798 reg->hr_item.ci_name);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 9fe5b8fd658f..8582e3f4f120 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
2868 bytes = blocks_wanted << sb->s_blocksize_bits; 2868 bytes = blocks_wanted << sb->s_blocksize_bits;
2869 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); 2869 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2870 struct ocfs2_inode_info *oi = OCFS2_I(dir); 2870 struct ocfs2_inode_info *oi = OCFS2_I(dir);
2871 struct ocfs2_alloc_context *data_ac; 2871 struct ocfs2_alloc_context *data_ac = NULL;
2872 struct ocfs2_alloc_context *meta_ac = NULL; 2872 struct ocfs2_alloc_context *meta_ac = NULL;
2873 struct buffer_head *dirdata_bh = NULL; 2873 struct buffer_head *dirdata_bh = NULL;
2874 struct buffer_head *dx_root_bh = NULL; 2874 struct buffer_head *dx_root_bh = NULL;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7540a492eaba..3b179d6cbde0 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1614,7 +1614,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1614 spin_unlock(&dlm->spinlock); 1614 spin_unlock(&dlm->spinlock);
1615 1615
1616 /* Support for global heartbeat and node info was added in 1.1 */ 1616 /* Support for global heartbeat and node info was added in 1.1 */
1617 if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) { 1617 if (dlm->dlm_locking_proto.pv_major > 1 ||
1618 dlm->dlm_locking_proto.pv_minor > 0) {
1618 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); 1619 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
1619 if (status) { 1620 if (status) {
1620 mlog_errno(status); 1621 mlog_errno(status);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index fede57ed005f..84d166328cf7 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2574,6 +2574,9 @@ fail:
2574 res->state &= ~DLM_LOCK_RES_MIGRATING; 2574 res->state &= ~DLM_LOCK_RES_MIGRATING;
2575 wake = 1; 2575 wake = 1;
2576 spin_unlock(&res->spinlock); 2576 spin_unlock(&res->spinlock);
2577 if (dlm_is_host_down(ret))
2578 dlm_wait_for_node_death(dlm, target,
2579 DLM_NODE_DEATH_WAIT_MAX);
2577 goto leave; 2580 goto leave;
2578 } 2581 }
2579 2582
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41565ae52856..89659d6dc206 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1607,6 +1607,9 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
1607 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); 1607 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1608 1608
1609 if (le32_to_cpu(rec->e_cpos) >= trunc_start) { 1609 if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1610 /*
1611 * remove an entire extent record.
1612 */
1610 *trunc_cpos = le32_to_cpu(rec->e_cpos); 1613 *trunc_cpos = le32_to_cpu(rec->e_cpos);
1611 /* 1614 /*
1612 * Skip holes if any. 1615 * Skip holes if any.
@@ -1617,7 +1620,16 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
1617 *blkno = le64_to_cpu(rec->e_blkno); 1620 *blkno = le64_to_cpu(rec->e_blkno);
1618 *trunc_end = le32_to_cpu(rec->e_cpos); 1621 *trunc_end = le32_to_cpu(rec->e_cpos);
1619 } else if (range > trunc_start) { 1622 } else if (range > trunc_start) {
1623 /*
1624 * remove a partial extent record, which means we're
1625 * removing the last extent record.
1626 */
1620 *trunc_cpos = trunc_start; 1627 *trunc_cpos = trunc_start;
1628 /*
1629 * skip hole if any.
1630 */
1631 if (range < *trunc_end)
1632 *trunc_end = range;
1621 *trunc_len = *trunc_end - trunc_start; 1633 *trunc_len = *trunc_end - trunc_start;
1622 coff = trunc_start - le32_to_cpu(rec->e_cpos); 1634 coff = trunc_start - le32_to_cpu(rec->e_cpos);
1623 *blkno = le64_to_cpu(rec->e_blkno) + 1635 *blkno = le64_to_cpu(rec->e_blkno) +
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index b141a44605ca..295d56454e8b 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1260,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
 {
 	struct ocfs2_journal *journal = osb->journal;
 
+	if (ocfs2_is_hard_readonly(osb))
+		return;
+
 	/* No need to queue up our truncate_log as regular cleanup will catch
 	 * that */
 	ocfs2_queue_recovery_completion(journal, osb->slot_num,
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index ade09d7b4271..c99c3d3e7811 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
 bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
 int drm_fb_helper_debug_enter(struct fb_info *info);
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 16ee8b49a200..d4675af963fa 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -546,18 +546,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
 extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool task_ns_capable(struct task_struct *t, int cap);
-
-/**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
- *
- * Return true if the current task has the given superior capability
- * targeted at its own user namespace.
- */
-static inline bool nsown_capable(int cap)
-{
-	return ns_capable(current_user_ns(), cap);
-}
+extern bool nsown_capable(int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 9aeeb0ba2003..be16b61283cc 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -146,6 +146,7 @@ struct cred {
 	void		*security;	/* subjective LSM security */
 #endif
 	struct user_struct *user;	/* real user ID subscription */
+	struct user_namespace *user_ns; /* cached user->user_ns */
 	struct group_info *group_info;	/* supplementary groups for euid/fsgid */
 	struct rcu_head	rcu;		/* RCU deletion hook */
 };
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred)
 #define current_fsgid()		(current_cred_xxx(fsgid))
 #define current_cap()		(current_cred_xxx(cap_effective))
 #define current_user()		(current_cred_xxx(user))
-#define _current_user_ns()	(current_cred_xxx(user)->user_ns)
 #define current_security()	(current_cred_xxx(security))
 
-extern struct user_namespace *current_user_ns(void);
+#ifdef CONFIG_USER_NS
+#define current_user_ns()	(current_cred_xxx(user_ns))
+#else
+extern struct user_namespace init_user_ns;
+#define current_user_ns()	(&init_user_ns)
+#endif
+
 
 #define current_uid_gid(_uid, _gid)		\
 do {						\
diff --git a/include/linux/device.h b/include/linux/device.h
index ab8dfc095709..d08399db6e2c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -442,7 +442,6 @@ struct device {
 	struct dev_archdata	archdata;
 
 	struct device_node	*of_node; /* associated device tree node */
-	const struct of_device_id *of_match; /* matching of_device_id from driver */
 
 	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
 
diff --git a/include/linux/fb.h b/include/linux/fb.h
index df728c1c29ed..6a8274877171 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -832,6 +832,7 @@ struct fb_tile_ops {
 #define FBINFO_CAN_FORCE_OUTPUT     0x200000
 
 struct fb_info {
+	atomic_t count;
 	int node;
 	int flags;
 	struct mutex lock;		/* Lock for open/release/ioctl funcs */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dbd860af0804..cdf9495df204 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -358,7 +358,6 @@ struct inodes_stat_t {
 #define FS_EXTENT_FL			0x00080000 /* Extents */
 #define FS_DIRECTIO_FL			0x00100000 /* Use direct i/o */
 #define FS_NOCOW_FL			0x00800000 /* Do not cow file */
-#define FS_COW_FL			0x02000000 /* Cow file */
 #define FS_RESERVED_FL			0x80000000 /* reserved for ext2 lib */
 
 #define FS_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index eb792cb6d745..bcb793ec7374 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -183,6 +183,7 @@ struct mmc_host {
 	struct work_struct	clk_gate_work; /* delayed clock gate */
 	unsigned int		clk_old;	/* old clock value cache */
 	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
 #endif
 
 	/* host specific block data */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 890dce242639..7e371f7df9c4 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -233,6 +233,7 @@ struct nfs4_layoutget {
 	struct nfs4_layoutget_args args;
 	struct nfs4_layoutget_res res;
 	struct pnfs_layout_segment **lsegpp;
+	gfp_t gfp_flags;
 };
 
 struct nfs4_getdeviceinfo_args {
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8bfe6c1d4365..ae5638480ef2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev);
 static inline int of_driver_match_device(struct device *dev,
 					 const struct device_driver *drv)
 {
-	dev->of_match = of_match_device(drv->of_match_table, dev);
-	return dev->of_match != NULL;
+	return of_match_device(drv->of_match_table, dev) != NULL;
 }
 
 extern struct platform_device *of_dev_get(struct platform_device *dev);
@@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev,
 
 static inline void of_device_node_put(struct device *dev) { }
 
+static inline const struct of_device_id *of_match_device(
+		const struct of_device_id *matches, const struct device *dev)
+{
+	return NULL;
+}
 #endif /* CONFIG_OF_DEVICE */
 
 #endif /* _LINUX_OF_DEVICE_H */
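A note on the of_device.h change above: the !CONFIG_OF_DEVICE stub lets driver probe paths call of_match_device() unconditionally and simply see a NULL match when OF support is compiled out. A minimal sketch of the usual caller pattern (the foo_* names and match table are hypothetical, not part of this patch):

	static const struct of_device_id foo_of_ids[] = {
		{ .compatible = "acme,foo" },
		{ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;

		/* NULL means either "no device-tree match" or "kernel built
		 * without CONFIG_OF_DEVICE"; both cases bail out the same way */
		match = of_match_device(foo_of_ids, &pdev->dev);
		if (!match)
			return -ENODEV;

		return 0;
	}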
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 838c1149251a..eaf4350c0f90 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
 			struct proc_dir_entry *parent,const char *dest) {return NULL;}
 static inline struct proc_dir_entry *proc_mkdir(const char *name,
 	struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
+	mode_t mode, struct proc_dir_entry *parent) { return NULL; }
 
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
 	mode_t mode, struct proc_dir_entry *base,
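The proc_fs.h hunk adds a no-op proc_mkdir_mode() stub so that callers still build when CONFIG_PROC_FS is disabled. A hedged example of the calling convention (names are illustrative only):

	#include <linux/proc_fs.h>

	static struct proc_dir_entry *foo_dir;

	static int __init foo_proc_init(void)
	{
		/* 0555: world-readable, searchable directory under /proc */
		foo_dir = proc_mkdir_mode("foo", 0555, NULL);
		if (!foo_dir)
			return -ENOMEM;	/* also reached when procfs is compiled out */
		return 0;
	}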
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 88bdd010d65d..2fa8d1341a0a 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
 	return outer;
 }
 
-#define	INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define	INET_ECN_dontxmit(sk) \
-	do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+static inline void INET_ECN_xmit(struct sock *sk)
+{
+	inet_sk(sk)->tos |= INET_ECN_ECT_0;
+	if (inet6_sk(sk) != NULL)
+		inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+static inline void INET_ECN_dontxmit(struct sock *sk)
+{
+	inet_sk(sk)->tos &= ~INET_ECN_MASK;
+	if (inet6_sk(sk) != NULL)
+		inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+}
 
 #define IP6_ECN_flow_init(label) do {		\
 	(label) &= ~htonl(INET_ECN_MASK << 20);	\
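For reference, INET_ECN_xmit()/INET_ECN_dontxmit() are called by transport protocols such as TCP and DCCP when they enable or disable ECN on a connection; the rewrite above makes the IPv6 traffic class follow the IPv4 TOS bits instead of marking only v4. A simplified, hypothetical caller:

	static void foo_set_ecn(struct sock *sk, bool on)
	{
		if (on)
			INET_ECN_xmit(sk);	/* mark outgoing packets ECT(0) */
		else
			INET_ECN_dontxmit(sk);	/* clear ECN bits in tos/tclass */
	}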
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 75b8e2968c9b..f57e7d46a453 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -199,7 +199,7 @@ struct llc_pdu_sn {
 	u8 ssap;
 	u8 ctrl_1;
 	u8 ctrl_2;
-};
+} __packed;
 
 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
 {
@@ -211,7 +211,7 @@ struct llc_pdu_un {
 	u8 dsap;
 	u8 ssap;
 	u8 ctrl_1;
-};
+} __packed;
 
 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 {
@@ -359,7 +359,7 @@ struct llc_xid_info {
 	u8 fmt_id;	/* always 0x81 for LLC */
 	u8 type;	/* different if NULL/non-NULL LSAP */
 	u8 rw;		/* sender receive window */
-};
+} __packed;
 
 /**
  * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
@@ -415,7 +415,7 @@ struct llc_frmr_info {
 	u8 curr_ssv;	/* current send state variable val */
 	u8 curr_rsv;	/* current receive state variable */
 	u8 ind_bits;	/* indicator bits set with macro */
-};
+} __packed;
 
 extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
 extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
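The llc_pdu.h hunks add __packed (i.e. __attribute__((packed))) to the on-the-wire PDU headers so the compiler cannot insert padding between fields; these structs are overlaid directly on skb data, so their layout must match the 802.2 wire format byte for byte. A standalone illustration of what packing changes (not part of the patch; sizes are the typical x86 results):

	#include <stddef.h>

	struct hdr_unpacked {
		unsigned char  type;
		unsigned short len;	/* compiler typically pads one byte before this */
	};					/* sizeof is typically 4 */

	struct hdr_packed {
		unsigned char  type;
		unsigned short len;	/* immediately follows 'type' */
	} __attribute__((packed));		/* sizeof is 3, matching the wire layout */

The structs touched by this patch contain only u8 fields, so the attribute mostly documents the wire-format intent and guards against unusual ABIs rather than changing the current layout.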
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 2d3ec5094685..dd82e02ddde3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -169,6 +169,7 @@ struct scsi_device {
 				sdev_dev;
 
 	struct execute_work	ew; /* used to get process context on put */
+	struct work_struct	requeue_work;
 
 	struct scsi_dh_data	*scsi_dh_data;
 	enum scsi_device_state sdev_state;
diff --git a/kernel/capability.c b/kernel/capability.c
index bf0c734d0c12..32a80e08ff4b 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -399,3 +399,15 @@ bool task_ns_capable(struct task_struct *t, int cap)
 	return ns_capable(task_cred_xxx(t, user)->user_ns, cap);
 }
 EXPORT_SYMBOL(task_ns_capable);
+
+/**
+ * nsown_capable - Check superior capability to one's own user_ns
+ * @cap: The capability in question
+ *
+ * Return true if the current task has the given superior capability
+ * targeted at its own user namespace.
+ */
+bool nsown_capable(int cap)
+{
+	return ns_capable(current_user_ns(), cap);
+}
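nsown_capable() becomes an out-of-line helper here, pairing with the capability.h hunk earlier in this patch that turned the static inline into an extern declaration. A hedged sketch of how such a check is typically used at a permission boundary (the structure and function below are illustrative, not from this series):

	static int foo_set_owner(struct foo_object *obj, uid_t uid)
	{
		/* CAP_CHOWN judged relative to the caller's own user namespace */
		if (!nsown_capable(CAP_CHOWN))
			return -EPERM;

		obj->owner = uid;
		return 0;
	}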
diff --git a/kernel/cred.c b/kernel/cred.c
index 5557b55048df..8093c16b84b1 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -54,6 +54,7 @@ struct cred init_cred = {
 	.cap_effective		= CAP_INIT_EFF_SET,
 	.cap_bset		= CAP_INIT_BSET,
 	.user			= INIT_USER,
+	.user_ns		= &init_user_ns,
 	.group_info		= &init_groups,
 #ifdef CONFIG_KEYS
 	.tgcred			= &init_tgcred,
@@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
 		goto error_put;
 	}
 
+	/* cache user_ns in cred. Doesn't need a refcount because it will
+	 * stay pinned by cred->user
+	 */
+	new->user_ns = new->user->user_ns;
+
 #ifdef CONFIG_KEYS
 	/* new threads get their own thread keyrings if their parent already
 	 * had one */
@@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode)
 }
 EXPORT_SYMBOL(set_create_files_as);
 
-struct user_namespace *current_user_ns(void)
-{
-	return _current_user_ns();
-}
-EXPORT_SYMBOL(current_user_ns);
-
 #ifdef CONFIG_DEBUG_CREDENTIALS
 
 bool creds_are_invalid(const struct cred *cred)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6519cf62d9cd..0e17c10f8a9d 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 	/* Add clocksource to the clcoksource list */
 	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
-	clocksource_select();
 	clocksource_enqueue_watchdog(cs);
+	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs)
 
 	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
-	clocksource_select();
 	clocksource_enqueue_watchdog(cs);
+	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index da800ffa810c..723c7637e55a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -522,10 +522,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
  */
 void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
+	int cpu = smp_processor_id();
+
 	/* Set it up only once ! */
 	if (bc->event_handler != tick_handle_oneshot_broadcast) {
 		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
-		int cpu = smp_processor_id();
 
 		bc->event_handler = tick_handle_oneshot_broadcast;
 		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
@@ -551,6 +552,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 		tick_broadcast_set_event(tick_next_period, 1);
 	} else
 		bc->next_event.tv64 = KTIME_MAX;
+	} else {
+		/*
+		 * The first cpu which switches to oneshot mode sets
+		 * the bit for all other cpus which are in the general
+		 * (periodic) broadcast mask. So the bit is set and
+		 * would prevent the first broadcast enter after this
+		 * to program the bc device.
+		 */
+		tick_broadcast_clear_oneshot(cpu);
 	}
 }
556 566
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index bc0ac6b333dc..dfd60192bc2e 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
 	return string(buf, end, uuid, spec);
 }
 
-int kptr_restrict = 1;
+int kptr_restrict __read_mostly;
 
 /*
  * Show a '%p' thing. A kernel extension is that the '%p' is followed
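Context for the kptr_restrict change: the sysctl now defaults to 0 (and is marked __read_mostly), so "%pK" prints real kernel addresses unless an administrator raises the restriction; with the setting at 1 the pointer is replaced by zeros when formatted on behalf of a task without CAP_SYSLOG, and at 2 it is always hidden. A small, hypothetical example of the consumer side:

	/* Typical use in a /proc or sysfs show routine: the pointer is
	 * hidden from readers according to the kptr_restrict setting. */
	seq_printf(m, "object at %pK\n", obj);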
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 570d944daeb5..3f8bce264df6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2358,6 +2358,7 @@ EXPORT_SYMBOL(alloc_pages_exact);
 /**
  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
  * pages on a node.
+ * @nid: the preferred node ID where memory should be allocated
  * @size: the number of bytes to allocate
  * @gfp_mask: GFP flags for the allocation
  *
diff --git a/mm/shmem.c b/mm/shmem.c
index 9e755c166cc5..dfc7069102ee 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1037,7 +1037,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	struct address_space *mapping;
 	unsigned long index;
 	struct inode *inode;
-	bool unlock_mutex = false;
 
 	BUG_ON(!PageLocked(page));
 	mapping = page->mapping;
@@ -1072,15 +1071,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	 * we've taken the spinlock, because shmem_unuse_inode() will
 	 * prune a !swapped inode from the swaplist under both locks.
 	 */
-	if (swap.val && list_empty(&info->swaplist)) {
+	if (swap.val) {
 		mutex_lock(&shmem_swaplist_mutex);
-		/* move instead of add in case we're racing */
-		list_move_tail(&info->swaplist, &shmem_swaplist);
-		unlock_mutex = true;
+		if (list_empty(&info->swaplist))
+			list_add_tail(&info->swaplist, &shmem_swaplist);
 	}
 
 	spin_lock(&info->lock);
-	if (unlock_mutex)
+	if (swap.val)
 		mutex_unlock(&shmem_swaplist_mutex);
 
 	if (index >= info->next_index) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f6b435c80079..8bfd45050a61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -937,7 +937,7 @@ keep_lumpy:
 	 * back off and wait for congestion to clear because further reclaim
 	 * will encounter the same problem
 	 */
-	if (nr_dirty == nr_congested && nr_dirty != 0)
+	if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
 		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_page_list(&free_pages);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index b58a501cf3d1..a873277cb996 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -674,6 +674,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
 	}
 
 	strcpy(dirent->d_name, nameptr);
+	kfree(nameptr);
 
 out:
 	return fake_pdu.offset;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f3bc322c5891..74ef4d4846a4 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
 		nf_bridge->mask |= BRNF_PKT_TYPE;
 	}
 
-	if (br_parse_ip_options(skb))
+	if (pf == PF_INET && br_parse_ip_options(skb))
 		return NF_DROP;
 
 	/* The physdev module checks on this */
diff --git a/net/core/dev.c b/net/core/dev.c
index 92009440d28b..b624fe4d9bd7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5186,27 +5186,27 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5186 /* Fix illegal checksum combinations */ 5186 /* Fix illegal checksum combinations */
5187 if ((features & NETIF_F_HW_CSUM) && 5187 if ((features & NETIF_F_HW_CSUM) &&
5188 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5188 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5189 netdev_info(dev, "mixed HW and IP checksum settings.\n"); 5189 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5190 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5190 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5191 } 5191 }
5192 5192
5193 if ((features & NETIF_F_NO_CSUM) && 5193 if ((features & NETIF_F_NO_CSUM) &&
5194 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5194 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5195 netdev_info(dev, "mixed no checksumming and other settings.\n"); 5195 netdev_warn(dev, "mixed no checksumming and other settings.\n");
5196 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); 5196 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5197 } 5197 }
5198 5198
5199 /* Fix illegal SG+CSUM combinations. */ 5199 /* Fix illegal SG+CSUM combinations. */
5200 if ((features & NETIF_F_SG) && 5200 if ((features & NETIF_F_SG) &&
5201 !(features & NETIF_F_ALL_CSUM)) { 5201 !(features & NETIF_F_ALL_CSUM)) {
5202 netdev_info(dev, 5202 netdev_dbg(dev,
5203 "Dropping NETIF_F_SG since no checksum feature.\n"); 5203 "Dropping NETIF_F_SG since no checksum feature.\n");
5204 features &= ~NETIF_F_SG; 5204 features &= ~NETIF_F_SG;
5205 } 5205 }
5206 5206
5207 /* TSO requires that SG is present as well. */ 5207 /* TSO requires that SG is present as well. */
5208 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 5208 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5209 netdev_info(dev, "Dropping TSO features since no SG feature.\n"); 5209 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5210 features &= ~NETIF_F_ALL_TSO; 5210 features &= ~NETIF_F_ALL_TSO;
5211 } 5211 }
5212 5212
@@ -5216,7 +5216,7 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5216 5216
5217 /* Software GSO depends on SG. */ 5217 /* Software GSO depends on SG. */
5218 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5218 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5219 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5219 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5220 features &= ~NETIF_F_GSO; 5220 features &= ~NETIF_F_GSO;
5221 } 5221 }
5222 5222
@@ -5226,13 +5226,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5226 if (!((features & NETIF_F_GEN_CSUM) || 5226 if (!((features & NETIF_F_GEN_CSUM) ||
5227 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5227 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5228 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5228 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5229 netdev_info(dev, 5229 netdev_dbg(dev,
5230 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5230 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5231 features &= ~NETIF_F_UFO; 5231 features &= ~NETIF_F_UFO;
5232 } 5232 }
5233 5233
5234 if (!(features & NETIF_F_SG)) { 5234 if (!(features & NETIF_F_SG)) {
5235 netdev_info(dev, 5235 netdev_dbg(dev,
5236 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5236 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5237 features &= ~NETIF_F_UFO; 5237 features &= ~NETIF_F_UFO;
5238 } 5238 }
@@ -5414,12 +5414,6 @@ int register_netdevice(struct net_device *dev)
5414 dev->features |= NETIF_F_SOFT_FEATURES; 5414 dev->features |= NETIF_F_SOFT_FEATURES;
5415 dev->wanted_features = dev->features & dev->hw_features; 5415 dev->wanted_features = dev->features & dev->hw_features;
5416 5416
5417 /* Avoid warning from netdev_fix_features() for GSO without SG */
5418 if (!(dev->wanted_features & NETIF_F_SG)) {
5419 dev->wanted_features &= ~NETIF_F_GSO;
5420 dev->features &= ~NETIF_F_GSO;
5421 }
5422
5423 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5417 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5424 * vlan_dev_init() will do the dev->features check, so these features 5418 * vlan_dev_init() will do the dev->features check, so these features
5425 * are enabled only if supported by underlying device. 5419 * are enabled only if supported by underlying device.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ce4596ed1268..bd1224fd216a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
237 &local->dynamic_ps_disable_work); 237 &local->dynamic_ps_disable_work);
238 } 238 }
239 239
240 /* Don't restart the timer if we're not disassociated */
241 if (!ifmgd->associated)
242 return TX_CONTINUE;
243
240 mod_timer(&local->dynamic_ps_timer, jiffies + 244 mod_timer(&local->dynamic_ps_timer, jiffies +
241 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 245 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
242 246
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 51f3af7c4743..059af3120be7 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -572,7 +572,7 @@ static const struct file_operations ip_vs_app_fops = {
 	.open	 = ip_vs_app_open,
 	.read	 = seq_read,
 	.llseek  = seq_lseek,
-	.release = seq_release,
+	.release = seq_release_net,
 };
 #endif
578 578
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index d3fd91bbba49..bf28ac2fc99b 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = {
1046 .open = ip_vs_conn_open, 1046 .open = ip_vs_conn_open,
1047 .read = seq_read, 1047 .read = seq_read,
1048 .llseek = seq_lseek, 1048 .llseek = seq_lseek,
1049 .release = seq_release, 1049 .release = seq_release_net,
1050}; 1050};
1051 1051
1052static const char *ip_vs_origin_name(unsigned flags) 1052static const char *ip_vs_origin_name(unsigned flags)
@@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
1114 .open = ip_vs_conn_sync_open, 1114 .open = ip_vs_conn_sync_open,
1115 .read = seq_read, 1115 .read = seq_read,
1116 .llseek = seq_lseek, 1116 .llseek = seq_lseek,
1117 .release = seq_release, 1117 .release = seq_release_net,
1118}; 1118};
1119 1119
1120#endif 1120#endif
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ea722810faf3..37890f228b19 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2066,7 +2066,7 @@ static const struct file_operations ip_vs_info_fops = {
2066 .open = ip_vs_info_open, 2066 .open = ip_vs_info_open,
2067 .read = seq_read, 2067 .read = seq_read,
2068 .llseek = seq_lseek, 2068 .llseek = seq_lseek,
2069 .release = seq_release_private, 2069 .release = seq_release_net,
2070}; 2070};
2071 2071
2072#endif 2072#endif
@@ -2109,7 +2109,7 @@ static const struct file_operations ip_vs_stats_fops = {
2109 .open = ip_vs_stats_seq_open, 2109 .open = ip_vs_stats_seq_open,
2110 .read = seq_read, 2110 .read = seq_read,
2111 .llseek = seq_lseek, 2111 .llseek = seq_lseek,
2112 .release = single_release, 2112 .release = single_release_net,
2113}; 2113};
2114 2114
2115static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) 2115static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
@@ -2178,7 +2178,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = {
2178 .open = ip_vs_stats_percpu_seq_open, 2178 .open = ip_vs_stats_percpu_seq_open,
2179 .read = seq_read, 2179 .read = seq_read,
2180 .llseek = seq_lseek, 2180 .llseek = seq_lseek,
2181 .release = single_release, 2181 .release = single_release_net,
2182}; 2182};
2183#endif 2183#endif
2184 2184
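The IPVS hunks above switch every seq_file release method to its _net variant (seq_release_net, single_release_net). Those helpers pair with seq_open_net()/single_open_net(), which take a reference on the inode's network namespace at open time; releasing with the plain helpers would leak that reference. A hedged sketch of the usual pairing (foo_* names are illustrative, and this assumes the matching opens use the _net helpers as well):

	static int foo_seq_open(struct inode *inode, struct file *file)
	{
		/* grabs a reference on the inode's network namespace */
		return single_open_net(inode, file, foo_seq_show);
	}

	static const struct file_operations foo_seq_fops = {
		.owner	 = THIS_MODULE,
		.open	 = foo_seq_open,
		.read	 = seq_read,
		.llseek  = seq_lseek,
		/* must pair with single_open_net(), or the netns reference leaks */
		.release = single_release_net,
	};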
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index e6e7ce0d3d55..7102457661d6 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1819,8 +1819,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
1819 goto out; 1819 goto out;
1820 nel = le32_to_cpu(buf[0]); 1820 nel = le32_to_cpu(buf[0]);
1821 1821
1822 printk(KERN_ERR "%s: nel=%d\n", __func__, nel);
1823
1824 last = p->filename_trans; 1822 last = p->filename_trans;
1825 while (last && last->next) 1823 while (last && last->next)
1826 last = last->next; 1824 last = last->next;
@@ -1857,8 +1855,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
1857 goto out; 1855 goto out;
1858 name[len] = 0; 1856 name[len] = 0;
1859 1857
1860 printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name);
1861
1862 rc = next_entry(buf, fp, sizeof(u32) * 4); 1858 rc = next_entry(buf, fp, sizeof(u32) * 4);
1863 if (rc) 1859 if (rc)
1864 goto out; 1860 goto out;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 2727befd158e..b04d28039c16 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0),
139SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), 139SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1),
140 140
141SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), 141SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
142SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0), 142SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0),
143SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), 143SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
144 144
145SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), 145SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1),
@@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
602 .read = ssm2602_read_reg_cache, 602 .read = ssm2602_read_reg_cache,
603 .write = ssm2602_write, 603 .write = ssm2602_write,
604 .set_bias_level = ssm2602_set_bias_level, 604 .set_bias_level = ssm2602_set_bias_level,
605 .reg_cache_size = sizeof(ssm2602_reg), 605 .reg_cache_size = ARRAY_SIZE(ssm2602_reg),
606 .reg_word_size = sizeof(u16), 606 .reg_word_size = sizeof(u16),
607 .reg_cache_default = ssm2602_reg, 607 .reg_cache_default = ssm2602_reg,
608}; 608};
@@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
614 * low = 0x1a 614 * low = 0x1a
615 * high = 0x1b 615 * high = 0x1b
616 */ 616 */
617static int ssm2602_i2c_probe(struct i2c_client *i2c, 617static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c,
618 const struct i2c_device_id *id) 618 const struct i2c_device_id *id)
619{ 619{
620 struct ssm2602_priv *ssm2602; 620 struct ssm2602_priv *ssm2602;
@@ -635,7 +635,7 @@ static int ssm2602_i2c_probe(struct i2c_client *i2c,
635 return ret; 635 return ret;
636} 636}
637 637
638static int ssm2602_i2c_remove(struct i2c_client *client) 638static int __devexit ssm2602_i2c_remove(struct i2c_client *client)
639{ 639{
640 snd_soc_unregister_codec(&client->dev); 640 snd_soc_unregister_codec(&client->dev);
641 kfree(i2c_get_clientdata(client)); 641 kfree(i2c_get_clientdata(client));
@@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = {
655 .owner = THIS_MODULE, 655 .owner = THIS_MODULE,
656 }, 656 },
657 .probe = ssm2602_i2c_probe, 657 .probe = ssm2602_i2c_probe,
658 .remove = ssm2602_i2c_remove, 658 .remove = __devexit_p(ssm2602_i2c_remove),
659 .id_table = ssm2602_i2c_id, 659 .id_table = ssm2602_i2c_id,
660}; 660};
661#endif 661#endif
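The reg_cache_size fix in the ssm2602 hunk above is the classic sizeof-versus-ARRAY_SIZE confusion: the field counts registers, not bytes, and ssm2602_reg is an array of u16, so sizeof() reported twice as many entries as actually exist. A self-contained illustration (the array below is only an example, not the real register defaults):

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const u16 example_reg[] = { 0x0097, 0x0097, 0x0079, 0x0079 };

	/* sizeof(example_reg)     == 8  (bytes)          */
	/* ARRAY_SIZE(example_reg) == 4  (register count) */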
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 48ffd406a71d..a7b8f301bad3 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -601,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
601 .reg_cache_step = 1, 601 .reg_cache_step = 1,
602 .read = uda134x_read_reg_cache, 602 .read = uda134x_read_reg_cache,
603 .write = uda134x_write, 603 .write = uda134x_write,
604#ifdef POWER_OFF_ON_STANDBY
605 .set_bias_level = uda134x_set_bias_level, 604 .set_bias_level = uda134x_set_bias_level,
606#endif
607}; 605};
608 606
609static int __devinit uda134x_codec_probe(struct platform_device *pdev) 607static int __devinit uda134x_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index f52b623bb692..824d1c8c8a35 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing),
692SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), 692SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup),
693 693
694SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, 694SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT,
695 WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv), 695 WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
696SOC_ENUM("ADC Companding Mode", adc_companding), 696SOC_ENUM("ADC Companding Mode", adc_companding),
697SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), 697SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0),
698 698
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 419bf4f5534a..cd22a54b2f14 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
 	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
 	uint32_t conf;
 
-	if (!dai->active)
+	if (dai->active)
 		return;
 
 	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c
index d567c322a2fb..6b1f9d3bf34e 100644
--- a/sound/soc/mid-x86/sst_platform.c
+++ b/sound/soc/mid-x86/sst_platform.c
@@ -376,6 +376,11 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream,
376 return 0; 376 return 0;
377} 377}
378 378
379static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream)
380{
381 return snd_pcm_lib_free_pages(substream);
382}
383
379static struct snd_pcm_ops sst_platform_ops = { 384static struct snd_pcm_ops sst_platform_ops = {
380 .open = sst_platform_open, 385 .open = sst_platform_open,
381 .close = sst_platform_close, 386 .close = sst_platform_close,
@@ -384,6 +389,7 @@ static struct snd_pcm_ops sst_platform_ops = {
384 .trigger = sst_platform_pcm_trigger, 389 .trigger = sst_platform_pcm_trigger,
385 .pointer = sst_platform_pcm_pointer, 390 .pointer = sst_platform_pcm_pointer,
386 .hw_params = sst_platform_pcm_hw_params, 391 .hw_params = sst_platform_pcm_hw_params,
392 .hw_free = sst_platform_pcm_hw_free,
387}; 393};
388 394
389static void sst_pcm_free(struct snd_pcm *pcm) 395static void sst_pcm_free(struct snd_pcm *pcm)
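The sst_platform change adds the missing hw_free counterpart: buffer pages claimed with snd_pcm_lib_malloc_pages() during hw_params must be returned with snd_pcm_lib_free_pages() when the stream is torn down, otherwise they are leaked across open/close cycles. The usual shape of the pair, as a sketch (assuming the driver's hw_params uses snd_pcm_lib_malloc_pages(), which is not shown in this hunk):

	static int foo_pcm_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		return snd_pcm_lib_malloc_pages(substream,
						params_buffer_bytes(params));
	}

	static int foo_pcm_hw_free(struct snd_pcm_substream *substream)
	{
		return snd_pcm_lib_free_pages(substream);
	}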
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d8562ce4de7a..dd55d1069468 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3291,6 +3291,8 @@ int snd_soc_register_card(struct snd_soc_card *card)
3291 if (!card->name || !card->dev) 3291 if (!card->name || !card->dev)
3292 return -EINVAL; 3292 return -EINVAL;
3293 3293
3294 dev_set_drvdata(card->dev, card);
3295
3294 snd_soc_initialize_card_lists(card); 3296 snd_soc_initialize_card_lists(card);
3295 3297
3296 soc_init_card_debugfs(card); 3298 soc_init_card_debugfs(card);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 416538248a4b..0974f957b8fa 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -427,7 +427,7 @@ static void mmap_read_all(void)
 {
 	int i;
 
-	for (i = 0; i < evsel_list->cpus->nr; i++) {
+	for (i = 0; i < evsel_list->nr_mmaps; i++) {
 		if (evsel_list->mmap[i].base)
 			mmap_read(&evsel_list->mmap[i]);
 	}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 11e3c8458362..2f9a337b182f 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -549,7 +549,7 @@ static int test__basic_mmap(void)
549 ++foo; 549 ++foo;
550 } 550 }
551 551
552 while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) { 552 while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
553 struct perf_sample sample; 553 struct perf_sample sample;
554 554
555 if (event->header.type != PERF_RECORD_SAMPLE) { 555 if (event->header.type != PERF_RECORD_SAMPLE) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7e3d6e310bf8..ebfc7cf5f63b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event,
801 } 801 }
802} 802}
803 803
804static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu) 804static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
805{ 805{
806 struct perf_sample sample; 806 struct perf_sample sample;
807 union perf_event *event; 807 union perf_event *event;
808 808
809 while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) { 809 while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
810 perf_session__parse_sample(self, event, &sample); 810 perf_session__parse_sample(self, event, &sample);
811 811
812 if (event->header.type == PERF_RECORD_SAMPLE) 812 if (event->header.type == PERF_RECORD_SAMPLE)
@@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self)
820{ 820{
821 int i; 821 int i;
822 822
823 for (i = 0; i < top.evlist->cpus->nr; i++) 823 for (i = 0; i < top.evlist->nr_mmaps; i++)
824 perf_session__mmap_read_cpu(self, i); 824 perf_session__mmap_read_idx(self, i);
825} 825}
826 826
827static void start_counters(struct perf_evlist *evlist) 827static void start_counters(struct perf_evlist *evlist)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 45da8d186b49..23eb22b05d27 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
166 return NULL; 166 return NULL;
167} 167}
168 168
169union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) 169union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
170{ 170{
171 /* XXX Move this to perf.c, making it generally available */ 171 /* XXX Move this to perf.c, making it generally available */
172 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 172 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
173 struct perf_mmap *md = &evlist->mmap[cpu]; 173 struct perf_mmap *md = &evlist->mmap[idx];
174 unsigned int head = perf_mmap__read_head(md); 174 unsigned int head = perf_mmap__read_head(md);
175 unsigned int old = md->prev; 175 unsigned int old = md->prev;
176 unsigned char *data = md->base + page_size; 176 unsigned char *data = md->base + page_size;
@@ -235,31 +235,37 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
235 235
236void perf_evlist__munmap(struct perf_evlist *evlist) 236void perf_evlist__munmap(struct perf_evlist *evlist)
237{ 237{
238 int cpu; 238 int i;
239 239
240 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 240 for (i = 0; i < evlist->nr_mmaps; i++) {
241 if (evlist->mmap[cpu].base != NULL) { 241 if (evlist->mmap[i].base != NULL) {
242 munmap(evlist->mmap[cpu].base, evlist->mmap_len); 242 munmap(evlist->mmap[i].base, evlist->mmap_len);
243 evlist->mmap[cpu].base = NULL; 243 evlist->mmap[i].base = NULL;
244 } 244 }
245 } 245 }
246
247 free(evlist->mmap);
248 evlist->mmap = NULL;
246} 249}
247 250
248int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 251int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
249{ 252{
250 evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap)); 253 evlist->nr_mmaps = evlist->cpus->nr;
254 if (evlist->cpus->map[0] == -1)
255 evlist->nr_mmaps = evlist->threads->nr;
256 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
251 return evlist->mmap != NULL ? 0 : -ENOMEM; 257 return evlist->mmap != NULL ? 0 : -ENOMEM;
252} 258}
253 259
254static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, 260static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
255 int cpu, int prot, int mask, int fd) 261 int idx, int prot, int mask, int fd)
256{ 262{
257 evlist->mmap[cpu].prev = 0; 263 evlist->mmap[idx].prev = 0;
258 evlist->mmap[cpu].mask = mask; 264 evlist->mmap[idx].mask = mask;
259 evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, 265 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
260 MAP_SHARED, fd, 0); 266 MAP_SHARED, fd, 0);
261 if (evlist->mmap[cpu].base == MAP_FAILED) { 267 if (evlist->mmap[idx].base == MAP_FAILED) {
262 if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit) 268 if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
263 ui__warning("Inherit is not allowed on per-task " 269 ui__warning("Inherit is not allowed on per-task "
264 "events using mmap.\n"); 270 "events using mmap.\n");
265 return -1; 271 return -1;
@@ -269,6 +275,86 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
269 return 0; 275 return 0;
270} 276}
271 277
278static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
279{
280 struct perf_evsel *evsel;
281 int cpu, thread;
282
283 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
284 int output = -1;
285
286 for (thread = 0; thread < evlist->threads->nr; thread++) {
287 list_for_each_entry(evsel, &evlist->entries, node) {
288 int fd = FD(evsel, cpu, thread);
289
290 if (output == -1) {
291 output = fd;
292 if (__perf_evlist__mmap(evlist, evsel, cpu,
293 prot, mask, output) < 0)
294 goto out_unmap;
295 } else {
296 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
297 goto out_unmap;
298 }
299
300 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
301 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
302 goto out_unmap;
303 }
304 }
305 }
306
307 return 0;
308
309out_unmap:
310 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
311 if (evlist->mmap[cpu].base != NULL) {
312 munmap(evlist->mmap[cpu].base, evlist->mmap_len);
313 evlist->mmap[cpu].base = NULL;
314 }
315 }
316 return -1;
317}
318
319static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
320{
321 struct perf_evsel *evsel;
322 int thread;
323
324 for (thread = 0; thread < evlist->threads->nr; thread++) {
325 int output = -1;
326
327 list_for_each_entry(evsel, &evlist->entries, node) {
328 int fd = FD(evsel, 0, thread);
329
330 if (output == -1) {
331 output = fd;
332 if (__perf_evlist__mmap(evlist, evsel, thread,
333 prot, mask, output) < 0)
334 goto out_unmap;
335 } else {
336 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
337 goto out_unmap;
338 }
339
340 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
341 perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
342 goto out_unmap;
343 }
344 }
345
346 return 0;
347
348out_unmap:
349 for (thread = 0; thread < evlist->threads->nr; thread++) {
350 if (evlist->mmap[thread].base != NULL) {
351 munmap(evlist->mmap[thread].base, evlist->mmap_len);
352 evlist->mmap[thread].base = NULL;
353 }
354 }
355 return -1;
356}
357
272/** perf_evlist__mmap - Create per cpu maps to receive events 358/** perf_evlist__mmap - Create per cpu maps to receive events
273 * 359 *
274 * @evlist - list of events 360 * @evlist - list of events
@@ -287,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
287int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) 373int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
288{ 374{
289 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 375 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
290 int mask = pages * page_size - 1, cpu; 376 int mask = pages * page_size - 1;
291 struct perf_evsel *first_evsel, *evsel; 377 struct perf_evsel *evsel;
292 const struct cpu_map *cpus = evlist->cpus; 378 const struct cpu_map *cpus = evlist->cpus;
293 const struct thread_map *threads = evlist->threads; 379 const struct thread_map *threads = evlist->threads;
294 int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); 380 int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
295 381
296 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) 382 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
297 return -ENOMEM; 383 return -ENOMEM;
@@ -301,43 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
301 387
302 evlist->overwrite = overwrite; 388 evlist->overwrite = overwrite;
303 evlist->mmap_len = (pages + 1) * page_size; 389 evlist->mmap_len = (pages + 1) * page_size;
304 first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
305 390
306 list_for_each_entry(evsel, &evlist->entries, node) { 391 list_for_each_entry(evsel, &evlist->entries, node) {
307 if ((evsel->attr.read_format & PERF_FORMAT_ID) && 392 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
308 evsel->sample_id == NULL && 393 evsel->sample_id == NULL &&
309 perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) 394 perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
310 return -ENOMEM; 395 return -ENOMEM;
311
312 for (cpu = 0; cpu < cpus->nr; cpu++) {
313 for (thread = 0; thread < threads->nr; thread++) {
314 int fd = FD(evsel, cpu, thread);
315
316 if (evsel->idx || thread) {
317 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
318 FD(first_evsel, cpu, 0)) != 0)
319 goto out_unmap;
320 } else if (__perf_evlist__mmap(evlist, evsel, cpu,
321 prot, mask, fd) < 0)
322 goto out_unmap;
323
324 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
325 perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
326 goto out_unmap;
327 }
328 }
329 } 396 }
330 397
331 return 0; 398 if (evlist->cpus->map[0] == -1)
399 return perf_evlist__mmap_per_thread(evlist, prot, mask);
332 400
333out_unmap: 401 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
334 for (cpu = 0; cpu < cpus->nr; cpu++) {
335 if (evlist->mmap[cpu].base != NULL) {
336 munmap(evlist->mmap[cpu].base, evlist->mmap_len);
337 evlist->mmap[cpu].base = NULL;
338 }
339 }
340 return -1;
341} 402}
342 403
343int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, 404int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
@@ -348,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
348 if (evlist->threads == NULL) 409 if (evlist->threads == NULL)
349 return -1; 410 return -1;
350 411
351 if (target_tid != -1) 412 if (cpu_list == NULL && target_tid != -1)
352 evlist->cpus = cpu_map__dummy_new(); 413 evlist->cpus = cpu_map__dummy_new();
353 else 414 else
354 evlist->cpus = cpu_map__new(cpu_list); 415 evlist->cpus = cpu_map__new(cpu_list);
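The restructured evlist.c code above keys the ring buffers by an index (one per CPU, or one per thread when cpu_map__dummy_new() is in use) and routes every additional event fd onto that buffer with the PERF_EVENT_IOC_SET_OUTPUT ioctl. The underlying kernel mechanism, reduced to plain perf_event_open() calls as an illustration (attr setup and error handling omitted; this is not tools/perf code):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Share one ring buffer between two events on the same CPU. */
	static void *mmap_shared_ring(struct perf_event_attr *attr,
				      pid_t pid, int cpu, size_t pages)
	{
		size_t page_size = sysconf(_SC_PAGE_SIZE);
		int leader = syscall(__NR_perf_event_open, attr, pid, cpu, -1, 0);
		int other  = syscall(__NR_perf_event_open, attr, pid, cpu, -1, 0);
		void *ring = mmap(NULL, (pages + 1) * page_size,
				  PROT_READ | PROT_WRITE, MAP_SHARED, leader, 0);

		/* redirect the second event's samples into the leader's buffer */
		ioctl(other, PERF_EVENT_IOC_SET_OUTPUT, leader);
		return ring;
	}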
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 8b1cb7a4c5f1..7109d7add14e 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,6 +17,7 @@ struct perf_evlist {
17 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 17 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
18 int nr_entries; 18 int nr_entries;
19 int nr_fds; 19 int nr_fds;
20 int nr_mmaps;
20 int mmap_len; 21 int mmap_len;
21 bool overwrite; 22 bool overwrite;
22 union perf_event event_copy; 23 union perf_event event_copy;
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
46 47
47struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); 48struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
48 49
49union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); 50union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
50 51
51int perf_evlist__alloc_mmap(struct perf_evlist *evlist); 52int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
52int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); 53int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index f5e38451fdc5..99c722672f84 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 			  &cpu, &sample_id_all))
 		return NULL;
 
-	event = perf_evlist__read_on_cpu(evlist, cpu);
+	event = perf_evlist__mmap_read(evlist, cpu);
 	if (event != NULL) {
 		struct perf_evsel *first;
 		PyObject *pyevent = pyrf_event__new(event);